VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@60188

Last change on this file since 60188 was 60188, checked in by vboxsync, 9 years ago

IEM: Fixed a couple of edge cases and broken verification mode.

  • Update enmCpuMode after loading hidden CS flags (prep for recompiling).
  • Fixed retf in 64-bit mode where we would load CS.BASE with zero when returning to 16-bit or 32-bit code.
  • Fixed ESP/SP handling for protected mode exception injection.
  • Fixed handling of lock prefixed INT xx and INT3.
  • Implemented the two string I/O notification functions that would assert in verification mode.
  • The IEMExec* methods must call iemUninitExec to undo poisoning of decoding data members as it will otherwise interfere with verification mode opcode fetching optimizations and other stuff.

The above makes the current bs3-cpu-basic-2 code work in --execute-all-in-iem mode.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 447.2 KB
1/* $Id: IEMAll.cpp 60188 2016-03-24 17:44:05Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2015 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @page pg_iem IEM - Interpreted Execution Manager
20 *
21 * The interpreted execution manager (IEM) is for executing short guest code
22 * sequences that are causing too many exits / virtualization traps. It will
23 * also be used to interpret single instructions, thus replacing the selective
24 * interpreters in EM and IOM.
25 *
26 * Design goals:
27 * - Relatively small footprint, although we favour speed and correctness
28 * over size.
29 * - Reasonably fast.
30 * - Correctly handle lock prefixed instructions.
31 * - Complete instruction set - eventually.
32 * - Refactorable into a recompiler, maybe.
33 * - Replace EMInterpret*.
34 *
35 * Using the existing disassembler has been considered; however, this is thought
36 * to conflict with speed as the disassembler chews things a bit too much while
37 * leaving us with a somewhat complicated state to interpret afterwards.
38 *
39 *
40 * The current code is very much work in progress. You've been warned!
41 *
42 *
43 * @section sec_iem_fpu_instr FPU Instructions
44 *
45 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
46 * same or equivalent instructions on the host FPU. To make life easy, we also
47 * let the FPU prioritize the unmasked exceptions for us. This, however, only
48 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
49 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
50 * can trigger spurious FPU exceptions.
51 *
52 * The guest FPU state is not loaded into the host CPU and kept there till we
53 * leave IEM because the calling conventions have declared an all year open
54 * season on much of the FPU state. For instance an innocent looking call to
55 * memcpy might end up using a whole bunch of XMM or MM registers if the
56 * particular implementation finds it worthwhile.
57 *
58 *
59 * @section sec_iem_logging Logging
60 *
61 * The IEM code uses the \"IEM\" log group for the main logging. The different
62 * logging levels/flags are generally used for the following purposes:
63 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
64 * - Flow (LogFlow): Basic enter/exit IEM state info.
65 * - Level 2 (Log2): ?
66 * - Level 3 (Log3): More detailed enter/exit IEM state info.
67 * - Level 4 (Log4): Decoding mnemonics w/ EIP.
68 * - Level 5 (Log5): Decoding details.
69 * - Level 6 (Log6): Enables/disables the lockstep comparison with REM.
70 * - Level 7 (Log7): iret++ execution logging.
71 *
72 */
73
74/** @def IEM_VERIFICATION_MODE_MINIMAL
75 * Use for pitting IEM against EM or something else in ring-0 or raw-mode
76 * context. */
77#if defined(DOXYGEN_RUNNING)
78# define IEM_VERIFICATION_MODE_MINIMAL
79#endif
80//#define IEM_LOG_MEMORY_WRITES
81#define IEM_IMPLEMENTS_TASKSWITCH
82
83
84/*********************************************************************************************************************************
85* Header Files *
86*********************************************************************************************************************************/
87#define LOG_GROUP LOG_GROUP_IEM
88#include <VBox/vmm/iem.h>
89#include <VBox/vmm/cpum.h>
90#include <VBox/vmm/pdm.h>
91#include <VBox/vmm/pgm.h>
92#include <internal/pgm.h>
93#include <VBox/vmm/iom.h>
94#include <VBox/vmm/em.h>
95#include <VBox/vmm/hm.h>
96#include <VBox/vmm/tm.h>
97#include <VBox/vmm/dbgf.h>
98#include <VBox/vmm/dbgftrace.h>
99#ifdef VBOX_WITH_RAW_MODE_NOT_R0
100# include <VBox/vmm/patm.h>
101# if defined(VBOX_WITH_CALL_RECORD) || defined(REM_MONITOR_CODE_PAGES)
102# include <VBox/vmm/csam.h>
103# endif
104#endif
105#include "IEMInternal.h"
106#ifdef IEM_VERIFICATION_MODE_FULL
107# include <VBox/vmm/rem.h>
108# include <VBox/vmm/mm.h>
109#endif
110#include <VBox/vmm/vm.h>
111#include <VBox/log.h>
112#include <VBox/err.h>
113#include <VBox/param.h>
114#include <VBox/dis.h>
115#include <VBox/disopcode.h>
116#include <iprt/assert.h>
117#include <iprt/string.h>
118#include <iprt/x86.h>
119
120
121
122/*********************************************************************************************************************************
123* Structures and Typedefs *
124*********************************************************************************************************************************/
125/** @typedef PFNIEMOP
126 * Pointer to an opcode decoder function.
127 */
128
129/** @def FNIEMOP_DEF
130 * Define an opcode decoder function.
131 *
132 * We're using macros for this so that adding and removing parameters as well as
133 * tweaking compiler specific attributes becomes easier. See FNIEMOP_CALL
134 *
135 * @param a_Name The function name.
136 */
137
138
139#if defined(__GNUC__) && defined(RT_ARCH_X86)
140typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOP)(PIEMCPU pIemCpu);
141# define FNIEMOP_DEF(a_Name) \
142 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PIEMCPU pIemCpu)
143# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
144 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0)
145# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
146 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1)
147
148#elif defined(_MSC_VER) && defined(RT_ARCH_X86)
149typedef VBOXSTRICTRC (__fastcall * PFNIEMOP)(PIEMCPU pIemCpu);
150# define FNIEMOP_DEF(a_Name) \
151 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PIEMCPU pIemCpu) RT_NO_THROW_DEF
152# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
153 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
154# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
155 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
156
157#elif defined(__GNUC__)
158typedef VBOXSTRICTRC (* PFNIEMOP)(PIEMCPU pIemCpu);
159# define FNIEMOP_DEF(a_Name) \
160 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PIEMCPU pIemCpu)
161# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
162 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0)
163# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
164 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1)
165
166#else
167typedef VBOXSTRICTRC (* PFNIEMOP)(PIEMCPU pIemCpu);
168# define FNIEMOP_DEF(a_Name) \
169 IEM_STATIC VBOXSTRICTRC a_Name(PIEMCPU pIemCpu) RT_NO_THROW_DEF
170# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
171 IEM_STATIC VBOXSTRICTRC a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
172# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
173 IEM_STATIC VBOXSTRICTRC a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
174
175#endif
176
177
178/**
179 * Selector descriptor table entry as fetched by iemMemFetchSelDesc.
180 */
181typedef union IEMSELDESC
182{
183 /** The legacy view. */
184 X86DESC Legacy;
185 /** The long mode view. */
186 X86DESC64 Long;
187} IEMSELDESC;
188/** Pointer to a selector descriptor table entry. */
189typedef IEMSELDESC *PIEMSELDESC;
190
191
192/*********************************************************************************************************************************
193* Defined Constants And Macros *
194*********************************************************************************************************************************/
195/** Temporary hack to disable the double execution. Will be removed in favor
196 * of a dedicated execution mode in EM. */
197//#define IEM_VERIFICATION_MODE_NO_REM
198
199/** Used to shut up GCC warnings about variables that 'may be used uninitialized'
200 * due to GCC lacking knowledge about the value range of a switch. */
201#define IEM_NOT_REACHED_DEFAULT_CASE_RET() default: AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE)
202
203/**
204 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
205 * occasion.
206 */
207#ifdef LOG_ENABLED
208# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
209 do { \
210 /*Log*/ LogAlways(("%s: returning IEM_RETURN_ASPECT_NOT_IMPLEMENTED (line %d)\n", __FUNCTION__, __LINE__)); \
211 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
212 } while (0)
213#else
214# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
215 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
216#endif
217
218/**
219 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
220 * occasion using the supplied logger statement.
221 *
222 * @param a_LoggerArgs What to log on failure.
223 */
224#ifdef LOG_ENABLED
225# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
226 do { \
227 LogAlways((LOG_FN_FMT ": ", __PRETTY_FUNCTION__)); LogAlways(a_LoggerArgs); \
228 /*LogFunc(a_LoggerArgs);*/ \
229 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
230 } while (0)
231#else
232# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
233 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
234#endif
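
/*
 * Illustrative sketch only (not part of the original source): a typical way an
 * instruction implementation bails out via IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG
 * when it hits an aspect it cannot handle yet.  The function name and the
 * condition below are hypothetical.
 */
#if 0 /* example only */
IEM_STATIC VBOXSTRICTRC iemExampleUnimplementedAspect(PIEMCPU pIemCpu, uint8_t bRm)
{
    /* Give up with a logged "not implemented" status; the caller can then fall
       back to another execution engine. */
    if ((bRm & 0xc0) != 0xc0) /* memory operand */
        IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("memory operand not handled: bRm=%#x\n", bRm));
    return VINF_SUCCESS;
}
#endif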
235
236/**
237 * Call an opcode decoder function.
238 *
239 * We're using macros for this so that adding and removing parameters can be
240 * done as we please. See FNIEMOP_DEF.
241 */
242#define FNIEMOP_CALL(a_pfn) (a_pfn)(pIemCpu)
243
244/**
245 * Call a common opcode decoder function taking one extra argument.
246 *
247 * We're using macros for this so that adding and removing parameters can be
248 * done as we please. See FNIEMOP_DEF_1.
249 */
250#define FNIEMOP_CALL_1(a_pfn, a0) (a_pfn)(pIemCpu, a0)
251
252/**
253 * Call a common opcode decoder function taking two extra arguments.
254 *
255 * We're using macros for this so that adding and removing parameters can be
256 * done as we please. See FNIEMOP_DEF_2.
257 */
258#define FNIEMOP_CALL_2(a_pfn, a0, a1) (a_pfn)(pIemCpu, a0, a1)
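
/*
 * Illustrative sketch only (not part of the original source): defining an
 * opcode decoder function with FNIEMOP_DEF and invoking one through
 * FNIEMOP_CALL.  Both function names below are hypothetical.
 */
#if 0 /* example only */
FNIEMOP_DEF(iemOp_example)
{
    /* A trivial decoder body: nothing further to decode, report success. */
    return VINF_SUCCESS;
}

IEM_STATIC VBOXSTRICTRC iemExampleDispatch(PIEMCPU pIemCpu, PFNIEMOP pfnOp)
{
    /* FNIEMOP_CALL hides the calling convention and parameter details. */
    return FNIEMOP_CALL(pfnOp);
}
#endif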
259
260/**
261 * Check if we're currently executing in real or virtual 8086 mode.
262 *
263 * @returns @c true if it is, @c false if not.
264 * @param a_pIemCpu The IEM state of the current CPU.
265 */
266#define IEM_IS_REAL_OR_V86_MODE(a_pIemCpu) (CPUMIsGuestInRealOrV86ModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
267
268/**
269 * Check if we're currently executing in virtual 8086 mode.
270 *
271 * @returns @c true if it is, @c false if not.
272 * @param a_pIemCpu The IEM state of the current CPU.
273 */
274#define IEM_IS_V86_MODE(a_pIemCpu) (CPUMIsGuestInV86ModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
275
276/**
277 * Check if we're currently executing in long mode.
278 *
279 * @returns @c true if it is, @c false if not.
280 * @param a_pIemCpu The IEM state of the current CPU.
281 */
282#define IEM_IS_LONG_MODE(a_pIemCpu) (CPUMIsGuestInLongModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
283
284/**
285 * Check if we're currently executing in real mode.
286 *
287 * @returns @c true if it is, @c false if not.
288 * @param a_pIemCpu The IEM state of the current CPU.
289 */
290#define IEM_IS_REAL_MODE(a_pIemCpu) (CPUMIsGuestInRealModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
291
292/**
293 * Returns a (const) pointer to the CPUMFEATURES for the guest CPU.
294 * @returns PCCPUMFEATURES
295 * @param a_pIemCpu The IEM state of the current CPU.
296 */
297#define IEM_GET_GUEST_CPU_FEATURES(a_pIemCpu) (&(IEMCPU_TO_VM(a_pIemCpu)->cpum.ro.GuestFeatures))
298
299/**
300 * Returns a (const) pointer to the CPUMFEATURES for the host CPU.
301 * @returns PCCPUMFEATURES
302 * @param a_pIemCpu The IEM state of the current CPU.
303 */
304#define IEM_GET_HOST_CPU_FEATURES(a_pIemCpu) (&(IEMCPU_TO_VM(a_pIemCpu)->cpum.ro.HostFeatures))
305
306/**
307 * Evaluates to true if we're presenting an Intel CPU to the guest.
308 */
309#define IEM_IS_GUEST_CPU_INTEL(a_pIemCpu) ( (a_pIemCpu)->enmCpuVendor == CPUMCPUVENDOR_INTEL )
310
311/**
312 * Evaluates to true if we're presenting an AMD CPU to the guest.
313 */
314#define IEM_IS_GUEST_CPU_AMD(a_pIemCpu) ( (a_pIemCpu)->enmCpuVendor == CPUMCPUVENDOR_AMD )
315
316/**
317 * Check if the address is canonical.
318 */
319#define IEM_IS_CANONICAL(a_u64Addr) X86_IS_CANONICAL(a_u64Addr)
320
321
322/*********************************************************************************************************************************
323* Global Variables *
324*********************************************************************************************************************************/
325extern const PFNIEMOP g_apfnOneByteMap[256]; /* not static since we need to forward declare it. */
326
327
328/** Function table for the ADD instruction. */
329IEM_STATIC const IEMOPBINSIZES g_iemAImpl_add =
330{
331 iemAImpl_add_u8, iemAImpl_add_u8_locked,
332 iemAImpl_add_u16, iemAImpl_add_u16_locked,
333 iemAImpl_add_u32, iemAImpl_add_u32_locked,
334 iemAImpl_add_u64, iemAImpl_add_u64_locked
335};
336
337/** Function table for the ADC instruction. */
338IEM_STATIC const IEMOPBINSIZES g_iemAImpl_adc =
339{
340 iemAImpl_adc_u8, iemAImpl_adc_u8_locked,
341 iemAImpl_adc_u16, iemAImpl_adc_u16_locked,
342 iemAImpl_adc_u32, iemAImpl_adc_u32_locked,
343 iemAImpl_adc_u64, iemAImpl_adc_u64_locked
344};
345
346/** Function table for the SUB instruction. */
347IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sub =
348{
349 iemAImpl_sub_u8, iemAImpl_sub_u8_locked,
350 iemAImpl_sub_u16, iemAImpl_sub_u16_locked,
351 iemAImpl_sub_u32, iemAImpl_sub_u32_locked,
352 iemAImpl_sub_u64, iemAImpl_sub_u64_locked
353};
354
355/** Function table for the SBB instruction. */
356IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sbb =
357{
358 iemAImpl_sbb_u8, iemAImpl_sbb_u8_locked,
359 iemAImpl_sbb_u16, iemAImpl_sbb_u16_locked,
360 iemAImpl_sbb_u32, iemAImpl_sbb_u32_locked,
361 iemAImpl_sbb_u64, iemAImpl_sbb_u64_locked
362};
363
364/** Function table for the OR instruction. */
365IEM_STATIC const IEMOPBINSIZES g_iemAImpl_or =
366{
367 iemAImpl_or_u8, iemAImpl_or_u8_locked,
368 iemAImpl_or_u16, iemAImpl_or_u16_locked,
369 iemAImpl_or_u32, iemAImpl_or_u32_locked,
370 iemAImpl_or_u64, iemAImpl_or_u64_locked
371};
372
373/** Function table for the XOR instruction. */
374IEM_STATIC const IEMOPBINSIZES g_iemAImpl_xor =
375{
376 iemAImpl_xor_u8, iemAImpl_xor_u8_locked,
377 iemAImpl_xor_u16, iemAImpl_xor_u16_locked,
378 iemAImpl_xor_u32, iemAImpl_xor_u32_locked,
379 iemAImpl_xor_u64, iemAImpl_xor_u64_locked
380};
381
382/** Function table for the AND instruction. */
383IEM_STATIC const IEMOPBINSIZES g_iemAImpl_and =
384{
385 iemAImpl_and_u8, iemAImpl_and_u8_locked,
386 iemAImpl_and_u16, iemAImpl_and_u16_locked,
387 iemAImpl_and_u32, iemAImpl_and_u32_locked,
388 iemAImpl_and_u64, iemAImpl_and_u64_locked
389};
390
391/** Function table for the CMP instruction.
392 * @remarks Making operand order ASSUMPTIONS.
393 */
394IEM_STATIC const IEMOPBINSIZES g_iemAImpl_cmp =
395{
396 iemAImpl_cmp_u8, NULL,
397 iemAImpl_cmp_u16, NULL,
398 iemAImpl_cmp_u32, NULL,
399 iemAImpl_cmp_u64, NULL
400};
401
402/** Function table for the TEST instruction.
403 * @remarks Making operand order ASSUMPTIONS.
404 */
405IEM_STATIC const IEMOPBINSIZES g_iemAImpl_test =
406{
407 iemAImpl_test_u8, NULL,
408 iemAImpl_test_u16, NULL,
409 iemAImpl_test_u32, NULL,
410 iemAImpl_test_u64, NULL
411};
412
413/** Function table for the BT instruction. */
414IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bt =
415{
416 NULL, NULL,
417 iemAImpl_bt_u16, NULL,
418 iemAImpl_bt_u32, NULL,
419 iemAImpl_bt_u64, NULL
420};
421
422/** Function table for the BTC instruction. */
423IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btc =
424{
425 NULL, NULL,
426 iemAImpl_btc_u16, iemAImpl_btc_u16_locked,
427 iemAImpl_btc_u32, iemAImpl_btc_u32_locked,
428 iemAImpl_btc_u64, iemAImpl_btc_u64_locked
429};
430
431/** Function table for the BTR instruction. */
432IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btr =
433{
434 NULL, NULL,
435 iemAImpl_btr_u16, iemAImpl_btr_u16_locked,
436 iemAImpl_btr_u32, iemAImpl_btr_u32_locked,
437 iemAImpl_btr_u64, iemAImpl_btr_u64_locked
438};
439
440/** Function table for the BTS instruction. */
441IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bts =
442{
443 NULL, NULL,
444 iemAImpl_bts_u16, iemAImpl_bts_u16_locked,
445 iemAImpl_bts_u32, iemAImpl_bts_u32_locked,
446 iemAImpl_bts_u64, iemAImpl_bts_u64_locked
447};
448
449/** Function table for the BSF instruction. */
450IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsf =
451{
452 NULL, NULL,
453 iemAImpl_bsf_u16, NULL,
454 iemAImpl_bsf_u32, NULL,
455 iemAImpl_bsf_u64, NULL
456};
457
458/** Function table for the BSR instruction. */
459IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsr =
460{
461 NULL, NULL,
462 iemAImpl_bsr_u16, NULL,
463 iemAImpl_bsr_u32, NULL,
464 iemAImpl_bsr_u64, NULL
465};
466
467/** Function table for the two-operand IMUL instruction. */
468IEM_STATIC const IEMOPBINSIZES g_iemAImpl_imul_two =
469{
470 NULL, NULL,
471 iemAImpl_imul_two_u16, NULL,
472 iemAImpl_imul_two_u32, NULL,
473 iemAImpl_imul_two_u64, NULL
474};
475
476/** Group 1 /r lookup table. */
477IEM_STATIC const PCIEMOPBINSIZES g_apIemImplGrp1[8] =
478{
479 &g_iemAImpl_add,
480 &g_iemAImpl_or,
481 &g_iemAImpl_adc,
482 &g_iemAImpl_sbb,
483 &g_iemAImpl_and,
484 &g_iemAImpl_sub,
485 &g_iemAImpl_xor,
486 &g_iemAImpl_cmp
487};
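
/*
 * Illustrative sketch only (not part of the original source): how the group 1
 * table above is typically indexed.  The reg field of the ModR/M byte
 * (bits 5:3) selects between ADD, OR, ADC, SBB, AND, SUB, XOR and CMP, and the
 * returned IEMOPBINSIZES entry then supplies the normal/locked workers for
 * each operand size.  The function name below is hypothetical.
 */
#if 0 /* example only */
IEM_STATIC PCIEMOPBINSIZES iemExamplePickGrp1Impl(uint8_t bRm)
{
    return g_apIemImplGrp1[(bRm >> 3) & 7];
}
#endif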
488
489/** Function table for the INC instruction. */
490IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_inc =
491{
492 iemAImpl_inc_u8, iemAImpl_inc_u8_locked,
493 iemAImpl_inc_u16, iemAImpl_inc_u16_locked,
494 iemAImpl_inc_u32, iemAImpl_inc_u32_locked,
495 iemAImpl_inc_u64, iemAImpl_inc_u64_locked
496};
497
498/** Function table for the DEC instruction. */
499IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_dec =
500{
501 iemAImpl_dec_u8, iemAImpl_dec_u8_locked,
502 iemAImpl_dec_u16, iemAImpl_dec_u16_locked,
503 iemAImpl_dec_u32, iemAImpl_dec_u32_locked,
504 iemAImpl_dec_u64, iemAImpl_dec_u64_locked
505};
506
507/** Function table for the NEG instruction. */
508IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_neg =
509{
510 iemAImpl_neg_u8, iemAImpl_neg_u8_locked,
511 iemAImpl_neg_u16, iemAImpl_neg_u16_locked,
512 iemAImpl_neg_u32, iemAImpl_neg_u32_locked,
513 iemAImpl_neg_u64, iemAImpl_neg_u64_locked
514};
515
516/** Function table for the NOT instruction. */
517IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_not =
518{
519 iemAImpl_not_u8, iemAImpl_not_u8_locked,
520 iemAImpl_not_u16, iemAImpl_not_u16_locked,
521 iemAImpl_not_u32, iemAImpl_not_u32_locked,
522 iemAImpl_not_u64, iemAImpl_not_u64_locked
523};
524
525
526/** Function table for the ROL instruction. */
527IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rol =
528{
529 iemAImpl_rol_u8,
530 iemAImpl_rol_u16,
531 iemAImpl_rol_u32,
532 iemAImpl_rol_u64
533};
534
535/** Function table for the ROR instruction. */
536IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_ror =
537{
538 iemAImpl_ror_u8,
539 iemAImpl_ror_u16,
540 iemAImpl_ror_u32,
541 iemAImpl_ror_u64
542};
543
544/** Function table for the RCL instruction. */
545IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcl =
546{
547 iemAImpl_rcl_u8,
548 iemAImpl_rcl_u16,
549 iemAImpl_rcl_u32,
550 iemAImpl_rcl_u64
551};
552
553/** Function table for the RCR instruction. */
554IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcr =
555{
556 iemAImpl_rcr_u8,
557 iemAImpl_rcr_u16,
558 iemAImpl_rcr_u32,
559 iemAImpl_rcr_u64
560};
561
562/** Function table for the SHL instruction. */
563IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shl =
564{
565 iemAImpl_shl_u8,
566 iemAImpl_shl_u16,
567 iemAImpl_shl_u32,
568 iemAImpl_shl_u64
569};
570
571/** Function table for the SHR instruction. */
572IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shr =
573{
574 iemAImpl_shr_u8,
575 iemAImpl_shr_u16,
576 iemAImpl_shr_u32,
577 iemAImpl_shr_u64
578};
579
580/** Function table for the SAR instruction. */
581IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_sar =
582{
583 iemAImpl_sar_u8,
584 iemAImpl_sar_u16,
585 iemAImpl_sar_u32,
586 iemAImpl_sar_u64
587};
588
589
590/** Function table for the MUL instruction. */
591IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_mul =
592{
593 iemAImpl_mul_u8,
594 iemAImpl_mul_u16,
595 iemAImpl_mul_u32,
596 iemAImpl_mul_u64
597};
598
599/** Function table for the IMUL instruction working implicitly on rAX. */
600IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_imul =
601{
602 iemAImpl_imul_u8,
603 iemAImpl_imul_u16,
604 iemAImpl_imul_u32,
605 iemAImpl_imul_u64
606};
607
608/** Function table for the DIV instruction. */
609IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_div =
610{
611 iemAImpl_div_u8,
612 iemAImpl_div_u16,
613 iemAImpl_div_u32,
614 iemAImpl_div_u64
615};
616
617/** Function table for the IDIV instruction. */
618IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_idiv =
619{
620 iemAImpl_idiv_u8,
621 iemAImpl_idiv_u16,
622 iemAImpl_idiv_u32,
623 iemAImpl_idiv_u64
624};
625
626/** Function table for the SHLD instruction */
627IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shld =
628{
629 iemAImpl_shld_u16,
630 iemAImpl_shld_u32,
631 iemAImpl_shld_u64,
632};
633
634/** Function table for the SHRD instruction */
635IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shrd =
636{
637 iemAImpl_shrd_u16,
638 iemAImpl_shrd_u32,
639 iemAImpl_shrd_u64,
640};
641
642
643/** Function table for the PUNPCKLBW instruction */
644IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklbw = { iemAImpl_punpcklbw_u64, iemAImpl_punpcklbw_u128 };
645/** Function table for the PUNPCKLWD instruction */
646IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklwd = { iemAImpl_punpcklwd_u64, iemAImpl_punpcklwd_u128 };
647/** Function table for the PUNPCKLDQ instruction */
648IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpckldq = { iemAImpl_punpckldq_u64, iemAImpl_punpckldq_u128 };
649/** Function table for the PUNPCKLQDQ instruction */
650IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklqdq = { NULL, iemAImpl_punpcklqdq_u128 };
651
652/** Function table for the PUNPCKHBW instruction */
653IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhbw = { iemAImpl_punpckhbw_u64, iemAImpl_punpckhbw_u128 };
654/** Function table for the PUNPCKHWD instruction */
655IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhwd = { iemAImpl_punpckhwd_u64, iemAImpl_punpckhwd_u128 };
656/** Function table for the PUNPCKHDQ instruction */
657IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhdq = { iemAImpl_punpckhdq_u64, iemAImpl_punpckhdq_u128 };
658/** Function table for the PUNPCKHQDQ instruction */
659IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhqdq = { NULL, iemAImpl_punpckhqdq_u128 };
660
661/** Function table for the PXOR instruction */
662IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pxor = { iemAImpl_pxor_u64, iemAImpl_pxor_u128 };
663/** Function table for the PCMPEQB instruction */
664IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqb = { iemAImpl_pcmpeqb_u64, iemAImpl_pcmpeqb_u128 };
665/** Function table for the PCMPEQW instruction */
666IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqw = { iemAImpl_pcmpeqw_u64, iemAImpl_pcmpeqw_u128 };
667/** Function table for the PCMPEQD instruction */
668IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqd = { iemAImpl_pcmpeqd_u64, iemAImpl_pcmpeqd_u128 };
669
670
671#if defined(IEM_VERIFICATION_MODE_MINIMAL) || defined(IEM_LOG_MEMORY_WRITES)
672/** What IEM just wrote. */
673uint8_t g_abIemWrote[256];
674/** How much IEM just wrote. */
675size_t g_cbIemWrote;
676#endif
677
678
679/*********************************************************************************************************************************
680* Internal Functions *
681*********************************************************************************************************************************/
682IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PIEMCPU pIemCpu, uint16_t uErr);
683IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PIEMCPU pIemCpu);
684IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFault0(PIEMCPU pIemCpu);
685IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PIEMCPU pIemCpu, uint16_t uSel);
686/*IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresent(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess);*/
687IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel);
688IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PIEMCPU pIemCpu, uint16_t uErr);
689IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel);
690IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PIEMCPU pIemCpu, uint16_t uErr);
691IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault(PIEMCPU pIemCpu, uint16_t uErr);
692IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PIEMCPU pIemCpu);
693IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PIEMCPU pIemCpu, RTSEL uSel);
694IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBounds(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess);
695IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PIEMCPU pIemCpu, RTSEL Sel);
696IEM_STATIC VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess);
697IEM_STATIC VBOXSTRICTRC iemRaisePageFault(PIEMCPU pIemCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
698IEM_STATIC VBOXSTRICTRC iemRaiseAlignmentCheckException(PIEMCPU pIemCpu);
699IEM_STATIC VBOXSTRICTRC iemMemMap(PIEMCPU pIemCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess);
700IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PIEMCPU pIemCpu, void *pvMem, uint32_t fAccess);
701IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
702IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
703IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
704IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
705IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
706IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
707IEM_STATIC VBOXSTRICTRC iemMemFetchSelDescWithErr(PIEMCPU pIemCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode);
708IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PIEMCPU pIemCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt);
709IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PIEMCPU pIemCpu, void *pvMem, uint64_t uNewRsp);
710IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PIEMCPU pIemCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp);
711IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PIEMCPU pIemCpu, uint32_t u32Value);
712IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PIEMCPU pIemCpu, uint16_t u16Value);
713IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PIEMCPU pIemCpu, uint16_t uSel);
714IEM_STATIC uint16_t iemSRegFetchU16(PIEMCPU pIemCpu, uint8_t iSegReg);
715
716#if defined(IEM_VERIFICATION_MODE_FULL) && !defined(IEM_VERIFICATION_MODE_MINIMAL)
717IEM_STATIC PIEMVERIFYEVTREC iemVerifyAllocRecord(PIEMCPU pIemCpu);
718#endif
719IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue);
720IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue);
721
722
723
724/**
725 * Sets the pass up status.
726 *
727 * @returns VINF_SUCCESS.
728 * @param pIemCpu The per CPU IEM state of the calling thread.
729 * @param rcPassUp The pass up status. Must be informational.
730 * VINF_SUCCESS is not allowed.
731 */
732IEM_STATIC int iemSetPassUpStatus(PIEMCPU pIemCpu, VBOXSTRICTRC rcPassUp)
733{
734 AssertRC(VBOXSTRICTRC_VAL(rcPassUp)); Assert(rcPassUp != VINF_SUCCESS);
735
736 int32_t const rcOldPassUp = pIemCpu->rcPassUp;
737 if (rcOldPassUp == VINF_SUCCESS)
738 pIemCpu->rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
739 /* If both are EM scheduling codes, use EM priority rules. */
740 else if ( rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST
741 && rcPassUp >= VINF_EM_FIRST && rcPassUp <= VINF_EM_LAST)
742 {
743 if (rcPassUp < rcOldPassUp)
744 {
745 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
746 pIemCpu->rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
747 }
748 else
749 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
750 }
751 /* Override EM scheduling with specific status code. */
752 else if (rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST)
753 {
754 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
755 pIemCpu->rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
756 }
757 /* Don't override specific status code, first come first served. */
758 else
759 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
760 return VINF_SUCCESS;
761}
762
763
764/**
765 * Calculates the CPU mode.
766 *
767 * This is mainly for updating IEMCPU::enmCpuMode.
768 *
769 * @returns CPU mode.
770 * @param pCtx The register context for the CPU.
771 */
772DECLINLINE(IEMMODE) iemCalcCpuMode(PCPUMCTX pCtx)
773{
774 if (CPUMIsGuestIn64BitCodeEx(pCtx))
775 return IEMMODE_64BIT;
776 if (pCtx->cs.Attr.n.u1DefBig) /** @todo check if this is correct... */
777 return IEMMODE_32BIT;
778 return IEMMODE_16BIT;
779}
780
781
782/**
783 * Initializes the execution state.
784 *
785 * @param pIemCpu The per CPU IEM state.
786 * @param fBypassHandlers Whether to bypass access handlers.
787 *
788 * @remarks Callers of this must call iemUninitExec() to undo potentially fatal
789 * side-effects in strict builds.
790 */
791DECLINLINE(void) iemInitExec(PIEMCPU pIemCpu, bool fBypassHandlers)
792{
793 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
794 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
795
796 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
797 Assert(pIemCpu->PendingCommit.enmFn == IEMCOMMIT_INVALID);
798
799#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
800 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
801 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
802 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
803 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
804 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
805 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
806 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
807 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
808#endif
809
810#ifdef VBOX_WITH_RAW_MODE_NOT_R0
811 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
812#endif
813 pIemCpu->uCpl = CPUMGetGuestCPL(pVCpu);
814 pIemCpu->enmCpuMode = iemCalcCpuMode(pCtx);
815#ifdef VBOX_STRICT
816 pIemCpu->enmDefAddrMode = (IEMMODE)0xc0fe;
817 pIemCpu->enmEffAddrMode = (IEMMODE)0xc0fe;
818 pIemCpu->enmDefOpSize = (IEMMODE)0xc0fe;
819 pIemCpu->enmEffOpSize = (IEMMODE)0xc0fe;
820 pIemCpu->fPrefixes = (IEMMODE)0xfeedbeef;
821 pIemCpu->uRexReg = 127;
822 pIemCpu->uRexB = 127;
823 pIemCpu->uRexIndex = 127;
824 pIemCpu->iEffSeg = 127;
825 pIemCpu->offOpcode = 127;
826 pIemCpu->cbOpcode = 127;
827#endif
828
829 pIemCpu->cActiveMappings = 0;
830 pIemCpu->iNextMapping = 0;
831 pIemCpu->rcPassUp = VINF_SUCCESS;
832 pIemCpu->fBypassHandlers = fBypassHandlers;
833#ifdef VBOX_WITH_RAW_MODE_NOT_R0
834 pIemCpu->fInPatchCode = pIemCpu->uCpl == 0
835 && pCtx->cs.u64Base == 0
836 && pCtx->cs.u32Limit == UINT32_MAX
837 && PATMIsPatchGCAddr(IEMCPU_TO_VM(pIemCpu), pCtx->eip);
838 if (!pIemCpu->fInPatchCode)
839 CPUMRawLeave(pVCpu, VINF_SUCCESS);
840#endif
841
842#ifdef IEM_VERIFICATION_MODE_FULL
843 pIemCpu->fNoRemSavedByExec = pIemCpu->fNoRem;
844 pIemCpu->fNoRem = true;
845#endif
846}
847
848
849/**
850 * Counterpart to #iemInitExec that undoes evil strict-build stuff.
851 *
852 * @param pIemCpu The per CPU IEM state.
853 */
854DECLINLINE(void) iemUninitExec(PIEMCPU pIemCpu)
855{
856#ifdef IEM_VERIFICATION_MODE_FULL
857 pIemCpu->fNoRem = pIemCpu->fNoRemSavedByExec;
858#endif
859#ifdef VBOX_STRICT
860 pIemCpu->cbOpcode = 0;
861#else
862 NOREF(pIemCpu);
863#endif
864}
865
866
867/**
868 * Initializes the decoder state.
869 *
870 * @param pIemCpu The per CPU IEM state.
871 * @param fBypassHandlers Whether to bypass access handlers.
872 */
873DECLINLINE(void) iemInitDecoder(PIEMCPU pIemCpu, bool fBypassHandlers)
874{
875 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
876 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
877
878 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
879 Assert(pIemCpu->PendingCommit.enmFn == IEMCOMMIT_INVALID);
880
881#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
882 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
883 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
884 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
885 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
886 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
887 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
888 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
889 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
890#endif
891
892#ifdef VBOX_WITH_RAW_MODE_NOT_R0
893 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
894#endif
895 pIemCpu->uCpl = CPUMGetGuestCPL(pVCpu);
896#ifdef IEM_VERIFICATION_MODE_FULL
897 if (pIemCpu->uInjectCpl != UINT8_MAX)
898 pIemCpu->uCpl = pIemCpu->uInjectCpl;
899#endif
900 IEMMODE enmMode = iemCalcCpuMode(pCtx);
901 pIemCpu->enmCpuMode = enmMode;
902 pIemCpu->enmDefAddrMode = enmMode; /** @todo check if this is correct... */
903 pIemCpu->enmEffAddrMode = enmMode;
904 if (enmMode != IEMMODE_64BIT)
905 {
906 pIemCpu->enmDefOpSize = enmMode; /** @todo check if this is correct... */
907 pIemCpu->enmEffOpSize = enmMode;
908 }
909 else
910 {
911 pIemCpu->enmDefOpSize = IEMMODE_32BIT;
912 pIemCpu->enmEffOpSize = IEMMODE_32BIT;
913 }
914 pIemCpu->fPrefixes = 0;
915 pIemCpu->uRexReg = 0;
916 pIemCpu->uRexB = 0;
917 pIemCpu->uRexIndex = 0;
918 pIemCpu->iEffSeg = X86_SREG_DS;
919 pIemCpu->offOpcode = 0;
920 pIemCpu->cbOpcode = 0;
921 pIemCpu->cActiveMappings = 0;
922 pIemCpu->iNextMapping = 0;
923 pIemCpu->rcPassUp = VINF_SUCCESS;
924 pIemCpu->fBypassHandlers = fBypassHandlers;
925#ifdef VBOX_WITH_RAW_MODE_NOT_R0
926 pIemCpu->fInPatchCode = pIemCpu->uCpl == 0
927 && pCtx->cs.u64Base == 0
928 && pCtx->cs.u32Limit == UINT32_MAX
929 && PATMIsPatchGCAddr(IEMCPU_TO_VM(pIemCpu), pCtx->eip);
930 if (!pIemCpu->fInPatchCode)
931 CPUMRawLeave(pVCpu, VINF_SUCCESS);
932#endif
933
934#ifdef DBGFTRACE_ENABLED
935 switch (enmMode)
936 {
937 case IEMMODE_64BIT:
938 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pIemCpu->uCpl, pCtx->rip);
939 break;
940 case IEMMODE_32BIT:
941 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pIemCpu->uCpl, pCtx->cs.Sel, pCtx->eip);
942 break;
943 case IEMMODE_16BIT:
944 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pIemCpu->uCpl, pCtx->cs.Sel, pCtx->eip);
945 break;
946 }
947#endif
948}
949
950
951/**
952 * Prefetch opcodes the first time when starting to execute.
953 *
954 * @returns Strict VBox status code.
955 * @param pIemCpu The IEM state.
956 * @param fBypassHandlers Whether to bypass access handlers.
957 */
958IEM_STATIC VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PIEMCPU pIemCpu, bool fBypassHandlers)
959{
960#ifdef IEM_VERIFICATION_MODE_FULL
961 uint8_t const cbOldOpcodes = pIemCpu->cbOpcode;
962#endif
963 iemInitDecoder(pIemCpu, fBypassHandlers);
964
965 /*
966 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
967 *
968 * First translate CS:rIP to a physical address.
969 */
970 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
971 uint32_t cbToTryRead;
972 RTGCPTR GCPtrPC;
973 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
974 {
975 cbToTryRead = PAGE_SIZE;
976 GCPtrPC = pCtx->rip;
977 if (!IEM_IS_CANONICAL(GCPtrPC))
978 return iemRaiseGeneralProtectionFault0(pIemCpu);
979 cbToTryRead = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
980 }
981 else
982 {
983 uint32_t GCPtrPC32 = pCtx->eip;
984 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || pIemCpu->enmCpuMode == IEMMODE_32BIT, ("%04x:%RX64\n", pCtx->cs.Sel, pCtx->rip));
985 if (GCPtrPC32 > pCtx->cs.u32Limit)
986 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
987 cbToTryRead = pCtx->cs.u32Limit - GCPtrPC32 + 1;
988 if (!cbToTryRead) /* overflowed */
989 {
990 Assert(GCPtrPC32 == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
991 cbToTryRead = UINT32_MAX;
992 }
993 GCPtrPC = (uint32_t)pCtx->cs.u64Base + GCPtrPC32;
994 Assert(GCPtrPC <= UINT32_MAX);
995 }
996
997#ifdef VBOX_WITH_RAW_MODE_NOT_R0
998 /* Allow interpretation of patch manager code blocks since they can for
999 instance throw #PFs for perfectly good reasons. */
1000 if (pIemCpu->fInPatchCode)
1001 {
1002 size_t cbRead = 0;
1003 int rc = PATMReadPatchCode(IEMCPU_TO_VM(pIemCpu), GCPtrPC, pIemCpu->abOpcode, sizeof(pIemCpu->abOpcode), &cbRead);
1004 AssertRCReturn(rc, rc);
1005 pIemCpu->cbOpcode = (uint8_t)cbRead; Assert(pIemCpu->cbOpcode == cbRead); Assert(cbRead > 0);
1006 return VINF_SUCCESS;
1007 }
1008#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1009
1010 RTGCPHYS GCPhys;
1011 uint64_t fFlags;
1012 int rc = PGMGstGetPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrPC, &fFlags, &GCPhys);
1013 if (RT_FAILURE(rc))
1014 {
1015 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
1016 return iemRaisePageFault(pIemCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, rc);
1017 }
1018 if (!(fFlags & X86_PTE_US) && pIemCpu->uCpl == 3)
1019 {
1020 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
1021 return iemRaisePageFault(pIemCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1022 }
1023 if ((fFlags & X86_PTE_PAE_NX) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
1024 {
1025 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
1026 return iemRaisePageFault(pIemCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1027 }
1028 GCPhys |= GCPtrPC & PAGE_OFFSET_MASK;
1029 /** @todo Check reserved bits and such stuff. PGM is better at doing
1030 * that, so do it when implementing the guest virtual address
1031 * TLB... */
1032
1033#ifdef IEM_VERIFICATION_MODE_FULL
1034 /*
1035 * Optimistic optimization: Use unconsumed opcode bytes from the previous
1036 * instruction.
1037 */
1038 /** @todo optimize this differently by not using PGMPhysRead. */
1039 RTGCPHYS const offPrevOpcodes = GCPhys - pIemCpu->GCPhysOpcodes;
1040 pIemCpu->GCPhysOpcodes = GCPhys;
1041 if ( offPrevOpcodes < cbOldOpcodes
1042 && PAGE_SIZE - (GCPhys & PAGE_OFFSET_MASK) > sizeof(pIemCpu->abOpcode))
1043 {
1044 uint8_t cbNew = cbOldOpcodes - (uint8_t)offPrevOpcodes;
1045 Assert(cbNew <= RT_ELEMENTS(pIemCpu->abOpcode));
1046 memmove(&pIemCpu->abOpcode[0], &pIemCpu->abOpcode[offPrevOpcodes], cbNew);
1047 pIemCpu->cbOpcode = cbNew;
1048 return VINF_SUCCESS;
1049 }
1050#endif
1051
1052 /*
1053 * Read the bytes at this address.
1054 */
1055 PVM pVM = IEMCPU_TO_VM(pIemCpu);
1056#if defined(IN_RING3) && defined(VBOX_WITH_RAW_MODE_NOT_R0)
1057 size_t cbActual;
1058 if ( PATMIsEnabled(pVM)
1059 && RT_SUCCESS(PATMR3ReadOrgInstr(pVM, GCPtrPC, pIemCpu->abOpcode, sizeof(pIemCpu->abOpcode), &cbActual)))
1060 {
1061 Log4(("decode - Read %u unpatched bytes at %RGv\n", cbActual, GCPtrPC));
1062 Assert(cbActual > 0);
1063 pIemCpu->cbOpcode = (uint8_t)cbActual;
1064 }
1065 else
1066#endif
1067 {
1068 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1069 if (cbToTryRead > cbLeftOnPage)
1070 cbToTryRead = cbLeftOnPage;
1071 if (cbToTryRead > sizeof(pIemCpu->abOpcode))
1072 cbToTryRead = sizeof(pIemCpu->abOpcode);
1073
1074 if (!pIemCpu->fBypassHandlers)
1075 {
1076 VBOXSTRICTRC rcStrict = PGMPhysRead(pVM, GCPhys, pIemCpu->abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
1077 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1078 { /* likely */ }
1079 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1080 {
1081 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1082 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1083 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
1084 }
1085 else
1086 {
1087 Log((RT_SUCCESS(rcStrict)
1088 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1089 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1090 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1091 return rcStrict;
1092 }
1093 }
1094 else
1095 {
1096 rc = PGMPhysSimpleReadGCPhys(pVM, pIemCpu->abOpcode, GCPhys, cbToTryRead);
1097 if (RT_SUCCESS(rc))
1098 { /* likely */ }
1099 else
1100 {
1101 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
1102 GCPtrPC, GCPhys, cbToTryRead, rc));
1103 return rc;
1104 }
1105 }
1106 pIemCpu->cbOpcode = cbToTryRead;
1107 }
1108
1109 return VINF_SUCCESS;
1110}
1111
1112
1113/**
1114 * Tries to fetch at least @a cbMin bytes more opcodes, raising the appropriate
1115 * exception if it fails.
1116 *
1117 * @returns Strict VBox status code.
1118 * @param pIemCpu The IEM state.
1119 * @param cbMin The minimum number of bytes relative to offOpcode
1120 * that must be read.
1121 */
1122IEM_STATIC VBOXSTRICTRC iemOpcodeFetchMoreBytes(PIEMCPU pIemCpu, size_t cbMin)
1123{
1124 /*
1125 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1126 *
1127 * First translate CS:rIP to a physical address.
1128 */
1129 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1130 uint8_t cbLeft = pIemCpu->cbOpcode - pIemCpu->offOpcode; Assert(cbLeft < cbMin);
1131 uint32_t cbToTryRead;
1132 RTGCPTR GCPtrNext;
1133 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
1134 {
1135 cbToTryRead = PAGE_SIZE;
1136 GCPtrNext = pCtx->rip + pIemCpu->cbOpcode;
1137 if (!IEM_IS_CANONICAL(GCPtrNext))
1138 return iemRaiseGeneralProtectionFault0(pIemCpu);
1139 }
1140 else
1141 {
1142 uint32_t GCPtrNext32 = pCtx->eip;
1143 Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || pIemCpu->enmCpuMode == IEMMODE_32BIT);
1144 GCPtrNext32 += pIemCpu->cbOpcode;
1145 if (GCPtrNext32 > pCtx->cs.u32Limit)
1146 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1147 cbToTryRead = pCtx->cs.u32Limit - GCPtrNext32 + 1;
1148 if (!cbToTryRead) /* overflowed */
1149 {
1150 Assert(GCPtrNext32 == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
1151 cbToTryRead = UINT32_MAX;
1152 /** @todo check out wrapping around the code segment. */
1153 }
1154 if (cbToTryRead < cbMin - cbLeft)
1155 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1156 GCPtrNext = (uint32_t)pCtx->cs.u64Base + GCPtrNext32;
1157 }
1158
1159 /* Only read up to the end of the page, and make sure we don't read more
1160 than the opcode buffer can hold. */
1161 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrNext & PAGE_OFFSET_MASK);
1162 if (cbToTryRead > cbLeftOnPage)
1163 cbToTryRead = cbLeftOnPage;
1164 if (cbToTryRead > sizeof(pIemCpu->abOpcode) - pIemCpu->cbOpcode)
1165 cbToTryRead = sizeof(pIemCpu->abOpcode) - pIemCpu->cbOpcode;
1166/** @todo r=bird: Convert assertion into undefined opcode exception? */
1167 Assert(cbToTryRead >= cbMin - cbLeft); /* ASSUMPTION based on iemInitDecoderAndPrefetchOpcodes. */
1168
1169#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1170 /* Allow interpretation of patch manager code blocks since they can for
1171 instance throw #PFs for perfectly good reasons. */
1172 if (pIemCpu->fInPatchCode)
1173 {
1174 size_t cbRead = 0;
1175 int rc = PATMReadPatchCode(IEMCPU_TO_VM(pIemCpu), GCPtrNext, pIemCpu->abOpcode, cbToTryRead, &cbRead);
1176 AssertRCReturn(rc, rc);
1177 pIemCpu->cbOpcode = (uint8_t)cbRead; Assert(pIemCpu->cbOpcode == cbRead); Assert(cbRead > 0);
1178 return VINF_SUCCESS;
1179 }
1180#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1181
1182 RTGCPHYS GCPhys;
1183 uint64_t fFlags;
1184 int rc = PGMGstGetPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrNext, &fFlags, &GCPhys);
1185 if (RT_FAILURE(rc))
1186 {
1187 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
1188 return iemRaisePageFault(pIemCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, rc);
1189 }
1190 if (!(fFlags & X86_PTE_US) && pIemCpu->uCpl == 3)
1191 {
1192 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
1193 return iemRaisePageFault(pIemCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1194 }
1195 if ((fFlags & X86_PTE_PAE_NX) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
1196 {
1197 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
1198 return iemRaisePageFault(pIemCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1199 }
1200 GCPhys |= GCPtrNext & PAGE_OFFSET_MASK;
1201 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, pIemCpu->cbOpcode));
1202 /** @todo Check reserved bits and such stuff. PGM is better at doing
1203 * that, so do it when implementing the guest virtual address
1204 * TLB... */
1205
1206 /*
1207 * Read the bytes at this address.
1208 *
1209 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
1210 * and since PATM should only patch the start of an instruction there
1211 * should be no need to check again here.
1212 */
1213 if (!pIemCpu->fBypassHandlers)
1214 {
1215 VBOXSTRICTRC rcStrict = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhys, &pIemCpu->abOpcode[pIemCpu->cbOpcode],
1216 cbToTryRead, PGMACCESSORIGIN_IEM);
1217 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1218 { /* likely */ }
1219 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1220 {
1221 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1222 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1223 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
1224 }
1225 else
1226 {
1227 Log((RT_SUCCESS(rcStrict)
1228 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1229 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1230 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1231 return rcStrict;
1232 }
1233 }
1234 else
1235 {
1236 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), &pIemCpu->abOpcode[pIemCpu->cbOpcode], GCPhys, cbToTryRead);
1237 if (RT_SUCCESS(rc))
1238 { /* likely */ }
1239 else
1240 {
1241 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
1242 return rc;
1243 }
1244 }
1245 pIemCpu->cbOpcode += cbToTryRead;
1246 Log5(("%.*Rhxs\n", pIemCpu->cbOpcode, pIemCpu->abOpcode));
1247
1248 return VINF_SUCCESS;
1249}
1250
1251
1252/**
1253 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
1254 *
1255 * @returns Strict VBox status code.
1256 * @param pIemCpu The IEM state.
1257 * @param pb Where to return the opcode byte.
1258 */
1259DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU8Slow(PIEMCPU pIemCpu, uint8_t *pb)
1260{
1261 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 1);
1262 if (rcStrict == VINF_SUCCESS)
1263 {
1264 uint8_t offOpcode = pIemCpu->offOpcode;
1265 *pb = pIemCpu->abOpcode[offOpcode];
1266 pIemCpu->offOpcode = offOpcode + 1;
1267 }
1268 else
1269 *pb = 0;
1270 return rcStrict;
1271}
1272
1273
1274/**
1275 * Fetches the next opcode byte.
1276 *
1277 * @returns Strict VBox status code.
1278 * @param pIemCpu The IEM state.
1279 * @param pu8 Where to return the opcode byte.
1280 */
1281DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU8(PIEMCPU pIemCpu, uint8_t *pu8)
1282{
1283 uint8_t const offOpcode = pIemCpu->offOpcode;
1284 if (RT_LIKELY(offOpcode < pIemCpu->cbOpcode))
1285 {
1286 *pu8 = pIemCpu->abOpcode[offOpcode];
1287 pIemCpu->offOpcode = offOpcode + 1;
1288 return VINF_SUCCESS;
1289 }
1290 return iemOpcodeGetNextU8Slow(pIemCpu, pu8);
1291}
1292
1293
1294/**
1295 * Fetches the next opcode byte, returns automatically on failure.
1296 *
1297 * @param a_pu8 Where to return the opcode byte.
1298 * @remark Implicitly references pIemCpu.
1299 */
1300#define IEM_OPCODE_GET_NEXT_U8(a_pu8) \
1301 do \
1302 { \
1303 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU8(pIemCpu, (a_pu8)); \
1304 if (rcStrict2 != VINF_SUCCESS) \
1305 return rcStrict2; \
1306 } while (0)
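
/*
 * Illustrative sketch only (not part of the original source): how a decoder
 * typically consumes opcode bytes through IEM_OPCODE_GET_NEXT_U8.  The fast
 * path reads straight from abOpcode; on an incomplete buffer the slow path
 * ends up in iemOpcodeFetchMoreBytes, and the macro returns from the decoder
 * on failure.  The function name below is hypothetical.
 */
#if 0 /* example only */
FNIEMOP_DEF(iemOp_exampleModRm)
{
    uint8_t bRm;
    IEM_OPCODE_GET_NEXT_U8(&bRm);   /* may return a strict status to the caller */
    NOREF(bRm);
    return VINF_SUCCESS;
}
#endif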
1307
1308
1309/**
1310 * Fetches the next signed byte from the opcode stream.
1311 *
1312 * @returns Strict VBox status code.
1313 * @param pIemCpu The IEM state.
1314 * @param pi8 Where to return the signed byte.
1315 */
1316DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8(PIEMCPU pIemCpu, int8_t *pi8)
1317{
1318 return iemOpcodeGetNextU8(pIemCpu, (uint8_t *)pi8);
1319}
1320
1321
1322/**
1323 * Fetches the next signed byte from the opcode stream, returning automatically
1324 * on failure.
1325 *
1326 * @param a_pi8 Where to return the signed byte.
1327 * @remark Implicitly references pIemCpu.
1328 */
1329#define IEM_OPCODE_GET_NEXT_S8(a_pi8) \
1330 do \
1331 { \
1332 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8(pIemCpu, (a_pi8)); \
1333 if (rcStrict2 != VINF_SUCCESS) \
1334 return rcStrict2; \
1335 } while (0)
1336
1337
1338/**
1339 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
1340 *
1341 * @returns Strict VBox status code.
1342 * @param pIemCpu The IEM state.
1343 * @param pu16 Where to return the opcode word.
1344 */
1345DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU16Slow(PIEMCPU pIemCpu, uint16_t *pu16)
1346{
1347 uint8_t u8;
1348 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pIemCpu, &u8);
1349 if (rcStrict == VINF_SUCCESS)
1350 *pu16 = (int8_t)u8;
1351 return rcStrict;
1352}
1353
1354
1355/**
1356 * Fetches the next signed byte from the opcode stream, extending it to
1357 * unsigned 16-bit.
1358 *
1359 * @returns Strict VBox status code.
1360 * @param pIemCpu The IEM state.
1361 * @param pu16 Where to return the unsigned word.
1362 */
1363DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU16(PIEMCPU pIemCpu, uint16_t *pu16)
1364{
1365 uint8_t const offOpcode = pIemCpu->offOpcode;
1366 if (RT_UNLIKELY(offOpcode >= pIemCpu->cbOpcode))
1367 return iemOpcodeGetNextS8SxU16Slow(pIemCpu, pu16);
1368
1369 *pu16 = (int8_t)pIemCpu->abOpcode[offOpcode];
1370 pIemCpu->offOpcode = offOpcode + 1;
1371 return VINF_SUCCESS;
1372}
1373
1374
1375/**
1376 * Fetches the next signed byte from the opcode stream, sign-extending it to
1377 * a word, returning automatically on failure.
1378 *
1379 * @param a_pu16 Where to return the word.
1380 * @remark Implicitly references pIemCpu.
1381 */
1382#define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) \
1383 do \
1384 { \
1385 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU16(pIemCpu, (a_pu16)); \
1386 if (rcStrict2 != VINF_SUCCESS) \
1387 return rcStrict2; \
1388 } while (0)
1389
1390
1391/**
1392 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
1393 *
1394 * @returns Strict VBox status code.
1395 * @param pIemCpu The IEM state.
1396 * @param pu32 Where to return the opcode dword.
1397 */
1398DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU32Slow(PIEMCPU pIemCpu, uint32_t *pu32)
1399{
1400 uint8_t u8;
1401 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pIemCpu, &u8);
1402 if (rcStrict == VINF_SUCCESS)
1403 *pu32 = (int8_t)u8;
1404 return rcStrict;
1405}
1406
1407
1408/**
1409 * Fetches the next signed byte from the opcode stream, extending it to
1410 * unsigned 32-bit.
1411 *
1412 * @returns Strict VBox status code.
1413 * @param pIemCpu The IEM state.
1414 * @param pu32 Where to return the unsigned dword.
1415 */
1416DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU32(PIEMCPU pIemCpu, uint32_t *pu32)
1417{
1418 uint8_t const offOpcode = pIemCpu->offOpcode;
1419 if (RT_UNLIKELY(offOpcode >= pIemCpu->cbOpcode))
1420 return iemOpcodeGetNextS8SxU32Slow(pIemCpu, pu32);
1421
1422 *pu32 = (int8_t)pIemCpu->abOpcode[offOpcode];
1423 pIemCpu->offOpcode = offOpcode + 1;
1424 return VINF_SUCCESS;
1425}
1426
1427
1428/**
1429 * Fetches the next signed byte from the opcode stream, sign-extending it to
1430 * a double word, returning automatically on failure.
1431 *
1432 * @param a_pu32 Where to return the double word.
1433 * @remark Implicitly references pIemCpu.
1434 */
1435#define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) \
1436 do \
1437 { \
1438 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU32(pIemCpu, (a_pu32)); \
1439 if (rcStrict2 != VINF_SUCCESS) \
1440 return rcStrict2; \
1441 } while (0)
1442
1443
1444/**
1445 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
1446 *
1447 * @returns Strict VBox status code.
1448 * @param pIemCpu The IEM state.
1449 * @param pu64 Where to return the opcode qword.
1450 */
1451DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1452{
1453 uint8_t u8;
1454 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pIemCpu, &u8);
1455 if (rcStrict == VINF_SUCCESS)
1456 *pu64 = (int8_t)u8;
1457 return rcStrict;
1458}
1459
1460
1461/**
1462 * Fetches the next signed byte from the opcode stream, extending it to
1463 * unsigned 64-bit.
1464 *
1465 * @returns Strict VBox status code.
1466 * @param pIemCpu The IEM state.
1467 * @param pu64 Where to return the unsigned qword.
1468 */
1469DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1470{
1471 uint8_t const offOpcode = pIemCpu->offOpcode;
1472 if (RT_UNLIKELY(offOpcode >= pIemCpu->cbOpcode))
1473 return iemOpcodeGetNextS8SxU64Slow(pIemCpu, pu64);
1474
1475 *pu64 = (int8_t)pIemCpu->abOpcode[offOpcode];
1476 pIemCpu->offOpcode = offOpcode + 1;
1477 return VINF_SUCCESS;
1478}
1479
1480
1481/**
1482 * Fetches the next signed byte from the opcode stream, sign-extending it to
1483 * a quad word, returning automatically on failure.
1484 *
1485 * @param a_pu64 Where to return the quad word.
1486 * @remark Implicitly references pIemCpu.
1487 */
1488#define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) \
1489 do \
1490 { \
1491 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU64(pIemCpu, (a_pu64)); \
1492 if (rcStrict2 != VINF_SUCCESS) \
1493 return rcStrict2; \
1494 } while (0)
1495
1496
1497/**
1498 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
1499 *
1500 * @returns Strict VBox status code.
1501 * @param pIemCpu The IEM state.
1502 * @param pu16 Where to return the opcode word.
1503 */
1504DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16Slow(PIEMCPU pIemCpu, uint16_t *pu16)
1505{
1506 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 2);
1507 if (rcStrict == VINF_SUCCESS)
1508 {
1509 uint8_t offOpcode = pIemCpu->offOpcode;
1510 *pu16 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1511 pIemCpu->offOpcode = offOpcode + 2;
1512 }
1513 else
1514 *pu16 = 0;
1515 return rcStrict;
1516}
1517
1518
1519/**
1520 * Fetches the next opcode word.
1521 *
1522 * @returns Strict VBox status code.
1523 * @param pIemCpu The IEM state.
1524 * @param pu16 Where to return the opcode word.
1525 */
1526DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16(PIEMCPU pIemCpu, uint16_t *pu16)
1527{
1528 uint8_t const offOpcode = pIemCpu->offOpcode;
1529 if (RT_UNLIKELY(offOpcode + 2 > pIemCpu->cbOpcode))
1530 return iemOpcodeGetNextU16Slow(pIemCpu, pu16);
1531
1532 *pu16 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1533 pIemCpu->offOpcode = offOpcode + 2;
1534 return VINF_SUCCESS;
1535}
1536
1537
1538/**
1539 * Fetches the next opcode word, returns automatically on failure.
1540 *
1541 * @param a_pu16 Where to return the opcode word.
1542 * @remark Implicitly references pIemCpu.
1543 */
1544#define IEM_OPCODE_GET_NEXT_U16(a_pu16) \
1545 do \
1546 { \
1547 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16(pIemCpu, (a_pu16)); \
1548 if (rcStrict2 != VINF_SUCCESS) \
1549 return rcStrict2; \
1550 } while (0)
1551
1552
1553/**
1554 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
1555 *
1556 * @returns Strict VBox status code.
1557 * @param pIemCpu The IEM state.
1558 * @param pu32 Where to return the opcode double word.
1559 */
1560DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32Slow(PIEMCPU pIemCpu, uint32_t *pu32)
1561{
1562 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 2);
1563 if (rcStrict == VINF_SUCCESS)
1564 {
1565 uint8_t offOpcode = pIemCpu->offOpcode;
1566 *pu32 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1567 pIemCpu->offOpcode = offOpcode + 2;
1568 }
1569 else
1570 *pu32 = 0;
1571 return rcStrict;
1572}
1573
1574
1575/**
1576 * Fetches the next opcode word, zero extending it to a double word.
1577 *
1578 * @returns Strict VBox status code.
1579 * @param pIemCpu The IEM state.
1580 * @param pu32 Where to return the opcode double word.
1581 */
1582DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32(PIEMCPU pIemCpu, uint32_t *pu32)
1583{
1584 uint8_t const offOpcode = pIemCpu->offOpcode;
1585 if (RT_UNLIKELY(offOpcode + 2 > pIemCpu->cbOpcode))
1586 return iemOpcodeGetNextU16ZxU32Slow(pIemCpu, pu32);
1587
1588 *pu32 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1589 pIemCpu->offOpcode = offOpcode + 2;
1590 return VINF_SUCCESS;
1591}
1592
1593
1594/**
1595 * Fetches the next opcode word and zero extends it to a double word, returns
1596 * automatically on failure.
1597 *
1598 * @param a_pu32 Where to return the opcode double word.
1599 * @remark Implicitly references pIemCpu.
1600 */
1601#define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) \
1602 do \
1603 { \
1604 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU32(pIemCpu, (a_pu32)); \
1605 if (rcStrict2 != VINF_SUCCESS) \
1606 return rcStrict2; \
1607 } while (0)
1608
1609
1610/**
1611 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
1612 *
1613 * @returns Strict VBox status code.
1614 * @param pIemCpu The IEM state.
1615 * @param pu64 Where to return the opcode quad word.
1616 */
1617DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1618{
1619 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 2);
1620 if (rcStrict == VINF_SUCCESS)
1621 {
1622 uint8_t offOpcode = pIemCpu->offOpcode;
1623 *pu64 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1624 pIemCpu->offOpcode = offOpcode + 2;
1625 }
1626 else
1627 *pu64 = 0;
1628 return rcStrict;
1629}
1630
1631
1632/**
1633 * Fetches the next opcode word, zero extending it to a quad word.
1634 *
1635 * @returns Strict VBox status code.
1636 * @param pIemCpu The IEM state.
1637 * @param pu64 Where to return the opcode quad word.
1638 */
1639DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1640{
1641 uint8_t const offOpcode = pIemCpu->offOpcode;
1642 if (RT_UNLIKELY(offOpcode + 2 > pIemCpu->cbOpcode))
1643 return iemOpcodeGetNextU16ZxU64Slow(pIemCpu, pu64);
1644
1645 *pu64 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1646 pIemCpu->offOpcode = offOpcode + 2;
1647 return VINF_SUCCESS;
1648}
1649
1650
1651/**
1652 * Fetches the next opcode word and zero extends it to a quad word, returns
1653 * automatically on failure.
1654 *
1655 * @param a_pu64 Where to return the opcode quad word.
1656 * @remark Implicitly references pIemCpu.
1657 */
1658#define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) \
1659 do \
1660 { \
1661 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU64(pIemCpu, (a_pu64)); \
1662 if (rcStrict2 != VINF_SUCCESS) \
1663 return rcStrict2; \
1664 } while (0)
1665
1666
1667/**
1668 * Fetches the next signed word from the opcode stream.
1669 *
1670 * @returns Strict VBox status code.
1671 * @param pIemCpu The IEM state.
1672 * @param pi16 Where to return the signed word.
1673 */
1674DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS16(PIEMCPU pIemCpu, int16_t *pi16)
1675{
1676 return iemOpcodeGetNextU16(pIemCpu, (uint16_t *)pi16);
1677}
1678
1679
1680/**
1681 * Fetches the next signed word from the opcode stream, returning automatically
1682 * on failure.
1683 *
1684 * @param a_pi16 Where to return the signed word.
1685 * @remark Implicitly references pIemCpu.
1686 */
1687#define IEM_OPCODE_GET_NEXT_S16(a_pi16) \
1688 do \
1689 { \
1690 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS16(pIemCpu, (a_pi16)); \
1691 if (rcStrict2 != VINF_SUCCESS) \
1692 return rcStrict2; \
1693 } while (0)
1694
1695
1696/**
1697 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
1698 *
1699 * @returns Strict VBox status code.
1700 * @param pIemCpu The IEM state.
1701 * @param pu32 Where to return the opcode dword.
1702 */
1703DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32Slow(PIEMCPU pIemCpu, uint32_t *pu32)
1704{
1705 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 4);
1706 if (rcStrict == VINF_SUCCESS)
1707 {
1708 uint8_t offOpcode = pIemCpu->offOpcode;
1709 *pu32 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1710 pIemCpu->abOpcode[offOpcode + 1],
1711 pIemCpu->abOpcode[offOpcode + 2],
1712 pIemCpu->abOpcode[offOpcode + 3]);
1713 pIemCpu->offOpcode = offOpcode + 4;
1714 }
1715 else
1716 *pu32 = 0;
1717 return rcStrict;
1718}
1719
1720
1721/**
1722 * Fetches the next opcode dword.
1723 *
1724 * @returns Strict VBox status code.
1725 * @param pIemCpu The IEM state.
1726 * @param pu32 Where to return the opcode double word.
1727 */
1728DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32(PIEMCPU pIemCpu, uint32_t *pu32)
1729{
1730 uint8_t const offOpcode = pIemCpu->offOpcode;
1731 if (RT_UNLIKELY(offOpcode + 4 > pIemCpu->cbOpcode))
1732 return iemOpcodeGetNextU32Slow(pIemCpu, pu32);
1733
1734 *pu32 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1735 pIemCpu->abOpcode[offOpcode + 1],
1736 pIemCpu->abOpcode[offOpcode + 2],
1737 pIemCpu->abOpcode[offOpcode + 3]);
1738 pIemCpu->offOpcode = offOpcode + 4;
1739 return VINF_SUCCESS;
1740}
1741
1742
1743/**
1744 * Fetches the next opcode dword, returns automatically on failure.
1745 *
1746 * @param a_pu32 Where to return the opcode dword.
1747 * @remark Implicitly references pIemCpu.
1748 */
1749#define IEM_OPCODE_GET_NEXT_U32(a_pu32) \
1750 do \
1751 { \
1752 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32(pIemCpu, (a_pu32)); \
1753 if (rcStrict2 != VINF_SUCCESS) \
1754 return rcStrict2; \
1755 } while (0)
1756
1757
1758/**
1759 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
1760 *
1761 * @returns Strict VBox status code.
1762 * @param pIemCpu The IEM state.
1763 * @param pu64 Where to return the opcode quad word.
1764 */
1765DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1766{
1767 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 4);
1768 if (rcStrict == VINF_SUCCESS)
1769 {
1770 uint8_t offOpcode = pIemCpu->offOpcode;
1771 *pu64 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1772 pIemCpu->abOpcode[offOpcode + 1],
1773 pIemCpu->abOpcode[offOpcode + 2],
1774 pIemCpu->abOpcode[offOpcode + 3]);
1775 pIemCpu->offOpcode = offOpcode + 4;
1776 }
1777 else
1778 *pu64 = 0;
1779 return rcStrict;
1780}
1781
1782
1783/**
1784 * Fetches the next opcode dword, zero extending it to a quad word.
1785 *
1786 * @returns Strict VBox status code.
1787 * @param pIemCpu The IEM state.
1788 * @param pu64 Where to return the opcode quad word.
1789 */
1790DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1791{
1792 uint8_t const offOpcode = pIemCpu->offOpcode;
1793 if (RT_UNLIKELY(offOpcode + 4 > pIemCpu->cbOpcode))
1794 return iemOpcodeGetNextU32ZxU64Slow(pIemCpu, pu64);
1795
1796 *pu64 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1797 pIemCpu->abOpcode[offOpcode + 1],
1798 pIemCpu->abOpcode[offOpcode + 2],
1799 pIemCpu->abOpcode[offOpcode + 3]);
1800 pIemCpu->offOpcode = offOpcode + 4;
1801 return VINF_SUCCESS;
1802}
1803
1804
1805/**
1806 * Fetches the next opcode dword and zero extends it to a quad word, returns
1807 * automatically on failure.
1808 *
1809 * @param a_pu64 Where to return the opcode quad word.
1810 * @remark Implicitly references pIemCpu.
1811 */
1812#define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) \
1813 do \
1814 { \
1815 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32ZxU64(pIemCpu, (a_pu64)); \
1816 if (rcStrict2 != VINF_SUCCESS) \
1817 return rcStrict2; \
1818 } while (0)
1819
1820
1821/**
1822 * Fetches the next signed double word from the opcode stream.
1823 *
1824 * @returns Strict VBox status code.
1825 * @param pIemCpu The IEM state.
1826 * @param pi32 Where to return the signed double word.
1827 */
1828DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32(PIEMCPU pIemCpu, int32_t *pi32)
1829{
1830 return iemOpcodeGetNextU32(pIemCpu, (uint32_t *)pi32);
1831}
1832
1833/**
1834 * Fetches the next signed double word from the opcode stream, returning
1835 * automatically on failure.
1836 *
1837 * @param a_pi32 Where to return the signed double word.
1838 * @remark Implicitly references pIemCpu.
1839 */
1840#define IEM_OPCODE_GET_NEXT_S32(a_pi32) \
1841 do \
1842 { \
1843 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32(pIemCpu, (a_pi32)); \
1844 if (rcStrict2 != VINF_SUCCESS) \
1845 return rcStrict2; \
1846 } while (0)
1847
1848
1849/**
1850 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
1851 *
1852 * @returns Strict VBox status code.
1853 * @param pIemCpu The IEM state.
1854 * @param pu64 Where to return the opcode qword.
1855 */
1856DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS32SxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1857{
1858 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 4);
1859 if (rcStrict == VINF_SUCCESS)
1860 {
1861 uint8_t offOpcode = pIemCpu->offOpcode;
1862 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1863 pIemCpu->abOpcode[offOpcode + 1],
1864 pIemCpu->abOpcode[offOpcode + 2],
1865 pIemCpu->abOpcode[offOpcode + 3]);
1866 pIemCpu->offOpcode = offOpcode + 4;
1867 }
1868 else
1869 *pu64 = 0;
1870 return rcStrict;
1871}
1872
1873
1874/**
1875 * Fetches the next opcode dword, sign extending it into a quad word.
1876 *
1877 * @returns Strict VBox status code.
1878 * @param pIemCpu The IEM state.
1879 * @param pu64 Where to return the opcode quad word.
1880 */
1881DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32SxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1882{
1883 uint8_t const offOpcode = pIemCpu->offOpcode;
1884 if (RT_UNLIKELY(offOpcode + 4 > pIemCpu->cbOpcode))
1885 return iemOpcodeGetNextS32SxU64Slow(pIemCpu, pu64);
1886
1887 int32_t i32 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1888 pIemCpu->abOpcode[offOpcode + 1],
1889 pIemCpu->abOpcode[offOpcode + 2],
1890 pIemCpu->abOpcode[offOpcode + 3]);
1891 *pu64 = i32;
1892 pIemCpu->offOpcode = offOpcode + 4;
1893 return VINF_SUCCESS;
1894}
1895
1896
1897/**
1898 * Fetches the next opcode double word and sign extends it to a quad word,
1899 * returns automatically on failure.
1900 *
1901 * @param a_pu64 Where to return the opcode quad word.
1902 * @remark Implicitly references pIemCpu.
1903 */
1904#define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) \
1905 do \
1906 { \
1907 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32SxU64(pIemCpu, (a_pu64)); \
1908 if (rcStrict2 != VINF_SUCCESS) \
1909 return rcStrict2; \
1910 } while (0)
1911
1912
1913/**
1914 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
1915 *
1916 * @returns Strict VBox status code.
1917 * @param pIemCpu The IEM state.
1918 * @param pu64 Where to return the opcode qword.
1919 */
1920DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1921{
1922 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 8);
1923 if (rcStrict == VINF_SUCCESS)
1924 {
1925 uint8_t offOpcode = pIemCpu->offOpcode;
1926 *pu64 = RT_MAKE_U64_FROM_U8(pIemCpu->abOpcode[offOpcode],
1927 pIemCpu->abOpcode[offOpcode + 1],
1928 pIemCpu->abOpcode[offOpcode + 2],
1929 pIemCpu->abOpcode[offOpcode + 3],
1930 pIemCpu->abOpcode[offOpcode + 4],
1931 pIemCpu->abOpcode[offOpcode + 5],
1932 pIemCpu->abOpcode[offOpcode + 6],
1933 pIemCpu->abOpcode[offOpcode + 7]);
1934 pIemCpu->offOpcode = offOpcode + 8;
1935 }
1936 else
1937 *pu64 = 0;
1938 return rcStrict;
1939}
1940
1941
1942/**
1943 * Fetches the next opcode qword.
1944 *
1945 * @returns Strict VBox status code.
1946 * @param pIemCpu The IEM state.
1947 * @param pu64 Where to return the opcode qword.
1948 */
1949DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU64(PIEMCPU pIemCpu, uint64_t *pu64)
1950{
1951 uint8_t const offOpcode = pIemCpu->offOpcode;
1952 if (RT_UNLIKELY(offOpcode + 8 > pIemCpu->cbOpcode))
1953 return iemOpcodeGetNextU64Slow(pIemCpu, pu64);
1954
1955 *pu64 = RT_MAKE_U64_FROM_U8(pIemCpu->abOpcode[offOpcode],
1956 pIemCpu->abOpcode[offOpcode + 1],
1957 pIemCpu->abOpcode[offOpcode + 2],
1958 pIemCpu->abOpcode[offOpcode + 3],
1959 pIemCpu->abOpcode[offOpcode + 4],
1960 pIemCpu->abOpcode[offOpcode + 5],
1961 pIemCpu->abOpcode[offOpcode + 6],
1962 pIemCpu->abOpcode[offOpcode + 7]);
1963 pIemCpu->offOpcode = offOpcode + 8;
1964 return VINF_SUCCESS;
1965}
1966
1967
1968/**
1969 * Fetches the next opcode quad word, returns automatically on failure.
1970 *
1971 * @param a_pu64 Where to return the opcode quad word.
1972 * @remark Implicitly references pIemCpu.
1973 */
1974#define IEM_OPCODE_GET_NEXT_U64(a_pu64) \
1975 do \
1976 { \
1977 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU64(pIemCpu, (a_pu64)); \
1978 if (rcStrict2 != VINF_SUCCESS) \
1979 return rcStrict2; \
1980 } while (0)
1981
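/*
 * Usage sketch (illustrative only, not part of the real decoder): the
 * IEM_OPCODE_GET_NEXT_XXX macros return from the *calling* function when the
 * fetch fails, so they may only be used in functions that return a
 * VBOXSTRICTRC and have pIemCpu in scope.  A hypothetical decoder helper
 * would look roughly like this:
 *
 *     IEM_STATIC VBOXSTRICTRC iemOpExampleDecodeImm(PIEMCPU pIemCpu)
 *     {
 *         uint16_t u16Imm;
 *         IEM_OPCODE_GET_NEXT_U16(&u16Imm);          // returns on failure
 *         uint64_t u64Imm;
 *         IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64Imm);   // imm32 sign extended for 64-bit operands
 *         return VINF_SUCCESS;
 *     }
 */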
1982
1983/** @name Misc Worker Functions.
1984 * @{
1985 */
1986
1987
1988/**
1989 * Validates a new SS segment.
1990 *
1991 * @returns VBox strict status code.
1992 * @param pIemCpu The IEM per CPU instance data.
1993 * @param pCtx The CPU context.
1994 * @param NewSS The new SS selector.
1995 * @param uCpl The CPL to load the stack for.
1996 * @param pDesc Where to return the descriptor.
1997 */
1998IEM_STATIC VBOXSTRICTRC iemMiscValidateNewSS(PIEMCPU pIemCpu, PCCPUMCTX pCtx, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc)
1999{
2000 NOREF(pCtx);
2001
2002 /* Null selectors are not allowed (we're not called for dispatching
2003 interrupts with SS=0 in long mode). */
2004 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
2005 {
2006 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #TS(0)\n", NewSS));
2007 return iemRaiseTaskSwitchFault0(pIemCpu);
2008 }
2009
2010 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
2011 if ((NewSS & X86_SEL_RPL) != uCpl)
2012 {
2013 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differs -> #TS\n", NewSS, uCpl));
2014 return iemRaiseTaskSwitchFaultBySelector(pIemCpu, NewSS);
2015 }
2016
2017 /*
2018 * Read the descriptor.
2019 */
2020 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, pDesc, NewSS, X86_XCPT_TS);
2021 if (rcStrict != VINF_SUCCESS)
2022 return rcStrict;
2023
2024 /*
2025 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
2026 */
2027 if (!pDesc->Legacy.Gen.u1DescType)
2028 {
2029 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
2030 return iemRaiseTaskSwitchFaultBySelector(pIemCpu, NewSS);
2031 }
2032
2033 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
2034 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
2035 {
2036 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
2037 return iemRaiseTaskSwitchFaultBySelector(pIemCpu, NewSS);
2038 }
2039 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
2040 {
2041 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differs -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
2042 return iemRaiseTaskSwitchFaultBySelector(pIemCpu, NewSS);
2043 }
2044
2045 /* Is it there? */
2046 /** @todo testcase: Is this checked before the canonical / limit check below? */
2047 if (!pDesc->Legacy.Gen.u1Present)
2048 {
2049 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
2050 return iemRaiseSelectorNotPresentBySelector(pIemCpu, NewSS);
2051 }
2052
2053 return VINF_SUCCESS;
2054}
2055
2056
2057/**
2058 * Gets the correct EFLAGS regardless of whether PATM stores parts of them or
2059 * not.
2060 *
2061 * @param a_pIemCpu The IEM per CPU data.
2062 * @param a_pCtx The CPU context.
2063 */
2064#ifdef VBOX_WITH_RAW_MODE_NOT_R0
2065# define IEMMISC_GET_EFL(a_pIemCpu, a_pCtx) \
2066 ( IEM_VERIFICATION_ENABLED(a_pIemCpu) \
2067 ? (a_pCtx)->eflags.u \
2068 : CPUMRawGetEFlags(IEMCPU_TO_VMCPU(a_pIemCpu)) )
2069#else
2070# define IEMMISC_GET_EFL(a_pIemCpu, a_pCtx) \
2071 ( (a_pCtx)->eflags.u )
2072#endif
2073
2074/**
2075 * Updates the EFLAGS in the correct manner wrt. PATM.
2076 *
2077 * @param a_pIemCpu The IEM per CPU data.
2078 * @param a_pCtx The CPU context.
2079 * @param a_fEfl The new EFLAGS.
2080 */
2081#ifdef VBOX_WITH_RAW_MODE_NOT_R0
2082# define IEMMISC_SET_EFL(a_pIemCpu, a_pCtx, a_fEfl) \
2083 do { \
2084 if (IEM_VERIFICATION_ENABLED(a_pIemCpu)) \
2085 (a_pCtx)->eflags.u = (a_fEfl); \
2086 else \
2087 CPUMRawSetEFlags(IEMCPU_TO_VMCPU(a_pIemCpu), a_fEfl); \
2088 } while (0)
2089#else
2090# define IEMMISC_SET_EFL(a_pIemCpu, a_pCtx, a_fEfl) \
2091 do { \
2092 (a_pCtx)->eflags.u = (a_fEfl); \
2093 } while (0)
2094#endif
2095
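/*
 * Typical usage is a read-modify-write sequence, as done by the exception
 * delivery code further down in this file:
 *
 *     uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx);
 *     fEfl &= ~X86_EFL_IF;                      // e.g. mask interrupts
 *     IEMMISC_SET_EFL(pIemCpu, pCtx, fEfl);
 */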
2096
2097/** @} */
2098
2099/** @name Raising Exceptions.
2100 *
2101 * @{
2102 */
2103
2104/** @name IEM_XCPT_FLAGS_XXX - flags for iemRaiseXcptOrInt.
2105 * @{ */
2106/** CPU exception. */
2107#define IEM_XCPT_FLAGS_T_CPU_XCPT RT_BIT_32(0)
2108/** External interrupt (from PIC, APIC, whatever). */
2109#define IEM_XCPT_FLAGS_T_EXT_INT RT_BIT_32(1)
2110/** Software interrupt (INT n or INTO, not BOUND).
2111 * Returns to the following instruction. */
2112#define IEM_XCPT_FLAGS_T_SOFT_INT RT_BIT_32(2)
2113/** Takes an error code. */
2114#define IEM_XCPT_FLAGS_ERR RT_BIT_32(3)
2115/** Takes a CR2. */
2116#define IEM_XCPT_FLAGS_CR2 RT_BIT_32(4)
2117/** Generated by the breakpoint instruction. */
2118#define IEM_XCPT_FLAGS_BP_INSTR RT_BIT_32(5)
2119/** Generated by a DRx instruction breakpoint and RF should be cleared. */
2120#define IEM_XCPT_FLAGS_DRx_INSTR_BP RT_BIT_32(6)
2121/** @} */
2122
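/*
 * Illustrative combinations (a sketch based on the flag descriptions above,
 * not an exhaustive list): a page fault would be raised with
 * IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2 since
 * it supplies both an error code and a fault address, while a plain INT xx
 * uses IEM_XCPT_FLAGS_T_SOFT_INT.
 */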
2123
2124/**
2125 * Loads the stack far pointer (SS:ESP) for the given CPL from the TSS.
2126 *
2127 * @returns VBox strict status code.
2128 * @param pIemCpu The IEM per CPU instance data.
2129 * @param pCtx The CPU context.
2130 * @param uCpl The CPL to load the stack for.
2131 * @param pSelSS Where to return the new stack segment.
2132 * @param puEsp Where to return the new stack pointer.
2133 */
2134IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PIEMCPU pIemCpu, PCCPUMCTX pCtx, uint8_t uCpl,
2135 PRTSEL pSelSS, uint32_t *puEsp)
2136{
2137 VBOXSTRICTRC rcStrict;
2138 Assert(uCpl < 4);
2139
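    /* The per-CPL stack pointers live at fixed offsets in the TSS: in a 16-bit
       TSS the SP/SS pairs (4 bytes each) start at offset 2, in a 32-bit TSS the
       ESP/SS pairs (8 bytes each) start at offset 4 - hence the uCpl * 4 + 2 and
       uCpl * 8 + 4 offset calculations below. */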
2140 switch (pCtx->tr.Attr.n.u4Type)
2141 {
2142 /*
2143 * 16-bit TSS (X86TSS16).
2144 */
2145 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed();
2146 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
2147 {
2148 uint32_t off = uCpl * 4 + 2;
2149 if (off + 4 > pCtx->tr.u32Limit)
2150 {
2151 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pCtx->tr.u32Limit));
2152 return iemRaiseTaskSwitchFaultCurrentTSS(pIemCpu);
2153 }
2154
2155/** @todo check actual access pattern here. */
2156 uint32_t u32Tmp = 0; /* gcc maybe... */
2157 rcStrict = iemMemFetchSysU32(pIemCpu, &u32Tmp, UINT8_MAX, pCtx->tr.u64Base + off);
2158 if (rcStrict == VINF_SUCCESS)
2159 {
2160 *puEsp = RT_LOWORD(u32Tmp);
2161 *pSelSS = RT_HIWORD(u32Tmp);
2162 return VINF_SUCCESS;
2163 }
2164 break;
2165 }
2166
2167 /*
2168 * 32-bit TSS (X86TSS32).
2169 */
2170 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed();
2171 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
2172 {
2173 uint32_t off = uCpl * 8 + 4;
2174 if (off + 7 > pCtx->tr.u32Limit)
2175 {
2176                Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS32\n", uCpl, pCtx->tr.u32Limit));
2177 return iemRaiseTaskSwitchFaultCurrentTSS(pIemCpu);
2178 }
2179
2180/** @todo check actual access pattern here. */
2181 uint64_t u64Tmp;
2182 rcStrict = iemMemFetchSysU64(pIemCpu, &u64Tmp, UINT8_MAX, pCtx->tr.u64Base + off);
2183 if (rcStrict == VINF_SUCCESS)
2184 {
2185 *puEsp = u64Tmp & UINT32_MAX;
2186 *pSelSS = (RTSEL)(u64Tmp >> 32);
2187 return VINF_SUCCESS;
2188 }
2189 break;
2190 }
2191
2192 default:
2193 AssertFailed();
2194 rcStrict = VERR_IEM_IPE_4;
2195 break;
2196 }
2197
2198 *puEsp = 0; /* make gcc happy */
2199 *pSelSS = 0; /* make gcc happy */
2200 return rcStrict;
2201}
2202
2203
2204/**
2205 * Loads the specified stack pointer from the 64-bit TSS.
2206 *
2207 * @returns VBox strict status code.
2208 * @param pIemCpu The IEM per CPU instance data.
2209 * @param pCtx The CPU context.
2210 * @param uCpl The CPL to load the stack for.
2211 * @param uIst The interrupt stack table index, 0 if to use uCpl.
2212 * @param puRsp Where to return the new stack pointer.
2213 */
2214IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss64(PIEMCPU pIemCpu, PCCPUMCTX pCtx, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp)
2215{
2216 Assert(uCpl < 4);
2217 Assert(uIst < 8);
2218 *puRsp = 0; /* make gcc happy */
2219
2220 AssertReturn(pCtx->tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
2221
2222 uint32_t off;
2223 if (uIst)
2224 off = (uIst - 1) * sizeof(uint64_t) + RT_OFFSETOF(X86TSS64, ist1);
2225 else
2226 off = uCpl * sizeof(uint64_t) + RT_OFFSETOF(X86TSS64, rsp0);
2227 if (off + sizeof(uint64_t) > pCtx->tr.u32Limit)
2228 {
2229 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pCtx->tr.u32Limit));
2230 return iemRaiseTaskSwitchFaultCurrentTSS(pIemCpu);
2231 }
2232
2233 return iemMemFetchSysU64(pIemCpu, puRsp, UINT8_MAX, pCtx->tr.u64Base + off);
2234}
2235
2236
2237/**
2238 * Adjust the CPU state according to the exception being raised.
2239 *
2240 * @param pCtx The CPU context.
2241 * @param u8Vector The exception that has been raised.
2242 */
2243DECLINLINE(void) iemRaiseXcptAdjustState(PCPUMCTX pCtx, uint8_t u8Vector)
2244{
2245 switch (u8Vector)
2246 {
2247 case X86_XCPT_DB:
2248 pCtx->dr[7] &= ~X86_DR7_GD;
2249 break;
2250 /** @todo Read the AMD and Intel exception reference... */
2251 }
2252}
2253
2254
2255/**
2256 * Implements exceptions and interrupts for real mode.
2257 *
2258 * @returns VBox strict status code.
2259 * @param pIemCpu The IEM per CPU instance data.
2260 * @param pCtx The CPU context.
2261 * @param cbInstr The number of bytes to offset rIP by in the return
2262 * address.
2263 * @param u8Vector The interrupt / exception vector number.
2264 * @param fFlags The flags.
2265 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2266 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2267 */
2268IEM_STATIC VBOXSTRICTRC
2269iemRaiseXcptOrIntInRealMode(PIEMCPU pIemCpu,
2270 PCPUMCTX pCtx,
2271 uint8_t cbInstr,
2272 uint8_t u8Vector,
2273 uint32_t fFlags,
2274 uint16_t uErr,
2275 uint64_t uCr2)
2276{
2277 AssertReturn(pIemCpu->enmCpuMode == IEMMODE_16BIT, VERR_IEM_IPE_6);
2278 NOREF(uErr); NOREF(uCr2);
2279
2280 /*
2281 * Read the IDT entry.
2282 */
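    /* In real mode the IDT is the classic interrupt vector table: an array of
       4-byte far pointers (16-bit offset followed by 16-bit segment), which is
       why both the limit check and the fetch below scale by 4 * u8Vector. */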
2283 if (pCtx->idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
2284 {
2285 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
2286 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2287 }
2288 RTFAR16 Idte;
2289 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pIemCpu, (uint32_t *)&Idte, UINT8_MAX,
2290 pCtx->idtr.pIdt + UINT32_C(4) * u8Vector);
2291 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2292 return rcStrict;
2293
2294 /*
2295 * Push the stack frame.
2296 */
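    /* The real-mode frame is 6 bytes: FLAGS at the highest address, then CS,
       with the return IP ending up at the new stack pointer (pu16Frame[0]). */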
2297 uint16_t *pu16Frame;
2298 uint64_t uNewRsp;
2299 rcStrict = iemMemStackPushBeginSpecial(pIemCpu, 6, (void **)&pu16Frame, &uNewRsp);
2300 if (rcStrict != VINF_SUCCESS)
2301 return rcStrict;
2302
2303 uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx);
2304 pu16Frame[2] = (uint16_t)fEfl;
2305 pu16Frame[1] = (uint16_t)pCtx->cs.Sel;
2306 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->ip + cbInstr : pCtx->ip;
2307 rcStrict = iemMemStackPushCommitSpecial(pIemCpu, pu16Frame, uNewRsp);
2308 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2309 return rcStrict;
2310
2311 /*
2312 * Load the vector address into cs:ip and make exception specific state
2313 * adjustments.
2314 */
2315 pCtx->cs.Sel = Idte.sel;
2316 pCtx->cs.ValidSel = Idte.sel;
2317 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
2318 pCtx->cs.u64Base = (uint32_t)Idte.sel << 4;
2319 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
2320 pCtx->rip = Idte.off;
2321 fEfl &= ~X86_EFL_IF;
2322 IEMMISC_SET_EFL(pIemCpu, pCtx, fEfl);
2323
2324 /** @todo do we actually do this in real mode? */
2325 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
2326 iemRaiseXcptAdjustState(pCtx, u8Vector);
2327
2328 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
2329}
2330
2331
2332/**
2333 * Loads a NULL data selector into a segment register when coming from V8086 mode.
2334 *
2335 * @param pIemCpu The IEM per CPU instance data.
2336 * @param pSReg Pointer to the segment register.
2337 */
2338IEM_STATIC void iemHlpLoadNullDataSelectorOnV86Xcpt(PIEMCPU pIemCpu, PCPUMSELREG pSReg)
2339{
2340 pSReg->Sel = 0;
2341 pSReg->ValidSel = 0;
2342 if (IEM_IS_GUEST_CPU_INTEL(pIemCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
2343 {
2344 /* VT-x (Intel 3960x) doesn't change the base and limit, clears and sets the following attributes */
2345 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
2346 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
2347 }
2348 else
2349 {
2350 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2351 /** @todo check this on AMD-V */
2352 pSReg->u64Base = 0;
2353 pSReg->u32Limit = 0;
2354 }
2355}
2356
2357
2358/**
2359 * Loads a segment selector during a task switch in V8086 mode.
2360 *
2361 * @param pIemCpu The IEM per CPU instance data.
2362 * @param pSReg Pointer to the segment register.
2363 * @param uSel The selector value to load.
2364 */
2365IEM_STATIC void iemHlpLoadSelectorInV86Mode(PIEMCPU pIemCpu, PCPUMSELREG pSReg, uint16_t uSel)
2366{
2367 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
2368 pSReg->Sel = uSel;
2369 pSReg->ValidSel = uSel;
2370 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2371 pSReg->u64Base = uSel << 4;
2372 pSReg->u32Limit = 0xffff;
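    /* 0xf3 = present, DPL=3, read/write accessed data segment, i.e. real-mode style attributes. */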
2373 pSReg->Attr.u = 0xf3;
2374}
2375
2376
2377/**
2378 * Loads a NULL data selector into a selector register, both the hidden and
2379 * visible parts, in protected mode.
2380 *
2381 * @param pIemCpu The IEM state of the calling EMT.
2382 * @param pSReg Pointer to the segment register.
2383 * @param uRpl The RPL.
2384 */
2385IEM_STATIC void iemHlpLoadNullDataSelectorProt(PIEMCPU pIemCpu, PCPUMSELREG pSReg, RTSEL uRpl)
2386{
2387 /** @todo Testcase: write a testcase checking what happends when loading a NULL
2388 * data selector in protected mode. */
2389 pSReg->Sel = uRpl;
2390 pSReg->ValidSel = uRpl;
2391 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2392 if (IEM_IS_GUEST_CPU_INTEL(pIemCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
2393 {
2394 /* VT-x (Intel 3960x) observed doing something like this. */
2395 pSReg->Attr.u = X86DESCATTR_UNUSABLE | X86DESCATTR_G | X86DESCATTR_D | (pIemCpu->uCpl << X86DESCATTR_DPL_SHIFT);
2396 pSReg->u32Limit = UINT32_MAX;
2397 pSReg->u64Base = 0;
2398 }
2399 else
2400 {
2401 pSReg->Attr.u = X86DESCATTR_UNUSABLE;
2402 pSReg->u32Limit = 0;
2403 pSReg->u64Base = 0;
2404 }
2405}
2406
2407
2408/**
2409 * Loads a segment selector during a task switch in protected mode.
2410 *
2411 * In this task switch scenario, we would throw \#TS exceptions rather than
2412 * \#GPs.
2413 *
2414 * @returns VBox strict status code.
2415 * @param pIemCpu The IEM per CPU instance data.
2416 * @param pSReg Pointer to the segment register.
2417 * @param uSel The new selector value.
2418 *
2419 * @remarks This does _not_ handle CS or SS.
2420 * @remarks This expects pIemCpu->uCpl to be up to date.
2421 */
2422IEM_STATIC VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PIEMCPU pIemCpu, PCPUMSELREG pSReg, uint16_t uSel)
2423{
2424 Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
2425
2426 /* Null data selector. */
2427 if (!(uSel & X86_SEL_MASK_OFF_RPL))
2428 {
2429 iemHlpLoadNullDataSelectorProt(pIemCpu, pSReg, uSel);
2430 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pSReg));
2431 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_HIDDEN_SEL_REGS);
2432 return VINF_SUCCESS;
2433 }
2434
2435 /* Fetch the descriptor. */
2436 IEMSELDESC Desc;
2437 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uSel, X86_XCPT_TS);
2438 if (rcStrict != VINF_SUCCESS)
2439 {
2440 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
2441 VBOXSTRICTRC_VAL(rcStrict)));
2442 return rcStrict;
2443 }
2444
2445 /* Must be a data segment or readable code segment. */
2446 if ( !Desc.Legacy.Gen.u1DescType
2447 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
2448 {
2449 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
2450 Desc.Legacy.Gen.u4Type));
2451 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uSel & X86_SEL_MASK_OFF_RPL);
2452 }
2453
2454 /* Check privileges for data segments and non-conforming code segments. */
2455 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2456 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2457 {
2458 /* The RPL and the new CPL must be less than or equal to the DPL. */
2459 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
2460 || (pIemCpu->uCpl > Desc.Legacy.Gen.u2Dpl))
2461 {
2462 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
2463 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
2464 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uSel & X86_SEL_MASK_OFF_RPL);
2465 }
2466 }
2467
2468 /* Is it there? */
2469 if (!Desc.Legacy.Gen.u1Present)
2470 {
2471 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
2472 return iemRaiseSelectorNotPresentWithErr(pIemCpu, uSel & X86_SEL_MASK_OFF_RPL);
2473 }
2474
2475 /* The base and limit. */
2476 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
2477 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
2478
2479 /*
2480 * Ok, everything checked out fine. Now set the accessed bit before
2481 * committing the result into the registers.
2482 */
2483 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2484 {
2485 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uSel);
2486 if (rcStrict != VINF_SUCCESS)
2487 return rcStrict;
2488 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2489 }
2490
2491 /* Commit */
2492 pSReg->Sel = uSel;
2493 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
2494 pSReg->u32Limit = cbLimit;
2495 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
2496 pSReg->ValidSel = uSel;
2497 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2498 if (IEM_IS_GUEST_CPU_INTEL(pIemCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
2499 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
2500
2501 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pSReg));
2502 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_HIDDEN_SEL_REGS);
2503 return VINF_SUCCESS;
2504}
2505
2506
2507/**
2508 * Performs a task switch.
2509 *
2510 * If the task switch is the result of a JMP, CALL or IRET instruction, the
2511 * caller is responsible for performing the necessary checks (like DPL, TSS
2512 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
2513 * reference for JMP, CALL, IRET.
2514 *
2515 * If the task switch is due to a software interrupt or hardware exception,
2516 * the caller is responsible for validating the TSS selector and descriptor. See
2517 * Intel Instruction reference for INT n.
2518 *
2519 * @returns VBox strict status code.
2520 * @param pIemCpu The IEM per CPU instance data.
2521 * @param pCtx The CPU context.
2522 * @param enmTaskSwitch What caused this task switch.
2523 * @param uNextEip The EIP effective after the task switch.
2524 * @param fFlags The flags.
2525 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2526 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2527 * @param SelTSS The TSS selector of the new task.
2528 * @param pNewDescTSS Pointer to the new TSS descriptor.
2529 */
2530IEM_STATIC VBOXSTRICTRC
2531iemTaskSwitch(PIEMCPU pIemCpu,
2532 PCPUMCTX pCtx,
2533 IEMTASKSWITCH enmTaskSwitch,
2534 uint32_t uNextEip,
2535 uint32_t fFlags,
2536 uint16_t uErr,
2537 uint64_t uCr2,
2538 RTSEL SelTSS,
2539 PIEMSELDESC pNewDescTSS)
2540{
2541 Assert(!IEM_IS_REAL_MODE(pIemCpu));
2542 Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
2543
2544 uint32_t const uNewTSSType = pNewDescTSS->Legacy.Gate.u4Type;
2545 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2546 || uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2547 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2548 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2549
2550 bool const fIsNewTSS386 = ( uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2551 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2552
2553 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTSS=%#x fIsNewTSS386=%RTbool EIP=%#RGv uNextEip=%#RGv\n", enmTaskSwitch, SelTSS,
2554 fIsNewTSS386, pCtx->eip, uNextEip));
2555
2556 /* Update CR2 in case it's a page-fault. */
2557 /** @todo This should probably be done much earlier in IEM/PGM. See
2558 * @bugref{5653#c49}. */
2559 if (fFlags & IEM_XCPT_FLAGS_CR2)
2560 pCtx->cr2 = uCr2;
2561
2562 /*
2563 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
2564 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
2565 */
2566 uint32_t const uNewTSSLimit = pNewDescTSS->Legacy.Gen.u16LimitLow | (pNewDescTSS->Legacy.Gen.u4LimitHigh << 16);
2567 uint32_t const uNewTSSLimitMin = fIsNewTSS386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
2568 if (uNewTSSLimit < uNewTSSLimitMin)
2569 {
2570 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTSSLimit=%#x uNewTSSLimitMin=%#x -> #TS\n",
2571 enmTaskSwitch, uNewTSSLimit, uNewTSSLimitMin));
2572 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
2573 }
2574
2575 /*
2576 * Check the current TSS limit. The last bytes written to the current TSS during the
2577 * task switch are 2 bytes at offset 0x5C (32-bit) and 1 byte at offset 0x28 (16-bit).
2578 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2579 *
2580 * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
2581 * end up with smaller than "legal" TSS limits.
2582 */
2583 uint32_t const uCurTSSLimit = pCtx->tr.u32Limit;
2584 uint32_t const uCurTSSLimitMin = fIsNewTSS386 ? 0x5F : 0x29;
2585 if (uCurTSSLimit < uCurTSSLimitMin)
2586 {
2587 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTSSLimit=%#x uCurTSSLimitMin=%#x -> #TS\n",
2588 enmTaskSwitch, uCurTSSLimit, uCurTSSLimitMin));
2589 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
2590 }
2591
2592 /*
2593 * Verify that the new TSS can be accessed and map it. Map only the required contents
2594 * and not the entire TSS.
2595 */
2596 void *pvNewTSS;
2597 uint32_t cbNewTSS = uNewTSSLimitMin + 1;
2598 RTGCPTR GCPtrNewTSS = X86DESC_BASE(&pNewDescTSS->Legacy);
2599 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
2600 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
2601 * not perform correct translation if this happens. See Intel spec. 7.2.1
2602 * "Task-State Segment" */
2603 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, &pvNewTSS, cbNewTSS, UINT8_MAX, GCPtrNewTSS, IEM_ACCESS_SYS_RW);
2604 if (rcStrict != VINF_SUCCESS)
2605 {
2606 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTSS=%u uNewTSSLimit=%u rc=%Rrc\n", enmTaskSwitch,
2607 cbNewTSS, uNewTSSLimit, VBOXSTRICTRC_VAL(rcStrict)));
2608 return rcStrict;
2609 }
2610
2611 /*
2612 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
2613 */
2614 uint32_t u32EFlags = pCtx->eflags.u32;
2615 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
2616 || enmTaskSwitch == IEMTASKSWITCH_IRET)
2617 {
2618 PX86DESC pDescCurTSS;
2619 rcStrict = iemMemMap(pIemCpu, (void **)&pDescCurTSS, sizeof(*pDescCurTSS), UINT8_MAX,
2620 pCtx->gdtr.pGdt + (pCtx->tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
2621 if (rcStrict != VINF_SUCCESS)
2622 {
2623 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2624 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2625 return rcStrict;
2626 }
2627
2628 pDescCurTSS->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2629 rcStrict = iemMemCommitAndUnmap(pIemCpu, pDescCurTSS, IEM_ACCESS_SYS_RW);
2630 if (rcStrict != VINF_SUCCESS)
2631 {
2632 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2633 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2634 return rcStrict;
2635 }
2636
2637 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
2638 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
2639 {
2640 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2641 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2642 u32EFlags &= ~X86_EFL_NT;
2643 }
2644 }
2645
2646 /*
2647 * Save the CPU state into the current TSS.
2648 */
2649 RTGCPTR GCPtrCurTSS = pCtx->tr.u64Base;
2650 if (GCPtrNewTSS == GCPtrCurTSS)
2651 {
2652 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTSS));
2653 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
2654 pCtx->cr3, pCtx->eip, pCtx->eflags.u32, pCtx->eax, pCtx->esp, pCtx->ebp, pCtx->cs.Sel, pCtx->ss.Sel, pCtx->ldtr.Sel));
2655 }
2656 if (fIsNewTSS386)
2657 {
2658 /*
2659 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
2660 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2661 */
2662 void *pvCurTSS32;
2663 uint32_t offCurTSS = RT_OFFSETOF(X86TSS32, eip);
2664 uint32_t cbCurTSS = RT_OFFSETOF(X86TSS32, selLdt) - RT_OFFSETOF(X86TSS32, eip);
2665 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
2666 rcStrict = iemMemMap(pIemCpu, &pvCurTSS32, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
2667 if (rcStrict != VINF_SUCCESS)
2668 {
2669 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
2670 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
2671 return rcStrict;
2672 }
2673
2674        /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS, offCurTSS + cbCurTSS). */
2675 PX86TSS32 pCurTSS32 = (PX86TSS32)((uintptr_t)pvCurTSS32 - offCurTSS);
2676 pCurTSS32->eip = uNextEip;
2677 pCurTSS32->eflags = u32EFlags;
2678 pCurTSS32->eax = pCtx->eax;
2679 pCurTSS32->ecx = pCtx->ecx;
2680 pCurTSS32->edx = pCtx->edx;
2681 pCurTSS32->ebx = pCtx->ebx;
2682 pCurTSS32->esp = pCtx->esp;
2683 pCurTSS32->ebp = pCtx->ebp;
2684 pCurTSS32->esi = pCtx->esi;
2685 pCurTSS32->edi = pCtx->edi;
2686 pCurTSS32->es = pCtx->es.Sel;
2687 pCurTSS32->cs = pCtx->cs.Sel;
2688 pCurTSS32->ss = pCtx->ss.Sel;
2689 pCurTSS32->ds = pCtx->ds.Sel;
2690 pCurTSS32->fs = pCtx->fs.Sel;
2691 pCurTSS32->gs = pCtx->gs.Sel;
2692
2693 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvCurTSS32, IEM_ACCESS_SYS_RW);
2694 if (rcStrict != VINF_SUCCESS)
2695 {
2696 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2697 VBOXSTRICTRC_VAL(rcStrict)));
2698 return rcStrict;
2699 }
2700 }
2701 else
2702 {
2703 /*
2704 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
2705 */
2706 void *pvCurTSS16;
2707 uint32_t offCurTSS = RT_OFFSETOF(X86TSS16, ip);
2708 uint32_t cbCurTSS = RT_OFFSETOF(X86TSS16, selLdt) - RT_OFFSETOF(X86TSS16, ip);
2709 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
2710 rcStrict = iemMemMap(pIemCpu, &pvCurTSS16, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
2711 if (rcStrict != VINF_SUCCESS)
2712 {
2713 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
2714 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
2715 return rcStrict;
2716 }
2717
2718        /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS, offCurTSS + cbCurTSS). */
2719 PX86TSS16 pCurTSS16 = (PX86TSS16)((uintptr_t)pvCurTSS16 - offCurTSS);
2720 pCurTSS16->ip = uNextEip;
2721 pCurTSS16->flags = u32EFlags;
2722 pCurTSS16->ax = pCtx->ax;
2723 pCurTSS16->cx = pCtx->cx;
2724 pCurTSS16->dx = pCtx->dx;
2725 pCurTSS16->bx = pCtx->bx;
2726 pCurTSS16->sp = pCtx->sp;
2727 pCurTSS16->bp = pCtx->bp;
2728 pCurTSS16->si = pCtx->si;
2729 pCurTSS16->di = pCtx->di;
2730 pCurTSS16->es = pCtx->es.Sel;
2731 pCurTSS16->cs = pCtx->cs.Sel;
2732 pCurTSS16->ss = pCtx->ss.Sel;
2733 pCurTSS16->ds = pCtx->ds.Sel;
2734
2735 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvCurTSS16, IEM_ACCESS_SYS_RW);
2736 if (rcStrict != VINF_SUCCESS)
2737 {
2738 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2739 VBOXSTRICTRC_VAL(rcStrict)));
2740 return rcStrict;
2741 }
2742 }
2743
2744 /*
2745 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
2746 */
2747 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2748 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2749 {
2750 /* 16 or 32-bit TSS doesn't matter, we only access the first, common 16-bit field (selPrev) here. */
2751 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTSS;
2752 pNewTSS->selPrev = pCtx->tr.Sel;
2753 }
2754
2755 /*
2756 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky;
2757 * it's done further below with error handling (e.g. CR3 changes will go through PGM).
2758 */
2759 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
2760 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
2761 bool fNewDebugTrap;
2762 if (fIsNewTSS386)
2763 {
2764 PX86TSS32 pNewTSS32 = (PX86TSS32)pvNewTSS;
2765 uNewCr3 = (pCtx->cr0 & X86_CR0_PG) ? pNewTSS32->cr3 : 0;
2766 uNewEip = pNewTSS32->eip;
2767 uNewEflags = pNewTSS32->eflags;
2768 uNewEax = pNewTSS32->eax;
2769 uNewEcx = pNewTSS32->ecx;
2770 uNewEdx = pNewTSS32->edx;
2771 uNewEbx = pNewTSS32->ebx;
2772 uNewEsp = pNewTSS32->esp;
2773 uNewEbp = pNewTSS32->ebp;
2774 uNewEsi = pNewTSS32->esi;
2775 uNewEdi = pNewTSS32->edi;
2776 uNewES = pNewTSS32->es;
2777 uNewCS = pNewTSS32->cs;
2778 uNewSS = pNewTSS32->ss;
2779 uNewDS = pNewTSS32->ds;
2780 uNewFS = pNewTSS32->fs;
2781 uNewGS = pNewTSS32->gs;
2782 uNewLdt = pNewTSS32->selLdt;
2783 fNewDebugTrap = RT_BOOL(pNewTSS32->fDebugTrap);
2784 }
2785 else
2786 {
2787 PX86TSS16 pNewTSS16 = (PX86TSS16)pvNewTSS;
2788 uNewCr3 = 0;
2789 uNewEip = pNewTSS16->ip;
2790 uNewEflags = pNewTSS16->flags;
2791 uNewEax = UINT32_C(0xffff0000) | pNewTSS16->ax;
2792 uNewEcx = UINT32_C(0xffff0000) | pNewTSS16->cx;
2793 uNewEdx = UINT32_C(0xffff0000) | pNewTSS16->dx;
2794 uNewEbx = UINT32_C(0xffff0000) | pNewTSS16->bx;
2795 uNewEsp = UINT32_C(0xffff0000) | pNewTSS16->sp;
2796 uNewEbp = UINT32_C(0xffff0000) | pNewTSS16->bp;
2797 uNewEsi = UINT32_C(0xffff0000) | pNewTSS16->si;
2798 uNewEdi = UINT32_C(0xffff0000) | pNewTSS16->di;
2799 uNewES = pNewTSS16->es;
2800 uNewCS = pNewTSS16->cs;
2801 uNewSS = pNewTSS16->ss;
2802 uNewDS = pNewTSS16->ds;
2803 uNewFS = 0;
2804 uNewGS = 0;
2805 uNewLdt = pNewTSS16->selLdt;
2806 fNewDebugTrap = false;
2807 }
2808
2809 if (GCPtrNewTSS == GCPtrCurTSS)
2810 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
2811 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
2812
2813 /*
2814 * We're done accessing the new TSS.
2815 */
2816 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvNewTSS, IEM_ACCESS_SYS_RW);
2817 if (rcStrict != VINF_SUCCESS)
2818 {
2819 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
2820 return rcStrict;
2821 }
2822
2823 /*
2824 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
2825 */
2826 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
2827 {
2828 rcStrict = iemMemMap(pIemCpu, (void **)&pNewDescTSS, sizeof(*pNewDescTSS), UINT8_MAX,
2829 pCtx->gdtr.pGdt + (SelTSS & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
2830 if (rcStrict != VINF_SUCCESS)
2831 {
2832 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2833 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2834 return rcStrict;
2835 }
2836
2837 /* Check that the descriptor indicates the new TSS is available (not busy). */
2838 AssertMsg( pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2839 || pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
2840 ("Invalid TSS descriptor type=%#x", pNewDescTSS->Legacy.Gate.u4Type));
2841
2842 pNewDescTSS->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2843 rcStrict = iemMemCommitAndUnmap(pIemCpu, pNewDescTSS, IEM_ACCESS_SYS_RW);
2844 if (rcStrict != VINF_SUCCESS)
2845 {
2846 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2847 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2848 return rcStrict;
2849 }
2850 }
2851
2852 /*
2853 * From this point on, we're technically in the new task. Exceptions raised here are deferred
2854 * and delivered after the task switch completes, but before any instruction of the new task executes.
2855 */
2856 pCtx->tr.Sel = SelTSS;
2857 pCtx->tr.ValidSel = SelTSS;
2858 pCtx->tr.fFlags = CPUMSELREG_FLAGS_VALID;
2859 pCtx->tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTSS->Legacy);
2860 pCtx->tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTSS->Legacy);
2861 pCtx->tr.u64Base = X86DESC_BASE(&pNewDescTSS->Legacy);
2862 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_TR);
2863
2864 /* Set the busy bit in TR. */
2865 pCtx->tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2866 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
2867 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2868 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2869 {
2870 uNewEflags |= X86_EFL_NT;
2871 }
2872
2873 pCtx->dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
2874 pCtx->cr0 |= X86_CR0_TS;
2875 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_CR0);
2876
2877 pCtx->eip = uNewEip;
2878 pCtx->eax = uNewEax;
2879 pCtx->ecx = uNewEcx;
2880 pCtx->edx = uNewEdx;
2881 pCtx->ebx = uNewEbx;
2882 pCtx->esp = uNewEsp;
2883 pCtx->ebp = uNewEbp;
2884 pCtx->esi = uNewEsi;
2885 pCtx->edi = uNewEdi;
2886
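    /* Only the architecturally defined EFLAGS bits are taken from the new TSS image;
       the reserved, always-one bit 1 is forced set. */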
2887 uNewEflags &= X86_EFL_LIVE_MASK;
2888 uNewEflags |= X86_EFL_RA1_MASK;
2889 IEMMISC_SET_EFL(pIemCpu, pCtx, uNewEflags);
2890
2891 /*
2892 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
2893 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3
2894 * due to the hidden part data originating from the guest LDT/GDT which is accessed through paging.
2895 */
2896 pCtx->es.Sel = uNewES;
2897 pCtx->es.fFlags = CPUMSELREG_FLAGS_STALE;
2898 pCtx->es.Attr.u &= ~X86DESCATTR_P;
2899
2900 pCtx->cs.Sel = uNewCS;
2901 pCtx->cs.fFlags = CPUMSELREG_FLAGS_STALE;
2902 pCtx->cs.Attr.u &= ~X86DESCATTR_P;
2903
2904 pCtx->ss.Sel = uNewSS;
2905 pCtx->ss.fFlags = CPUMSELREG_FLAGS_STALE;
2906 pCtx->ss.Attr.u &= ~X86DESCATTR_P;
2907
2908 pCtx->ds.Sel = uNewDS;
2909 pCtx->ds.fFlags = CPUMSELREG_FLAGS_STALE;
2910 pCtx->ds.Attr.u &= ~X86DESCATTR_P;
2911
2912 pCtx->fs.Sel = uNewFS;
2913 pCtx->fs.fFlags = CPUMSELREG_FLAGS_STALE;
2914 pCtx->fs.Attr.u &= ~X86DESCATTR_P;
2915
2916 pCtx->gs.Sel = uNewGS;
2917 pCtx->gs.fFlags = CPUMSELREG_FLAGS_STALE;
2918 pCtx->gs.Attr.u &= ~X86DESCATTR_P;
2919 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_HIDDEN_SEL_REGS);
2920
2921 pCtx->ldtr.Sel = uNewLdt;
2922 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
2923 pCtx->ldtr.Attr.u &= ~X86DESCATTR_P;
2924 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_LDTR);
2925
2926 if (IEM_IS_GUEST_CPU_INTEL(pIemCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
2927 {
2928 pCtx->es.Attr.u |= X86DESCATTR_UNUSABLE;
2929 pCtx->cs.Attr.u |= X86DESCATTR_UNUSABLE;
2930 pCtx->ss.Attr.u |= X86DESCATTR_UNUSABLE;
2931 pCtx->ds.Attr.u |= X86DESCATTR_UNUSABLE;
2932 pCtx->fs.Attr.u |= X86DESCATTR_UNUSABLE;
2933 pCtx->gs.Attr.u |= X86DESCATTR_UNUSABLE;
2934 pCtx->ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
2935 }
2936
2937 /*
2938 * Switch CR3 for the new task.
2939 */
2940 if ( fIsNewTSS386
2941 && (pCtx->cr0 & X86_CR0_PG))
2942 {
2943 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
2944 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
2945 {
2946 int rc = CPUMSetGuestCR3(IEMCPU_TO_VMCPU(pIemCpu), uNewCr3);
2947 AssertRCSuccessReturn(rc, rc);
2948 }
2949 else
2950 pCtx->cr3 = uNewCr3;
2951
2952 /* Inform PGM. */
2953 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
2954 {
2955 int rc = PGMFlushTLB(IEMCPU_TO_VMCPU(pIemCpu), pCtx->cr3, !(pCtx->cr4 & X86_CR4_PGE));
2956 AssertRCReturn(rc, rc);
2957 /* ignore informational status codes */
2958 }
2959 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_CR3);
2960 }
2961
2962 /*
2963 * Switch LDTR for the new task.
2964 */
2965 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
2966 iemHlpLoadNullDataSelectorProt(pIemCpu, &pCtx->ldtr, uNewLdt);
2967 else
2968 {
2969 Assert(!pCtx->ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
2970
2971 IEMSELDESC DescNewLdt;
2972 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
2973 if (rcStrict != VINF_SUCCESS)
2974 {
2975 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
2976 uNewLdt, pCtx->gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
2977 return rcStrict;
2978 }
2979 if ( !DescNewLdt.Legacy.Gen.u1Present
2980 || DescNewLdt.Legacy.Gen.u1DescType
2981 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
2982 {
2983 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
2984 uNewLdt, DescNewLdt.Legacy.u));
2985 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
2986 }
2987
2988 pCtx->ldtr.ValidSel = uNewLdt;
2989 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
2990 pCtx->ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
2991 pCtx->ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
2992 pCtx->ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
2993 if (IEM_IS_GUEST_CPU_INTEL(pIemCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
2994 pCtx->ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
2995 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), &pCtx->ldtr));
2996 }
2997
2998 IEMSELDESC DescSS;
2999 if (IEM_IS_V86_MODE(pIemCpu))
3000 {
3001 pIemCpu->uCpl = 3;
3002 iemHlpLoadSelectorInV86Mode(pIemCpu, &pCtx->es, uNewES);
3003 iemHlpLoadSelectorInV86Mode(pIemCpu, &pCtx->cs, uNewCS);
3004 iemHlpLoadSelectorInV86Mode(pIemCpu, &pCtx->ss, uNewSS);
3005 iemHlpLoadSelectorInV86Mode(pIemCpu, &pCtx->ds, uNewDS);
3006 iemHlpLoadSelectorInV86Mode(pIemCpu, &pCtx->fs, uNewFS);
3007 iemHlpLoadSelectorInV86Mode(pIemCpu, &pCtx->gs, uNewGS);
3008 }
3009 else
3010 {
3011 uint8_t uNewCpl = (uNewCS & X86_SEL_RPL);
3012
3013 /*
3014 * Load the stack segment for the new task.
3015 */
3016 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
3017 {
3018 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
3019 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
3020 }
3021
3022 /* Fetch the descriptor. */
3023 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescSS, uNewSS, X86_XCPT_TS);
3024 if (rcStrict != VINF_SUCCESS)
3025 {
3026 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
3027 VBOXSTRICTRC_VAL(rcStrict)));
3028 return rcStrict;
3029 }
3030
3031 /* SS must be a data segment and writable. */
3032 if ( !DescSS.Legacy.Gen.u1DescType
3033 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
3034 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
3035 {
3036 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
3037 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
3038 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
3039 }
3040
3041 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
3042 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
3043 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
3044 {
3045 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
3046 uNewCpl));
3047 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
3048 }
3049
3050 /* Is it there? */
3051 if (!DescSS.Legacy.Gen.u1Present)
3052 {
3053 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
3054 return iemRaiseSelectorNotPresentWithErr(pIemCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
3055 }
3056
3057 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
3058 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
3059
3060 /* Set the accessed bit before committing the result into SS. */
3061 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3062 {
3063 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewSS);
3064 if (rcStrict != VINF_SUCCESS)
3065 return rcStrict;
3066 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3067 }
3068
3069 /* Commit SS. */
3070 pCtx->ss.Sel = uNewSS;
3071 pCtx->ss.ValidSel = uNewSS;
3072 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
3073 pCtx->ss.u32Limit = cbLimit;
3074 pCtx->ss.u64Base = u64Base;
3075 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
3076 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), &pCtx->ss));
3077
3078 /* CPL has changed, update IEM before loading rest of segments. */
3079 pIemCpu->uCpl = uNewCpl;
3080
3081 /*
3082 * Load the data segments for the new task.
3083 */
3084 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pIemCpu, &pCtx->es, uNewES);
3085 if (rcStrict != VINF_SUCCESS)
3086 return rcStrict;
3087 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pIemCpu, &pCtx->ds, uNewDS);
3088 if (rcStrict != VINF_SUCCESS)
3089 return rcStrict;
3090 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pIemCpu, &pCtx->fs, uNewFS);
3091 if (rcStrict != VINF_SUCCESS)
3092 return rcStrict;
3093 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pIemCpu, &pCtx->gs, uNewGS);
3094 if (rcStrict != VINF_SUCCESS)
3095 return rcStrict;
3096
3097 /*
3098 * Load the code segment for the new task.
3099 */
3100 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
3101 {
3102 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
3103 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3104 }
3105
3106 /* Fetch the descriptor. */
3107 IEMSELDESC DescCS;
3108 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCS, uNewCS, X86_XCPT_TS);
3109 if (rcStrict != VINF_SUCCESS)
3110 {
3111 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
3112 return rcStrict;
3113 }
3114
3115 /* CS must be a code segment. */
3116 if ( !DescCS.Legacy.Gen.u1DescType
3117 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
3118 {
3119 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
3120 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
3121 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3122 }
3123
3124 /* For conforming CS, DPL must be less than or equal to the RPL. */
3125 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
3126 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
3127 {
3128 Log(("iemTaskSwitch: conforming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
3129 DescCS.Legacy.Gen.u2Dpl));
3130 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3131 }
3132
3133 /* For non-conforming CS, DPL must match RPL. */
3134 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
3135 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
3136 {
3137 Log(("iemTaskSwitch: non-conforming CS DPL/RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
3138 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
3139 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3140 }
3141
3142 /* Is it there? */
3143 if (!DescCS.Legacy.Gen.u1Present)
3144 {
3145 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
3146 return iemRaiseSelectorNotPresentWithErr(pIemCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3147 }
3148
3149 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
3150 u64Base = X86DESC_BASE(&DescCS.Legacy);
3151
3152 /* Set the accessed bit before committing the result into CS. */
3153 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3154 {
3155 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewCS);
3156 if (rcStrict != VINF_SUCCESS)
3157 return rcStrict;
3158 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3159 }
3160
3161 /* Commit CS. */
3162 pCtx->cs.Sel = uNewCS;
3163 pCtx->cs.ValidSel = uNewCS;
3164 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3165 pCtx->cs.u32Limit = cbLimit;
3166 pCtx->cs.u64Base = u64Base;
3167 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3168 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), &pCtx->cs));
3169 }
3170
3171 /** @todo Debug trap. */
3172 if (fIsNewTSS386 && fNewDebugTrap)
3173 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
3174
3175 /*
3176 * Construct the error code masks based on what caused this task switch.
3177 * See Intel Instruction reference for INT.
3178 */
3179 uint16_t uExt;
3180 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
3181 && !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT))
3182 {
3183 uExt = 1;
3184 }
3185 else
3186 uExt = 0;
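    /* uExt ends up as the EXT bit (bit 0) of the error code for any fault
       raised while completing the switch below: set when the event did not
       originate from a software interrupt instruction. */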
3187
3188 /*
3189 * Push any error code on to the new stack.
3190 */
3191 if (fFlags & IEM_XCPT_FLAGS_ERR)
3192 {
3193 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
3194 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
3195 uint8_t const cbStackFrame = fIsNewTSS386 ? 4 : 2;
3196
3197 /* Check that there is sufficient space on the stack. */
3198 /** @todo Factor out segment limit checking for normal/expand down segments
3199 * into a separate function. */
3200 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
3201 {
3202 if ( pCtx->esp - 1 > cbLimitSS
3203 || pCtx->esp < cbStackFrame)
3204 {
3205 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
3206 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n", pCtx->ss.Sel, pCtx->esp,
3207 cbStackFrame));
3208 return iemRaiseStackSelectorNotPresentWithErr(pIemCpu, uExt);
3209 }
3210 }
3211 else
3212 {
3213 if ( pCtx->esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
3214 || pCtx->esp - cbStackFrame < cbLimitSS + UINT32_C(1))
3215 {
3216 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n", pCtx->ss.Sel, pCtx->esp,
3217 cbStackFrame));
3218 return iemRaiseStackSelectorNotPresentWithErr(pIemCpu, uExt);
3219 }
3220 }
3221
3222
3223 if (fIsNewTSS386)
3224 rcStrict = iemMemStackPushU32(pIemCpu, uErr);
3225 else
3226 rcStrict = iemMemStackPushU16(pIemCpu, uErr);
3227 if (rcStrict != VINF_SUCCESS)
3228 {
3229 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n", fIsNewTSS386 ? "32" : "16",
3230 VBOXSTRICTRC_VAL(rcStrict)));
3231 return rcStrict;
3232 }
3233 }
3234
3235 /* Check the new EIP against the new CS limit. */
3236 if (pCtx->eip > pCtx->cs.u32Limit)
3237 {
3238 Log(("iemTaskSwitch: New EIP exceeds CS limit. uNewEIP=%#RGv CS limit=%u -> #GP(0)\n",
3239 pCtx->eip, pCtx->cs.u32Limit));
3240 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
3241 return iemRaiseGeneralProtectionFault(pIemCpu, uExt);
3242 }
3243
3244 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pCtx->cs.Sel, pCtx->eip, pCtx->ss.Sel));
3245 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3246}
3247
3248
3249/**
3250 * Implements exceptions and interrupts for protected mode.
3251 *
3252 * @returns VBox strict status code.
3253 * @param pIemCpu The IEM per CPU instance data.
3254 * @param pCtx The CPU context.
3255 * @param cbInstr The number of bytes to offset rIP by in the return
3256 * address.
3257 * @param u8Vector The interrupt / exception vector number.
3258 * @param fFlags The flags.
3259 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3260 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3261 */
3262IEM_STATIC VBOXSTRICTRC
3263iemRaiseXcptOrIntInProtMode(PIEMCPU pIemCpu,
3264 PCPUMCTX pCtx,
3265 uint8_t cbInstr,
3266 uint8_t u8Vector,
3267 uint32_t fFlags,
3268 uint16_t uErr,
3269 uint64_t uCr2)
3270{
3271 /*
3272 * Read the IDT entry.
3273 */
3274 if (pCtx->idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
3275 {
3276 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
3277 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3278 }
3279 X86DESC Idte;
3280 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pIemCpu, &Idte.u, UINT8_MAX,
3281 pCtx->idtr.pIdt + UINT32_C(8) * u8Vector);
3282 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3283 return rcStrict;
3284 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x\n",
3285 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
3286 Idte.Gate.u4ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
3287
3288 /*
3289 * Check the descriptor type, DPL and such.
3290 * ASSUMES this is done in the same order as described for call-gate calls.
3291 */
3292 if (Idte.Gate.u1DescType)
3293 {
3294 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3295 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3296 }
3297 bool fTaskGate = false;
3298 uint8_t f32BitGate = true;
3299 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
3300 switch (Idte.Gate.u4Type)
3301 {
3302 case X86_SEL_TYPE_SYS_UNDEFINED:
3303 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
3304 case X86_SEL_TYPE_SYS_LDT:
3305 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
3306 case X86_SEL_TYPE_SYS_286_CALL_GATE:
3307 case X86_SEL_TYPE_SYS_UNDEFINED2:
3308 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
3309 case X86_SEL_TYPE_SYS_UNDEFINED3:
3310 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
3311 case X86_SEL_TYPE_SYS_386_CALL_GATE:
3312 case X86_SEL_TYPE_SYS_UNDEFINED4:
3313 {
3314 /** @todo check what actually happens when the type is wrong...
3315 * esp. call gates. */
3316 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3317 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3318 }
3319
3320 case X86_SEL_TYPE_SYS_286_INT_GATE:
3321 f32BitGate = false;
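            /* fall thru */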
3322 case X86_SEL_TYPE_SYS_386_INT_GATE:
3323 fEflToClear |= X86_EFL_IF;
3324 break;
3325
3326 case X86_SEL_TYPE_SYS_TASK_GATE:
3327 fTaskGate = true;
3328#ifndef IEM_IMPLEMENTS_TASKSWITCH
3329 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
3330#endif
3331 break;
3332
3333 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
3334 f32BitGate = false;
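            /* fall thru */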
3335 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
3336 break;
3337
3338 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3339 }
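    /* Interrupt gates additionally clear IF (see fEflToClear) so the handler
       starts with interrupts masked, trap gates leave IF untouched, and a
       task gate dispatches the event through a task switch further down. */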
3340
3341 /* Check DPL against CPL if applicable. */
3342 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3343 {
3344 if (pIemCpu->uCpl > Idte.Gate.u2Dpl)
3345 {
3346 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pIemCpu->uCpl, Idte.Gate.u2Dpl));
3347 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3348 }
3349 }
3350
3351 /* Is it there? */
3352 if (!Idte.Gate.u1Present)
3353 {
3354 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
3355 return iemRaiseSelectorNotPresentWithErr(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3356 }
3357
3358 /* Is it a task-gate? */
3359 if (fTaskGate)
3360 {
3361 /*
3362 * Construct the error code masks based on what caused this task switch.
3363 * See Intel Instruction reference for INT.
3364 */
3365 uint16_t const uExt = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? 0 : 1;
3366 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
3367 RTSEL SelTSS = Idte.Gate.u16Sel;
3368
3369 /*
3370 * Fetch the TSS descriptor in the GDT.
3371 */
3372 IEMSELDESC DescTSS;
3373 rcStrict = iemMemFetchSelDescWithErr(pIemCpu, &DescTSS, SelTSS, X86_XCPT_GP, (SelTSS & uSelMask) | uExt);
3374 if (rcStrict != VINF_SUCCESS)
3375 {
3376 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTSS,
3377 VBOXSTRICTRC_VAL(rcStrict)));
3378 return rcStrict;
3379 }
3380
3381 /* The TSS descriptor must be a system segment and be available (not busy). */
3382 if ( DescTSS.Legacy.Gen.u1DescType
3383 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
3384 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
3385 {
3386 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
3387 u8Vector, SelTSS, DescTSS.Legacy.au64));
3388 return iemRaiseGeneralProtectionFault(pIemCpu, (SelTSS & uSelMask) | uExt);
3389 }
3390
3391 /* The TSS must be present. */
3392 if (!DescTSS.Legacy.Gen.u1Present)
3393 {
3394 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTSS, DescTSS.Legacy.au64));
3395 return iemRaiseSelectorNotPresentWithErr(pIemCpu, (SelTSS & uSelMask) | uExt);
3396 }
3397
3398 /* Do the actual task switch. */
3399 return iemTaskSwitch(pIemCpu, pCtx, IEMTASKSWITCH_INT_XCPT, pCtx->eip, fFlags, uErr, uCr2, SelTSS, &DescTSS);
3400 }
3401
3402 /* A null CS is bad. */
3403 RTSEL NewCS = Idte.Gate.u16Sel;
3404 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3405 {
3406 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3407 return iemRaiseGeneralProtectionFault0(pIemCpu);
3408 }
3409
3410 /* Fetch the descriptor for the new CS. */
3411 IEMSELDESC DescCS;
3412 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
3413 if (rcStrict != VINF_SUCCESS)
3414 {
3415 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3416 return rcStrict;
3417 }
3418
3419 /* Must be a code segment. */
3420 if (!DescCS.Legacy.Gen.u1DescType)
3421 {
3422 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3423 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3424 }
3425 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
3426 {
3427 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3428 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3429 }
3430
3431 /* Don't allow lowering the privilege level. */
3432 /** @todo Does the lowering of privileges apply to software interrupts
3433 * only? This has bearings on the more-privileged or
3434 * same-privilege stack behavior further down. A testcase would
3435 * be nice. */
3436 if (DescCS.Legacy.Gen.u2Dpl > pIemCpu->uCpl)
3437 {
3438 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3439 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
3440 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3441 }
3442
3443 /* Make sure the selector is present. */
3444 if (!DescCS.Legacy.Gen.u1Present)
3445 {
3446 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3447 return iemRaiseSelectorNotPresentBySelector(pIemCpu, NewCS);
3448 }
3449
3450 /* Check the new EIP against the new CS limit. */
3451 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
3452 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
3453 ? Idte.Gate.u16OffsetLow
3454 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
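    /* 286 gates only carry a 16-bit offset; 386 gates split a 32-bit offset
       across the low and high words, reassembled above. */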
3455 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
3456 if (uNewEip > cbLimitCS)
3457 {
3458 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
3459 u8Vector, uNewEip, cbLimitCS, NewCS));
3460 return iemRaiseGeneralProtectionFault(pIemCpu, 0);
3461 }
3462
3463 /* Calc the flag image to push. */
3464 uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx);
3465 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3466 fEfl &= ~X86_EFL_RF;
3467 else if (!IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
3468 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3469
3470 /* From V8086 mode only go to CPL 0. */
3471 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3472 ? pIemCpu->uCpl : DescCS.Legacy.Gen.u2Dpl;
3473 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
3474 {
3475 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
3476 return iemRaiseGeneralProtectionFault(pIemCpu, 0);
3477 }
3478
3479 /*
3480 * If the privilege level changes, we need to get a new stack from the TSS.
3481 * This in turns means validating the new SS and ESP...
3482 */
3483 if (uNewCpl != pIemCpu->uCpl)
3484 {
3485 RTSEL NewSS;
3486 uint32_t uNewEsp;
3487 rcStrict = iemRaiseLoadStackFromTss32Or16(pIemCpu, pCtx, uNewCpl, &NewSS, &uNewEsp);
3488 if (rcStrict != VINF_SUCCESS)
3489 return rcStrict;
3490
3491 IEMSELDESC DescSS;
3492 rcStrict = iemMiscValidateNewSS(pIemCpu, pCtx, NewSS, uNewCpl, &DescSS);
3493 if (rcStrict != VINF_SUCCESS)
3494 return rcStrict;
3495
3496 /* Check that there is sufficient space for the stack frame. */
3497 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
3498 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
3499 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
3500 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
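        /* For a 32-bit gate this works out to EIP, CS, EFLAGS, ESP and SS
           (5 dwords, plus one for an error code); when the interrupted code
           was in V8086 mode, ES, DS, FS and GS are pushed as well.  16-bit
           gates push the same layout as words. */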
3501
3502 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
3503 {
3504 if ( uNewEsp - 1 > cbLimitSS
3505 || uNewEsp < cbStackFrame)
3506 {
3507 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
3508 u8Vector, NewSS, uNewEsp, cbStackFrame));
3509 return iemRaiseSelectorBoundsBySelector(pIemCpu, NewSS);
3510 }
3511 }
3512 else
3513 {
3514 if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
3515 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
3516 {
3517 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
3518 u8Vector, NewSS, uNewEsp, cbStackFrame));
3519 return iemRaiseSelectorBoundsBySelector(pIemCpu, NewSS);
3520 }
3521 }
3522
3523 /*
3524 * Start making changes.
3525 */
3526
3527 /* Create the stack frame. */
3528 RTPTRUNION uStackFrame;
3529 rcStrict = iemMemMap(pIemCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
3530 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy), IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
3531 if (rcStrict != VINF_SUCCESS)
3532 return rcStrict;
3533 void * const pvStackFrame = uStackFrame.pv;
3534 if (f32BitGate)
3535 {
3536 if (fFlags & IEM_XCPT_FLAGS_ERR)
3537 *uStackFrame.pu32++ = uErr;
3538 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->eip + cbInstr : pCtx->eip;
3539 uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl;
3540 uStackFrame.pu32[2] = fEfl;
3541 uStackFrame.pu32[3] = pCtx->esp;
3542 uStackFrame.pu32[4] = pCtx->ss.Sel;
3543 if (fEfl & X86_EFL_VM)
3544 {
3545 uStackFrame.pu32[1] = pCtx->cs.Sel;
3546 uStackFrame.pu32[5] = pCtx->es.Sel;
3547 uStackFrame.pu32[6] = pCtx->ds.Sel;
3548 uStackFrame.pu32[7] = pCtx->fs.Sel;
3549 uStackFrame.pu32[8] = pCtx->gs.Sel;
3550 }
3551 }
3552 else
3553 {
3554 if (fFlags & IEM_XCPT_FLAGS_ERR)
3555 *uStackFrame.pu16++ = uErr;
3556 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->ip + cbInstr : pCtx->ip;
3557 uStackFrame.pu16[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl;
3558 uStackFrame.pu16[2] = fEfl;
3559 uStackFrame.pu16[3] = pCtx->sp;
3560 uStackFrame.pu16[4] = pCtx->ss.Sel;
3561 if (fEfl & X86_EFL_VM)
3562 {
3563 uStackFrame.pu16[1] = pCtx->cs.Sel;
3564 uStackFrame.pu16[5] = pCtx->es.Sel;
3565 uStackFrame.pu16[6] = pCtx->ds.Sel;
3566 uStackFrame.pu16[7] = pCtx->fs.Sel;
3567 uStackFrame.pu16[8] = pCtx->gs.Sel;
3568 }
3569 }
3570 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
3571 if (rcStrict != VINF_SUCCESS)
3572 return rcStrict;
3573
3574 /* Mark the selectors 'accessed' (hope this is the correct time). */
3575 /** @todo testcase: exactly _when_ are the accessed bits set - before or
3576 * after pushing the stack frame? (Write protect the gdt + stack to
3577 * find out.) */
3578 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3579 {
3580 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewCS);
3581 if (rcStrict != VINF_SUCCESS)
3582 return rcStrict;
3583 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3584 }
3585
3586 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3587 {
3588 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewSS);
3589 if (rcStrict != VINF_SUCCESS)
3590 return rcStrict;
3591 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3592 }
3593
3594 /*
3595 * Start committing the register changes (joins with the DPL=CPL branch).
3596 */
3597 pCtx->ss.Sel = NewSS;
3598 pCtx->ss.ValidSel = NewSS;
3599 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
3600 pCtx->ss.u32Limit = cbLimitSS;
3601 pCtx->ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
3602 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
3603 /** @todo When coming from 32-bit code and operating with a 16-bit TSS and
3604 * 16-bit handler, the high word of ESP remains unchanged (i.e. only
3605 * SP is loaded).
3606 * Need to check the other combinations too:
3607 * - 16-bit TSS, 32-bit handler
3608 * - 32-bit TSS, 16-bit handler */
3609 if (!pCtx->ss.Attr.n.u1DefBig)
3610 pCtx->sp = (uint16_t)(uNewEsp - cbStackFrame);
3611 else
3612 pCtx->rsp = uNewEsp - cbStackFrame;
3613 pIemCpu->uCpl = uNewCpl;
3614
3615 if (fEfl & X86_EFL_VM)
3616 {
3617 iemHlpLoadNullDataSelectorOnV86Xcpt(pIemCpu, &pCtx->gs);
3618 iemHlpLoadNullDataSelectorOnV86Xcpt(pIemCpu, &pCtx->fs);
3619 iemHlpLoadNullDataSelectorOnV86Xcpt(pIemCpu, &pCtx->es);
3620 iemHlpLoadNullDataSelectorOnV86Xcpt(pIemCpu, &pCtx->ds);
3621 }
3622 }
3623 /*
3624 * Same privilege, no stack change and smaller stack frame.
3625 */
3626 else
3627 {
3628 uint64_t uNewRsp;
3629 RTPTRUNION uStackFrame;
3630 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
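        /* Just EIP/IP, CS and EFLAGS here (plus an optional error code):
           6 or 8 bytes through a 16-bit gate, doubled for a 32-bit gate. */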
3631 rcStrict = iemMemStackPushBeginSpecial(pIemCpu, cbStackFrame, &uStackFrame.pv, &uNewRsp);
3632 if (rcStrict != VINF_SUCCESS)
3633 return rcStrict;
3634 void * const pvStackFrame = uStackFrame.pv;
3635
3636 if (f32BitGate)
3637 {
3638 if (fFlags & IEM_XCPT_FLAGS_ERR)
3639 *uStackFrame.pu32++ = uErr;
3640 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->eip + cbInstr : pCtx->eip;
3641 uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl;
3642 uStackFrame.pu32[2] = fEfl;
3643 }
3644 else
3645 {
3646 if (fFlags & IEM_XCPT_FLAGS_ERR)
3647 *uStackFrame.pu16++ = uErr;
3648 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->eip + cbInstr : pCtx->eip;
3649 uStackFrame.pu16[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl;
3650 uStackFrame.pu16[2] = fEfl;
3651 }
3652 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */
3653 if (rcStrict != VINF_SUCCESS)
3654 return rcStrict;
3655
3656 /* Mark the CS selector as 'accessed'. */
3657 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3658 {
3659 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewCS);
3660 if (rcStrict != VINF_SUCCESS)
3661 return rcStrict;
3662 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3663 }
3664
3665 /*
3666 * Start committing the register changes (joins with the other branch).
3667 */
3668 pCtx->rsp = uNewRsp;
3669 }
3670
3671 /* ... register committing continues. */
3672 pCtx->cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3673 pCtx->cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3674 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3675 pCtx->cs.u32Limit = cbLimitCS;
3676 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3677 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3678
3679 pCtx->rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */
3680 fEfl &= ~fEflToClear;
3681 IEMMISC_SET_EFL(pIemCpu, pCtx, fEfl);
3682
3683 if (fFlags & IEM_XCPT_FLAGS_CR2)
3684 pCtx->cr2 = uCr2;
3685
3686 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3687 iemRaiseXcptAdjustState(pCtx, u8Vector);
3688
3689 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3690}
3691
3692
3693/**
3694 * Implements exceptions and interrupts for long mode.
3695 *
3696 * @returns VBox strict status code.
3697 * @param pIemCpu The IEM per CPU instance data.
3698 * @param pCtx The CPU context.
3699 * @param cbInstr The number of bytes to offset rIP by in the return
3700 * address.
3701 * @param u8Vector The interrupt / exception vector number.
3702 * @param fFlags The flags.
3703 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3704 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3705 */
3706IEM_STATIC VBOXSTRICTRC
3707iemRaiseXcptOrIntInLongMode(PIEMCPU pIemCpu,
3708 PCPUMCTX pCtx,
3709 uint8_t cbInstr,
3710 uint8_t u8Vector,
3711 uint32_t fFlags,
3712 uint16_t uErr,
3713 uint64_t uCr2)
3714{
3715 /*
3716 * Read the IDT entry.
3717 */
3718 uint16_t offIdt = (uint16_t)u8Vector << 4;
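    /* Long mode IDT entries are 16 bytes each (two qwords), hence vector * 16;
       both halves are fetched below. */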
3719 if (pCtx->idtr.cbIdt < offIdt + 7)
3720 {
3721 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
3722 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3723 }
3724 X86DESC64 Idte;
3725 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pIemCpu, &Idte.au64[0], UINT8_MAX, pCtx->idtr.pIdt + offIdt);
3726 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
3727 rcStrict = iemMemFetchSysU64(pIemCpu, &Idte.au64[1], UINT8_MAX, pCtx->idtr.pIdt + offIdt + 8);
3728 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3729 return rcStrict;
3730 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
3731 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
3732 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
3733
3734 /*
3735 * Check the descriptor type, DPL and such.
3736 * ASSUMES this is done in the same order as described for call-gate calls.
3737 */
3738 if (Idte.Gate.u1DescType)
3739 {
3740 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3741 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3742 }
3743 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
3744 switch (Idte.Gate.u4Type)
3745 {
3746 case AMD64_SEL_TYPE_SYS_INT_GATE:
3747 fEflToClear |= X86_EFL_IF;
3748 break;
3749 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
3750 break;
3751
3752 default:
3753 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3754 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3755 }
3756
3757 /* Check DPL against CPL if applicable. */
3758 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3759 {
3760 if (pIemCpu->uCpl > Idte.Gate.u2Dpl)
3761 {
3762 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pIemCpu->uCpl, Idte.Gate.u2Dpl));
3763 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3764 }
3765 }
3766
3767 /* Is it there? */
3768 if (!Idte.Gate.u1Present)
3769 {
3770 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
3771 return iemRaiseSelectorNotPresentWithErr(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3772 }
3773
3774 /* A null CS is bad. */
3775 RTSEL NewCS = Idte.Gate.u16Sel;
3776 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3777 {
3778 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3779 return iemRaiseGeneralProtectionFault0(pIemCpu);
3780 }
3781
3782 /* Fetch the descriptor for the new CS. */
3783 IEMSELDESC DescCS;
3784 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCS, NewCS, X86_XCPT_GP);
3785 if (rcStrict != VINF_SUCCESS)
3786 {
3787 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3788 return rcStrict;
3789 }
3790
3791 /* Must be a 64-bit code segment. */
3792 if (!DescCS.Long.Gen.u1DescType)
3793 {
3794 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3795 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3796 }
3797 if ( !DescCS.Long.Gen.u1Long
3798 || DescCS.Long.Gen.u1DefBig
3799 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
3800 {
3801 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
3802 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
3803 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3804 }
3805
3806 /* Don't allow lowering the privilege level. For non-conforming CS
3807 selectors, the CS.DPL sets the privilege level the trap/interrupt
3808 handler runs at. For conforming CS selectors, the CPL remains
3809 unchanged, but the CS.DPL must be <= CPL. */
3810 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
3811 * when CPU in Ring-0. Result \#GP? */
3812 if (DescCS.Legacy.Gen.u2Dpl > pIemCpu->uCpl)
3813 {
3814 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3815 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
3816 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3817 }
3818
3819
3820 /* Make sure the selector is present. */
3821 if (!DescCS.Legacy.Gen.u1Present)
3822 {
3823 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3824 return iemRaiseSelectorNotPresentBySelector(pIemCpu, NewCS);
3825 }
3826
3827 /* Check that the new RIP is canonical. */
3828 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
3829 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
3830 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
3831 if (!IEM_IS_CANONICAL(uNewRip))
3832 {
3833 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
3834 return iemRaiseGeneralProtectionFault0(pIemCpu);
3835 }
3836
3837 /*
3838 * If the privilege level changes or if the IST isn't zero, we need to get
3839 * a new stack from the TSS.
3840 */
3841 uint64_t uNewRsp;
3842 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3843 ? pIemCpu->uCpl : DescCS.Legacy.Gen.u2Dpl;
3844 if ( uNewCpl != pIemCpu->uCpl
3845 || Idte.Gate.u3IST != 0)
3846 {
3847 rcStrict = iemRaiseLoadStackFromTss64(pIemCpu, pCtx, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
3848 if (rcStrict != VINF_SUCCESS)
3849 return rcStrict;
3850 }
3851 else
3852 uNewRsp = pCtx->rsp;
3853 uNewRsp &= ~(uint64_t)0xf;
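    /* The stack pointer is aligned down to a 16 byte boundary before the
       frame is pushed when dispatching an event in long mode. */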
3854
3855 /*
3856 * Calc the flag image to push.
3857 */
3858 uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx);
3859 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3860 fEfl &= ~X86_EFL_RF;
3861 else if (!IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
3862 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3863
3864 /*
3865 * Start making changes.
3866 */
3867
3868 /* Create the stack frame. */
3869 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
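    /* Five qwords: return RIP, CS, RFLAGS, old RSP and SS, preceded by an
       error code at the lowest address when one is supplied. */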
3870 RTPTRUNION uStackFrame;
3871 rcStrict = iemMemMap(pIemCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
3872 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
3873 if (rcStrict != VINF_SUCCESS)
3874 return rcStrict;
3875 void * const pvStackFrame = uStackFrame.pv;
3876
3877 if (fFlags & IEM_XCPT_FLAGS_ERR)
3878 *uStackFrame.pu64++ = uErr;
3879 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->rip + cbInstr : pCtx->rip;
3880 uStackFrame.pu64[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl; /* CPL paranoia */
3881 uStackFrame.pu64[2] = fEfl;
3882 uStackFrame.pu64[3] = pCtx->rsp;
3883 uStackFrame.pu64[4] = pCtx->ss.Sel;
3884 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
3885 if (rcStrict != VINF_SUCCESS)
3886 return rcStrict;
3887
3888 /* Mark the CS selector as 'accessed' (hope this is the correct time). */
3889 /** @todo testcase: exactly _when_ are the accessed bits set - before or
3890 * after pushing the stack frame? (Write protect the gdt + stack to
3891 * find out.) */
3892 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3893 {
3894 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewCS);
3895 if (rcStrict != VINF_SUCCESS)
3896 return rcStrict;
3897 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3898 }
3899
3900 /*
3901 * Start committing the register changes.
3902 */
3903 /** @todo research/testcase: Figure out what VT-x and AMD-V loads into the
3904 * hidden registers when interrupting 32-bit or 16-bit code! */
3905 if (uNewCpl != pIemCpu->uCpl)
3906 {
3907 pCtx->ss.Sel = 0 | uNewCpl;
3908 pCtx->ss.ValidSel = 0 | uNewCpl;
3909 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
3910 pCtx->ss.u32Limit = UINT32_MAX;
3911 pCtx->ss.u64Base = 0;
3912 pCtx->ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
3913 }
3914 pCtx->rsp = uNewRsp - cbStackFrame;
3915 pCtx->cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3916 pCtx->cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3917 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3918 pCtx->cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
3919 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3920 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3921 pCtx->rip = uNewRip;
3922 pIemCpu->uCpl = uNewCpl;
3923
3924 fEfl &= ~fEflToClear;
3925 IEMMISC_SET_EFL(pIemCpu, pCtx, fEfl);
3926
3927 if (fFlags & IEM_XCPT_FLAGS_CR2)
3928 pCtx->cr2 = uCr2;
3929
3930 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3931 iemRaiseXcptAdjustState(pCtx, u8Vector);
3932
3933 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3934}
3935
3936
3937/**
3938 * Implements exceptions and interrupts.
3939 *
3940 * All exceptions and interrupts go through this function!
3941 *
3942 * @returns VBox strict status code.
3943 * @param pIemCpu The IEM per CPU instance data.
3944 * @param cbInstr The number of bytes to offset rIP by in the return
3945 * address.
3946 * @param u8Vector The interrupt / exception vector number.
3947 * @param fFlags The flags.
3948 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3949 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3950 */
3951DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC)
3952iemRaiseXcptOrInt(PIEMCPU pIemCpu,
3953 uint8_t cbInstr,
3954 uint8_t u8Vector,
3955 uint32_t fFlags,
3956 uint16_t uErr,
3957 uint64_t uCr2)
3958{
3959 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3960#ifdef IN_RING0
3961 int rc = HMR0EnsureCompleteBasicContext(IEMCPU_TO_VMCPU(pIemCpu), pCtx);
3962 AssertRCReturn(rc, rc);
3963#endif
3964
3965 /*
3966 * Perform the V8086 IOPL check and upgrade the fault without nesting.
3967 */
3968 if ( pCtx->eflags.Bits.u1VM
3969 && pCtx->eflags.Bits.u2IOPL != 3
3970 && (fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
3971 && (pCtx->cr0 & X86_CR0_PE) )
3972 {
3973 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
3974 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
3975 u8Vector = X86_XCPT_GP;
3976 uErr = 0;
3977 }
3978#ifdef DBGFTRACE_ENABLED
3979 RTTraceBufAddMsgF(IEMCPU_TO_VM(pIemCpu)->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
3980 pIemCpu->cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
3981 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp);
3982#endif
3983
3984 /*
3985 * Do recursion accounting.
3986 */
3987 uint8_t const uPrevXcpt = pIemCpu->uCurXcpt;
3988 uint32_t const fPrevXcpt = pIemCpu->fCurXcpt;
3989 if (pIemCpu->cXcptRecursions == 0)
3990 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
3991 u8Vector, pCtx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2));
3992 else
3993 {
3994 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
3995 u8Vector, pCtx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2, pIemCpu->uCurXcpt, pIemCpu->cXcptRecursions + 1, fPrevXcpt));
3996
3997 /** @todo double and triple faults. */
3998 if (pIemCpu->cXcptRecursions >= 3)
3999 {
4000#ifdef DEBUG_bird
4001 AssertFailed();
4002#endif
4003 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
4004 }
4005
4006 /** @todo set X86_TRAP_ERR_EXTERNAL when appropriate.
4007 if (fPrevXcpt & IEM_XCPT_FLAGS_T_EXT_INT)
4008 {
4009 ....
4010 } */
4011 }
4012 pIemCpu->cXcptRecursions++;
4013 pIemCpu->uCurXcpt = u8Vector;
4014 pIemCpu->fCurXcpt = fFlags;
4015
4016 /*
4017 * Extensive logging.
4018 */
4019#if defined(LOG_ENABLED) && defined(IN_RING3)
4020 if (LogIs3Enabled())
4021 {
4022 PVM pVM = IEMCPU_TO_VM(pIemCpu);
4023 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
4024 char szRegs[4096];
4025 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
4026 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
4027 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
4028 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
4029 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
4030 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
4031 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
4032 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
4033 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
4034 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
4035 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
4036 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
4037 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
4038 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
4039 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
4040 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
4041 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
4042 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
4043 " efer=%016VR{efer}\n"
4044 " pat=%016VR{pat}\n"
4045 " sf_mask=%016VR{sf_mask}\n"
4046 "krnl_gs_base=%016VR{krnl_gs_base}\n"
4047 " lstar=%016VR{lstar}\n"
4048 " star=%016VR{star} cstar=%016VR{cstar}\n"
4049 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
4050 );
4051
4052 char szInstr[256];
4053 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
4054 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
4055 szInstr, sizeof(szInstr), NULL);
4056 Log3(("%s%s\n", szRegs, szInstr));
4057 }
4058#endif /* LOG_ENABLED */
4059
4060 /*
4061 * Call the mode specific worker function.
4062 */
4063 VBOXSTRICTRC rcStrict;
4064 if (!(pCtx->cr0 & X86_CR0_PE))
4065 rcStrict = iemRaiseXcptOrIntInRealMode( pIemCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
4066 else if (pCtx->msrEFER & MSR_K6_EFER_LMA)
4067 rcStrict = iemRaiseXcptOrIntInLongMode( pIemCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
4068 else
4069 rcStrict = iemRaiseXcptOrIntInProtMode( pIemCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
4070
4071 /*
4072 * Unwind.
4073 */
4074 pIemCpu->cXcptRecursions--;
4075 pIemCpu->uCurXcpt = uPrevXcpt;
4076 pIemCpu->fCurXcpt = fPrevXcpt;
4077 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u\n",
4078 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->esp, pIemCpu->uCpl));
4079 return rcStrict;
4080}
4081
4082
4083/** \#DE - 00. */
4084DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDivideError(PIEMCPU pIemCpu)
4085{
4086 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4087}
4088
4089
4090/** \#DB - 01.
4091 * @note This automatically clears DR7.GD. */
4092DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDebugException(PIEMCPU pIemCpu)
4093{
4094 /** @todo set/clear RF. */
4095 pIemCpu->CTX_SUFF(pCtx)->dr[7] &= ~X86_DR7_GD;
4096 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4097}
4098
4099
4100/** \#UD - 06. */
4101DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseUndefinedOpcode(PIEMCPU pIemCpu)
4102{
4103 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4104}
4105
4106
4107/** \#NM - 07. */
4108DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDeviceNotAvailable(PIEMCPU pIemCpu)
4109{
4110 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4111}
4112
4113
4114/** \#TS(err) - 0a. */
4115DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultWithErr(PIEMCPU pIemCpu, uint16_t uErr)
4116{
4117 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4118}
4119
4120
4121/** \#TS(tr) - 0a. */
4122DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultCurrentTSS(PIEMCPU pIemCpu)
4123{
4124 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4125 pIemCpu->CTX_SUFF(pCtx)->tr.Sel, 0);
4126}
4127
4128
4129/** \#TS(0) - 0a. */
4130DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFault0(PIEMCPU pIemCpu)
4131{
4132 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4133 0, 0);
4134}
4135
4136
4137/** \#TS(err) - 0a. */
4138DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultBySelector(PIEMCPU pIemCpu, uint16_t uSel)
4139{
4140 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4141 uSel & X86_SEL_MASK_OFF_RPL, 0);
4142}
4143
4144
4145/** \#NP(err) - 0b. */
4146DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentWithErr(PIEMCPU pIemCpu, uint16_t uErr)
4147{
4148 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4149}
4150
4151
4152/** \#NP(seg) - 0b. */
4153DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySegReg(PIEMCPU pIemCpu, uint32_t iSegReg)
4154{
4155 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4156 iemSRegFetchU16(pIemCpu, iSegReg) & ~X86_SEL_RPL, 0);
4157}
4158
4159
4160/** \#NP(sel) - 0b. */
4161DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel)
4162{
4163 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4164 uSel & ~X86_SEL_RPL, 0);
4165}
4166
4167
4168/** \#SS(seg) - 0c. */
4169DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel)
4170{
4171 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4172 uSel & ~X86_SEL_RPL, 0);
4173}
4174
4175
4176/** \#SS(err) - 0c. */
4177DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentWithErr(PIEMCPU pIemCpu, uint16_t uErr)
4178{
4179 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4180}
4181
4182
4183/** \#GP(n) - 0d. */
4184DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault(PIEMCPU pIemCpu, uint16_t uErr)
4185{
4186 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4187}
4188
4189
4190/** \#GP(0) - 0d. */
4191DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault0(PIEMCPU pIemCpu)
4192{
4193 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4194}
4195
4196
4197/** \#GP(sel) - 0d. */
4198DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFaultBySelector(PIEMCPU pIemCpu, RTSEL Sel)
4199{
4200 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4201 Sel & ~X86_SEL_RPL, 0);
4202}
4203
4204
4205/** \#GP(0) - 0d. */
4206DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseNotCanonical(PIEMCPU pIemCpu)
4207{
4208 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4209}
4210
4211
4212/** \#GP(sel) - 0d. */
4213DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBounds(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess)
4214{
4215 NOREF(iSegReg); NOREF(fAccess);
4216 return iemRaiseXcptOrInt(pIemCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
4217 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4218}
4219
4220
4221/** \#GP(sel) - 0d. */
4222DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBoundsBySelector(PIEMCPU pIemCpu, RTSEL Sel)
4223{
4224 NOREF(Sel);
4225 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4226}
4227
4228
4229/** \#GP(sel) - 0d. */
4230DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorInvalidAccess(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess)
4231{
4232 NOREF(iSegReg); NOREF(fAccess);
4233 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4234}
4235
4236
4237/** \#PF(n) - 0e. */
4238DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaisePageFault(PIEMCPU pIemCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
4239{
4240 uint16_t uErr;
4241 switch (rc)
4242 {
4243 case VERR_PAGE_NOT_PRESENT:
4244 case VERR_PAGE_TABLE_NOT_PRESENT:
4245 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
4246 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
4247 uErr = 0;
4248 break;
4249
4250 default:
4251 AssertMsgFailed(("%Rrc\n", rc));
4252 case VERR_ACCESS_DENIED:
4253 uErr = X86_TRAP_PF_P;
4254 break;
4255
4256 /** @todo reserved */
4257 }
4258
4259 if (pIemCpu->uCpl == 3)
4260 uErr |= X86_TRAP_PF_US;
4261
4262 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
4263 && ( (pIemCpu->CTX_SUFF(pCtx)->cr4 & X86_CR4_PAE)
4264 && (pIemCpu->CTX_SUFF(pCtx)->msrEFER & MSR_K6_EFER_NXE) ) )
4265 uErr |= X86_TRAP_PF_ID;
4266
4267#if 0 /* This is so much non-sense, really. Why was it done like that? */
4268 /* Note! RW access callers reporting a WRITE protection fault, will clear
4269 the READ flag before calling. So, read-modify-write accesses (RW)
4270 can safely be reported as READ faults. */
4271 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
4272 uErr |= X86_TRAP_PF_RW;
4273#else
4274 if (fAccess & IEM_ACCESS_TYPE_WRITE)
4275 {
4276 if (!IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu) || !(fAccess & IEM_ACCESS_TYPE_READ))
4277 uErr |= X86_TRAP_PF_RW;
4278 }
4279#endif
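    /* So the error code ends up with: P for protection violations on present
       pages, US for user-mode (CPL=3) accesses, ID for instruction fetches
       with NX enabled, and RW for writes. */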
4280
4281 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
4282 uErr, GCPtrWhere);
4283}
4284
4285
4286/** \#MF(0) - 10. */
4287DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseMathFault(PIEMCPU pIemCpu)
4288{
4289 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4290}
4291
4292
4293/** \#AC(0) - 11. */
4294DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseAlignmentCheckException(PIEMCPU pIemCpu)
4295{
4296 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4297}
4298
4299
4300/**
4301 * Macro for calling iemCImplRaiseDivideError().
4302 *
4303 * This enables us to add/remove arguments and force different levels of
4304 * inlining as we wish.
4305 *
4306 * @return Strict VBox status code.
4307 */
4308#define IEMOP_RAISE_DIVIDE_ERROR() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseDivideError)
4309IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
4310{
4311 NOREF(cbInstr);
4312 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4313}
4314
4315
4316/**
4317 * Macro for calling iemCImplRaiseInvalidLockPrefix().
4318 *
4319 * This enables us to add/remove arguments and force different levels of
4320 * inlining as we wish.
4321 *
4322 * @return Strict VBox status code.
4323 */
4324#define IEMOP_RAISE_INVALID_LOCK_PREFIX() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidLockPrefix)
4325IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
4326{
4327 NOREF(cbInstr);
4328 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4329}
4330
4331
4332/**
4333 * Macro for calling iemCImplRaiseInvalidOpcode().
4334 *
4335 * This enables us to add/remove arguments and force different levels of
4336 * inlining as we wish.
4337 *
4338 * @return Strict VBox status code.
4339 */
4340#define IEMOP_RAISE_INVALID_OPCODE() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidOpcode)
4341IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
4342{
4343 NOREF(cbInstr);
4344 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4345}
4346
4347
4348/** @} */
4349
4350
4351/*
4352 *
4353 * Helper routines.
4354 * Helper routines.
4355 * Helper routines.
4356 *
4357 */
4358
4359/**
4360 * Recalculates the effective operand size.
4361 *
4362 * @param pIemCpu The IEM state.
4363 */
4364IEM_STATIC void iemRecalEffOpSize(PIEMCPU pIemCpu)
4365{
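    /* 16-bit and 32-bit modes simply toggle between 16 and 32 on an operand
       size prefix.  In 64-bit mode REX.W forces 64-bit and wins over 0x66,
       a lone 0x66 gives 16-bit, otherwise the default operand size applies. */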
4366 switch (pIemCpu->enmCpuMode)
4367 {
4368 case IEMMODE_16BIT:
4369 pIemCpu->enmEffOpSize = pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_32BIT : IEMMODE_16BIT;
4370 break;
4371 case IEMMODE_32BIT:
4372 pIemCpu->enmEffOpSize = pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_16BIT : IEMMODE_32BIT;
4373 break;
4374 case IEMMODE_64BIT:
4375 switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP))
4376 {
4377 case 0:
4378 pIemCpu->enmEffOpSize = pIemCpu->enmDefOpSize;
4379 break;
4380 case IEM_OP_PRF_SIZE_OP:
4381 pIemCpu->enmEffOpSize = IEMMODE_16BIT;
4382 break;
4383 case IEM_OP_PRF_SIZE_REX_W:
4384 case IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP:
4385 pIemCpu->enmEffOpSize = IEMMODE_64BIT;
4386 break;
4387 }
4388 break;
4389 default:
4390 AssertFailed();
4391 }
4392}
4393
4394
4395/**
4396 * Sets the default operand size to 64-bit and recalculates the effective
4397 * operand size.
4398 *
4399 * @param pIemCpu The IEM state.
4400 */
4401IEM_STATIC void iemRecalEffOpSize64Default(PIEMCPU pIemCpu)
4402{
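    /* Used by opcodes whose operand size defaults to 64-bit in long mode
       (typically near branches and stack operations); only an operand size
       prefix without REX.W drops these back to 16-bit. */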
4403 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
4404 pIemCpu->enmDefOpSize = IEMMODE_64BIT;
4405 if ((pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP)) != IEM_OP_PRF_SIZE_OP)
4406 pIemCpu->enmEffOpSize = IEMMODE_64BIT;
4407 else
4408 pIemCpu->enmEffOpSize = IEMMODE_16BIT;
4409}
4410
4411
4412/*
4413 *
4414 * Common opcode decoders.
4415 * Common opcode decoders.
4416 * Common opcode decoders.
4417 *
4418 */
4419//#include <iprt/mem.h>
4420
4421/**
4422 * Used to add extra details about a stub case.
4423 * @param pIemCpu The IEM per CPU state.
4424 */
4425IEM_STATIC void iemOpStubMsg2(PIEMCPU pIemCpu)
4426{
4427#if defined(LOG_ENABLED) && defined(IN_RING3)
4428 PVM pVM = IEMCPU_TO_VM(pIemCpu);
4429 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
4430 char szRegs[4096];
4431 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
4432 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
4433 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
4434 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
4435 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
4436 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
4437 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
4438 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
4439 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
4440 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
4441 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
4442 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
4443 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
4444 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
4445 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
4446 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
4447 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
4448 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
4449 " efer=%016VR{efer}\n"
4450 " pat=%016VR{pat}\n"
4451 " sf_mask=%016VR{sf_mask}\n"
4452 "krnl_gs_base=%016VR{krnl_gs_base}\n"
4453 " lstar=%016VR{lstar}\n"
4454 " star=%016VR{star} cstar=%016VR{cstar}\n"
4455 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
4456 );
4457
4458 char szInstr[256];
4459 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
4460 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
4461 szInstr, sizeof(szInstr), NULL);
4462
4463 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
4464#else
4465 RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", pIemCpu->CTX_SUFF(pCtx)->cs.Sel, pIemCpu->CTX_SUFF(pCtx)->rip);
4466#endif
4467}
4468
4469/**
4470 * Complains about a stub.
4471 *
4472 * Providing two versions of this macro, one for daily use and one for use when
4473 * working on IEM.
4474 */
4475#if 0
4476# define IEMOP_BITCH_ABOUT_STUB() \
4477 do { \
4478 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__); \
4479 iemOpStubMsg2(pIemCpu); \
4480 RTAssertPanic(); \
4481 } while (0)
4482#else
4483# define IEMOP_BITCH_ABOUT_STUB() Log(("Stub: %s (line %d)\n", __FUNCTION__, __LINE__));
4484#endif
4485
4486/** Stubs an opcode. */
4487#define FNIEMOP_STUB(a_Name) \
4488 FNIEMOP_DEF(a_Name) \
4489 { \
4490 IEMOP_BITCH_ABOUT_STUB(); \
4491 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
4492 } \
4493 typedef int ignore_semicolon
4494
4495/** Stubs an opcode. */
4496#define FNIEMOP_STUB_1(a_Name, a_Type0, a_Name0) \
4497 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
4498 { \
4499 IEMOP_BITCH_ABOUT_STUB(); \
4500 NOREF(a_Name0); \
4501 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
4502 } \
4503 typedef int ignore_semicolon
4504
4505/** Stubs an opcode which currently should raise \#UD. */
4506#define FNIEMOP_UD_STUB(a_Name) \
4507 FNIEMOP_DEF(a_Name) \
4508 { \
4509 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
4510 return IEMOP_RAISE_INVALID_OPCODE(); \
4511 } \
4512 typedef int ignore_semicolon
4513
4514/** Stubs an opcode which currently should raise \#UD. */
4515#define FNIEMOP_UD_STUB_1(a_Name, a_Type0, a_Name0) \
4516 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
4517 { \
4518 NOREF(a_Name0); \
4519 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
4520 return IEMOP_RAISE_INVALID_OPCODE(); \
4521 } \
4522 typedef int ignore_semicolon
4523
4524
4525
4526/** @name Register Access.
4527 * @{
4528 */
4529
4530/**
4531 * Gets a reference (pointer) to the specified hidden segment register.
4532 *
4533 * @returns Hidden register reference.
4534 * @param pIemCpu The per CPU data.
4535 * @param iSegReg The segment register.
4536 */
4537IEM_STATIC PCPUMSELREG iemSRegGetHid(PIEMCPU pIemCpu, uint8_t iSegReg)
4538{
4539 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4540 PCPUMSELREG pSReg;
4541 switch (iSegReg)
4542 {
4543 case X86_SREG_ES: pSReg = &pCtx->es; break;
4544 case X86_SREG_CS: pSReg = &pCtx->cs; break;
4545 case X86_SREG_SS: pSReg = &pCtx->ss; break;
4546 case X86_SREG_DS: pSReg = &pCtx->ds; break;
4547 case X86_SREG_FS: pSReg = &pCtx->fs; break;
4548 case X86_SREG_GS: pSReg = &pCtx->gs; break;
4549 default:
4550 AssertFailedReturn(NULL);
4551 }
4552#ifdef VBOX_WITH_RAW_MODE_NOT_R0
4553 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pSReg))
4554 CPUMGuestLazyLoadHiddenSelectorReg(IEMCPU_TO_VMCPU(pIemCpu), pSReg);
4555#else
4556 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pSReg));
4557#endif
4558 return pSReg;
4559}
4560
4561
4562/**
4563 * Gets a reference (pointer) to the specified segment register (the selector
4564 * value).
4565 *
4566 * @returns Pointer to the selector variable.
4567 * @param pIemCpu The per CPU data.
4568 * @param iSegReg The segment register.
4569 */
4570IEM_STATIC uint16_t *iemSRegRef(PIEMCPU pIemCpu, uint8_t iSegReg)
4571{
4572 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4573 switch (iSegReg)
4574 {
4575 case X86_SREG_ES: return &pCtx->es.Sel;
4576 case X86_SREG_CS: return &pCtx->cs.Sel;
4577 case X86_SREG_SS: return &pCtx->ss.Sel;
4578 case X86_SREG_DS: return &pCtx->ds.Sel;
4579 case X86_SREG_FS: return &pCtx->fs.Sel;
4580 case X86_SREG_GS: return &pCtx->gs.Sel;
4581 }
4582 AssertFailedReturn(NULL);
4583}
4584
4585
4586/**
4587 * Fetches the selector value of a segment register.
4588 *
4589 * @returns The selector value.
4590 * @param pIemCpu The per CPU data.
4591 * @param iSegReg The segment register.
4592 */
4593IEM_STATIC uint16_t iemSRegFetchU16(PIEMCPU pIemCpu, uint8_t iSegReg)
4594{
4595 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4596 switch (iSegReg)
4597 {
4598 case X86_SREG_ES: return pCtx->es.Sel;
4599 case X86_SREG_CS: return pCtx->cs.Sel;
4600 case X86_SREG_SS: return pCtx->ss.Sel;
4601 case X86_SREG_DS: return pCtx->ds.Sel;
4602 case X86_SREG_FS: return pCtx->fs.Sel;
4603 case X86_SREG_GS: return pCtx->gs.Sel;
4604 }
4605 AssertFailedReturn(0xffff);
4606}
4607
4608
4609/**
4610 * Gets a reference (pointer) to the specified general register.
4611 *
4612 * @returns Register reference.
4613 * @param pIemCpu The per CPU data.
4614 * @param iReg The general register.
4615 */
4616IEM_STATIC void *iemGRegRef(PIEMCPU pIemCpu, uint8_t iReg)
4617{
4618 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4619 switch (iReg)
4620 {
4621 case X86_GREG_xAX: return &pCtx->rax;
4622 case X86_GREG_xCX: return &pCtx->rcx;
4623 case X86_GREG_xDX: return &pCtx->rdx;
4624 case X86_GREG_xBX: return &pCtx->rbx;
4625 case X86_GREG_xSP: return &pCtx->rsp;
4626 case X86_GREG_xBP: return &pCtx->rbp;
4627 case X86_GREG_xSI: return &pCtx->rsi;
4628 case X86_GREG_xDI: return &pCtx->rdi;
4629 case X86_GREG_x8: return &pCtx->r8;
4630 case X86_GREG_x9: return &pCtx->r9;
4631 case X86_GREG_x10: return &pCtx->r10;
4632 case X86_GREG_x11: return &pCtx->r11;
4633 case X86_GREG_x12: return &pCtx->r12;
4634 case X86_GREG_x13: return &pCtx->r13;
4635 case X86_GREG_x14: return &pCtx->r14;
4636 case X86_GREG_x15: return &pCtx->r15;
4637 }
4638 AssertFailedReturn(NULL);
4639}
4640
4641
4642/**
4643 * Gets a reference (pointer) to the specified 8-bit general register.
4644 *
4645 * Because of AH, CH, DH and BH we cannot use iemGRegRef directly here.
4646 *
4647 * @returns Register reference.
4648 * @param pIemCpu The per CPU data.
4649 * @param iReg The register.
4650 */
4651IEM_STATIC uint8_t *iemGRegRefU8(PIEMCPU pIemCpu, uint8_t iReg)
4652{
4653 if (pIemCpu->fPrefixes & IEM_OP_PRF_REX)
4654 return (uint8_t *)iemGRegRef(pIemCpu, iReg);
4655
4656 uint8_t *pu8Reg = (uint8_t *)iemGRegRef(pIemCpu, iReg & 3);
4657 if (iReg >= 4)
4658 pu8Reg++;
4659 return pu8Reg;
4660}
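
/*
 * Illustration only, not IEM code: without a REX prefix the byte register
 * encodings 4-7 select AH/CH/DH/BH, i.e. byte 1 of the GPR selected by
 * (encoding & 3).  The standalone sketch below mirrors that arithmetic; the
 * EXAMPLECTX type and function name are made up for the example and it
 * assumes a little-endian host layout.
 */
#if 0 /* example sketch */
# include <stdint.h>
typedef struct EXAMPLECTX { uint64_t rax, rcx, rdx, rbx; } EXAMPLECTX;

static uint8_t *exampleLegacyByteRegRef(EXAMPLECTX *pCtx, uint8_t iReg)
{
    uint64_t *apReg[4] = { &pCtx->rax, &pCtx->rcx, &pCtx->rdx, &pCtx->rbx };
    uint8_t  *pb       = (uint8_t *)apReg[iReg & 3];  /* AL/CL/DL/BL for 0..3. */
    return iReg >= 4 ? pb + 1 : pb;                    /* AH/CH/DH/BH for 4..7. */
}
#endif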
4661
4662
4663/**
4664 * Fetches the value of an 8-bit general register.
4665 *
4666 * @returns The register value.
4667 * @param pIemCpu The per CPU data.
4668 * @param iReg The register.
4669 */
4670IEM_STATIC uint8_t iemGRegFetchU8(PIEMCPU pIemCpu, uint8_t iReg)
4671{
4672 uint8_t const *pbSrc = iemGRegRefU8(pIemCpu, iReg);
4673 return *pbSrc;
4674}
4675
4676
4677/**
4678 * Fetches the value of a 16-bit general register.
4679 *
4680 * @returns The register value.
4681 * @param pIemCpu The per CPU data.
4682 * @param iReg The register.
4683 */
4684IEM_STATIC uint16_t iemGRegFetchU16(PIEMCPU pIemCpu, uint8_t iReg)
4685{
4686 return *(uint16_t *)iemGRegRef(pIemCpu, iReg);
4687}
4688
4689
4690/**
4691 * Fetches the value of a 32-bit general register.
4692 *
4693 * @returns The register value.
4694 * @param pIemCpu The per CPU data.
4695 * @param iReg The register.
4696 */
4697IEM_STATIC uint32_t iemGRegFetchU32(PIEMCPU pIemCpu, uint8_t iReg)
4698{
4699 return *(uint32_t *)iemGRegRef(pIemCpu, iReg);
4700}
4701
4702
4703/**
4704 * Fetches the value of a 64-bit general register.
4705 *
4706 * @returns The register value.
4707 * @param pIemCpu The per CPU data.
4708 * @param iReg The register.
4709 */
4710IEM_STATIC uint64_t iemGRegFetchU64(PIEMCPU pIemCpu, uint8_t iReg)
4711{
4712 return *(uint64_t *)iemGRegRef(pIemCpu, iReg);
4713}
4714
4715
4716/**
4717 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
4718 *
4719 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4720 * segment limit.
4721 *
4722 * @param pIemCpu The per CPU data.
4723 * @param offNextInstr The offset of the next instruction.
4724 */
4725IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS8(PIEMCPU pIemCpu, int8_t offNextInstr)
4726{
4727 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4728 switch (pIemCpu->enmEffOpSize)
4729 {
4730 case IEMMODE_16BIT:
4731 {
4732 uint16_t uNewIp = pCtx->ip + offNextInstr + pIemCpu->offOpcode;
4733 if ( uNewIp > pCtx->cs.u32Limit
4734 && pIemCpu->enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
4735 return iemRaiseGeneralProtectionFault0(pIemCpu);
4736 pCtx->rip = uNewIp;
4737 break;
4738 }
4739
4740 case IEMMODE_32BIT:
4741 {
4742 Assert(pCtx->rip <= UINT32_MAX);
4743 Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
4744
4745 uint32_t uNewEip = pCtx->eip + offNextInstr + pIemCpu->offOpcode;
4746 if (uNewEip > pCtx->cs.u32Limit)
4747 return iemRaiseGeneralProtectionFault0(pIemCpu);
4748 pCtx->rip = uNewEip;
4749 break;
4750 }
4751
4752 case IEMMODE_64BIT:
4753 {
4754 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
4755
4756 uint64_t uNewRip = pCtx->rip + offNextInstr + pIemCpu->offOpcode;
4757 if (!IEM_IS_CANONICAL(uNewRip))
4758 return iemRaiseGeneralProtectionFault0(pIemCpu);
4759 pCtx->rip = uNewRip;
4760 break;
4761 }
4762
4763 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4764 }
4765
4766 pCtx->eflags.Bits.u1RF = 0;
4767 return VINF_SUCCESS;
4768}
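
/*
 * Worked example (made-up values): in 16-bit code with IP=0x0100, the two
 * byte instruction EB FE (JMP short -2) is decoded with offOpcode=2 and
 * offNextInstr=-2, so uNewIp = 0x0100 + (-2) + 2 = 0x0100; the jump targets
 * itself because the displacement is relative to the next instruction.
 */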
4769
4770
4771/**
4772 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
4773 *
4774 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4775 * segment limit.
4776 *
4777 * @returns Strict VBox status code.
4778 * @param pIemCpu The per CPU data.
4779 * @param offNextInstr The offset of the next instruction.
4780 */
4781IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS16(PIEMCPU pIemCpu, int16_t offNextInstr)
4782{
4783 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4784 Assert(pIemCpu->enmEffOpSize == IEMMODE_16BIT);
4785
4786 uint16_t uNewIp = pCtx->ip + offNextInstr + pIemCpu->offOpcode;
4787 if ( uNewIp > pCtx->cs.u32Limit
4788 && pIemCpu->enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
4789 return iemRaiseGeneralProtectionFault0(pIemCpu);
4790 /** @todo Test 16-bit jump in 64-bit mode. possible? */
4791 pCtx->rip = uNewIp;
4792 pCtx->eflags.Bits.u1RF = 0;
4793
4794 return VINF_SUCCESS;
4795}
4796
4797
4798/**
4799 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
4800 *
4801 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4802 * segment limit.
4803 *
4804 * @returns Strict VBox status code.
4805 * @param pIemCpu The per CPU data.
4806 * @param offNextInstr The offset of the next instruction.
4807 */
4808IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS32(PIEMCPU pIemCpu, int32_t offNextInstr)
4809{
4810 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4811 Assert(pIemCpu->enmEffOpSize != IEMMODE_16BIT);
4812
4813 if (pIemCpu->enmEffOpSize == IEMMODE_32BIT)
4814 {
4815 Assert(pCtx->rip <= UINT32_MAX); Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
4816
4817 uint32_t uNewEip = pCtx->eip + offNextInstr + pIemCpu->offOpcode;
4818 if (uNewEip > pCtx->cs.u32Limit)
4819 return iemRaiseGeneralProtectionFault0(pIemCpu);
4820 pCtx->rip = uNewEip;
4821 }
4822 else
4823 {
4824 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
4825
4826 uint64_t uNewRip = pCtx->rip + offNextInstr + pIemCpu->offOpcode;
4827 if (!IEM_IS_CANONICAL(uNewRip))
4828 return iemRaiseGeneralProtectionFault0(pIemCpu);
4829 pCtx->rip = uNewRip;
4830 }
4831 pCtx->eflags.Bits.u1RF = 0;
4832 return VINF_SUCCESS;
4833}
4834
4835
4836/**
4837 * Performs a near jump to the specified address.
4838 *
4839 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4840 * segment limit.
4841 *
4842 * @param pIemCpu The per CPU data.
4843 * @param uNewRip The new RIP value.
4844 */
4845IEM_STATIC VBOXSTRICTRC iemRegRipJump(PIEMCPU pIemCpu, uint64_t uNewRip)
4846{
4847 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4848 switch (pIemCpu->enmEffOpSize)
4849 {
4850 case IEMMODE_16BIT:
4851 {
4852 Assert(uNewRip <= UINT16_MAX);
4853 if ( uNewRip > pCtx->cs.u32Limit
4854 && pIemCpu->enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
4855 return iemRaiseGeneralProtectionFault0(pIemCpu);
4856 /** @todo Test 16-bit jump in 64-bit mode. */
4857 pCtx->rip = uNewRip;
4858 break;
4859 }
4860
4861 case IEMMODE_32BIT:
4862 {
4863 Assert(uNewRip <= UINT32_MAX);
4864 Assert(pCtx->rip <= UINT32_MAX);
4865 Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
4866
4867 if (uNewRip > pCtx->cs.u32Limit)
4868 return iemRaiseGeneralProtectionFault0(pIemCpu);
4869 pCtx->rip = uNewRip;
4870 break;
4871 }
4872
4873 case IEMMODE_64BIT:
4874 {
4875 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
4876
4877 if (!IEM_IS_CANONICAL(uNewRip))
4878 return iemRaiseGeneralProtectionFault0(pIemCpu);
4879 pCtx->rip = uNewRip;
4880 break;
4881 }
4882
4883 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4884 }
4885
4886 pCtx->eflags.Bits.u1RF = 0;
4887 return VINF_SUCCESS;
4888}
4889
4890
4891/**
4892 * Gets the address of the top of the stack.
4893 *
4894 * @param pIemCpu The per CPU data.
4895 * @param pCtx The CPU context from which SP/ESP/RSP should be
4896 * read.
4897 */
4898DECLINLINE(RTGCPTR) iemRegGetEffRsp(PCIEMCPU pIemCpu, PCCPUMCTX pCtx)
4899{
4900 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
4901 return pCtx->rsp;
4902 if (pCtx->ss.Attr.n.u1DefBig)
4903 return pCtx->esp;
4904 return pCtx->sp;
4905}
4906
4907
4908/**
4909 * Updates the RIP/EIP/IP to point to the next instruction.
4910 *
4911 * This function leaves the EFLAGS.RF flag alone.
4912 *
4913 * @param pIemCpu The per CPU data.
4914 * @param cbInstr The number of bytes to add.
4915 */
4916IEM_STATIC void iemRegAddToRipKeepRF(PIEMCPU pIemCpu, uint8_t cbInstr)
4917{
4918 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4919 switch (pIemCpu->enmCpuMode)
4920 {
4921 case IEMMODE_16BIT:
4922 Assert(pCtx->rip <= UINT16_MAX);
4923 pCtx->eip += cbInstr;
4924 pCtx->eip &= UINT32_C(0xffff);
4925 break;
4926
4927 case IEMMODE_32BIT:
4928 pCtx->eip += cbInstr;
4929 Assert(pCtx->rip <= UINT32_MAX);
4930 break;
4931
4932 case IEMMODE_64BIT:
4933 pCtx->rip += cbInstr;
4934 break;
4935 default: AssertFailed();
4936 }
4937}
4938
4939
4940#if 0
4941/**
4942 * Updates the RIP/EIP/IP to point to the next instruction.
4943 *
4944 * @param pIemCpu The per CPU data.
4945 */
4946IEM_STATIC void iemRegUpdateRipKeepRF(PIEMCPU pIemCpu)
4947{
4948 return iemRegAddToRipKeepRF(pIemCpu, pIemCpu->offOpcode);
4949}
4950#endif
4951
4952
4953
4954/**
4955 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
4956 *
4957 * @param pIemCpu The per CPU data.
4958 * @param cbInstr The number of bytes to add.
4959 */
4960IEM_STATIC void iemRegAddToRipAndClearRF(PIEMCPU pIemCpu, uint8_t cbInstr)
4961{
4962 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4963
4964 pCtx->eflags.Bits.u1RF = 0;
4965
4966 /* NB: Must be kept in sync with HM (xxxAdvanceGuestRip). */
4967 switch (pIemCpu->enmCpuMode)
4968 {
4969 /** @todo investigate if EIP or RIP is really incremented. */
4970 case IEMMODE_16BIT:
4971 case IEMMODE_32BIT:
4972 pCtx->eip += cbInstr;
4973 Assert(pCtx->rip <= UINT32_MAX);
4974 break;
4975
4976 case IEMMODE_64BIT:
4977 pCtx->rip += cbInstr;
4978 break;
4979 default: AssertFailed();
4980 }
4981}
4982
4983
4984/**
4985 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
4986 *
4987 * @param pIemCpu The per CPU data.
4988 */
4989IEM_STATIC void iemRegUpdateRipAndClearRF(PIEMCPU pIemCpu)
4990{
4991 return iemRegAddToRipAndClearRF(pIemCpu, pIemCpu->offOpcode);
4992}
4993
4994
4995/**
4996 * Adds to the stack pointer.
4997 *
4998 * @param pIemCpu The per CPU data.
4999 * @param pCtx The CPU context in which SP/ESP/RSP should be
5000 * updated.
5001 * @param cbToAdd The number of bytes to add.
5002 */
5003DECLINLINE(void) iemRegAddToRsp(PCIEMCPU pIemCpu, PCPUMCTX pCtx, uint8_t cbToAdd)
5004{
5005 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5006 pCtx->rsp += cbToAdd;
5007 else if (pCtx->ss.Attr.n.u1DefBig)
5008 pCtx->esp += cbToAdd;
5009 else
5010 pCtx->sp += cbToAdd;
5011}
5012
5013
5014/**
5015 * Subtracts from the stack pointer.
5016 *
5017 * @param pIemCpu The per CPU data.
5018 * @param pCtx The CPU context in which SP/ESP/RSP should be
5019 * updated.
5020 * @param cbToSub The number of bytes to subtract.
5021 */
5022DECLINLINE(void) iemRegSubFromRsp(PCIEMCPU pIemCpu, PCPUMCTX pCtx, uint8_t cbToSub)
5023{
5024 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5025 pCtx->rsp -= cbToSub;
5026 else if (pCtx->ss.Attr.n.u1DefBig)
5027 pCtx->esp -= cbToSub;
5028 else
5029 pCtx->sp -= cbToSub;
5030}
5031
5032
5033/**
5034 * Adds to the temporary stack pointer.
5035 *
5036 * @param pIemCpu The per CPU data.
5037 * @param pTmpRsp The temporary SP/ESP/RSP to update.
5038 * @param cbToAdd The number of bytes to add.
5039 * @param pCtx Where to get the current stack mode.
5040 */
5041DECLINLINE(void) iemRegAddToRspEx(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint16_t cbToAdd)
5042{
5043 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5044 pTmpRsp->u += cbToAdd;
5045 else if (pCtx->ss.Attr.n.u1DefBig)
5046 pTmpRsp->DWords.dw0 += cbToAdd;
5047 else
5048 pTmpRsp->Words.w0 += cbToAdd;
5049}
5050
5051
5052/**
5053 * Subtracts from the temporary stack pointer.
5054 *
5055 * @param pIemCpu The per CPU data.
5056 * @param pTmpRsp The temporary SP/ESP/RSP to update.
5057 * @param cbToSub The number of bytes to subtract.
5058 * @param pCtx Where to get the current stack mode.
5059 * @remarks The @a cbToSub argument *MUST* be 16-bit, iemCImpl_enter is
5060 * expecting that.
5061 */
5062DECLINLINE(void) iemRegSubFromRspEx(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint16_t cbToSub)
5063{
5064 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5065 pTmpRsp->u -= cbToSub;
5066 else if (pCtx->ss.Attr.n.u1DefBig)
5067 pTmpRsp->DWords.dw0 -= cbToSub;
5068 else
5069 pTmpRsp->Words.w0 -= cbToSub;
5070}
5071
5072
5073/**
5074 * Calculates the effective stack address for a push of the specified size as
5075 * well as the new RSP value (upper bits may be masked).
5076 *
5077 * @returns Effective stack address for the push.
5078 * @param pIemCpu The IEM per CPU data.
5079 * @param pCtx Where to get the current stack mode.
5080 * @param cbItem The size of the stack item to push.
5081 * @param puNewRsp Where to return the new RSP value.
5082 */
5083DECLINLINE(RTGCPTR) iemRegGetRspForPush(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
5084{
5085 RTUINT64U uTmpRsp;
5086 RTGCPTR GCPtrTop;
5087 uTmpRsp.u = pCtx->rsp;
5088
5089 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5090 GCPtrTop = uTmpRsp.u -= cbItem;
5091 else if (pCtx->ss.Attr.n.u1DefBig)
5092 GCPtrTop = uTmpRsp.DWords.dw0 -= cbItem;
5093 else
5094 GCPtrTop = uTmpRsp.Words.w0 -= cbItem;
5095 *puNewRsp = uTmpRsp.u;
5096 return GCPtrTop;
5097}
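
/*
 * Worked example (made-up values): with a 16-bit stack (SS.Attr.D=0),
 * RSP=0x0000123400000002 and a 4 byte push, only SP is decremented: the
 * returned push address is 0xFFFE (the 16-bit value wraps) and *puNewRsp
 * keeps the untouched upper bits, i.e. 0x000012340000FFFE.
 */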
5098
5099
5100/**
5101 * Gets the current stack pointer and calculates the value after a pop of the
5102 * specified size.
5103 *
5104 * @returns Current stack pointer.
5105 * @param pIemCpu The per CPU data.
5106 * @param pCtx Where to get the current stack mode.
5107 * @param cbItem The size of the stack item to pop.
5108 * @param puNewRsp Where to return the new RSP value.
5109 */
5110DECLINLINE(RTGCPTR) iemRegGetRspForPop(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
5111{
5112 RTUINT64U uTmpRsp;
5113 RTGCPTR GCPtrTop;
5114 uTmpRsp.u = pCtx->rsp;
5115
5116 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5117 {
5118 GCPtrTop = uTmpRsp.u;
5119 uTmpRsp.u += cbItem;
5120 }
5121 else if (pCtx->ss.Attr.n.u1DefBig)
5122 {
5123 GCPtrTop = uTmpRsp.DWords.dw0;
5124 uTmpRsp.DWords.dw0 += cbItem;
5125 }
5126 else
5127 {
5128 GCPtrTop = uTmpRsp.Words.w0;
5129 uTmpRsp.Words.w0 += cbItem;
5130 }
5131 *puNewRsp = uTmpRsp.u;
5132 return GCPtrTop;
5133}
5134
5135
5136/**
5137 * Calculates the effective stack address for a push of the specified size as
5138 * well as the new temporary RSP value (upper bits may be masked).
5139 *
5140 * @returns Effective stack address for the push.
5141 * @param pIemCpu The per CPU data.
5142 * @param pCtx Where to get the current stack mode.
5143 * @param pTmpRsp The temporary stack pointer. This is updated.
5144 * @param cbItem The size of the stack item to push.
5145 */
5146DECLINLINE(RTGCPTR) iemRegGetRspForPushEx(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint8_t cbItem)
5147{
5148 RTGCPTR GCPtrTop;
5149
5150 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5151 GCPtrTop = pTmpRsp->u -= cbItem;
5152 else if (pCtx->ss.Attr.n.u1DefBig)
5153 GCPtrTop = pTmpRsp->DWords.dw0 -= cbItem;
5154 else
5155 GCPtrTop = pTmpRsp->Words.w0 -= cbItem;
5156 return GCPtrTop;
5157}
5158
5159
5160/**
5161 * Gets the effective stack address for a pop of the specified size and
5162 * calculates and updates the temporary RSP.
5163 *
5164 * @returns Current stack pointer.
5165 * @param pIemCpu The per CPU data.
5166 * @param pCtx Where to get the current stack mode.
5167 * @param pTmpRsp The temporary stack pointer. This is updated.
5168 * @param cbItem The size of the stack item to pop.
5169 */
5170DECLINLINE(RTGCPTR) iemRegGetRspForPopEx(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint8_t cbItem)
5171{
5172 RTGCPTR GCPtrTop;
5173 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5174 {
5175 GCPtrTop = pTmpRsp->u;
5176 pTmpRsp->u += cbItem;
5177 }
5178 else if (pCtx->ss.Attr.n.u1DefBig)
5179 {
5180 GCPtrTop = pTmpRsp->DWords.dw0;
5181 pTmpRsp->DWords.dw0 += cbItem;
5182 }
5183 else
5184 {
5185 GCPtrTop = pTmpRsp->Words.w0;
5186 pTmpRsp->Words.w0 += cbItem;
5187 }
5188 return GCPtrTop;
5189}
5190
5191/** @} */
5192
5193
5194/** @name FPU access and helpers.
5195 *
5196 * @{
5197 */
5198
5199
5200/**
5201 * Hook for preparing to use the host FPU.
5202 *
5203 * This is necessary in ring-0 and raw-mode context.
5204 *
5205 * @param pIemCpu The IEM per CPU data.
5206 */
5207DECLINLINE(void) iemFpuPrepareUsage(PIEMCPU pIemCpu)
5208{
5209#ifdef IN_RING3
5210 NOREF(pIemCpu);
5211#else
5212/** @todo RZ: FIXME */
5213//# error "Implement me"
5214#endif
5215}
5216
5217
5218/**
5219 * Hook for preparing to use the host FPU for SSE.
5220 *
5221 * This is necessary in ring-0 and raw-mode context.
5222 *
5223 * @param pIemCpu The IEM per CPU data.
5224 */
5225DECLINLINE(void) iemFpuPrepareUsageSse(PIEMCPU pIemCpu)
5226{
5227 iemFpuPrepareUsage(pIemCpu);
5228}
5229
5230
5231/**
5232 * Stores a QNaN value into a FPU register.
5233 *
5234 * @param pReg Pointer to the register.
5235 */
5236DECLINLINE(void) iemFpuStoreQNan(PRTFLOAT80U pReg)
5237{
5238 pReg->au32[0] = UINT32_C(0x00000000);
5239 pReg->au32[1] = UINT32_C(0xc0000000);
5240 pReg->au16[4] = UINT16_C(0xffff);
5241}
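
/*
 * The value stored above is the 80-bit "real indefinite" QNaN: sign=1,
 * exponent=0x7fff (all ones) and mantissa=0xc000000000000000 (integer bit
 * plus quiet bit set), which is what the FPU produces for masked invalid
 * operation responses.
 */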
5242
5243
5244/**
5245 * Updates the FOP, FPU.CS and FPUIP registers.
5246 *
5247 * @param pIemCpu The IEM per CPU data.
5248 * @param pCtx The CPU context.
5249 * @param pFpuCtx The FPU context.
5250 */
5251DECLINLINE(void) iemFpuUpdateOpcodeAndIpWorker(PIEMCPU pIemCpu, PCPUMCTX pCtx, PX86FXSTATE pFpuCtx)
5252{
5253 pFpuCtx->FOP = pIemCpu->abOpcode[pIemCpu->offFpuOpcode]
5254 | ((uint16_t)(pIemCpu->abOpcode[pIemCpu->offFpuOpcode - 1] & 0x7) << 8);
5255 /** @todo x87.CS and FPUIP need to be kept separately. */
5256 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
5257 {
5258 /** @todo Testcase: we make assumptions here about how FPUIP and FPUDP are
5259 * handled in real mode, based on the fnsave and fnstenv images. */
5260 pFpuCtx->CS = 0;
5261 pFpuCtx->FPUIP = pCtx->eip | ((uint32_t)pCtx->cs.Sel << 4);
5262 }
5263 else
5264 {
5265 pFpuCtx->CS = pCtx->cs.Sel;
5266 pFpuCtx->FPUIP = pCtx->rip;
5267 }
5268}
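
/*
 * Worked example (made-up instruction): FOP is the 11-bit x87 opcode built
 * from the low three bits of the D8..DF escape byte and the ModR/M byte.
 * Assuming the decoder left offFpuOpcode pointing at the ModR/M byte, FSQRT
 * (D9 FA) yields FOP = ((0xD9 & 7) << 8) | 0xFA = 0x1FA.
 */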
5269
5270
5271/**
5272 * Updates the x87.DS and FPUDP registers.
5273 *
5274 * @param pIemCpu The IEM per CPU data.
5275 * @param pCtx The CPU context.
5276 * @param pFpuCtx The FPU context.
5277 * @param iEffSeg The effective segment register.
5278 * @param GCPtrEff The effective address relative to @a iEffSeg.
5279 */
5280DECLINLINE(void) iemFpuUpdateDP(PIEMCPU pIemCpu, PCPUMCTX pCtx, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5281{
5282 RTSEL sel;
5283 switch (iEffSeg)
5284 {
5285 case X86_SREG_DS: sel = pCtx->ds.Sel; break;
5286 case X86_SREG_SS: sel = pCtx->ss.Sel; break;
5287 case X86_SREG_CS: sel = pCtx->cs.Sel; break;
5288 case X86_SREG_ES: sel = pCtx->es.Sel; break;
5289 case X86_SREG_FS: sel = pCtx->fs.Sel; break;
5290 case X86_SREG_GS: sel = pCtx->gs.Sel; break;
5291 default:
5292 AssertMsgFailed(("%d\n", iEffSeg));
5293 sel = pCtx->ds.Sel;
5294 }
5295 /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
5296 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
5297 {
5298 pFpuCtx->DS = 0;
5299 pFpuCtx->FPUDP = (uint32_t)GCPtrEff | ((uint32_t)sel << 4);
5300 }
5301 else
5302 {
5303 pFpuCtx->DS = sel;
5304 pFpuCtx->FPUDP = GCPtrEff;
5305 }
5306}
5307
5308
5309/**
5310 * Rotates the stack registers in the push direction.
5311 *
5312 * @param pFpuCtx The FPU context.
5313 * @remarks This is a complete waste of time, but fxsave stores the registers in
5314 * stack order.
5315 */
5316DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
5317{
5318 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
5319 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
5320 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
5321 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
5322 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
5323 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
5324 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
5325 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
5326 pFpuCtx->aRegs[0].r80 = r80Tmp;
5327}
5328
5329
5330/**
5331 * Rotates the stack registers in the pop direction.
5332 *
5333 * @param pFpuCtx The FPU context.
5334 * @remarks This is a complete waste of time, but fxsave stores the registers in
5335 * stack order.
5336 */
5337DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
5338{
5339 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
5340 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
5341 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
5342 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
5343 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
5344 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
5345 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
5346 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
5347 pFpuCtx->aRegs[7].r80 = r80Tmp;
5348}
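
/*
 * Why the rotation is needed: in the FXSAVE image (and thus in aRegs[]) the
 * registers are kept in stack order, so aRegs[0] is always ST(0) regardless
 * of FSW.TOP.  E.g. with TOP=3, ST(0) lives in physical register R3; after a
 * pop TOP becomes 4 and the old ST(1) (physical R4) has to move into
 * aRegs[0], which is exactly what iemFpuRotateStackPop does.
 */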
5349
5350
5351/**
5352 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
5353 * exception prevents it.
5354 *
5355 * @param pIemCpu The IEM per CPU data.
5356 * @param pResult The FPU operation result to push.
5357 * @param pFpuCtx The FPU context.
5358 */
5359IEM_STATIC void iemFpuMaybePushResult(PIEMCPU pIemCpu, PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx)
5360{
5361 /* Update FSW and bail if there are pending exceptions afterwards. */
5362 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
5363 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
5364 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
5365 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
5366 {
5367 pFpuCtx->FSW = fFsw;
5368 return;
5369 }
5370
5371 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
5372 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
5373 {
5374 /* All is fine, push the actual value. */
5375 pFpuCtx->FTW |= RT_BIT(iNewTop);
5376 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
5377 }
5378 else if (pFpuCtx->FCW & X86_FCW_IM)
5379 {
5380 /* Masked stack overflow, push QNaN. */
5381 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
5382 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5383 }
5384 else
5385 {
5386 /* Raise stack overflow, don't push anything. */
5387 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
5388 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
5389 return;
5390 }
5391
5392 fFsw &= ~X86_FSW_TOP_MASK;
5393 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
5394 pFpuCtx->FSW = fFsw;
5395
5396 iemFpuRotateStackPush(pFpuCtx);
5397}
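
/*
 * Note on the TOP arithmetic above: iNewTop = (TOP + 7) & 7 is simply
 * TOP - 1 modulo 8, i.e. a push decrements the top-of-stack pointer
 * (e.g. TOP=0 wraps to 7).  The FTW bit for iNewTop then tells whether the
 * destination register is free or the push overflows the stack.
 */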
5398
5399
5400/**
5401 * Stores a result in a FPU register and updates the FSW and FTW.
5402 *
5403 * @param pFpuCtx The FPU context.
5404 * @param pResult The result to store.
5405 * @param iStReg Which FPU register to store it in.
5406 */
5407IEM_STATIC void iemFpuStoreResultOnly(PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg)
5408{
5409 Assert(iStReg < 8);
5410 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
5411 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5412 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_TOP_MASK;
5413 pFpuCtx->FTW |= RT_BIT(iReg);
5414 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
5415}
5416
5417
5418/**
5419 * Only updates the FPU status word (FSW) with the result of the current
5420 * instruction.
5421 *
5422 * @param pFpuCtx The FPU context.
5423 * @param u16FSW The FSW output of the current instruction.
5424 */
5425IEM_STATIC void iemFpuUpdateFSWOnly(PX86FXSTATE pFpuCtx, uint16_t u16FSW)
5426{
5427 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5428 pFpuCtx->FSW |= u16FSW & ~X86_FSW_TOP_MASK;
5429}
5430
5431
5432/**
5433 * Pops one item off the FPU stack if no pending exception prevents it.
5434 *
5435 * @param pFpuCtx The FPU context.
5436 */
5437IEM_STATIC void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx)
5438{
5439 /* Check pending exceptions. */
5440 uint16_t uFSW = pFpuCtx->FSW;
5441 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
5442 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
5443 return;
5444
5445 /* TOP++ (the mask keeps the field modulo 8). */
5446 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
5447 uFSW &= ~X86_FSW_TOP_MASK;
5448 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
5449 pFpuCtx->FSW = uFSW;
5450
5451 /* Mark the previous ST0 as empty. */
5452 iOldTop >>= X86_FSW_TOP_SHIFT;
5453 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
5454
5455 /* Rotate the registers. */
5456 iemFpuRotateStackPop(pFpuCtx);
5457}
5458
5459
5460/**
5461 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
5462 *
5463 * @param pIemCpu The IEM per CPU data.
5464 * @param pResult The FPU operation result to push.
5465 */
5466IEM_STATIC void iemFpuPushResult(PIEMCPU pIemCpu, PIEMFPURESULT pResult)
5467{
5468 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5469 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5470 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5471 iemFpuMaybePushResult(pIemCpu, pResult, pFpuCtx);
5472}
5473
5474
5475/**
5476 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
5477 * and sets FPUDP and FPUDS.
5478 *
5479 * @param pIemCpu The IEM per CPU data.
5480 * @param pResult The FPU operation result to push.
5481 * @param iEffSeg The effective segment register.
5482 * @param GCPtrEff The effective address relative to @a iEffSeg.
5483 */
5484IEM_STATIC void iemFpuPushResultWithMemOp(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5485{
5486 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5487 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5488 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5489 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5490 iemFpuMaybePushResult(pIemCpu, pResult, pFpuCtx);
5491}
5492
5493
5494/**
5495 * Replaces ST0 with the first value and pushes the second onto the FPU stack,
5496 * unless a pending exception prevents it.
5497 *
5498 * @param pIemCpu The IEM per CPU data.
5499 * @param pResult The FPU operation result to store and push.
5500 */
5501IEM_STATIC void iemFpuPushResultTwo(PIEMCPU pIemCpu, PIEMFPURESULTTWO pResult)
5502{
5503 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5504 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5505 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5506
5507 /* Update FSW and bail if there are pending exceptions afterwards. */
5508 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
5509 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
5510 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
5511 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
5512 {
5513 pFpuCtx->FSW = fFsw;
5514 return;
5515 }
5516
5517 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
5518 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
5519 {
5520 /* All is fine, push the actual value. */
5521 pFpuCtx->FTW |= RT_BIT(iNewTop);
5522 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
5523 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
5524 }
5525 else if (pFpuCtx->FCW & X86_FCW_IM)
5526 {
5527 /* Masked stack overflow, push QNaN. */
5528 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
5529 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
5530 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5531 }
5532 else
5533 {
5534 /* Raise stack overflow, don't push anything. */
5535 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
5536 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
5537 return;
5538 }
5539
5540 fFsw &= ~X86_FSW_TOP_MASK;
5541 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
5542 pFpuCtx->FSW = fFsw;
5543
5544 iemFpuRotateStackPush(pFpuCtx);
5545}
5546
5547
5548/**
5549 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
5550 * FOP.
5551 *
5552 * @param pIemCpu The IEM per CPU data.
5553 * @param pResult The result to store.
5554 * @param iStReg Which FPU register to store it in.
5555 */
5556IEM_STATIC void iemFpuStoreResult(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iStReg)
5557{
5558 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5559 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5560 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5561 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
5562}
5563
5564
5565/**
5566 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
5567 * FOP, and then pops the stack.
5568 *
5569 * @param pIemCpu The IEM per CPU data.
5570 * @param pResult The result to store.
5571 * @param iStReg Which FPU register to store it in.
5572 */
5573IEM_STATIC void iemFpuStoreResultThenPop(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iStReg)
5574{
5575 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5576 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5577 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5578 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
5579 iemFpuMaybePopOne(pFpuCtx);
5580}
5581
5582
5583/**
5584 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
5585 * FPUDP, and FPUDS.
5586 *
5587 * @param pIemCpu The IEM per CPU data.
5588 * @param pResult The result to store.
5589 * @param iStReg Which FPU register to store it in.
5590 * @param iEffSeg The effective memory operand selector register.
5591 * @param GCPtrEff The effective memory operand offset.
5592 */
5593IEM_STATIC void iemFpuStoreResultWithMemOp(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iStReg,
5594 uint8_t iEffSeg, RTGCPTR GCPtrEff)
5595{
5596 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5597 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5598 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5599 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5600 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
5601}
5602
5603
5604/**
5605 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
5606 * FPUDP, and FPUDS, and then pops the stack.
5607 *
5608 * @param pIemCpu The IEM per CPU data.
5609 * @param pResult The result to store.
5610 * @param iStReg Which FPU register to store it in.
5611 * @param iEffSeg The effective memory operand selector register.
5612 * @param GCPtrEff The effective memory operand offset.
5613 */
5614IEM_STATIC void iemFpuStoreResultWithMemOpThenPop(PIEMCPU pIemCpu, PIEMFPURESULT pResult,
5615 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5616{
5617 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5618 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5619 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5620 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5621 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
5622 iemFpuMaybePopOne(pFpuCtx);
5623}
5624
5625
5626/**
5627 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
5628 *
5629 * @param pIemCpu The IEM per CPU data.
5630 */
5631IEM_STATIC void iemFpuUpdateOpcodeAndIp(PIEMCPU pIemCpu)
5632{
5633 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5634 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5635 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5636}
5637
5638
5639/**
5640 * Marks the specified stack register as free (for FFREE).
5641 *
5642 * @param pIemCpu The IEM per CPU data.
5643 * @param iStReg The register to free.
5644 */
5645IEM_STATIC void iemFpuStackFree(PIEMCPU pIemCpu, uint8_t iStReg)
5646{
5647 Assert(iStReg < 8);
5648 PX86FXSTATE pFpuCtx = &pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87;
5649 uint8_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
5650 pFpuCtx->FTW &= ~RT_BIT(iReg);
5651}
5652
5653
5654/**
5655 * Increments FSW.TOP, i.e. pops an item off the stack without freeing it.
5656 *
5657 * @param pIemCpu The IEM per CPU data.
5658 */
5659IEM_STATIC void iemFpuStackIncTop(PIEMCPU pIemCpu)
5660{
5661 PX86FXSTATE pFpuCtx = &pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87;
5662 uint16_t uFsw = pFpuCtx->FSW;
5663 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
5664 uTop = (uTop + (1 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
5665 uFsw &= ~X86_FSW_TOP_MASK;
5666 uFsw |= uTop;
5667 pFpuCtx->FSW = uFsw;
5668}
5669
5670
5671/**
5672 * Decrements FSW.TOP, i.e. pushes an item onto the stack without storing anything.
5673 *
5674 * @param pIemCpu The IEM per CPU data.
5675 */
5676IEM_STATIC void iemFpuStackDecTop(PIEMCPU pIemCpu)
5677{
5678 PX86FXSTATE pFpuCtx = &pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87;
5679 uint16_t uFsw = pFpuCtx->FSW;
5680 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
5681 uTop = (uTop + (7 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
5682 uFsw &= ~X86_FSW_TOP_MASK;
5683 uFsw |= uTop;
5684 pFpuCtx->FSW = uFsw;
5685}
5686
5687
5688/**
5689 * Updates the FSW, FOP, FPUIP, and FPUCS.
5690 *
5691 * @param pIemCpu The IEM per CPU data.
5692 * @param u16FSW The FSW from the current instruction.
5693 */
5694IEM_STATIC void iemFpuUpdateFSW(PIEMCPU pIemCpu, uint16_t u16FSW)
5695{
5696 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5697 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5698 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5699 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
5700}
5701
5702
5703/**
5704 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
5705 *
5706 * @param pIemCpu The IEM per CPU data.
5707 * @param u16FSW The FSW from the current instruction.
5708 */
5709IEM_STATIC void iemFpuUpdateFSWThenPop(PIEMCPU pIemCpu, uint16_t u16FSW)
5710{
5711 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5712 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5713 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5714 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
5715 iemFpuMaybePopOne(pFpuCtx);
5716}
5717
5718
5719/**
5720 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
5721 *
5722 * @param pIemCpu The IEM per CPU data.
5723 * @param u16FSW The FSW from the current instruction.
5724 * @param iEffSeg The effective memory operand selector register.
5725 * @param GCPtrEff The effective memory operand offset.
5726 */
5727IEM_STATIC void iemFpuUpdateFSWWithMemOp(PIEMCPU pIemCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5728{
5729 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5730 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5731 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5732 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5733 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
5734}
5735
5736
5737/**
5738 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
5739 *
5740 * @param pIemCpu The IEM per CPU data.
5741 * @param u16FSW The FSW from the current instruction.
5742 */
5743IEM_STATIC void iemFpuUpdateFSWThenPopPop(PIEMCPU pIemCpu, uint16_t u16FSW)
5744{
5745 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5746 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5747 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5748 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
5749 iemFpuMaybePopOne(pFpuCtx);
5750 iemFpuMaybePopOne(pFpuCtx);
5751}
5752
5753
5754/**
5755 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
5756 *
5757 * @param pIemCpu The IEM per CPU data.
5758 * @param u16FSW The FSW from the current instruction.
5759 * @param iEffSeg The effective memory operand selector register.
5760 * @param GCPtrEff The effective memory operand offset.
5761 */
5762IEM_STATIC void iemFpuUpdateFSWWithMemOpThenPop(PIEMCPU pIemCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5763{
5764 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5765 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5766 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5767 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5768 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
5769 iemFpuMaybePopOne(pFpuCtx);
5770}
5771
5772
5773/**
5774 * Worker routine for raising an FPU stack underflow exception.
5775 *
5776 * @param pIemCpu The IEM per CPU data.
5777 * @param pFpuCtx The FPU context.
5778 * @param iStReg The stack register being accessed.
5779 */
5780IEM_STATIC void iemFpuStackUnderflowOnly(PIEMCPU pIemCpu, PX86FXSTATE pFpuCtx, uint8_t iStReg)
5781{
5782 Assert(iStReg < 8 || iStReg == UINT8_MAX);
5783 if (pFpuCtx->FCW & X86_FCW_IM)
5784 {
5785 /* Masked underflow. */
5786 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5787 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5788 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
5789 if (iStReg != UINT8_MAX)
5790 {
5791 pFpuCtx->FTW |= RT_BIT(iReg);
5792 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
5793 }
5794 }
5795 else
5796 {
5797 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5798 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5799 }
5800}
5801
5802
5803/**
5804 * Raises a FPU stack underflow exception.
5805 *
5806 * @param pIemCpu The IEM per CPU data.
5807 * @param iStReg The destination register that should be loaded
5808 * with QNaN if \#IS is not masked. Specify
5809 * UINT8_MAX if none (like for fcom).
5810 */
5811DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflow(PIEMCPU pIemCpu, uint8_t iStReg)
5812{
5813 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5814 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5815 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5816 iemFpuStackUnderflowOnly(pIemCpu, pFpuCtx, iStReg);
5817}
5818
5819
5820DECL_NO_INLINE(IEM_STATIC, void)
5821iemFpuStackUnderflowWithMemOp(PIEMCPU pIemCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5822{
5823 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5824 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5825 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5826 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5827 iemFpuStackUnderflowOnly(pIemCpu, pFpuCtx, iStReg);
5828}
5829
5830
5831DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPop(PIEMCPU pIemCpu, uint8_t iStReg)
5832{
5833 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5834 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5835 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5836 iemFpuStackUnderflowOnly(pIemCpu, pFpuCtx, iStReg);
5837 iemFpuMaybePopOne(pFpuCtx);
5838}
5839
5840
5841DECL_NO_INLINE(IEM_STATIC, void)
5842iemFpuStackUnderflowWithMemOpThenPop(PIEMCPU pIemCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5843{
5844 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5845 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5846 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5847 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5848 iemFpuStackUnderflowOnly(pIemCpu, pFpuCtx, iStReg);
5849 iemFpuMaybePopOne(pFpuCtx);
5850}
5851
5852
5853DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPopPop(PIEMCPU pIemCpu)
5854{
5855 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5856 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5857 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5858 iemFpuStackUnderflowOnly(pIemCpu, pFpuCtx, UINT8_MAX);
5859 iemFpuMaybePopOne(pFpuCtx);
5860 iemFpuMaybePopOne(pFpuCtx);
5861}
5862
5863
5864DECL_NO_INLINE(IEM_STATIC, void)
5865iemFpuStackPushUnderflow(PIEMCPU pIemCpu)
5866{
5867 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5868 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5869 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5870
5871 if (pFpuCtx->FCW & X86_FCW_IM)
5872 {
5873 /* Masked underflow - Push QNaN. */
5874 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5875 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5876 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5877 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5878 pFpuCtx->FTW |= RT_BIT(iNewTop);
5879 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5880 iemFpuRotateStackPush(pFpuCtx);
5881 }
5882 else
5883 {
5884 /* Exception pending - don't change TOP or the register stack. */
5885 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5886 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5887 }
5888}
5889
5890
5891DECL_NO_INLINE(IEM_STATIC, void)
5892iemFpuStackPushUnderflowTwo(PIEMCPU pIemCpu)
5893{
5894 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5895 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5896 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5897
5898 if (pFpuCtx->FCW & X86_FCW_IM)
5899 {
5900 /* Masked underflow - Push QNaN. */
5901 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5902 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5903 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5904 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5905 pFpuCtx->FTW |= RT_BIT(iNewTop);
5906 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
5907 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5908 iemFpuRotateStackPush(pFpuCtx);
5909 }
5910 else
5911 {
5912 /* Exception pending - don't change TOP or the register stack. */
5913 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5914 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5915 }
5916}
5917
5918
5919/**
5920 * Worker routine for raising an FPU stack overflow exception on a push.
5921 *
5922 * @param pFpuCtx The FPU context.
5923 */
5924IEM_STATIC void iemFpuStackPushOverflowOnly(PX86FXSTATE pFpuCtx)
5925{
5926 if (pFpuCtx->FCW & X86_FCW_IM)
5927 {
5928 /* Masked overflow. */
5929 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5930 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5931 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
5932 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5933 pFpuCtx->FTW |= RT_BIT(iNewTop);
5934 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5935 iemFpuRotateStackPush(pFpuCtx);
5936 }
5937 else
5938 {
5939 /* Exception pending - don't change TOP or the register stack. */
5940 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5941 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5942 }
5943}
5944
5945
5946/**
5947 * Raises a FPU stack overflow exception on a push.
5948 *
5949 * @param pIemCpu The IEM per CPU data.
5950 */
5951DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackPushOverflow(PIEMCPU pIemCpu)
5952{
5953 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5954 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5955 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5956 iemFpuStackPushOverflowOnly(pFpuCtx);
5957}
5958
5959
5960/**
5961 * Raises a FPU stack overflow exception on a push with a memory operand.
5962 *
5963 * @param pIemCpu The IEM per CPU data.
5964 * @param iEffSeg The effective memory operand selector register.
5965 * @param GCPtrEff The effective memory operand offset.
5966 */
5967DECL_NO_INLINE(IEM_STATIC, void)
5968iemFpuStackPushOverflowWithMemOp(PIEMCPU pIemCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5969{
5970 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5971 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5972 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5973 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5974 iemFpuStackPushOverflowOnly(pFpuCtx);
5975}
5976
5977
5978IEM_STATIC int iemFpuStRegNotEmpty(PIEMCPU pIemCpu, uint8_t iStReg)
5979{
5980 PX86FXSTATE pFpuCtx = &pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87;
5981 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
5982 if (pFpuCtx->FTW & RT_BIT(iReg))
5983 return VINF_SUCCESS;
5984 return VERR_NOT_FOUND;
5985}
5986
5987
5988IEM_STATIC int iemFpuStRegNotEmptyRef(PIEMCPU pIemCpu, uint8_t iStReg, PCRTFLOAT80U *ppRef)
5989{
5990 PX86FXSTATE pFpuCtx = &pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87;
5991 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
5992 if (pFpuCtx->FTW & RT_BIT(iReg))
5993 {
5994 *ppRef = &pFpuCtx->aRegs[iStReg].r80;
5995 return VINF_SUCCESS;
5996 }
5997 return VERR_NOT_FOUND;
5998}
5999
6000
6001IEM_STATIC int iemFpu2StRegsNotEmptyRef(PIEMCPU pIemCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0,
6002 uint8_t iStReg1, PCRTFLOAT80U *ppRef1)
6003{
6004 PX86FXSTATE pFpuCtx = &pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87;
6005 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
6006 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
6007 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
6008 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
6009 {
6010 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
6011 *ppRef1 = &pFpuCtx->aRegs[iStReg1].r80;
6012 return VINF_SUCCESS;
6013 }
6014 return VERR_NOT_FOUND;
6015}
6016
6017
6018IEM_STATIC int iemFpu2StRegsNotEmptyRefFirst(PIEMCPU pIemCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0, uint8_t iStReg1)
6019{
6020 PX86FXSTATE pFpuCtx = &pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87;
6021 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
6022 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
6023 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
6024 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
6025 {
6026 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
6027 return VINF_SUCCESS;
6028 }
6029 return VERR_NOT_FOUND;
6030}
6031
6032
6033/**
6034 * Updates the FPU exception status after FCW is changed.
6035 *
6036 * @param pFpuCtx The FPU context.
6037 */
6038IEM_STATIC void iemFpuRecalcExceptionStatus(PX86FXSTATE pFpuCtx)
6039{
6040 uint16_t u16Fsw = pFpuCtx->FSW;
6041 if ((u16Fsw & X86_FSW_XCPT_MASK) & ~(pFpuCtx->FCW & X86_FCW_XCPT_MASK))
6042 u16Fsw |= X86_FSW_ES | X86_FSW_B;
6043 else
6044 u16Fsw &= ~(X86_FSW_ES | X86_FSW_B);
6045 pFpuCtx->FSW = u16Fsw;
6046}
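
/*
 * Worked example (made-up values): if FSW.IE is already set and an FLDCW
 * clears FCW.IM (unmasking the invalid operation exception), the
 * recalculation above sets FSW.ES and FSW.B; setting FCW.IM again, with no
 * other pending unmasked exceptions, clears both summary bits.
 */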
6047
6048
6049/**
6050 * Calculates the full FTW (FPU tag word) for use in FNSTENV and FNSAVE.
6051 *
6052 * @returns The full FTW.
6053 * @param pFpuCtx The FPU context.
6054 */
6055IEM_STATIC uint16_t iemFpuCalcFullFtw(PCX86FXSTATE pFpuCtx)
6056{
6057 uint8_t const u8Ftw = (uint8_t)pFpuCtx->FTW;
6058 uint16_t u16Ftw = 0;
6059 unsigned const iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
6060 for (unsigned iSt = 0; iSt < 8; iSt++)
6061 {
6062 unsigned const iReg = (iSt + iTop) & 7;
6063 if (!(u8Ftw & RT_BIT(iReg)))
6064 u16Ftw |= 3 << (iReg * 2); /* empty */
6065 else
6066 {
6067 uint16_t uTag;
6068 PCRTFLOAT80U const pr80Reg = &pFpuCtx->aRegs[iSt].r80;
6069 if (pr80Reg->s.uExponent == 0x7fff)
6070 uTag = 2; /* Exponent is all 1's => Special. */
6071 else if (pr80Reg->s.uExponent == 0x0000)
6072 {
6073 if (pr80Reg->s.u64Mantissa == 0x0000)
6074 uTag = 1; /* All bits are zero => Zero. */
6075 else
6076 uTag = 2; /* Must be special. */
6077 }
6078 else if (pr80Reg->s.u64Mantissa & RT_BIT_64(63)) /* The J bit. */
6079 uTag = 0; /* Valid. */
6080 else
6081 uTag = 2; /* Must be special. */
6082
6083 u16Ftw |= uTag << (iReg * 2); /* valid, zero or special */
6084 }
6085 }
6086
6087 return u16Ftw;
6088}
6089
6090
6091/**
6092 * Converts a full FTW to a compressed one (for use in FLDENV and FRSTOR).
6093 *
6094 * @returns The compressed FTW.
6095 * @param u16FullFtw The full FTW to convert.
6096 */
6097IEM_STATIC uint16_t iemFpuCompressFtw(uint16_t u16FullFtw)
6098{
6099 uint8_t u8Ftw = 0;
6100 for (unsigned i = 0; i < 8; i++)
6101 {
6102 if ((u16FullFtw & 3) != 3 /*empty*/)
6103 u8Ftw |= RT_BIT(i);
6104 u16FullFtw >>= 2;
6105 }
6106
6107 return u8Ftw;
6108}
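
/*
 * Standalone sketch (not IEM code, the function name is made up): the
 * compressed FXSAVE tag word keeps one bit per physical register (set for
 * occupied, clear for empty), so any full 2-bit tag other than 3 (empty)
 * maps to 1, mirroring iemFpuCompressFtw above.
 */
#if 0 /* example sketch */
# include <stdint.h>
static uint8_t exampleCompressFtw(uint16_t u16FullFtw)
{
    uint8_t u8Ftw = 0;
    for (unsigned i = 0; i < 8; i++, u16FullFtw >>= 2)
        if ((u16FullFtw & 3) != 3 /* not empty */)
            u8Ftw |= (uint8_t)(1u << i);
    return u8Ftw;
}
/* exampleCompressFtw(0xfff4) == 0x03: register 0 is valid (tag 0), register 1
   holds zero (tag 1) and the remaining six registers are empty (tag 3). */
#endif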
6109
6110/** @} */
6111
6112
6113/** @name Memory access.
6114 *
6115 * @{
6116 */
6117
6118
6119/**
6120 * Updates the IEMCPU::cbWritten counter if applicable.
6121 *
6122 * @param pIemCpu The IEM per CPU data.
6123 * @param fAccess The access being accounted for.
6124 * @param cbMem The access size.
6125 */
6126DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PIEMCPU pIemCpu, uint32_t fAccess, size_t cbMem)
6127{
6128 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
6129 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
6130 pIemCpu->cbWritten += (uint32_t)cbMem;
6131}
6132
6133
6134/**
6135 * Checks if the given segment can be written to, raising the appropriate
6136 * exception if not.
6137 *
6138 * @returns VBox strict status code.
6139 *
6140 * @param pIemCpu The IEM per CPU data.
6141 * @param pHid Pointer to the hidden register.
6142 * @param iSegReg The register number.
6143 * @param pu64BaseAddr Where to return the base address to use for the
6144 * segment. (In 64-bit code it may differ from the
6145 * base in the hidden segment.)
6146 */
6147IEM_STATIC VBOXSTRICTRC
6148iemMemSegCheckWriteAccessEx(PIEMCPU pIemCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
6149{
6150 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
6151 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
6152 else
6153 {
6154 if (!pHid->Attr.n.u1Present)
6155 return iemRaiseSelectorNotPresentBySegReg(pIemCpu, iSegReg);
6156
6157 if ( ( (pHid->Attr.n.u4Type & X86_SEL_TYPE_CODE)
6158 || !(pHid->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
6159 && pIemCpu->enmCpuMode != IEMMODE_64BIT )
6160 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, IEM_ACCESS_DATA_W);
6161 *pu64BaseAddr = pHid->u64Base;
6162 }
6163 return VINF_SUCCESS;
6164}
6165
6166
6167/**
6168 * Checks if the given segment can be read from, raising the appropriate
6169 * exception if not.
6170 *
6171 * @returns VBox strict status code.
6172 *
6173 * @param pIemCpu The IEM per CPU data.
6174 * @param pHid Pointer to the hidden register.
6175 * @param iSegReg The register number.
6176 * @param pu64BaseAddr Where to return the base address to use for the
6177 * segment. (In 64-bit code it may differ from the
6178 * base in the hidden segment.)
6179 */
6180IEM_STATIC VBOXSTRICTRC
6181iemMemSegCheckReadAccessEx(PIEMCPU pIemCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
6182{
6183 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
6184 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
6185 else
6186 {
6187 if (!pHid->Attr.n.u1Present)
6188 return iemRaiseSelectorNotPresentBySegReg(pIemCpu, iSegReg);
6189
6190 if ((pHid->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
6191 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, IEM_ACCESS_DATA_R);
6192 *pu64BaseAddr = pHid->u64Base;
6193 }
6194 return VINF_SUCCESS;
6195}
6196
6197
6198/**
6199 * Applies the segment limit, base and attributes.
6200 *
6201 * This may raise a \#GP or \#SS.
6202 *
6203 * @returns VBox strict status code.
6204 *
6205 * @param pIemCpu The IEM per CPU data.
6206 * @param fAccess The kind of access which is being performed.
6207 * @param iSegReg The index of the segment register to apply.
6208 * This is UINT8_MAX if none (for IDT, GDT, LDT,
6209 * TSS, ++).
6210 * @param cbMem The access size.
6211 * @param pGCPtrMem Pointer to the guest memory address to apply
6212 * segmentation to. Input and output parameter.
6213 */
6214IEM_STATIC VBOXSTRICTRC
6215iemMemApplySegment(PIEMCPU pIemCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem)
6216{
6217 if (iSegReg == UINT8_MAX)
6218 return VINF_SUCCESS;
6219
6220 PCPUMSELREGHID pSel = iemSRegGetHid(pIemCpu, iSegReg);
6221 switch (pIemCpu->enmCpuMode)
6222 {
6223 case IEMMODE_16BIT:
6224 case IEMMODE_32BIT:
6225 {
6226 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
6227 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
6228
6229 Assert(pSel->Attr.n.u1Present);
6230 Assert(pSel->Attr.n.u1DescType);
6231 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
6232 {
6233 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
6234 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
6235 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, fAccess);
6236
6237 if (!IEM_IS_REAL_OR_V86_MODE(pIemCpu))
6238 {
6239 /** @todo CPL check. */
6240 }
6241
6242 /*
6243 * There are two kinds of data selectors, normal and expand down.
6244 */
6245 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
6246 {
6247 if ( GCPtrFirst32 > pSel->u32Limit
6248 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
6249 return iemRaiseSelectorBounds(pIemCpu, iSegReg, fAccess);
6250 }
6251 else
6252 {
6253 /*
6254 * The upper boundary is defined by the B bit, not the G bit!
6255 */
6256 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
6257 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
6258 return iemRaiseSelectorBounds(pIemCpu, iSegReg, fAccess);
6259 }
6260 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
6261 }
6262 else
6263 {
6264
6265 /*
6266 * Code selectors can usually be used to read thru; writing is
6267 * only permitted in real and V8086 mode.
6268 */
6269 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
6270 || ( (fAccess & IEM_ACCESS_TYPE_READ)
6271 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
6272 && !IEM_IS_REAL_OR_V86_MODE(pIemCpu) )
6273 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, fAccess);
6274
6275 if ( GCPtrFirst32 > pSel->u32Limit
6276 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
6277 return iemRaiseSelectorBounds(pIemCpu, iSegReg, fAccess);
6278
6279 if (!IEM_IS_REAL_OR_V86_MODE(pIemCpu))
6280 {
6281 /** @todo CPL check. */
6282 }
6283
6284 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
6285 }
6286 return VINF_SUCCESS;
6287 }
6288
6289 case IEMMODE_64BIT:
6290 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
6291 *pGCPtrMem += pSel->u64Base;
6292 return VINF_SUCCESS;
6293
6294 default:
6295 AssertFailedReturn(VERR_IEM_IPE_7);
6296 }
6297}
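
/*
 * Standalone sketch (not IEM code, names made up): the expand-down limit
 * check used above accepts offsets in (limit, upper], where the upper bound
 * is 4 GiB - 1 or 64 KiB - 1 depending on the D/B bit; anything at or below
 * the limit faults.
 */
#if 0 /* example sketch */
# include <stdint.h>
static int exampleExpandDownInBounds(uint32_t offFirst, uint32_t offLast, uint32_t uLimit, int fDefBig)
{
    uint32_t const uUpper = fDefBig ? UINT32_MAX : UINT32_C(0xffff);
    /* e.g. uLimit=0x0fff, fDefBig=1: offset 0x0fff faults, 0x1000 is fine. */
    return offFirst > uLimit && offLast <= uUpper;
}
#endif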
6298
6299
6300/**
6301 * Translates a virtual address to a physical address and checks if we
6302 * can access the page as specified.
6303 *
6304 * @param pIemCpu The IEM per CPU data.
6305 * @param GCPtrMem The virtual address.
6306 * @param fAccess The intended access.
6307 * @param pGCPhysMem Where to return the physical address.
6308 */
6309IEM_STATIC VBOXSTRICTRC
6310iemMemPageTranslateAndCheckAccess(PIEMCPU pIemCpu, RTGCPTR GCPtrMem, uint32_t fAccess, PRTGCPHYS pGCPhysMem)
6311{
6312 /** @todo Need a different PGM interface here. We're currently using
6313 * generic / REM interfaces. This won't cut it for R0 & RC. */
6314 RTGCPHYS GCPhys;
6315 uint64_t fFlags;
6316 int rc = PGMGstGetPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrMem, &fFlags, &GCPhys);
6317 if (RT_FAILURE(rc))
6318 {
6319 /** @todo Check unassigned memory in unpaged mode. */
6320 /** @todo Reserved bits in page tables. Requires new PGM interface. */
6321 *pGCPhysMem = NIL_RTGCPHYS;
6322 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess, rc);
6323 }
6324
6325 /* If the page is writable and does not have the no-exec bit set, all
6326 access is allowed. Otherwise we'll have to check more carefully... */
6327 if ((fFlags & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
6328 {
6329 /* Write to read only memory? */
6330 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
6331 && !(fFlags & X86_PTE_RW)
6332 && ( pIemCpu->uCpl != 0
6333 || (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_WP)))
6334 {
6335 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
6336 *pGCPhysMem = NIL_RTGCPHYS;
6337 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
6338 }
6339
6340 /* Kernel memory accessed by userland? */
6341 if ( !(fFlags & X86_PTE_US)
6342 && pIemCpu->uCpl == 3
6343 && !(fAccess & IEM_ACCESS_WHAT_SYS))
6344 {
6345 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
6346 *pGCPhysMem = NIL_RTGCPHYS;
6347 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
6348 }
6349
6350 /* Executing non-executable memory? */
6351 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
6352 && (fFlags & X86_PTE_PAE_NX)
6353 && (pIemCpu->CTX_SUFF(pCtx)->msrEFER & MSR_K6_EFER_NXE) )
6354 {
6355 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
6356 *pGCPhysMem = NIL_RTGCPHYS;
6357 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
6358 VERR_ACCESS_DENIED);
6359 }
6360 }
6361
6362 /*
6363 * Set the dirty / access flags.
6364 * ASSUMES this is set when the address is translated rather than on commit...
6365 */
6366 /** @todo testcase: check when A and D bits are actually set by the CPU. */
6367 uint32_t fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
6368 if ((fFlags & fAccessedDirty) != fAccessedDirty)
6369 {
6370 int rc2 = PGMGstModifyPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
6371 AssertRC(rc2);
6372 }
6373
6374 GCPhys |= GCPtrMem & PAGE_OFFSET_MASK;
6375 *pGCPhysMem = GCPhys;
6376 return VINF_SUCCESS;
6377}
6378
6379
6380
6381/**
6382 * Maps a physical page.
6383 *
6384 * @returns VBox status code (see PGMR3PhysTlbGCPhys2Ptr).
6385 * @param pIemCpu The IEM per CPU data.
6386 * @param GCPhysMem The physical address.
6387 * @param fAccess The intended access.
6388 * @param ppvMem Where to return the mapping address.
6389 * @param pLock The PGM lock.
6390 */
6391IEM_STATIC int iemMemPageMap(PIEMCPU pIemCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, void **ppvMem, PPGMPAGEMAPLOCK pLock)
6392{
6393#ifdef IEM_VERIFICATION_MODE_FULL
6394 /* Force the alternative path so we can ignore writes. */
6395 if ((fAccess & IEM_ACCESS_TYPE_WRITE) && !pIemCpu->fNoRem)
6396 {
6397 if (IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
6398 {
6399 int rc2 = PGMPhysIemQueryAccess(IEMCPU_TO_VM(pIemCpu), GCPhysMem,
6400 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pIemCpu->fBypassHandlers);
6401 if (RT_FAILURE(rc2))
6402 pIemCpu->fProblematicMemory = true;
6403 }
6404 return VERR_PGM_PHYS_TLB_CATCH_ALL;
6405 }
6406#endif
6407#ifdef IEM_LOG_MEMORY_WRITES
6408 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6409 return VERR_PGM_PHYS_TLB_CATCH_ALL;
6410#endif
6411#ifdef IEM_VERIFICATION_MODE_MINIMAL
6412 return VERR_PGM_PHYS_TLB_CATCH_ALL;
6413#endif
6414
6415    /** @todo This API may require some improvement later. A private deal with PGM
6416     * regarding locking and unlocking needs to be struck. A couple of TLBs
6417 * living in PGM, but with publicly accessible inlined access methods
6418 * could perhaps be an even better solution. */
6419 int rc = PGMPhysIemGCPhys2Ptr(IEMCPU_TO_VM(pIemCpu), IEMCPU_TO_VMCPU(pIemCpu),
6420 GCPhysMem,
6421 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE),
6422 pIemCpu->fBypassHandlers,
6423 ppvMem,
6424 pLock);
6425 /*Log(("PGMPhysIemGCPhys2Ptr %Rrc pLock=%.*Rhxs\n", rc, sizeof(*pLock), pLock));*/
6426 AssertMsg(rc == VINF_SUCCESS || RT_FAILURE_NP(rc), ("%Rrc\n", rc));
6427
6428#ifdef IEM_VERIFICATION_MODE_FULL
6429 if (RT_FAILURE(rc) && IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
6430 pIemCpu->fProblematicMemory = true;
6431#endif
6432 return rc;
6433}
6434
6435
6436/**
6437 * Unmap a page previously mapped by iemMemPageMap.
6438 *
6439 * @param pIemCpu The IEM per CPU data.
6440 * @param GCPhysMem The physical address.
6441 * @param fAccess The intended access.
6442 * @param pvMem What iemMemPageMap returned.
6443 * @param pLock The PGM lock.
6444 */
6445DECLINLINE(void) iemMemPageUnmap(PIEMCPU pIemCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, const void *pvMem, PPGMPAGEMAPLOCK pLock)
6446{
6447 NOREF(pIemCpu);
6448 NOREF(GCPhysMem);
6449 NOREF(fAccess);
6450 NOREF(pvMem);
6451 PGMPhysReleasePageMappingLock(IEMCPU_TO_VM(pIemCpu), pLock);
6452}
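/* Illustrative pairing sketch (informal, not build code; GCPhys, pvMem and Lock
   are hypothetical locals):
        void          *pvMem;
        PGMPAGEMAPLOCK Lock;
        int rc = iemMemPageMap(pIemCpu, GCPhys, IEM_ACCESS_DATA_R, &pvMem, &Lock);
        if (RT_SUCCESS(rc))
        {
            ... read through pvMem ...
            iemMemPageUnmap(pIemCpu, GCPhys, IEM_ACCESS_DATA_R, pvMem, &Lock);
        }
   iemMemMap further down wraps this and falls back on bounce buffering when
   iemMemPageMap fails. */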
6453
6454
6455/**
6456 * Looks up a memory mapping entry.
6457 *
6458 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
6459 * @param pIemCpu The IEM per CPU data.
6460 * @param pvMem The memory address.
6461 * @param fAccess The access to match (IEM_ACCESS_WHAT_XXX and IEM_ACCESS_TYPE_XXX).
6462 */
6463DECLINLINE(int) iemMapLookup(PIEMCPU pIemCpu, void *pvMem, uint32_t fAccess)
6464{
6465 Assert(pIemCpu->cActiveMappings < RT_ELEMENTS(pIemCpu->aMemMappings));
6466 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
6467 if ( pIemCpu->aMemMappings[0].pv == pvMem
6468 && (pIemCpu->aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
6469 return 0;
6470 if ( pIemCpu->aMemMappings[1].pv == pvMem
6471 && (pIemCpu->aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
6472 return 1;
6473 if ( pIemCpu->aMemMappings[2].pv == pvMem
6474 && (pIemCpu->aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
6475 return 2;
6476 return VERR_NOT_FOUND;
6477}
6478
6479
6480/**
6481 * Finds a free memmap entry when using iNextMapping doesn't work.
6482 *
6483 * @returns Memory mapping index, 1024 on failure.
6484 * @param pIemCpu The IEM per CPU data.
6485 */
6486IEM_STATIC unsigned iemMemMapFindFree(PIEMCPU pIemCpu)
6487{
6488 /*
6489 * The easy case.
6490 */
6491 if (pIemCpu->cActiveMappings == 0)
6492 {
6493 pIemCpu->iNextMapping = 1;
6494 return 0;
6495 }
6496
6497 /* There should be enough mappings for all instructions. */
6498 AssertReturn(pIemCpu->cActiveMappings < RT_ELEMENTS(pIemCpu->aMemMappings), 1024);
6499
6500 for (unsigned i = 0; i < RT_ELEMENTS(pIemCpu->aMemMappings); i++)
6501 if (pIemCpu->aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
6502 return i;
6503
6504 AssertFailedReturn(1024);
6505}
6506
6507
6508/**
6509 * Commits a bounce buffer that needs writing back and unmaps it.
6510 *
6511 * @returns Strict VBox status code.
6512 * @param pIemCpu The IEM per CPU data.
6513 * @param iMemMap The index of the buffer to commit.
6514 */
6515IEM_STATIC VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PIEMCPU pIemCpu, unsigned iMemMap)
6516{
6517 Assert(pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
6518 Assert(pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
6519
6520 /*
6521 * Do the writing.
6522 */
6523#ifndef IEM_VERIFICATION_MODE_MINIMAL
6524 PVM pVM = IEMCPU_TO_VM(pIemCpu);
6525 if ( !pIemCpu->aMemBbMappings[iMemMap].fUnassigned
6526 && !IEM_VERIFICATION_ENABLED(pIemCpu))
6527 {
6528 uint16_t const cbFirst = pIemCpu->aMemBbMappings[iMemMap].cbFirst;
6529 uint16_t const cbSecond = pIemCpu->aMemBbMappings[iMemMap].cbSecond;
6530 uint8_t const *pbBuf = &pIemCpu->aBounceBuffers[iMemMap].ab[0];
6531 if (!pIemCpu->fBypassHandlers)
6532 {
6533 /*
6534 * Carefully and efficiently dealing with access handler return
6535         * codes makes this a little bloated.
6536 */
6537 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
6538 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst,
6539 pbBuf,
6540 cbFirst,
6541 PGMACCESSORIGIN_IEM);
6542 if (rcStrict == VINF_SUCCESS)
6543 {
6544 if (cbSecond)
6545 {
6546 rcStrict = PGMPhysWrite(pVM,
6547 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond,
6548 pbBuf + cbFirst,
6549 cbSecond,
6550 PGMACCESSORIGIN_IEM);
6551 if (rcStrict == VINF_SUCCESS)
6552 { /* nothing */ }
6553 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6554 {
6555 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
6556 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
6557 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
6558 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
6559 }
6560 else
6561 {
6562 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
6563 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
6564 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
6565 return rcStrict;
6566 }
6567 }
6568 }
6569 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6570 {
6571 if (!cbSecond)
6572 {
6573 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
6574 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
6575 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
6576 }
6577 else
6578 {
6579 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
6580 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond,
6581 pbBuf + cbFirst,
6582 cbSecond,
6583 PGMACCESSORIGIN_IEM);
6584 if (rcStrict2 == VINF_SUCCESS)
6585 {
6586 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
6587 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
6588 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
6589 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
6590 }
6591 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
6592 {
6593 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
6594 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
6595 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
6596 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
6597 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
6598 }
6599 else
6600 {
6601 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
6602 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
6603 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
6604 return rcStrict2;
6605 }
6606 }
6607 }
6608 else
6609 {
6610 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
6611 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
6612 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
6613 return rcStrict;
6614 }
6615 }
6616 else
6617 {
6618 /*
6619 * No access handlers, much simpler.
6620 */
6621 int rc = PGMPhysSimpleWriteGCPhys(pVM, pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
6622 if (RT_SUCCESS(rc))
6623 {
6624 if (cbSecond)
6625 {
6626 rc = PGMPhysSimpleWriteGCPhys(pVM, pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
6627 if (RT_SUCCESS(rc))
6628 { /* likely */ }
6629 else
6630 {
6631 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
6632 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
6633 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
6634 return rc;
6635 }
6636 }
6637 }
6638 else
6639 {
6640 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
6641 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
6642 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
6643 return rc;
6644 }
6645 }
6646 }
6647#endif
6648
6649#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
6650 /*
6651 * Record the write(s).
6652 */
6653 if (!pIemCpu->fNoRem)
6654 {
6655 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
6656 if (pEvtRec)
6657 {
6658 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
6659 pEvtRec->u.RamWrite.GCPhys = pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst;
6660 pEvtRec->u.RamWrite.cb = pIemCpu->aMemBbMappings[iMemMap].cbFirst;
6661 memcpy(pEvtRec->u.RamWrite.ab, &pIemCpu->aBounceBuffers[iMemMap].ab[0], pIemCpu->aMemBbMappings[iMemMap].cbFirst);
6662 AssertCompile(sizeof(pEvtRec->u.RamWrite.ab) == sizeof(pIemCpu->aBounceBuffers[0].ab));
6663 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
6664 *pIemCpu->ppIemEvtRecNext = pEvtRec;
6665 }
6666 if (pIemCpu->aMemBbMappings[iMemMap].cbSecond)
6667 {
6668 pEvtRec = iemVerifyAllocRecord(pIemCpu);
6669 if (pEvtRec)
6670 {
6671 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
6672 pEvtRec->u.RamWrite.GCPhys = pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond;
6673 pEvtRec->u.RamWrite.cb = pIemCpu->aMemBbMappings[iMemMap].cbSecond;
6674 memcpy(pEvtRec->u.RamWrite.ab,
6675 &pIemCpu->aBounceBuffers[iMemMap].ab[pIemCpu->aMemBbMappings[iMemMap].cbFirst],
6676 pIemCpu->aMemBbMappings[iMemMap].cbSecond);
6677 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
6678 *pIemCpu->ppIemEvtRecNext = pEvtRec;
6679 }
6680 }
6681 }
6682#endif
6683#if defined(IEM_VERIFICATION_MODE_MINIMAL) || defined(IEM_LOG_MEMORY_WRITES)
6684 Log(("IEM Wrote %RGp: %.*Rhxs\n", pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst,
6685 RT_MAX(RT_MIN(pIemCpu->aMemBbMappings[iMemMap].cbFirst, 64), 1), &pIemCpu->aBounceBuffers[iMemMap].ab[0]));
6686 if (pIemCpu->aMemBbMappings[iMemMap].cbSecond)
6687 Log(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond,
6688 RT_MIN(pIemCpu->aMemBbMappings[iMemMap].cbSecond, 64),
6689 &pIemCpu->aBounceBuffers[iMemMap].ab[pIemCpu->aMemBbMappings[iMemMap].cbFirst]));
6690
6691 size_t cbWrote = pIemCpu->aMemBbMappings[iMemMap].cbFirst + pIemCpu->aMemBbMappings[iMemMap].cbSecond;
6692 g_cbIemWrote = cbWrote;
6693 memcpy(g_abIemWrote, &pIemCpu->aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
6694#endif
6695
6696 /*
6697 * Free the mapping entry.
6698 */
6699 pIemCpu->aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6700 Assert(pIemCpu->cActiveMappings != 0);
6701 pIemCpu->cActiveMappings--;
6702 return VINF_SUCCESS;
6703}
6704
6705
6706/**
6707 * iemMemMap worker that deals with a request crossing pages.
6708 */
6709IEM_STATIC VBOXSTRICTRC
6710iemMemBounceBufferMapCrossPage(PIEMCPU pIemCpu, int iMemMap, void **ppvMem, size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
6711{
6712 /*
6713 * Do the address translations.
6714 */
6715 RTGCPHYS GCPhysFirst;
6716 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, GCPtrFirst, fAccess, &GCPhysFirst);
6717 if (rcStrict != VINF_SUCCESS)
6718 return rcStrict;
6719
6720/** @todo Testcase & AMD-V/VT-x verification: Check if CR2 should really be the
6721 * last byte. */
6722 RTGCPHYS GCPhysSecond;
6723 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, GCPtrFirst + (cbMem - 1), fAccess, &GCPhysSecond);
6724 if (rcStrict != VINF_SUCCESS)
6725 return rcStrict;
6726 GCPhysSecond &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
6727
6728 PVM pVM = IEMCPU_TO_VM(pIemCpu);
6729#ifdef IEM_VERIFICATION_MODE_FULL
6730 /*
6731 * Detect problematic memory when verifying so we can select
6732 * the right execution engine. (TLB: Redo this.)
6733 */
6734 if (IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
6735 {
6736 int rc2 = PGMPhysIemQueryAccess(pVM, GCPhysFirst, RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pIemCpu->fBypassHandlers);
6737 if (RT_SUCCESS(rc2))
6738 rc2 = PGMPhysIemQueryAccess(pVM, GCPhysSecond, RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pIemCpu->fBypassHandlers);
6739 if (RT_FAILURE(rc2))
6740 pIemCpu->fProblematicMemory = true;
6741 }
6742#endif
6743
6744
6745 /*
6746 * Read in the current memory content if it's a read, execute or partial
6747 * write access.
6748 */
6749 uint8_t *pbBuf = &pIemCpu->aBounceBuffers[iMemMap].ab[0];
6750 uint32_t const cbFirstPage = PAGE_SIZE - (GCPhysFirst & PAGE_OFFSET_MASK);
6751 uint32_t const cbSecondPage = (uint32_t)(cbMem - cbFirstPage);
6752
6753 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
6754 {
6755 if (!pIemCpu->fBypassHandlers)
6756 {
6757 /*
6758 * Must carefully deal with access handler status codes here,
6759             * which makes the code a bit bloated.
6760 */
6761 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
6762 if (rcStrict == VINF_SUCCESS)
6763 {
6764 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
6765 if (rcStrict == VINF_SUCCESS)
6766 { /*likely */ }
6767 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6768 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
6769 else
6770 {
6771                     Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysSecond=%RGp rcStrict=%Rrc (!!)\n",
6772 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
6773 return rcStrict;
6774 }
6775 }
6776 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6777 {
6778 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
6779 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
6780 {
6781 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
6782 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
6783 }
6784 else
6785 {
6786                     Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
6787                          GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict) ));
6788 return rcStrict2;
6789 }
6790 }
6791 else
6792 {
6793                 Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6794 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
6795 return rcStrict;
6796 }
6797 }
6798 else
6799 {
6800 /*
6801             * No informational status codes here, much more straightforward.
6802 */
6803 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
6804 if (RT_SUCCESS(rc))
6805 {
6806 Assert(rc == VINF_SUCCESS);
6807 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
6808 if (RT_SUCCESS(rc))
6809 Assert(rc == VINF_SUCCESS);
6810 else
6811 {
6812                     Log(("iemMemBounceBufferMapCrossPage: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
6813 return rc;
6814 }
6815 }
6816 else
6817 {
6818                 Log(("iemMemBounceBufferMapCrossPage: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
6819 return rc;
6820 }
6821 }
6822
6823#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
6824 if ( !pIemCpu->fNoRem
6825 && (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC)) )
6826 {
6827 /*
6828 * Record the reads.
6829 */
6830 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
6831 if (pEvtRec)
6832 {
6833 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
6834 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
6835 pEvtRec->u.RamRead.cb = cbFirstPage;
6836 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
6837 *pIemCpu->ppIemEvtRecNext = pEvtRec;
6838 }
6839 pEvtRec = iemVerifyAllocRecord(pIemCpu);
6840 if (pEvtRec)
6841 {
6842 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
6843 pEvtRec->u.RamRead.GCPhys = GCPhysSecond;
6844 pEvtRec->u.RamRead.cb = cbSecondPage;
6845 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
6846 *pIemCpu->ppIemEvtRecNext = pEvtRec;
6847 }
6848 }
6849#endif
6850 }
6851#ifdef VBOX_STRICT
6852 else
6853 memset(pbBuf, 0xcc, cbMem);
6854 if (cbMem < sizeof(pIemCpu->aBounceBuffers[iMemMap].ab))
6855 memset(pbBuf + cbMem, 0xaa, sizeof(pIemCpu->aBounceBuffers[iMemMap].ab) - cbMem);
6856#endif
6857
6858 /*
6859 * Commit the bounce buffer entry.
6860 */
6861 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
6862 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
6863 pIemCpu->aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
6864 pIemCpu->aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
6865 pIemCpu->aMemBbMappings[iMemMap].fUnassigned = false;
6866 pIemCpu->aMemMappings[iMemMap].pv = pbBuf;
6867 pIemCpu->aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
6868 pIemCpu->iNextMapping = iMemMap + 1;
6869 pIemCpu->cActiveMappings++;
6870
6871 iemMemUpdateWrittenCounter(pIemCpu, fAccess, cbMem);
6872 *ppvMem = pbBuf;
6873 return VINF_SUCCESS;
6874}
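/* Worked example (illustrative numbers only): a 4 byte access whose first byte
   sits at page offset 0xffe is split as
        cbFirstPage  = PAGE_SIZE - 0xffe = 2
        cbSecondPage = cbMem - cbFirstPage = 2
   so the bounce buffer carries two bytes from each of the two pages. */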
6875
6876
6877/**
6878 * iemMemMap worker that deals with iemMemPageMap failures.
6879 */
6880IEM_STATIC VBOXSTRICTRC iemMemBounceBufferMapPhys(PIEMCPU pIemCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
6881 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
6882{
6883 /*
6884 * Filter out conditions we can handle and the ones which shouldn't happen.
6885 */
6886 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
6887 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
6888 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
6889 {
6890 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
6891 return rcMap;
6892 }
6893 pIemCpu->cPotentialExits++;
6894
6895 /*
6896 * Read in the current memory content if it's a read, execute or partial
6897 * write access.
6898 */
6899 uint8_t *pbBuf = &pIemCpu->aBounceBuffers[iMemMap].ab[0];
6900 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
6901 {
6902 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
6903 memset(pbBuf, 0xff, cbMem);
6904 else
6905 {
6906 int rc;
6907 if (!pIemCpu->fBypassHandlers)
6908 {
6909 VBOXSTRICTRC rcStrict = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
6910 if (rcStrict == VINF_SUCCESS)
6911 { /* nothing */ }
6912 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6913 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
6914 else
6915 {
6916 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6917 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
6918 return rcStrict;
6919 }
6920 }
6921 else
6922 {
6923 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), pbBuf, GCPhysFirst, cbMem);
6924 if (RT_SUCCESS(rc))
6925 { /* likely */ }
6926 else
6927 {
6928                 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n",
6929 GCPhysFirst, rc));
6930 return rc;
6931 }
6932 }
6933 }
6934
6935#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
6936 if ( !pIemCpu->fNoRem
6937 && (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC)) )
6938 {
6939 /*
6940 * Record the read.
6941 */
6942 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
6943 if (pEvtRec)
6944 {
6945 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
6946 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
6947 pEvtRec->u.RamRead.cb = (uint32_t)cbMem;
6948 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
6949 *pIemCpu->ppIemEvtRecNext = pEvtRec;
6950 }
6951 }
6952#endif
6953 }
6954#ifdef VBOX_STRICT
6955 else
6956 memset(pbBuf, 0xcc, cbMem);
6957#endif
6958#ifdef VBOX_STRICT
6959 if (cbMem < sizeof(pIemCpu->aBounceBuffers[iMemMap].ab))
6960 memset(pbBuf + cbMem, 0xaa, sizeof(pIemCpu->aBounceBuffers[iMemMap].ab) - cbMem);
6961#endif
6962
6963 /*
6964 * Commit the bounce buffer entry.
6965 */
6966 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
6967 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
6968 pIemCpu->aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
6969 pIemCpu->aMemBbMappings[iMemMap].cbSecond = 0;
6970 pIemCpu->aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
6971 pIemCpu->aMemMappings[iMemMap].pv = pbBuf;
6972 pIemCpu->aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
6973 pIemCpu->iNextMapping = iMemMap + 1;
6974 pIemCpu->cActiveMappings++;
6975
6976 iemMemUpdateWrittenCounter(pIemCpu, fAccess, cbMem);
6977 *ppvMem = pbBuf;
6978 return VINF_SUCCESS;
6979}
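/* Illustrative note (informal): for VERR_PGM_PHYS_TLB_UNASSIGNED the readable
   part of the bounce buffer is filled with 0xff above, mimicking a read of
   unassigned physical memory, and fUnassigned is set so that
   iemMemBounceBufferCommitAndUnmap skips the write back. */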
6980
6981
6982
6983/**
6984 * Maps the specified guest memory for the given kind of access.
6985 *
6986 * This may be using bounce buffering of the memory if it's crossing a page
6987 * boundary or if there is an access handler installed for any of it. Because
6988 * of lock prefix guarantees, we're in for some extra clutter when this
6989 * happens.
6990 *
6991 * This may raise a \#GP, \#SS, \#PF or \#AC.
6992 *
6993 * @returns VBox strict status code.
6994 *
6995 * @param pIemCpu The IEM per CPU data.
6996 * @param ppvMem Where to return the pointer to the mapped
6997 * memory.
6998 * @param cbMem The number of bytes to map. This is usually 1,
6999 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
7000 * string operations it can be up to a page.
7001 * @param iSegReg The index of the segment register to use for
7002 * this access. The base and limits are checked.
7003 * Use UINT8_MAX to indicate that no segmentation
7004 * is required (for IDT, GDT and LDT accesses).
7005 * @param GCPtrMem The address of the guest memory.
7006 * @param fAccess How the memory is being accessed. The
7007 * IEM_ACCESS_TYPE_XXX bit is used to figure out
7008 * how to map the memory, while the
7009 * IEM_ACCESS_WHAT_XXX bit is used when raising
7010 * exceptions.
7011 */
7012IEM_STATIC VBOXSTRICTRC
7013iemMemMap(PIEMCPU pIemCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
7014{
7015 /*
7016 * Check the input and figure out which mapping entry to use.
7017 */
7018 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
7019 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
7020 Assert(pIemCpu->cActiveMappings < RT_ELEMENTS(pIemCpu->aMemMappings));
7021
7022 unsigned iMemMap = pIemCpu->iNextMapping;
7023 if ( iMemMap >= RT_ELEMENTS(pIemCpu->aMemMappings)
7024 || pIemCpu->aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
7025 {
7026 iMemMap = iemMemMapFindFree(pIemCpu);
7027 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pIemCpu->aMemMappings),
7028 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pIemCpu->cActiveMappings,
7029 pIemCpu->aMemMappings[0].fAccess, pIemCpu->aMemMappings[1].fAccess,
7030 pIemCpu->aMemMappings[2].fAccess),
7031 VERR_IEM_IPE_9);
7032 }
7033
7034 /*
7035 * Map the memory, checking that we can actually access it. If something
7036 * slightly complicated happens, fall back on bounce buffering.
7037 */
7038 VBOXSTRICTRC rcStrict = iemMemApplySegment(pIemCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
7039 if (rcStrict != VINF_SUCCESS)
7040 return rcStrict;
7041
7042 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem > PAGE_SIZE) /* Crossing a page boundary? */
7043 return iemMemBounceBufferMapCrossPage(pIemCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
7044
7045 RTGCPHYS GCPhysFirst;
7046 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, GCPtrMem, fAccess, &GCPhysFirst);
7047 if (rcStrict != VINF_SUCCESS)
7048 return rcStrict;
7049
7050 void *pvMem;
7051 rcStrict = iemMemPageMap(pIemCpu, GCPhysFirst, fAccess, &pvMem, &pIemCpu->aMemMappingLocks[iMemMap].Lock);
7052 if (rcStrict != VINF_SUCCESS)
7053 return iemMemBounceBufferMapPhys(pIemCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
7054
7055 /*
7056 * Fill in the mapping table entry.
7057 */
7058 pIemCpu->aMemMappings[iMemMap].pv = pvMem;
7059 pIemCpu->aMemMappings[iMemMap].fAccess = fAccess;
7060 pIemCpu->iNextMapping = iMemMap + 1;
7061 pIemCpu->cActiveMappings++;
7062
7063 iemMemUpdateWrittenCounter(pIemCpu, fAccess, cbMem);
7064 *ppvMem = pvMem;
7065 return VINF_SUCCESS;
7066}
7067
7068
7069/**
7070 * Commits the guest memory if bounce buffered and unmaps it.
7071 *
7072 * @returns Strict VBox status code.
7073 * @param pIemCpu The IEM per CPU data.
7074 * @param pvMem The mapping.
7075 * @param fAccess The kind of access.
7076 */
7077IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PIEMCPU pIemCpu, void *pvMem, uint32_t fAccess)
7078{
7079 int iMemMap = iemMapLookup(pIemCpu, pvMem, fAccess);
7080 AssertReturn(iMemMap >= 0, iMemMap);
7081
7082 /* If it's bounce buffered, we may need to write back the buffer. */
7083 if (pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
7084 {
7085 if (pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
7086 return iemMemBounceBufferCommitAndUnmap(pIemCpu, iMemMap);
7087 }
7088 /* Otherwise unlock it. */
7089 else
7090 PGMPhysReleasePageMappingLock(IEMCPU_TO_VM(pIemCpu), &pIemCpu->aMemMappingLocks[iMemMap].Lock);
7091
7092 /* Free the entry. */
7093 pIemCpu->aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
7094 Assert(pIemCpu->cActiveMappings != 0);
7095 pIemCpu->cActiveMappings--;
7096 return VINF_SUCCESS;
7097}
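/* Illustrative usage sketch (informal, not build code; GCPtrEff is a
   hypothetical effective address and IEM_ACCESS_DATA_R is just an example,
   mirroring the fetch helpers further down):
        uint32_t const *pu32Src;
        VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src),
                                          X86_SREG_DS, GCPtrEff, IEM_ACCESS_DATA_R);
        if (rcStrict == VINF_SUCCESS)
        {
            uint32_t const u32Value = *pu32Src;
            rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
        }
   The fAccess value passed to iemMemCommitAndUnmap must match the one given to
   iemMemMap, since iemMapLookup keys on both the pointer and the access. */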
7098
7099
7100/**
7101 * Rolls back mappings, releasing page locks and such.
7102 *
7103 * The caller shall only call this after checking cActiveMappings.
7104 *
7106 * @param pIemCpu The IEM per CPU data.
7107 */
7108IEM_STATIC void iemMemRollback(PIEMCPU pIemCpu)
7109{
7110 Assert(pIemCpu->cActiveMappings > 0);
7111
7112 uint32_t iMemMap = RT_ELEMENTS(pIemCpu->aMemMappings);
7113 while (iMemMap-- > 0)
7114 {
7115 uint32_t fAccess = pIemCpu->aMemMappings[iMemMap].fAccess;
7116 if (fAccess != IEM_ACCESS_INVALID)
7117 {
7118 AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
7119 pIemCpu->aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
7120 if (!(fAccess & IEM_ACCESS_BOUNCE_BUFFERED))
7121 PGMPhysReleasePageMappingLock(IEMCPU_TO_VM(pIemCpu), &pIemCpu->aMemMappingLocks[iMemMap].Lock);
7122 Assert(pIemCpu->cActiveMappings > 0);
7123 pIemCpu->cActiveMappings--;
7124 }
7125 }
7126}
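/* Illustrative usage sketch (informal): an instruction path that fails after
   mapping memory typically checks cActiveMappings and rolls back instead of
   committing, e.g.
        if (pIemCpu->cActiveMappings > 0)
            iemMemRollback(pIemCpu);
   so that any page mapping locks taken by iemMemMap are released. */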
7127
7128
7129/**
7130 * Fetches a data byte.
7131 *
7132 * @returns Strict VBox status code.
7133 * @param pIemCpu The IEM per CPU data.
7134 * @param pu8Dst Where to return the byte.
7135 * @param iSegReg The index of the segment register to use for
7136 * this access. The base and limits are checked.
7137 * @param GCPtrMem The address of the guest memory.
7138 */
7139IEM_STATIC VBOXSTRICTRC iemMemFetchDataU8(PIEMCPU pIemCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7140{
7141 /* The lazy approach for now... */
7142 uint8_t const *pu8Src;
7143 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7144 if (rc == VINF_SUCCESS)
7145 {
7146 *pu8Dst = *pu8Src;
7147 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
7148 }
7149 return rc;
7150}
7151
7152
7153/**
7154 * Fetches a data word.
7155 *
7156 * @returns Strict VBox status code.
7157 * @param pIemCpu The IEM per CPU data.
7158 * @param pu16Dst Where to return the word.
7159 * @param iSegReg The index of the segment register to use for
7160 * this access. The base and limits are checked.
7161 * @param GCPtrMem The address of the guest memory.
7162 */
7163IEM_STATIC VBOXSTRICTRC iemMemFetchDataU16(PIEMCPU pIemCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7164{
7165 /* The lazy approach for now... */
7166 uint16_t const *pu16Src;
7167 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7168 if (rc == VINF_SUCCESS)
7169 {
7170 *pu16Dst = *pu16Src;
7171 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
7172 }
7173 return rc;
7174}
7175
7176
7177/**
7178 * Fetches a data dword.
7179 *
7180 * @returns Strict VBox status code.
7181 * @param pIemCpu The IEM per CPU data.
7182 * @param pu32Dst Where to return the dword.
7183 * @param iSegReg The index of the segment register to use for
7184 * this access. The base and limits are checked.
7185 * @param GCPtrMem The address of the guest memory.
7186 */
7187IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7188{
7189 /* The lazy approach for now... */
7190 uint32_t const *pu32Src;
7191 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7192 if (rc == VINF_SUCCESS)
7193 {
7194 *pu32Dst = *pu32Src;
7195 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
7196 }
7197 return rc;
7198}
7199
7200
7201#ifdef SOME_UNUSED_FUNCTION
7202/**
7203 * Fetches a data dword and sign extends it to a qword.
7204 *
7205 * @returns Strict VBox status code.
7206 * @param pIemCpu The IEM per CPU data.
7207 * @param pu64Dst Where to return the sign extended value.
7208 * @param iSegReg The index of the segment register to use for
7209 * this access. The base and limits are checked.
7210 * @param GCPtrMem The address of the guest memory.
7211 */
7212IEM_STATIC VBOXSTRICTRC iemMemFetchDataS32SxU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7213{
7214 /* The lazy approach for now... */
7215 int32_t const *pi32Src;
7216 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7217 if (rc == VINF_SUCCESS)
7218 {
7219 *pu64Dst = *pi32Src;
7220 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
7221 }
7222#ifdef __GNUC__ /* warning: GCC may be a royal pain */
7223 else
7224 *pu64Dst = 0;
7225#endif
7226 return rc;
7227}
7228#endif
7229
7230
7231/**
7232 * Fetches a data qword.
7233 *
7234 * @returns Strict VBox status code.
7235 * @param pIemCpu The IEM per CPU data.
7236 * @param pu64Dst Where to return the qword.
7237 * @param iSegReg The index of the segment register to use for
7238 * this access. The base and limits are checked.
7239 * @param GCPtrMem The address of the guest memory.
7240 */
7241IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7242{
7243 /* The lazy approach for now... */
7244 uint64_t const *pu64Src;
7245 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7246 if (rc == VINF_SUCCESS)
7247 {
7248 *pu64Dst = *pu64Src;
7249 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
7250 }
7251 return rc;
7252}
7253
7254
7255/**
7256 * Fetches a data qword, aligned at a 16 byte boundary (for SSE).
7257 *
7258 * @returns Strict VBox status code.
7259 * @param pIemCpu The IEM per CPU data.
7260 * @param pu64Dst Where to return the qword.
7261 * @param iSegReg The index of the segment register to use for
7262 * this access. The base and limits are checked.
7263 * @param GCPtrMem The address of the guest memory.
7264 */
7265IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64AlignedU128(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7266{
7267 /* The lazy approach for now... */
7268 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
7269 if (RT_UNLIKELY(GCPtrMem & 15))
7270 return iemRaiseGeneralProtectionFault0(pIemCpu);
7271
7272 uint64_t const *pu64Src;
7273 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7274 if (rc == VINF_SUCCESS)
7275 {
7276 *pu64Dst = *pu64Src;
7277 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
7278 }
7279 return rc;
7280}
7281
7282
7283/**
7284 * Fetches a data tword.
7285 *
7286 * @returns Strict VBox status code.
7287 * @param pIemCpu The IEM per CPU data.
7288 * @param pr80Dst Where to return the tword.
7289 * @param iSegReg The index of the segment register to use for
7290 * this access. The base and limits are checked.
7291 * @param GCPtrMem The address of the guest memory.
7292 */
7293IEM_STATIC VBOXSTRICTRC iemMemFetchDataR80(PIEMCPU pIemCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7294{
7295 /* The lazy approach for now... */
7296 PCRTFLOAT80U pr80Src;
7297 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pr80Src, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7298 if (rc == VINF_SUCCESS)
7299 {
7300 *pr80Dst = *pr80Src;
7301 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
7302 }
7303 return rc;
7304}
7305
7306
7307/**
7308 * Fetches a data dqword (double qword), generally SSE related.
7309 *
7310 * @returns Strict VBox status code.
7311 * @param pIemCpu The IEM per CPU data.
7312 * @param pu128Dst Where to return the dqword.
7313 * @param iSegReg The index of the segment register to use for
7314 * this access. The base and limits are checked.
7315 * @param GCPtrMem The address of the guest memory.
7316 */
7317IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128(PIEMCPU pIemCpu, uint128_t *pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7318{
7319 /* The lazy approach for now... */
7320 uint128_t const *pu128Src;
7321 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7322 if (rc == VINF_SUCCESS)
7323 {
7324 *pu128Dst = *pu128Src;
7325 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7326 }
7327 return rc;
7328}
7329
7330
7331/**
7332 * Fetches a data dqword (double qword) at an aligned address, generally SSE
7333 * related.
7334 *
7335 * Raises \#GP(0) if not aligned.
7336 *
7337 * @returns Strict VBox status code.
7338 * @param pIemCpu The IEM per CPU data.
7339 * @param pu128Dst Where to return the dqword.
7340 * @param iSegReg The index of the segment register to use for
7341 * this access. The base and limits are checked.
7342 * @param GCPtrMem The address of the guest memory.
7343 */
7344IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PIEMCPU pIemCpu, uint128_t *pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7345{
7346 /* The lazy approach for now... */
7347 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
7348 if ( (GCPtrMem & 15)
7349 && !(pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXSCR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
7350 return iemRaiseGeneralProtectionFault0(pIemCpu);
7351
7352 uint128_t const *pu128Src;
7353 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7354 if (rc == VINF_SUCCESS)
7355 {
7356 *pu128Dst = *pu128Src;
7357 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7358 }
7359 return rc;
7360}
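/* Illustrative note (informal): the alignment check above only fires for
   addresses that are not 16 byte aligned, e.g. GCPtrMem = 0x1008 raises #GP(0)
   unless MXCSR.MM permits misaligned accesses, while GCPtrMem = 0x1010 passes. */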
7361
7362
7363
7364
7365/**
7366 * Fetches a descriptor register (lgdt, lidt).
7367 *
7368 * @returns Strict VBox status code.
7369 * @param pIemCpu The IEM per CPU data.
7370 * @param pcbLimit Where to return the limit.
7371 * @param pGCPtrBase Where to return the base.
7372 * @param iSegReg The index of the segment register to use for
7373 * this access. The base and limits are checked.
7374 * @param GCPtrMem The address of the guest memory.
7375 * @param enmOpSize The effective operand size.
7376 */
7377IEM_STATIC VBOXSTRICTRC iemMemFetchDataXdtr(PIEMCPU pIemCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
7378 RTGCPTR GCPtrMem, IEMMODE enmOpSize)
7379{
7380 uint8_t const *pu8Src;
7381 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu,
7382 (void **)&pu8Src,
7383 enmOpSize == IEMMODE_64BIT
7384 ? 2 + 8
7385 : enmOpSize == IEMMODE_32BIT
7386 ? 2 + 4
7387 : 2 + 3,
7388 iSegReg,
7389 GCPtrMem,
7390 IEM_ACCESS_DATA_R);
7391 if (rcStrict == VINF_SUCCESS)
7392 {
7393 *pcbLimit = RT_MAKE_U16(pu8Src[0], pu8Src[1]);
7394 switch (enmOpSize)
7395 {
7396 case IEMMODE_16BIT:
7397 *pGCPtrBase = RT_MAKE_U32_FROM_U8(pu8Src[2], pu8Src[3], pu8Src[4], 0);
7398 break;
7399 case IEMMODE_32BIT:
7400 *pGCPtrBase = RT_MAKE_U32_FROM_U8(pu8Src[2], pu8Src[3], pu8Src[4], pu8Src[5]);
7401 break;
7402 case IEMMODE_64BIT:
7403 *pGCPtrBase = RT_MAKE_U64_FROM_U8(pu8Src[2], pu8Src[3], pu8Src[4], pu8Src[5],
7404 pu8Src[6], pu8Src[7], pu8Src[8], pu8Src[9]);
7405 break;
7406
7407 IEM_NOT_REACHED_DEFAULT_CASE_RET();
7408 }
7409 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
7410 }
7411 return rcStrict;
7412}
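/* Illustrative operand layout (informal sketch of what the code above decodes):
        bytes 0..1 : 16-bit limit                    (all operand sizes)
        bytes 2..4 : 24-bit base, high byte ignored  (16-bit operand size)
        bytes 2..5 : 32-bit base                     (32-bit operand size)
        bytes 2..9 : 64-bit base                     (64-bit operand size) */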
7413
7414
7415
7416/**
7417 * Stores a data byte.
7418 *
7419 * @returns Strict VBox status code.
7420 * @param pIemCpu The IEM per CPU data.
7421 * @param iSegReg The index of the segment register to use for
7422 * this access. The base and limits are checked.
7423 * @param GCPtrMem The address of the guest memory.
7424 * @param u8Value The value to store.
7425 */
7426IEM_STATIC VBOXSTRICTRC iemMemStoreDataU8(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
7427{
7428 /* The lazy approach for now... */
7429 uint8_t *pu8Dst;
7430 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7431 if (rc == VINF_SUCCESS)
7432 {
7433 *pu8Dst = u8Value;
7434 rc = iemMemCommitAndUnmap(pIemCpu, pu8Dst, IEM_ACCESS_DATA_W);
7435 }
7436 return rc;
7437}
7438
7439
7440/**
7441 * Stores a data word.
7442 *
7443 * @returns Strict VBox status code.
7444 * @param pIemCpu The IEM per CPU data.
7445 * @param iSegReg The index of the segment register to use for
7446 * this access. The base and limits are checked.
7447 * @param GCPtrMem The address of the guest memory.
7448 * @param u16Value The value to store.
7449 */
7450IEM_STATIC VBOXSTRICTRC iemMemStoreDataU16(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
7451{
7452 /* The lazy approach for now... */
7453 uint16_t *pu16Dst;
7454 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7455 if (rc == VINF_SUCCESS)
7456 {
7457 *pu16Dst = u16Value;
7458 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_DATA_W);
7459 }
7460 return rc;
7461}
7462
7463
7464/**
7465 * Stores a data dword.
7466 *
7467 * @returns Strict VBox status code.
7468 * @param pIemCpu The IEM per CPU data.
7469 * @param iSegReg The index of the segment register to use for
7470 * this access. The base and limits are checked.
7471 * @param GCPtrMem The address of the guest memory.
7472 * @param u32Value The value to store.
7473 */
7474IEM_STATIC VBOXSTRICTRC iemMemStoreDataU32(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
7475{
7476 /* The lazy approach for now... */
7477 uint32_t *pu32Dst;
7478 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7479 if (rc == VINF_SUCCESS)
7480 {
7481 *pu32Dst = u32Value;
7482 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_DATA_W);
7483 }
7484 return rc;
7485}
7486
7487
7488/**
7489 * Stores a data qword.
7490 *
7491 * @returns Strict VBox status code.
7492 * @param pIemCpu The IEM per CPU data.
7493 * @param iSegReg The index of the segment register to use for
7494 * this access. The base and limits are checked.
7495 * @param GCPtrMem The address of the guest memory.
7496 * @param u64Value The value to store.
7497 */
7498IEM_STATIC VBOXSTRICTRC iemMemStoreDataU64(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
7499{
7500 /* The lazy approach for now... */
7501 uint64_t *pu64Dst;
7502 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7503 if (rc == VINF_SUCCESS)
7504 {
7505 *pu64Dst = u64Value;
7506 rc = iemMemCommitAndUnmap(pIemCpu, pu64Dst, IEM_ACCESS_DATA_W);
7507 }
7508 return rc;
7509}
7510
7511
7512/**
7513 * Stores a data dqword.
7514 *
7515 * @returns Strict VBox status code.
7516 * @param pIemCpu The IEM per CPU data.
7517 * @param iSegReg The index of the segment register to use for
7518 * this access. The base and limits are checked.
7519 * @param GCPtrMem The address of the guest memory.
7520 * @param u128Value The value to store.
7521 */
7522IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint128_t u128Value)
7523{
7524 /* The lazy approach for now... */
7525 uint128_t *pu128Dst;
7526 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7527 if (rc == VINF_SUCCESS)
7528 {
7529 *pu128Dst = u128Value;
7530 rc = iemMemCommitAndUnmap(pIemCpu, pu128Dst, IEM_ACCESS_DATA_W);
7531 }
7532 return rc;
7533}
7534
7535
7536/**
7537 * Stores a data dqword, SSE aligned.
7538 *
7539 * @returns Strict VBox status code.
7540 * @param pIemCpu The IEM per CPU data.
7541 * @param iSegReg The index of the segment register to use for
7542 * this access. The base and limits are checked.
7543 * @param GCPtrMem The address of the guest memory.
7544 * @param u128Value The value to store.
7545 */
7546IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint128_t u128Value)
7547{
7548 /* The lazy approach for now... */
7549 if ( (GCPtrMem & 15)
7550 && !(pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXSCR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
7551 return iemRaiseGeneralProtectionFault0(pIemCpu);
7552
7553 uint128_t *pu128Dst;
7554 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7555 if (rc == VINF_SUCCESS)
7556 {
7557 *pu128Dst = u128Value;
7558 rc = iemMemCommitAndUnmap(pIemCpu, pu128Dst, IEM_ACCESS_DATA_W);
7559 }
7560 return rc;
7561}
7562
7563
7564/**
7565 * Stores a descriptor register (sgdt, sidt).
7566 *
7567 * @returns Strict VBox status code.
7568 * @param pIemCpu The IEM per CPU data.
7569 * @param cbLimit The limit.
7570 * @param GCPtrBase The base address.
7571 * @param iSegReg The index of the segment register to use for
7572 * this access. The base and limits are checked.
7573 * @param GCPtrMem The address of the guest memory.
7574 * @param enmOpSize The effective operand size.
7575 */
7576IEM_STATIC VBOXSTRICTRC
7577iemMemStoreDataXdtr(PIEMCPU pIemCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem, IEMMODE enmOpSize)
7578{
7579 uint8_t *pu8Src;
7580 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu,
7581 (void **)&pu8Src,
7582 enmOpSize == IEMMODE_64BIT
7583 ? 2 + 8
7584 : enmOpSize == IEMMODE_32BIT
7585 ? 2 + 4
7586 : 2 + 3,
7587 iSegReg,
7588 GCPtrMem,
7589 IEM_ACCESS_DATA_W);
7590 if (rcStrict == VINF_SUCCESS)
7591 {
7592 pu8Src[0] = RT_BYTE1(cbLimit);
7593 pu8Src[1] = RT_BYTE2(cbLimit);
7594 pu8Src[2] = RT_BYTE1(GCPtrBase);
7595 pu8Src[3] = RT_BYTE2(GCPtrBase);
7596 pu8Src[4] = RT_BYTE3(GCPtrBase);
7597 if (enmOpSize == IEMMODE_16BIT)
7598 pu8Src[5] = 0; /* Note! the 286 stored 0xff here. */
7599 else
7600 {
7601 pu8Src[5] = RT_BYTE4(GCPtrBase);
7602 if (enmOpSize == IEMMODE_64BIT)
7603 {
7604 pu8Src[6] = RT_BYTE5(GCPtrBase);
7605 pu8Src[7] = RT_BYTE6(GCPtrBase);
7606 pu8Src[8] = RT_BYTE7(GCPtrBase);
7607 pu8Src[9] = RT_BYTE8(GCPtrBase);
7608 }
7609 }
7610 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pu8Src, IEM_ACCESS_DATA_W);
7611 }
7612 return rcStrict;
7613}
7614
7615
7616/**
7617 * Pushes a word onto the stack.
7618 *
7619 * @returns Strict VBox status code.
7620 * @param pIemCpu The IEM per CPU data.
7621 * @param u16Value The value to push.
7622 */
7623IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PIEMCPU pIemCpu, uint16_t u16Value)
7624{
7625    /* Decrement the stack pointer. */
7626 uint64_t uNewRsp;
7627 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7628 RTGCPTR GCPtrTop = iemRegGetRspForPush(pIemCpu, pCtx, 2, &uNewRsp);
7629
7630 /* Write the word the lazy way. */
7631 uint16_t *pu16Dst;
7632 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7633 if (rc == VINF_SUCCESS)
7634 {
7635 *pu16Dst = u16Value;
7636 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_STACK_W);
7637 }
7638
7639    /* Commit the new RSP value unless an access handler made trouble. */
7640 if (rc == VINF_SUCCESS)
7641 pCtx->rsp = uNewRsp;
7642
7643 return rc;
7644}
7645
7646
7647/**
7648 * Pushes a dword onto the stack.
7649 *
7650 * @returns Strict VBox status code.
7651 * @param pIemCpu The IEM per CPU data.
7652 * @param u32Value The value to push.
7653 */
7654IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PIEMCPU pIemCpu, uint32_t u32Value)
7655{
7656    /* Decrement the stack pointer. */
7657 uint64_t uNewRsp;
7658 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7659 RTGCPTR GCPtrTop = iemRegGetRspForPush(pIemCpu, pCtx, 4, &uNewRsp);
7660
7661 /* Write the dword the lazy way. */
7662 uint32_t *pu32Dst;
7663 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7664 if (rc == VINF_SUCCESS)
7665 {
7666 *pu32Dst = u32Value;
7667 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_STACK_W);
7668 }
7669
7670    /* Commit the new RSP value unless an access handler made trouble. */
7671 if (rc == VINF_SUCCESS)
7672 pCtx->rsp = uNewRsp;
7673
7674 return rc;
7675}
7676
7677
7678/**
7679 * Pushes a dword segment register value onto the stack.
7680 *
7681 * @returns Strict VBox status code.
7682 * @param pIemCpu The IEM per CPU data.
7683 * @param u32Value The value to push.
7684 */
7685IEM_STATIC VBOXSTRICTRC iemMemStackPushU32SReg(PIEMCPU pIemCpu, uint32_t u32Value)
7686{
7687    /* Decrement the stack pointer. */
7688 uint64_t uNewRsp;
7689 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7690 RTGCPTR GCPtrTop = iemRegGetRspForPush(pIemCpu, pCtx, 4, &uNewRsp);
7691
7692 VBOXSTRICTRC rc;
7693 if (IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
7694 {
7695 /* The recompiler writes a full dword. */
7696 uint32_t *pu32Dst;
7697 rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7698 if (rc == VINF_SUCCESS)
7699 {
7700 *pu32Dst = u32Value;
7701 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_STACK_W);
7702 }
7703 }
7704 else
7705 {
7706        /* The Intel docs talk about zero extending the selector register
7707 value. My actual intel CPU here might be zero extending the value
7708 but it still only writes the lower word... */
7709 /** @todo Test this on new HW and on AMD and in 64-bit mode. Also test what
7710         * happens when crossing an electric page boundary: is the high word checked
7711 * for write accessibility or not? Probably it is. What about segment limits?
7712 * It appears this behavior is also shared with trap error codes.
7713 *
7714 * Docs indicate the behavior changed maybe in Pentium or Pentium Pro. Check
7715 * ancient hardware when it actually did change. */
7716 uint16_t *pu16Dst;
7717 rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(uint32_t), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_RW);
7718 if (rc == VINF_SUCCESS)
7719 {
7720 *pu16Dst = (uint16_t)u32Value;
7721 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_STACK_RW);
7722 }
7723 }
7724
7725    /* Commit the new RSP value unless an access handler made trouble. */
7726 if (rc == VINF_SUCCESS)
7727 pCtx->rsp = uNewRsp;
7728
7729 return rc;
7730}
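/* Illustrative effect (informal note): on the non-recompiler path a 32-bit
   operand size push of a segment register still lowers RSP by 4, but only the
   low word of the new stack slot is written; the slot is mapped read/write and
   its upper word left unchanged, matching the observed behaviour described in
   the comment above. */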
7731
7732
7733/**
7734 * Pushes a qword onto the stack.
7735 *
7736 * @returns Strict VBox status code.
7737 * @param pIemCpu The IEM per CPU data.
7738 * @param u64Value The value to push.
7739 */
7740IEM_STATIC VBOXSTRICTRC iemMemStackPushU64(PIEMCPU pIemCpu, uint64_t u64Value)
7741{
7742    /* Decrement the stack pointer. */
7743 uint64_t uNewRsp;
7744 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7745 RTGCPTR GCPtrTop = iemRegGetRspForPush(pIemCpu, pCtx, 8, &uNewRsp);
7746
7747    /* Write the qword the lazy way. */
7748 uint64_t *pu64Dst;
7749 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7750 if (rc == VINF_SUCCESS)
7751 {
7752 *pu64Dst = u64Value;
7753 rc = iemMemCommitAndUnmap(pIemCpu, pu64Dst, IEM_ACCESS_STACK_W);
7754 }
7755
7756    /* Commit the new RSP value unless an access handler made trouble. */
7757 if (rc == VINF_SUCCESS)
7758 pCtx->rsp = uNewRsp;
7759
7760 return rc;
7761}
7762
7763
7764/**
7765 * Pops a word from the stack.
7766 *
7767 * @returns Strict VBox status code.
7768 * @param pIemCpu The IEM per CPU data.
7769 * @param pu16Value Where to store the popped value.
7770 */
7771IEM_STATIC VBOXSTRICTRC iemMemStackPopU16(PIEMCPU pIemCpu, uint16_t *pu16Value)
7772{
7773 /* Increment the stack pointer. */
7774 uint64_t uNewRsp;
7775 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7776 RTGCPTR GCPtrTop = iemRegGetRspForPop(pIemCpu, pCtx, 2, &uNewRsp);
7777
7778    /* Read the word the lazy way. */
7779 uint16_t const *pu16Src;
7780 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
7781 if (rc == VINF_SUCCESS)
7782 {
7783 *pu16Value = *pu16Src;
7784 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
7785
7786 /* Commit the new RSP value. */
7787 if (rc == VINF_SUCCESS)
7788 pCtx->rsp = uNewRsp;
7789 }
7790
7791 return rc;
7792}
7793
7794
7795/**
7796 * Pops a dword from the stack.
7797 *
7798 * @returns Strict VBox status code.
7799 * @param pIemCpu The IEM per CPU data.
7800 * @param pu32Value Where to store the popped value.
7801 */
7802IEM_STATIC VBOXSTRICTRC iemMemStackPopU32(PIEMCPU pIemCpu, uint32_t *pu32Value)
7803{
7804 /* Increment the stack pointer. */
7805 uint64_t uNewRsp;
7806 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7807 RTGCPTR GCPtrTop = iemRegGetRspForPop(pIemCpu, pCtx, 4, &uNewRsp);
7808
7809    /* Read the dword the lazy way. */
7810 uint32_t const *pu32Src;
7811 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
7812 if (rc == VINF_SUCCESS)
7813 {
7814 *pu32Value = *pu32Src;
7815 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
7816
7817 /* Commit the new RSP value. */
7818 if (rc == VINF_SUCCESS)
7819 pCtx->rsp = uNewRsp;
7820 }
7821
7822 return rc;
7823}
7824
7825
7826/**
7827 * Pops a qword from the stack.
7828 *
7829 * @returns Strict VBox status code.
7830 * @param pIemCpu The IEM per CPU data.
7831 * @param pu64Value Where to store the popped value.
7832 */
7833IEM_STATIC VBOXSTRICTRC iemMemStackPopU64(PIEMCPU pIemCpu, uint64_t *pu64Value)
7834{
7835 /* Increment the stack pointer. */
7836 uint64_t uNewRsp;
7837 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7838 RTGCPTR GCPtrTop = iemRegGetRspForPop(pIemCpu, pCtx, 8, &uNewRsp);
7839
7840    /* Read the qword the lazy way. */
7841 uint64_t const *pu64Src;
7842 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
7843 if (rc == VINF_SUCCESS)
7844 {
7845 *pu64Value = *pu64Src;
7846 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
7847
7848 /* Commit the new RSP value. */
7849 if (rc == VINF_SUCCESS)
7850 pCtx->rsp = uNewRsp;
7851 }
7852
7853 return rc;
7854}
7855
7856
7857/**
7858 * Pushes a word onto the stack, using a temporary stack pointer.
7859 *
7860 * @returns Strict VBox status code.
7861 * @param pIemCpu The IEM per CPU data.
7862 * @param u16Value The value to push.
7863 * @param pTmpRsp Pointer to the temporary stack pointer.
7864 */
7865IEM_STATIC VBOXSTRICTRC iemMemStackPushU16Ex(PIEMCPU pIemCpu, uint16_t u16Value, PRTUINT64U pTmpRsp)
7866{
7867    /* Decrement the stack pointer. */
7868 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7869 RTUINT64U NewRsp = *pTmpRsp;
7870 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pIemCpu, pCtx, &NewRsp, 2);
7871
7872 /* Write the word the lazy way. */
7873 uint16_t *pu16Dst;
7874 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7875 if (rc == VINF_SUCCESS)
7876 {
7877 *pu16Dst = u16Value;
7878 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_STACK_W);
7879 }
7880
7881    /* Commit the new RSP value unless an access handler made trouble. */
7882 if (rc == VINF_SUCCESS)
7883 *pTmpRsp = NewRsp;
7884
7885 return rc;
7886}
7887
7888
7889/**
7890 * Pushes a dword onto the stack, using a temporary stack pointer.
7891 *
7892 * @returns Strict VBox status code.
7893 * @param pIemCpu The IEM per CPU data.
7894 * @param u32Value The value to push.
7895 * @param pTmpRsp Pointer to the temporary stack pointer.
7896 */
7897IEM_STATIC VBOXSTRICTRC iemMemStackPushU32Ex(PIEMCPU pIemCpu, uint32_t u32Value, PRTUINT64U pTmpRsp)
7898{
7899    /* Decrement the stack pointer. */
7900 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7901 RTUINT64U NewRsp = *pTmpRsp;
7902 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pIemCpu, pCtx, &NewRsp, 4);
7903
7904    /* Write the dword the lazy way. */
7905 uint32_t *pu32Dst;
7906 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7907 if (rc == VINF_SUCCESS)
7908 {
7909 *pu32Dst = u32Value;
7910 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_STACK_W);
7911 }
7912
7913    /* Commit the new RSP value unless an access handler made trouble. */
7914 if (rc == VINF_SUCCESS)
7915 *pTmpRsp = NewRsp;
7916
7917 return rc;
7918}
7919
7920
7921/**
7922 * Pushes a qword onto the stack, using a temporary stack pointer.
7923 *
7924 * @returns Strict VBox status code.
7925 * @param pIemCpu The IEM per CPU data.
7926 * @param u64Value The value to push.
7927 * @param pTmpRsp Pointer to the temporary stack pointer.
7928 */
7929IEM_STATIC VBOXSTRICTRC iemMemStackPushU64Ex(PIEMCPU pIemCpu, uint64_t u64Value, PRTUINT64U pTmpRsp)
7930{
7931    /* Decrement the stack pointer. */
7932 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7933 RTUINT64U NewRsp = *pTmpRsp;
7934 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pIemCpu, pCtx, &NewRsp, 8);
7935
7936    /* Write the qword the lazy way. */
7937 uint64_t *pu64Dst;
7938 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7939 if (rc == VINF_SUCCESS)
7940 {
7941 *pu64Dst = u64Value;
7942 rc = iemMemCommitAndUnmap(pIemCpu, pu64Dst, IEM_ACCESS_STACK_W);
7943 }
7944
7945    /* Commit the new RSP value unless an access handler made trouble. */
7946 if (rc == VINF_SUCCESS)
7947 *pTmpRsp = NewRsp;
7948
7949 return rc;
7950}
7951
7952
7953/**
7954 * Pops a word from the stack, using a temporary stack pointer.
7955 *
7956 * @returns Strict VBox status code.
7957 * @param pIemCpu The IEM per CPU data.
7958 * @param pu16Value Where to store the popped value.
7959 * @param pTmpRsp Pointer to the temporary stack pointer.
7960 */
7961IEM_STATIC VBOXSTRICTRC iemMemStackPopU16Ex(PIEMCPU pIemCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp)
7962{
7963 /* Increment the stack pointer. */
7964 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7965 RTUINT64U NewRsp = *pTmpRsp;
7966 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pIemCpu, pCtx, &NewRsp, 2);
7967
7968    /* Read the word the lazy way. */
7969 uint16_t const *pu16Src;
7970 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
7971 if (rc == VINF_SUCCESS)
7972 {
7973 *pu16Value = *pu16Src;
7974 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
7975
7976 /* Commit the new RSP value. */
7977 if (rc == VINF_SUCCESS)
7978 *pTmpRsp = NewRsp;
7979 }
7980
7981 return rc;
7982}
7983
7984
7985/**
7986 * Pops a dword from the stack, using a temporary stack pointer.
7987 *
7988 * @returns Strict VBox status code.
7989 * @param pIemCpu The IEM per CPU data.
7990 * @param pu32Value Where to store the popped value.
7991 * @param pTmpRsp Pointer to the temporary stack pointer.
7992 */
7993IEM_STATIC VBOXSTRICTRC iemMemStackPopU32Ex(PIEMCPU pIemCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp)
7994{
7995 /* Increment the stack pointer. */
7996 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7997 RTUINT64U NewRsp = *pTmpRsp;
7998 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pIemCpu, pCtx, &NewRsp, 4);
7999
8000    /* Read the dword the lazy way. */
8001 uint32_t const *pu32Src;
8002 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
8003 if (rc == VINF_SUCCESS)
8004 {
8005 *pu32Value = *pu32Src;
8006 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
8007
8008 /* Commit the new RSP value. */
8009 if (rc == VINF_SUCCESS)
8010 *pTmpRsp = NewRsp;
8011 }
8012
8013 return rc;
8014}
8015
8016
8017/**
8018 * Pops a qword from the stack, using a temporary stack pointer.
8019 *
8020 * @returns Strict VBox status code.
8021 * @param pIemCpu The IEM per CPU data.
8022 * @param pu64Value Where to store the popped value.
8023 * @param pTmpRsp Pointer to the temporary stack pointer.
8024 */
8025IEM_STATIC VBOXSTRICTRC iemMemStackPopU64Ex(PIEMCPU pIemCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp)
8026{
8027 /* Increment the stack pointer. */
8028 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
8029 RTUINT64U NewRsp = *pTmpRsp;
8030 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pIemCpu, pCtx, &NewRsp, 8);
8031
8032 /* Read the qword the lazy way. */
8033 uint64_t const *pu64Src;
8034 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
8035 if (rcStrict == VINF_SUCCESS)
8036 {
8037 *pu64Value = *pu64Src;
8038 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
8039
8040 /* Commit the new RSP value. */
8041 if (rcStrict == VINF_SUCCESS)
8042 *pTmpRsp = NewRsp;
8043 }
8044
8045 return rcStrict;
8046}
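
/*
 * A minimal usage sketch for the *Ex helpers above (illustrative only, not
 * lifted from any instruction implementation): pop two qwords through a
 * temporary RSP and only commit the real RSP once both pops have succeeded.
 *
 *      PCPUMCTX     pCtx = pIemCpu->CTX_SUFF(pCtx);
 *      RTUINT64U    TmpRsp;
 *      uint64_t     uValue1, uValue2;
 *      TmpRsp.u = pCtx->rsp;
 *      VBOXSTRICTRC rcStrict = iemMemStackPopU64Ex(pIemCpu, &uValue1, &TmpRsp);
 *      if (rcStrict == VINF_SUCCESS)
 *          rcStrict = iemMemStackPopU64Ex(pIemCpu, &uValue2, &TmpRsp);
 *      if (rcStrict == VINF_SUCCESS)
 *          pCtx->rsp = TmpRsp.u;
 *      return rcStrict;
 */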
8047
8048
8049/**
8050 * Begin a special stack push (used by interrupts, exceptions and such).
8051 *
8052 * This will raise \#SS or \#PF if appropriate.
8053 *
8054 * @returns Strict VBox status code.
8055 * @param pIemCpu The IEM per CPU data.
8056 * @param cbMem The number of bytes to push onto the stack.
8057 * @param ppvMem Where to return the pointer to the stack memory.
8058 * As with the other memory functions this can be
8059 * direct access or bounce buffered access, so
8060 * don't commit any register changes until the
8061 * commit call succeeds.
8062 * @param puNewRsp Where to return the new RSP value. This must be
8063 * passed unchanged to
8064 * iemMemStackPushCommitSpecial().
8065 */
8066IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PIEMCPU pIemCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp)
8067{
8068 Assert(cbMem < UINT8_MAX);
8069 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
8070 RTGCPTR GCPtrTop = iemRegGetRspForPush(pIemCpu, pCtx, (uint8_t)cbMem, puNewRsp);
8071 return iemMemMap(pIemCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
8072}
8073
8074
8075/**
8076 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
8077 *
8078 * This will update the rSP.
8079 *
8080 * @returns Strict VBox status code.
8081 * @param pIemCpu The IEM per CPU data.
8082 * @param pvMem The pointer returned by
8083 * iemMemStackPushBeginSpecial().
8084 * @param uNewRsp The new RSP value returned by
8085 * iemMemStackPushBeginSpecial().
8086 */
8087IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PIEMCPU pIemCpu, void *pvMem, uint64_t uNewRsp)
8088{
8089 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pIemCpu, pvMem, IEM_ACCESS_STACK_W);
8090 if (rcStrict == VINF_SUCCESS)
8091 pIemCpu->CTX_SUFF(pCtx)->rsp = uNewRsp;
8092 return rcStrict;
8093}
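
/*
 * A minimal sketch of the begin/commit pairing, assuming a caller that wants
 * to build a small frame as one unit (uOffset and uSelector are illustrative
 * inputs, not taken from any real instruction implementation):
 *
 *      uint16_t    *pu16Frame;
 *      uint64_t     uNewRsp;
 *      VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pIemCpu, 6, (void **)&pu16Frame, &uNewRsp);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *      pu16Frame[0] = uOffset;
 *      pu16Frame[1] = uOffset >> 16;
 *      pu16Frame[2] = uSelector;
 *      return iemMemStackPushCommitSpecial(pIemCpu, pu16Frame, uNewRsp);
 */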
8094
8095
8096/**
8097 * Begin a special stack pop (used by iret, retf and such).
8098 *
8099 * This will raise \#SS or \#PF if appropriate.
8100 *
8101 * @returns Strict VBox status code.
8102 * @param pIemCpu The IEM per CPU data.
8103 * @param cbMem The number of bytes to pop off the stack.
8104 * @param ppvMem Where to return the pointer to the stack memory.
8105 * @param puNewRsp Where to return the new RSP value. This must be
8106 * passed unchanged to
8107 * iemMemStackPopCommitSpecial() or applied
8108 * manually if iemMemStackPopDoneSpecial() is used.
8109 */
8110IEM_STATIC VBOXSTRICTRC iemMemStackPopBeginSpecial(PIEMCPU pIemCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
8111{
8112 Assert(cbMem < UINT8_MAX);
8113 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
8114 RTGCPTR GCPtrTop = iemRegGetRspForPop(pIemCpu, pCtx, (uint8_t)cbMem, puNewRsp);
8115 return iemMemMap(pIemCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
8116}
8117
8118
8119/**
8120 * Continue a special stack pop (used by iret and retf).
8121 *
8122 * This will raise \#SS or \#PF if appropriate.
8123 *
8124 * @returns Strict VBox status code.
8125 * @param pIemCpu The IEM per CPU data.
8126 * @param cbMem The number of bytes to pop off the stack.
8127 * @param ppvMem Where to return the pointer to the stack memory.
8128 * @param puNewRsp Where to return the new RSP value. This must be
8129 * passed unchanged to
8130 * iemMemStackPopCommitSpecial() or applied
8131 * manually if iemMemStackPopDoneSpecial() is used.
8132 */
8133IEM_STATIC VBOXSTRICTRC iemMemStackPopContinueSpecial(PIEMCPU pIemCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
8134{
8135 Assert(cbMem < UINT8_MAX);
8136 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
8137 RTUINT64U NewRsp;
8138 NewRsp.u = *puNewRsp;
8139 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pIemCpu, pCtx, &NewRsp, 8);
8140 *puNewRsp = NewRsp.u;
8141 return iemMemMap(pIemCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
8142}
8143
8144
8145/**
8146 * Commits a special stack pop (started by iemMemStackPopBeginSpecial).
8147 *
8148 * This will update the rSP.
8149 *
8150 * @returns Strict VBox status code.
8151 * @param pIemCpu The IEM per CPU data.
8152 * @param pvMem The pointer returned by
8153 * iemMemStackPopBeginSpecial().
8154 * @param uNewRsp The new RSP value returned by
8155 * iemMemStackPopBeginSpecial().
8156 */
8157IEM_STATIC VBOXSTRICTRC iemMemStackPopCommitSpecial(PIEMCPU pIemCpu, void const *pvMem, uint64_t uNewRsp)
8158{
8159 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
8160 if (rcStrict == VINF_SUCCESS)
8161 pIemCpu->CTX_SUFF(pCtx)->rsp = uNewRsp;
8162 return rcStrict;
8163}
8164
8165
8166/**
8167 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
8168 * iemMemStackPopContinueSpecial).
8169 *
8170 * The caller will manually commit the rSP.
8171 *
8172 * @returns Strict VBox status code.
8173 * @param pIemCpu The IEM per CPU data.
8174 * @param pvMem The pointer returned by
8175 * iemMemStackPopBeginSpecial() or
8176 * iemMemStackPopContinueSpecial().
8177 */
8178IEM_STATIC VBOXSTRICTRC iemMemStackPopDoneSpecial(PIEMCPU pIemCpu, void const *pvMem)
8179{
8180 return iemMemCommitAndUnmap(pIemCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
8181}
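
/*
 * A minimal sketch of the begin + done + manual commit path, assuming a
 * caller that reads a word and then applies the new RSP itself
 * (iemMemStackPopCommitSpecial() would do the last two steps in one go):
 *
 *      uint64_t        uNewRsp;
 *      uint16_t const *pu16Val;
 *      VBOXSTRICTRC rcStrict = iemMemStackPopBeginSpecial(pIemCpu, sizeof(*pu16Val),
 *                                                         (void const **)&pu16Val, &uNewRsp);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *      uint16_t const uValue = *pu16Val;
 *      rcStrict = iemMemStackPopDoneSpecial(pIemCpu, pu16Val);
 *      if (rcStrict == VINF_SUCCESS)
 *          pIemCpu->CTX_SUFF(pCtx)->rsp = uNewRsp;
 *      return rcStrict;
 */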
8182
8183
8184/**
8185 * Fetches a system table byte.
8186 *
8187 * @returns Strict VBox status code.
8188 * @param pIemCpu The IEM per CPU data.
8189 * @param pbDst Where to return the byte.
8190 * @param iSegReg The index of the segment register to use for
8191 * this access. The base and limits are checked.
8192 * @param GCPtrMem The address of the guest memory.
8193 */
8194IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PIEMCPU pIemCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8195{
8196 /* The lazy approach for now... */
8197 uint8_t const *pbSrc;
8198 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pbSrc, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
8199 if (rc == VINF_SUCCESS)
8200 {
8201 *pbDst = *pbSrc;
8202 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pbSrc, IEM_ACCESS_SYS_R);
8203 }
8204 return rc;
8205}
8206
8207
8208/**
8209 * Fetches a system table word.
8210 *
8211 * @returns Strict VBox status code.
8212 * @param pIemCpu The IEM per CPU data.
8213 * @param pu16Dst Where to return the word.
8214 * @param iSegReg The index of the segment register to use for
8215 * this access. The base and limits are checked.
8216 * @param GCPtrMem The address of the guest memory.
8217 */
8218IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PIEMCPU pIemCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8219{
8220 /* The lazy approach for now... */
8221 uint16_t const *pu16Src;
8222 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
8223 if (rc == VINF_SUCCESS)
8224 {
8225 *pu16Dst = *pu16Src;
8226 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_SYS_R);
8227 }
8228 return rc;
8229}
8230
8231
8232/**
8233 * Fetches a system table dword.
8234 *
8235 * @returns Strict VBox status code.
8236 * @param pIemCpu The IEM per CPU data.
8237 * @param pu32Dst Where to return the dword.
8238 * @param iSegReg The index of the segment register to use for
8239 * this access. The base and limits are checked.
8240 * @param GCPtrMem The address of the guest memory.
8241 */
8242IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8243{
8244 /* The lazy approach for now... */
8245 uint32_t const *pu32Src;
8246 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
8247 if (rc == VINF_SUCCESS)
8248 {
8249 *pu32Dst = *pu32Src;
8250 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);
8251 }
8252 return rc;
8253}
8254
8255
8256/**
8257 * Fetches a system table qword.
8258 *
8259 * @returns Strict VBox status code.
8260 * @param pIemCpu The IEM per CPU data.
8261 * @param pu64Dst Where to return the qword.
8262 * @param iSegReg The index of the segment register to use for
8263 * this access. The base and limits are checked.
8264 * @param GCPtrMem The address of the guest memory.
8265 */
8266IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8267{
8268 /* The lazy approach for now... */
8269 uint64_t const *pu64Src;
8270 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
8271 if (rc == VINF_SUCCESS)
8272 {
8273 *pu64Dst = *pu64Src;
8274 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);
8275 }
8276 return rc;
8277}
8278
8279
8280/**
8281 * Fetches a descriptor table entry with caller specified error code.
8282 *
8283 * @returns Strict VBox status code.
8284 * @param pIemCpu The IEM per CPU.
8285 * @param pDesc Where to return the descriptor table entry.
8286 * @param uSel The selector whose table entry to fetch.
8287 * @param uXcpt The exception to raise on table lookup error.
8288 * @param uErrorCode The error code associated with the exception.
8289 */
8290IEM_STATIC VBOXSTRICTRC
8291iemMemFetchSelDescWithErr(PIEMCPU pIemCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode)
8292{
8293 AssertPtr(pDesc);
8294 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
8295
8296 /** @todo did the 286 require all 8 bytes to be accessible? */
8297 /*
8298 * Get the selector table base and check bounds.
8299 */
8300 RTGCPTR GCPtrBase;
8301 if (uSel & X86_SEL_LDT)
8302 {
8303 if ( !pCtx->ldtr.Attr.n.u1Present
8304 || (uSel | X86_SEL_RPL_LDT) > pCtx->ldtr.u32Limit )
8305 {
8306 Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
8307 uSel, pCtx->ldtr.u32Limit, pCtx->ldtr.Sel));
8308 return iemRaiseXcptOrInt(pIemCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
8309 uErrorCode, 0);
8310 }
8311
8312 Assert(pCtx->ldtr.Attr.n.u1Present);
8313 GCPtrBase = pCtx->ldtr.u64Base;
8314 }
8315 else
8316 {
8317 if ((uSel | X86_SEL_RPL_LDT) > pCtx->gdtr.cbGdt)
8318 {
8319 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pCtx->gdtr.cbGdt));
8320 return iemRaiseXcptOrInt(pIemCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
8321 uErrorCode, 0);
8322 }
8323 GCPtrBase = pCtx->gdtr.pGdt;
8324 }
8325
8326 /*
8327 * Read the legacy descriptor and maybe the long mode extensions if
8328 * required.
8329 */
8330 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pIemCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
8331 if (rcStrict == VINF_SUCCESS)
8332 {
8333 if ( !IEM_IS_LONG_MODE(pIemCpu)
8334 || pDesc->Legacy.Gen.u1DescType)
8335 pDesc->Long.au64[1] = 0;
8336 else if ((uint32_t)(uSel | X86_SEL_RPL_LDT) + 8 <= (uSel & X86_SEL_LDT ? pCtx->ldtr.u32Limit : pCtx->gdtr.cbGdt))
8337 rcStrict = iemMemFetchSysU64(pIemCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
8338 else
8339 {
8340 Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
8341 /** @todo is this the right exception? */
8342 return iemRaiseXcptOrInt(pIemCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
8343 }
8344 }
8345 return rcStrict;
8346}
8347
8348
8349/**
8350 * Fetches a descriptor table entry.
8351 *
8352 * @returns Strict VBox status code.
8353 * @param pIemCpu The IEM per CPU.
8354 * @param pDesc Where to return the descriptor table entry.
8355 * @param uSel The selector whose table entry to fetch.
8356 * @param uXcpt The exception to raise on table lookup error.
8357 */
8358IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PIEMCPU pIemCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt)
8359{
8360 return iemMemFetchSelDescWithErr(pIemCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
8361}
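
/*
 * A minimal sketch of typical use, assuming a caller that validates a
 * selector before loading it (the checks shown are illustrative and
 * incomplete):
 *
 *      IEMSELDESC   Desc;
 *      VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uSel, X86_XCPT_GP);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *      if (!Desc.Legacy.Gen.u1Present)
 *          return iemRaiseSelectorNotPresentBySelector(pIemCpu, uSel);
 */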
8362
8363
8364/**
8365 * Fakes a long mode stack selector for SS = 0.
8366 *
8367 * @param pDescSs Where to return the fake stack descriptor.
8368 * @param uDpl The DPL we want.
8369 */
8370IEM_STATIC void iemMemFakeStackSelDesc(PIEMSELDESC pDescSs, uint32_t uDpl)
8371{
8372 pDescSs->Long.au64[0] = 0;
8373 pDescSs->Long.au64[1] = 0;
8374 pDescSs->Long.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
8375 pDescSs->Long.Gen.u1DescType = 1; /* 1 = code / data, 0 = system. */
8376 pDescSs->Long.Gen.u2Dpl = uDpl;
8377 pDescSs->Long.Gen.u1Present = 1;
8378 pDescSs->Long.Gen.u1Long = 1;
8379}
8380
8381
8382/**
8383 * Marks the selector descriptor as accessed (only non-system descriptors).
8384 *
8385 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
8386 * will therefore skip the limit checks.
8387 *
8388 * @returns Strict VBox status code.
8389 * @param pIemCpu The IEM per CPU.
8390 * @param uSel The selector.
8391 */
8392IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PIEMCPU pIemCpu, uint16_t uSel)
8393{
8394 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
8395
8396 /*
8397 * Get the selector table base and calculate the entry address.
8398 */
8399 RTGCPTR GCPtr = uSel & X86_SEL_LDT
8400 ? pCtx->ldtr.u64Base
8401 : pCtx->gdtr.pGdt;
8402 GCPtr += uSel & X86_SEL_MASK;
8403
8404 /*
8405 * ASMAtomicBitSet will assert if the address is misaligned, so do some
8406 * ugly stuff to avoid this. This will make sure it's an atomic access
8407 * as well as more or less remove any question about 8-bit or 32-bit accesses.
8408 */
8409 VBOXSTRICTRC rcStrict;
8410 uint32_t volatile *pu32;
8411 if ((GCPtr & 3) == 0)
8412 {
8413 /* The normal case, map the 32-bit bits around the accessed bit (40). */
8414 GCPtr += 2 + 2;
8415 rcStrict = iemMemMap(pIemCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
8416 if (rcStrict != VINF_SUCCESS)
8417 return rcStrict;
8418 ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
8419 }
8420 else
8421 {
8422 /* The misaligned GDT/LDT case, map the whole thing. */
8423 rcStrict = iemMemMap(pIemCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
8424 if (rcStrict != VINF_SUCCESS)
8425 return rcStrict;
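        /* Worked example of the offset math: if ((uintptr_t)pu32 & 3) == 1, then
           pu32 + 3 is 32-bit aligned and covers descriptor bytes 3 thru 6, so the
           accessed bit (bit 40 of the descriptor) becomes bit 40 - 24 = 16 within
           that dword.  The other cases follow the same pattern. */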
8426 switch ((uintptr_t)pu32 & 3)
8427 {
8428 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
8429 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
8430 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
8431 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
8432 }
8433 }
8434
8435 return iemMemCommitAndUnmap(pIemCpu, (void *)pu32, IEM_ACCESS_SYS_RW);
8436}
8437
8438/** @} */
8439
8440
8441/*
8442 * Include the C/C++ implementation of instructions.
8443 */
8444#include "IEMAllCImpl.cpp.h"
8445
8446
8447
8448/** @name "Microcode" macros.
8449 *
8450 * The idea is that we should be able to use the same code to interpret
8451 * instructions as well as to recompile them. Thus this obfuscation.
8452 *
8453 * @{
8454 */
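
/*
 * A minimal sketch of how an instruction body composes these macros, assuming
 * a decoder function where pIemCpu is in scope; the register indexes are
 * picked arbitrarily here, real bodies decode them from the ModR/M byte:
 *
 *      IEM_MC_BEGIN(0, 1);
 *      IEM_MC_LOCAL(uint16_t, u16Value);
 *      IEM_MC_FETCH_GREG_U16(u16Value, X86_GREG_xAX);
 *      IEM_MC_STORE_GREG_U16(X86_GREG_xCX, u16Value);
 *      IEM_MC_ADVANCE_RIP();
 *      IEM_MC_END();
 *      return VINF_SUCCESS;
 */
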
8455#define IEM_MC_BEGIN(a_cArgs, a_cLocals) {
8456#define IEM_MC_END() }
8457#define IEM_MC_PAUSE() do {} while (0)
8458#define IEM_MC_CONTINUE() do {} while (0)
8459
8460/** Internal macro. */
8461#define IEM_MC_RETURN_ON_FAILURE(a_Expr) \
8462 do \
8463 { \
8464 VBOXSTRICTRC rcStrict2 = a_Expr; \
8465 if (rcStrict2 != VINF_SUCCESS) \
8466 return rcStrict2; \
8467 } while (0)
8468
8469#define IEM_MC_ADVANCE_RIP() iemRegUpdateRipAndClearRF(pIemCpu)
8470#define IEM_MC_REL_JMP_S8(a_i8) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS8(pIemCpu, a_i8))
8471#define IEM_MC_REL_JMP_S16(a_i16) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS16(pIemCpu, a_i16))
8472#define IEM_MC_REL_JMP_S32(a_i32) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS32(pIemCpu, a_i32))
8473#define IEM_MC_SET_RIP_U16(a_u16NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pIemCpu), (a_u16NewIP)))
8474#define IEM_MC_SET_RIP_U32(a_u32NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pIemCpu), (a_u32NewIP)))
8475#define IEM_MC_SET_RIP_U64(a_u64NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pIemCpu), (a_u64NewIP)))
8476
8477#define IEM_MC_RAISE_DIVIDE_ERROR() return iemRaiseDivideError(pIemCpu)
8478#define IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() \
8479 do { \
8480 if ((pIemCpu)->CTX_SUFF(pCtx)->cr0 & (X86_CR0_EM | X86_CR0_TS)) \
8481 return iemRaiseDeviceNotAvailable(pIemCpu); \
8482 } while (0)
8483#define IEM_MC_MAYBE_RAISE_FPU_XCPT() \
8484 do { \
8485 if ((pIemCpu)->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FSW & X86_FSW_ES) \
8486 return iemRaiseMathFault(pIemCpu); \
8487 } while (0)
8488#define IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT() \
8489 do { \
8490 if ( (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
8491 || !(pIemCpu->CTX_SUFF(pCtx)->cr4 & X86_CR4_OSFXSR) \
8492 || !IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fSse2) \
8493 return iemRaiseUndefinedOpcode(pIemCpu); \
8494 if (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_TS) \
8495 return iemRaiseDeviceNotAvailable(pIemCpu); \
8496 } while (0)
8497#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT() \
8498 do { \
8499 if ( ((pIemCpu)->CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
8500 || !IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fMmx) \
8501 return iemRaiseUndefinedOpcode(pIemCpu); \
8502 if (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_TS) \
8503 return iemRaiseDeviceNotAvailable(pIemCpu); \
8504 } while (0)
8505#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT_CHECK_SSE_OR_MMXEXT() \
8506 do { \
8507 if ( ((pIemCpu)->CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
8508 || ( !IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fSse \
8509 && !IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fAmdMmxExts) ) \
8510 return iemRaiseUndefinedOpcode(pIemCpu); \
8511 if (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_TS) \
8512 return iemRaiseDeviceNotAvailable(pIemCpu); \
8513 } while (0)
8514#define IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO() \
8515 do { \
8516 if (pIemCpu->uCpl != 0) \
8517 return iemRaiseGeneralProtectionFault0(pIemCpu); \
8518 } while (0)
8519
8520
8521#define IEM_MC_LOCAL(a_Type, a_Name) a_Type a_Name
8522#define IEM_MC_LOCAL_CONST(a_Type, a_Name, a_Value) a_Type const a_Name = (a_Value)
8523#define IEM_MC_REF_LOCAL(a_pRefArg, a_Local) (a_pRefArg) = &(a_Local)
8524#define IEM_MC_ARG(a_Type, a_Name, a_iArg) a_Type a_Name
8525#define IEM_MC_ARG_CONST(a_Type, a_Name, a_Value, a_iArg) a_Type const a_Name = (a_Value)
8526#define IEM_MC_ARG_LOCAL_REF(a_Type, a_Name, a_Local, a_iArg) a_Type const a_Name = &(a_Local)
8527#define IEM_MC_ARG_LOCAL_EFLAGS(a_pName, a_Name, a_iArg) \
8528 uint32_t a_Name; \
8529 uint32_t *a_pName = &a_Name
8530#define IEM_MC_COMMIT_EFLAGS(a_EFlags) \
8531 do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u = (a_EFlags); Assert((pIemCpu)->CTX_SUFF(pCtx)->eflags.u & X86_EFL_1); } while (0)
8532
8533#define IEM_MC_ASSIGN(a_VarOrArg, a_CVariableOrConst) (a_VarOrArg) = (a_CVariableOrConst)
8534#define IEM_MC_ASSIGN_TO_SMALLER IEM_MC_ASSIGN
8535
8536#define IEM_MC_FETCH_GREG_U8(a_u8Dst, a_iGReg) (a_u8Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
8537#define IEM_MC_FETCH_GREG_U8_ZX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
8538#define IEM_MC_FETCH_GREG_U8_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
8539#define IEM_MC_FETCH_GREG_U8_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
8540#define IEM_MC_FETCH_GREG_U8_SX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = (int8_t)iemGRegFetchU8(pIemCpu, (a_iGReg))
8541#define IEM_MC_FETCH_GREG_U8_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int8_t)iemGRegFetchU8(pIemCpu, (a_iGReg))
8542#define IEM_MC_FETCH_GREG_U8_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int8_t)iemGRegFetchU8(pIemCpu, (a_iGReg))
8543#define IEM_MC_FETCH_GREG_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU16(pIemCpu, (a_iGReg))
8544#define IEM_MC_FETCH_GREG_U16_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU16(pIemCpu, (a_iGReg))
8545#define IEM_MC_FETCH_GREG_U16_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU16(pIemCpu, (a_iGReg))
8546#define IEM_MC_FETCH_GREG_U16_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int16_t)iemGRegFetchU16(pIemCpu, (a_iGReg))
8547#define IEM_MC_FETCH_GREG_U16_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int16_t)iemGRegFetchU16(pIemCpu, (a_iGReg))
8548#define IEM_MC_FETCH_GREG_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU32(pIemCpu, (a_iGReg))
8549#define IEM_MC_FETCH_GREG_U32_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU32(pIemCpu, (a_iGReg))
8550#define IEM_MC_FETCH_GREG_U32_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int32_t)iemGRegFetchU32(pIemCpu, (a_iGReg))
8551#define IEM_MC_FETCH_GREG_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU64(pIemCpu, (a_iGReg))
8552#define IEM_MC_FETCH_GREG_U64_ZX_U64 IEM_MC_FETCH_GREG_U64
8553#define IEM_MC_FETCH_SREG_U16(a_u16Dst, a_iSReg) (a_u16Dst) = iemSRegFetchU16(pIemCpu, (a_iSReg))
8554#define IEM_MC_FETCH_SREG_ZX_U32(a_u32Dst, a_iSReg) (a_u32Dst) = iemSRegFetchU16(pIemCpu, (a_iSReg))
8555#define IEM_MC_FETCH_SREG_ZX_U64(a_u64Dst, a_iSReg) (a_u64Dst) = iemSRegFetchU16(pIemCpu, (a_iSReg))
8556#define IEM_MC_FETCH_CR0_U16(a_u16Dst) (a_u16Dst) = (uint16_t)(pIemCpu)->CTX_SUFF(pCtx)->cr0
8557#define IEM_MC_FETCH_CR0_U32(a_u32Dst) (a_u32Dst) = (uint32_t)(pIemCpu)->CTX_SUFF(pCtx)->cr0
8558#define IEM_MC_FETCH_CR0_U64(a_u64Dst) (a_u64Dst) = (pIemCpu)->CTX_SUFF(pCtx)->cr0
8559#define IEM_MC_FETCH_LDTR_U16(a_u16Dst) (a_u16Dst) = (pIemCpu)->CTX_SUFF(pCtx)->ldtr.Sel
8560#define IEM_MC_FETCH_LDTR_U32(a_u32Dst) (a_u32Dst) = (pIemCpu)->CTX_SUFF(pCtx)->ldtr.Sel
8561#define IEM_MC_FETCH_LDTR_U64(a_u64Dst) (a_u64Dst) = (pIemCpu)->CTX_SUFF(pCtx)->ldtr.Sel
8562#define IEM_MC_FETCH_TR_U16(a_u16Dst) (a_u16Dst) = (pIemCpu)->CTX_SUFF(pCtx)->tr.Sel
8563#define IEM_MC_FETCH_TR_U32(a_u32Dst) (a_u32Dst) = (pIemCpu)->CTX_SUFF(pCtx)->tr.Sel
8564#define IEM_MC_FETCH_TR_U64(a_u64Dst) (a_u64Dst) = (pIemCpu)->CTX_SUFF(pCtx)->tr.Sel
8565/** @note Not for IOPL or IF testing or modification. */
8566#define IEM_MC_FETCH_EFLAGS(a_EFlags) (a_EFlags) = (pIemCpu)->CTX_SUFF(pCtx)->eflags.u
8567#define IEM_MC_FETCH_EFLAGS_U8(a_EFlags) (a_EFlags) = (uint8_t)(pIemCpu)->CTX_SUFF(pCtx)->eflags.u
8568#define IEM_MC_FETCH_FSW(a_u16Fsw) (a_u16Fsw) = pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FSW
8569#define IEM_MC_FETCH_FCW(a_u16Fcw) (a_u16Fcw) = pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FCW
8570
8571#define IEM_MC_STORE_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8(pIemCpu, (a_iGReg)) = (a_u8Value)
8572#define IEM_MC_STORE_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) = (a_u16Value)
8573#define IEM_MC_STORE_GREG_U32(a_iGReg, a_u32Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) = (uint32_t)(a_u32Value) /* clear high bits. */
8574#define IEM_MC_STORE_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) = (a_u64Value)
8575#define IEM_MC_STORE_GREG_U8_CONST IEM_MC_STORE_GREG_U8
8576#define IEM_MC_STORE_GREG_U16_CONST IEM_MC_STORE_GREG_U16
8577#define IEM_MC_STORE_GREG_U32_CONST IEM_MC_STORE_GREG_U32
8578#define IEM_MC_STORE_GREG_U64_CONST IEM_MC_STORE_GREG_U64
8579#define IEM_MC_CLEAR_HIGH_GREG_U64(a_iGReg) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= UINT32_MAX
8580#define IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(a_pu32Dst) do { (a_pu32Dst)[1] = 0; } while (0)
8581#define IEM_MC_STORE_FPUREG_R80_SRC_REF(a_iSt, a_pr80Src) \
8582 do { pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[a_iSt].r80 = *(a_pr80Src); } while (0)
8583
8584#define IEM_MC_REF_GREG_U8(a_pu8Dst, a_iGReg) (a_pu8Dst) = iemGRegRefU8(pIemCpu, (a_iGReg))
8585#define IEM_MC_REF_GREG_U16(a_pu16Dst, a_iGReg) (a_pu16Dst) = (uint16_t *)iemGRegRef(pIemCpu, (a_iGReg))
8586/** @todo User of IEM_MC_REF_GREG_U32 needs to clear the high bits on commit.
8587 * Use IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF! */
8588#define IEM_MC_REF_GREG_U32(a_pu32Dst, a_iGReg) (a_pu32Dst) = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg))
8589#define IEM_MC_REF_GREG_U64(a_pu64Dst, a_iGReg) (a_pu64Dst) = (uint64_t *)iemGRegRef(pIemCpu, (a_iGReg))
8590/** @note Not for IOPL or IF testing or modification. */
8591#define IEM_MC_REF_EFLAGS(a_pEFlags) (a_pEFlags) = &(pIemCpu)->CTX_SUFF(pCtx)->eflags.u
8592
8593#define IEM_MC_ADD_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) += (a_u8Value)
8594#define IEM_MC_ADD_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) += (a_u16Value)
8595#define IEM_MC_ADD_GREG_U32(a_iGReg, a_u32Value) \
8596 do { \
8597 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
8598 *pu32Reg += (a_u32Value); \
8599 pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
8600 } while (0)
8601#define IEM_MC_ADD_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) += (a_u64Value)
8602
8603#define IEM_MC_SUB_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) -= (a_u8Value)
8604#define IEM_MC_SUB_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) -= (a_u16Value)
8605#define IEM_MC_SUB_GREG_U32(a_iGReg, a_u32Value) \
8606 do { \
8607 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
8608 *pu32Reg -= (a_u32Value); \
8609 pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
8610 } while (0)
8611#define IEM_MC_SUB_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) -= (a_u64Value)
8612
8613#define IEM_MC_ADD_GREG_U8_TO_LOCAL(a_u8Value, a_iGReg) do { (a_u8Value) += iemGRegFetchU8( pIemCpu, (a_iGReg)); } while (0)
8614#define IEM_MC_ADD_GREG_U16_TO_LOCAL(a_u16Value, a_iGReg) do { (a_u16Value) += iemGRegFetchU16(pIemCpu, (a_iGReg)); } while (0)
8615#define IEM_MC_ADD_GREG_U32_TO_LOCAL(a_u32Value, a_iGReg) do { (a_u32Value) += iemGRegFetchU32(pIemCpu, (a_iGReg)); } while (0)
8616#define IEM_MC_ADD_GREG_U64_TO_LOCAL(a_u64Value, a_iGReg) do { (a_u64Value) += iemGRegFetchU64(pIemCpu, (a_iGReg)); } while (0)
8617#define IEM_MC_ADD_LOCAL_S16_TO_EFF_ADDR(a_EffAddr, a_i16) do { (a_EffAddr) += (a_i16); } while (0)
8618#define IEM_MC_ADD_LOCAL_S32_TO_EFF_ADDR(a_EffAddr, a_i32) do { (a_EffAddr) += (a_i32); } while (0)
8619#define IEM_MC_ADD_LOCAL_S64_TO_EFF_ADDR(a_EffAddr, a_i64) do { (a_EffAddr) += (a_i64); } while (0)
8620
8621#define IEM_MC_AND_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) &= (a_u8Mask); } while (0)
8622#define IEM_MC_AND_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) &= (a_u16Mask); } while (0)
8623#define IEM_MC_AND_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
8624#define IEM_MC_AND_LOCAL_U64(a_u64Local, a_u64Mask) do { (a_u64Local) &= (a_u64Mask); } while (0)
8625
8626#define IEM_MC_AND_ARG_U16(a_u16Arg, a_u16Mask) do { (a_u16Arg) &= (a_u16Mask); } while (0)
8627#define IEM_MC_AND_ARG_U32(a_u32Arg, a_u32Mask) do { (a_u32Arg) &= (a_u32Mask); } while (0)
8628#define IEM_MC_AND_ARG_U64(a_u64Arg, a_u64Mask) do { (a_u64Arg) &= (a_u64Mask); } while (0)
8629
8630#define IEM_MC_OR_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) |= (a_u8Mask); } while (0)
8631#define IEM_MC_OR_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
8632
8633#define IEM_MC_SAR_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) >>= (a_cShift); } while (0)
8634#define IEM_MC_SAR_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) >>= (a_cShift); } while (0)
8635#define IEM_MC_SAR_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) >>= (a_cShift); } while (0)
8636
8637#define IEM_MC_SHL_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) <<= (a_cShift); } while (0)
8638#define IEM_MC_SHL_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) <<= (a_cShift); } while (0)
8639#define IEM_MC_SHL_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) <<= (a_cShift); } while (0)
8640
8641#define IEM_MC_AND_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
8642
8643#define IEM_MC_OR_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
8644
8645#define IEM_MC_AND_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= (a_u8Value)
8646#define IEM_MC_AND_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= (a_u16Value)
8647#define IEM_MC_AND_GREG_U32(a_iGReg, a_u32Value) \
8648 do { \
8649 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
8650 *pu32Reg &= (a_u32Value); \
8651 pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
8652 } while (0)
8653#define IEM_MC_AND_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= (a_u64Value)
8654
8655#define IEM_MC_OR_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) |= (a_u8Value)
8656#define IEM_MC_OR_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) |= (a_u16Value)
8657#define IEM_MC_OR_GREG_U32(a_iGReg, a_u32Value) \
8658 do { \
8659 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
8660 *pu32Reg |= (a_u32Value); \
8661 pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
8662 } while (0)
8663#define IEM_MC_OR_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) |= (a_u64Value)
8664
8665
8666/** @note Not for IOPL or IF modification. */
8667#define IEM_MC_SET_EFL_BIT(a_fBit) do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u |= (a_fBit); } while (0)
8668/** @note Not for IOPL or IF modification. */
8669#define IEM_MC_CLEAR_EFL_BIT(a_fBit) do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u &= ~(a_fBit); } while (0)
8670/** @note Not for IOPL or IF modification. */
8671#define IEM_MC_FLIP_EFL_BIT(a_fBit) do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u ^= (a_fBit); } while (0)
8672
8673#define IEM_MC_CLEAR_FSW_EX() do { (pIemCpu)->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FSW &= X86_FSW_C_MASK | X86_FSW_TOP_MASK; } while (0)
8674
8675
8676#define IEM_MC_FETCH_MREG_U64(a_u64Value, a_iMReg) \
8677 do { (a_u64Value) = pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx; } while (0)
8678#define IEM_MC_FETCH_MREG_U32(a_u32Value, a_iMReg) \
8679 do { (a_u32Value) = pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[0]; } while (0)
8680#define IEM_MC_STORE_MREG_U64(a_iMReg, a_u64Value) \
8681 do { pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (a_u64Value); } while (0)
8682#define IEM_MC_STORE_MREG_U32_ZX_U64(a_iMReg, a_u32Value) \
8683 do { pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (uint32_t)(a_u32Value); } while (0)
8684#define IEM_MC_REF_MREG_U64(a_pu64Dst, a_iMReg) \
8685 (a_pu64Dst) = (&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
8686#define IEM_MC_REF_MREG_U64_CONST(a_pu64Dst, a_iMReg) \
8687 (a_pu64Dst) = ((uint64_t const *)&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
8688#define IEM_MC_REF_MREG_U32_CONST(a_pu32Dst, a_iMReg) \
8689 (a_pu32Dst) = ((uint32_t const *)&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
8690
8691#define IEM_MC_FETCH_XREG_U128(a_u128Value, a_iXReg) \
8692 do { (a_u128Value) = pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm; } while (0)
8693#define IEM_MC_FETCH_XREG_U64(a_u64Value, a_iXReg) \
8694 do { (a_u64Value) = pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0]; } while (0)
8695#define IEM_MC_FETCH_XREG_U32(a_u32Value, a_iXReg) \
8696 do { (a_u32Value) = pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au32[0]; } while (0)
8697#define IEM_MC_STORE_XREG_U128(a_iXReg, a_u128Value) \
8698 do { pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm = (a_u128Value); } while (0)
8699#define IEM_MC_STORE_XREG_U64_ZX_U128(a_iXReg, a_u64Value) \
8700 do { pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); \
8701 pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
8702 } while (0)
8703#define IEM_MC_STORE_XREG_U32_ZX_U128(a_iXReg, a_u32Value) \
8704 do { pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (uint32_t)(a_u32Value); \
8705 pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
8706 } while (0)
8707#define IEM_MC_REF_XREG_U128(a_pu128Dst, a_iXReg) \
8708 (a_pu128Dst) = (&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm)
8709#define IEM_MC_REF_XREG_U128_CONST(a_pu128Dst, a_iXReg) \
8710 (a_pu128Dst) = ((uint128_t const *)&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm)
8711#define IEM_MC_REF_XREG_U64_CONST(a_pu64Dst, a_iXReg) \
8712 (a_pu64Dst) = ((uint64_t const *)&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0])
8713
8714#define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
8715 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem)))
8716#define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
8717 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem16)))
8718#define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
8719 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem32)))
8720
8721#define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
8722 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem)))
8723#define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
8724 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
8725#define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
8726 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, (uint16_t *)&(a_i16Dst), (a_iSeg), (a_GCPtrMem)))
8727
8728#define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
8729 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem)))
8730#define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
8731 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
8732#define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
8733 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, (uint32_t *)&(a_i32Dst), (a_iSeg), (a_GCPtrMem)))
8734
8735#define IEM_MC_FETCH_MEM_S32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8736 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataS32SxU64(pIemCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
8737
8738#define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8739 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
8740#define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
8741 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
8742#define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
8743 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64AlignedU128(pIemCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
8744#define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
8745 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, (uint64_t *)&(a_i64Dst), (a_iSeg), (a_GCPtrMem)))
8746
8747#define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
8748 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &(a_r32Dst).u32, (a_iSeg), (a_GCPtrMem)))
8749#define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
8750 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, &(a_r64Dst).au64[0], (a_iSeg), (a_GCPtrMem)))
8751#define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
8752 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataR80(pIemCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem)))
8753
8754#define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
8755 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128(pIemCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
8756#define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
8757 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128AlignedSse(pIemCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
8758
8759
8760
8761#define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
8762 do { \
8763 uint8_t u8Tmp; \
8764 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
8765 (a_u16Dst) = u8Tmp; \
8766 } while (0)
8767#define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
8768 do { \
8769 uint8_t u8Tmp; \
8770 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
8771 (a_u32Dst) = u8Tmp; \
8772 } while (0)
8773#define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8774 do { \
8775 uint8_t u8Tmp; \
8776 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
8777 (a_u64Dst) = u8Tmp; \
8778 } while (0)
8779#define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
8780 do { \
8781 uint16_t u16Tmp; \
8782 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
8783 (a_u32Dst) = u16Tmp; \
8784 } while (0)
8785#define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8786 do { \
8787 uint16_t u16Tmp; \
8788 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
8789 (a_u64Dst) = u16Tmp; \
8790 } while (0)
8791#define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8792 do { \
8793 uint32_t u32Tmp; \
8794 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
8795 (a_u64Dst) = u32Tmp; \
8796 } while (0)
8797
8798#define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
8799 do { \
8800 uint8_t u8Tmp; \
8801 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
8802 (a_u16Dst) = (int8_t)u8Tmp; \
8803 } while (0)
8804#define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
8805 do { \
8806 uint8_t u8Tmp; \
8807 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
8808 (a_u32Dst) = (int8_t)u8Tmp; \
8809 } while (0)
8810#define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8811 do { \
8812 uint8_t u8Tmp; \
8813 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
8814 (a_u64Dst) = (int8_t)u8Tmp; \
8815 } while (0)
8816#define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
8817 do { \
8818 uint16_t u16Tmp; \
8819 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
8820 (a_u32Dst) = (int16_t)u16Tmp; \
8821 } while (0)
8822#define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8823 do { \
8824 uint16_t u16Tmp; \
8825 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
8826 (a_u64Dst) = (int16_t)u16Tmp; \
8827 } while (0)
8828#define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8829 do { \
8830 uint32_t u32Tmp; \
8831 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
8832 (a_u64Dst) = (int32_t)u32Tmp; \
8833 } while (0)
8834
8835#define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
8836 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value)))
8837#define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
8838 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value)))
8839#define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
8840 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value)))
8841#define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
8842 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value)))
8843
8844#define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
8845 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u8C)))
8846#define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
8847 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u16C)))
8848#define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
8849 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u32C)))
8850#define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
8851 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u64C)))
8852
8853#define IEM_MC_STORE_MEM_I8_CONST_BY_REF( a_pi8Dst, a_i8C) *(a_pi8Dst) = (a_i8C)
8854#define IEM_MC_STORE_MEM_I16_CONST_BY_REF(a_pi16Dst, a_i16C) *(a_pi16Dst) = (a_i16C)
8855#define IEM_MC_STORE_MEM_I32_CONST_BY_REF(a_pi32Dst, a_i32C) *(a_pi32Dst) = (a_i32C)
8856#define IEM_MC_STORE_MEM_I64_CONST_BY_REF(a_pi64Dst, a_i64C) *(a_pi64Dst) = (a_i64C)
8857#define IEM_MC_STORE_MEM_NEG_QNAN_R32_BY_REF(a_pr32Dst) (a_pr32Dst)->u32 = UINT32_C(0xffc00000)
8858#define IEM_MC_STORE_MEM_NEG_QNAN_R64_BY_REF(a_pr64Dst) (a_pr64Dst)->au64[0] = UINT64_C(0xfff8000000000000)
8859#define IEM_MC_STORE_MEM_NEG_QNAN_R80_BY_REF(a_pr80Dst) \
8860 do { \
8861 (a_pr80Dst)->au64[0] = UINT64_C(0xc000000000000000); \
8862 (a_pr80Dst)->au16[4] = UINT16_C(0xffff); \
8863 } while (0)
8864
8865#define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
8866 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
8867#define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
8868 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128AlignedSse(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
8869
8870
8871#define IEM_MC_PUSH_U16(a_u16Value) \
8872 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU16(pIemCpu, (a_u16Value)))
8873#define IEM_MC_PUSH_U32(a_u32Value) \
8874 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32(pIemCpu, (a_u32Value)))
8875#define IEM_MC_PUSH_U32_SREG(a_u32Value) \
8876 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32SReg(pIemCpu, (a_u32Value)))
8877#define IEM_MC_PUSH_U64(a_u64Value) \
8878 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU64(pIemCpu, (a_u64Value)))
8879
8880#define IEM_MC_POP_U16(a_pu16Value) \
8881 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU16(pIemCpu, (a_pu16Value)))
8882#define IEM_MC_POP_U32(a_pu32Value) \
8883 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU32(pIemCpu, (a_pu32Value)))
8884#define IEM_MC_POP_U64(a_pu64Value) \
8885 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU64(pIemCpu, (a_pu64Value)))
8886
8887/** Maps guest memory for direct or bounce buffered access.
8888 * The purpose is to pass it to an operand implementation, thus the a_iArg.
8889 * @remarks May return.
8890 */
8891#define IEM_MC_MEM_MAP(a_pMem, a_fAccess, a_iSeg, a_GCPtrMem, a_iArg) \
8892 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pIemCpu, (void **)&(a_pMem), sizeof(*(a_pMem)), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
8893
8894/** Maps guest memory for direct or bounce buffered access.
8895 * The purpose is to pass it to an operand implementation, thus the a_iArg.
8896 * @remarks May return.
8897 */
8898#define IEM_MC_MEM_MAP_EX(a_pvMem, a_fAccess, a_cbMem, a_iSeg, a_GCPtrMem, a_iArg) \
8899 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pIemCpu, (void **)&(a_pvMem), (a_cbMem), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
8900
8901/** Commits the memory and unmaps the guest memory.
8902 * @remarks May return.
8903 */
8904#define IEM_MC_MEM_COMMIT_AND_UNMAP(a_pvMem, a_fAccess) \
8905 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pIemCpu, (a_pvMem), (a_fAccess)))
8906
8907/** Commits the memory and unmaps the guest memory unless the FPU status word
8908 * (@a a_u16FSW) and the FPU control word indicate a pending exception that
8909 * would prevent the FPU store from storing anything.
8910 *
8911 * The current understanding is that \#O, \#U, \#IA and \#IS will prevent a
8912 * store, while \#P will not.
8913 *
8914 * @remarks May in theory return - for now.
8915 */
8916#define IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(a_pvMem, a_fAccess, a_u16FSW) \
8917 do { \
8918 if ( !(a_u16FSW & X86_FSW_ES) \
8919 || !( (a_u16FSW & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE)) \
8920 & ~(pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FCW & X86_FCW_MASK_ALL) ) ) \
8921 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pIemCpu, (a_pvMem), (a_fAccess))); \
8922 } while (0)
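
/*
 * A minimal sketch of the map / modify / commit pattern that the
 * IEM_MC_MEM_MAP and IEM_MC_MEM_COMMIT_AND_UNMAP pair is meant for, assuming
 * pu16Dst, u16Src and pEFlags were declared with the IEM_MC_ARG* macros,
 * iEffSeg/GCPtrEffDst come from the address calculation, and pfnWorkerU16
 * stands in for some assembly worker:
 *
 *      IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, iEffSeg, GCPtrEffDst, 0);
 *      IEM_MC_CALL_VOID_AIMPL_3(pfnWorkerU16, pu16Dst, u16Src, pEFlags);
 *      IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
 */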
8923
8924/** Calculate effective address from R/M. */
8925#define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
8926 IEM_MC_RETURN_ON_FAILURE(iemOpHlpCalcRmEffAddr(pIemCpu, (bRm), (cbImm), &(a_GCPtrEff)))
8927
8928#define IEM_MC_CALL_VOID_AIMPL_0(a_pfn) (a_pfn)()
8929#define IEM_MC_CALL_VOID_AIMPL_1(a_pfn, a0) (a_pfn)((a0))
8930#define IEM_MC_CALL_VOID_AIMPL_2(a_pfn, a0, a1) (a_pfn)((a0), (a1))
8931#define IEM_MC_CALL_VOID_AIMPL_3(a_pfn, a0, a1, a2) (a_pfn)((a0), (a1), (a2))
8932#define IEM_MC_CALL_VOID_AIMPL_4(a_pfn, a0, a1, a2, a3) (a_pfn)((a0), (a1), (a2), (a3))
8933#define IEM_MC_CALL_AIMPL_3(a_rc, a_pfn, a0, a1, a2) (a_rc) = (a_pfn)((a0), (a1), (a2))
8934#define IEM_MC_CALL_AIMPL_4(a_rc, a_pfn, a0, a1, a2, a3) (a_rc) = (a_pfn)((a0), (a1), (a2), (a3))
8935
8936/**
8937 * Defers the rest of the instruction emulation to a C implementation routine
8938 * and returns, only taking the standard parameters.
8939 *
8940 * @param a_pfnCImpl The pointer to the C routine.
8941 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
8942 */
8943#define IEM_MC_CALL_CIMPL_0(a_pfnCImpl) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode)
8944
8945/**
8946 * Defers the rest of instruction emulation to a C implementation routine and
8947 * returns, taking one argument in addition to the standard ones.
8948 *
8949 * @param a_pfnCImpl The pointer to the C routine.
8950 * @param a0 The argument.
8951 */
8952#define IEM_MC_CALL_CIMPL_1(a_pfnCImpl, a0) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0)
8953
8954/**
8955 * Defers the rest of the instruction emulation to a C implementation routine
8956 * and returns, taking two arguments in addition to the standard ones.
8957 *
8958 * @param a_pfnCImpl The pointer to the C routine.
8959 * @param a0 The first extra argument.
8960 * @param a1 The second extra argument.
8961 */
8962#define IEM_MC_CALL_CIMPL_2(a_pfnCImpl, a0, a1) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1)
8963
8964/**
8965 * Defers the rest of the instruction emulation to a C implementation routine
8966 * and returns, taking three arguments in addition to the standard ones.
8967 *
8968 * @param a_pfnCImpl The pointer to the C routine.
8969 * @param a0 The first extra argument.
8970 * @param a1 The second extra argument.
8971 * @param a2 The third extra argument.
8972 */
8973#define IEM_MC_CALL_CIMPL_3(a_pfnCImpl, a0, a1, a2) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2)
8974
8975/**
8976 * Defers the rest of the instruction emulation to a C implementation routine
8977 * and returns, taking four arguments in addition to the standard ones.
8978 *
8979 * @param a_pfnCImpl The pointer to the C routine.
8980 * @param a0 The first extra argument.
8981 * @param a1 The second extra argument.
8982 * @param a2 The third extra argument.
8983 * @param a3 The fourth extra argument.
8984 */
8985#define IEM_MC_CALL_CIMPL_4(a_pfnCImpl, a0, a1, a2, a3) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2, a3)
8986
8987/**
8988 * Defers the rest of the instruction emulation to a C implementation routine
8989 * and returns, taking five arguments in addition to the standard ones.
8990 *
8991 * @param a_pfnCImpl The pointer to the C routine.
8992 * @param a0 The first extra argument.
8993 * @param a1 The second extra argument.
8994 * @param a2 The third extra argument.
8995 * @param a3 The fourth extra argument.
8996 * @param a4 The fifth extra argument.
8997 */
8998#define IEM_MC_CALL_CIMPL_5(a_pfnCImpl, a0, a1, a2, a3, a4) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2, a3, a4)
8999
9000/**
9001 * Defers the entire instruction emulation to a C implementation routine and
9002 * returns, only taking the standard parameters.
9003 *
9004 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
9005 *
9006 * @param a_pfnCImpl The pointer to the C routine.
9007 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
9008 */
9009#define IEM_MC_DEFER_TO_CIMPL_0(a_pfnCImpl) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode)
9010
9011/**
9012 * Defers the entire instruction emulation to a C implementation routine and
9013 * returns, taking one argument in addition to the standard ones.
9014 *
9015 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
9016 *
9017 * @param a_pfnCImpl The pointer to the C routine.
9018 * @param a0 The argument.
9019 */
9020#define IEM_MC_DEFER_TO_CIMPL_1(a_pfnCImpl, a0) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0)
9021
9022/**
9023 * Defers the entire instruction emulation to a C implementation routine and
9024 * returns, taking two arguments in addition to the standard ones.
9025 *
9026 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
9027 *
9028 * @param a_pfnCImpl The pointer to the C routine.
9029 * @param a0 The first extra argument.
9030 * @param a1 The second extra argument.
9031 */
9032#define IEM_MC_DEFER_TO_CIMPL_2(a_pfnCImpl, a0, a1) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1)
9033
9034/**
9035 * Defers the entire instruction emulation to a C implementation routine and
9036 * returns, taking three arguments in addition to the standard ones.
9037 *
9038 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
9039 *
9040 * @param a_pfnCImpl The pointer to the C routine.
9041 * @param a0 The first extra argument.
9042 * @param a1 The second extra argument.
9043 * @param a2 The third extra argument.
9044 */
9045#define IEM_MC_DEFER_TO_CIMPL_3(a_pfnCImpl, a0, a1, a2) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2)
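
/*
 * A minimal sketch contrasting the CALL and DEFER families, with
 * iemCImpl_ExampleWorker standing in for a hypothetical C implementation
 * routine:
 *
 *      (whole instruction handed over, no IEM_MC_BEGIN/IEM_MC_END around it)
 *      return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_ExampleWorker);
 *
 *      (only the remainder handed over from inside a microcode block)
 *      IEM_MC_BEGIN(0, 0);
 *      IEM_MC_CALL_CIMPL_0(iemCImpl_ExampleWorker);
 *      IEM_MC_END();
 */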
9046
9047/**
9048 * Calls a FPU assembly implementation taking one visible argument.
9049 *
9050 * @param a_pfnAImpl Pointer to the assembly FPU routine.
9051 * @param a0 The first extra argument.
9052 */
9053#define IEM_MC_CALL_FPU_AIMPL_1(a_pfnAImpl, a0) \
9054 do { \
9055 iemFpuPrepareUsage(pIemCpu); \
9056 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0)); \
9057 } while (0)
9058
9059/**
9060 * Calls a FPU assembly implementation taking two visible arguments.
9061 *
9062 * @param a_pfnAImpl Pointer to the assembly FPU routine.
9063 * @param a0 The first extra argument.
9064 * @param a1 The second extra argument.
9065 */
9066#define IEM_MC_CALL_FPU_AIMPL_2(a_pfnAImpl, a0, a1) \
9067 do { \
9068 iemFpuPrepareUsage(pIemCpu); \
9069 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
9070 } while (0)
9071
9072/**
9073 * Calls a FPU assembly implementation taking three visible arguments.
9074 *
9075 * @param a_pfnAImpl Pointer to the assembly FPU routine.
9076 * @param a0 The first extra argument.
9077 * @param a1 The second extra argument.
9078 * @param a2 The third extra argument.
9079 */
9080#define IEM_MC_CALL_FPU_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
9081 do { \
9082 iemFpuPrepareUsage(pIemCpu); \
9083 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
9084 } while (0)
9085
9086#define IEM_MC_SET_FPU_RESULT(a_FpuData, a_FSW, a_pr80Value) \
9087 do { \
9088 (a_FpuData).FSW = (a_FSW); \
9089 (a_FpuData).r80Result = *(a_pr80Value); \
9090 } while (0)
9091
9092/** Pushes FPU result onto the stack. */
9093#define IEM_MC_PUSH_FPU_RESULT(a_FpuData) \
9094 iemFpuPushResult(pIemCpu, &a_FpuData)
9095/** Pushes FPU result onto the stack and sets the FPUDP. */
9096#define IEM_MC_PUSH_FPU_RESULT_MEM_OP(a_FpuData, a_iEffSeg, a_GCPtrEff) \
9097 iemFpuPushResultWithMemOp(pIemCpu, &a_FpuData, a_iEffSeg, a_GCPtrEff)
9098
9099/** Replaces ST0 with the first value and pushes the second value onto the FPU stack. */
9100#define IEM_MC_PUSH_FPU_RESULT_TWO(a_FpuDataTwo) \
9101 iemFpuPushResultTwo(pIemCpu, &a_FpuDataTwo)
9102
9103/** Stores FPU result in a stack register. */
9104#define IEM_MC_STORE_FPU_RESULT(a_FpuData, a_iStReg) \
9105 iemFpuStoreResult(pIemCpu, &a_FpuData, a_iStReg)
9106/** Stores FPU result in a stack register and pops the stack. */
9107#define IEM_MC_STORE_FPU_RESULT_THEN_POP(a_FpuData, a_iStReg) \
9108 iemFpuStoreResultThenPop(pIemCpu, &a_FpuData, a_iStReg)
9109/** Stores FPU result in a stack register and sets the FPUDP. */
9110#define IEM_MC_STORE_FPU_RESULT_MEM_OP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
9111 iemFpuStoreResultWithMemOp(pIemCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
9112/** Stores FPU result in a stack register, sets the FPUDP, and pops the
9113 * stack. */
9114#define IEM_MC_STORE_FPU_RESULT_WITH_MEM_OP_THEN_POP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
9115 iemFpuStoreResultWithMemOpThenPop(pIemCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
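
/*
 * A minimal sketch of the usual result flow, assuming FpuRes/pFpuRes and the
 * two source operand pointers were set up earlier with IEM_MC_LOCAL and
 * IEM_MC_ARG*, and pfnR80ByR80 names some assembly worker (all names
 * illustrative):
 *
 *      IEM_MC_CALL_FPU_AIMPL_3(pfnR80ByR80, pFpuRes, pr80Value1, pr80Value2);
 *      IEM_MC_STORE_FPU_RESULT(FpuRes, 0);
 */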
9116
9117/** Only update the FOP, FPUIP, and FPUCS. (For FNOP.) */
9118#define IEM_MC_UPDATE_FPU_OPCODE_IP() \
9119 iemFpuUpdateOpcodeAndIp(pIemCpu)
9120/** Free a stack register (for FFREE and FFREEP). */
9121#define IEM_MC_FPU_STACK_FREE(a_iStReg) \
9122 iemFpuStackFree(pIemCpu, a_iStReg)
9123/** Increment the FPU stack pointer. */
9124#define IEM_MC_FPU_STACK_INC_TOP() \
9125 iemFpuStackIncTop(pIemCpu)
9126/** Decrement the FPU stack pointer. */
9127#define IEM_MC_FPU_STACK_DEC_TOP() \
9128 iemFpuStackDecTop(pIemCpu)
9129
9130/** Updates the FSW, FOP, FPUIP, and FPUCS. */
9131#define IEM_MC_UPDATE_FSW(a_u16FSW) \
9132 iemFpuUpdateFSW(pIemCpu, a_u16FSW)
9133/** Updates the FSW with a constant value as well as FOP, FPUIP, and FPUCS. */
9134#define IEM_MC_UPDATE_FSW_CONST(a_u16FSW) \
9135 iemFpuUpdateFSW(pIemCpu, a_u16FSW)
9136/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS. */
9137#define IEM_MC_UPDATE_FSW_WITH_MEM_OP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
9138 iemFpuUpdateFSWWithMemOp(pIemCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
9139/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack. */
9140#define IEM_MC_UPDATE_FSW_THEN_POP(a_u16FSW) \
9141 iemFpuUpdateFSWThenPop(pIemCpu, a_u16FSW)
9142/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP and FPUDS, and then pops the
9143 * stack. */
9144#define IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
9145 iemFpuUpdateFSWWithMemOpThenPop(pIemCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
9146/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack twice. */
9147#define IEM_MC_UPDATE_FSW_THEN_POP_POP(a_u16FSW) \
9148 iemFpuUpdateFSWThenPopPop(pIemCpu, a_u16FSW)
9149
9150/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. */
9151#define IEM_MC_FPU_STACK_UNDERFLOW(a_iStDst) \
9152 iemFpuStackUnderflow(pIemCpu, a_iStDst)
9153/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
9154 * stack. */
9155#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP(a_iStDst) \
9156 iemFpuStackUnderflowThenPop(pIemCpu, a_iStDst)
9157/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
9158 * FPUDS. */
9159#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
9160 iemFpuStackUnderflowWithMemOp(pIemCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
9161/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
9162 * FPUDS. Pops stack. */
9163#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
9164 iemFpuStackUnderflowWithMemOpThenPop(pIemCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
9165/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
9166 * stack twice. */
9167#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP_POP() \
9168 iemFpuStackUnderflowThenPopPop(pIemCpu)
9169/** Raises a FPU stack underflow exception for an instruction pushing a result
9170 * value onto the stack. Sets FPUIP, FPUCS and FOP. */
9171#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW() \
9172 iemFpuStackPushUnderflow(pIemCpu)
9173/** Raises a FPU stack underflow exception for an instruction pushing a result
9174 * value onto the stack and replacing ST0. Sets FPUIP, FPUCS and FOP. */
9175#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW_TWO() \
9176 iemFpuStackPushUnderflowTwo(pIemCpu)
9177
9178/** Raises a FPU stack overflow exception as part of a push attempt. Sets
9179 * FPUIP, FPUCS and FOP. */
9180#define IEM_MC_FPU_STACK_PUSH_OVERFLOW() \
9181 iemFpuStackPushOverflow(pIemCpu)
9182/** Raises a FPU stack overflow exception as part of a push attempt. Sets
9183 * FPUIP, FPUCS, FOP, FPUDP and FPUDS. */
9184#define IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(a_iEffSeg, a_GCPtrEff) \
9185 iemFpuStackPushOverflowWithMemOp(pIemCpu, a_iEffSeg, a_GCPtrEff)
9186/** Indicates that we (might) have modified the FPU state. */
9187#define IEM_MC_USED_FPU() \
9188 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_FPU_REM)
9189
9190/**
9191 * Calls a MMX assembly implementation taking two visible arguments.
9192 *
9193 * @param a_pfnAImpl Pointer to the assembly MMX routine.
9194 * @param a0 The first extra argument.
9195 * @param a1 The second extra argument.
9196 */
9197#define IEM_MC_CALL_MMX_AIMPL_2(a_pfnAImpl, a0, a1) \
9198 do { \
9199 iemFpuPrepareUsage(pIemCpu); \
9200 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
9201 } while (0)
9202
9203/**
9204 * Calls a MMX assembly implementation taking three visible arguments.
9205 *
9206 * @param a_pfnAImpl Pointer to the assembly MMX routine.
9207 * @param a0 The first extra argument.
9208 * @param a1 The second extra argument.
9209 * @param a2 The third extra argument.
9210 */
9211#define IEM_MC_CALL_MMX_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
9212 do { \
9213 iemFpuPrepareUsage(pIemCpu); \
9214 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
9215 } while (0)
9216
9217
9218/**
9219 * Calls a SSE assembly implementation taking two visible arguments.
9220 *
9221 * @param a_pfnAImpl Pointer to the assembly SSE routine.
9222 * @param a0 The first extra argument.
9223 * @param a1 The second extra argument.
9224 */
9225#define IEM_MC_CALL_SSE_AIMPL_2(a_pfnAImpl, a0, a1) \
9226 do { \
9227 iemFpuPrepareUsageSse(pIemCpu); \
9228 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
9229 } while (0)
9230
9231/**
9232 * Calls a SSE assembly implementation taking three visible arguments.
9233 *
9234 * @param a_pfnAImpl Pointer to the assembly SSE routine.
9235 * @param a0 The first extra argument.
9236 * @param a1 The second extra argument.
9237 * @param a2 The third extra argument.
9238 */
9239#define IEM_MC_CALL_SSE_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
9240 do { \
9241 iemFpuPrepareUsageSse(pIemCpu); \
9242 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
9243 } while (0)
9244
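/* For illustration: given a hypothetical two-operand SSE worker with the shape
 *     void iemAImpl_xxx_u128(PX86FXSTATE pFpuState, PRTUINT128U puDst, PCRTUINT128U puSrc);
 * a microcode block would invoke it as
 *     IEM_MC_CALL_SSE_AIMPL_2(iemAImpl_xxx_u128, puDst, puSrc);
 * which, per the macro above, first calls iemFpuPrepareUsageSse(pIemCpu) to make the
 * guest SSE state usable on the host and then calls the worker with the x87/XMM area
 * of the guest extended state as its first argument. */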
9245
9246/** @note Not for IOPL or IF testing. */
9247#define IEM_MC_IF_EFL_BIT_SET(a_fBit) if (pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit)) {
9248/** @note Not for IOPL or IF testing. */
9249#define IEM_MC_IF_EFL_BIT_NOT_SET(a_fBit) if (!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit))) {
9250/** @note Not for IOPL or IF testing. */
9251#define IEM_MC_IF_EFL_ANY_BITS_SET(a_fBits) if (pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBits)) {
9252/** @note Not for IOPL or IF testing. */
9253#define IEM_MC_IF_EFL_NO_BITS_SET(a_fBits) if (!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBits))) {
9254/** @note Not for IOPL or IF testing. */
9255#define IEM_MC_IF_EFL_BITS_NE(a_fBit1, a_fBit2) \
9256 if ( !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
9257 != !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
9258/** @note Not for IOPL or IF testing. */
9259#define IEM_MC_IF_EFL_BITS_EQ(a_fBit1, a_fBit2) \
9260 if ( !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
9261 == !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
9262/** @note Not for IOPL or IF testing. */
9263#define IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(a_fBit, a_fBit1, a_fBit2) \
9264 if ( (pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit)) \
9265 || !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
9266 != !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
9267/** @note Not for IOPL or IF testing. */
9268#define IEM_MC_IF_EFL_BIT_NOT_SET_AND_BITS_EQ(a_fBit, a_fBit1, a_fBit2) \
9269 if ( !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit)) \
9270 && !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
9271 == !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
9272#define IEM_MC_IF_CX_IS_NZ() if (pIemCpu->CTX_SUFF(pCtx)->cx != 0) {
9273#define IEM_MC_IF_ECX_IS_NZ() if (pIemCpu->CTX_SUFF(pCtx)->ecx != 0) {
9274#define IEM_MC_IF_RCX_IS_NZ() if (pIemCpu->CTX_SUFF(pCtx)->rcx != 0) {
9275/** @note Not for IOPL or IF testing. */
9276#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
9277 if ( pIemCpu->CTX_SUFF(pCtx)->cx != 0 \
9278 && (pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
9279/** @note Not for IOPL or IF testing. */
9280#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
9281 if ( pIemCpu->CTX_SUFF(pCtx)->ecx != 0 \
9282 && (pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
9283/** @note Not for IOPL or IF testing. */
9284#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
9285 if ( pIemCpu->CTX_SUFF(pCtx)->rcx != 0 \
9286 && (pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
9287/** @note Not for IOPL or IF testing. */
9288#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
9289 if ( pIemCpu->CTX_SUFF(pCtx)->cx != 0 \
9290 && !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
9291/** @note Not for IOPL or IF testing. */
9292#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
9293 if ( pIemCpu->CTX_SUFF(pCtx)->ecx != 0 \
9294 && !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
9295/** @note Not for IOPL or IF testing. */
9296#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
9297 if ( pIemCpu->CTX_SUFF(pCtx)->rcx != 0 \
9298 && !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
9299#define IEM_MC_IF_LOCAL_IS_Z(a_Local) if ((a_Local) == 0) {
9300#define IEM_MC_IF_GREG_BIT_SET(a_iGReg, a_iBitNo) if (*(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) & RT_BIT_64(a_iBitNo)) {
9301#define IEM_MC_IF_FPUREG_NOT_EMPTY(a_iSt) \
9302 if (iemFpuStRegNotEmpty(pIemCpu, (a_iSt)) == VINF_SUCCESS) {
9303#define IEM_MC_IF_FPUREG_IS_EMPTY(a_iSt) \
9304 if (iemFpuStRegNotEmpty(pIemCpu, (a_iSt)) != VINF_SUCCESS) {
9305#define IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(a_pr80Dst, a_iSt) \
9306 if (iemFpuStRegNotEmptyRef(pIemCpu, (a_iSt), &(a_pr80Dst)) == VINF_SUCCESS) {
9307#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(a_pr80Dst0, a_iSt0, a_pr80Dst1, a_iSt1) \
9308 if (iemFpu2StRegsNotEmptyRef(pIemCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1), &(a_pr80Dst1)) == VINF_SUCCESS) {
9309#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(a_pr80Dst0, a_iSt0, a_iSt1) \
9310 if (iemFpu2StRegsNotEmptyRefFirst(pIemCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1)) == VINF_SUCCESS) {
9311#define IEM_MC_IF_FCW_IM() \
9312 if (pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FCW & X86_FCW_IM) {
9313
9314#define IEM_MC_ELSE() } else {
9315#define IEM_MC_ENDIF() } do {} while (0)
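/* For illustration: the IF/ELSE/ENDIF macros supply the braces themselves, so a
 * conditional body (sketch only, using MC statements defined earlier in this file)
 * composes like
 *     IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF)
 *         IEM_MC_REL_JMP_S8(i8Imm);
 *     IEM_MC_ELSE()
 *         IEM_MC_ADVANCE_RIP();
 *     IEM_MC_ENDIF();
 * which expands to a plain if/else on pCtx->eflags.u, with the trailing
 * do {} while (0) from IEM_MC_ENDIF() eating the final semicolon. */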
9316
9317/** @} */
9318
9319
9320/** @name Opcode Debug Helpers.
9321 * @{
9322 */
9323#ifdef DEBUG
9324# define IEMOP_MNEMONIC(a_szMnemonic) \
9325 Log4(("decode - %04x:%RGv %s%s [#%u]\n", pIemCpu->CTX_SUFF(pCtx)->cs.Sel, pIemCpu->CTX_SUFF(pCtx)->rip, \
9326 pIemCpu->fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, pIemCpu->cInstructions))
9327# define IEMOP_MNEMONIC2(a_szMnemonic, a_szOps) \
9328 Log4(("decode - %04x:%RGv %s%s %s [#%u]\n", pIemCpu->CTX_SUFF(pCtx)->cs.Sel, pIemCpu->CTX_SUFF(pCtx)->rip, \
9329 pIemCpu->fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, a_szOps, pIemCpu->cInstructions))
9330#else
9331# define IEMOP_MNEMONIC(a_szMnemonic) do { } while (0)
9332# define IEMOP_MNEMONIC2(a_szMnemonic, a_szOps) do { } while (0)
9333#endif
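/* For illustration, IEMOP_MNEMONIC2("add", "Eb,Gb") in a debug build emits a Log4
 * line along the lines of
 *     decode - 0008:00000000c0101234 add Eb,Gb [#42]
 * i.e. CS selector and RIP, an optional "lock " marker, the mnemonic, the operand
 * string and the instruction counter, matching the format strings above. */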
9334
9335/** @} */
9336
9337
9338/** @name Opcode Helpers.
9339 * @{
9340 */
9341
9342/** The instruction raises an \#UD in real and V8086 mode. */
9343#define IEMOP_HLP_NO_REAL_OR_V86_MODE() \
9344 do \
9345 { \
9346 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu)) \
9347 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
9348 } while (0)
9349
9350/** The instruction allows no lock prefixing (in this encoding), throw \#UD if
9351 * lock prefixed.
9352 * @deprecated IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX */
9353#define IEMOP_HLP_NO_LOCK_PREFIX() \
9354 do \
9355 { \
9356 if (pIemCpu->fPrefixes & IEM_OP_PRF_LOCK) \
9357 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
9358 } while (0)
9359
9360/** The instruction is not available in 64-bit mode, throw \#UD if we're in
9361 * 64-bit mode. */
9362#define IEMOP_HLP_NO_64BIT() \
9363 do \
9364 { \
9365 if (pIemCpu->enmCpuMode == IEMMODE_64BIT) \
9366 return IEMOP_RAISE_INVALID_OPCODE(); \
9367 } while (0)
9368
9369/** The instruction is only available in 64-bit mode, throw \#UD if we're not in
9370 * 64-bit mode. */
9371#define IEMOP_HLP_ONLY_64BIT() \
9372 do \
9373 { \
9374 if (pIemCpu->enmCpuMode != IEMMODE_64BIT) \
9375 return IEMOP_RAISE_INVALID_OPCODE(); \
9376 } while (0)
9377
9378/** The instruction defaults to 64-bit operand size if 64-bit mode. */
9379#define IEMOP_HLP_DEFAULT_64BIT_OP_SIZE() \
9380 do \
9381 { \
9382 if (pIemCpu->enmCpuMode == IEMMODE_64BIT) \
9383 iemRecalEffOpSize64Default(pIemCpu); \
9384 } while (0)
9385
9386/** The instruction has 64-bit operand size if 64-bit mode. */
9387#define IEMOP_HLP_64BIT_OP_SIZE() \
9388 do \
9389 { \
9390 if (pIemCpu->enmCpuMode == IEMMODE_64BIT) \
9391 pIemCpu->enmEffOpSize = pIemCpu->enmDefOpSize = IEMMODE_64BIT; \
9392 } while (0)
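/* For illustration of the difference between the two helpers above: the first one
 * merely changes the default, so an operand-size prefix can still select a 16-bit
 * operand (e.g. pushes/pops in long mode), while the second one forces both the
 * effective and the default operand size to 64-bit outright, so any operand-size
 * prefix has no effect. */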
9393
9394/** Only a REX prefix immediately preceding the first opcode byte takes
9395 * effect. This macro helps ensure this as well as logging bad guest code. */
9396#define IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE(a_szPrf) \
9397 do \
9398 { \
9399 if (RT_UNLIKELY(pIemCpu->fPrefixes & IEM_OP_PRF_REX)) \
9400 { \
9401 Log5((a_szPrf ": Overriding REX prefix at %RX16! fPrefixes=%#x\n", \
9402 pIemCpu->CTX_SUFF(pCtx)->rip, pIemCpu->fPrefixes)); \
9403 pIemCpu->fPrefixes &= ~IEM_OP_PRF_REX_MASK; \
9404 pIemCpu->uRexB = 0; \
9405 pIemCpu->uRexIndex = 0; \
9406 pIemCpu->uRexReg = 0; \
9407 iemRecalEffOpSize(pIemCpu); \
9408 } \
9409 } while (0)
9410
9411/**
9412 * Done decoding.
9413 */
9414#define IEMOP_HLP_DONE_DECODING() \
9415 do \
9416 { \
9417 /*nothing for now, maybe later... */ \
9418 } while (0)
9419
9420/**
9421 * Done decoding, raise \#UD exception if lock prefix present.
9422 */
9423#define IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX() \
9424 do \
9425 { \
9426 if (RT_LIKELY(!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))) \
9427 { /* likely */ } \
9428 else \
9429 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
9430 } while (0)
9431#define IEMOP_HLP_DECODED_NL_1(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_fDisOpType) \
9432 do \
9433 { \
9434 if (RT_LIKELY(!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))) \
9435 { /* likely */ } \
9436 else \
9437 { \
9438 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_fDisOpType); \
9439 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
9440 } \
9441 } while (0)
9442#define IEMOP_HLP_DECODED_NL_2(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_uDisParam1, a_fDisOpType) \
9443 do \
9444 { \
9445 if (RT_LIKELY(!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))) \
9446 { /* likely */ } \
9447 else \
9448 { \
9449 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_uDisParam1); NOREF(a_fDisOpType); \
9450 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
9451 } \
9452 } while (0)
9453/**
9454 * Done decoding, raise \#UD exception if any lock, repz or repnz prefixes
9455 * are present.
9456 */
9457#define IEMOP_HLP_DONE_DECODING_NO_LOCK_REPZ_OR_REPNZ_PREFIXES() \
9458 do \
9459 { \
9460 if (RT_LIKELY(!(pIemCpu->fPrefixes & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)))) \
9461 { /* likely */ } \
9462 else \
9463 return IEMOP_RAISE_INVALID_OPCODE(); \
9464 } while (0)
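/* For illustration, a typical opcode routine in IEMAllInstructions.cpp.h combines
 * these decoding helpers with the MC blocks defined earlier; a rough sketch of a
 * trivial, hypothetical instruction would be:
 *
 *     FNIEMOP_DEF(iemOp_xxx)
 *     {
 *         IEMOP_MNEMONIC("xxx");
 *         IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
 *         IEM_MC_BEGIN(0, 0);
 *         IEM_MC_ADVANCE_RIP();
 *         IEM_MC_END();
 *         return VINF_SUCCESS;
 *     }
 */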
9465
9466
9467/**
9468 * Calculates the effective address of a ModR/M memory operand.
9469 *
9470 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
9471 *
9472 * @return Strict VBox status code.
9473 * @param pIemCpu The IEM per CPU data.
9474 * @param bRm The ModRM byte.
9475 * @param cbImm The size of any immediate following the
9476 * effective address opcode bytes. Important for
9477 * RIP relative addressing.
9478 * @param pGCPtrEff Where to return the effective address.
9479 */
9480IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PIEMCPU pIemCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff)
9481{
9482 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
9483 PCCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
9484#define SET_SS_DEF() \
9485 do \
9486 { \
9487 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_SEG_MASK)) \
9488 pIemCpu->iEffSeg = X86_SREG_SS; \
9489 } while (0)
9490
9491 if (pIemCpu->enmCpuMode != IEMMODE_64BIT)
9492 {
9493/** @todo Check the effective address size crap! */
9494 if (pIemCpu->enmEffAddrMode == IEMMODE_16BIT)
9495 {
9496 uint16_t u16EffAddr;
9497
9498 /* Handle the disp16 form with no registers first. */
9499 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
9500 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
9501 else
9502 {
9503 /* Get the displacement. */
9504 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9505 {
9506 case 0: u16EffAddr = 0; break;
9507 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
9508 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
9509 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
9510 }
9511
9512 /* Add the base and index registers to the disp. */
9513 switch (bRm & X86_MODRM_RM_MASK)
9514 {
9515 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
9516 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
9517 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
9518 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
9519 case 4: u16EffAddr += pCtx->si; break;
9520 case 5: u16EffAddr += pCtx->di; break;
9521 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
9522 case 7: u16EffAddr += pCtx->bx; break;
9523 }
9524 }
9525
9526 *pGCPtrEff = u16EffAddr;
9527 }
9528 else
9529 {
9530 Assert(pIemCpu->enmEffAddrMode == IEMMODE_32BIT);
9531 uint32_t u32EffAddr;
9532
9533 /* Handle the disp32 form with no registers first. */
9534 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9535 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
9536 else
9537 {
9538 /* Get the register (or SIB) value. */
9539 switch ((bRm & X86_MODRM_RM_MASK))
9540 {
9541 case 0: u32EffAddr = pCtx->eax; break;
9542 case 1: u32EffAddr = pCtx->ecx; break;
9543 case 2: u32EffAddr = pCtx->edx; break;
9544 case 3: u32EffAddr = pCtx->ebx; break;
9545 case 4: /* SIB */
9546 {
9547 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9548
9549 /* Get the index and scale it. */
9550 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
9551 {
9552 case 0: u32EffAddr = pCtx->eax; break;
9553 case 1: u32EffAddr = pCtx->ecx; break;
9554 case 2: u32EffAddr = pCtx->edx; break;
9555 case 3: u32EffAddr = pCtx->ebx; break;
9556 case 4: u32EffAddr = 0; /*none */ break;
9557 case 5: u32EffAddr = pCtx->ebp; break;
9558 case 6: u32EffAddr = pCtx->esi; break;
9559 case 7: u32EffAddr = pCtx->edi; break;
9560 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9561 }
9562 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9563
9564 /* add base */
9565 switch (bSib & X86_SIB_BASE_MASK)
9566 {
9567 case 0: u32EffAddr += pCtx->eax; break;
9568 case 1: u32EffAddr += pCtx->ecx; break;
9569 case 2: u32EffAddr += pCtx->edx; break;
9570 case 3: u32EffAddr += pCtx->ebx; break;
9571 case 4: u32EffAddr += pCtx->esp; SET_SS_DEF(); break;
9572 case 5:
9573 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9574 {
9575 u32EffAddr += pCtx->ebp;
9576 SET_SS_DEF();
9577 }
9578 else
9579 {
9580 uint32_t u32Disp;
9581 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9582 u32EffAddr += u32Disp;
9583 }
9584 break;
9585 case 6: u32EffAddr += pCtx->esi; break;
9586 case 7: u32EffAddr += pCtx->edi; break;
9587 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9588 }
9589 break;
9590 }
9591 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
9592 case 6: u32EffAddr = pCtx->esi; break;
9593 case 7: u32EffAddr = pCtx->edi; break;
9594 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9595 }
9596
9597 /* Get and add the displacement. */
9598 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9599 {
9600 case 0:
9601 break;
9602 case 1:
9603 {
9604 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9605 u32EffAddr += i8Disp;
9606 break;
9607 }
9608 case 2:
9609 {
9610 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9611 u32EffAddr += u32Disp;
9612 break;
9613 }
9614 default:
9615 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
9616 }
9617
9618 }
9619 if (pIemCpu->enmEffAddrMode == IEMMODE_32BIT)
9620 *pGCPtrEff = u32EffAddr;
9621 else
9622 {
9623 Assert(pIemCpu->enmEffAddrMode == IEMMODE_16BIT);
9624 *pGCPtrEff = u32EffAddr & UINT16_MAX;
9625 }
9626 }
9627 }
9628 else
9629 {
9630 uint64_t u64EffAddr;
9631
9632 /* Handle the rip+disp32 form with no registers first. */
9633 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9634 {
9635 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
9636 u64EffAddr += pCtx->rip + pIemCpu->offOpcode + cbImm;
9637 }
9638 else
9639 {
9640 /* Get the register (or SIB) value. */
9641 switch ((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB)
9642 {
9643 case 0: u64EffAddr = pCtx->rax; break;
9644 case 1: u64EffAddr = pCtx->rcx; break;
9645 case 2: u64EffAddr = pCtx->rdx; break;
9646 case 3: u64EffAddr = pCtx->rbx; break;
9647 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
9648 case 6: u64EffAddr = pCtx->rsi; break;
9649 case 7: u64EffAddr = pCtx->rdi; break;
9650 case 8: u64EffAddr = pCtx->r8; break;
9651 case 9: u64EffAddr = pCtx->r9; break;
9652 case 10: u64EffAddr = pCtx->r10; break;
9653 case 11: u64EffAddr = pCtx->r11; break;
9654 case 13: u64EffAddr = pCtx->r13; break;
9655 case 14: u64EffAddr = pCtx->r14; break;
9656 case 15: u64EffAddr = pCtx->r15; break;
9657 /* SIB */
9658 case 4:
9659 case 12:
9660 {
9661 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9662
9663 /* Get the index and scale it. */
9664 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pIemCpu->uRexIndex)
9665 {
9666 case 0: u64EffAddr = pCtx->rax; break;
9667 case 1: u64EffAddr = pCtx->rcx; break;
9668 case 2: u64EffAddr = pCtx->rdx; break;
9669 case 3: u64EffAddr = pCtx->rbx; break;
9670 case 4: u64EffAddr = 0; /*none */ break;
9671 case 5: u64EffAddr = pCtx->rbp; break;
9672 case 6: u64EffAddr = pCtx->rsi; break;
9673 case 7: u64EffAddr = pCtx->rdi; break;
9674 case 8: u64EffAddr = pCtx->r8; break;
9675 case 9: u64EffAddr = pCtx->r9; break;
9676 case 10: u64EffAddr = pCtx->r10; break;
9677 case 11: u64EffAddr = pCtx->r11; break;
9678 case 12: u64EffAddr = pCtx->r12; break;
9679 case 13: u64EffAddr = pCtx->r13; break;
9680 case 14: u64EffAddr = pCtx->r14; break;
9681 case 15: u64EffAddr = pCtx->r15; break;
9682 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9683 }
9684 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9685
9686 /* add base */
9687 switch ((bSib & X86_SIB_BASE_MASK) | pIemCpu->uRexB)
9688 {
9689 case 0: u64EffAddr += pCtx->rax; break;
9690 case 1: u64EffAddr += pCtx->rcx; break;
9691 case 2: u64EffAddr += pCtx->rdx; break;
9692 case 3: u64EffAddr += pCtx->rbx; break;
9693 case 4: u64EffAddr += pCtx->rsp; SET_SS_DEF(); break;
9694 case 6: u64EffAddr += pCtx->rsi; break;
9695 case 7: u64EffAddr += pCtx->rdi; break;
9696 case 8: u64EffAddr += pCtx->r8; break;
9697 case 9: u64EffAddr += pCtx->r9; break;
9698 case 10: u64EffAddr += pCtx->r10; break;
9699 case 11: u64EffAddr += pCtx->r11; break;
9700 case 12: u64EffAddr += pCtx->r12; break;
9701 case 14: u64EffAddr += pCtx->r14; break;
9702 case 15: u64EffAddr += pCtx->r15; break;
9703 /* complicated encodings */
9704 case 5:
9705 case 13:
9706 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9707 {
9708 if (!pIemCpu->uRexB)
9709 {
9710 u64EffAddr += pCtx->rbp;
9711 SET_SS_DEF();
9712 }
9713 else
9714 u64EffAddr += pCtx->r13;
9715 }
9716 else
9717 {
9718 uint32_t u32Disp;
9719 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9720 u64EffAddr += (int32_t)u32Disp;
9721 }
9722 break;
9723 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9724 }
9725 break;
9726 }
9727 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9728 }
9729
9730 /* Get and add the displacement. */
9731 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9732 {
9733 case 0:
9734 break;
9735 case 1:
9736 {
9737 int8_t i8Disp;
9738 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9739 u64EffAddr += i8Disp;
9740 break;
9741 }
9742 case 2:
9743 {
9744 uint32_t u32Disp;
9745 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9746 u64EffAddr += (int32_t)u32Disp;
9747 break;
9748 }
9749 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
9750 }
9751
9752 }
9753
9754 if (pIemCpu->enmEffAddrMode == IEMMODE_64BIT)
9755 *pGCPtrEff = u64EffAddr;
9756 else
9757 {
9758 Assert(pIemCpu->enmEffAddrMode == IEMMODE_32BIT);
9759 *pGCPtrEff = u64EffAddr & UINT32_MAX;
9760 }
9761 }
9762
9763 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
9764 return VINF_SUCCESS;
9765}
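/* A couple of worked examples for iemOpHlpCalcRmEffAddr (illustrative only):
 *   - 16-bit addressing, bRm=0x42 (mod=01, rm=010 -> [bp+si+disp8]): the signed
 *     disp8 is fetched, sign-extended and added to BP+SI, and SET_SS_DEF() makes
 *     SS the default segment.
 *   - 64-bit addressing, bRm=0x05 (mod=00, rm=101 -> RIP-relative): the disp32 is
 *     sign-extended and added to rip + offOpcode + cbImm, i.e. the address of the
 *     next instruction, as done in the code above. */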
9766
9767/** @} */
9768
9769
9770
9771/*
9772 * Include the instructions
9773 */
9774#include "IEMAllInstructions.cpp.h"
9775
9776
9777
9778
9779#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
9780
9781/**
9782 * Sets up execution verification mode.
9783 */
9784IEM_STATIC void iemExecVerificationModeSetup(PIEMCPU pIemCpu)
9785{
9786 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
9787 PCPUMCTX pOrgCtx = pIemCpu->CTX_SUFF(pCtx);
9788
9789 /*
9790 * Always note down the address of the current instruction.
9791 */
9792 pIemCpu->uOldCs = pOrgCtx->cs.Sel;
9793 pIemCpu->uOldRip = pOrgCtx->rip;
9794
9795 /*
9796 * Enable verification and/or logging.
9797 */
9798 bool fNewNoRem = !LogIs6Enabled(); /* logging triggers the no-rem/rem verification stuff */
9799 if ( fNewNoRem
9800 && ( 0
9801#if 0 /* auto enable on first paged protected mode interrupt */
9802 || ( pOrgCtx->eflags.Bits.u1IF
9803 && (pOrgCtx->cr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG)
9804 && TRPMHasTrap(pVCpu)
9805 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip) )
9806#endif
9807#if 0
9808 || ( pOrgCtx->cs.Sel == 0x10
9809 && ( pOrgCtx->rip == 0x90119e3e
9810 || pOrgCtx->rip == 0x901d9810))
9811#endif
9812#if 0 /* Auto enable DSL - FPU stuff. */
9813 || ( pOrgCtx->cs == 0x10
9814 && (// pOrgCtx->rip == 0xc02ec07f
9815 //|| pOrgCtx->rip == 0xc02ec082
9816 //|| pOrgCtx->rip == 0xc02ec0c9
9817 0
9818 || pOrgCtx->rip == 0x0c010e7c4 /* fxsave */ ) )
9819#endif
9820#if 0 /* Auto enable DSL - fstp st0 stuff. */
9821 || (pOrgCtx->cs.Sel == 0x23 && pOrgCtx->rip == 0x804aff7)
9822#endif
9823#if 0
9824 || pOrgCtx->rip == 0x9022bb3a
9825#endif
9826#if 0
9827 || (pOrgCtx->cs.Sel == 0x58 && pOrgCtx->rip == 0x3be) /* NT4SP1 sidt/sgdt in early loader code */
9828#endif
9829#if 0
9830 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013ec28) /* NT4SP1 first str (early boot) */
9831 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x80119e3f) /* NT4SP1 second str (early boot) */
9832#endif
9833#if 0 /* NT4SP1 - later on the blue screen, things go wrong... */
9834 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8010a5df)
9835 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013a7c4)
9836 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013a7d2)
9837#endif
9838#if 0 /* NT4SP1 - xadd early boot. */
9839 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8019cf0f)
9840#endif
9841#if 0 /* NT4SP1 - wrmsr (intel MSR). */
9842 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8011a6d4)
9843#endif
9844#if 0 /* NT4SP1 - cmpxchg (AMD). */
9845 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x801684c1)
9846#endif
9847#if 0 /* NT4SP1 - fnstsw + 2 (AMD). */
9848 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x801c6b88+2)
9849#endif
9850#if 0 /* NT4SP1 - iret to v8086 -- too generic a place? (N/A with GAs installed) */
9851 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013bd5d)
9852
9853#endif
9854#if 0 /* NT4SP1 - iret to v8086 (executing edlin) */
9855 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013b609)
9856
9857#endif
9858#if 0 /* NT4SP1 - frstor [ecx] */
9859 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013d11f)
9860#endif
9861#if 0 /* xxxxxx - All long mode code. */
9862 || (pOrgCtx->msrEFER & MSR_K6_EFER_LMA)
9863#endif
9864#if 0 /* rep movsq linux 3.7 64-bit boot. */
9865 || (pOrgCtx->rip == 0x0000000000100241)
9866#endif
9867#if 0 /* linux 3.7 64-bit boot - '000000000215e240'. */
9868 || (pOrgCtx->rip == 0x000000000215e240)
9869#endif
9870#if 0 /* DOS's size-overridden iret to v8086. */
9871 || (pOrgCtx->rip == 0x427 && pOrgCtx->cs.Sel == 0xb8)
9872#endif
9873 )
9874 )
9875 {
9876 RTLogGroupSettings(NULL, "iem.eo.l6.l2");
9877 RTLogFlags(NULL, "enabled");
9878 fNewNoRem = false;
9879 }
9880 if (fNewNoRem != pIemCpu->fNoRem)
9881 {
9882 pIemCpu->fNoRem = fNewNoRem;
9883 if (!fNewNoRem)
9884 {
9885 LogAlways(("Enabling verification mode!\n"));
9886 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
9887 }
9888 else
9889 LogAlways(("Disabling verification mode!\n"));
9890 }
9891
9892 /*
9893 * Switch state.
9894 */
9895 if (IEM_VERIFICATION_ENABLED(pIemCpu))
9896 {
9897 static CPUMCTX s_DebugCtx; /* Ugly! */
9898
9899 s_DebugCtx = *pOrgCtx;
9900 pIemCpu->CTX_SUFF(pCtx) = &s_DebugCtx;
9901 }
9902
9903 /*
9904 * See if there is an interrupt pending in TRPM and inject it if we can.
9905 */
9906 pIemCpu->uInjectCpl = UINT8_MAX;
9907 if ( pOrgCtx->eflags.Bits.u1IF
9908 && TRPMHasTrap(pVCpu)
9909 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip)
9910 {
9911 uint8_t u8TrapNo;
9912 TRPMEVENT enmType;
9913 RTGCUINT uErrCode;
9914 RTGCPTR uCr2;
9915 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
9916 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
9917 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
9918 TRPMResetTrap(pVCpu);
9919 pIemCpu->uInjectCpl = pIemCpu->uCpl;
9920 }
9921
9922 /*
9923 * Reset the counters.
9924 */
9925 pIemCpu->cIOReads = 0;
9926 pIemCpu->cIOWrites = 0;
9927 pIemCpu->fIgnoreRaxRdx = false;
9928 pIemCpu->fOverlappingMovs = false;
9929 pIemCpu->fProblematicMemory = false;
9930 pIemCpu->fUndefinedEFlags = 0;
9931
9932 if (IEM_VERIFICATION_ENABLED(pIemCpu))
9933 {
9934 /*
9935 * Free all verification records.
9936 */
9937 PIEMVERIFYEVTREC pEvtRec = pIemCpu->pIemEvtRecHead;
9938 pIemCpu->pIemEvtRecHead = NULL;
9939 pIemCpu->ppIemEvtRecNext = &pIemCpu->pIemEvtRecHead;
9940 do
9941 {
9942 while (pEvtRec)
9943 {
9944 PIEMVERIFYEVTREC pNext = pEvtRec->pNext;
9945 pEvtRec->pNext = pIemCpu->pFreeEvtRec;
9946 pIemCpu->pFreeEvtRec = pEvtRec;
9947 pEvtRec = pNext;
9948 }
9949 pEvtRec = pIemCpu->pOtherEvtRecHead;
9950 pIemCpu->pOtherEvtRecHead = NULL;
9951 pIemCpu->ppOtherEvtRecNext = &pIemCpu->pOtherEvtRecHead;
9952 } while (pEvtRec);
9953 }
9954}
9955
9956
9957/**
9958 * Allocate an event record.
9959 * @returns Pointer to a record.
9960 */
9961IEM_STATIC PIEMVERIFYEVTREC iemVerifyAllocRecord(PIEMCPU pIemCpu)
9962{
9963 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
9964 return NULL;
9965
9966 PIEMVERIFYEVTREC pEvtRec = pIemCpu->pFreeEvtRec;
9967 if (pEvtRec)
9968 pIemCpu->pFreeEvtRec = pEvtRec->pNext;
9969 else
9970 {
9971 if (!pIemCpu->ppIemEvtRecNext)
9972 return NULL; /* Too early (fake PCIBIOS), ignore notification. */
9973
9974 pEvtRec = (PIEMVERIFYEVTREC)MMR3HeapAlloc(IEMCPU_TO_VM(pIemCpu), MM_TAG_EM /* lazy bird*/, sizeof(*pEvtRec));
9975 if (!pEvtRec)
9976 return NULL;
9977 }
9978 pEvtRec->enmEvent = IEMVERIFYEVENT_INVALID;
9979 pEvtRec->pNext = NULL;
9980 return pEvtRec;
9981}
9982
9983
9984/**
9985 * IOMMMIORead notification.
9986 */
9987VMM_INT_DECL(void) IEMNotifyMMIORead(PVM pVM, RTGCPHYS GCPhys, size_t cbValue)
9988{
9989 PVMCPU pVCpu = VMMGetCpu(pVM);
9990 if (!pVCpu)
9991 return;
9992 PIEMCPU pIemCpu = &pVCpu->iem.s;
9993 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
9994 if (!pEvtRec)
9995 return;
9996 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
9997 pEvtRec->u.RamRead.GCPhys = GCPhys;
9998 pEvtRec->u.RamRead.cb = (uint32_t)cbValue;
9999 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
10000 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
10001}
10002
10003
10004/**
10005 * IOMMMIOWrite notification.
10006 */
10007VMM_INT_DECL(void) IEMNotifyMMIOWrite(PVM pVM, RTGCPHYS GCPhys, uint32_t u32Value, size_t cbValue)
10008{
10009 PVMCPU pVCpu = VMMGetCpu(pVM);
10010 if (!pVCpu)
10011 return;
10012 PIEMCPU pIemCpu = &pVCpu->iem.s;
10013 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
10014 if (!pEvtRec)
10015 return;
10016 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
10017 pEvtRec->u.RamWrite.GCPhys = GCPhys;
10018 pEvtRec->u.RamWrite.cb = (uint32_t)cbValue;
10019 pEvtRec->u.RamWrite.ab[0] = RT_BYTE1(u32Value);
10020 pEvtRec->u.RamWrite.ab[1] = RT_BYTE2(u32Value);
10021 pEvtRec->u.RamWrite.ab[2] = RT_BYTE3(u32Value);
10022 pEvtRec->u.RamWrite.ab[3] = RT_BYTE4(u32Value);
10023 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
10024 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
10025}
10026
10027
10028/**
10029 * IOMIOPortRead notification.
10030 */
10031VMM_INT_DECL(void) IEMNotifyIOPortRead(PVM pVM, RTIOPORT Port, size_t cbValue)
10032{
10033 PVMCPU pVCpu = VMMGetCpu(pVM);
10034 if (!pVCpu)
10035 return;
10036 PIEMCPU pIemCpu = &pVCpu->iem.s;
10037 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
10038 if (!pEvtRec)
10039 return;
10040 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
10041 pEvtRec->u.IOPortRead.Port = Port;
10042 pEvtRec->u.IOPortRead.cbValue = (uint8_t)cbValue;
10043 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
10044 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
10045}
10046
10047/**
10048 * IOMIOPortWrite notification.
10049 */
10050VMM_INT_DECL(void) IEMNotifyIOPortWrite(PVM pVM, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
10051{
10052 PVMCPU pVCpu = VMMGetCpu(pVM);
10053 if (!pVCpu)
10054 return;
10055 PIEMCPU pIemCpu = &pVCpu->iem.s;
10056 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
10057 if (!pEvtRec)
10058 return;
10059 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
10060 pEvtRec->u.IOPortWrite.Port = Port;
10061 pEvtRec->u.IOPortWrite.cbValue = (uint8_t)cbValue;
10062 pEvtRec->u.IOPortWrite.u32Value = u32Value;
10063 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
10064 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
10065}
10066
10067
10068VMM_INT_DECL(void) IEMNotifyIOPortReadString(PVM pVM, RTIOPORT Port, void *pvDst, RTGCUINTREG cTransfers, size_t cbValue)
10069{
10070 PVMCPU pVCpu = VMMGetCpu(pVM);
10071 if (!pVCpu)
10072 return;
10073 PIEMCPU pIemCpu = &pVCpu->iem.s;
10074 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
10075 if (!pEvtRec)
10076 return;
10077 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_STR_READ;
10078 pEvtRec->u.IOPortStrRead.Port = Port;
10079 pEvtRec->u.IOPortStrRead.cbValue = (uint8_t)cbValue;
10080 pEvtRec->u.IOPortStrRead.cTransfers = cTransfers;
10081 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
10082 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
10083}
10084
10085
10086VMM_INT_DECL(void) IEMNotifyIOPortWriteString(PVM pVM, RTIOPORT Port, void const *pvSrc, RTGCUINTREG cTransfers, size_t cbValue)
10087{
10088 PVMCPU pVCpu = VMMGetCpu(pVM);
10089 if (!pVCpu)
10090 return;
10091 PIEMCPU pIemCpu = &pVCpu->iem.s;
10092 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
10093 if (!pEvtRec)
10094 return;
10095 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_STR_WRITE;
10096 pEvtRec->u.IOPortStrWrite.Port = Port;
10097 pEvtRec->u.IOPortStrWrite.cbValue = (uint8_t)cbValue;
10098 pEvtRec->u.IOPortStrWrite.cTransfers = cTransfers;
10099 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
10100 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
10101}
10102
10103
10104/**
10105 * Fakes and records an I/O port read.
10106 *
10107 * @returns VINF_SUCCESS.
10108 * @param pIemCpu The IEM per CPU data.
10109 * @param Port The I/O port.
10110 * @param pu32Value Where to store the fake value.
10111 * @param cbValue The size of the access.
10112 */
10113IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
10114{
10115 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
10116 if (pEvtRec)
10117 {
10118 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
10119 pEvtRec->u.IOPortRead.Port = Port;
10120 pEvtRec->u.IOPortRead.cbValue = (uint8_t)cbValue;
10121 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
10122 *pIemCpu->ppIemEvtRecNext = pEvtRec;
10123 }
10124 pIemCpu->cIOReads++;
10125 *pu32Value = 0xcccccccc;
10126 return VINF_SUCCESS;
10127}
10128
10129
10130/**
10131 * Fakes and records an I/O port write.
10132 *
10133 * @returns VINF_SUCCESS.
10134 * @param pIemCpu The IEM per CPU data.
10135 * @param Port The I/O port.
10136 * @param u32Value The value being written.
10137 * @param cbValue The size of the access.
10138 */
10139IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
10140{
10141 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
10142 if (pEvtRec)
10143 {
10144 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
10145 pEvtRec->u.IOPortWrite.Port = Port;
10146 pEvtRec->u.IOPortWrite.cbValue = (uint8_t)cbValue;
10147 pEvtRec->u.IOPortWrite.u32Value = u32Value;
10148 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
10149 *pIemCpu->ppIemEvtRecNext = pEvtRec;
10150 }
10151 pIemCpu->cIOWrites++;
10152 return VINF_SUCCESS;
10153}
10154
10155
10156/**
10157 * Used to add extra details about a stub case.
10158 * @param pIemCpu The IEM per CPU state.
10159 */
10160IEM_STATIC void iemVerifyAssertMsg2(PIEMCPU pIemCpu)
10161{
10162 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
10163 PVM pVM = IEMCPU_TO_VM(pIemCpu);
10164 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
10165 char szRegs[4096];
10166 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
10167 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
10168 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
10169 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
10170 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
10171 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
10172 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
10173 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
10174 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
10175 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
10176 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
10177 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
10178 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
10179 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
10180 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
10181 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
10182 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
10183 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
10184 " efer=%016VR{efer}\n"
10185 " pat=%016VR{pat}\n"
10186 " sf_mask=%016VR{sf_mask}\n"
10187 "krnl_gs_base=%016VR{krnl_gs_base}\n"
10188 " lstar=%016VR{lstar}\n"
10189 " star=%016VR{star} cstar=%016VR{cstar}\n"
10190 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
10191 );
10192
10193 char szInstr1[256];
10194 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pIemCpu->uOldCs, pIemCpu->uOldRip,
10195 DBGF_DISAS_FLAGS_DEFAULT_MODE,
10196 szInstr1, sizeof(szInstr1), NULL);
10197 char szInstr2[256];
10198 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
10199 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
10200 szInstr2, sizeof(szInstr2), NULL);
10201
10202 RTAssertMsg2Weak("%s%s\n%s\n", szRegs, szInstr1, szInstr2);
10203}
10204
10205
10206/**
10207 * Used by iemVerifyAssertRecord and iemVerifyAssertRecords to add a record
10208 * dump to the assertion info.
10209 *
10210 * @param pEvtRec The record to dump.
10211 */
10212IEM_STATIC void iemVerifyAssertAddRecordDump(PIEMVERIFYEVTREC pEvtRec)
10213{
10214 switch (pEvtRec->enmEvent)
10215 {
10216 case IEMVERIFYEVENT_IOPORT_READ:
10217 RTAssertMsg2Add("I/O PORT READ from %#6x, %d bytes\n",
10218 pEvtRec->u.IOPortRead.Port,
10219 pEvtRec->u.IOPortRead.cbValue);
10220 break;
10221 case IEMVERIFYEVENT_IOPORT_WRITE:
10222 RTAssertMsg2Add("I/O PORT WRITE to %#6x, %d bytes, value %#x\n",
10223 pEvtRec->u.IOPortWrite.Port,
10224 pEvtRec->u.IOPortWrite.cbValue,
10225 pEvtRec->u.IOPortWrite.u32Value);
10226 break;
10227 case IEMVERIFYEVENT_IOPORT_STR_READ:
10228 RTAssertMsg2Add("I/O PORT STRING READ from %#6x, %d bytes, %#x times\n",
10229 pEvtRec->u.IOPortStrRead.Port,
10230 pEvtRec->u.IOPortStrRead.cbValue,
10231 pEvtRec->u.IOPortStrRead.cTransfers);
10232 break;
10233 case IEMVERIFYEVENT_IOPORT_STR_WRITE:
10234 RTAssertMsg2Add("I/O PORT STRING WRITE to %#6x, %d bytes, %#x times\n",
10235 pEvtRec->u.IOPortStrWrite.Port,
10236 pEvtRec->u.IOPortStrWrite.cbValue,
10237 pEvtRec->u.IOPortStrWrite.cTransfers);
10238 break;
10239 case IEMVERIFYEVENT_RAM_READ:
10240 RTAssertMsg2Add("RAM READ at %RGp, %#4zx bytes\n",
10241 pEvtRec->u.RamRead.GCPhys,
10242 pEvtRec->u.RamRead.cb);
10243 break;
10244 case IEMVERIFYEVENT_RAM_WRITE:
10245 RTAssertMsg2Add("RAM WRITE at %RGp, %#4zx bytes: %.*Rhxs\n",
10246 pEvtRec->u.RamWrite.GCPhys,
10247 pEvtRec->u.RamWrite.cb,
10248 (int)pEvtRec->u.RamWrite.cb,
10249 pEvtRec->u.RamWrite.ab);
10250 break;
10251 default:
10252 AssertMsgFailed(("Invalid event type %d\n", pEvtRec->enmEvent));
10253 break;
10254 }
10255}
10256
10257
10258/**
10259 * Raises an assertion on the specified records, showing the given message with
10260 * record dumps attached.
10261 *
10262 * @param pIemCpu The IEM per CPU data.
10263 * @param pEvtRec1 The first record.
10264 * @param pEvtRec2 The second record.
10265 * @param pszMsg The message explaining why we're asserting.
10266 */
10267IEM_STATIC void iemVerifyAssertRecords(PIEMCPU pIemCpu, PIEMVERIFYEVTREC pEvtRec1, PIEMVERIFYEVTREC pEvtRec2, const char *pszMsg)
10268{
10269 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
10270 iemVerifyAssertAddRecordDump(pEvtRec1);
10271 iemVerifyAssertAddRecordDump(pEvtRec2);
10272 iemVerifyAssertMsg2(pIemCpu);
10273 RTAssertPanic();
10274}
10275
10276
10277/**
10278 * Raises an assertion on the specified record, showing the given message with
10279 * a record dump attached.
10280 *
10281 * @param pIemCpu The IEM per CPU data.
10282 * @param pEvtRec The record.
10283 * @param pszMsg The message explaining why we're asserting.
10284 */
10285IEM_STATIC void iemVerifyAssertRecord(PIEMCPU pIemCpu, PIEMVERIFYEVTREC pEvtRec, const char *pszMsg)
10286{
10287 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
10288 iemVerifyAssertAddRecordDump(pEvtRec);
10289 iemVerifyAssertMsg2(pIemCpu);
10290 RTAssertPanic();
10291}
10292
10293
10294/**
10295 * Verifies a write record.
10296 *
10297 * @param pIemCpu The IEM per CPU data.
10298 * @param pEvtRec The write record.
10299 * @param fRem Set if REM was doing the other execution. If clear
10300 * it was HM.
10301 */
10302IEM_STATIC void iemVerifyWriteRecord(PIEMCPU pIemCpu, PIEMVERIFYEVTREC pEvtRec, bool fRem)
10303{
10304 uint8_t abBuf[sizeof(pEvtRec->u.RamWrite.ab)]; RT_ZERO(abBuf);
10305 Assert(sizeof(abBuf) >= pEvtRec->u.RamWrite.cb);
10306 int rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), abBuf, pEvtRec->u.RamWrite.GCPhys, pEvtRec->u.RamWrite.cb);
10307 if ( RT_FAILURE(rc)
10308 || memcmp(abBuf, pEvtRec->u.RamWrite.ab, pEvtRec->u.RamWrite.cb) )
10309 {
10310 /* fend off ins */
10311 if ( !pIemCpu->cIOReads
10312 || pEvtRec->u.RamWrite.ab[0] != 0xcc
10313 || ( pEvtRec->u.RamWrite.cb != 1
10314 && pEvtRec->u.RamWrite.cb != 2
10315 && pEvtRec->u.RamWrite.cb != 4) )
10316 {
10317 /* fend off ROMs and MMIO */
10318 if ( pEvtRec->u.RamWrite.GCPhys - UINT32_C(0x000a0000) > UINT32_C(0x60000)
10319 && pEvtRec->u.RamWrite.GCPhys - UINT32_C(0xfffc0000) > UINT32_C(0x40000) )
10320 {
10321 /* fend off fxsave */
10322 if (pEvtRec->u.RamWrite.cb != 512)
10323 {
10324 const char *pszWho = fRem ? "rem" : HMR3IsVmxEnabled(IEMCPU_TO_VM(pIemCpu)->pUVM) ? "vmx" : "svm";
10325 RTAssertMsg1(NULL, __LINE__, __FILE__, __PRETTY_FUNCTION__);
10326 RTAssertMsg2Weak("Memory at %RGv differs\n", pEvtRec->u.RamWrite.GCPhys);
10327 RTAssertMsg2Add("%s: %.*Rhxs\n"
10328 "iem: %.*Rhxs\n",
10329 pszWho, pEvtRec->u.RamWrite.cb, abBuf,
10330 pEvtRec->u.RamWrite.cb, pEvtRec->u.RamWrite.ab);
10331 iemVerifyAssertAddRecordDump(pEvtRec);
10332 iemVerifyAssertMsg2(pIemCpu);
10333 RTAssertPanic();
10334 }
10335 }
10336 }
10337 }
10338
10339}
10340
10341/**
10342 * Performs the post-execution verification checks.
10343 */
10344IEM_STATIC void iemExecVerificationModeCheck(PIEMCPU pIemCpu)
10345{
10346 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
10347 return;
10348
10349 /*
10350 * Switch back the state.
10351 */
10352 PCPUMCTX pOrgCtx = CPUMQueryGuestCtxPtr(IEMCPU_TO_VMCPU(pIemCpu));
10353 PCPUMCTX pDebugCtx = pIemCpu->CTX_SUFF(pCtx);
10354 Assert(pOrgCtx != pDebugCtx);
10355 pIemCpu->CTX_SUFF(pCtx) = pOrgCtx;
10356
10357 /*
10358 * Execute the instruction in REM.
10359 */
10360 bool fRem = false;
10361 PVM pVM = IEMCPU_TO_VM(pIemCpu);
10362 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
10363 VBOXSTRICTRC rc = VERR_EM_CANNOT_EXEC_GUEST;
10364#ifdef IEM_VERIFICATION_MODE_FULL_HM
10365 if ( HMIsEnabled(pVM)
10366 && pIemCpu->cIOReads == 0
10367 && pIemCpu->cIOWrites == 0
10368 && !pIemCpu->fProblematicMemory)
10369 {
10370 uint64_t uStartRip = pOrgCtx->rip;
10371 unsigned iLoops = 0;
10372 do
10373 {
10374 rc = EMR3HmSingleInstruction(pVM, pVCpu, EM_ONE_INS_FLAGS_RIP_CHANGE);
10375 iLoops++;
10376 } while ( rc == VINF_SUCCESS
10377 || ( rc == VINF_EM_DBG_STEPPED
10378 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
10379 && EMGetInhibitInterruptsPC(pVCpu) == pOrgCtx->rip)
10380 || ( pOrgCtx->rip != pDebugCtx->rip
10381 && pIemCpu->uInjectCpl != UINT8_MAX
10382 && iLoops < 8) );
10383 if (rc == VINF_EM_RESCHEDULE && pOrgCtx->rip != uStartRip)
10384 rc = VINF_SUCCESS;
10385 }
10386#endif
10387 if ( rc == VERR_EM_CANNOT_EXEC_GUEST
10388 || rc == VINF_IOM_R3_IOPORT_READ
10389 || rc == VINF_IOM_R3_IOPORT_WRITE
10390 || rc == VINF_IOM_R3_MMIO_READ
10391 || rc == VINF_IOM_R3_MMIO_READ_WRITE
10392 || rc == VINF_IOM_R3_MMIO_WRITE
10393 || rc == VINF_CPUM_R3_MSR_READ
10394 || rc == VINF_CPUM_R3_MSR_WRITE
10395 || rc == VINF_EM_RESCHEDULE
10396 )
10397 {
10398 EMRemLock(pVM);
10399 rc = REMR3EmulateInstruction(pVM, pVCpu);
10400 AssertRC(rc);
10401 EMRemUnlock(pVM);
10402 fRem = true;
10403 }
10404
10405 /*
10406 * Compare the register states.
10407 */
10408 unsigned cDiffs = 0;
10409 if (memcmp(pOrgCtx, pDebugCtx, sizeof(*pDebugCtx)))
10410 {
10411 //Log(("REM and IEM ends up with different registers!\n"));
10412 const char *pszWho = fRem ? "rem" : HMR3IsVmxEnabled(pVM->pUVM) ? "vmx" : "svm";
10413
10414# define CHECK_FIELD(a_Field) \
10415 do \
10416 { \
10417 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
10418 { \
10419 switch (sizeof(pOrgCtx->a_Field)) \
10420 { \
10421 case 1: RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
10422 case 2: RTAssertMsg2Weak(" %8s differs - iem=%04x - %s=%04x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
10423 case 4: RTAssertMsg2Weak(" %8s differs - iem=%08x - %s=%08x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
10424 case 8: RTAssertMsg2Weak(" %8s differs - iem=%016llx - %s=%016llx\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
10425 default: RTAssertMsg2Weak(" %8s differs\n", #a_Field); break; \
10426 } \
10427 cDiffs++; \
10428 } \
10429 } while (0)
10430# define CHECK_XSTATE_FIELD(a_Field) \
10431 do \
10432 { \
10433 if (pOrgXState->a_Field != pDebugXState->a_Field) \
10434 { \
10435 switch (sizeof(pOrgXState->a_Field)) \
10436 { \
10437 case 1: RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
10438 case 2: RTAssertMsg2Weak(" %8s differs - iem=%04x - %s=%04x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
10439 case 4: RTAssertMsg2Weak(" %8s differs - iem=%08x - %s=%08x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
10440 case 8: RTAssertMsg2Weak(" %8s differs - iem=%016llx - %s=%016llx\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
10441 default: RTAssertMsg2Weak(" %8s differs\n", #a_Field); break; \
10442 } \
10443 cDiffs++; \
10444 } \
10445 } while (0)
10446
10447# define CHECK_BIT_FIELD(a_Field) \
10448 do \
10449 { \
10450 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
10451 { \
10452 RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); \
10453 cDiffs++; \
10454 } \
10455 } while (0)
10456
10457# define CHECK_SEL(a_Sel) \
10458 do \
10459 { \
10460 CHECK_FIELD(a_Sel.Sel); \
10461 CHECK_FIELD(a_Sel.Attr.u); \
10462 CHECK_FIELD(a_Sel.u64Base); \
10463 CHECK_FIELD(a_Sel.u32Limit); \
10464 CHECK_FIELD(a_Sel.fFlags); \
10465 } while (0)
10466
10467 PX86XSAVEAREA pOrgXState = pOrgCtx->CTX_SUFF(pXState);
10468 PX86XSAVEAREA pDebugXState = pDebugCtx->CTX_SUFF(pXState);
10469
10470#if 1 /* The recompiler doesn't update these the intel way. */
10471 if (fRem)
10472 {
10473 pOrgXState->x87.FOP = pDebugXState->x87.FOP;
10474 pOrgXState->x87.FPUIP = pDebugXState->x87.FPUIP;
10475 pOrgXState->x87.CS = pDebugXState->x87.CS;
10476 pOrgXState->x87.Rsrvd1 = pDebugXState->x87.Rsrvd1;
10477 pOrgXState->x87.FPUDP = pDebugXState->x87.FPUDP;
10478 pOrgXState->x87.DS = pDebugXState->x87.DS;
10479 pOrgXState->x87.Rsrvd2 = pDebugXState->x87.Rsrvd2;
10480 //pOrgXState->x87.MXCSR_MASK = pDebugXState->x87.MXCSR_MASK;
10481 if ((pOrgXState->x87.FSW & X86_FSW_TOP_MASK) == (pDebugXState->x87.FSW & X86_FSW_TOP_MASK))
10482 pOrgXState->x87.FSW = pDebugXState->x87.FSW;
10483 }
10484#endif
10485 if (memcmp(&pOrgXState->x87, &pDebugXState->x87, sizeof(pDebugXState->x87)))
10486 {
10487 RTAssertMsg2Weak(" the FPU state differs\n");
10488 cDiffs++;
10489 CHECK_XSTATE_FIELD(x87.FCW);
10490 CHECK_XSTATE_FIELD(x87.FSW);
10491 CHECK_XSTATE_FIELD(x87.FTW);
10492 CHECK_XSTATE_FIELD(x87.FOP);
10493 CHECK_XSTATE_FIELD(x87.FPUIP);
10494 CHECK_XSTATE_FIELD(x87.CS);
10495 CHECK_XSTATE_FIELD(x87.Rsrvd1);
10496 CHECK_XSTATE_FIELD(x87.FPUDP);
10497 CHECK_XSTATE_FIELD(x87.DS);
10498 CHECK_XSTATE_FIELD(x87.Rsrvd2);
10499 CHECK_XSTATE_FIELD(x87.MXCSR);
10500 CHECK_XSTATE_FIELD(x87.MXCSR_MASK);
10501 CHECK_XSTATE_FIELD(x87.aRegs[0].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[0].au64[1]);
10502 CHECK_XSTATE_FIELD(x87.aRegs[1].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[1].au64[1]);
10503 CHECK_XSTATE_FIELD(x87.aRegs[2].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[2].au64[1]);
10504 CHECK_XSTATE_FIELD(x87.aRegs[3].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[3].au64[1]);
10505 CHECK_XSTATE_FIELD(x87.aRegs[4].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[4].au64[1]);
10506 CHECK_XSTATE_FIELD(x87.aRegs[5].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[5].au64[1]);
10507 CHECK_XSTATE_FIELD(x87.aRegs[6].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[6].au64[1]);
10508 CHECK_XSTATE_FIELD(x87.aRegs[7].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[7].au64[1]);
10509 CHECK_XSTATE_FIELD(x87.aXMM[ 0].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 0].au64[1]);
10510 CHECK_XSTATE_FIELD(x87.aXMM[ 1].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 1].au64[1]);
10511 CHECK_XSTATE_FIELD(x87.aXMM[ 2].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 2].au64[1]);
10512 CHECK_XSTATE_FIELD(x87.aXMM[ 3].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 3].au64[1]);
10513 CHECK_XSTATE_FIELD(x87.aXMM[ 4].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 4].au64[1]);
10514 CHECK_XSTATE_FIELD(x87.aXMM[ 5].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 5].au64[1]);
10515 CHECK_XSTATE_FIELD(x87.aXMM[ 6].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 6].au64[1]);
10516 CHECK_XSTATE_FIELD(x87.aXMM[ 7].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 7].au64[1]);
10517 CHECK_XSTATE_FIELD(x87.aXMM[ 8].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 8].au64[1]);
10518 CHECK_XSTATE_FIELD(x87.aXMM[ 9].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 9].au64[1]);
10519 CHECK_XSTATE_FIELD(x87.aXMM[10].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[10].au64[1]);
10520 CHECK_XSTATE_FIELD(x87.aXMM[11].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[11].au64[1]);
10521 CHECK_XSTATE_FIELD(x87.aXMM[12].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[12].au64[1]);
10522 CHECK_XSTATE_FIELD(x87.aXMM[13].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[13].au64[1]);
10523 CHECK_XSTATE_FIELD(x87.aXMM[14].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[14].au64[1]);
10524 CHECK_XSTATE_FIELD(x87.aXMM[15].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[15].au64[1]);
10525 for (unsigned i = 0; i < RT_ELEMENTS(pOrgXState->x87.au32RsrvdRest); i++)
10526 CHECK_XSTATE_FIELD(x87.au32RsrvdRest[i]);
10527 }
10528 CHECK_FIELD(rip);
10529 uint32_t fFlagsMask = UINT32_MAX & ~pIemCpu->fUndefinedEFlags;
10530 if ((pOrgCtx->rflags.u & fFlagsMask) != (pDebugCtx->rflags.u & fFlagsMask))
10531 {
10532 RTAssertMsg2Weak(" rflags differs - iem=%08llx %s=%08llx\n", pDebugCtx->rflags.u, pszWho, pOrgCtx->rflags.u);
10533 CHECK_BIT_FIELD(rflags.Bits.u1CF);
10534 CHECK_BIT_FIELD(rflags.Bits.u1Reserved0);
10535 CHECK_BIT_FIELD(rflags.Bits.u1PF);
10536 CHECK_BIT_FIELD(rflags.Bits.u1Reserved1);
10537 CHECK_BIT_FIELD(rflags.Bits.u1AF);
10538 CHECK_BIT_FIELD(rflags.Bits.u1Reserved2);
10539 CHECK_BIT_FIELD(rflags.Bits.u1ZF);
10540 CHECK_BIT_FIELD(rflags.Bits.u1SF);
10541 CHECK_BIT_FIELD(rflags.Bits.u1TF);
10542 CHECK_BIT_FIELD(rflags.Bits.u1IF);
10543 CHECK_BIT_FIELD(rflags.Bits.u1DF);
10544 CHECK_BIT_FIELD(rflags.Bits.u1OF);
10545 CHECK_BIT_FIELD(rflags.Bits.u2IOPL);
10546 CHECK_BIT_FIELD(rflags.Bits.u1NT);
10547 CHECK_BIT_FIELD(rflags.Bits.u1Reserved3);
10548 if (0 && !fRem) /** @todo debug the occasional clear RF flags when running against VT-x. */
10549 CHECK_BIT_FIELD(rflags.Bits.u1RF);
10550 CHECK_BIT_FIELD(rflags.Bits.u1VM);
10551 CHECK_BIT_FIELD(rflags.Bits.u1AC);
10552 CHECK_BIT_FIELD(rflags.Bits.u1VIF);
10553 CHECK_BIT_FIELD(rflags.Bits.u1VIP);
10554 CHECK_BIT_FIELD(rflags.Bits.u1ID);
10555 }
10556
10557 if (pIemCpu->cIOReads != 1 && !pIemCpu->fIgnoreRaxRdx)
10558 CHECK_FIELD(rax);
10559 CHECK_FIELD(rcx);
10560 if (!pIemCpu->fIgnoreRaxRdx)
10561 CHECK_FIELD(rdx);
10562 CHECK_FIELD(rbx);
10563 CHECK_FIELD(rsp);
10564 CHECK_FIELD(rbp);
10565 CHECK_FIELD(rsi);
10566 CHECK_FIELD(rdi);
10567 CHECK_FIELD(r8);
10568 CHECK_FIELD(r9);
10569 CHECK_FIELD(r10);
10570 CHECK_FIELD(r11);
10571 CHECK_FIELD(r12);
10572 CHECK_FIELD(r13);
10573 CHECK_SEL(cs);
10574 CHECK_SEL(ss);
10575 CHECK_SEL(ds);
10576 CHECK_SEL(es);
10577 CHECK_SEL(fs);
10578 CHECK_SEL(gs);
10579 CHECK_FIELD(cr0);
10580
10581 /* Kludge #1: REM fetches code across the page boundary and faults on the next page, while we execute
10582 the faulting instruction first: 001b:77f61ff3 66 8b 42 02 mov ax, word [edx+002h] (NT4SP1) */
10583 /* Kludge #2: CR2 differs slightly on cross-page boundary faults, we report the last address of the access
10584 while REM reports the address of the first byte on the page. Pending investigation as to which is correct. */
10585 if (pOrgCtx->cr2 != pDebugCtx->cr2)
10586 {
10587 if (pIemCpu->uOldCs == 0x1b && pIemCpu->uOldRip == 0x77f61ff3 && fRem)
10588 { /* ignore */ }
10589 else if ( (pOrgCtx->cr2 & ~(uint64_t)3) == (pDebugCtx->cr2 & ~(uint64_t)3)
10590 && (pOrgCtx->cr2 & PAGE_OFFSET_MASK) == 0
10591 && fRem)
10592 { /* ignore */ }
10593 else
10594 CHECK_FIELD(cr2);
10595 }
10596 CHECK_FIELD(cr3);
10597 CHECK_FIELD(cr4);
10598 CHECK_FIELD(dr[0]);
10599 CHECK_FIELD(dr[1]);
10600 CHECK_FIELD(dr[2]);
10601 CHECK_FIELD(dr[3]);
10602 CHECK_FIELD(dr[6]);
10603 if (!fRem || (pOrgCtx->dr[7] & ~X86_DR7_RA1_MASK) != (pDebugCtx->dr[7] & ~X86_DR7_RA1_MASK)) /* REM 'mov drX,greg' bug.*/
10604 CHECK_FIELD(dr[7]);
10605 CHECK_FIELD(gdtr.cbGdt);
10606 CHECK_FIELD(gdtr.pGdt);
10607 CHECK_FIELD(idtr.cbIdt);
10608 CHECK_FIELD(idtr.pIdt);
10609 CHECK_SEL(ldtr);
10610 CHECK_SEL(tr);
10611 CHECK_FIELD(SysEnter.cs);
10612 CHECK_FIELD(SysEnter.eip);
10613 CHECK_FIELD(SysEnter.esp);
10614 CHECK_FIELD(msrEFER);
10615 CHECK_FIELD(msrSTAR);
10616 CHECK_FIELD(msrPAT);
10617 CHECK_FIELD(msrLSTAR);
10618 CHECK_FIELD(msrCSTAR);
10619 CHECK_FIELD(msrSFMASK);
10620 CHECK_FIELD(msrKERNELGSBASE);
10621
10622 if (cDiffs != 0)
10623 {
10624 DBGFR3Info(pVM->pUVM, "cpumguest", "verbose", NULL);
10625 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__);
10626 iemVerifyAssertMsg2(pIemCpu);
10627 RTAssertPanic();
10628 }
10629# undef CHECK_FIELD
10630# undef CHECK_BIT_FIELD
10631 }
10632
10633 /*
10634 * If the register state compared fine, check the verification event
10635 * records.
10636 */
10637 if (cDiffs == 0 && !pIemCpu->fOverlappingMovs)
10638 {
10639 /*
10640 * Compare verification event records.
10641 * - I/O port accesses should be a 1:1 match.
10642 */
10643 PIEMVERIFYEVTREC pIemRec = pIemCpu->pIemEvtRecHead;
10644 PIEMVERIFYEVTREC pOtherRec = pIemCpu->pOtherEvtRecHead;
10645 while (pIemRec && pOtherRec)
10646 {
10647 /* Since we might miss RAM writes and reads on the other side, skip any extra
10648 IEM RAM records here, but verify that skipped writes match guest memory. */
10649 while ( IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent)
10650 && !IEMVERIFYEVENT_IS_RAM(pOtherRec->enmEvent)
10651 && pIemRec->pNext)
10652 {
10653 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
10654 iemVerifyWriteRecord(pIemCpu, pIemRec, fRem);
10655 pIemRec = pIemRec->pNext;
10656 }
10657
10658 /* Do the compare. */
10659 if (pIemRec->enmEvent != pOtherRec->enmEvent)
10660 {
10661 iemVerifyAssertRecords(pIemCpu, pIemRec, pOtherRec, "Type mismatches");
10662 break;
10663 }
10664 bool fEquals;
10665 switch (pIemRec->enmEvent)
10666 {
10667 case IEMVERIFYEVENT_IOPORT_READ:
10668 fEquals = pIemRec->u.IOPortRead.Port == pOtherRec->u.IOPortRead.Port
10669 && pIemRec->u.IOPortRead.cbValue == pOtherRec->u.IOPortRead.cbValue;
10670 break;
10671 case IEMVERIFYEVENT_IOPORT_WRITE:
10672 fEquals = pIemRec->u.IOPortWrite.Port == pOtherRec->u.IOPortWrite.Port
10673 && pIemRec->u.IOPortWrite.cbValue == pOtherRec->u.IOPortWrite.cbValue
10674 && pIemRec->u.IOPortWrite.u32Value == pOtherRec->u.IOPortWrite.u32Value;
10675 break;
10676 case IEMVERIFYEVENT_IOPORT_STR_READ:
10677 fEquals = pIemRec->u.IOPortStrRead.Port == pOtherRec->u.IOPortStrRead.Port
10678 && pIemRec->u.IOPortStrRead.cbValue == pOtherRec->u.IOPortStrRead.cbValue
10679 && pIemRec->u.IOPortStrRead.cTransfers == pOtherRec->u.IOPortStrRead.cTransfers;
10680 break;
10681 case IEMVERIFYEVENT_IOPORT_STR_WRITE:
10682 fEquals = pIemRec->u.IOPortStrWrite.Port == pOtherRec->u.IOPortStrWrite.Port
10683 && pIemRec->u.IOPortStrWrite.cbValue == pOtherRec->u.IOPortStrWrite.cbValue
10684 && pIemRec->u.IOPortStrWrite.cTransfers == pOtherRec->u.IOPortStrWrite.cTransfers;
10685 break;
10686 case IEMVERIFYEVENT_RAM_READ:
10687 fEquals = pIemRec->u.RamRead.GCPhys == pOtherRec->u.RamRead.GCPhys
10688 && pIemRec->u.RamRead.cb == pOtherRec->u.RamRead.cb;
10689 break;
10690 case IEMVERIFYEVENT_RAM_WRITE:
10691 fEquals = pIemRec->u.RamWrite.GCPhys == pOtherRec->u.RamWrite.GCPhys
10692 && pIemRec->u.RamWrite.cb == pOtherRec->u.RamWrite.cb
10693 && !memcmp(pIemRec->u.RamWrite.ab, pOtherRec->u.RamWrite.ab, pIemRec->u.RamWrite.cb);
10694 break;
10695 default:
10696 fEquals = false;
10697 break;
10698 }
10699 if (!fEquals)
10700 {
10701 iemVerifyAssertRecords(pIemCpu, pIemRec, pOtherRec, "Mismatch");
10702 break;
10703 }
10704
10705 /* advance */
10706 pIemRec = pIemRec->pNext;
10707 pOtherRec = pOtherRec->pNext;
10708 }
10709
10710 /* Ignore extra writes and reads. */
10711 while (pIemRec && IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent))
10712 {
10713 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
10714 iemVerifyWriteRecord(pIemCpu, pIemRec, fRem);
10715 pIemRec = pIemRec->pNext;
10716 }
10717 if (pIemRec != NULL)
10718 iemVerifyAssertRecord(pIemCpu, pIemRec, "Extra IEM record!");
10719 else if (pOtherRec != NULL)
10720 iemVerifyAssertRecord(pIemCpu, pOtherRec, "Extra Other record!");
10721 }
10722 pIemCpu->CTX_SUFF(pCtx) = pOrgCtx;
10723}
10724
10725#else /* !IEM_VERIFICATION_MODE_FULL || !IN_RING3 */
10726
10727/* stubs */
10728IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
10729{
10730 NOREF(pIemCpu); NOREF(Port); NOREF(pu32Value); NOREF(cbValue);
10731 return VERR_INTERNAL_ERROR;
10732}
10733
10734IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
10735{
10736 NOREF(pIemCpu); NOREF(Port); NOREF(u32Value); NOREF(cbValue);
10737 return VERR_INTERNAL_ERROR;
10738}
10739
10740#endif /* !IEM_VERIFICATION_MODE_FULL || !IN_RING3 */
10741
10742
10743#ifdef LOG_ENABLED
10744/**
10745 * Logs the current instruction.
10746 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10747 * @param pCtx The current CPU context.
10748 * @param fSameCtx Set if we have the same context information as the VMM,
10749 * clear if we may have already executed an instruction in
10750 * our debug context. When clear, we assume IEMCPU holds
10751 * valid CPU mode info.
10752 */
10753IEM_STATIC void iemLogCurInstr(PVMCPU pVCpu, PCPUMCTX pCtx, bool fSameCtx)
10754{
10755# ifdef IN_RING3
10756 if (LogIs2Enabled())
10757 {
10758 char szInstr[256];
10759 uint32_t cbInstr = 0;
10760 if (fSameCtx)
10761 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
10762 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
10763 szInstr, sizeof(szInstr), &cbInstr);
10764 else
10765 {
10766 uint32_t fFlags = 0;
10767 switch (pVCpu->iem.s.enmCpuMode)
10768 {
10769 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
10770 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
10771 case IEMMODE_16BIT:
10772 if (!(pCtx->cr0 & X86_CR0_PE) || pCtx->eflags.Bits.u1VM)
10773 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
10774 else
10775 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
10776 break;
10777 }
10778 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pCtx->cs.Sel, pCtx->rip, fFlags,
10779 szInstr, sizeof(szInstr), &cbInstr);
10780 }
10781
10782 PCX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
10783 Log2(("****\n"
10784 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
10785 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
10786 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
10787 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
10788 " %s\n"
10789 ,
10790 pCtx->eax, pCtx->ebx, pCtx->ecx, pCtx->edx, pCtx->esi, pCtx->edi,
10791 pCtx->eip, pCtx->esp, pCtx->ebp, pCtx->eflags.Bits.u2IOPL, pCtx->tr.Sel,
10792 pCtx->cs.Sel, pCtx->ss.Sel, pCtx->ds.Sel, pCtx->es.Sel,
10793 pCtx->fs.Sel, pCtx->gs.Sel, pCtx->eflags.u,
10794 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
10795 szInstr));
10796
10797 if (LogIs3Enabled())
10798 DBGFR3Info(pVCpu->pVMR3->pUVM, "cpumguest", "verbose", NULL);
10799 }
10800 else
10801# endif
10802 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n",
10803 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u));
10804}
10805#endif
10806
10807
10808/**
10809 * Makes status code adjustments (pass up from I/O and access handlers)
10810 * as well as maintaining statistics.
10811 *
10812 * @returns Strict VBox status code to pass up.
10813 * @param pIemCpu The IEM per CPU data.
10814 * @param rcStrict The status from executing an instruction.
10815 */
10816DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecStatusCodeFiddling(PIEMCPU pIemCpu, VBOXSTRICTRC rcStrict)
10817{
10818 if (rcStrict != VINF_SUCCESS)
10819 {
10820 if (RT_SUCCESS(rcStrict))
10821 {
10822 AssertMsg( (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
10823 || rcStrict == VINF_IOM_R3_IOPORT_READ
10824 || rcStrict == VINF_IOM_R3_IOPORT_WRITE
10825 || rcStrict == VINF_IOM_R3_MMIO_READ
10826 || rcStrict == VINF_IOM_R3_MMIO_READ_WRITE
10827 || rcStrict == VINF_IOM_R3_MMIO_WRITE
10828 || rcStrict == VINF_CPUM_R3_MSR_READ
10829 || rcStrict == VINF_CPUM_R3_MSR_WRITE
10830 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
10831 || rcStrict == VINF_EM_RAW_TO_R3
10832 || rcStrict == VINF_EM_RAW_EMULATE_IO_BLOCK
10833 /* raw-mode / virt handlers only: */
10834 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT
10835 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT
10836 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT
10837 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT
10838 || rcStrict == VINF_SELM_SYNC_GDT
10839 || rcStrict == VINF_CSAM_PENDING_ACTION
10840 || rcStrict == VINF_PATM_CHECK_PATCH_PAGE
10841 , ("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
10842/** @todo adjust for VINF_EM_RAW_EMULATE_INSTR */
10843 int32_t const rcPassUp = pIemCpu->rcPassUp;
10844 if (rcPassUp == VINF_SUCCESS)
10845 pIemCpu->cRetInfStatuses++;
10846 else if ( rcPassUp < VINF_EM_FIRST
10847 || rcPassUp > VINF_EM_LAST
10848 || rcPassUp < VBOXSTRICTRC_VAL(rcStrict))
10849 {
10850 Log(("IEM: rcPassUp=%Rrc! rcStrict=%Rrc\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
10851 pIemCpu->cRetPassUpStatus++;
10852 rcStrict = rcPassUp;
10853 }
10854 else
10855 {
10856 Log(("IEM: rcPassUp=%Rrc rcStrict=%Rrc!\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
10857 pIemCpu->cRetInfStatuses++;
10858 }
10859 }
10860 else if (rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED)
10861 pIemCpu->cRetAspectNotImplemented++;
10862 else if (rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
10863 pIemCpu->cRetInstrNotImplemented++;
10864#ifdef IEM_VERIFICATION_MODE_FULL
10865 else if (rcStrict == VERR_IEM_RESTART_INSTRUCTION)
10866 rcStrict = VINF_SUCCESS;
10867#endif
10868 else
10869 pIemCpu->cRetErrStatuses++;
10870 }
10871 else if (pIemCpu->rcPassUp != VINF_SUCCESS)
10872 {
10873 pIemCpu->cRetPassUpStatus++;
10874 rcStrict = pIemCpu->rcPassUp;
10875 }
10876
10877 return rcStrict;
10878}
10879
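/*
 * Worked example of the status fiddling above (illustration only): if the
 * instruction body returned VINF_SUCCESS but a memory or I/O access handler
 * recorded e.g. VINF_IOM_R3_MMIO_WRITE in rcPassUp, the pass-up status is
 * what gets returned so the access can be completed in ring-3; a VINF_SUCCESS
 * pass-up leaves the instruction status untouched.
 */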
10880
10881/**
10882 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
10883 * IEMExecOneWithPrefetchedByPC.
10884 *
10885 * @return Strict VBox status code.
10886 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10887 * @param pIemCpu The IEM per CPU data.
10888 * @param fExecuteInhibit If set, execute the instruction following CLI,
10889 * POP SS and MOV SS,GR.
10890 */
10891DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPU pVCpu, PIEMCPU pIemCpu, bool fExecuteInhibit)
10892{
10893 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
10894 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
10895 if (rcStrict == VINF_SUCCESS)
10896 pIemCpu->cInstructions++;
10897 if (pIemCpu->cActiveMappings > 0)
10898 iemMemRollback(pIemCpu);
10899//#ifdef DEBUG
10900// AssertMsg(pIemCpu->offOpcode == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", pIemCpu->offOpcode, cbInstr));
10901//#endif
10902
10903 /* Execute the next instruction as well if a cli, pop ss or
10904 mov ss, Gr has just completed successfully. */
10905 if ( fExecuteInhibit
10906 && rcStrict == VINF_SUCCESS
10907 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
10908 && EMGetInhibitInterruptsPC(pVCpu) == pIemCpu->CTX_SUFF(pCtx)->rip )
10909 {
10910 rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, pIemCpu->fBypassHandlers);
10911 if (rcStrict == VINF_SUCCESS)
10912 {
10913# ifdef LOG_ENABLED
10914 iemLogCurInstr(IEMCPU_TO_VMCPU(pIemCpu), pIemCpu->CTX_SUFF(pCtx), false);
10915# endif
10916 IEM_OPCODE_GET_NEXT_U8(&b);
10917 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
10918 if (rcStrict == VINF_SUCCESS)
10919 pIemCpu->cInstructions++;
10920 if (pIemCpu->cActiveMappings > 0)
10921 iemMemRollback(pIemCpu);
10922 }
10923 EMSetInhibitInterruptsPC(pVCpu, UINT64_C(0x7777555533331111));
10924 }
10925
10926 /*
10927 * Return value fiddling, statistics and sanity assertions.
10928 */
10929 rcStrict = iemExecStatusCodeFiddling(pIemCpu, rcStrict);
10930
10931 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->cs));
10932 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->ss));
10933#if defined(IEM_VERIFICATION_MODE_FULL)
10934 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->es));
10935 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->ds));
10936 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->fs));
10937 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->gs));
10938#endif
10939 return rcStrict;
10940}
10941
10942
10943#ifdef IN_RC
10944/**
10945 * Re-enters raw-mode or ensures we return to ring-3.
10946 *
10947 * @returns rcStrict, maybe modified.
10948 * @param pIemCpu The IEM CPU structure.
10949 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10950 * @param pCtx The current CPU context.
10951 * @param rcStrict The status code returned by the interpreter.
10952 */
10953DECLINLINE(VBOXSTRICTRC) iemRCRawMaybeReenter(PIEMCPU pIemCpu, PVMCPU pVCpu, PCPUMCTX pCtx, VBOXSTRICTRC rcStrict)
10954{
10955 if (!pIemCpu->fInPatchCode)
10956 CPUMRawEnter(pVCpu);
10957 return rcStrict;
10958}
10959#endif
10960
10961
10962/**
10963 * Execute one instruction.
10964 *
10965 * @return Strict VBox status code.
10966 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10967 */
10968VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPU pVCpu)
10969{
10970 PIEMCPU pIemCpu = &pVCpu->iem.s;
10971
10972#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
10973 iemExecVerificationModeSetup(pIemCpu);
10974#endif
10975#ifdef LOG_ENABLED
10976 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
10977 iemLogCurInstr(pVCpu, pCtx, true);
10978#endif
10979
10980 /*
10981 * Do the decoding and emulation.
10982 */
10983 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, false);
10984 if (rcStrict == VINF_SUCCESS)
10985 rcStrict = iemExecOneInner(pVCpu, pIemCpu, true);
10986
10987#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
10988 /*
10989 * Assert some sanity.
10990 */
10991 iemExecVerificationModeCheck(pIemCpu);
10992#endif
10993#ifdef IN_RC
10994 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pIemCpu->CTX_SUFF(pCtx), rcStrict);
10995#endif
10996 if (rcStrict != VINF_SUCCESS)
10997 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
10998 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
10999 return rcStrict;
11000}
11001
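/*
 * Hypothetical usage sketch (not actual EM code): a ring-3 caller on the EMT
 * would typically call IEMExecOne in a small loop and bail out on the first
 * non-VINF_SUCCESS strict status.  The helper name and the instruction budget
 * below are illustrative assumptions only.
 */
#if 0
static VBOXSTRICTRC emR3SampleInterpretBlock(PVMCPU pVCpu)
{
    for (uint32_t i = 0; i < 16; i++)           /* arbitrary instruction budget */
    {
        VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
        if (rcStrict != VINF_SUCCESS)
            return rcStrict;                    /* let the caller act on the status */
    }
    return VINF_SUCCESS;
}
#endif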
11002
11003VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
11004{
11005 PIEMCPU pIemCpu = &pVCpu->iem.s;
11006 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
11007 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
11008
11009 uint32_t const cbOldWritten = pIemCpu->cbWritten;
11010 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, false);
11011 if (rcStrict == VINF_SUCCESS)
11012 {
11013 rcStrict = iemExecOneInner(pVCpu, pIemCpu, true);
11014 if (pcbWritten)
11015 *pcbWritten = pIemCpu->cbWritten - cbOldWritten;
11016 }
11017
11018#ifdef IN_RC
11019 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pCtx, rcStrict);
11020#endif
11021 return rcStrict;
11022}
11023
11024
11025VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
11026 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
11027{
11028 PIEMCPU pIemCpu = &pVCpu->iem.s;
11029 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
11030 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
11031
11032 VBOXSTRICTRC rcStrict;
11033 if ( cbOpcodeBytes
11034 && pCtx->rip == OpcodeBytesPC)
11035 {
11036 iemInitDecoder(pIemCpu, false);
11037 pIemCpu->cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pIemCpu->abOpcode));
11038 memcpy(pIemCpu->abOpcode, pvOpcodeBytes, pIemCpu->cbOpcode);
11039 rcStrict = VINF_SUCCESS;
11040 }
11041 else
11042 rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, false);
11043 if (rcStrict == VINF_SUCCESS)
11044 {
11045 rcStrict = iemExecOneInner(pVCpu, pIemCpu, true);
11046 }
11047
11048#ifdef IN_RC
11049 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pCtx, rcStrict);
11050#endif
11051 return rcStrict;
11052}
11053
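/*
 * Hypothetical usage sketch: a caller that already has the opcode bytes (for
 * instance from an exit record) can hand them in to skip the prefetch, as long
 * as they really belong to the current RIP; otherwise the function falls back
 * to a normal prefetch.  The variable names pCtxCore, GCPtrRip, abBytes and
 * cbBytes are illustrative assumptions.
 */
#if 0
    VBOXSTRICTRC rcStrict = IEMExecOneWithPrefetchedByPC(pVCpu, pCtxCore, GCPtrRip /* == pCtx->rip */,
                                                         abBytes, cbBytes);
#endif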
11054
11055VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
11056{
11057 PIEMCPU pIemCpu = &pVCpu->iem.s;
11058 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
11059 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
11060
11061 uint32_t const cbOldWritten = pIemCpu->cbWritten;
11062 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, true);
11063 if (rcStrict == VINF_SUCCESS)
11064 {
11065 rcStrict = iemExecOneInner(pVCpu, pIemCpu, false);
11066 if (pcbWritten)
11067 *pcbWritten = pIemCpu->cbWritten - cbOldWritten;
11068 }
11069
11070#ifdef IN_RC
11071 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pCtx, rcStrict);
11072#endif
11073 return rcStrict;
11074}
11075
11076
11077VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
11078 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
11079{
11080 PIEMCPU pIemCpu = &pVCpu->iem.s;
11081 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
11082 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
11083
11084 VBOXSTRICTRC rcStrict;
11085 if ( cbOpcodeBytes
11086 && pCtx->rip == OpcodeBytesPC)
11087 {
11088 iemInitDecoder(pIemCpu, true);
11089 pIemCpu->cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pIemCpu->abOpcode));
11090 memcpy(pIemCpu->abOpcode, pvOpcodeBytes, pIemCpu->cbOpcode);
11091 rcStrict = VINF_SUCCESS;
11092 }
11093 else
11094 rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, true);
11095 if (rcStrict == VINF_SUCCESS)
11096 rcStrict = iemExecOneInner(pVCpu, pIemCpu, false);
11097
11098#ifdef IN_RC
11099 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pCtx, rcStrict);
11100#endif
11101 return rcStrict;
11102}
11103
11104
11105VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPU pVCpu)
11106{
11107 PIEMCPU pIemCpu = &pVCpu->iem.s;
11108
11109 /*
11110 * See if there is an interrupt pending in TRPM and inject it if we can.
11111 */
11112#if !defined(IEM_VERIFICATION_MODE_FULL) || !defined(IN_RING3)
11113 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
11114# ifdef IEM_VERIFICATION_MODE_FULL
11115 pIemCpu->uInjectCpl = UINT8_MAX;
11116# endif
11117 if ( pCtx->eflags.Bits.u1IF
11118 && TRPMHasTrap(pVCpu)
11119 && EMGetInhibitInterruptsPC(pVCpu) != pCtx->rip)
11120 {
11121 uint8_t u8TrapNo;
11122 TRPMEVENT enmType;
11123 RTGCUINT uErrCode;
11124 RTGCPTR uCr2;
11125 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
11126 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
11127 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
11128 TRPMResetTrap(pVCpu);
11129 }
11130#else
11131 iemExecVerificationModeSetup(pIemCpu);
11132 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
11133#endif
11134
11135 /*
11136 * Log the state.
11137 */
11138#ifdef LOG_ENABLED
11139 iemLogCurInstr(pVCpu, pCtx, true);
11140#endif
11141
11142 /*
11143 * Do the decoding and emulation.
11144 */
11145 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, false);
11146 if (rcStrict == VINF_SUCCESS)
11147 rcStrict = iemExecOneInner(pVCpu, pIemCpu, true);
11148
11149#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
11150 /*
11151 * Assert some sanity.
11152 */
11153 iemExecVerificationModeCheck(pIemCpu);
11154#endif
11155
11156 /*
11157 * Maybe re-enter raw-mode and log.
11158 */
11159#ifdef IN_RC
11160 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pIemCpu->CTX_SUFF(pCtx), rcStrict);
11161#endif
11162 if (rcStrict != VINF_SUCCESS)
11163 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
11164 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
11165 return rcStrict;
11166}
11167
11168
11169
11170/**
11171 * Injects a trap, fault, abort, software interrupt or external interrupt.
11172 *
11173 * The parameter list matches TRPMQueryTrapAll pretty closely.
11174 *
11175 * @returns Strict VBox status code.
11176 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11177 * @param u8TrapNo The trap number.
11178 * @param enmType What type is it (trap/fault/abort), software
11179 * interrupt or hardware interrupt.
11180 * @param uErrCode The error code if applicable.
11181 * @param uCr2 The CR2 value if applicable.
11182 * @param cbInstr The instruction length (only relevant for
11183 * software interrupts).
11184 */
11185VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPU pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
11186 uint8_t cbInstr)
11187{
11188 iemInitDecoder(&pVCpu->iem.s, false);
11189#ifdef DBGFTRACE_ENABLED
11190 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
11191 u8TrapNo, enmType, uErrCode, uCr2);
11192#endif
11193
11194 uint32_t fFlags;
11195 switch (enmType)
11196 {
11197 case TRPM_HARDWARE_INT:
11198 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
11199 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
11200 uErrCode = uCr2 = 0;
11201 break;
11202
11203 case TRPM_SOFTWARE_INT:
11204 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
11205 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
11206 uErrCode = uCr2 = 0;
11207 break;
11208
11209 case TRPM_TRAP:
11210 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
11211 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
11212 if (u8TrapNo == X86_XCPT_PF)
11213 fFlags |= IEM_XCPT_FLAGS_CR2;
11214 switch (u8TrapNo)
11215 {
11216 case X86_XCPT_DF:
11217 case X86_XCPT_TS:
11218 case X86_XCPT_NP:
11219 case X86_XCPT_SS:
11220 case X86_XCPT_PF:
11221 case X86_XCPT_AC:
11222 fFlags |= IEM_XCPT_FLAGS_ERR;
11223 break;
11224
11225 case X86_XCPT_NMI:
11226 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
11227 break;
11228 }
11229 break;
11230
11231 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11232 }
11233
11234 return iemRaiseXcptOrInt(&pVCpu->iem.s, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
11235}
11236
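/*
 * Hypothetical usage sketch: injecting a page fault on behalf of the caller.
 * The error code bits and the fault address variable (GCPtrFaultAddress) are
 * made-up illustration values; X86_XCPT_PF and TRPM_TRAP are the same
 * constants handled above.
 */
#if 0
    VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, X86_XCPT_PF, TRPM_TRAP,
                                          X86_TRAP_PF_P | X86_TRAP_PF_RW /* uErrCode */,
                                          GCPtrFaultAddress /* uCr2 */, 0 /* cbInstr */);
#endif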
11237
11238/**
11239 * Injects the active TRPM event.
11240 *
11241 * @returns Strict VBox status code.
11242 * @param pVCpu The cross context virtual CPU structure.
11243 */
11244VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPU pVCpu)
11245{
11246#ifndef IEM_IMPLEMENTS_TASKSWITCH
11247 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
11248#else
11249 uint8_t u8TrapNo;
11250 TRPMEVENT enmType;
11251 RTGCUINT uErrCode;
11252 RTGCUINTPTR uCr2;
11253 uint8_t cbInstr;
11254 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr);
11255 if (RT_FAILURE(rc))
11256 return rc;
11257
11258 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
11259
11260 /** @todo Are there any other codes that imply the event was successfully
11261 * delivered to the guest? See @bugref{6607}. */
11262 if ( rcStrict == VINF_SUCCESS
11263 || rcStrict == VINF_IEM_RAISED_XCPT)
11264 {
11265 TRPMResetTrap(pVCpu);
11266 }
11267 return rcStrict;
11268#endif
11269}
11270
11271
11272VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
11273{
11274 return VERR_NOT_IMPLEMENTED;
11275}
11276
11277
11278VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
11279{
11280 return VERR_NOT_IMPLEMENTED;
11281}
11282
11283
11284#if 0 /* The IRET-to-v8086 mode in PATM is very optimistic, so I don't dare do this yet. */
11285/**
11286 * Executes an IRET instruction with default operand size.
11287 *
11288 * This is for PATM.
11289 *
11290 * @returns VBox status code.
11291 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11292 * @param pCtxCore The register frame.
11293 */
11294VMM_INT_DECL(int) IEMExecInstr_iret(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
11295{
11296 PIEMCPU pIemCpu = &pVCpu->iem.s;
11297 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
11298
11299 iemCtxCoreToCtx(pCtx, pCtxCore);
11300 iemInitDecoder(pIemCpu);
11301 VBOXSTRICTRC rcStrict = iemCImpl_iret(pIemCpu, 1, pIemCpu->enmDefOpSize);
11302 if (rcStrict == VINF_SUCCESS)
11303 iemCtxToCtxCore(pCtxCore, pCtx);
11304 else
11305 LogFlow(("IEMExecInstr_iret: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
11306 pCtx->cs, pCtx->rip, pCtx->ss, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
11307 return rcStrict;
11308}
11309#endif
11310
11311
11312/**
11313 * Macro used by the IEMExec* methods to check the given instruction length.
11314 *
11315 * Will return on failure!
11316 *
11317 * @param a_cbInstr The given instruction length.
11318 * @param a_cbMin The minimum length.
11319 */
11320#define IEMEXEC_ASSERT_INSTR_LEN_RETURN(a_cbInstr, a_cbMin) \
11321 AssertMsgReturn((unsigned)(a_cbInstr) - (unsigned)(a_cbMin) <= (unsigned)15 - (unsigned)(a_cbMin), \
11322 ("cbInstr=%u cbMin=%u\n", (a_cbInstr), (a_cbMin)), VERR_IEM_INVALID_INSTR_LENGTH)
11323
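/* Worked example of the check above: with a_cbMin = 1 the unsigned subtraction
   maps the valid lengths 1..15 onto 0..14, while a_cbInstr = 0 wraps around to
   a huge value and 16+ exceeds 14, so a single unsigned compare rejects both
   too-short and too-long (more than 15 byte) instructions. */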
11324
11325/**
11326 * Interface for HM and EM for executing string I/O OUT (write) instructions.
11327 *
11328 * This API ASSUMES that the caller has already verified that the guest code is
11329 * allowed to access the I/O port. (The I/O port is in the DX register in the
11330 * guest state.)
11331 *
11332 * @returns Strict VBox status code.
11333 * @param pVCpu The cross context virtual CPU structure.
11334 * @param cbValue The size of the I/O port access (1, 2, or 4).
11335 * @param enmAddrMode The addressing mode.
11336 * @param fRepPrefix Indicates whether a repeat prefix is used
11337 * (doesn't matter which for this instruction).
11338 * @param cbInstr The instruction length in bytes.
11339 * @param iEffSeg The effective segment address.
11340 */
11341VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
11342 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg)
11343{
11344 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
11345 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
11346
11347 /*
11348 * State init.
11349 */
11350 PIEMCPU pIemCpu = &pVCpu->iem.s;
11351 iemInitExec(pIemCpu, false /*fBypassHandlers*/);
11352
11353 /*
11354 * Switch orgy for getting to the right handler.
11355 */
11356 VBOXSTRICTRC rcStrict;
11357 if (fRepPrefix)
11358 {
11359 switch (enmAddrMode)
11360 {
11361 case IEMMODE_16BIT:
11362 switch (cbValue)
11363 {
11364 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11365 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11366 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11367 default:
11368 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11369 }
11370 break;
11371
11372 case IEMMODE_32BIT:
11373 switch (cbValue)
11374 {
11375 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11376 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11377 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11378 default:
11379 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11380 }
11381 break;
11382
11383 case IEMMODE_64BIT:
11384 switch (cbValue)
11385 {
11386 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11387 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11388 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11389 default:
11390 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11391 }
11392 break;
11393
11394 default:
11395 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
11396 }
11397 }
11398 else
11399 {
11400 switch (enmAddrMode)
11401 {
11402 case IEMMODE_16BIT:
11403 switch (cbValue)
11404 {
11405 case 1: rcStrict = iemCImpl_outs_op8_addr16(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11406 case 2: rcStrict = iemCImpl_outs_op16_addr16(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11407 case 4: rcStrict = iemCImpl_outs_op32_addr16(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11408 default:
11409 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11410 }
11411 break;
11412
11413 case IEMMODE_32BIT:
11414 switch (cbValue)
11415 {
11416 case 1: rcStrict = iemCImpl_outs_op8_addr32(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11417 case 2: rcStrict = iemCImpl_outs_op16_addr32(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11418 case 4: rcStrict = iemCImpl_outs_op32_addr32(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11419 default:
11420 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11421 }
11422 break;
11423
11424 case IEMMODE_64BIT:
11425 switch (cbValue)
11426 {
11427 case 1: rcStrict = iemCImpl_outs_op8_addr64(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11428 case 2: rcStrict = iemCImpl_outs_op16_addr64(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11429 case 4: rcStrict = iemCImpl_outs_op32_addr64(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11430 default:
11431 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11432 }
11433 break;
11434
11435 default:
11436 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
11437 }
11438 }
11439
11440 iemUninitExec(pIemCpu);
11441 return iemExecStatusCodeFiddling(pIemCpu, rcStrict);
11442}
11443
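/*
 * Hypothetical usage sketch: an HM exit handler for a REP OUTSB in a 32-bit
 * guest might, after checking I/O permissions, call the API like this.  The
 * instruction length (F3 6E = 2 bytes) and the DS segment are the usual
 * defaults; the surrounding exit context is assumed.
 */
#if 0
    VBOXSTRICTRC rcStrict = IEMExecStringIoWrite(pVCpu, 1 /*cbValue*/, IEMMODE_32BIT,
                                                 true /*fRepPrefix*/, 2 /*cbInstr*/, X86_SREG_DS);
#endif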
11444
11445/**
11446 * Interface for HM and EM for executing string I/O IN (read) instructions.
11447 *
11448 * This API ASSUMES that the caller has already verified that the guest code is
11449 * allowed to access the I/O port. (The I/O port is in the DX register in the
11450 * guest state.)
11451 *
11452 * @returns Strict VBox status code.
11453 * @param pVCpu The cross context virtual CPU structure.
11454 * @param cbValue The size of the I/O port access (1, 2, or 4).
11455 * @param enmAddrMode The addressing mode.
11456 * @param fRepPrefix Indicates whether a repeat prefix is used
11457 * (doesn't matter which for this instruction).
11458 * @param cbInstr The instruction length in bytes.
11459 */
11460VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
11461 bool fRepPrefix, uint8_t cbInstr)
11462{
11463 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
11464
11465 /*
11466 * State init.
11467 */
11468 PIEMCPU pIemCpu = &pVCpu->iem.s;
11469 iemInitExec(pIemCpu, false /*fBypassHandlers*/);
11470
11471 /*
11472 * Switch orgy for getting to the right handler.
11473 */
11474 VBOXSTRICTRC rcStrict;
11475 if (fRepPrefix)
11476 {
11477 switch (enmAddrMode)
11478 {
11479 case IEMMODE_16BIT:
11480 switch (cbValue)
11481 {
11482 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11483 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11484 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11485 default:
11486 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11487 }
11488 break;
11489
11490 case IEMMODE_32BIT:
11491 switch (cbValue)
11492 {
11493 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11494 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11495 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11496 default:
11497 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11498 }
11499 break;
11500
11501 case IEMMODE_64BIT:
11502 switch (cbValue)
11503 {
11504 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11505 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11506 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11507 default:
11508 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11509 }
11510 break;
11511
11512 default:
11513 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
11514 }
11515 }
11516 else
11517 {
11518 switch (enmAddrMode)
11519 {
11520 case IEMMODE_16BIT:
11521 switch (cbValue)
11522 {
11523 case 1: rcStrict = iemCImpl_ins_op8_addr16(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11524 case 2: rcStrict = iemCImpl_ins_op16_addr16(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11525 case 4: rcStrict = iemCImpl_ins_op32_addr16(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11526 default:
11527 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11528 }
11529 break;
11530
11531 case IEMMODE_32BIT:
11532 switch (cbValue)
11533 {
11534 case 1: rcStrict = iemCImpl_ins_op8_addr32(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11535 case 2: rcStrict = iemCImpl_ins_op16_addr32(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11536 case 4: rcStrict = iemCImpl_ins_op32_addr32(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11537 default:
11538 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11539 }
11540 break;
11541
11542 case IEMMODE_64BIT:
11543 switch (cbValue)
11544 {
11545 case 1: rcStrict = iemCImpl_ins_op8_addr64(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11546 case 2: rcStrict = iemCImpl_ins_op16_addr64(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11547 case 4: rcStrict = iemCImpl_ins_op32_addr64(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11548 default:
11549 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11550 }
11551 break;
11552
11553 default:
11554 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
11555 }
11556 }
11557
11558 iemUninitExec(pIemCpu);
11559 return iemExecStatusCodeFiddling(pIemCpu, rcStrict);
11560}
11561
11562
11563
11564/**
11565 * Interface for HM and EM to write to a CRx register.
11566 *
11567 * @returns Strict VBox status code.
11568 * @param pVCpu The cross context virtual CPU structure.
11569 * @param cbInstr The instruction length in bytes.
11570 * @param iCrReg The control register number (destination).
11571 * @param iGReg The general purpose register number (source).
11572 *
11573 * @remarks In ring-0 not all of the state needs to be synced in.
11574 */
11575VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
11576{
11577 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11578 Assert(iCrReg < 16);
11579 Assert(iGReg < 16);
11580
11581 PIEMCPU pIemCpu = &pVCpu->iem.s;
11582 iemInitExec(pIemCpu, false /*fBypassHandlers*/);
11583 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
11584 iemUninitExec(pIemCpu);
11585 return iemExecStatusCodeFiddling(pIemCpu, rcStrict);
11586}
11587
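/*
 * Hypothetical usage sketch: emulating "mov cr0, eax" (0F 22 C0, three bytes)
 * on a CR access exit.  The register indices use the usual x86.h constants;
 * the exit decoding itself is assumed.
 */
#if 0
    VBOXSTRICTRC rcStrict = IEMExecDecodedMovCRxWrite(pVCpu, 3 /*cbInstr*/, 0 /*iCrReg=CR0*/,
                                                      X86_GREG_xAX /*iGReg*/);
#endif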
11588
11589/**
11590 * Interface for HM and EM to read from a CRx register.
11591 *
11592 * @returns Strict VBox status code.
11593 * @param pVCpu The cross context virtual CPU structure.
11594 * @param cbInstr The instruction length in bytes.
11595 * @param iGReg The general purpose register number (destination).
11596 * @param iCrReg The control register number (source).
11597 *
11598 * @remarks In ring-0 not all of the state needs to be synced in.
11599 */
11600VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
11601{
11602 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11603 Assert(iCrReg < 16);
11604 Assert(iGReg < 16);
11605
11606 PIEMCPU pIemCpu = &pVCpu->iem.s;
11607 iemInitExec(pIemCpu, false /*fBypassHandlers*/);
11608 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
11609 iemUninitExec(pIemCpu);
11610 return iemExecStatusCodeFiddling(pIemCpu, rcStrict);
11611}
11612
11613
11614/**
11615 * Interface for HM and EM to clear the CR0[TS] bit.
11616 *
11617 * @returns Strict VBox status code.
11618 * @param pVCpu The cross context virtual CPU structure.
11619 * @param cbInstr The instruction length in bytes.
11620 *
11621 * @remarks In ring-0 not all of the state needs to be synced in.
11622 */
11623VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPU pVCpu, uint8_t cbInstr)
11624{
11625 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11626
11627 PIEMCPU pIemCpu = &pVCpu->iem.s;
11628 iemInitExec(pIemCpu, false /*fBypassHandlers*/);
11629 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
11630 iemUninitExec(pIemCpu);
11631 return iemExecStatusCodeFiddling(pIemCpu, rcStrict);
11632}
11633
11634
11635/**
11636 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
11637 *
11638 * @returns Strict VBox status code.
11639 * @param pVCpu The cross context virtual CPU structure.
11640 * @param cbInstr The instruction length in bytes.
11641 * @param uValue The value to load into CR0.
11642 *
11643 * @remarks In ring-0 not all of the state needs to be synced in.
11644 */
11645VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPU pVCpu, uint8_t cbInstr, uint16_t uValue)
11646{
11647 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11648
11649 PIEMCPU pIemCpu = &pVCpu->iem.s;
11650 iemInitExec(pIemCpu, false /*fBypassHandlers*/);
11651 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_lmsw, uValue);
11652 iemUninitExec(pIemCpu);
11653 return iemExecStatusCodeFiddling(pIemCpu, rcStrict);
11654}
11655
11656
11657/**
11658 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
11659 *
11660 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
11661 *
11662 * @returns Strict VBox status code.
11663 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11664 * @param cbInstr The instruction length in bytes.
11665 * @remarks In ring-0 not all of the state needs to be synced in.
11666 * @thread EMT(pVCpu)
11667 */
11668VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPU pVCpu, uint8_t cbInstr)
11669{
11670 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11671
11672 PIEMCPU pIemCpu = &pVCpu->iem.s;
11673 iemInitExec(pIemCpu, false /*fBypassHandlers*/);
11674 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
11675 iemUninitExec(pIemCpu);
11676 return iemExecStatusCodeFiddling(pIemCpu, rcStrict);
11677}
11678
11679#ifdef IN_RING3
11680
11681/**
11682 * Called by force-flag handling code when VMCPU_FF_IEM is set.
11683 *
11684 * @returns Merge between @a rcStrict and what the commit operation returned.
11685 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11686 * @param rcStrict The status code returned by ring-0 or raw-mode.
11687 */
11688VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3DoPendingAction(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
11689{
11690 PIEMCPU pIemCpu = &pVCpu->iem.s;
11691
11692 /*
11693 * Retrieve and reset the pending commit.
11694 */
11695 IEMCOMMIT const enmFn = pIemCpu->PendingCommit.enmFn;
11696 pIemCpu->PendingCommit.enmFn = IEMCOMMIT_INVALID;
11697 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);
11698
11699 /*
11700 * Must reset pass-up status code.
11701 */
11702 pIemCpu->rcPassUp = VINF_SUCCESS;
11703
11704 /*
11705 * Call the function. Currently using switch here instead of function
11706 * pointer table as a switch won't get skewed.
11707 */
11708 VBOXSTRICTRC rcStrictCommit;
11709 switch (enmFn)
11710 {
11711 case IEMCOMMIT_INS_OP8_ADDR16: rcStrictCommit = iemR3CImpl_commit_ins_op8_addr16(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11712 case IEMCOMMIT_INS_OP8_ADDR32: rcStrictCommit = iemR3CImpl_commit_ins_op8_addr32(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11713 case IEMCOMMIT_INS_OP8_ADDR64: rcStrictCommit = iemR3CImpl_commit_ins_op8_addr64(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11714 case IEMCOMMIT_INS_OP16_ADDR16: rcStrictCommit = iemR3CImpl_commit_ins_op16_addr16(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11715 case IEMCOMMIT_INS_OP16_ADDR32: rcStrictCommit = iemR3CImpl_commit_ins_op16_addr32(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11716 case IEMCOMMIT_INS_OP16_ADDR64: rcStrictCommit = iemR3CImpl_commit_ins_op16_addr64(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11717 case IEMCOMMIT_INS_OP32_ADDR16: rcStrictCommit = iemR3CImpl_commit_ins_op32_addr16(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11718 case IEMCOMMIT_INS_OP32_ADDR32: rcStrictCommit = iemR3CImpl_commit_ins_op32_addr32(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11719 case IEMCOMMIT_INS_OP32_ADDR64: rcStrictCommit = iemR3CImpl_commit_ins_op32_addr64(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11720 case IEMCOMMIT_REP_INS_OP8_ADDR16: rcStrictCommit = iemR3CImpl_commit_rep_ins_op8_addr16(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11721 case IEMCOMMIT_REP_INS_OP8_ADDR32: rcStrictCommit = iemR3CImpl_commit_rep_ins_op8_addr32(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11722 case IEMCOMMIT_REP_INS_OP8_ADDR64: rcStrictCommit = iemR3CImpl_commit_rep_ins_op8_addr64(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11723 case IEMCOMMIT_REP_INS_OP16_ADDR16: rcStrictCommit = iemR3CImpl_commit_rep_ins_op16_addr16(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11724 case IEMCOMMIT_REP_INS_OP16_ADDR32: rcStrictCommit = iemR3CImpl_commit_rep_ins_op16_addr32(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11725 case IEMCOMMIT_REP_INS_OP16_ADDR64: rcStrictCommit = iemR3CImpl_commit_rep_ins_op16_addr64(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11726 case IEMCOMMIT_REP_INS_OP32_ADDR16: rcStrictCommit = iemR3CImpl_commit_rep_ins_op32_addr16(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11727 case IEMCOMMIT_REP_INS_OP32_ADDR32: rcStrictCommit = iemR3CImpl_commit_rep_ins_op32_addr32(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11728 case IEMCOMMIT_REP_INS_OP32_ADDR64: rcStrictCommit = iemR3CImpl_commit_rep_ins_op32_addr64(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11729 default:
11730 AssertLogRelMsgFailedReturn(("enmFn=%#x (%d)\n", pIemCpu->PendingCommit.enmFn, pIemCpu->PendingCommit.enmFn), VERR_IEM_IPE_2);
11731 }
11732
11733 /*
11734 * Merge status code (if any) with the incoming one.
11735 */
11736 rcStrictCommit = iemExecStatusCodeFiddling(pIemCpu, rcStrictCommit);
11737 if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
11738 return rcStrict;
11739 if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
11740 return rcStrictCommit;
11741
11742 /* Complicated. */
11743 if (RT_FAILURE(rcStrict))
11744 return rcStrict;
11745 if (RT_FAILURE(rcStrictCommit))
11746 return rcStrictCommit;
11747 if ( rcStrict >= VINF_EM_FIRST
11748 && rcStrict <= VINF_EM_LAST)
11749 {
11750 if ( rcStrictCommit >= VINF_EM_FIRST
11751 && rcStrictCommit <= VINF_EM_LAST)
11752 return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
11753
11754 /* This really shouldn't happen. Check PGM + handler code! */
11755 AssertLogRelMsgFailedReturn(("rcStrictCommit=%Rrc rcStrict=%Rrc enmFn=%d\n", VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), enmFn), VERR_IEM_IPE_1);
11756 }
11757 /* This shouldn't really happen either, see IOM_SUCCESS. */
11758 AssertLogRelMsgFailedReturn(("rcStrictCommit=%Rrc rcStrict=%Rrc enmFn=%d\n", VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), enmFn), VERR_IEM_IPE_2);
11759}
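
/*
 * Merge example (illustration): if ring-0 returned VINF_EM_RAW_TO_R3 and the
 * commit above produced VINF_SUCCESS, the original VINF_EM_RAW_TO_R3 is kept;
 * when both are EM scheduling statuses, the lower of the two values (the more
 * important one by EM convention) is returned.
 */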
11760
11761#endif /* IN_RING3 */
11762