VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@ 60186

Last change on this file since 60186 was 60186, checked in by vboxsync, 9 years ago

IEM: backed out previous commit.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 443.4 KB
1/* $Id: IEMAll.cpp 60186 2016-03-24 17:42:08Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2015 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @page pg_iem IEM - Interpreted Execution Manager
20 *
21 * The interpreted execution manager (IEM) is for executing short guest code
22 * sequences that are causing too many exits / virtualization traps. It will
23 * also be used to interpret single instructions, thus replacing the selective
24 * interpreters in EM and IOM.
25 *
26 * Design goals:
27 * - Relatively small footprint, although we favour speed and correctness
28 * over size.
29 * - Reasonably fast.
30 * - Correctly handle lock prefixed instructions.
31 * - Complete instruction set - eventually.
32 * - Refactorable into a recompiler, maybe.
33 * - Replace EMInterpret*.
34 *
35 * Using the existing disassembler has been considered, however this is thought
36 * to conflict with speed as the disassembler chews things a bit too much while
37 * leaving us with a somewhat complicated state to interpret afterwards.
38 *
39 *
40 * The current code is very much work in progress. You've been warned!
41 *
42 *
43 * @section sec_iem_fpu_instr FPU Instructions
44 *
45 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
46 * same or equivalent instructions on the host FPU. To make life easy, we also
47 * let the FPU prioritize the unmasked exceptions for us. This however, only
48 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
49 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
50 * can trigger spurious FPU exceptions.
51 *
52 * The guest FPU state is not loaded into the host CPU and kept there till we
53 * leave IEM because the calling conventions have declared an all year open
54 * season on much of the FPU state. For instance an innocent looking call to
55 * memcpy might end up using a whole bunch of XMM or MM registers if the
56 * particular implementation finds it worthwhile.
57 *
58 *
59 * @section sec_iem_logging Logging
60 *
61 * The IEM code uses the \"IEM\" log group for the main logging. The different
62 * logging levels/flags are generally used for the following purposes:
63 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
64 * - Flow (LogFlow): Basic enter/exit IEM state info.
65 * - Level 2 (Log2): ?
66 * - Level 3 (Log3): More detailed enter/exit IEM state info.
67 * - Level 4 (Log4): Decoding mnemonics w/ EIP.
68 * - Level 5 (Log5): Decoding details.
69 * - Level 6 (Log6): Enables/disables the lockstep comparison with REM.
70 * - Level 7 (Log7): iret++ execution logging.
71 *
72 */
73
74/** @def IEM_VERIFICATION_MODE_MINIMAL
75 * Use for pitting IEM against EM or something else in ring-0 or raw-mode
76 * context. */
77#if defined(DOXYGEN_RUNNING)
78# define IEM_VERIFICATION_MODE_MINIMAL
79#endif
80//#define IEM_LOG_MEMORY_WRITES
81#define IEM_IMPLEMENTS_TASKSWITCH
82
83
84/*********************************************************************************************************************************
85* Header Files *
86*********************************************************************************************************************************/
87#define LOG_GROUP LOG_GROUP_IEM
88#include <VBox/vmm/iem.h>
89#include <VBox/vmm/cpum.h>
90#include <VBox/vmm/pdm.h>
91#include <VBox/vmm/pgm.h>
92#include <internal/pgm.h>
93#include <VBox/vmm/iom.h>
94#include <VBox/vmm/em.h>
95#include <VBox/vmm/hm.h>
96#include <VBox/vmm/tm.h>
97#include <VBox/vmm/dbgf.h>
98#include <VBox/vmm/dbgftrace.h>
99#ifdef VBOX_WITH_RAW_MODE_NOT_R0
100# include <VBox/vmm/patm.h>
101# if defined(VBOX_WITH_CALL_RECORD) || defined(REM_MONITOR_CODE_PAGES)
102# include <VBox/vmm/csam.h>
103# endif
104#endif
105#include "IEMInternal.h"
106#ifdef IEM_VERIFICATION_MODE_FULL
107# include <VBox/vmm/rem.h>
108# include <VBox/vmm/mm.h>
109#endif
110#include <VBox/vmm/vm.h>
111#include <VBox/log.h>
112#include <VBox/err.h>
113#include <VBox/param.h>
114#include <VBox/dis.h>
115#include <VBox/disopcode.h>
116#include <iprt/assert.h>
117#include <iprt/string.h>
118#include <iprt/x86.h>
119
120
121
122/*********************************************************************************************************************************
123* Structures and Typedefs *
124*********************************************************************************************************************************/
125/** @typedef PFNIEMOP
126 * Pointer to an opcode decoder function.
127 */
128
129/** @def FNIEMOP_DEF
130 * Define an opcode decoder function.
131 *
132 * We're using macros for this so that adding and removing parameters as well as
133 * tweaking compiler-specific attributes becomes easier. See FNIEMOP_CALL and the usage sketch below.
134 *
135 * @param a_Name The function name.
136 */
137
138
139#if defined(__GNUC__) && defined(RT_ARCH_X86)
140typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOP)(PIEMCPU pIemCpu);
141# define FNIEMOP_DEF(a_Name) \
142 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PIEMCPU pIemCpu)
143# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
144 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0)
145# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
146 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1)
147
148#elif defined(_MSC_VER) && defined(RT_ARCH_X86)
149typedef VBOXSTRICTRC (__fastcall * PFNIEMOP)(PIEMCPU pIemCpu);
150# define FNIEMOP_DEF(a_Name) \
151 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PIEMCPU pIemCpu) RT_NO_THROW_DEF
152# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
153 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
154# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
155 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
156
157#elif defined(__GNUC__)
158typedef VBOXSTRICTRC (* PFNIEMOP)(PIEMCPU pIemCpu);
159# define FNIEMOP_DEF(a_Name) \
160 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PIEMCPU pIemCpu)
161# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
162 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0)
163# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
164 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1)
165
166#else
167typedef VBOXSTRICTRC (* PFNIEMOP)(PIEMCPU pIemCpu);
168# define FNIEMOP_DEF(a_Name) \
169 IEM_STATIC VBOXSTRICTRC a_Name(PIEMCPU pIemCpu) RT_NO_THROW_DEF
170# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
171 IEM_STATIC VBOXSTRICTRC a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
172# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
173 IEM_STATIC VBOXSTRICTRC a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
174
175#endif
176
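/*
 * Illustrative sketch only (not part of the real decoder tables): with the
 * macros above, an opcode decoder function for a hypothetical instruction is
 * declared like this, keeping the parameter list and calling convention in
 * one place.  The name iemOp_Example is made up for the example:
 *
 *      FNIEMOP_DEF(iemOp_Example)
 *      {
 *          // ...fetch any further opcode bytes and emulate the instruction...
 *          return VINF_SUCCESS;    // or another strict status code
 *      }
 */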
177
178/**
179 * Selector descriptor table entry as fetched by iemMemFetchSelDesc.
180 */
181typedef union IEMSELDESC
182{
183 /** The legacy view. */
184 X86DESC Legacy;
185 /** The long mode view. */
186 X86DESC64 Long;
187} IEMSELDESC;
188/** Pointer to a selector descriptor table entry. */
189typedef IEMSELDESC *PIEMSELDESC;
190
191
192/*********************************************************************************************************************************
193* Defined Constants And Macros *
194*********************************************************************************************************************************/
195/** Temporary hack to disable the double execution. Will be removed in favor
196 * of a dedicated execution mode in EM. */
197//#define IEM_VERIFICATION_MODE_NO_REM
198
199/** Used to shut up GCC warnings about variables that 'may be used uninitialized'
200 * due to GCC lacking knowledge about the value range of a switch. */
201#define IEM_NOT_REACHED_DEFAULT_CASE_RET() default: AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE)
202
203/**
204 * Returns VERR_IEM_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
205 * occasion.
206 */
207#ifdef LOG_ENABLED
208# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
209 do { \
210 /*Log*/ LogAlways(("%s: returning IEM_RETURN_ASPECT_NOT_IMPLEMENTED (line %d)\n", __FUNCTION__, __LINE__)); \
211 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
212 } while (0)
213#else
214# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
215 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
216#endif
217
218/**
219 * Returns VERR_IEM_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
220 * occasion using the supplied logger statement.
221 *
222 * @param a_LoggerArgs What to log on failure.
223 */
224#ifdef LOG_ENABLED
225# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
226 do { \
227 LogAlways((LOG_FN_FMT ": ", __PRETTY_FUNCTION__)); LogAlways(a_LoggerArgs); \
228 /*LogFunc(a_LoggerArgs);*/ \
229 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
230 } while (0)
231#else
232# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
233 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
234#endif
235
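/*
 * Illustrative sketch only: typical use when an emulation hits an aspect that
 * is not implemented yet (the condition and message are made up):
 *
 *      if (fSomeRarelyUsedFeature)
 *          IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("example: unsupported case\n"));
 */
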
236/**
237 * Call an opcode decoder function.
238 *
239 * We're using macros for this so that adding and removing parameters can be
240 * done as we please. See FNIEMOP_DEF.
241 */
242#define FNIEMOP_CALL(a_pfn) (a_pfn)(pIemCpu)
243
244/**
245 * Call a common opcode decoder function taking one extra argument.
246 *
247 * We're using macros for this so that adding and removing parameters can be
248 * done as we please. See FNIEMOP_DEF_1.
249 */
250#define FNIEMOP_CALL_1(a_pfn, a0) (a_pfn)(pIemCpu, a0)
251
252/**
253 * Call a common opcode decoder function taking two extra arguments.
254 *
255 * We're using macros for this so that adding and removing parameters can be
256 * done as we please. See FNIEMOP_DEF_2.
257 */
258#define FNIEMOP_CALL_2(a_pfn, a0, a1) (a_pfn)(pIemCpu, a0, a1)
259
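/*
 * Illustrative sketch only: a decoder function usually forwards to a shared
 * worker through these call macros so the implicit pIemCpu argument stays
 * hidden (the worker name below is made up):
 *
 *      return FNIEMOP_CALL_1(iemOpCommonExampleWorker, bImm);
 */
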
260/**
261 * Check if we're currently executing in real or virtual 8086 mode.
262 *
263 * @returns @c true if it is, @c false if not.
264 * @param a_pIemCpu The IEM state of the current CPU.
265 */
266#define IEM_IS_REAL_OR_V86_MODE(a_pIemCpu) (CPUMIsGuestInRealOrV86ModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
267
268/**
269 * Check if we're currently executing in virtual 8086 mode.
270 *
271 * @returns @c true if it is, @c false if not.
272 * @param a_pIemCpu The IEM state of the current CPU.
273 */
274#define IEM_IS_V86_MODE(a_pIemCpu) (CPUMIsGuestInV86ModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
275
276/**
277 * Check if we're currently executing in long mode.
278 *
279 * @returns @c true if it is, @c false if not.
280 * @param a_pIemCpu The IEM state of the current CPU.
281 */
282#define IEM_IS_LONG_MODE(a_pIemCpu) (CPUMIsGuestInLongModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
283
284/**
285 * Check if we're currently executing in real mode.
286 *
287 * @returns @c true if it is, @c false if not.
288 * @param a_pIemCpu The IEM state of the current CPU.
289 */
290#define IEM_IS_REAL_MODE(a_pIemCpu) (CPUMIsGuestInRealModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
291
292/**
293 * Returns a (const) pointer to the CPUMFEATURES for the guest CPU.
294 * @returns PCCPUMFEATURES
295 * @param a_pIemCpu The IEM state of the current CPU.
296 */
297#define IEM_GET_GUEST_CPU_FEATURES(a_pIemCpu) (&(IEMCPU_TO_VM(a_pIemCpu)->cpum.ro.GuestFeatures))
298
299/**
300 * Returns a (const) pointer to the CPUMFEATURES for the host CPU.
301 * @returns PCCPUMFEATURES
302 * @param a_pIemCpu The IEM state of the current CPU.
303 */
304#define IEM_GET_HOST_CPU_FEATURES(a_pIemCpu) (&(IEMCPU_TO_VM(a_pIemCpu)->cpum.ro.HostFeatures))
305
306/**
307 * Evaluates to true if we're presenting an Intel CPU to the guest.
308 */
309#define IEM_IS_GUEST_CPU_INTEL(a_pIemCpu) ( (a_pIemCpu)->enmCpuVendor == CPUMCPUVENDOR_INTEL )
310
311/**
312 * Evaluates to true if we're presenting an AMD CPU to the guest.
313 */
314#define IEM_IS_GUEST_CPU_AMD(a_pIemCpu) ( (a_pIemCpu)->enmCpuVendor == CPUMCPUVENDOR_AMD )
315
316/**
317 * Check if the address is canonical.
318 */
319#define IEM_IS_CANONICAL(a_u64Addr) X86_IS_CANONICAL(a_u64Addr)
320
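/*
 * Illustrative sketch only: decoders and helpers guard mode-dependent work
 * with the checks above, for instance (hypothetical fragment):
 *
 *      if (IEM_IS_LONG_MODE(pIemCpu) && !IEM_IS_CANONICAL(GCPtrMem))
 *          return iemRaiseGeneralProtectionFault0(pIemCpu);
 */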
321
322/*********************************************************************************************************************************
323* Global Variables *
324*********************************************************************************************************************************/
325extern const PFNIEMOP g_apfnOneByteMap[256]; /* not static since we need to forward declare it. */
326
327
328/** Function table for the ADD instruction. */
329IEM_STATIC const IEMOPBINSIZES g_iemAImpl_add =
330{
331 iemAImpl_add_u8, iemAImpl_add_u8_locked,
332 iemAImpl_add_u16, iemAImpl_add_u16_locked,
333 iemAImpl_add_u32, iemAImpl_add_u32_locked,
334 iemAImpl_add_u64, iemAImpl_add_u64_locked
335};
336
337/** Function table for the ADC instruction. */
338IEM_STATIC const IEMOPBINSIZES g_iemAImpl_adc =
339{
340 iemAImpl_adc_u8, iemAImpl_adc_u8_locked,
341 iemAImpl_adc_u16, iemAImpl_adc_u16_locked,
342 iemAImpl_adc_u32, iemAImpl_adc_u32_locked,
343 iemAImpl_adc_u64, iemAImpl_adc_u64_locked
344};
345
346/** Function table for the SUB instruction. */
347IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sub =
348{
349 iemAImpl_sub_u8, iemAImpl_sub_u8_locked,
350 iemAImpl_sub_u16, iemAImpl_sub_u16_locked,
351 iemAImpl_sub_u32, iemAImpl_sub_u32_locked,
352 iemAImpl_sub_u64, iemAImpl_sub_u64_locked
353};
354
355/** Function table for the SBB instruction. */
356IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sbb =
357{
358 iemAImpl_sbb_u8, iemAImpl_sbb_u8_locked,
359 iemAImpl_sbb_u16, iemAImpl_sbb_u16_locked,
360 iemAImpl_sbb_u32, iemAImpl_sbb_u32_locked,
361 iemAImpl_sbb_u64, iemAImpl_sbb_u64_locked
362};
363
364/** Function table for the OR instruction. */
365IEM_STATIC const IEMOPBINSIZES g_iemAImpl_or =
366{
367 iemAImpl_or_u8, iemAImpl_or_u8_locked,
368 iemAImpl_or_u16, iemAImpl_or_u16_locked,
369 iemAImpl_or_u32, iemAImpl_or_u32_locked,
370 iemAImpl_or_u64, iemAImpl_or_u64_locked
371};
372
373/** Function table for the XOR instruction. */
374IEM_STATIC const IEMOPBINSIZES g_iemAImpl_xor =
375{
376 iemAImpl_xor_u8, iemAImpl_xor_u8_locked,
377 iemAImpl_xor_u16, iemAImpl_xor_u16_locked,
378 iemAImpl_xor_u32, iemAImpl_xor_u32_locked,
379 iemAImpl_xor_u64, iemAImpl_xor_u64_locked
380};
381
382/** Function table for the AND instruction. */
383IEM_STATIC const IEMOPBINSIZES g_iemAImpl_and =
384{
385 iemAImpl_and_u8, iemAImpl_and_u8_locked,
386 iemAImpl_and_u16, iemAImpl_and_u16_locked,
387 iemAImpl_and_u32, iemAImpl_and_u32_locked,
388 iemAImpl_and_u64, iemAImpl_and_u64_locked
389};
390
391/** Function table for the CMP instruction.
392 * @remarks Making operand order ASSUMPTIONS.
393 */
394IEM_STATIC const IEMOPBINSIZES g_iemAImpl_cmp =
395{
396 iemAImpl_cmp_u8, NULL,
397 iemAImpl_cmp_u16, NULL,
398 iemAImpl_cmp_u32, NULL,
399 iemAImpl_cmp_u64, NULL
400};
401
402/** Function table for the TEST instruction.
403 * @remarks Making operand order ASSUMPTIONS.
404 */
405IEM_STATIC const IEMOPBINSIZES g_iemAImpl_test =
406{
407 iemAImpl_test_u8, NULL,
408 iemAImpl_test_u16, NULL,
409 iemAImpl_test_u32, NULL,
410 iemAImpl_test_u64, NULL
411};
412
413/** Function table for the BT instruction. */
414IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bt =
415{
416 NULL, NULL,
417 iemAImpl_bt_u16, NULL,
418 iemAImpl_bt_u32, NULL,
419 iemAImpl_bt_u64, NULL
420};
421
422/** Function table for the BTC instruction. */
423IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btc =
424{
425 NULL, NULL,
426 iemAImpl_btc_u16, iemAImpl_btc_u16_locked,
427 iemAImpl_btc_u32, iemAImpl_btc_u32_locked,
428 iemAImpl_btc_u64, iemAImpl_btc_u64_locked
429};
430
431/** Function table for the BTR instruction. */
432IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btr =
433{
434 NULL, NULL,
435 iemAImpl_btr_u16, iemAImpl_btr_u16_locked,
436 iemAImpl_btr_u32, iemAImpl_btr_u32_locked,
437 iemAImpl_btr_u64, iemAImpl_btr_u64_locked
438};
439
440/** Function table for the BTS instruction. */
441IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bts =
442{
443 NULL, NULL,
444 iemAImpl_bts_u16, iemAImpl_bts_u16_locked,
445 iemAImpl_bts_u32, iemAImpl_bts_u32_locked,
446 iemAImpl_bts_u64, iemAImpl_bts_u64_locked
447};
448
449/** Function table for the BSF instruction. */
450IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsf =
451{
452 NULL, NULL,
453 iemAImpl_bsf_u16, NULL,
454 iemAImpl_bsf_u32, NULL,
455 iemAImpl_bsf_u64, NULL
456};
457
458/** Function table for the BSR instruction. */
459IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsr =
460{
461 NULL, NULL,
462 iemAImpl_bsr_u16, NULL,
463 iemAImpl_bsr_u32, NULL,
464 iemAImpl_bsr_u64, NULL
465};
466
467/** Function table for the IMUL instruction. */
468IEM_STATIC const IEMOPBINSIZES g_iemAImpl_imul_two =
469{
470 NULL, NULL,
471 iemAImpl_imul_two_u16, NULL,
472 iemAImpl_imul_two_u32, NULL,
473 iemAImpl_imul_two_u64, NULL
474};
475
476/** Group 1 /r lookup table. */
477IEM_STATIC const PCIEMOPBINSIZES g_apIemImplGrp1[8] =
478{
479 &g_iemAImpl_add,
480 &g_iemAImpl_or,
481 &g_iemAImpl_adc,
482 &g_iemAImpl_sbb,
483 &g_iemAImpl_and,
484 &g_iemAImpl_sub,
485 &g_iemAImpl_xor,
486 &g_iemAImpl_cmp
487};
488
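/*
 * Illustrative sketch only: the group-1 opcodes (0x80..0x83) pick their
 * arithmetic implementation from the table above via the ModR/M reg field,
 * roughly like this (variable names are assumptions; see IEMInternal.h for
 * the real IEMOPBINSIZES layout):
 *
 *      PCIEMOPBINSIZES pImpl = g_apIemImplGrp1[(bRm >> 3) & 7];
 *      // ...then dispatch to pImpl's normal or lock-prefixed worker for the
 *      //    current operand size...
 */
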
489/** Function table for the INC instruction. */
490IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_inc =
491{
492 iemAImpl_inc_u8, iemAImpl_inc_u8_locked,
493 iemAImpl_inc_u16, iemAImpl_inc_u16_locked,
494 iemAImpl_inc_u32, iemAImpl_inc_u32_locked,
495 iemAImpl_inc_u64, iemAImpl_inc_u64_locked
496};
497
498/** Function table for the DEC instruction. */
499IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_dec =
500{
501 iemAImpl_dec_u8, iemAImpl_dec_u8_locked,
502 iemAImpl_dec_u16, iemAImpl_dec_u16_locked,
503 iemAImpl_dec_u32, iemAImpl_dec_u32_locked,
504 iemAImpl_dec_u64, iemAImpl_dec_u64_locked
505};
506
507/** Function table for the NEG instruction. */
508IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_neg =
509{
510 iemAImpl_neg_u8, iemAImpl_neg_u8_locked,
511 iemAImpl_neg_u16, iemAImpl_neg_u16_locked,
512 iemAImpl_neg_u32, iemAImpl_neg_u32_locked,
513 iemAImpl_neg_u64, iemAImpl_neg_u64_locked
514};
515
516/** Function table for the NOT instruction. */
517IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_not =
518{
519 iemAImpl_not_u8, iemAImpl_not_u8_locked,
520 iemAImpl_not_u16, iemAImpl_not_u16_locked,
521 iemAImpl_not_u32, iemAImpl_not_u32_locked,
522 iemAImpl_not_u64, iemAImpl_not_u64_locked
523};
524
525
526/** Function table for the ROL instruction. */
527IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rol =
528{
529 iemAImpl_rol_u8,
530 iemAImpl_rol_u16,
531 iemAImpl_rol_u32,
532 iemAImpl_rol_u64
533};
534
535/** Function table for the ROR instruction. */
536IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_ror =
537{
538 iemAImpl_ror_u8,
539 iemAImpl_ror_u16,
540 iemAImpl_ror_u32,
541 iemAImpl_ror_u64
542};
543
544/** Function table for the RCL instruction. */
545IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcl =
546{
547 iemAImpl_rcl_u8,
548 iemAImpl_rcl_u16,
549 iemAImpl_rcl_u32,
550 iemAImpl_rcl_u64
551};
552
553/** Function table for the RCR instruction. */
554IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcr =
555{
556 iemAImpl_rcr_u8,
557 iemAImpl_rcr_u16,
558 iemAImpl_rcr_u32,
559 iemAImpl_rcr_u64
560};
561
562/** Function table for the SHL instruction. */
563IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shl =
564{
565 iemAImpl_shl_u8,
566 iemAImpl_shl_u16,
567 iemAImpl_shl_u32,
568 iemAImpl_shl_u64
569};
570
571/** Function table for the SHR instruction. */
572IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shr =
573{
574 iemAImpl_shr_u8,
575 iemAImpl_shr_u16,
576 iemAImpl_shr_u32,
577 iemAImpl_shr_u64
578};
579
580/** Function table for the SAR instruction. */
581IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_sar =
582{
583 iemAImpl_sar_u8,
584 iemAImpl_sar_u16,
585 iemAImpl_sar_u32,
586 iemAImpl_sar_u64
587};
588
589
590/** Function table for the MUL instruction. */
591IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_mul =
592{
593 iemAImpl_mul_u8,
594 iemAImpl_mul_u16,
595 iemAImpl_mul_u32,
596 iemAImpl_mul_u64
597};
598
599/** Function table for the IMUL instruction working implicitly on rAX. */
600IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_imul =
601{
602 iemAImpl_imul_u8,
603 iemAImpl_imul_u16,
604 iemAImpl_imul_u32,
605 iemAImpl_imul_u64
606};
607
608/** Function table for the DIV instruction. */
609IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_div =
610{
611 iemAImpl_div_u8,
612 iemAImpl_div_u16,
613 iemAImpl_div_u32,
614 iemAImpl_div_u64
615};
616
617/** Function table for the IDIV instruction. */
618IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_idiv =
619{
620 iemAImpl_idiv_u8,
621 iemAImpl_idiv_u16,
622 iemAImpl_idiv_u32,
623 iemAImpl_idiv_u64
624};
625
626/** Function table for the SHLD instruction */
627IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shld =
628{
629 iemAImpl_shld_u16,
630 iemAImpl_shld_u32,
631 iemAImpl_shld_u64,
632};
633
634/** Function table for the SHRD instruction */
635IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shrd =
636{
637 iemAImpl_shrd_u16,
638 iemAImpl_shrd_u32,
639 iemAImpl_shrd_u64,
640};
641
642
643/** Function table for the PUNPCKLBW instruction */
644IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklbw = { iemAImpl_punpcklbw_u64, iemAImpl_punpcklbw_u128 };
645/** Function table for the PUNPCKLWD instruction */
646IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklwd = { iemAImpl_punpcklwd_u64, iemAImpl_punpcklwd_u128 };
647/** Function table for the PUNPCKLDQ instruction */
648IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpckldq = { iemAImpl_punpckldq_u64, iemAImpl_punpckldq_u128 };
649/** Function table for the PUNPCKLQDQ instruction */
650IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklqdq = { NULL, iemAImpl_punpcklqdq_u128 };
651
652/** Function table for the PUNPCKHBW instruction */
653IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhbw = { iemAImpl_punpckhbw_u64, iemAImpl_punpckhbw_u128 };
654/** Function table for the PUNPCKHWD instruction */
655IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhwd = { iemAImpl_punpckhwd_u64, iemAImpl_punpckhwd_u128 };
656/** Function table for the PUNPCKHDQ instruction */
657IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhdq = { iemAImpl_punpckhdq_u64, iemAImpl_punpckhdq_u128 };
658/** Function table for the PUNPCKHQDQ instruction */
659IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhqdq = { NULL, iemAImpl_punpckhqdq_u128 };
660
661/** Function table for the PXOR instruction */
662IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pxor = { iemAImpl_pxor_u64, iemAImpl_pxor_u128 };
663/** Function table for the PCMPEQB instruction */
664IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqb = { iemAImpl_pcmpeqb_u64, iemAImpl_pcmpeqb_u128 };
665/** Function table for the PCMPEQW instruction */
666IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqw = { iemAImpl_pcmpeqw_u64, iemAImpl_pcmpeqw_u128 };
667/** Function table for the PCMPEQD instruction */
668IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqd = { iemAImpl_pcmpeqd_u64, iemAImpl_pcmpeqd_u128 };
669
670
671#if defined(IEM_VERIFICATION_MODE_MINIMAL) || defined(IEM_LOG_MEMORY_WRITES)
672/** What IEM just wrote. */
673uint8_t g_abIemWrote[256];
674/** How much IEM just wrote. */
675size_t g_cbIemWrote;
676#endif
677
678
679/*********************************************************************************************************************************
680* Internal Functions *
681*********************************************************************************************************************************/
682IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PIEMCPU pIemCpu, uint16_t uErr);
683IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PIEMCPU pIemCpu);
684IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFault0(PIEMCPU pIemCpu);
685IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PIEMCPU pIemCpu, uint16_t uSel);
686/*IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresent(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess);*/
687IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel);
688IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PIEMCPU pIemCpu, uint16_t uErr);
689IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel);
690IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PIEMCPU pIemCpu, uint16_t uErr);
691IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault(PIEMCPU pIemCpu, uint16_t uErr);
692IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PIEMCPU pIemCpu);
693IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PIEMCPU pIemCpu, RTSEL uSel);
694IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBounds(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess);
695IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PIEMCPU pIemCpu, RTSEL Sel);
696IEM_STATIC VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess);
697IEM_STATIC VBOXSTRICTRC iemRaisePageFault(PIEMCPU pIemCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
698IEM_STATIC VBOXSTRICTRC iemRaiseAlignmentCheckException(PIEMCPU pIemCpu);
699IEM_STATIC VBOXSTRICTRC iemMemMap(PIEMCPU pIemCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess);
700IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PIEMCPU pIemCpu, void *pvMem, uint32_t fAccess);
701IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
702IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
703IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
704IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
705IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
706IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
707IEM_STATIC VBOXSTRICTRC iemMemFetchSelDescWithErr(PIEMCPU pIemCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode);
708IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PIEMCPU pIemCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt);
709IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PIEMCPU pIemCpu, void *pvMem, uint64_t uNewRsp);
710IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PIEMCPU pIemCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp);
711IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PIEMCPU pIemCpu, uint32_t u32Value);
712IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PIEMCPU pIemCpu, uint16_t u16Value);
713IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PIEMCPU pIemCpu, uint16_t uSel);
714IEM_STATIC uint16_t iemSRegFetchU16(PIEMCPU pIemCpu, uint8_t iSegReg);
715
716#if defined(IEM_VERIFICATION_MODE_FULL) && !defined(IEM_VERIFICATION_MODE_MINIMAL)
717IEM_STATIC PIEMVERIFYEVTREC iemVerifyAllocRecord(PIEMCPU pIemCpu);
718#endif
719IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue);
720IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue);
721
722
723
724/**
725 * Sets the pass up status.
726 *
727 * @returns VINF_SUCCESS.
728 * @param pIemCpu The per CPU IEM state of the calling thread.
729 * @param rcPassUp The pass up status. Must be informational.
730 * VINF_SUCCESS is not allowed.
731 */
732IEM_STATIC int iemSetPassUpStatus(PIEMCPU pIemCpu, VBOXSTRICTRC rcPassUp)
733{
734 AssertRC(VBOXSTRICTRC_VAL(rcPassUp)); Assert(rcPassUp != VINF_SUCCESS);
735
736 int32_t const rcOldPassUp = pIemCpu->rcPassUp;
737 if (rcOldPassUp == VINF_SUCCESS)
738 pIemCpu->rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
739 /* If both are EM scheduling codes, use EM priority rules. */
740 else if ( rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST
741 && rcPassUp >= VINF_EM_FIRST && rcPassUp <= VINF_EM_LAST)
742 {
743 if (rcPassUp < rcOldPassUp)
744 {
745 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
746 pIemCpu->rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
747 }
748 else
749 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
750 }
751 /* Override EM scheduling with specific status code. */
752 else if (rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST)
753 {
754 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
755 pIemCpu->rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
756 }
757 /* Don't override specific status code, first come first served. */
758 else
759 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
760 return VINF_SUCCESS;
761}
762
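/*
 * Illustrative sketch only: callers that get an informational status back from
 * PGM record it like this, so the highest-priority code is what IEM eventually
 * returns (this mirrors the use in iemInitDecoderAndPrefetchOpcodes below):
 *
 *      if (rcStrict != VINF_SUCCESS && PGM_PHYS_RW_IS_SUCCESS(rcStrict))
 *          rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
 */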
763
764/**
765 * Initializes the execution state.
766 *
767 * @param pIemCpu The per CPU IEM state.
768 * @param fBypassHandlers Whether to bypass access handlers.
769 */
770DECLINLINE(void) iemInitExec(PIEMCPU pIemCpu, bool fBypassHandlers)
771{
772 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
773 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
774
775 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
776 Assert(pIemCpu->PendingCommit.enmFn == IEMCOMMIT_INVALID);
777
778#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
779 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
780 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
781 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
782 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
783 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
784 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
785 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
786 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
787#endif
788
789#ifdef VBOX_WITH_RAW_MODE_NOT_R0
790 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
791#endif
792 pIemCpu->uCpl = CPUMGetGuestCPL(pVCpu);
793 IEMMODE enmMode = CPUMIsGuestIn64BitCodeEx(pCtx)
794 ? IEMMODE_64BIT
795 : pCtx->cs.Attr.n.u1DefBig /** @todo check if this is correct... */
796 ? IEMMODE_32BIT
797 : IEMMODE_16BIT;
798 pIemCpu->enmCpuMode = enmMode;
799#ifdef VBOX_STRICT
800 pIemCpu->enmDefAddrMode = (IEMMODE)0xc0fe;
801 pIemCpu->enmEffAddrMode = (IEMMODE)0xc0fe;
802 pIemCpu->enmDefOpSize = (IEMMODE)0xc0fe;
803 pIemCpu->enmEffOpSize = (IEMMODE)0xc0fe;
804 pIemCpu->fPrefixes = (IEMMODE)0xfeedbeef;
805 pIemCpu->uRexReg = 127;
806 pIemCpu->uRexB = 127;
807 pIemCpu->uRexIndex = 127;
808 pIemCpu->iEffSeg = 127;
809 pIemCpu->offOpcode = 127;
810 pIemCpu->cbOpcode = 127;
811#endif
812
813 pIemCpu->cActiveMappings = 0;
814 pIemCpu->iNextMapping = 0;
815 pIemCpu->rcPassUp = VINF_SUCCESS;
816 pIemCpu->fBypassHandlers = fBypassHandlers;
817#ifdef VBOX_WITH_RAW_MODE_NOT_R0
818 pIemCpu->fInPatchCode = pIemCpu->uCpl == 0
819 && pCtx->cs.u64Base == 0
820 && pCtx->cs.u32Limit == UINT32_MAX
821 && PATMIsPatchGCAddr(IEMCPU_TO_VM(pIemCpu), pCtx->eip);
822 if (!pIemCpu->fInPatchCode)
823 CPUMRawLeave(pVCpu, VINF_SUCCESS);
824#endif
825}
826
827
828/**
829 * Initializes the decoder state.
830 *
831 * @param pIemCpu The per CPU IEM state.
832 * @param fBypassHandlers Whether to bypass access handlers.
833 */
834DECLINLINE(void) iemInitDecoder(PIEMCPU pIemCpu, bool fBypassHandlers)
835{
836 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
837 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
838
839 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
840 Assert(pIemCpu->PendingCommit.enmFn == IEMCOMMIT_INVALID);
841
842#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
843 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
844 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
845 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
846 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
847 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
848 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
849 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
850 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
851#endif
852
853#ifdef VBOX_WITH_RAW_MODE_NOT_R0
854 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
855#endif
856 pIemCpu->uCpl = CPUMGetGuestCPL(pVCpu);
857#ifdef IEM_VERIFICATION_MODE_FULL
858 if (pIemCpu->uInjectCpl != UINT8_MAX)
859 pIemCpu->uCpl = pIemCpu->uInjectCpl;
860#endif
861 IEMMODE enmMode = CPUMIsGuestIn64BitCodeEx(pCtx)
862 ? IEMMODE_64BIT
863 : pCtx->cs.Attr.n.u1DefBig /** @todo check if this is correct... */
864 ? IEMMODE_32BIT
865 : IEMMODE_16BIT;
866 pIemCpu->enmCpuMode = enmMode;
867 pIemCpu->enmDefAddrMode = enmMode; /** @todo check if this is correct... */
868 pIemCpu->enmEffAddrMode = enmMode;
869 if (enmMode != IEMMODE_64BIT)
870 {
871 pIemCpu->enmDefOpSize = enmMode; /** @todo check if this is correct... */
872 pIemCpu->enmEffOpSize = enmMode;
873 }
874 else
875 {
876 pIemCpu->enmDefOpSize = IEMMODE_32BIT;
877 pIemCpu->enmEffOpSize = IEMMODE_32BIT;
878 }
879 pIemCpu->fPrefixes = 0;
880 pIemCpu->uRexReg = 0;
881 pIemCpu->uRexB = 0;
882 pIemCpu->uRexIndex = 0;
883 pIemCpu->iEffSeg = X86_SREG_DS;
884 pIemCpu->offOpcode = 0;
885 pIemCpu->cbOpcode = 0;
886 pIemCpu->cActiveMappings = 0;
887 pIemCpu->iNextMapping = 0;
888 pIemCpu->rcPassUp = VINF_SUCCESS;
889 pIemCpu->fBypassHandlers = fBypassHandlers;
890#ifdef VBOX_WITH_RAW_MODE_NOT_R0
891 pIemCpu->fInPatchCode = pIemCpu->uCpl == 0
892 && pCtx->cs.u64Base == 0
893 && pCtx->cs.u32Limit == UINT32_MAX
894 && PATMIsPatchGCAddr(IEMCPU_TO_VM(pIemCpu), pCtx->eip);
895 if (!pIemCpu->fInPatchCode)
896 CPUMRawLeave(pVCpu, VINF_SUCCESS);
897#endif
898
899#ifdef DBGFTRACE_ENABLED
900 switch (enmMode)
901 {
902 case IEMMODE_64BIT:
903 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pIemCpu->uCpl, pCtx->rip);
904 break;
905 case IEMMODE_32BIT:
906 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pIemCpu->uCpl, pCtx->cs.Sel, pCtx->eip);
907 break;
908 case IEMMODE_16BIT:
909 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pIemCpu->uCpl, pCtx->cs.Sel, pCtx->eip);
910 break;
911 }
912#endif
913}
914
915
916/**
917 * Prefetches opcodes the first time, when starting execution.
918 *
919 * @returns Strict VBox status code.
920 * @param pIemCpu The IEM state.
921 * @param fBypassHandlers Whether to bypass access handlers.
922 */
923IEM_STATIC VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PIEMCPU pIemCpu, bool fBypassHandlers)
924{
925#ifdef IEM_VERIFICATION_MODE_FULL
926 uint8_t const cbOldOpcodes = pIemCpu->cbOpcode;
927#endif
928 iemInitDecoder(pIemCpu, fBypassHandlers);
929
930 /*
931 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
932 *
933 * First translate CS:rIP to a physical address.
934 */
935 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
936 uint32_t cbToTryRead;
937 RTGCPTR GCPtrPC;
938 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
939 {
940 cbToTryRead = PAGE_SIZE;
941 GCPtrPC = pCtx->rip;
942 if (!IEM_IS_CANONICAL(GCPtrPC))
943 return iemRaiseGeneralProtectionFault0(pIemCpu);
944 cbToTryRead = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
945 }
946 else
947 {
948 uint32_t GCPtrPC32 = pCtx->eip;
949 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || pIemCpu->enmCpuMode == IEMMODE_32BIT, ("%04x:%RX64\n", pCtx->cs.Sel, pCtx->rip));
950 if (GCPtrPC32 > pCtx->cs.u32Limit)
951 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
952 cbToTryRead = pCtx->cs.u32Limit - GCPtrPC32 + 1;
953 if (!cbToTryRead) /* overflowed */
954 {
955 Assert(GCPtrPC32 == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
956 cbToTryRead = UINT32_MAX;
957 }
958 GCPtrPC = (uint32_t)pCtx->cs.u64Base + GCPtrPC32;
959 Assert(GCPtrPC <= UINT32_MAX);
960 }
961
962#ifdef VBOX_WITH_RAW_MODE_NOT_R0
963 /* Allow interpretation of patch manager code blocks since they can for
964 instance throw #PFs for perfectly good reasons. */
965 if (pIemCpu->fInPatchCode)
966 {
967 size_t cbRead = 0;
968 int rc = PATMReadPatchCode(IEMCPU_TO_VM(pIemCpu), GCPtrPC, pIemCpu->abOpcode, sizeof(pIemCpu->abOpcode), &cbRead);
969 AssertRCReturn(rc, rc);
970 pIemCpu->cbOpcode = (uint8_t)cbRead; Assert(pIemCpu->cbOpcode == cbRead); Assert(cbRead > 0);
971 return VINF_SUCCESS;
972 }
973#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
974
975 RTGCPHYS GCPhys;
976 uint64_t fFlags;
977 int rc = PGMGstGetPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrPC, &fFlags, &GCPhys);
978 if (RT_FAILURE(rc))
979 {
980 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
981 return iemRaisePageFault(pIemCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, rc);
982 }
983 if (!(fFlags & X86_PTE_US) && pIemCpu->uCpl == 3)
984 {
985 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
986 return iemRaisePageFault(pIemCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
987 }
988 if ((fFlags & X86_PTE_PAE_NX) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
989 {
990 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
991 return iemRaisePageFault(pIemCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
992 }
993 GCPhys |= GCPtrPC & PAGE_OFFSET_MASK;
994 /** @todo Check reserved bits and such stuff. PGM is better at doing
995 * that, so do it when implementing the guest virtual address
996 * TLB... */
997
998#ifdef IEM_VERIFICATION_MODE_FULL
999 /*
1000 * Optimistic optimization: Use unconsumed opcode bytes from the previous
1001 * instruction.
1002 */
1003 /** @todo optimize this differently by not using PGMPhysRead. */
1004 RTGCPHYS const offPrevOpcodes = GCPhys - pIemCpu->GCPhysOpcodes;
1005 pIemCpu->GCPhysOpcodes = GCPhys;
1006 if ( offPrevOpcodes < cbOldOpcodes
1007 && PAGE_SIZE - (GCPhys & PAGE_OFFSET_MASK) > sizeof(pIemCpu->abOpcode))
1008 {
1009 uint8_t cbNew = cbOldOpcodes - (uint8_t)offPrevOpcodes;
1010 memmove(&pIemCpu->abOpcode[0], &pIemCpu->abOpcode[offPrevOpcodes], cbNew);
1011 pIemCpu->cbOpcode = cbNew;
1012 return VINF_SUCCESS;
1013 }
1014#endif
1015
1016 /*
1017 * Read the bytes at this address.
1018 */
1019 PVM pVM = IEMCPU_TO_VM(pIemCpu);
1020#if defined(IN_RING3) && defined(VBOX_WITH_RAW_MODE_NOT_R0)
1021 size_t cbActual;
1022 if ( PATMIsEnabled(pVM)
1023 && RT_SUCCESS(PATMR3ReadOrgInstr(pVM, GCPtrPC, pIemCpu->abOpcode, sizeof(pIemCpu->abOpcode), &cbActual)))
1024 {
1025 Log4(("decode - Read %u unpatched bytes at %RGv\n", cbActual, GCPtrPC));
1026 Assert(cbActual > 0);
1027 pIemCpu->cbOpcode = (uint8_t)cbActual;
1028 }
1029 else
1030#endif
1031 {
1032 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1033 if (cbToTryRead > cbLeftOnPage)
1034 cbToTryRead = cbLeftOnPage;
1035 if (cbToTryRead > sizeof(pIemCpu->abOpcode))
1036 cbToTryRead = sizeof(pIemCpu->abOpcode);
1037
1038 if (!pIemCpu->fBypassHandlers)
1039 {
1040 VBOXSTRICTRC rcStrict = PGMPhysRead(pVM, GCPhys, pIemCpu->abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
1041 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1042 { /* likely */ }
1043 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1044 {
1045 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1046 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1047 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
1048 }
1049 else
1050 {
1051 Log((RT_SUCCESS(rcStrict)
1052 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1053 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1054 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1055 return rcStrict;
1056 }
1057 }
1058 else
1059 {
1060 rc = PGMPhysSimpleReadGCPhys(pVM, pIemCpu->abOpcode, GCPhys, cbToTryRead);
1061 if (RT_SUCCESS(rc))
1062 { /* likely */ }
1063 else
1064 {
1065 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
1066 GCPtrPC, GCPhys, cbToTryRead, rc));
1067 return rc;
1068 }
1069 }
1070 pIemCpu->cbOpcode = cbToTryRead;
1071 }
1072
1073 return VINF_SUCCESS;
1074}
1075
1076
1077/**
1078 * Tries to fetch at least @a cbMin more opcode bytes, raising the appropriate
1079 * exception if it fails.
1080 *
1081 * @returns Strict VBox status code.
1082 * @param pIemCpu The IEM state.
1083 * @param cbMin The minimum number of bytes relative to offOpcode
1084 * that must be read.
1085 */
1086IEM_STATIC VBOXSTRICTRC iemOpcodeFetchMoreBytes(PIEMCPU pIemCpu, size_t cbMin)
1087{
1088 /*
1089 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1090 *
1091 * First translate CS:rIP to a physical address.
1092 */
1093 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1094 uint8_t cbLeft = pIemCpu->cbOpcode - pIemCpu->offOpcode; Assert(cbLeft < cbMin);
1095 uint32_t cbToTryRead;
1096 RTGCPTR GCPtrNext;
1097 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
1098 {
1099 cbToTryRead = PAGE_SIZE;
1100 GCPtrNext = pCtx->rip + pIemCpu->cbOpcode;
1101 if (!IEM_IS_CANONICAL(GCPtrNext))
1102 return iemRaiseGeneralProtectionFault0(pIemCpu);
1103 }
1104 else
1105 {
1106 uint32_t GCPtrNext32 = pCtx->eip;
1107 Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || pIemCpu->enmCpuMode == IEMMODE_32BIT);
1108 GCPtrNext32 += pIemCpu->cbOpcode;
1109 if (GCPtrNext32 > pCtx->cs.u32Limit)
1110 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1111 cbToTryRead = pCtx->cs.u32Limit - GCPtrNext32 + 1;
1112 if (!cbToTryRead) /* overflowed */
1113 {
1114 Assert(GCPtrNext32 == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
1115 cbToTryRead = UINT32_MAX;
1116 /** @todo check out wrapping around the code segment. */
1117 }
1118 if (cbToTryRead < cbMin - cbLeft)
1119 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1120 GCPtrNext = (uint32_t)pCtx->cs.u64Base + GCPtrNext32;
1121 }
1122
1123 /* Only read up to the end of the page, and make sure we don't read more
1124 than the opcode buffer can hold. */
1125 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrNext & PAGE_OFFSET_MASK);
1126 if (cbToTryRead > cbLeftOnPage)
1127 cbToTryRead = cbLeftOnPage;
1128 if (cbToTryRead > sizeof(pIemCpu->abOpcode) - pIemCpu->cbOpcode)
1129 cbToTryRead = sizeof(pIemCpu->abOpcode) - pIemCpu->cbOpcode;
1130/** @todo r=bird: Convert assertion into undefined opcode exception? */
1131 Assert(cbToTryRead >= cbMin - cbLeft); /* ASSUMPTION based on iemInitDecoderAndPrefetchOpcodes. */
1132
1133#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1134 /* Allow interpretation of patch manager code blocks since they can for
1135 instance throw #PFs for perfectly good reasons. */
1136 if (pIemCpu->fInPatchCode)
1137 {
1138 size_t cbRead = 0;
1139 int rc = PATMReadPatchCode(IEMCPU_TO_VM(pIemCpu), GCPtrNext, pIemCpu->abOpcode, cbToTryRead, &cbRead);
1140 AssertRCReturn(rc, rc);
1141 pIemCpu->cbOpcode = (uint8_t)cbRead; Assert(pIemCpu->cbOpcode == cbRead); Assert(cbRead > 0);
1142 return VINF_SUCCESS;
1143 }
1144#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1145
1146 RTGCPHYS GCPhys;
1147 uint64_t fFlags;
1148 int rc = PGMGstGetPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrNext, &fFlags, &GCPhys);
1149 if (RT_FAILURE(rc))
1150 {
1151 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
1152 return iemRaisePageFault(pIemCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, rc);
1153 }
1154 if (!(fFlags & X86_PTE_US) && pIemCpu->uCpl == 3)
1155 {
1156 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
1157 return iemRaisePageFault(pIemCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1158 }
1159 if ((fFlags & X86_PTE_PAE_NX) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
1160 {
1161 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
1162 return iemRaisePageFault(pIemCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1163 }
1164 GCPhys |= GCPtrNext & PAGE_OFFSET_MASK;
1165 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, pIemCpu->cbOpcode));
1166 /** @todo Check reserved bits and such stuff. PGM is better at doing
1167 * that, so do it when implementing the guest virtual address
1168 * TLB... */
1169
1170 /*
1171 * Read the bytes at this address.
1172 *
1173 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
1174 * and since PATM should only patch the start of an instruction there
1175 * should be no need to check again here.
1176 */
1177 if (!pIemCpu->fBypassHandlers)
1178 {
1179 VBOXSTRICTRC rcStrict = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhys, &pIemCpu->abOpcode[pIemCpu->cbOpcode],
1180 cbToTryRead, PGMACCESSORIGIN_IEM);
1181 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1182 { /* likely */ }
1183 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1184 {
1185 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1186 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1187 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
1188 }
1189 else
1190 {
1191 Log((RT_SUCCESS(rcStrict)
1192 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1193 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1194 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1195 return rcStrict;
1196 }
1197 }
1198 else
1199 {
1200 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), &pIemCpu->abOpcode[pIemCpu->cbOpcode], GCPhys, cbToTryRead);
1201 if (RT_SUCCESS(rc))
1202 { /* likely */ }
1203 else
1204 {
1205 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
1206 return rc;
1207 }
1208 }
1209 pIemCpu->cbOpcode += cbToTryRead;
1210 Log5(("%.*Rhxs\n", pIemCpu->cbOpcode, pIemCpu->abOpcode));
1211
1212 return VINF_SUCCESS;
1213}
1214
1215
1216/**
1217 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
1218 *
1219 * @returns Strict VBox status code.
1220 * @param pIemCpu The IEM state.
1221 * @param pb Where to return the opcode byte.
1222 */
1223DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU8Slow(PIEMCPU pIemCpu, uint8_t *pb)
1224{
1225 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 1);
1226 if (rcStrict == VINF_SUCCESS)
1227 {
1228 uint8_t offOpcode = pIemCpu->offOpcode;
1229 *pb = pIemCpu->abOpcode[offOpcode];
1230 pIemCpu->offOpcode = offOpcode + 1;
1231 }
1232 else
1233 *pb = 0;
1234 return rcStrict;
1235}
1236
1237
1238/**
1239 * Fetches the next opcode byte.
1240 *
1241 * @returns Strict VBox status code.
1242 * @param pIemCpu The IEM state.
1243 * @param pu8 Where to return the opcode byte.
1244 */
1245DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU8(PIEMCPU pIemCpu, uint8_t *pu8)
1246{
1247 uint8_t const offOpcode = pIemCpu->offOpcode;
1248 if (RT_LIKELY(offOpcode < pIemCpu->cbOpcode))
1249 {
1250 *pu8 = pIemCpu->abOpcode[offOpcode];
1251 pIemCpu->offOpcode = offOpcode + 1;
1252 return VINF_SUCCESS;
1253 }
1254 return iemOpcodeGetNextU8Slow(pIemCpu, pu8);
1255}
1256
1257
1258/**
1259 * Fetches the next opcode byte, returns automatically on failure.
1260 *
1261 * @param a_pu8 Where to return the opcode byte.
1262 * @remark Implicitly references pIemCpu.
1263 */
1264#define IEM_OPCODE_GET_NEXT_U8(a_pu8) \
1265 do \
1266 { \
1267 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU8(pIemCpu, (a_pu8)); \
1268 if (rcStrict2 != VINF_SUCCESS) \
1269 return rcStrict2; \
1270 } while (0)
1271
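/*
 * Illustrative sketch only: decoder code consumes opcode bytes through this
 * macro, which makes the *calling* function return on a failed fetch, e.g.
 * (hypothetical fragment):
 *
 *      uint8_t bRm;
 *      IEM_OPCODE_GET_NEXT_U8(&bRm);   // may 'return rcStrict2' on failure
 *      // ...decode the ModR/M byte in bRm...
 */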
1272
1273/**
1274 * Fetches the next signed byte from the opcode stream.
1275 *
1276 * @returns Strict VBox status code.
1277 * @param pIemCpu The IEM state.
1278 * @param pi8 Where to return the signed byte.
1279 */
1280DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8(PIEMCPU pIemCpu, int8_t *pi8)
1281{
1282 return iemOpcodeGetNextU8(pIemCpu, (uint8_t *)pi8);
1283}
1284
1285
1286/**
1287 * Fetches the next signed byte from the opcode stream, returning automatically
1288 * on failure.
1289 *
1290 * @param a_pi8 Where to return the signed byte.
1291 * @remark Implicitly references pIemCpu.
1292 */
1293#define IEM_OPCODE_GET_NEXT_S8(a_pi8) \
1294 do \
1295 { \
1296 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8(pIemCpu, (a_pi8)); \
1297 if (rcStrict2 != VINF_SUCCESS) \
1298 return rcStrict2; \
1299 } while (0)
1300
1301
1302/**
1303 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
1304 *
1305 * @returns Strict VBox status code.
1306 * @param pIemCpu The IEM state.
1307 * @param pu16 Where to return the opcode word.
1308 */
1309DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU16Slow(PIEMCPU pIemCpu, uint16_t *pu16)
1310{
1311 uint8_t u8;
1312 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pIemCpu, &u8);
1313 if (rcStrict == VINF_SUCCESS)
1314 *pu16 = (int8_t)u8;
1315 return rcStrict;
1316}
1317
1318
1319/**
1320 * Fetches the next signed byte from the opcode stream, extending it to
1321 * unsigned 16-bit.
1322 *
1323 * @returns Strict VBox status code.
1324 * @param pIemCpu The IEM state.
1325 * @param pu16 Where to return the unsigned word.
1326 */
1327DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU16(PIEMCPU pIemCpu, uint16_t *pu16)
1328{
1329 uint8_t const offOpcode = pIemCpu->offOpcode;
1330 if (RT_UNLIKELY(offOpcode >= pIemCpu->cbOpcode))
1331 return iemOpcodeGetNextS8SxU16Slow(pIemCpu, pu16);
1332
1333 *pu16 = (int8_t)pIemCpu->abOpcode[offOpcode];
1334 pIemCpu->offOpcode = offOpcode + 1;
1335 return VINF_SUCCESS;
1336}
1337
1338
1339/**
1340 * Fetches the next signed byte from the opcode stream and sign-extends it to
1341 * a word, returning automatically on failure.
1342 *
1343 * @param a_pu16 Where to return the word.
1344 * @remark Implicitly references pIemCpu.
1345 */
1346#define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) \
1347 do \
1348 { \
1349 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU16(pIemCpu, (a_pu16)); \
1350 if (rcStrict2 != VINF_SUCCESS) \
1351 return rcStrict2; \
1352 } while (0)
1353
1354
1355/**
1356 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
1357 *
1358 * @returns Strict VBox status code.
1359 * @param pIemCpu The IEM state.
1360 * @param pu32 Where to return the opcode dword.
1361 */
1362DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU32Slow(PIEMCPU pIemCpu, uint32_t *pu32)
1363{
1364 uint8_t u8;
1365 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pIemCpu, &u8);
1366 if (rcStrict == VINF_SUCCESS)
1367 *pu32 = (int8_t)u8;
1368 return rcStrict;
1369}
1370
1371
1372/**
1373 * Fetches the next signed byte from the opcode stream, extending it to
1374 * unsigned 32-bit.
1375 *
1376 * @returns Strict VBox status code.
1377 * @param pIemCpu The IEM state.
1378 * @param pu32 Where to return the unsigned dword.
1379 */
1380DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU32(PIEMCPU pIemCpu, uint32_t *pu32)
1381{
1382 uint8_t const offOpcode = pIemCpu->offOpcode;
1383 if (RT_UNLIKELY(offOpcode >= pIemCpu->cbOpcode))
1384 return iemOpcodeGetNextS8SxU32Slow(pIemCpu, pu32);
1385
1386 *pu32 = (int8_t)pIemCpu->abOpcode[offOpcode];
1387 pIemCpu->offOpcode = offOpcode + 1;
1388 return VINF_SUCCESS;
1389}
1390
1391
1392/**
1393 * Fetches the next signed byte from the opcode stream and sign-extends it to
1394 * a double word, returning automatically on failure.
1395 *
1396 * @param a_pu32 Where to return the double word.
1397 * @remark Implicitly references pIemCpu.
1398 */
1399#define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) \
1400 do \
1401 { \
1402 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU32(pIemCpu, (a_pu32)); \
1403 if (rcStrict2 != VINF_SUCCESS) \
1404 return rcStrict2; \
1405 } while (0)
1406
1407
1408/**
1409 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
1410 *
1411 * @returns Strict VBox status code.
1412 * @param pIemCpu The IEM state.
1413 * @param pu64 Where to return the opcode qword.
1414 */
1415DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1416{
1417 uint8_t u8;
1418 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pIemCpu, &u8);
1419 if (rcStrict == VINF_SUCCESS)
1420 *pu64 = (int8_t)u8;
1421 return rcStrict;
1422}
1423
1424
1425/**
1426 * Fetches the next signed byte from the opcode stream, extending it to
1427 * unsigned 64-bit.
1428 *
1429 * @returns Strict VBox status code.
1430 * @param pIemCpu The IEM state.
1431 * @param pu64 Where to return the unsigned qword.
1432 */
1433DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1434{
1435 uint8_t const offOpcode = pIemCpu->offOpcode;
1436 if (RT_UNLIKELY(offOpcode >= pIemCpu->cbOpcode))
1437 return iemOpcodeGetNextS8SxU64Slow(pIemCpu, pu64);
1438
1439 *pu64 = (int8_t)pIemCpu->abOpcode[offOpcode];
1440 pIemCpu->offOpcode = offOpcode + 1;
1441 return VINF_SUCCESS;
1442}
1443
1444
1445/**
1446 * Fetches the next signed byte from the opcode stream and sign-extends it to
1447 * a quad word, returning automatically on failure.
1448 *
1449 * @param a_pu64 Where to return the quad word.
1450 * @remark Implicitly references pIemCpu.
1451 */
1452#define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) \
1453 do \
1454 { \
1455 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU64(pIemCpu, (a_pu64)); \
1456 if (rcStrict2 != VINF_SUCCESS) \
1457 return rcStrict2; \
1458 } while (0)
1459
1460
1461/**
1462 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
1463 *
1464 * @returns Strict VBox status code.
1465 * @param pIemCpu The IEM state.
1466 * @param pu16 Where to return the opcode word.
1467 */
1468DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16Slow(PIEMCPU pIemCpu, uint16_t *pu16)
1469{
1470 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 2);
1471 if (rcStrict == VINF_SUCCESS)
1472 {
1473 uint8_t offOpcode = pIemCpu->offOpcode;
1474 *pu16 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1475 pIemCpu->offOpcode = offOpcode + 2;
1476 }
1477 else
1478 *pu16 = 0;
1479 return rcStrict;
1480}
1481
1482
1483/**
1484 * Fetches the next opcode word.
1485 *
1486 * @returns Strict VBox status code.
1487 * @param pIemCpu The IEM state.
1488 * @param pu16 Where to return the opcode word.
1489 */
1490DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16(PIEMCPU pIemCpu, uint16_t *pu16)
1491{
1492 uint8_t const offOpcode = pIemCpu->offOpcode;
1493 if (RT_UNLIKELY(offOpcode + 2 > pIemCpu->cbOpcode))
1494 return iemOpcodeGetNextU16Slow(pIemCpu, pu16);
1495
1496 *pu16 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1497 pIemCpu->offOpcode = offOpcode + 2;
1498 return VINF_SUCCESS;
1499}
1500
1501
1502/**
1503 * Fetches the next opcode word, returns automatically on failure.
1504 *
1505 * @param a_pu16 Where to return the opcode word.
1506 * @remark Implicitly references pIemCpu.
1507 */
1508#define IEM_OPCODE_GET_NEXT_U16(a_pu16) \
1509 do \
1510 { \
1511 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16(pIemCpu, (a_pu16)); \
1512 if (rcStrict2 != VINF_SUCCESS) \
1513 return rcStrict2; \
1514 } while (0)
1515
1516
1517/**
1518 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
1519 *
1520 * @returns Strict VBox status code.
1521 * @param pIemCpu The IEM state.
1522 * @param pu32 Where to return the opcode double word.
1523 */
1524DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32Slow(PIEMCPU pIemCpu, uint32_t *pu32)
1525{
1526 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 2);
1527 if (rcStrict == VINF_SUCCESS)
1528 {
1529 uint8_t offOpcode = pIemCpu->offOpcode;
1530 *pu32 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1531 pIemCpu->offOpcode = offOpcode + 2;
1532 }
1533 else
1534 *pu32 = 0;
1535 return rcStrict;
1536}
1537
1538
1539/**
1540 * Fetches the next opcode word, zero extending it to a double word.
1541 *
1542 * @returns Strict VBox status code.
1543 * @param pIemCpu The IEM state.
1544 * @param pu32 Where to return the opcode double word.
1545 */
1546DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32(PIEMCPU pIemCpu, uint32_t *pu32)
1547{
1548 uint8_t const offOpcode = pIemCpu->offOpcode;
1549 if (RT_UNLIKELY(offOpcode + 2 > pIemCpu->cbOpcode))
1550 return iemOpcodeGetNextU16ZxU32Slow(pIemCpu, pu32);
1551
1552 *pu32 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1553 pIemCpu->offOpcode = offOpcode + 2;
1554 return VINF_SUCCESS;
1555}
1556
1557
1558/**
1559 * Fetches the next opcode word and zero extends it to a double word, returns
1560 * automatically on failure.
1561 *
1562 * @param a_pu32 Where to return the opcode double word.
1563 * @remark Implicitly references pIemCpu.
1564 */
1565#define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) \
1566 do \
1567 { \
1568 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU32(pIemCpu, (a_pu32)); \
1569 if (rcStrict2 != VINF_SUCCESS) \
1570 return rcStrict2; \
1571 } while (0)
1572
1573
1574/**
1575 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
1576 *
1577 * @returns Strict VBox status code.
1578 * @param pIemCpu The IEM state.
1579 * @param pu64 Where to return the opcode quad word.
1580 */
1581DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1582{
1583 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 2);
1584 if (rcStrict == VINF_SUCCESS)
1585 {
1586 uint8_t offOpcode = pIemCpu->offOpcode;
1587 *pu64 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1588 pIemCpu->offOpcode = offOpcode + 2;
1589 }
1590 else
1591 *pu64 = 0;
1592 return rcStrict;
1593}
1594
1595
1596/**
1597 * Fetches the next opcode word, zero extending it to a quad word.
1598 *
1599 * @returns Strict VBox status code.
1600 * @param pIemCpu The IEM state.
1601 * @param pu64 Where to return the opcode quad word.
1602 */
1603DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1604{
1605 uint8_t const offOpcode = pIemCpu->offOpcode;
1606 if (RT_UNLIKELY(offOpcode + 2 > pIemCpu->cbOpcode))
1607 return iemOpcodeGetNextU16ZxU64Slow(pIemCpu, pu64);
1608
1609 *pu64 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1610 pIemCpu->offOpcode = offOpcode + 2;
1611 return VINF_SUCCESS;
1612}
1613
1614
1615/**
1616 * Fetches the next opcode word and zero extends it to a quad word, returns
1617 * automatically on failure.
1618 *
1619 * @param a_pu64 Where to return the opcode quad word.
1620 * @remark Implicitly references pIemCpu.
1621 */
1622#define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) \
1623 do \
1624 { \
1625 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU64(pIemCpu, (a_pu64)); \
1626 if (rcStrict2 != VINF_SUCCESS) \
1627 return rcStrict2; \
1628 } while (0)
1629
1630
1631/**
1632 * Fetches the next signed word from the opcode stream.
1633 *
1634 * @returns Strict VBox status code.
1635 * @param pIemCpu The IEM state.
1636 * @param pi16 Where to return the signed word.
1637 */
1638DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS16(PIEMCPU pIemCpu, int16_t *pi16)
1639{
1640 return iemOpcodeGetNextU16(pIemCpu, (uint16_t *)pi16);
1641}
1642
1643
1644/**
1645 * Fetches the next signed word from the opcode stream, returning automatically
1646 * on failure.
1647 *
1648 * @param a_pi16 Where to return the signed word.
1649 * @remark Implicitly references pIemCpu.
1650 */
1651#define IEM_OPCODE_GET_NEXT_S16(a_pi16) \
1652 do \
1653 { \
1654 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS16(pIemCpu, (a_pi16)); \
1655 if (rcStrict2 != VINF_SUCCESS) \
1656 return rcStrict2; \
1657 } while (0)
1658
1659
1660/**
1661 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
1662 *
1663 * @returns Strict VBox status code.
1664 * @param pIemCpu The IEM state.
1665 * @param pu32 Where to return the opcode dword.
1666 */
1667DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32Slow(PIEMCPU pIemCpu, uint32_t *pu32)
1668{
1669 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 4);
1670 if (rcStrict == VINF_SUCCESS)
1671 {
1672 uint8_t offOpcode = pIemCpu->offOpcode;
1673 *pu32 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1674 pIemCpu->abOpcode[offOpcode + 1],
1675 pIemCpu->abOpcode[offOpcode + 2],
1676 pIemCpu->abOpcode[offOpcode + 3]);
1677 pIemCpu->offOpcode = offOpcode + 4;
1678 }
1679 else
1680 *pu32 = 0;
1681 return rcStrict;
1682}
1683
1684
1685/**
1686 * Fetches the next opcode dword.
1687 *
1688 * @returns Strict VBox status code.
1689 * @param pIemCpu The IEM state.
1690 * @param pu32 Where to return the opcode double word.
1691 */
1692DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32(PIEMCPU pIemCpu, uint32_t *pu32)
1693{
1694 uint8_t const offOpcode = pIemCpu->offOpcode;
1695 if (RT_UNLIKELY(offOpcode + 4 > pIemCpu->cbOpcode))
1696 return iemOpcodeGetNextU32Slow(pIemCpu, pu32);
1697
1698 *pu32 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1699 pIemCpu->abOpcode[offOpcode + 1],
1700 pIemCpu->abOpcode[offOpcode + 2],
1701 pIemCpu->abOpcode[offOpcode + 3]);
1702 pIemCpu->offOpcode = offOpcode + 4;
1703 return VINF_SUCCESS;
1704}
1705
1706
1707/**
1708 * Fetches the next opcode dword, returns automatically on failure.
1709 *
1710 * @param a_pu32 Where to return the opcode dword.
1711 * @remark Implicitly references pIemCpu.
1712 */
1713#define IEM_OPCODE_GET_NEXT_U32(a_pu32) \
1714 do \
1715 { \
1716 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32(pIemCpu, (a_pu32)); \
1717 if (rcStrict2 != VINF_SUCCESS) \
1718 return rcStrict2; \
1719 } while (0)
1720
1721
1722/**
1723 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
1724 *
1725 * @returns Strict VBox status code.
1726 * @param pIemCpu The IEM state.
1727 * @param   pIemCpu             The IEM state.
1727 * @param   pu64                Where to return the opcode quad word.
1728 */
1729DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1730{
1731 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 4);
1732 if (rcStrict == VINF_SUCCESS)
1733 {
1734 uint8_t offOpcode = pIemCpu->offOpcode;
1735 *pu64 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1736 pIemCpu->abOpcode[offOpcode + 1],
1737 pIemCpu->abOpcode[offOpcode + 2],
1738 pIemCpu->abOpcode[offOpcode + 3]);
1739 pIemCpu->offOpcode = offOpcode + 4;
1740 }
1741 else
1742 *pu64 = 0;
1743 return rcStrict;
1744}
1745
1746
1747/**
1748 * Fetches the next opcode dword, zero extending it to a quad word.
1749 *
1750 * @returns Strict VBox status code.
1751 * @param pIemCpu The IEM state.
1752 * @param pu64 Where to return the opcode quad word.
1753 */
1754DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1755{
1756 uint8_t const offOpcode = pIemCpu->offOpcode;
1757 if (RT_UNLIKELY(offOpcode + 4 > pIemCpu->cbOpcode))
1758 return iemOpcodeGetNextU32ZxU64Slow(pIemCpu, pu64);
1759
1760 *pu64 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1761 pIemCpu->abOpcode[offOpcode + 1],
1762 pIemCpu->abOpcode[offOpcode + 2],
1763 pIemCpu->abOpcode[offOpcode + 3]);
1764 pIemCpu->offOpcode = offOpcode + 4;
1765 return VINF_SUCCESS;
1766}
1767
1768
1769/**
1770 * Fetches the next opcode dword and zero extends it to a quad word, returns
1771 * automatically on failure.
1772 *
1773 * @param a_pu64 Where to return the opcode quad word.
1774 * @remark Implicitly references pIemCpu.
1775 */
1776#define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) \
1777 do \
1778 { \
1779 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32ZxU64(pIemCpu, (a_pu64)); \
1780 if (rcStrict2 != VINF_SUCCESS) \
1781 return rcStrict2; \
1782 } while (0)
1783
1784
1785/**
1786 * Fetches the next signed double word from the opcode stream.
1787 *
1788 * @returns Strict VBox status code.
1789 * @param pIemCpu The IEM state.
1790 * @param pi32 Where to return the signed double word.
1791 */
1792DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32(PIEMCPU pIemCpu, int32_t *pi32)
1793{
1794 return iemOpcodeGetNextU32(pIemCpu, (uint32_t *)pi32);
1795}
1796
1797/**
1798 * Fetches the next signed double word from the opcode stream, returning
1799 * automatically on failure.
1800 *
1801 * @param a_pi32 Where to return the signed double word.
1802 * @remark Implicitly references pIemCpu.
1803 */
1804#define IEM_OPCODE_GET_NEXT_S32(a_pi32) \
1805 do \
1806 { \
1807 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32(pIemCpu, (a_pi32)); \
1808 if (rcStrict2 != VINF_SUCCESS) \
1809 return rcStrict2; \
1810 } while (0)
1811
1812
1813/**
1814 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
1815 *
1816 * @returns Strict VBox status code.
1817 * @param pIemCpu The IEM state.
1818 * @param pu64 Where to return the opcode qword.
1819 */
1820DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS32SxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1821{
1822 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 4);
1823 if (rcStrict == VINF_SUCCESS)
1824 {
1825 uint8_t offOpcode = pIemCpu->offOpcode;
1826 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1827 pIemCpu->abOpcode[offOpcode + 1],
1828 pIemCpu->abOpcode[offOpcode + 2],
1829 pIemCpu->abOpcode[offOpcode + 3]);
1830 pIemCpu->offOpcode = offOpcode + 4;
1831 }
1832 else
1833 *pu64 = 0;
1834 return rcStrict;
1835}
1836
1837
1838/**
1839 * Fetches the next opcode dword, sign extending it into a quad word.
1840 *
1841 * @returns Strict VBox status code.
1842 * @param pIemCpu The IEM state.
1843 * @param pu64 Where to return the opcode quad word.
1844 */
1845DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32SxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1846{
1847 uint8_t const offOpcode = pIemCpu->offOpcode;
1848 if (RT_UNLIKELY(offOpcode + 4 > pIemCpu->cbOpcode))
1849 return iemOpcodeGetNextS32SxU64Slow(pIemCpu, pu64);
1850
1851 int32_t i32 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1852 pIemCpu->abOpcode[offOpcode + 1],
1853 pIemCpu->abOpcode[offOpcode + 2],
1854 pIemCpu->abOpcode[offOpcode + 3]);
1855 *pu64 = i32;
1856 pIemCpu->offOpcode = offOpcode + 4;
1857 return VINF_SUCCESS;
1858}
1859
1860
1861/**
1862 * Fetches the next opcode double word and sign extends it to a quad word,
1863 * returns automatically on failure.
1864 *
1865 * @param a_pu64 Where to return the opcode quad word.
1866 * @remark Implicitly references pIemCpu.
1867 */
1868#define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) \
1869 do \
1870 { \
1871 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32SxU64(pIemCpu, (a_pu64)); \
1872 if (rcStrict2 != VINF_SUCCESS) \
1873 return rcStrict2; \
1874 } while (0)
1875
1876
1877/**
1878 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
1879 *
1880 * @returns Strict VBox status code.
1881 * @param pIemCpu The IEM state.
1882 * @param pu64 Where to return the opcode qword.
1883 */
1884DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1885{
1886 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 8);
1887 if (rcStrict == VINF_SUCCESS)
1888 {
1889 uint8_t offOpcode = pIemCpu->offOpcode;
1890 *pu64 = RT_MAKE_U64_FROM_U8(pIemCpu->abOpcode[offOpcode],
1891 pIemCpu->abOpcode[offOpcode + 1],
1892 pIemCpu->abOpcode[offOpcode + 2],
1893 pIemCpu->abOpcode[offOpcode + 3],
1894 pIemCpu->abOpcode[offOpcode + 4],
1895 pIemCpu->abOpcode[offOpcode + 5],
1896 pIemCpu->abOpcode[offOpcode + 6],
1897 pIemCpu->abOpcode[offOpcode + 7]);
1898 pIemCpu->offOpcode = offOpcode + 8;
1899 }
1900 else
1901 *pu64 = 0;
1902 return rcStrict;
1903}
1904
1905
1906/**
1907 * Fetches the next opcode qword.
1908 *
1909 * @returns Strict VBox status code.
1910 * @param pIemCpu The IEM state.
1911 * @param pu64 Where to return the opcode qword.
1912 */
1913DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU64(PIEMCPU pIemCpu, uint64_t *pu64)
1914{
1915 uint8_t const offOpcode = pIemCpu->offOpcode;
1916 if (RT_UNLIKELY(offOpcode + 8 > pIemCpu->cbOpcode))
1917 return iemOpcodeGetNextU64Slow(pIemCpu, pu64);
1918
1919 *pu64 = RT_MAKE_U64_FROM_U8(pIemCpu->abOpcode[offOpcode],
1920 pIemCpu->abOpcode[offOpcode + 1],
1921 pIemCpu->abOpcode[offOpcode + 2],
1922 pIemCpu->abOpcode[offOpcode + 3],
1923 pIemCpu->abOpcode[offOpcode + 4],
1924 pIemCpu->abOpcode[offOpcode + 5],
1925 pIemCpu->abOpcode[offOpcode + 6],
1926 pIemCpu->abOpcode[offOpcode + 7]);
1927 pIemCpu->offOpcode = offOpcode + 8;
1928 return VINF_SUCCESS;
1929}
1930
1931
1932/**
1933 * Fetches the next opcode quad word, returns automatically on failure.
1934 *
1935 * @param a_pu64 Where to return the opcode quad word.
1936 * @remark Implicitly references pIemCpu.
1937 */
1938#define IEM_OPCODE_GET_NEXT_U64(a_pu64) \
1939 do \
1940 { \
1941 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU64(pIemCpu, (a_pu64)); \
1942 if (rcStrict2 != VINF_SUCCESS) \
1943 return rcStrict2; \
1944 } while (0)
1945
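/*
 * Illustrative sketch, not part of the original source: roughly how an opcode
 * decoder function consumes the IEM_OPCODE_GET_NEXT_XXX macros above.  The
 * handler name and its body are hypothetical; the point is that each macro
 * returns from the calling function on a failed fetch, so the call sites need
 * no explicit error handling.
 */
#if 0 /* example only */
FNIEMOP_DEF(iemOp_example_Iz)
{
    if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
    {
        uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm);
        /* ... decode/execute using the 16-bit immediate ... */
    }
    else
    {
        uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(&u32Imm);
        /* ... decode/execute using the 32-bit immediate ... */
    }
    return VINF_SUCCESS;
}
#endif
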
1946
1947/** @name Misc Worker Functions.
1948 * @{
1949 */
1950
1951
1952/**
1953 * Validates a new SS segment.
1954 *
1955 * @returns VBox strict status code.
1956 * @param pIemCpu The IEM per CPU instance data.
1957 * @param pCtx The CPU context.
1958 * @param   NewSS               The new SS selector.
1959 * @param uCpl The CPL to load the stack for.
1960 * @param pDesc Where to return the descriptor.
1961 */
1962IEM_STATIC VBOXSTRICTRC iemMiscValidateNewSS(PIEMCPU pIemCpu, PCCPUMCTX pCtx, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc)
1963{
1964 NOREF(pCtx);
1965
1966 /* Null selectors are not allowed (we're not called for dispatching
1967 interrupts with SS=0 in long mode). */
1968 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
1969 {
1970 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #TS(0)\n", NewSS));
1971 return iemRaiseTaskSwitchFault0(pIemCpu);
1972 }
1973
1974 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
1975 if ((NewSS & X86_SEL_RPL) != uCpl)
1976 {
1977 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differs -> #TS\n", NewSS, uCpl));
1978 return iemRaiseTaskSwitchFaultBySelector(pIemCpu, NewSS);
1979 }
1980
1981 /*
1982 * Read the descriptor.
1983 */
1984 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, pDesc, NewSS, X86_XCPT_TS);
1985 if (rcStrict != VINF_SUCCESS)
1986 return rcStrict;
1987
1988 /*
1989 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
1990 */
1991 if (!pDesc->Legacy.Gen.u1DescType)
1992 {
1993 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
1994 return iemRaiseTaskSwitchFaultBySelector(pIemCpu, NewSS);
1995 }
1996
1997 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
1998 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
1999 {
2000 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
2001 return iemRaiseTaskSwitchFaultBySelector(pIemCpu, NewSS);
2002 }
2003 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
2004 {
2005 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differs -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
2006 return iemRaiseTaskSwitchFaultBySelector(pIemCpu, NewSS);
2007 }
2008
2009 /* Is it there? */
2010 /** @todo testcase: Is this checked before the canonical / limit check below? */
2011 if (!pDesc->Legacy.Gen.u1Present)
2012 {
2013 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
2014 return iemRaiseSelectorNotPresentBySelector(pIemCpu, NewSS);
2015 }
2016
2017 return VINF_SUCCESS;
2018}
2019
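/*
 * Illustrative sketch, not part of the original source: how an instruction
 * implementation (LSS, POP SS, MOV SS and friends) might use the validator
 * above before committing a new SS value.  The helper name is hypothetical and
 * the commit step is elided.
 */
#if 0 /* example only */
IEM_STATIC VBOXSTRICTRC iemExampleCommitNewSS(PIEMCPU pIemCpu, PCPUMCTX pCtx, RTSEL uNewSS)
{
    IEMSELDESC   DescSS;
    VBOXSTRICTRC rcStrict = iemMiscValidateNewSS(pIemCpu, pCtx, uNewSS, pIemCpu->uCpl, &DescSS);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;    /* #TS / #NP already raised by the validator. */
    /* ... mark the descriptor accessed and commit pCtx->ss from DescSS ... */
    return VINF_SUCCESS;
}
#endif
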
2020
2021/**
2022 * Gets the correct EFLAGS regardless of whether PATM stores parts of them or
2023 * not.
2024 *
2025 * @param a_pIemCpu The IEM per CPU data.
2026 * @param a_pCtx The CPU context.
2027 */
2028#ifdef VBOX_WITH_RAW_MODE_NOT_R0
2029# define IEMMISC_GET_EFL(a_pIemCpu, a_pCtx) \
2030 ( IEM_VERIFICATION_ENABLED(a_pIemCpu) \
2031 ? (a_pCtx)->eflags.u \
2032 : CPUMRawGetEFlags(IEMCPU_TO_VMCPU(a_pIemCpu)) )
2033#else
2034# define IEMMISC_GET_EFL(a_pIemCpu, a_pCtx) \
2035 ( (a_pCtx)->eflags.u )
2036#endif
2037
2038/**
2039 * Updates the EFLAGS in the correct manner wrt. PATM.
2040 *
2041 * @param a_pIemCpu The IEM per CPU data.
2042 * @param a_pCtx The CPU context.
2043 * @param a_fEfl The new EFLAGS.
2044 */
2045#ifdef VBOX_WITH_RAW_MODE_NOT_R0
2046# define IEMMISC_SET_EFL(a_pIemCpu, a_pCtx, a_fEfl) \
2047 do { \
2048 if (IEM_VERIFICATION_ENABLED(a_pIemCpu)) \
2049 (a_pCtx)->eflags.u = (a_fEfl); \
2050 else \
2051 CPUMRawSetEFlags(IEMCPU_TO_VMCPU(a_pIemCpu), a_fEfl); \
2052 } while (0)
2053#else
2054# define IEMMISC_SET_EFL(a_pIemCpu, a_pCtx, a_fEfl) \
2055 do { \
2056 (a_pCtx)->eflags.u = (a_fEfl); \
2057 } while (0)
2058#endif
2059
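/*
 * Illustrative sketch, not part of the original source: EFLAGS read-modify-write
 * cycles should go through the two macros above so that bits managed by PATM in
 * raw mode stay consistent.  The helper name is hypothetical.
 */
#if 0 /* example only */
DECLINLINE(void) iemExampleClearIf(PIEMCPU pIemCpu, PCPUMCTX pCtx)
{
    uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx);     /* PATM-aware read. */
    fEfl &= ~X86_EFL_IF;                                /* Modify. */
    IEMMISC_SET_EFL(pIemCpu, pCtx, fEfl);               /* PATM-aware write back. */
}
#endif
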
2060
2061/** @} */
2062
2063/** @name Raising Exceptions.
2064 *
2065 * @{
2066 */
2067
2068/** @name IEM_XCPT_FLAGS_XXX - flags for iemRaiseXcptOrInt.
2069 * @{ */
2070/** CPU exception. */
2071#define IEM_XCPT_FLAGS_T_CPU_XCPT RT_BIT_32(0)
2072/** External interrupt (from PIC, APIC, whatever). */
2073#define IEM_XCPT_FLAGS_T_EXT_INT RT_BIT_32(1)
2074/** Software interrupt (int or into, not bound).
2075 * Returns to the following instruction. */
2076#define IEM_XCPT_FLAGS_T_SOFT_INT RT_BIT_32(2)
2077/** Takes an error code. */
2078#define IEM_XCPT_FLAGS_ERR RT_BIT_32(3)
2079/** Takes a CR2. */
2080#define IEM_XCPT_FLAGS_CR2 RT_BIT_32(4)
2081/** Generated by the breakpoint instruction. */
2082#define IEM_XCPT_FLAGS_BP_INSTR RT_BIT_32(5)
2083/** Generated by a DRx instruction breakpoint and RF should be cleared. */
2084#define IEM_XCPT_FLAGS_DRx_INSTR_BP RT_BIT_32(6)
2085/** @} */
2086
2087
2088/**
2089 * Loads the specified stack far pointer from the TSS.
2090 *
2091 * @returns VBox strict status code.
2092 * @param pIemCpu The IEM per CPU instance data.
2093 * @param pCtx The CPU context.
2094 * @param uCpl The CPL to load the stack for.
2095 * @param pSelSS Where to return the new stack segment.
2096 * @param puEsp Where to return the new stack pointer.
2097 */
2098IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PIEMCPU pIemCpu, PCCPUMCTX pCtx, uint8_t uCpl,
2099 PRTSEL pSelSS, uint32_t *puEsp)
2100{
2101 VBOXSTRICTRC rcStrict;
2102 Assert(uCpl < 4);
2103 *puEsp = 0; /* make gcc happy */
2104 *pSelSS = 0; /* make gcc happy */
2105
2106 switch (pCtx->tr.Attr.n.u4Type)
2107 {
2108 /*
2109 * 16-bit TSS (X86TSS16).
2110 */
2111 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed();
2112 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
2113 {
2114 uint32_t off = uCpl * 4 + 2;
2115 if (off + 4 > pCtx->tr.u32Limit)
2116 {
2117 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pCtx->tr.u32Limit));
2118 return iemRaiseTaskSwitchFaultCurrentTSS(pIemCpu);
2119 }
2120
2121 uint32_t u32Tmp = 0; /* gcc maybe... */
2122 rcStrict = iemMemFetchSysU32(pIemCpu, &u32Tmp, UINT8_MAX, pCtx->tr.u64Base + off);
2123 if (rcStrict == VINF_SUCCESS)
2124 {
2125 *puEsp = RT_LOWORD(u32Tmp);
2126 *pSelSS = RT_HIWORD(u32Tmp);
2127 return VINF_SUCCESS;
2128 }
2129 break;
2130 }
2131
2132 /*
2133 * 32-bit TSS (X86TSS32).
2134 */
2135 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed();
2136 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
2137 {
2138 uint32_t off = uCpl * 8 + 4;
2139 if (off + 7 > pCtx->tr.u32Limit)
2140 {
2141            Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS32\n", uCpl, pCtx->tr.u32Limit));
2142 return iemRaiseTaskSwitchFaultCurrentTSS(pIemCpu);
2143 }
2144
2145 uint64_t u64Tmp;
2146 rcStrict = iemMemFetchSysU64(pIemCpu, &u64Tmp, UINT8_MAX, pCtx->tr.u64Base + off);
2147 if (rcStrict == VINF_SUCCESS)
2148 {
2149 *puEsp = u64Tmp & UINT32_MAX;
2150 *pSelSS = (RTSEL)(u64Tmp >> 32);
2151 return VINF_SUCCESS;
2152 }
2153 break;
2154 }
2155
2156 default:
2157 AssertFailedReturn(VERR_IEM_IPE_4);
2158 }
2159 return rcStrict;
2160}
2161
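/*
 * Worked example, not part of the original source, of the offset math used
 * above: the ring 0..2 stack slots start at offset 2 in a 16-bit TSS (4 bytes
 * per ring: sp, ss) and at offset 4 in a 32-bit TSS (8 bytes per ring: esp,
 * ss + padding).  So for uCpl=2:
 *      16-bit TSS:  off = 2 * 4 + 2 = 0x0a  -> sp2:ss2
 *      32-bit TSS:  off = 2 * 8 + 4 = 0x14  -> esp2:ss2
 */
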
2162
2163/**
2164 * Loads the specified stack pointer from the 64-bit TSS.
2165 *
2166 * @returns VBox strict status code.
2167 * @param pIemCpu The IEM per CPU instance data.
2168 * @param pCtx The CPU context.
2169 * @param uCpl The CPL to load the stack for.
2170 * @param uIst The interrupt stack table index, 0 if to use uCpl.
2171 * @param puRsp Where to return the new stack pointer.
2172 */
2173IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss64(PIEMCPU pIemCpu, PCCPUMCTX pCtx, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp)
2174{
2175 Assert(uCpl < 4);
2176 Assert(uIst < 8);
2177 *puRsp = 0; /* make gcc happy */
2178
2179 AssertReturn(pCtx->tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
2180
2181 uint32_t off;
2182 if (uIst)
2183 off = (uIst - 1) * sizeof(uint64_t) + RT_OFFSETOF(X86TSS64, ist1);
2184 else
2185 off = uCpl * sizeof(uint64_t) + RT_OFFSETOF(X86TSS64, rsp0);
2186 if (off + sizeof(uint64_t) > pCtx->tr.u32Limit)
2187 {
2188 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pCtx->tr.u32Limit));
2189 return iemRaiseTaskSwitchFaultCurrentTSS(pIemCpu);
2190 }
2191
2192 return iemMemFetchSysU64(pIemCpu, puRsp, UINT8_MAX, pCtx->tr.u64Base + off);
2193}
2194
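/*
 * Worked example, not part of the original source, of the 64-bit TSS offset
 * math above: rsp0 lives at offset 0x04 and ist1 at offset 0x24, so uCpl=1
 * with uIst=0 gives off = 1 * 8 + 0x04 = 0x0c (rsp1), while uIst=3 gives
 * off = (3 - 1) * 8 + 0x24 = 0x34 (ist3).
 */
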
2195
2196/**
2197 * Adjust the CPU state according to the exception being raised.
2198 *
2199 * @param pCtx The CPU context.
2200 * @param u8Vector The exception that has been raised.
2201 */
2202DECLINLINE(void) iemRaiseXcptAdjustState(PCPUMCTX pCtx, uint8_t u8Vector)
2203{
2204 switch (u8Vector)
2205 {
2206 case X86_XCPT_DB:
2207 pCtx->dr[7] &= ~X86_DR7_GD;
2208 break;
2209 /** @todo Read the AMD and Intel exception reference... */
2210 }
2211}
2212
2213
2214/**
2215 * Implements exceptions and interrupts for real mode.
2216 *
2217 * @returns VBox strict status code.
2218 * @param pIemCpu The IEM per CPU instance data.
2219 * @param pCtx The CPU context.
2220 * @param cbInstr The number of bytes to offset rIP by in the return
2221 * address.
2222 * @param u8Vector The interrupt / exception vector number.
2223 * @param fFlags The flags.
2224 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2225 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2226 */
2227IEM_STATIC VBOXSTRICTRC
2228iemRaiseXcptOrIntInRealMode(PIEMCPU pIemCpu,
2229 PCPUMCTX pCtx,
2230 uint8_t cbInstr,
2231 uint8_t u8Vector,
2232 uint32_t fFlags,
2233 uint16_t uErr,
2234 uint64_t uCr2)
2235{
2236 AssertReturn(pIemCpu->enmCpuMode == IEMMODE_16BIT, VERR_IEM_IPE_6);
2237 NOREF(uErr); NOREF(uCr2);
2238
2239 /*
2240 * Read the IDT entry.
2241 */
2242 if (pCtx->idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
2243 {
2244 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
2245 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2246 }
2247 RTFAR16 Idte;
2248 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pIemCpu, (uint32_t *)&Idte, UINT8_MAX,
2249 pCtx->idtr.pIdt + UINT32_C(4) * u8Vector);
2250 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2251 return rcStrict;
2252
2253 /*
2254 * Push the stack frame.
2255 */
2256 uint16_t *pu16Frame;
2257 uint64_t uNewRsp;
2258 rcStrict = iemMemStackPushBeginSpecial(pIemCpu, 6, (void **)&pu16Frame, &uNewRsp);
2259 if (rcStrict != VINF_SUCCESS)
2260 return rcStrict;
2261
2262 uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx);
2263 pu16Frame[2] = (uint16_t)fEfl;
2264 pu16Frame[1] = (uint16_t)pCtx->cs.Sel;
2265 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->ip + cbInstr : pCtx->ip;
2266 rcStrict = iemMemStackPushCommitSpecial(pIemCpu, pu16Frame, uNewRsp);
2267 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2268 return rcStrict;
2269
2270 /*
2271 * Load the vector address into cs:ip and make exception specific state
2272 * adjustments.
2273 */
2274 pCtx->cs.Sel = Idte.sel;
2275 pCtx->cs.ValidSel = Idte.sel;
2276 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
2277 pCtx->cs.u64Base = (uint32_t)Idte.sel << 4;
2278 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
2279 pCtx->rip = Idte.off;
2280 fEfl &= ~X86_EFL_IF;
2281 IEMMISC_SET_EFL(pIemCpu, pCtx, fEfl);
2282
2283 /** @todo do we actually do this in real mode? */
2284 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
2285 iemRaiseXcptAdjustState(pCtx, u8Vector);
2286
2287 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
2288}
2289
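/*
 * Illustrative layout notes, not part of the original source, for the real-mode
 * path above: the IVT entry for vector N is the 4-byte far pointer at
 * idtr.pIdt + N * 4 (offset word first, then segment word), and the 6-byte
 * frame pushed on the stack is, from the lowest address up: return IP, return
 * CS, FLAGS -- matching pu16Frame[0..2] above.
 */
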
2290
2291/**
2292 * Loads a NULL data selector into a segment register when coming from V8086 mode.
2293 *
2294 * @param pIemCpu The IEM per CPU instance data.
2295 * @param pSReg Pointer to the segment register.
2296 */
2297IEM_STATIC void iemHlpLoadNullDataSelectorOnV86Xcpt(PIEMCPU pIemCpu, PCPUMSELREG pSReg)
2298{
2299 pSReg->Sel = 0;
2300 pSReg->ValidSel = 0;
2301 if (IEM_IS_GUEST_CPU_INTEL(pIemCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
2302 {
2303 /* VT-x (Intel 3960x) doesn't change the base and limit, clears and sets the following attributes */
2304 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
2305 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
2306 }
2307 else
2308 {
2309 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2310 /** @todo check this on AMD-V */
2311 pSReg->u64Base = 0;
2312 pSReg->u32Limit = 0;
2313 }
2314}
2315
2316
2317/**
2318 * Loads a segment selector during a task switch in V8086 mode.
2319 *
2320 * @param pIemCpu The IEM per CPU instance data.
2321 * @param pSReg Pointer to the segment register.
2322 * @param uSel The selector value to load.
2323 */
2324IEM_STATIC void iemHlpLoadSelectorInV86Mode(PIEMCPU pIemCpu, PCPUMSELREG pSReg, uint16_t uSel)
2325{
2326 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
2327 pSReg->Sel = uSel;
2328 pSReg->ValidSel = uSel;
2329 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2330 pSReg->u64Base = uSel << 4;
2331 pSReg->u32Limit = 0xffff;
2332 pSReg->Attr.u = 0xf3;
2333}
2334
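/*
 * Worked example, not part of the original source: in V8086 mode a selector is
 * just a real-mode style paragraph number, so loading uSel=0x1234 above yields
 * u64Base=0x12340, u32Limit=0xffff and the fixed attribute value 0xf3
 * (present, DPL=3, accessed read/write data segment).
 */
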
2335
2336/**
2337 * Loads a NULL data selector into a selector register, both the hidden and
2338 * visible parts, in protected mode.
2339 *
2340 * @param pIemCpu The IEM state of the calling EMT.
2341 * @param pSReg Pointer to the segment register.
2342 * @param uRpl The RPL.
2343 */
2344IEM_STATIC void iemHlpLoadNullDataSelectorProt(PIEMCPU pIemCpu, PCPUMSELREG pSReg, RTSEL uRpl)
2345{
2346    /** @todo Testcase: write a testcase checking what happens when loading a NULL
2347 * data selector in protected mode. */
2348 pSReg->Sel = uRpl;
2349 pSReg->ValidSel = uRpl;
2350 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2351 if (IEM_IS_GUEST_CPU_INTEL(pIemCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
2352 {
2353 /* VT-x (Intel 3960x) observed doing something like this. */
2354 pSReg->Attr.u = X86DESCATTR_UNUSABLE | X86DESCATTR_G | X86DESCATTR_D | (pIemCpu->uCpl << X86DESCATTR_DPL_SHIFT);
2355 pSReg->u32Limit = UINT32_MAX;
2356 pSReg->u64Base = 0;
2357 }
2358 else
2359 {
2360 pSReg->Attr.u = X86DESCATTR_UNUSABLE;
2361 pSReg->u32Limit = 0;
2362 pSReg->u64Base = 0;
2363 }
2364}
2365
2366
2367/**
2368 * Loads a segment selector during a task switch in protected mode.
2369 *
2370 * In this task switch scenario, we would throw \#TS exceptions rather than
2371 * \#GPs.
2372 *
2373 * @returns VBox strict status code.
2374 * @param pIemCpu The IEM per CPU instance data.
2375 * @param pSReg Pointer to the segment register.
2376 * @param uSel The new selector value.
2377 *
2378 * @remarks This does _not_ handle CS or SS.
2379 * @remarks This expects pIemCpu->uCpl to be up to date.
2380 */
2381IEM_STATIC VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PIEMCPU pIemCpu, PCPUMSELREG pSReg, uint16_t uSel)
2382{
2383 Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
2384
2385 /* Null data selector. */
2386 if (!(uSel & X86_SEL_MASK_OFF_RPL))
2387 {
2388 iemHlpLoadNullDataSelectorProt(pIemCpu, pSReg, uSel);
2389 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pSReg));
2390 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_HIDDEN_SEL_REGS);
2391 return VINF_SUCCESS;
2392 }
2393
2394 /* Fetch the descriptor. */
2395 IEMSELDESC Desc;
2396 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uSel, X86_XCPT_TS);
2397 if (rcStrict != VINF_SUCCESS)
2398 {
2399 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
2400 VBOXSTRICTRC_VAL(rcStrict)));
2401 return rcStrict;
2402 }
2403
2404 /* Must be a data segment or readable code segment. */
2405 if ( !Desc.Legacy.Gen.u1DescType
2406 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
2407 {
2408 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
2409 Desc.Legacy.Gen.u4Type));
2410 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uSel & X86_SEL_MASK_OFF_RPL);
2411 }
2412
2413 /* Check privileges for data segments and non-conforming code segments. */
2414 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2415 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2416 {
2417 /* The RPL and the new CPL must be less than or equal to the DPL. */
2418 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
2419 || (pIemCpu->uCpl > Desc.Legacy.Gen.u2Dpl))
2420 {
2421 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
2422 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
2423 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uSel & X86_SEL_MASK_OFF_RPL);
2424 }
2425 }
2426
2427 /* Is it there? */
2428 if (!Desc.Legacy.Gen.u1Present)
2429 {
2430 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
2431 return iemRaiseSelectorNotPresentWithErr(pIemCpu, uSel & X86_SEL_MASK_OFF_RPL);
2432 }
2433
2434 /* The base and limit. */
2435 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
2436 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
2437
2438 /*
2439 * Ok, everything checked out fine. Now set the accessed bit before
2440 * committing the result into the registers.
2441 */
2442 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2443 {
2444 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uSel);
2445 if (rcStrict != VINF_SUCCESS)
2446 return rcStrict;
2447 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2448 }
2449
2450 /* Commit */
2451 pSReg->Sel = uSel;
2452 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
2453 pSReg->u32Limit = cbLimit;
2454 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
2455 pSReg->ValidSel = uSel;
2456 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2457 if (IEM_IS_GUEST_CPU_INTEL(pIemCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
2458 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
2459
2460 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pSReg));
2461 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_HIDDEN_SEL_REGS);
2462 return VINF_SUCCESS;
2463}
2464
2465
2466/**
2467 * Performs a task switch.
2468 *
2469 * If the task switch is the result of a JMP, CALL or IRET instruction, the
2470 * caller is responsible for performing the necessary checks (like DPL, TSS
2471 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
2472 * reference for JMP, CALL, IRET.
2473 *
2474 * If the task switch is due to a software interrupt or hardware exception,
2475 * the caller is responsible for validating the TSS selector and descriptor. See
2476 * Intel Instruction reference for INT n.
2477 *
2478 * @returns VBox strict status code.
2479 * @param pIemCpu The IEM per CPU instance data.
2480 * @param pCtx The CPU context.
2481 * @param enmTaskSwitch What caused this task switch.
2482 * @param uNextEip The EIP effective after the task switch.
2483 * @param fFlags The flags.
2484 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2485 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2486 * @param SelTSS The TSS selector of the new task.
2487 * @param pNewDescTSS Pointer to the new TSS descriptor.
2488 */
2489IEM_STATIC VBOXSTRICTRC
2490iemTaskSwitch(PIEMCPU pIemCpu,
2491 PCPUMCTX pCtx,
2492 IEMTASKSWITCH enmTaskSwitch,
2493 uint32_t uNextEip,
2494 uint32_t fFlags,
2495 uint16_t uErr,
2496 uint64_t uCr2,
2497 RTSEL SelTSS,
2498 PIEMSELDESC pNewDescTSS)
2499{
2500 Assert(!IEM_IS_REAL_MODE(pIemCpu));
2501 Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
2502
2503 uint32_t const uNewTSSType = pNewDescTSS->Legacy.Gate.u4Type;
2504 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2505 || uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2506 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2507 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2508
2509 bool const fIsNewTSS386 = ( uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2510 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2511
2512 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTSS=%#x fIsNewTSS386=%RTbool EIP=%#RGv uNextEip=%#RGv\n", enmTaskSwitch, SelTSS,
2513 fIsNewTSS386, pCtx->eip, uNextEip));
2514
2515 /* Update CR2 in case it's a page-fault. */
2516 /** @todo This should probably be done much earlier in IEM/PGM. See
2517 * @bugref{5653#c49}. */
2518 if (fFlags & IEM_XCPT_FLAGS_CR2)
2519 pCtx->cr2 = uCr2;
2520
2521 /*
2522 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
2523 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
2524 */
2525 uint32_t const uNewTSSLimit = pNewDescTSS->Legacy.Gen.u16LimitLow | (pNewDescTSS->Legacy.Gen.u4LimitHigh << 16);
2526 uint32_t const uNewTSSLimitMin = fIsNewTSS386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
2527 if (uNewTSSLimit < uNewTSSLimitMin)
2528 {
2529 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTSSLimit=%#x uNewTSSLimitMin=%#x -> #TS\n",
2530 enmTaskSwitch, uNewTSSLimit, uNewTSSLimitMin));
2531 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
2532 }
2533
2534 /*
2535 * Check the current TSS limit. The last written byte to the current TSS during the
2536 * task switch will be 2 bytes at offset 0x5C (32-bit) and 1 byte at offset 0x28 (16-bit).
2537 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2538 *
2539     * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
2540 * end up with smaller than "legal" TSS limits.
2541 */
2542 uint32_t const uCurTSSLimit = pCtx->tr.u32Limit;
2543 uint32_t const uCurTSSLimitMin = fIsNewTSS386 ? 0x5F : 0x29;
2544 if (uCurTSSLimit < uCurTSSLimitMin)
2545 {
2546 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTSSLimit=%#x uCurTSSLimitMin=%#x -> #TS\n",
2547 enmTaskSwitch, uCurTSSLimit, uCurTSSLimitMin));
2548 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
2549 }
2550
2551 /*
2552 * Verify that the new TSS can be accessed and map it. Map only the required contents
2553 * and not the entire TSS.
2554 */
2555 void *pvNewTSS;
2556 uint32_t cbNewTSS = uNewTSSLimitMin + 1;
2557 RTGCPTR GCPtrNewTSS = X86DESC_BASE(&pNewDescTSS->Legacy);
2558 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
2559 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
2560 * not perform correct translation if this happens. See Intel spec. 7.2.1
2561 * "Task-State Segment" */
2562 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, &pvNewTSS, cbNewTSS, UINT8_MAX, GCPtrNewTSS, IEM_ACCESS_SYS_RW);
2563 if (rcStrict != VINF_SUCCESS)
2564 {
2565 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTSS=%u uNewTSSLimit=%u rc=%Rrc\n", enmTaskSwitch,
2566 cbNewTSS, uNewTSSLimit, VBOXSTRICTRC_VAL(rcStrict)));
2567 return rcStrict;
2568 }
2569
2570 /*
2571 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
2572 */
2573 uint32_t u32EFlags = pCtx->eflags.u32;
2574 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
2575 || enmTaskSwitch == IEMTASKSWITCH_IRET)
2576 {
2577 PX86DESC pDescCurTSS;
2578 rcStrict = iemMemMap(pIemCpu, (void **)&pDescCurTSS, sizeof(*pDescCurTSS), UINT8_MAX,
2579 pCtx->gdtr.pGdt + (pCtx->tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
2580 if (rcStrict != VINF_SUCCESS)
2581 {
2582            Log(("iemTaskSwitch: Failed to read current TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2583 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2584 return rcStrict;
2585 }
2586
2587 pDescCurTSS->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2588 rcStrict = iemMemCommitAndUnmap(pIemCpu, pDescCurTSS, IEM_ACCESS_SYS_RW);
2589 if (rcStrict != VINF_SUCCESS)
2590 {
2591            Log(("iemTaskSwitch: Failed to commit current TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2592 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2593 return rcStrict;
2594 }
2595
2596 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
2597 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
2598 {
2599 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2600 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2601 u32EFlags &= ~X86_EFL_NT;
2602 }
2603 }
2604
2605 /*
2606 * Save the CPU state into the current TSS.
2607 */
2608 RTGCPTR GCPtrCurTSS = pCtx->tr.u64Base;
2609 if (GCPtrNewTSS == GCPtrCurTSS)
2610 {
2611 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTSS));
2612 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
2613 pCtx->cr3, pCtx->eip, pCtx->eflags.u32, pCtx->eax, pCtx->esp, pCtx->ebp, pCtx->cs.Sel, pCtx->ss.Sel, pCtx->ldtr.Sel));
2614 }
2615 if (fIsNewTSS386)
2616 {
2617 /*
2618 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
2619 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2620 */
2621 void *pvCurTSS32;
2622 uint32_t offCurTSS = RT_OFFSETOF(X86TSS32, eip);
2623 uint32_t cbCurTSS = RT_OFFSETOF(X86TSS32, selLdt) - RT_OFFSETOF(X86TSS32, eip);
2624 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
2625 rcStrict = iemMemMap(pIemCpu, &pvCurTSS32, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
2626 if (rcStrict != VINF_SUCCESS)
2627 {
2628 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
2629 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
2630 return rcStrict;
2631 }
2632
2633        /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS..offCurTSS + cbCurTSS). */
2634 PX86TSS32 pCurTSS32 = (PX86TSS32)((uintptr_t)pvCurTSS32 - offCurTSS);
2635 pCurTSS32->eip = uNextEip;
2636 pCurTSS32->eflags = u32EFlags;
2637 pCurTSS32->eax = pCtx->eax;
2638 pCurTSS32->ecx = pCtx->ecx;
2639 pCurTSS32->edx = pCtx->edx;
2640 pCurTSS32->ebx = pCtx->ebx;
2641 pCurTSS32->esp = pCtx->esp;
2642 pCurTSS32->ebp = pCtx->ebp;
2643 pCurTSS32->esi = pCtx->esi;
2644 pCurTSS32->edi = pCtx->edi;
2645 pCurTSS32->es = pCtx->es.Sel;
2646 pCurTSS32->cs = pCtx->cs.Sel;
2647 pCurTSS32->ss = pCtx->ss.Sel;
2648 pCurTSS32->ds = pCtx->ds.Sel;
2649 pCurTSS32->fs = pCtx->fs.Sel;
2650 pCurTSS32->gs = pCtx->gs.Sel;
2651
2652 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvCurTSS32, IEM_ACCESS_SYS_RW);
2653 if (rcStrict != VINF_SUCCESS)
2654 {
2655 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2656 VBOXSTRICTRC_VAL(rcStrict)));
2657 return rcStrict;
2658 }
2659 }
2660 else
2661 {
2662 /*
2663 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
2664 */
2665 void *pvCurTSS16;
2666 uint32_t offCurTSS = RT_OFFSETOF(X86TSS16, ip);
2667 uint32_t cbCurTSS = RT_OFFSETOF(X86TSS16, selLdt) - RT_OFFSETOF(X86TSS16, ip);
2668 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
2669 rcStrict = iemMemMap(pIemCpu, &pvCurTSS16, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
2670 if (rcStrict != VINF_SUCCESS)
2671 {
2672 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
2673 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
2674 return rcStrict;
2675 }
2676
2677        /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS..offCurTSS + cbCurTSS). */
2678 PX86TSS16 pCurTSS16 = (PX86TSS16)((uintptr_t)pvCurTSS16 - offCurTSS);
2679 pCurTSS16->ip = uNextEip;
2680 pCurTSS16->flags = u32EFlags;
2681 pCurTSS16->ax = pCtx->ax;
2682 pCurTSS16->cx = pCtx->cx;
2683 pCurTSS16->dx = pCtx->dx;
2684 pCurTSS16->bx = pCtx->bx;
2685 pCurTSS16->sp = pCtx->sp;
2686 pCurTSS16->bp = pCtx->bp;
2687 pCurTSS16->si = pCtx->si;
2688 pCurTSS16->di = pCtx->di;
2689 pCurTSS16->es = pCtx->es.Sel;
2690 pCurTSS16->cs = pCtx->cs.Sel;
2691 pCurTSS16->ss = pCtx->ss.Sel;
2692 pCurTSS16->ds = pCtx->ds.Sel;
2693
2694 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvCurTSS16, IEM_ACCESS_SYS_RW);
2695 if (rcStrict != VINF_SUCCESS)
2696 {
2697 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2698 VBOXSTRICTRC_VAL(rcStrict)));
2699 return rcStrict;
2700 }
2701 }
2702
2703 /*
2704 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
2705 */
2706 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2707 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2708 {
2709 /* 16 or 32-bit TSS doesn't matter, we only access the first, common 16-bit field (selPrev) here. */
2710 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTSS;
2711 pNewTSS->selPrev = pCtx->tr.Sel;
2712 }
2713
2714 /*
2715 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky,
2716 * it's done further below with error handling (e.g. CR3 changes will go through PGM).
2717 */
2718 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
2719 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
2720 bool fNewDebugTrap;
2721 if (fIsNewTSS386)
2722 {
2723 PX86TSS32 pNewTSS32 = (PX86TSS32)pvNewTSS;
2724 uNewCr3 = (pCtx->cr0 & X86_CR0_PG) ? pNewTSS32->cr3 : 0;
2725 uNewEip = pNewTSS32->eip;
2726 uNewEflags = pNewTSS32->eflags;
2727 uNewEax = pNewTSS32->eax;
2728 uNewEcx = pNewTSS32->ecx;
2729 uNewEdx = pNewTSS32->edx;
2730 uNewEbx = pNewTSS32->ebx;
2731 uNewEsp = pNewTSS32->esp;
2732 uNewEbp = pNewTSS32->ebp;
2733 uNewEsi = pNewTSS32->esi;
2734 uNewEdi = pNewTSS32->edi;
2735 uNewES = pNewTSS32->es;
2736 uNewCS = pNewTSS32->cs;
2737 uNewSS = pNewTSS32->ss;
2738 uNewDS = pNewTSS32->ds;
2739 uNewFS = pNewTSS32->fs;
2740 uNewGS = pNewTSS32->gs;
2741 uNewLdt = pNewTSS32->selLdt;
2742 fNewDebugTrap = RT_BOOL(pNewTSS32->fDebugTrap);
2743 }
2744 else
2745 {
2746 PX86TSS16 pNewTSS16 = (PX86TSS16)pvNewTSS;
2747 uNewCr3 = 0;
2748 uNewEip = pNewTSS16->ip;
2749 uNewEflags = pNewTSS16->flags;
2750 uNewEax = UINT32_C(0xffff0000) | pNewTSS16->ax;
2751 uNewEcx = UINT32_C(0xffff0000) | pNewTSS16->cx;
2752 uNewEdx = UINT32_C(0xffff0000) | pNewTSS16->dx;
2753 uNewEbx = UINT32_C(0xffff0000) | pNewTSS16->bx;
2754 uNewEsp = UINT32_C(0xffff0000) | pNewTSS16->sp;
2755 uNewEbp = UINT32_C(0xffff0000) | pNewTSS16->bp;
2756 uNewEsi = UINT32_C(0xffff0000) | pNewTSS16->si;
2757 uNewEdi = UINT32_C(0xffff0000) | pNewTSS16->di;
2758 uNewES = pNewTSS16->es;
2759 uNewCS = pNewTSS16->cs;
2760 uNewSS = pNewTSS16->ss;
2761 uNewDS = pNewTSS16->ds;
2762 uNewFS = 0;
2763 uNewGS = 0;
2764 uNewLdt = pNewTSS16->selLdt;
2765 fNewDebugTrap = false;
2766 }
2767
2768 if (GCPtrNewTSS == GCPtrCurTSS)
2769 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
2770 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
2771
2772 /*
2773 * We're done accessing the new TSS.
2774 */
2775 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvNewTSS, IEM_ACCESS_SYS_RW);
2776 if (rcStrict != VINF_SUCCESS)
2777 {
2778 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
2779 return rcStrict;
2780 }
2781
2782 /*
2783 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
2784 */
2785 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
2786 {
2787 rcStrict = iemMemMap(pIemCpu, (void **)&pNewDescTSS, sizeof(*pNewDescTSS), UINT8_MAX,
2788 pCtx->gdtr.pGdt + (SelTSS & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
2789 if (rcStrict != VINF_SUCCESS)
2790 {
2791 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2792 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2793 return rcStrict;
2794 }
2795
2796 /* Check that the descriptor indicates the new TSS is available (not busy). */
2797 AssertMsg( pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2798 || pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
2799 ("Invalid TSS descriptor type=%#x", pNewDescTSS->Legacy.Gate.u4Type));
2800
2801 pNewDescTSS->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2802 rcStrict = iemMemCommitAndUnmap(pIemCpu, pNewDescTSS, IEM_ACCESS_SYS_RW);
2803 if (rcStrict != VINF_SUCCESS)
2804 {
2805 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2806 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2807 return rcStrict;
2808 }
2809 }
2810
2811 /*
2812     * From this point on, we're technically in the new task. Exceptions raised while completing
2813     * the task switch are delivered in the context of the new task, before any instruction in the new task executes.
2814 */
2815 pCtx->tr.Sel = SelTSS;
2816 pCtx->tr.ValidSel = SelTSS;
2817 pCtx->tr.fFlags = CPUMSELREG_FLAGS_VALID;
2818 pCtx->tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTSS->Legacy);
2819 pCtx->tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTSS->Legacy);
2820 pCtx->tr.u64Base = X86DESC_BASE(&pNewDescTSS->Legacy);
2821 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_TR);
2822
2823 /* Set the busy bit in TR. */
2824 pCtx->tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2825 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
2826 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2827 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2828 {
2829 uNewEflags |= X86_EFL_NT;
2830 }
2831
2832 pCtx->dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
2833 pCtx->cr0 |= X86_CR0_TS;
2834 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_CR0);
2835
2836 pCtx->eip = uNewEip;
2837 pCtx->eax = uNewEax;
2838 pCtx->ecx = uNewEcx;
2839 pCtx->edx = uNewEdx;
2840 pCtx->ebx = uNewEbx;
2841 pCtx->esp = uNewEsp;
2842 pCtx->ebp = uNewEbp;
2843 pCtx->esi = uNewEsi;
2844 pCtx->edi = uNewEdi;
2845
2846 uNewEflags &= X86_EFL_LIVE_MASK;
2847 uNewEflags |= X86_EFL_RA1_MASK;
2848 IEMMISC_SET_EFL(pIemCpu, pCtx, uNewEflags);
2849
2850 /*
2851 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
2852     * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3,
2853     * because the hidden part data originates from the guest LDT/GDT, which is accessed through paging.
2854 */
2855 pCtx->es.Sel = uNewES;
2856 pCtx->es.fFlags = CPUMSELREG_FLAGS_STALE;
2857 pCtx->es.Attr.u &= ~X86DESCATTR_P;
2858
2859 pCtx->cs.Sel = uNewCS;
2860 pCtx->cs.fFlags = CPUMSELREG_FLAGS_STALE;
2861 pCtx->cs.Attr.u &= ~X86DESCATTR_P;
2862
2863 pCtx->ss.Sel = uNewSS;
2864 pCtx->ss.fFlags = CPUMSELREG_FLAGS_STALE;
2865 pCtx->ss.Attr.u &= ~X86DESCATTR_P;
2866
2867 pCtx->ds.Sel = uNewDS;
2868 pCtx->ds.fFlags = CPUMSELREG_FLAGS_STALE;
2869 pCtx->ds.Attr.u &= ~X86DESCATTR_P;
2870
2871 pCtx->fs.Sel = uNewFS;
2872 pCtx->fs.fFlags = CPUMSELREG_FLAGS_STALE;
2873 pCtx->fs.Attr.u &= ~X86DESCATTR_P;
2874
2875 pCtx->gs.Sel = uNewGS;
2876 pCtx->gs.fFlags = CPUMSELREG_FLAGS_STALE;
2877 pCtx->gs.Attr.u &= ~X86DESCATTR_P;
2878 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_HIDDEN_SEL_REGS);
2879
2880 pCtx->ldtr.Sel = uNewLdt;
2881 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
2882 pCtx->ldtr.Attr.u &= ~X86DESCATTR_P;
2883 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_LDTR);
2884
2885 if (IEM_IS_GUEST_CPU_INTEL(pIemCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
2886 {
2887 pCtx->es.Attr.u |= X86DESCATTR_UNUSABLE;
2888 pCtx->cs.Attr.u |= X86DESCATTR_UNUSABLE;
2889 pCtx->ss.Attr.u |= X86DESCATTR_UNUSABLE;
2890 pCtx->ds.Attr.u |= X86DESCATTR_UNUSABLE;
2891 pCtx->fs.Attr.u |= X86DESCATTR_UNUSABLE;
2892 pCtx->gs.Attr.u |= X86DESCATTR_UNUSABLE;
2893 pCtx->ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
2894 }
2895
2896 /*
2897 * Switch CR3 for the new task.
2898 */
2899 if ( fIsNewTSS386
2900 && (pCtx->cr0 & X86_CR0_PG))
2901 {
2902 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
2903 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
2904 {
2905 int rc = CPUMSetGuestCR3(IEMCPU_TO_VMCPU(pIemCpu), uNewCr3);
2906 AssertRCSuccessReturn(rc, rc);
2907 }
2908 else
2909 pCtx->cr3 = uNewCr3;
2910
2911 /* Inform PGM. */
2912 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
2913 {
2914 int rc = PGMFlushTLB(IEMCPU_TO_VMCPU(pIemCpu), pCtx->cr3, !(pCtx->cr4 & X86_CR4_PGE));
2915 AssertRCReturn(rc, rc);
2916 /* ignore informational status codes */
2917 }
2918 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_CR3);
2919 }
2920
2921 /*
2922 * Switch LDTR for the new task.
2923 */
2924 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
2925 iemHlpLoadNullDataSelectorProt(pIemCpu, &pCtx->ldtr, uNewLdt);
2926 else
2927 {
2928 Assert(!pCtx->ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
2929
2930 IEMSELDESC DescNewLdt;
2931 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
2932 if (rcStrict != VINF_SUCCESS)
2933 {
2934 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
2935 uNewLdt, pCtx->gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
2936 return rcStrict;
2937 }
2938 if ( !DescNewLdt.Legacy.Gen.u1Present
2939 || DescNewLdt.Legacy.Gen.u1DescType
2940 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
2941 {
2942 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
2943 uNewLdt, DescNewLdt.Legacy.u));
2944 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
2945 }
2946
2947 pCtx->ldtr.ValidSel = uNewLdt;
2948 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
2949 pCtx->ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
2950 pCtx->ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
2951 pCtx->ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
2952 if (IEM_IS_GUEST_CPU_INTEL(pIemCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
2953 pCtx->ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
2954 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), &pCtx->ldtr));
2955 }
2956
2957 IEMSELDESC DescSS;
2958 if (IEM_IS_V86_MODE(pIemCpu))
2959 {
2960 pIemCpu->uCpl = 3;
2961 iemHlpLoadSelectorInV86Mode(pIemCpu, &pCtx->es, uNewES);
2962 iemHlpLoadSelectorInV86Mode(pIemCpu, &pCtx->cs, uNewCS);
2963 iemHlpLoadSelectorInV86Mode(pIemCpu, &pCtx->ss, uNewSS);
2964 iemHlpLoadSelectorInV86Mode(pIemCpu, &pCtx->ds, uNewDS);
2965 iemHlpLoadSelectorInV86Mode(pIemCpu, &pCtx->fs, uNewFS);
2966 iemHlpLoadSelectorInV86Mode(pIemCpu, &pCtx->gs, uNewGS);
2967 }
2968 else
2969 {
2970 uint8_t uNewCpl = (uNewCS & X86_SEL_RPL);
2971
2972 /*
2973 * Load the stack segment for the new task.
2974 */
2975 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
2976 {
2977 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
2978 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2979 }
2980
2981 /* Fetch the descriptor. */
2982 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescSS, uNewSS, X86_XCPT_TS);
2983 if (rcStrict != VINF_SUCCESS)
2984 {
2985 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
2986 VBOXSTRICTRC_VAL(rcStrict)));
2987 return rcStrict;
2988 }
2989
2990 /* SS must be a data segment and writable. */
2991 if ( !DescSS.Legacy.Gen.u1DescType
2992 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
2993 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
2994 {
2995 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
2996 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
2997 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2998 }
2999
3000 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
3001 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
3002 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
3003 {
3004 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
3005 uNewCpl));
3006 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
3007 }
3008
3009 /* Is it there? */
3010 if (!DescSS.Legacy.Gen.u1Present)
3011 {
3012 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
3013 return iemRaiseSelectorNotPresentWithErr(pIemCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
3014 }
3015
3016 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
3017 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
3018
3019 /* Set the accessed bit before committing the result into SS. */
3020 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3021 {
3022 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewSS);
3023 if (rcStrict != VINF_SUCCESS)
3024 return rcStrict;
3025 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3026 }
3027
3028 /* Commit SS. */
3029 pCtx->ss.Sel = uNewSS;
3030 pCtx->ss.ValidSel = uNewSS;
3031 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
3032 pCtx->ss.u32Limit = cbLimit;
3033 pCtx->ss.u64Base = u64Base;
3034 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
3035 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), &pCtx->ss));
3036
3037 /* CPL has changed, update IEM before loading rest of segments. */
3038 pIemCpu->uCpl = uNewCpl;
3039
3040 /*
3041 * Load the data segments for the new task.
3042 */
3043 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pIemCpu, &pCtx->es, uNewES);
3044 if (rcStrict != VINF_SUCCESS)
3045 return rcStrict;
3046 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pIemCpu, &pCtx->ds, uNewDS);
3047 if (rcStrict != VINF_SUCCESS)
3048 return rcStrict;
3049 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pIemCpu, &pCtx->fs, uNewFS);
3050 if (rcStrict != VINF_SUCCESS)
3051 return rcStrict;
3052 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pIemCpu, &pCtx->gs, uNewGS);
3053 if (rcStrict != VINF_SUCCESS)
3054 return rcStrict;
3055
3056 /*
3057 * Load the code segment for the new task.
3058 */
3059 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
3060 {
3061 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
3062 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3063 }
3064
3065 /* Fetch the descriptor. */
3066 IEMSELDESC DescCS;
3067 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCS, uNewCS, X86_XCPT_TS);
3068 if (rcStrict != VINF_SUCCESS)
3069 {
3070 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
3071 return rcStrict;
3072 }
3073
3074 /* CS must be a code segment. */
3075 if ( !DescCS.Legacy.Gen.u1DescType
3076 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
3077 {
3078 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
3079 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
3080 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3081 }
3082
3083 /* For conforming CS, DPL must be less than or equal to the RPL. */
3084 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
3085 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
3086 {
3087            Log(("iemTaskSwitch: conforming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
3088 DescCS.Legacy.Gen.u2Dpl));
3089 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3090 }
3091
3092 /* For non-conforming CS, DPL must match RPL. */
3093 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
3094 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
3095 {
3096            Log(("iemTaskSwitch: non-conforming CS DPL RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
3097 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
3098 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3099 }
3100
3101 /* Is it there? */
3102 if (!DescCS.Legacy.Gen.u1Present)
3103 {
3104 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
3105 return iemRaiseSelectorNotPresentWithErr(pIemCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3106 }
3107
3108 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
3109 u64Base = X86DESC_BASE(&DescCS.Legacy);
3110
3111 /* Set the accessed bit before committing the result into CS. */
3112 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3113 {
3114 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewCS);
3115 if (rcStrict != VINF_SUCCESS)
3116 return rcStrict;
3117 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3118 }
3119
3120 /* Commit CS. */
3121 pCtx->cs.Sel = uNewCS;
3122 pCtx->cs.ValidSel = uNewCS;
3123 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3124 pCtx->cs.u32Limit = cbLimit;
3125 pCtx->cs.u64Base = u64Base;
3126 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3127 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), &pCtx->cs));
3128 }
3129
3130 /** @todo Debug trap. */
3131 if (fIsNewTSS386 && fNewDebugTrap)
3132 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
3133
3134 /*
3135 * Construct the error code masks based on what caused this task switch.
3136 * See Intel Instruction reference for INT.
3137 */
3138 uint16_t uExt;
3139 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
3140 && !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT))
3141 {
3142 uExt = 1;
3143 }
3144 else
3145 uExt = 0;
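    /* Note: uExt becomes bit 0 (the EXT/"external event" bit) of the error codes raised
       below; it is set when the task switch was triggered by a hardware interrupt or
       exception rather than by a software INT n. */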
3146
3147 /*
3148 * Push any error code on to the new stack.
3149 */
3150 if (fFlags & IEM_XCPT_FLAGS_ERR)
3151 {
3152 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
3153 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
3154 uint8_t const cbStackFrame = fIsNewTSS386 ? 4 : 2;
3155
3156 /* Check that there is sufficient space on the stack. */
3157 /** @todo Factor out segment limit checking for normal/expand down segments
3158 * into a separate function. */
3159 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
3160 {
3161 if ( pCtx->esp - 1 > cbLimitSS
3162 || pCtx->esp < cbStackFrame)
3163 {
3164 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
3165 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n", pCtx->ss.Sel, pCtx->esp,
3166 cbStackFrame));
3167 return iemRaiseStackSelectorNotPresentWithErr(pIemCpu, uExt);
3168 }
3169 }
3170 else
3171 {
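            /* Expand-down segment: the valid offsets lie above the limit, with the upper
               bound being 0xffffffff or 0xffff depending on the D/B bit, hence the
               inverted checks below. */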
3172            if (   pCtx->esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
3173 || pCtx->esp - cbStackFrame < cbLimitSS + UINT32_C(1))
3174 {
3175 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n", pCtx->ss.Sel, pCtx->esp,
3176 cbStackFrame));
3177 return iemRaiseStackSelectorNotPresentWithErr(pIemCpu, uExt);
3178 }
3179 }
3180
3181
3182 if (fIsNewTSS386)
3183 rcStrict = iemMemStackPushU32(pIemCpu, uErr);
3184 else
3185 rcStrict = iemMemStackPushU16(pIemCpu, uErr);
3186 if (rcStrict != VINF_SUCCESS)
3187 {
3188 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n", fIsNewTSS386 ? "32" : "16",
3189 VBOXSTRICTRC_VAL(rcStrict)));
3190 return rcStrict;
3191 }
3192 }
3193
3194 /* Check the new EIP against the new CS limit. */
3195 if (pCtx->eip > pCtx->cs.u32Limit)
3196 {
3197        Log(("iemTaskSwitch: New EIP exceeds CS limit. uNewEIP=%#RGv CS limit=%u -> #GP(0)\n",
3198 pCtx->eip, pCtx->cs.u32Limit));
3199 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
3200 return iemRaiseGeneralProtectionFault(pIemCpu, uExt);
3201 }
3202
3203 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pCtx->cs.Sel, pCtx->eip, pCtx->ss.Sel));
3204 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3205}
3206
3207
3208/**
3209 * Implements exceptions and interrupts for protected mode.
3210 *
3211 * @returns VBox strict status code.
3212 * @param pIemCpu The IEM per CPU instance data.
3213 * @param pCtx The CPU context.
3214 * @param cbInstr The number of bytes to offset rIP by in the return
3215 * address.
3216 * @param u8Vector The interrupt / exception vector number.
3217 * @param fFlags The flags.
3218 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3219 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3220 */
3221IEM_STATIC VBOXSTRICTRC
3222iemRaiseXcptOrIntInProtMode(PIEMCPU pIemCpu,
3223 PCPUMCTX pCtx,
3224 uint8_t cbInstr,
3225 uint8_t u8Vector,
3226 uint32_t fFlags,
3227 uint16_t uErr,
3228 uint64_t uCr2)
3229{
3230 /*
3231 * Read the IDT entry.
3232 */
3233 if (pCtx->idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
3234 {
3235 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
3236 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3237 }
3238 X86DESC Idte;
3239 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pIemCpu, &Idte.u, UINT8_MAX,
3240 pCtx->idtr.pIdt + UINT32_C(8) * u8Vector);
3241 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3242 return rcStrict;
3243 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x\n",
3244 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
3245 Idte.Gate.u4ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
3246
3247 /*
3248 * Check the descriptor type, DPL and such.
3249 * ASSUMES this is done in the same order as described for call-gate calls.
3250 */
3251 if (Idte.Gate.u1DescType)
3252 {
3253 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3254 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3255 }
3256 bool fTaskGate = false;
3257 uint8_t f32BitGate = true;
3258 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
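    /* Interrupt gates additionally clear IF (added in the switch below); trap gates leave it set. */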
3259 switch (Idte.Gate.u4Type)
3260 {
3261 case X86_SEL_TYPE_SYS_UNDEFINED:
3262 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
3263 case X86_SEL_TYPE_SYS_LDT:
3264 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
3265 case X86_SEL_TYPE_SYS_286_CALL_GATE:
3266 case X86_SEL_TYPE_SYS_UNDEFINED2:
3267 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
3268 case X86_SEL_TYPE_SYS_UNDEFINED3:
3269 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
3270 case X86_SEL_TYPE_SYS_386_CALL_GATE:
3271 case X86_SEL_TYPE_SYS_UNDEFINED4:
3272 {
3273 /** @todo check what actually happens when the type is wrong...
3274 * esp. call gates. */
3275 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3276 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3277 }
3278
3279 case X86_SEL_TYPE_SYS_286_INT_GATE:
3280 f32BitGate = false;
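            /* fall thru */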
3281 case X86_SEL_TYPE_SYS_386_INT_GATE:
3282 fEflToClear |= X86_EFL_IF;
3283 break;
3284
3285 case X86_SEL_TYPE_SYS_TASK_GATE:
3286 fTaskGate = true;
3287#ifndef IEM_IMPLEMENTS_TASKSWITCH
3288 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
3289#endif
3290 break;
3291
3292 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
3293 f32BitGate = false;
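            /* fall thru */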
3294 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
3295 break;
3296
3297 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3298 }
3299
3300 /* Check DPL against CPL if applicable. */
3301 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3302 {
3303 if (pIemCpu->uCpl > Idte.Gate.u2Dpl)
3304 {
3305 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pIemCpu->uCpl, Idte.Gate.u2Dpl));
3306 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3307 }
3308 }
3309
3310 /* Is it there? */
3311 if (!Idte.Gate.u1Present)
3312 {
3313 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
3314 return iemRaiseSelectorNotPresentWithErr(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3315 }
3316
3317 /* Is it a task-gate? */
3318 if (fTaskGate)
3319 {
3320 /*
3321 * Construct the error code masks based on what caused this task switch.
3322 * See Intel Instruction reference for INT.
3323 */
3324 uint16_t const uExt = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? 0 : 1;
3325 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
3326 RTSEL SelTSS = Idte.Gate.u16Sel;
3327
3328 /*
3329 * Fetch the TSS descriptor in the GDT.
3330 */
3331 IEMSELDESC DescTSS;
3332 rcStrict = iemMemFetchSelDescWithErr(pIemCpu, &DescTSS, SelTSS, X86_XCPT_GP, (SelTSS & uSelMask) | uExt);
3333 if (rcStrict != VINF_SUCCESS)
3334 {
3335 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTSS,
3336 VBOXSTRICTRC_VAL(rcStrict)));
3337 return rcStrict;
3338 }
3339
3340 /* The TSS descriptor must be a system segment and be available (not busy). */
3341 if ( DescTSS.Legacy.Gen.u1DescType
3342 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
3343 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
3344 {
3345 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
3346 u8Vector, SelTSS, DescTSS.Legacy.au64));
3347 return iemRaiseGeneralProtectionFault(pIemCpu, (SelTSS & uSelMask) | uExt);
3348 }
3349
3350 /* The TSS must be present. */
3351 if (!DescTSS.Legacy.Gen.u1Present)
3352 {
3353 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTSS, DescTSS.Legacy.au64));
3354 return iemRaiseSelectorNotPresentWithErr(pIemCpu, (SelTSS & uSelMask) | uExt);
3355 }
3356
3357 /* Do the actual task switch. */
3358 return iemTaskSwitch(pIemCpu, pCtx, IEMTASKSWITCH_INT_XCPT, pCtx->eip, fFlags, uErr, uCr2, SelTSS, &DescTSS);
3359 }
3360
3361 /* A null CS is bad. */
3362 RTSEL NewCS = Idte.Gate.u16Sel;
3363 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3364 {
3365 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3366 return iemRaiseGeneralProtectionFault0(pIemCpu);
3367 }
3368
3369 /* Fetch the descriptor for the new CS. */
3370 IEMSELDESC DescCS;
3371 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
3372 if (rcStrict != VINF_SUCCESS)
3373 {
3374 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3375 return rcStrict;
3376 }
3377
3378 /* Must be a code segment. */
3379 if (!DescCS.Legacy.Gen.u1DescType)
3380 {
3381 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3382 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3383 }
3384 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
3385 {
3386 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3387 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3388 }
3389
3390 /* Don't allow lowering the privilege level. */
3391 /** @todo Does the lowering of privileges apply to software interrupts
3392     *        only?  This has a bearing on the more-privileged or
3393 * same-privilege stack behavior further down. A testcase would
3394 * be nice. */
3395 if (DescCS.Legacy.Gen.u2Dpl > pIemCpu->uCpl)
3396 {
3397 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3398 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
3399 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3400 }
3401
3402 /* Make sure the selector is present. */
3403 if (!DescCS.Legacy.Gen.u1Present)
3404 {
3405 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3406 return iemRaiseSelectorNotPresentBySelector(pIemCpu, NewCS);
3407 }
3408
3409 /* Check the new EIP against the new CS limit. */
3410 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
3411 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
3412 ? Idte.Gate.u16OffsetLow
3413 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
3414 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
3415 if (uNewEip > cbLimitCS)
3416 {
3417 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
3418 u8Vector, uNewEip, cbLimitCS, NewCS));
3419 return iemRaiseGeneralProtectionFault(pIemCpu, 0);
3420 }
3421
3422 /* Calc the flag image to push. */
3423 uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx);
3424 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3425 fEfl &= ~X86_EFL_RF;
3426 else if (!IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
3427 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3428
3429 /* From V8086 mode only go to CPL 0. */
3430 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3431 ? pIemCpu->uCpl : DescCS.Legacy.Gen.u2Dpl;
3432 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
3433 {
3434 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
3435 return iemRaiseGeneralProtectionFault(pIemCpu, 0);
3436 }
3437
3438 /*
3439 * If the privilege level changes, we need to get a new stack from the TSS.
3440     * This in turn means validating the new SS and ESP...
3441 */
3442 if (uNewCpl != pIemCpu->uCpl)
3443 {
3444 RTSEL NewSS;
3445 uint32_t uNewEsp;
3446 rcStrict = iemRaiseLoadStackFromTss32Or16(pIemCpu, pCtx, uNewCpl, &NewSS, &uNewEsp);
3447 if (rcStrict != VINF_SUCCESS)
3448 return rcStrict;
3449
3450 IEMSELDESC DescSS;
3451 rcStrict = iemMiscValidateNewSS(pIemCpu, pCtx, NewSS, uNewCpl, &DescSS);
3452 if (rcStrict != VINF_SUCCESS)
3453 return rcStrict;
3454
3455 /* Check that there is sufficient space for the stack frame. */
3456 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
3457 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
3458 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
3459 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
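        /* For example: a 32-bit gate (f32BitGate=1) outside V8086 mode gives 10<<1 = 20 bytes
           (EIP, CS, EFLAGS, ESP, SS), or 12<<1 = 24 bytes with an error code; the V8086
           variants add ES, DS, FS and GS for 36/40 bytes.  16-bit gates use half of that. */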
3460
3461 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
3462 {
3463 if ( uNewEsp - 1 > cbLimitSS
3464 || uNewEsp < cbStackFrame)
3465 {
3466 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
3467 u8Vector, NewSS, uNewEsp, cbStackFrame));
3468 return iemRaiseSelectorBoundsBySelector(pIemCpu, NewSS);
3469 }
3470 }
3471 else
3472 {
3473            if (   uNewEsp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
3474 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
3475 {
3476 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
3477 u8Vector, NewSS, uNewEsp, cbStackFrame));
3478 return iemRaiseSelectorBoundsBySelector(pIemCpu, NewSS);
3479 }
3480 }
3481
3482 /*
3483 * Start making changes.
3484 */
3485
3486 /* Create the stack frame. */
3487 RTPTRUNION uStackFrame;
3488 rcStrict = iemMemMap(pIemCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
3489 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy), IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
3490 if (rcStrict != VINF_SUCCESS)
3491 return rcStrict;
3492 void * const pvStackFrame = uStackFrame.pv;
3493 if (f32BitGate)
3494 {
3495 if (fFlags & IEM_XCPT_FLAGS_ERR)
3496 *uStackFrame.pu32++ = uErr;
3497 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->eip + cbInstr : pCtx->eip;
3498 uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl;
3499 uStackFrame.pu32[2] = fEfl;
3500 uStackFrame.pu32[3] = pCtx->esp;
3501 uStackFrame.pu32[4] = pCtx->ss.Sel;
3502 if (fEfl & X86_EFL_VM)
3503 {
3504 uStackFrame.pu32[1] = pCtx->cs.Sel;
3505 uStackFrame.pu32[5] = pCtx->es.Sel;
3506 uStackFrame.pu32[6] = pCtx->ds.Sel;
3507 uStackFrame.pu32[7] = pCtx->fs.Sel;
3508 uStackFrame.pu32[8] = pCtx->gs.Sel;
3509 }
3510 }
3511 else
3512 {
3513 if (fFlags & IEM_XCPT_FLAGS_ERR)
3514 *uStackFrame.pu16++ = uErr;
3515 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->ip + cbInstr : pCtx->ip;
3516 uStackFrame.pu16[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl;
3517 uStackFrame.pu16[2] = fEfl;
3518 uStackFrame.pu16[3] = pCtx->sp;
3519 uStackFrame.pu16[4] = pCtx->ss.Sel;
3520 if (fEfl & X86_EFL_VM)
3521 {
3522 uStackFrame.pu16[1] = pCtx->cs.Sel;
3523 uStackFrame.pu16[5] = pCtx->es.Sel;
3524 uStackFrame.pu16[6] = pCtx->ds.Sel;
3525 uStackFrame.pu16[7] = pCtx->fs.Sel;
3526 uStackFrame.pu16[8] = pCtx->gs.Sel;
3527 }
3528 }
3529 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
3530 if (rcStrict != VINF_SUCCESS)
3531 return rcStrict;
3532
3533 /* Mark the selectors 'accessed' (hope this is the correct time). */
3534        /** @todo testcase: exactly _when_ are the accessed bits set - before or
3535 * after pushing the stack frame? (Write protect the gdt + stack to
3536 * find out.) */
3537 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3538 {
3539 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewCS);
3540 if (rcStrict != VINF_SUCCESS)
3541 return rcStrict;
3542 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3543 }
3544
3545 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3546 {
3547 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewSS);
3548 if (rcStrict != VINF_SUCCESS)
3549 return rcStrict;
3550 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3551 }
3552
3553 /*
3554         * Start committing the register changes (joins with the DPL=CPL branch).
3555 */
3556 pCtx->ss.Sel = NewSS;
3557 pCtx->ss.ValidSel = NewSS;
3558 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
3559 pCtx->ss.u32Limit = cbLimitSS;
3560 pCtx->ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
3561 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
3562 /** @todo When coming from 32-bit code and operating with a 16-bit TSS and
3563 * 16-bit handler, the high word of ESP remains unchanged (i.e. only
3564 * SP is loaded).
3565 * Need to check the other combinations too:
3566 * - 16-bit TSS, 32-bit handler
3567 * - 32-bit TSS, 16-bit handler */
3568 pCtx->rsp = uNewEsp - cbStackFrame;
3569 pIemCpu->uCpl = uNewCpl;
3570
3571 if (fEfl & X86_EFL_VM)
3572 {
3573 iemHlpLoadNullDataSelectorOnV86Xcpt(pIemCpu, &pCtx->gs);
3574 iemHlpLoadNullDataSelectorOnV86Xcpt(pIemCpu, &pCtx->fs);
3575 iemHlpLoadNullDataSelectorOnV86Xcpt(pIemCpu, &pCtx->es);
3576 iemHlpLoadNullDataSelectorOnV86Xcpt(pIemCpu, &pCtx->ds);
3577 }
3578 }
3579 /*
3580 * Same privilege, no stack change and smaller stack frame.
3581 */
3582 else
3583 {
3584 uint64_t uNewRsp;
3585 RTPTRUNION uStackFrame;
3586 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
3587 rcStrict = iemMemStackPushBeginSpecial(pIemCpu, cbStackFrame, &uStackFrame.pv, &uNewRsp);
3588 if (rcStrict != VINF_SUCCESS)
3589 return rcStrict;
3590 void * const pvStackFrame = uStackFrame.pv;
3591
3592 if (f32BitGate)
3593 {
3594 if (fFlags & IEM_XCPT_FLAGS_ERR)
3595 *uStackFrame.pu32++ = uErr;
3596 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->eip + cbInstr : pCtx->eip;
3597 uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl;
3598 uStackFrame.pu32[2] = fEfl;
3599 }
3600 else
3601 {
3602 if (fFlags & IEM_XCPT_FLAGS_ERR)
3603 *uStackFrame.pu16++ = uErr;
3604 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->eip + cbInstr : pCtx->eip;
3605 uStackFrame.pu16[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl;
3606 uStackFrame.pu16[2] = fEfl;
3607 }
3608 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */
3609 if (rcStrict != VINF_SUCCESS)
3610 return rcStrict;
3611
3612 /* Mark the CS selector as 'accessed'. */
3613 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3614 {
3615 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewCS);
3616 if (rcStrict != VINF_SUCCESS)
3617 return rcStrict;
3618 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3619 }
3620
3621 /*
3622 * Start committing the register changes (joins with the other branch).
3623 */
3624 pCtx->rsp = uNewRsp;
3625 }
3626
3627 /* ... register committing continues. */
3628 pCtx->cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3629 pCtx->cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3630 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3631 pCtx->cs.u32Limit = cbLimitCS;
3632 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3633 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3634
3635 pCtx->rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */
3636 fEfl &= ~fEflToClear;
3637 IEMMISC_SET_EFL(pIemCpu, pCtx, fEfl);
3638
3639 if (fFlags & IEM_XCPT_FLAGS_CR2)
3640 pCtx->cr2 = uCr2;
3641
3642 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3643 iemRaiseXcptAdjustState(pCtx, u8Vector);
3644
3645 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3646}
3647
3648
3649/**
3650 * Implements exceptions and interrupts for long mode.
3651 *
3652 * @returns VBox strict status code.
3653 * @param pIemCpu The IEM per CPU instance data.
3654 * @param pCtx The CPU context.
3655 * @param cbInstr The number of bytes to offset rIP by in the return
3656 * address.
3657 * @param u8Vector The interrupt / exception vector number.
3658 * @param fFlags The flags.
3659 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3660 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3661 */
3662IEM_STATIC VBOXSTRICTRC
3663iemRaiseXcptOrIntInLongMode(PIEMCPU pIemCpu,
3664 PCPUMCTX pCtx,
3665 uint8_t cbInstr,
3666 uint8_t u8Vector,
3667 uint32_t fFlags,
3668 uint16_t uErr,
3669 uint64_t uCr2)
3670{
3671 /*
3672 * Read the IDT entry.
3673 */
3674 uint16_t offIdt = (uint16_t)u8Vector << 4;
3675 if (pCtx->idtr.cbIdt < offIdt + 7)
3676 {
3677 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
3678 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3679 }
3680 X86DESC64 Idte;
3681 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pIemCpu, &Idte.au64[0], UINT8_MAX, pCtx->idtr.pIdt + offIdt);
3682 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
3683 rcStrict = iemMemFetchSysU64(pIemCpu, &Idte.au64[1], UINT8_MAX, pCtx->idtr.pIdt + offIdt + 8);
3684 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3685 return rcStrict;
3686 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
3687 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
3688 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
3689
3690 /*
3691 * Check the descriptor type, DPL and such.
3692 * ASSUMES this is done in the same order as described for call-gate calls.
3693 */
3694 if (Idte.Gate.u1DescType)
3695 {
3696 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3697 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3698 }
3699 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
3700 switch (Idte.Gate.u4Type)
3701 {
3702 case AMD64_SEL_TYPE_SYS_INT_GATE:
3703 fEflToClear |= X86_EFL_IF;
3704 break;
3705 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
3706 break;
3707
3708 default:
3709 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3710 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3711 }
3712
3713 /* Check DPL against CPL if applicable. */
3714 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3715 {
3716 if (pIemCpu->uCpl > Idte.Gate.u2Dpl)
3717 {
3718 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pIemCpu->uCpl, Idte.Gate.u2Dpl));
3719 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3720 }
3721 }
3722
3723 /* Is it there? */
3724 if (!Idte.Gate.u1Present)
3725 {
3726 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
3727 return iemRaiseSelectorNotPresentWithErr(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3728 }
3729
3730 /* A null CS is bad. */
3731 RTSEL NewCS = Idte.Gate.u16Sel;
3732 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3733 {
3734 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3735 return iemRaiseGeneralProtectionFault0(pIemCpu);
3736 }
3737
3738 /* Fetch the descriptor for the new CS. */
3739 IEMSELDESC DescCS;
3740 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCS, NewCS, X86_XCPT_GP);
3741 if (rcStrict != VINF_SUCCESS)
3742 {
3743 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3744 return rcStrict;
3745 }
3746
3747 /* Must be a 64-bit code segment. */
3748 if (!DescCS.Long.Gen.u1DescType)
3749 {
3750 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3751 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3752 }
3753 if ( !DescCS.Long.Gen.u1Long
3754 || DescCS.Long.Gen.u1DefBig
3755 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
3756 {
3757 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
3758 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
3759 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3760 }
3761
3762 /* Don't allow lowering the privilege level. For non-conforming CS
3763 selectors, the CS.DPL sets the privilege level the trap/interrupt
3764 handler runs at. For conforming CS selectors, the CPL remains
3765 unchanged, but the CS.DPL must be <= CPL. */
3766 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
3767 * when CPU in Ring-0. Result \#GP? */
3768 if (DescCS.Legacy.Gen.u2Dpl > pIemCpu->uCpl)
3769 {
3770 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3771 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
3772 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3773 }
3774
3775
3776 /* Make sure the selector is present. */
3777 if (!DescCS.Legacy.Gen.u1Present)
3778 {
3779 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3780 return iemRaiseSelectorNotPresentBySelector(pIemCpu, NewCS);
3781 }
3782
3783 /* Check that the new RIP is canonical. */
3784 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
3785 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
3786 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
3787 if (!IEM_IS_CANONICAL(uNewRip))
3788 {
3789 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
3790 return iemRaiseGeneralProtectionFault0(pIemCpu);
3791 }
3792
3793 /*
3794 * If the privilege level changes or if the IST isn't zero, we need to get
3795 * a new stack from the TSS.
3796 */
3797 uint64_t uNewRsp;
3798 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3799 ? pIemCpu->uCpl : DescCS.Legacy.Gen.u2Dpl;
3800 if ( uNewCpl != pIemCpu->uCpl
3801 || Idte.Gate.u3IST != 0)
3802 {
3803 rcStrict = iemRaiseLoadStackFromTss64(pIemCpu, pCtx, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
3804 if (rcStrict != VINF_SUCCESS)
3805 return rcStrict;
3806 }
3807 else
3808 uNewRsp = pCtx->rsp;
3809 uNewRsp &= ~(uint64_t)0xf;
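    /* The new stack pointer is aligned down to a 16 byte boundary before the
       64-bit interrupt stack frame is pushed. */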
3810
3811 /*
3812 * Calc the flag image to push.
3813 */
3814 uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx);
3815 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3816 fEfl &= ~X86_EFL_RF;
3817 else if (!IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
3818 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3819
3820 /*
3821 * Start making changes.
3822 */
3823
3824 /* Create the stack frame. */
3825 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
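    /* 5 qwords: RIP, CS, RFLAGS, RSP and SS, plus one more when an error code is pushed. */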
3826 RTPTRUNION uStackFrame;
3827 rcStrict = iemMemMap(pIemCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
3828 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
3829 if (rcStrict != VINF_SUCCESS)
3830 return rcStrict;
3831 void * const pvStackFrame = uStackFrame.pv;
3832
3833 if (fFlags & IEM_XCPT_FLAGS_ERR)
3834 *uStackFrame.pu64++ = uErr;
3835 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->rip + cbInstr : pCtx->rip;
3836 uStackFrame.pu64[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl; /* CPL paranoia */
3837 uStackFrame.pu64[2] = fEfl;
3838 uStackFrame.pu64[3] = pCtx->rsp;
3839 uStackFrame.pu64[4] = pCtx->ss.Sel;
3840 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
3841 if (rcStrict != VINF_SUCCESS)
3842 return rcStrict;
3843
3844    /* Mark the CS selector 'accessed' (hope this is the correct time). */
3845    /** @todo testcase: exactly _when_ are the accessed bits set - before or
3846 * after pushing the stack frame? (Write protect the gdt + stack to
3847 * find out.) */
3848 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3849 {
3850 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewCS);
3851 if (rcStrict != VINF_SUCCESS)
3852 return rcStrict;
3853 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3854 }
3855
3856 /*
3857     * Start committing the register changes.
3858 */
3859    /** @todo research/testcase: Figure out what VT-x and AMD-V load into the
3860 * hidden registers when interrupting 32-bit or 16-bit code! */
3861 if (uNewCpl != pIemCpu->uCpl)
3862 {
3863 pCtx->ss.Sel = 0 | uNewCpl;
3864 pCtx->ss.ValidSel = 0 | uNewCpl;
3865 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
3866 pCtx->ss.u32Limit = UINT32_MAX;
3867 pCtx->ss.u64Base = 0;
3868 pCtx->ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
3869 }
3870 pCtx->rsp = uNewRsp - cbStackFrame;
3871 pCtx->cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3872 pCtx->cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3873 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3874 pCtx->cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
3875 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3876 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3877 pCtx->rip = uNewRip;
3878 pIemCpu->uCpl = uNewCpl;
3879
3880 fEfl &= ~fEflToClear;
3881 IEMMISC_SET_EFL(pIemCpu, pCtx, fEfl);
3882
3883 if (fFlags & IEM_XCPT_FLAGS_CR2)
3884 pCtx->cr2 = uCr2;
3885
3886 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3887 iemRaiseXcptAdjustState(pCtx, u8Vector);
3888
3889 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3890}
3891
3892
3893/**
3894 * Implements exceptions and interrupts.
3895 *
3896 * All exceptions and interrupts go through this function!
3897 *
3898 * @returns VBox strict status code.
3899 * @param pIemCpu The IEM per CPU instance data.
3900 * @param cbInstr The number of bytes to offset rIP by in the return
3901 * address.
3902 * @param u8Vector The interrupt / exception vector number.
3903 * @param fFlags The flags.
3904 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3905 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3906 */
3907DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC)
3908iemRaiseXcptOrInt(PIEMCPU pIemCpu,
3909 uint8_t cbInstr,
3910 uint8_t u8Vector,
3911 uint32_t fFlags,
3912 uint16_t uErr,
3913 uint64_t uCr2)
3914{
3915 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3916#ifdef IN_RING0
3917 int rc = HMR0EnsureCompleteBasicContext(IEMCPU_TO_VMCPU(pIemCpu), pCtx);
3918 AssertRCReturn(rc, rc);
3919#endif
3920
3921 /*
3922 * Perform the V8086 IOPL check and upgrade the fault without nesting.
3923 */
3924 if ( pCtx->eflags.Bits.u1VM
3925 && pCtx->eflags.Bits.u2IOPL != 3
3926 && (fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
3927 && (pCtx->cr0 & X86_CR0_PE) )
3928 {
3929 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
3930 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
3931 u8Vector = X86_XCPT_GP;
3932 uErr = 0;
3933 }
3934#ifdef DBGFTRACE_ENABLED
3935 RTTraceBufAddMsgF(IEMCPU_TO_VM(pIemCpu)->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
3936 pIemCpu->cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
3937 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp);
3938#endif
3939
3940 /*
3941 * Do recursion accounting.
3942 */
3943 uint8_t const uPrevXcpt = pIemCpu->uCurXcpt;
3944 uint32_t const fPrevXcpt = pIemCpu->fCurXcpt;
3945 if (pIemCpu->cXcptRecursions == 0)
3946 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
3947 u8Vector, pCtx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2));
3948 else
3949 {
3950 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
3951 u8Vector, pCtx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2, pIemCpu->uCurXcpt, pIemCpu->cXcptRecursions + 1, fPrevXcpt));
3952
3953        /** @todo double and triple faults. */
3954 if (pIemCpu->cXcptRecursions >= 3)
3955 {
3956#ifdef DEBUG_bird
3957 AssertFailed();
3958#endif
3959 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
3960 }
3961
3962 /** @todo set X86_TRAP_ERR_EXTERNAL when appropriate.
3963 if (fPrevXcpt & IEM_XCPT_FLAGS_T_EXT_INT)
3964 {
3965 ....
3966 } */
3967 }
3968 pIemCpu->cXcptRecursions++;
3969 pIemCpu->uCurXcpt = u8Vector;
3970 pIemCpu->fCurXcpt = fFlags;
3971
3972 /*
3973 * Extensive logging.
3974 */
3975#if defined(LOG_ENABLED) && defined(IN_RING3)
3976 if (LogIs3Enabled())
3977 {
3978 PVM pVM = IEMCPU_TO_VM(pIemCpu);
3979 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
3980 char szRegs[4096];
3981 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
3982 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
3983 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
3984 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
3985 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
3986 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
3987 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
3988 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
3989 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
3990 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
3991 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
3992 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
3993 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
3994 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
3995 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
3996 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
3997 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
3998 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
3999 " efer=%016VR{efer}\n"
4000 " pat=%016VR{pat}\n"
4001 " sf_mask=%016VR{sf_mask}\n"
4002 "krnl_gs_base=%016VR{krnl_gs_base}\n"
4003 " lstar=%016VR{lstar}\n"
4004 " star=%016VR{star} cstar=%016VR{cstar}\n"
4005 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
4006 );
4007
4008 char szInstr[256];
4009 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
4010 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
4011 szInstr, sizeof(szInstr), NULL);
4012 Log3(("%s%s\n", szRegs, szInstr));
4013 }
4014#endif /* LOG_ENABLED */
4015
4016 /*
4017 * Call the mode specific worker function.
4018 */
4019 VBOXSTRICTRC rcStrict;
4020 if (!(pCtx->cr0 & X86_CR0_PE))
4021 rcStrict = iemRaiseXcptOrIntInRealMode( pIemCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
4022 else if (pCtx->msrEFER & MSR_K6_EFER_LMA)
4023 rcStrict = iemRaiseXcptOrIntInLongMode( pIemCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
4024 else
4025 rcStrict = iemRaiseXcptOrIntInProtMode( pIemCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
4026
4027 /*
4028 * Unwind.
4029 */
4030 pIemCpu->cXcptRecursions--;
4031 pIemCpu->uCurXcpt = uPrevXcpt;
4032 pIemCpu->fCurXcpt = fPrevXcpt;
4033 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u\n",
4034 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->esp, pIemCpu->uCpl));
4035 return rcStrict;
4036}
4037
4038
4039/** \#DE - 00. */
4040DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDivideError(PIEMCPU pIemCpu)
4041{
4042 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4043}
4044
4045
4046/** \#DB - 01.
4047 * @note This automatically clears DR7.GD. */
4048DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDebugException(PIEMCPU pIemCpu)
4049{
4050 /** @todo set/clear RF. */
4051 pIemCpu->CTX_SUFF(pCtx)->dr[7] &= ~X86_DR7_GD;
4052 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4053}
4054
4055
4056/** \#UD - 06. */
4057DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseUndefinedOpcode(PIEMCPU pIemCpu)
4058{
4059 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4060}
4061
4062
4063/** \#NM - 07. */
4064DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDeviceNotAvailable(PIEMCPU pIemCpu)
4065{
4066 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4067}
4068
4069
4070/** \#TS(err) - 0a. */
4071DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultWithErr(PIEMCPU pIemCpu, uint16_t uErr)
4072{
4073 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4074}
4075
4076
4077/** \#TS(tr) - 0a. */
4078DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultCurrentTSS(PIEMCPU pIemCpu)
4079{
4080 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4081 pIemCpu->CTX_SUFF(pCtx)->tr.Sel, 0);
4082}
4083
4084
4085/** \#TS(0) - 0a. */
4086DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFault0(PIEMCPU pIemCpu)
4087{
4088 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4089 0, 0);
4090}
4091
4092
4093/** \#TS(err) - 0a. */
4094DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultBySelector(PIEMCPU pIemCpu, uint16_t uSel)
4095{
4096 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4097 uSel & X86_SEL_MASK_OFF_RPL, 0);
4098}
4099
4100
4101/** \#NP(err) - 0b. */
4102DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentWithErr(PIEMCPU pIemCpu, uint16_t uErr)
4103{
4104 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4105}
4106
4107
4108/** \#NP(seg) - 0b. */
4109DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySegReg(PIEMCPU pIemCpu, uint32_t iSegReg)
4110{
4111 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4112 iemSRegFetchU16(pIemCpu, iSegReg) & ~X86_SEL_RPL, 0);
4113}
4114
4115
4116/** \#NP(sel) - 0b. */
4117DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel)
4118{
4119 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4120 uSel & ~X86_SEL_RPL, 0);
4121}
4122
4123
4124/** \#SS(seg) - 0c. */
4125DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel)
4126{
4127 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4128 uSel & ~X86_SEL_RPL, 0);
4129}
4130
4131
4132/** \#SS(err) - 0c. */
4133DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentWithErr(PIEMCPU pIemCpu, uint16_t uErr)
4134{
4135 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4136}
4137
4138
4139/** \#GP(n) - 0d. */
4140DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault(PIEMCPU pIemCpu, uint16_t uErr)
4141{
4142 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4143}
4144
4145
4146/** \#GP(0) - 0d. */
4147DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault0(PIEMCPU pIemCpu)
4148{
4149 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4150}
4151
4152
4153/** \#GP(sel) - 0d. */
4154DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFaultBySelector(PIEMCPU pIemCpu, RTSEL Sel)
4155{
4156 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4157 Sel & ~X86_SEL_RPL, 0);
4158}
4159
4160
4161/** \#GP(0) - 0d. */
4162DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseNotCanonical(PIEMCPU pIemCpu)
4163{
4164 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4165}
4166
4167
4168/** \#GP(sel) - 0d. */
4169DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBounds(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess)
4170{
4171 NOREF(iSegReg); NOREF(fAccess);
4172 return iemRaiseXcptOrInt(pIemCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
4173 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4174}
4175
4176
4177/** \#GP(sel) - 0d. */
4178DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBoundsBySelector(PIEMCPU pIemCpu, RTSEL Sel)
4179{
4180 NOREF(Sel);
4181 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4182}
4183
4184
4185/** \#GP(sel) - 0d. */
4186DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorInvalidAccess(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess)
4187{
4188 NOREF(iSegReg); NOREF(fAccess);
4189 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4190}
4191
4192
4193/** \#PF(n) - 0e. */
4194DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaisePageFault(PIEMCPU pIemCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
4195{
4196 uint16_t uErr;
4197 switch (rc)
4198 {
4199 case VERR_PAGE_NOT_PRESENT:
4200 case VERR_PAGE_TABLE_NOT_PRESENT:
4201 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
4202 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
4203 uErr = 0;
4204 break;
4205
4206 default:
4207 AssertMsgFailed(("%Rrc\n", rc));
4208 case VERR_ACCESS_DENIED:
4209 uErr = X86_TRAP_PF_P;
4210 break;
4211
4212 /** @todo reserved */
4213 }
4214
4215 if (pIemCpu->uCpl == 3)
4216 uErr |= X86_TRAP_PF_US;
4217
4218 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
4219 && ( (pIemCpu->CTX_SUFF(pCtx)->cr4 & X86_CR4_PAE)
4220 && (pIemCpu->CTX_SUFF(pCtx)->msrEFER & MSR_K6_EFER_NXE) ) )
4221 uErr |= X86_TRAP_PF_ID;
4222
4223#if 0 /* This is so much non-sense, really. Why was it done like that? */
4224 /* Note! RW access callers reporting a WRITE protection fault, will clear
4225 the READ flag before calling. So, read-modify-write accesses (RW)
4226 can safely be reported as READ faults. */
4227 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
4228 uErr |= X86_TRAP_PF_RW;
4229#else
4230 if (fAccess & IEM_ACCESS_TYPE_WRITE)
4231 {
4232 if (!IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu) || !(fAccess & IEM_ACCESS_TYPE_READ))
4233 uErr |= X86_TRAP_PF_RW;
4234 }
4235#endif
4236
4237 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
4238 uErr, GCPtrWhere);
4239}
4240
4241
4242/** \#MF(0) - 10. */
4243DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseMathFault(PIEMCPU pIemCpu)
4244{
4245 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4246}
4247
4248
4249/** \#AC(0) - 11. */
4250DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseAlignmentCheckException(PIEMCPU pIemCpu)
4251{
4252 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4253}
4254
4255
4256/**
4257 * Macro for calling iemCImplRaiseDivideError().
4258 *
4259 * This enables us to add/remove arguments and force different levels of
4260 * inlining as we wish.
4261 *
4262 * @return Strict VBox status code.
4263 */
4264#define IEMOP_RAISE_DIVIDE_ERROR() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseDivideError)
4265IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
4266{
4267 NOREF(cbInstr);
4268 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4269}
4270
4271
4272/**
4273 * Macro for calling iemCImplRaiseInvalidLockPrefix().
4274 *
4275 * This enables us to add/remove arguments and force different levels of
4276 * inlining as we wish.
4277 *
4278 * @return Strict VBox status code.
4279 */
4280#define IEMOP_RAISE_INVALID_LOCK_PREFIX() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidLockPrefix)
4281IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
4282{
4283 NOREF(cbInstr);
4284 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4285}
4286
4287
4288/**
4289 * Macro for calling iemCImplRaiseInvalidOpcode().
4290 *
4291 * This enables us to add/remove arguments and force different levels of
4292 * inlining as we wish.
4293 *
4294 * @return Strict VBox status code.
4295 */
4296#define IEMOP_RAISE_INVALID_OPCODE() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidOpcode)
4297IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
4298{
4299 NOREF(cbInstr);
4300 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4301}
4302
4303
4304/** @} */
4305
4306
4307/*
4308 *
4309 * Helper routines.
4310 * Helper routines.
4311 * Helper routines.
4312 *
4313 */
4314
4315/**
4316 * Recalculates the effective operand size.
4317 *
4318 * @param pIemCpu The IEM state.
4319 */
4320IEM_STATIC void iemRecalEffOpSize(PIEMCPU pIemCpu)
4321{
4322 switch (pIemCpu->enmCpuMode)
4323 {
4324 case IEMMODE_16BIT:
4325 pIemCpu->enmEffOpSize = pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_32BIT : IEMMODE_16BIT;
4326 break;
4327 case IEMMODE_32BIT:
4328 pIemCpu->enmEffOpSize = pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_16BIT : IEMMODE_32BIT;
4329 break;
4330 case IEMMODE_64BIT:
4331 switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP))
4332 {
4333 case 0:
4334 pIemCpu->enmEffOpSize = pIemCpu->enmDefOpSize;
4335 break;
4336 case IEM_OP_PRF_SIZE_OP:
4337 pIemCpu->enmEffOpSize = IEMMODE_16BIT;
4338 break;
4339 case IEM_OP_PRF_SIZE_REX_W:
4340 case IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP:
4341 pIemCpu->enmEffOpSize = IEMMODE_64BIT;
4342 break;
4343 }
4344 break;
4345 default:
4346 AssertFailed();
4347 }
4348}
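
/* Illustration: in 64-bit mode a lone 0x66 prefix yields a 16-bit effective operand
   size, REX.W yields 64-bit, and REX.W together with 0x66 still yields 64-bit since
   REX.W takes precedence, as encoded in the 64-bit case above. */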
4349
4350
4351/**
4352 * Sets the default operand size to 64-bit and recalculates the effective
4353 * operand size.
4354 *
4355 * @param pIemCpu The IEM state.
4356 */
4357IEM_STATIC void iemRecalEffOpSize64Default(PIEMCPU pIemCpu)
4358{
4359 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
4360 pIemCpu->enmDefOpSize = IEMMODE_64BIT;
4361 if ((pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP)) != IEM_OP_PRF_SIZE_OP)
4362 pIemCpu->enmEffOpSize = IEMMODE_64BIT;
4363 else
4364 pIemCpu->enmEffOpSize = IEMMODE_16BIT;
4365}
4366
4367
4368/*
4369 *
4370 * Common opcode decoders.
4371 * Common opcode decoders.
4372 * Common opcode decoders.
4373 *
4374 */
4375//#include <iprt/mem.h>
4376
4377/**
4378 * Used to add extra details about a stub case.
4379 * @param pIemCpu The IEM per CPU state.
4380 */
4381IEM_STATIC void iemOpStubMsg2(PIEMCPU pIemCpu)
4382{
4383#if defined(LOG_ENABLED) && defined(IN_RING3)
4384 PVM pVM = IEMCPU_TO_VM(pIemCpu);
4385 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
4386 char szRegs[4096];
4387 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
4388 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
4389 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
4390 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
4391 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
4392 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
4393 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
4394 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
4395 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
4396 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
4397 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
4398 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
4399 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
4400 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
4401 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
4402 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
4403 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
4404 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
4405 " efer=%016VR{efer}\n"
4406 " pat=%016VR{pat}\n"
4407 " sf_mask=%016VR{sf_mask}\n"
4408 "krnl_gs_base=%016VR{krnl_gs_base}\n"
4409 " lstar=%016VR{lstar}\n"
4410 " star=%016VR{star} cstar=%016VR{cstar}\n"
4411 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
4412 );
4413
4414 char szInstr[256];
4415 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
4416 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
4417 szInstr, sizeof(szInstr), NULL);
4418
4419 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
4420#else
4421 RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", pIemCpu->CTX_SUFF(pCtx)->cs, pIemCpu->CTX_SUFF(pCtx)->rip);
4422#endif
4423}
4424
4425/**
4426 * Complains about a stub.
4427 *
4428 * Providing two versions of this macro, one for daily use and one for use when
4429 * working on IEM.
4430 */
4431#if 0
4432# define IEMOP_BITCH_ABOUT_STUB() \
4433 do { \
4434 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__); \
4435 iemOpStubMsg2(pIemCpu); \
4436 RTAssertPanic(); \
4437 } while (0)
4438#else
4439# define IEMOP_BITCH_ABOUT_STUB() Log(("Stub: %s (line %d)\n", __FUNCTION__, __LINE__));
4440#endif
4441
4442/** Stubs an opcode. */
4443#define FNIEMOP_STUB(a_Name) \
4444 FNIEMOP_DEF(a_Name) \
4445 { \
4446 IEMOP_BITCH_ABOUT_STUB(); \
4447 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
4448 } \
4449 typedef int ignore_semicolon
4450
4451/** Stubs an opcode. */
4452#define FNIEMOP_STUB_1(a_Name, a_Type0, a_Name0) \
4453 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
4454 { \
4455 IEMOP_BITCH_ABOUT_STUB(); \
4456 NOREF(a_Name0); \
4457 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
4458 } \
4459 typedef int ignore_semicolon
4460
4461/** Stubs an opcode which currently should raise \#UD. */
4462#define FNIEMOP_UD_STUB(a_Name) \
4463 FNIEMOP_DEF(a_Name) \
4464 { \
4465 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
4466 return IEMOP_RAISE_INVALID_OPCODE(); \
4467 } \
4468 typedef int ignore_semicolon
4469
4470/** Stubs an opcode which currently should raise \#UD. */
4471#define FNIEMOP_UD_STUB_1(a_Name, a_Type0, a_Name0) \
4472 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
4473 { \
4474 NOREF(a_Name0); \
4475 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
4476 return IEMOP_RAISE_INVALID_OPCODE(); \
4477 } \
4478 typedef int ignore_semicolon
4479
4480
4481
4482/** @name Register Access.
4483 * @{
4484 */
4485
4486/**
4487 * Gets a reference (pointer) to the specified hidden segment register.
4488 *
4489 * @returns Hidden register reference.
4490 * @param pIemCpu The per CPU data.
4491 * @param iSegReg The segment register.
4492 */
4493IEM_STATIC PCPUMSELREG iemSRegGetHid(PIEMCPU pIemCpu, uint8_t iSegReg)
4494{
4495 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4496 PCPUMSELREG pSReg;
4497 switch (iSegReg)
4498 {
4499 case X86_SREG_ES: pSReg = &pCtx->es; break;
4500 case X86_SREG_CS: pSReg = &pCtx->cs; break;
4501 case X86_SREG_SS: pSReg = &pCtx->ss; break;
4502 case X86_SREG_DS: pSReg = &pCtx->ds; break;
4503 case X86_SREG_FS: pSReg = &pCtx->fs; break;
4504 case X86_SREG_GS: pSReg = &pCtx->gs; break;
4505 default:
4506 AssertFailedReturn(NULL);
4507 }
4508#ifdef VBOX_WITH_RAW_MODE_NOT_R0
4509 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pSReg))
4510 CPUMGuestLazyLoadHiddenSelectorReg(IEMCPU_TO_VMCPU(pIemCpu), pSReg);
4511#else
4512 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pSReg));
4513#endif
4514 return pSReg;
4515}
4516
4517
4518/**
4519 * Gets a reference (pointer) to the specified segment register (the selector
4520 * value).
4521 *
4522 * @returns Pointer to the selector variable.
4523 * @param pIemCpu The per CPU data.
4524 * @param iSegReg The segment register.
4525 */
4526IEM_STATIC uint16_t *iemSRegRef(PIEMCPU pIemCpu, uint8_t iSegReg)
4527{
4528 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4529 switch (iSegReg)
4530 {
4531 case X86_SREG_ES: return &pCtx->es.Sel;
4532 case X86_SREG_CS: return &pCtx->cs.Sel;
4533 case X86_SREG_SS: return &pCtx->ss.Sel;
4534 case X86_SREG_DS: return &pCtx->ds.Sel;
4535 case X86_SREG_FS: return &pCtx->fs.Sel;
4536 case X86_SREG_GS: return &pCtx->gs.Sel;
4537 }
4538 AssertFailedReturn(NULL);
4539}
4540
4541
4542/**
4543 * Fetches the selector value of a segment register.
4544 *
4545 * @returns The selector value.
4546 * @param pIemCpu The per CPU data.
4547 * @param iSegReg The segment register.
4548 */
4549IEM_STATIC uint16_t iemSRegFetchU16(PIEMCPU pIemCpu, uint8_t iSegReg)
4550{
4551 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4552 switch (iSegReg)
4553 {
4554 case X86_SREG_ES: return pCtx->es.Sel;
4555 case X86_SREG_CS: return pCtx->cs.Sel;
4556 case X86_SREG_SS: return pCtx->ss.Sel;
4557 case X86_SREG_DS: return pCtx->ds.Sel;
4558 case X86_SREG_FS: return pCtx->fs.Sel;
4559 case X86_SREG_GS: return pCtx->gs.Sel;
4560 }
4561 AssertFailedReturn(0xffff);
4562}
4563
4564
4565/**
4566 * Gets a reference (pointer) to the specified general register.
4567 *
4568 * @returns Register reference.
4569 * @param pIemCpu The per CPU data.
4570 * @param iReg The general register.
4571 */
4572IEM_STATIC void *iemGRegRef(PIEMCPU pIemCpu, uint8_t iReg)
4573{
4574 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4575 switch (iReg)
4576 {
4577 case X86_GREG_xAX: return &pCtx->rax;
4578 case X86_GREG_xCX: return &pCtx->rcx;
4579 case X86_GREG_xDX: return &pCtx->rdx;
4580 case X86_GREG_xBX: return &pCtx->rbx;
4581 case X86_GREG_xSP: return &pCtx->rsp;
4582 case X86_GREG_xBP: return &pCtx->rbp;
4583 case X86_GREG_xSI: return &pCtx->rsi;
4584 case X86_GREG_xDI: return &pCtx->rdi;
4585 case X86_GREG_x8: return &pCtx->r8;
4586 case X86_GREG_x9: return &pCtx->r9;
4587 case X86_GREG_x10: return &pCtx->r10;
4588 case X86_GREG_x11: return &pCtx->r11;
4589 case X86_GREG_x12: return &pCtx->r12;
4590 case X86_GREG_x13: return &pCtx->r13;
4591 case X86_GREG_x14: return &pCtx->r14;
4592 case X86_GREG_x15: return &pCtx->r15;
4593 }
4594 AssertFailedReturn(NULL);
4595}
4596
4597
4598/**
4599 * Gets a reference (pointer) to the specified 8-bit general register.
4600 *
4601 * Because of AH, CH, DH and BH we cannot use iemGRegRef directly here.
4602 *
4603 * @returns Register reference.
4604 * @param pIemCpu The per CPU data.
4605 * @param iReg The register.
4606 */
4607IEM_STATIC uint8_t *iemGRegRefU8(PIEMCPU pIemCpu, uint8_t iReg)
4608{
4609 if (pIemCpu->fPrefixes & IEM_OP_PRF_REX)
4610 return (uint8_t *)iemGRegRef(pIemCpu, iReg);
4611
4612 uint8_t *pu8Reg = (uint8_t *)iemGRegRef(pIemCpu, iReg & 3);
4613 if (iReg >= 4)
4614 pu8Reg++;
4615 return pu8Reg;
4616}
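
/* Without any REX prefix, register encodings 4-7 select the legacy high-byte
   registers AH, CH, DH and BH, i.e. byte 1 of rAX/rCX/rDX/rBX - hence the
   (iReg & 3) masking and pointer increment above.  With a REX prefix they
   select SPL, BPL, SIL and DIL instead. */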
4617
4618
4619/**
4620 * Fetches the value of an 8-bit general register.
4621 *
4622 * @returns The register value.
4623 * @param pIemCpu The per CPU data.
4624 * @param iReg The register.
4625 */
4626IEM_STATIC uint8_t iemGRegFetchU8(PIEMCPU pIemCpu, uint8_t iReg)
4627{
4628 uint8_t const *pbSrc = iemGRegRefU8(pIemCpu, iReg);
4629 return *pbSrc;
4630}
4631
4632
4633/**
4634 * Fetches the value of a 16-bit general register.
4635 *
4636 * @returns The register value.
4637 * @param pIemCpu The per CPU data.
4638 * @param iReg The register.
4639 */
4640IEM_STATIC uint16_t iemGRegFetchU16(PIEMCPU pIemCpu, uint8_t iReg)
4641{
4642 return *(uint16_t *)iemGRegRef(pIemCpu, iReg);
4643}
4644
4645
4646/**
4647 * Fetches the value of a 32-bit general register.
4648 *
4649 * @returns The register value.
4650 * @param pIemCpu The per CPU data.
4651 * @param iReg The register.
4652 */
4653IEM_STATIC uint32_t iemGRegFetchU32(PIEMCPU pIemCpu, uint8_t iReg)
4654{
4655 return *(uint32_t *)iemGRegRef(pIemCpu, iReg);
4656}
4657
4658
4659/**
4660 * Fetches the value of a 64-bit general register.
4661 *
4662 * @returns The register value.
4663 * @param pIemCpu The per CPU data.
4664 * @param iReg The register.
4665 */
4666IEM_STATIC uint64_t iemGRegFetchU64(PIEMCPU pIemCpu, uint8_t iReg)
4667{
4668 return *(uint64_t *)iemGRegRef(pIemCpu, iReg);
4669}
4670
4671
4672/**
4673 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
4674 *
4675 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4676 * segment limit.
4677 *
4678 * @param pIemCpu The per CPU data.
4679 * @param offNextInstr The offset of the next instruction.
4680 */
4681IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS8(PIEMCPU pIemCpu, int8_t offNextInstr)
4682{
4683 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4684 switch (pIemCpu->enmEffOpSize)
4685 {
4686 case IEMMODE_16BIT:
4687 {
4688 uint16_t uNewIp = pCtx->ip + offNextInstr + pIemCpu->offOpcode;
4689 if ( uNewIp > pCtx->cs.u32Limit
4690 && pIemCpu->enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
4691 return iemRaiseGeneralProtectionFault0(pIemCpu);
4692 pCtx->rip = uNewIp;
4693 break;
4694 }
4695
4696 case IEMMODE_32BIT:
4697 {
4698 Assert(pCtx->rip <= UINT32_MAX);
4699 Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
4700
4701 uint32_t uNewEip = pCtx->eip + offNextInstr + pIemCpu->offOpcode;
4702 if (uNewEip > pCtx->cs.u32Limit)
4703 return iemRaiseGeneralProtectionFault0(pIemCpu);
4704 pCtx->rip = uNewEip;
4705 break;
4706 }
4707
4708 case IEMMODE_64BIT:
4709 {
4710 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
4711
4712 uint64_t uNewRip = pCtx->rip + offNextInstr + pIemCpu->offOpcode;
4713 if (!IEM_IS_CANONICAL(uNewRip))
4714 return iemRaiseGeneralProtectionFault0(pIemCpu);
4715 pCtx->rip = uNewRip;
4716 break;
4717 }
4718
4719 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4720 }
4721
4722 pCtx->eflags.Bits.u1RF = 0;
4723 return VINF_SUCCESS;
4724}
4725
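/*
 * Note: offOpcode holds the number of opcode bytes fetched for the current
 * instruction, so the target works out to "address of the next instruction
 * plus the displacement".  For instance, a two byte short jump (EB 10) at
 * IP 0x0100 in 16-bit code yields uNewIp = 0x0100 + 0x10 + 2 = 0x0112.
 */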
4726
4727/**
4728 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
4729 *
4730 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4731 * segment limit.
4732 *
4733 * @returns Strict VBox status code.
4734 * @param pIemCpu The per CPU data.
4735 * @param offNextInstr The offset of the next instruction.
4736 */
4737IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS16(PIEMCPU pIemCpu, int16_t offNextInstr)
4738{
4739 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4740 Assert(pIemCpu->enmEffOpSize == IEMMODE_16BIT);
4741
4742 uint16_t uNewIp = pCtx->ip + offNextInstr + pIemCpu->offOpcode;
4743 if ( uNewIp > pCtx->cs.u32Limit
4744 && pIemCpu->enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
4745 return iemRaiseGeneralProtectionFault0(pIemCpu);
4746 /** @todo Test 16-bit jump in 64-bit mode. possible? */
4747 pCtx->rip = uNewIp;
4748 pCtx->eflags.Bits.u1RF = 0;
4749
4750 return VINF_SUCCESS;
4751}
4752
4753
4754/**
4755 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
4756 *
4757 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4758 * segment limit.
4759 *
4760 * @returns Strict VBox status code.
4761 * @param pIemCpu The per CPU data.
4762 * @param offNextInstr The offset of the next instruction.
4763 */
4764IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS32(PIEMCPU pIemCpu, int32_t offNextInstr)
4765{
4766 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4767 Assert(pIemCpu->enmEffOpSize != IEMMODE_16BIT);
4768
4769 if (pIemCpu->enmEffOpSize == IEMMODE_32BIT)
4770 {
4771 Assert(pCtx->rip <= UINT32_MAX); Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
4772
4773 uint32_t uNewEip = pCtx->eip + offNextInstr + pIemCpu->offOpcode;
4774 if (uNewEip > pCtx->cs.u32Limit)
4775 return iemRaiseGeneralProtectionFault0(pIemCpu);
4776 pCtx->rip = uNewEip;
4777 }
4778 else
4779 {
4780 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
4781
4782 uint64_t uNewRip = pCtx->rip + offNextInstr + pIemCpu->offOpcode;
4783 if (!IEM_IS_CANONICAL(uNewRip))
4784 return iemRaiseGeneralProtectionFault0(pIemCpu);
4785 pCtx->rip = uNewRip;
4786 }
4787 pCtx->eflags.Bits.u1RF = 0;
4788 return VINF_SUCCESS;
4789}
4790
4791
4792/**
4793 * Performs a near jump to the specified address.
4794 *
4795 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4796 * segment limit.
4797 *
4798 * @param pIemCpu The per CPU data.
4799 * @param uNewRip The new RIP value.
4800 */
4801IEM_STATIC VBOXSTRICTRC iemRegRipJump(PIEMCPU pIemCpu, uint64_t uNewRip)
4802{
4803 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4804 switch (pIemCpu->enmEffOpSize)
4805 {
4806 case IEMMODE_16BIT:
4807 {
4808 Assert(uNewRip <= UINT16_MAX);
4809 if ( uNewRip > pCtx->cs.u32Limit
4810 && pIemCpu->enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
4811 return iemRaiseGeneralProtectionFault0(pIemCpu);
4812 /** @todo Test 16-bit jump in 64-bit mode. */
4813 pCtx->rip = uNewRip;
4814 break;
4815 }
4816
4817 case IEMMODE_32BIT:
4818 {
4819 Assert(uNewRip <= UINT32_MAX);
4820 Assert(pCtx->rip <= UINT32_MAX);
4821 Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
4822
4823 if (uNewRip > pCtx->cs.u32Limit)
4824 return iemRaiseGeneralProtectionFault0(pIemCpu);
4825 pCtx->rip = uNewRip;
4826 break;
4827 }
4828
4829 case IEMMODE_64BIT:
4830 {
4831 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
4832
4833 if (!IEM_IS_CANONICAL(uNewRip))
4834 return iemRaiseGeneralProtectionFault0(pIemCpu);
4835 pCtx->rip = uNewRip;
4836 break;
4837 }
4838
4839 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4840 }
4841
4842 pCtx->eflags.Bits.u1RF = 0;
4843 return VINF_SUCCESS;
4844}
4845
4846
4847/**
4848 * Gets the address of the top of the stack.
4849 *
4850 * @param pIemCpu The per CPU data.
4851 * @param pCtx The CPU context which SP/ESP/RSP should be
4852 * read.
4853 */
4854DECLINLINE(RTGCPTR) iemRegGetEffRsp(PCIEMCPU pIemCpu, PCCPUMCTX pCtx)
4855{
4856 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
4857 return pCtx->rsp;
4858 if (pCtx->ss.Attr.n.u1DefBig)
4859 return pCtx->esp;
4860 return pCtx->sp;
4861}
4862
4863
4864/**
4865 * Updates the RIP/EIP/IP to point to the next instruction.
4866 *
4867 * This function leaves the EFLAGS.RF flag alone.
4868 *
4869 * @param pIemCpu The per CPU data.
4870 * @param cbInstr The number of bytes to add.
4871 */
4872IEM_STATIC void iemRegAddToRipKeepRF(PIEMCPU pIemCpu, uint8_t cbInstr)
4873{
4874 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4875 switch (pIemCpu->enmCpuMode)
4876 {
4877 case IEMMODE_16BIT:
4878 Assert(pCtx->rip <= UINT16_MAX);
4879 pCtx->eip += cbInstr;
4880 pCtx->eip &= UINT32_C(0xffff);
4881 break;
4882
4883 case IEMMODE_32BIT:
4884 pCtx->eip += cbInstr;
4885 Assert(pCtx->rip <= UINT32_MAX);
4886 break;
4887
4888 case IEMMODE_64BIT:
4889 pCtx->rip += cbInstr;
4890 break;
4891 default: AssertFailed();
4892 }
4893}
4894
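/*
 * Note: In the 16-bit case only IP advances; the AND with 0xffff makes the
 * result wrap at 64K, so e.g. IP=0xFFFF plus a 2 byte instruction ends up at
 * 0x0001 rather than 0x10001.
 */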
4895
4896#if 0
4897/**
4898 * Updates the RIP/EIP/IP to point to the next instruction.
4899 *
4900 * @param pIemCpu The per CPU data.
4901 */
4902IEM_STATIC void iemRegUpdateRipKeepRF(PIEMCPU pIemCpu)
4903{
4904 return iemRegAddToRipKeepRF(pIemCpu, pIemCpu->offOpcode);
4905}
4906#endif
4907
4908
4909
4910/**
4911 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
4912 *
4913 * @param pIemCpu The per CPU data.
4914 * @param cbInstr The number of bytes to add.
4915 */
4916IEM_STATIC void iemRegAddToRipAndClearRF(PIEMCPU pIemCpu, uint8_t cbInstr)
4917{
4918 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4919
4920 pCtx->eflags.Bits.u1RF = 0;
4921
4922 /* NB: Must be kept in sync with HM (xxxAdvanceGuestRip). */
4923 switch (pIemCpu->enmCpuMode)
4924 {
4925 /** @todo investigate if EIP or RIP is really incremented. */
4926 case IEMMODE_16BIT:
4927 case IEMMODE_32BIT:
4928 pCtx->eip += cbInstr;
4929 Assert(pCtx->rip <= UINT32_MAX);
4930 break;
4931
4932 case IEMMODE_64BIT:
4933 pCtx->rip += cbInstr;
4934 break;
4935 default: AssertFailed();
4936 }
4937}
4938
4939
4940/**
4941 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
4942 *
4943 * @param pIemCpu The per CPU data.
4944 */
4945IEM_STATIC void iemRegUpdateRipAndClearRF(PIEMCPU pIemCpu)
4946{
4947 return iemRegAddToRipAndClearRF(pIemCpu, pIemCpu->offOpcode);
4948}
4949
4950
4951/**
4952 * Adds to the stack pointer.
4953 *
4954 * @param pIemCpu The per CPU data.
4955 * @param pCtx The CPU context which SP/ESP/RSP should be
4956 * updated.
4957 * @param cbToAdd The number of bytes to add.
4958 */
4959DECLINLINE(void) iemRegAddToRsp(PCIEMCPU pIemCpu, PCPUMCTX pCtx, uint8_t cbToAdd)
4960{
4961 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
4962 pCtx->rsp += cbToAdd;
4963 else if (pCtx->ss.Attr.n.u1DefBig)
4964 pCtx->esp += cbToAdd;
4965 else
4966 pCtx->sp += cbToAdd;
4967}
4968
4969
4970/**
4971 * Subtracts from the stack pointer.
4972 *
4973 * @param pIemCpu The per CPU data.
4974 * @param pCtx The CPU context which SP/ESP/RSP should be
4975 * updated.
4976 * @param cbToSub The number of bytes to subtract.
4977 */
4978DECLINLINE(void) iemRegSubFromRsp(PCIEMCPU pIemCpu, PCPUMCTX pCtx, uint8_t cbToSub)
4979{
4980 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
4981 pCtx->rsp -= cbToSub;
4982 else if (pCtx->ss.Attr.n.u1DefBig)
4983 pCtx->esp -= cbToSub;
4984 else
4985 pCtx->sp -= cbToSub;
4986}
4987
4988
4989/**
4990 * Adds to the temporary stack pointer.
4991 *
4992 * @param pIemCpu The per CPU data.
4993 * @param pCtx Where to get the current stack mode.
4994 * @param pTmpRsp The temporary SP/ESP/RSP to update.
4995 * @param cbToAdd The number of bytes to add.
4996 */
4997DECLINLINE(void) iemRegAddToRspEx(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint16_t cbToAdd)
4998{
4999 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5000 pTmpRsp->u += cbToAdd;
5001 else if (pCtx->ss.Attr.n.u1DefBig)
5002 pTmpRsp->DWords.dw0 += cbToAdd;
5003 else
5004 pTmpRsp->Words.w0 += cbToAdd;
5005}
5006
5007
5008/**
5009 * Subtracts from the temporary stack pointer.
5010 *
5011 * @param pIemCpu The per CPU data.
5012 * @param pCtx Where to get the current stack mode.
5013 * @param pTmpRsp The temporary SP/ESP/RSP to update.
5014 * @param cbToSub The number of bytes to subtract.
5015 * @remarks The @a cbToSub argument *MUST* be 16-bit, iemCImpl_enter is
5016 * expecting that.
5017 */
5018DECLINLINE(void) iemRegSubFromRspEx(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint16_t cbToSub)
5019{
5020 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5021 pTmpRsp->u -= cbToSub;
5022 else if (pCtx->ss.Attr.n.u1DefBig)
5023 pTmpRsp->DWords.dw0 -= cbToSub;
5024 else
5025 pTmpRsp->Words.w0 -= cbToSub;
5026}
5027
5028
5029/**
5030 * Calculates the effective stack address for a push of the specified size as
5031 * well as the new RSP value (upper bits may be masked).
5032 *
5033 * @returns Effective stack address for the push.
5034 * @param pIemCpu The IEM per CPU data.
5035 * @param pCtx Where to get the current stack mode.
5036 * @param cbItem The size of the stack item to push.
5037 * @param puNewRsp Where to return the new RSP value.
5038 */
5039DECLINLINE(RTGCPTR) iemRegGetRspForPush(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
5040{
5041 RTUINT64U uTmpRsp;
5042 RTGCPTR GCPtrTop;
5043 uTmpRsp.u = pCtx->rsp;
5044
5045 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5046 GCPtrTop = uTmpRsp.u -= cbItem;
5047 else if (pCtx->ss.Attr.n.u1DefBig)
5048 GCPtrTop = uTmpRsp.DWords.dw0 -= cbItem;
5049 else
5050 GCPtrTop = uTmpRsp.Words.w0 -= cbItem;
5051 *puNewRsp = uTmpRsp.u;
5052 return GCPtrTop;
5053}
5054
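/*
 * Note: The width of the stack pointer update follows the stack attributes
 * (64-bit mode vs. SS.D), not the operand size.  With a 16-bit SS, pushing a
 * 2 byte item when SP=0x0000 wraps the low word only: the worker returns
 * GCPtrTop=0xFFFE and *puNewRsp keeps the upper 48 bits of RSP unchanged.
 */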
5055
5056/**
5057 * Gets the current stack pointer and calculates the value after a pop of the
5058 * specified size.
5059 *
5060 * @returns Current stack pointer.
5061 * @param pIemCpu The per CPU data.
5062 * @param pCtx Where to get the current stack mode.
5063 * @param cbItem The size of the stack item to pop.
5064 * @param puNewRsp Where to return the new RSP value.
5065 */
5066DECLINLINE(RTGCPTR) iemRegGetRspForPop(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
5067{
5068 RTUINT64U uTmpRsp;
5069 RTGCPTR GCPtrTop;
5070 uTmpRsp.u = pCtx->rsp;
5071
5072 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5073 {
5074 GCPtrTop = uTmpRsp.u;
5075 uTmpRsp.u += cbItem;
5076 }
5077 else if (pCtx->ss.Attr.n.u1DefBig)
5078 {
5079 GCPtrTop = uTmpRsp.DWords.dw0;
5080 uTmpRsp.DWords.dw0 += cbItem;
5081 }
5082 else
5083 {
5084 GCPtrTop = uTmpRsp.Words.w0;
5085 uTmpRsp.Words.w0 += cbItem;
5086 }
5087 *puNewRsp = uTmpRsp.u;
5088 return GCPtrTop;
5089}
5090
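/*
 * Note: The pop counterpart returns the current top-of-stack address and only
 * then advances the (width-masked) stack pointer, e.g. a 2 byte pop with a
 * 16-bit SS and SP=0xFFFE returns 0xFFFE and leaves the new SP wrapped to 0.
 */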
5091
5092/**
5093 * Calculates the effective stack address for a push of the specified size as
5094 * well as the new temporary RSP value (upper bits may be masked).
5095 *
5096 * @returns Effective stack address for the push.
5097 * @param pIemCpu The per CPU data.
5098 * @param pCtx Where to get the current stack mode.
5099 * @param pTmpRsp The temporary stack pointer. This is updated.
5100 * @param cbItem The size of the stack item to push.
5101 */
5102DECLINLINE(RTGCPTR) iemRegGetRspForPushEx(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint8_t cbItem)
5103{
5104 RTGCPTR GCPtrTop;
5105
5106 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5107 GCPtrTop = pTmpRsp->u -= cbItem;
5108 else if (pCtx->ss.Attr.n.u1DefBig)
5109 GCPtrTop = pTmpRsp->DWords.dw0 -= cbItem;
5110 else
5111 GCPtrTop = pTmpRsp->Words.w0 -= cbItem;
5112 return GCPtrTop;
5113}
5114
5115
5116/**
5117 * Gets the effective stack address for a pop of the specified size and
5118 * calculates and updates the temporary RSP.
5119 *
5120 * @returns Current stack pointer.
5121 * @param pIemCpu The per CPU data.
5122 * @param pCtx Where to get the current stack mode.
5123 * @param pTmpRsp The temporary stack pointer. This is updated.
5124 * @param cbItem The size of the stack item to pop.
5125 */
5126DECLINLINE(RTGCPTR) iemRegGetRspForPopEx(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint8_t cbItem)
5127{
5128 RTGCPTR GCPtrTop;
5129 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5130 {
5131 GCPtrTop = pTmpRsp->u;
5132 pTmpRsp->u += cbItem;
5133 }
5134 else if (pCtx->ss.Attr.n.u1DefBig)
5135 {
5136 GCPtrTop = pTmpRsp->DWords.dw0;
5137 pTmpRsp->DWords.dw0 += cbItem;
5138 }
5139 else
5140 {
5141 GCPtrTop = pTmpRsp->Words.w0;
5142 pTmpRsp->Words.w0 += cbItem;
5143 }
5144 return GCPtrTop;
5145}
5146
5147/** @} */
5148
5149
5150/** @name FPU access and helpers.
5151 *
5152 * @{
5153 */
5154
5155
5156/**
5157 * Hook for preparing to use the host FPU.
5158 *
5159 * This is necessary in ring-0 and raw-mode context.
5160 *
5161 * @param pIemCpu The IEM per CPU data.
5162 */
5163DECLINLINE(void) iemFpuPrepareUsage(PIEMCPU pIemCpu)
5164{
5165#ifdef IN_RING3
5166 NOREF(pIemCpu);
5167#else
5168/** @todo RZ: FIXME */
5169//# error "Implement me"
5170#endif
5171}
5172
5173
5174/**
5175 * Hook for preparing to use the host FPU for SSE
5176 *
5177 * This is necessary in ring-0 and raw-mode context.
5178 *
5179 * @param pIemCpu The IEM per CPU data.
5180 */
5181DECLINLINE(void) iemFpuPrepareUsageSse(PIEMCPU pIemCpu)
5182{
5183 iemFpuPrepareUsage(pIemCpu);
5184}
5185
5186
5187/**
5188 * Stores a QNaN value into a FPU register.
5189 *
5190 * @param pReg Pointer to the register.
5191 */
5192DECLINLINE(void) iemFpuStoreQNan(PRTFLOAT80U pReg)
5193{
5194 pReg->au32[0] = UINT32_C(0x00000000);
5195 pReg->au32[1] = UINT32_C(0xc0000000);
5196 pReg->au16[4] = UINT16_C(0xffff);
5197}
5198
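/*
 * Note: The pattern stored above is the x87 "real indefinite" QNaN, i.e. the
 * 80-bit value FFFF'C0000000'00000000 (sign=1, exponent all ones, mantissa
 * with only the two top bits set) - the value the FPU produces for masked
 * invalid-operation responses.
 */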
5199
5200/**
5201 * Updates the FOP, FPU.CS and FPUIP registers.
5202 *
5203 * @param pIemCpu The IEM per CPU data.
5204 * @param pCtx The CPU context.
5205 * @param pFpuCtx The FPU context.
5206 */
5207DECLINLINE(void) iemFpuUpdateOpcodeAndIpWorker(PIEMCPU pIemCpu, PCPUMCTX pCtx, PX86FXSTATE pFpuCtx)
5208{
5209 pFpuCtx->FOP = pIemCpu->abOpcode[pIemCpu->offFpuOpcode]
5210 | ((uint16_t)(pIemCpu->abOpcode[pIemCpu->offFpuOpcode - 1] & 0x7) << 8);
5211 /** @todo x87.CS and FPUIP need to be kept separately. */
5212 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
5213 {
5214 /** @todo Testcase: making assumptions about how FPUIP and FPUDP are handled
5215 * happens in real mode here based on the fnsave and fnstenv images. */
5216 pFpuCtx->CS = 0;
5217 pFpuCtx->FPUIP = pCtx->eip | ((uint32_t)pCtx->cs.Sel << 4);
5218 }
5219 else
5220 {
5221 pFpuCtx->CS = pCtx->cs.Sel;
5222 pFpuCtx->FPUIP = pCtx->rip;
5223 }
5224}
5225
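/*
 * Note: FOP is the 11-bit x87 opcode: bits 10-8 come from the low three bits
 * of the D8-DF escape byte and bits 7-0 from the ModR/M byte.  E.g. FADD
 * ST(0),ST(1) (D8 C1) records FOP = ((0xD8 & 7) << 8) | 0xC1 = 0x0C1.
 */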
5226
5227/**
5228 * Updates the x87.DS and FPUDP registers.
5229 *
5230 * @param pIemCpu The IEM per CPU data.
5231 * @param pCtx The CPU context.
5232 * @param pFpuCtx The FPU context.
5233 * @param iEffSeg The effective segment register.
5234 * @param GCPtrEff The effective address relative to @a iEffSeg.
5235 */
5236DECLINLINE(void) iemFpuUpdateDP(PIEMCPU pIemCpu, PCPUMCTX pCtx, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5237{
5238 RTSEL sel;
5239 switch (iEffSeg)
5240 {
5241 case X86_SREG_DS: sel = pCtx->ds.Sel; break;
5242 case X86_SREG_SS: sel = pCtx->ss.Sel; break;
5243 case X86_SREG_CS: sel = pCtx->cs.Sel; break;
5244 case X86_SREG_ES: sel = pCtx->es.Sel; break;
5245 case X86_SREG_FS: sel = pCtx->fs.Sel; break;
5246 case X86_SREG_GS: sel = pCtx->gs.Sel; break;
5247 default:
5248 AssertMsgFailed(("%d\n", iEffSeg));
5249 sel = pCtx->ds.Sel;
5250 }
5251 /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
5252 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
5253 {
5254 pFpuCtx->DS = 0;
5255 pFpuCtx->FPUDP = (uint32_t)GCPtrEff | ((uint32_t)sel << 4);
5256 }
5257 else
5258 {
5259 pFpuCtx->DS = sel;
5260 pFpuCtx->FPUDP = GCPtrEff;
5261 }
5262}
5263
5264
5265/**
5266 * Rotates the stack registers in the push direction.
5267 *
5268 * @param pFpuCtx The FPU context.
5269 * @remarks This is a complete waste of time, but fxsave stores the registers in
5270 * stack order.
5271 */
5272DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
5273{
5274 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
5275 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
5276 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
5277 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
5278 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
5279 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
5280 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
5281 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
5282 pFpuCtx->aRegs[0].r80 = r80Tmp;
5283}
5284
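/*
 * Note: fxsave keeps aRegs[] in ST() order rather than in physical register
 * order, so a push is implemented as TOP = (TOP - 1) & 7 in FSW plus one
 * rotation of the array.  Callers therefore store the pushed value in
 * aRegs[7] first; the rotation above then moves it into aRegs[0], the new
 * ST(0).
 */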
5285
5286/**
5287 * Rotates the stack registers in the pop direction.
5288 *
5289 * @param pFpuCtx The FPU context.
5290 * @remarks This is a complete waste of time, but fxsave stores the registers in
5291 * stack order.
5292 */
5293DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
5294{
5295 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
5296 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
5297 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
5298 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
5299 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
5300 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
5301 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
5302 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
5303 pFpuCtx->aRegs[7].r80 = r80Tmp;
5304}
5305
5306
5307/**
5308 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
5309 * exception prevents it.
5310 *
5311 * @param pIemCpu The IEM per CPU data.
5312 * @param pResult The FPU operation result to push.
5313 * @param pFpuCtx The FPU context.
5314 */
5315IEM_STATIC void iemFpuMaybePushResult(PIEMCPU pIemCpu, PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx)
5316{
5317 /* Update FSW and bail if there are pending exceptions afterwards. */
5318 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
5319 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
5320 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
5321 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
5322 {
5323 pFpuCtx->FSW = fFsw;
5324 return;
5325 }
5326
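    /* Note: adding 7 under the 3-bit TOP mask is the same as subtracting 1, i.e. the slot a push will occupy. */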
5327 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
5328 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
5329 {
5330 /* All is fine, push the actual value. */
5331 pFpuCtx->FTW |= RT_BIT(iNewTop);
5332 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
5333 }
5334 else if (pFpuCtx->FCW & X86_FCW_IM)
5335 {
5336 /* Masked stack overflow, push QNaN. */
5337 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
5338 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5339 }
5340 else
5341 {
5342 /* Raise stack overflow, don't push anything. */
5343 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
5344 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
5345 return;
5346 }
5347
5348 fFsw &= ~X86_FSW_TOP_MASK;
5349 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
5350 pFpuCtx->FSW = fFsw;
5351
5352 iemFpuRotateStackPush(pFpuCtx);
5353}
5354
5355
5356/**
5357 * Stores a result in a FPU register and updates the FSW and FTW.
5358 *
5359 * @param pFpuCtx The FPU context.
5360 * @param pResult The result to store.
5361 * @param iStReg Which FPU register to store it in.
5362 */
5363IEM_STATIC void iemFpuStoreResultOnly(PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg)
5364{
5365 Assert(iStReg < 8);
5366 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
5367 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5368 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_TOP_MASK;
5369 pFpuCtx->FTW |= RT_BIT(iReg);
5370 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
5371}
5372
5373
5374/**
5375 * Only updates the FPU status word (FSW) with the result of the current
5376 * instruction.
5377 *
5378 * @param pFpuCtx The FPU context.
5379 * @param u16FSW The FSW output of the current instruction.
5380 */
5381IEM_STATIC void iemFpuUpdateFSWOnly(PX86FXSTATE pFpuCtx, uint16_t u16FSW)
5382{
5383 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5384 pFpuCtx->FSW |= u16FSW & ~X86_FSW_TOP_MASK;
5385}
5386
5387
5388/**
5389 * Pops one item off the FPU stack if no pending exception prevents it.
5390 *
5391 * @param pFpuCtx The FPU context.
5392 */
5393IEM_STATIC void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx)
5394{
5395 /* Check pending exceptions. */
5396 uint16_t uFSW = pFpuCtx->FSW;
5397 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
5398 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
5399 return;
5400
5401 /* Pop: increment TOP (modulo 8). */
5402 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
5403 uFSW &= ~X86_FSW_TOP_MASK;
5404 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
5405 pFpuCtx->FSW = uFSW;
5406
5407 /* Mark the previous ST0 as empty. */
5408 iOldTop >>= X86_FSW_TOP_SHIFT;
5409 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
5410
5411 /* Rotate the registers. */
5412 iemFpuRotateStackPop(pFpuCtx);
5413}
5414
5415
5416/**
5417 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
5418 *
5419 * @param pIemCpu The IEM per CPU data.
5420 * @param pResult The FPU operation result to push.
5421 */
5422IEM_STATIC void iemFpuPushResult(PIEMCPU pIemCpu, PIEMFPURESULT pResult)
5423{
5424 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5425 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5426 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5427 iemFpuMaybePushResult(pIemCpu, pResult, pFpuCtx);
5428}
5429
5430
5431/**
5432 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
5433 * and sets FPUDP and FPUDS.
5434 *
5435 * @param pIemCpu The IEM per CPU data.
5436 * @param pResult The FPU operation result to push.
5437 * @param iEffSeg The effective segment register.
5438 * @param GCPtrEff The effective address relative to @a iEffSeg.
5439 */
5440IEM_STATIC void iemFpuPushResultWithMemOp(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5441{
5442 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5443 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5444 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5445 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5446 iemFpuMaybePushResult(pIemCpu, pResult, pFpuCtx);
5447}
5448
5449
5450/**
5451 * Replaces ST0 with the first value and pushes the second onto the FPU stack,
5452 * unless a pending exception prevents it.
5453 *
5454 * @param pIemCpu The IEM per CPU data.
5455 * @param pResult The FPU operation result to store and push.
5456 */
5457IEM_STATIC void iemFpuPushResultTwo(PIEMCPU pIemCpu, PIEMFPURESULTTWO pResult)
5458{
5459 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5460 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5461 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5462
5463 /* Update FSW and bail if there are pending exceptions afterwards. */
5464 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
5465 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
5466 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
5467 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
5468 {
5469 pFpuCtx->FSW = fFsw;
5470 return;
5471 }
5472
5473 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
5474 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
5475 {
5476 /* All is fine, push the actual value. */
5477 pFpuCtx->FTW |= RT_BIT(iNewTop);
5478 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
5479 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
5480 }
5481 else if (pFpuCtx->FCW & X86_FCW_IM)
5482 {
5483 /* Masked stack overflow, push QNaN. */
5484 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
5485 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
5486 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5487 }
5488 else
5489 {
5490 /* Raise stack overflow, don't push anything. */
5491 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
5492 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
5493 return;
5494 }
5495
5496 fFsw &= ~X86_FSW_TOP_MASK;
5497 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
5498 pFpuCtx->FSW = fFsw;
5499
5500 iemFpuRotateStackPush(pFpuCtx);
5501}
5502
5503
5504/**
5505 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
5506 * FOP.
5507 *
5508 * @param pIemCpu The IEM per CPU data.
5509 * @param pResult The result to store.
5510 * @param iStReg Which FPU register to store it in.
5511 */
5512IEM_STATIC void iemFpuStoreResult(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iStReg)
5513{
5514 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5515 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5516 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5517 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
5518}
5519
5520
5521/**
5522 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
5523 * FOP, and then pops the stack.
5524 *
5525 * @param pIemCpu The IEM per CPU data.
5526 * @param pResult The result to store.
5527 * @param iStReg Which FPU register to store it in.
5528 */
5529IEM_STATIC void iemFpuStoreResultThenPop(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iStReg)
5530{
5531 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5532 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5533 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5534 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
5535 iemFpuMaybePopOne(pFpuCtx);
5536}
5537
5538
5539/**
5540 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
5541 * FPUDP, and FPUDS.
5542 *
5543 * @param pIemCpu The IEM per CPU data.
5544 * @param pResult The result to store.
5545 * @param iStReg Which FPU register to store it in.
5546 * @param iEffSeg The effective memory operand selector register.
5547 * @param GCPtrEff The effective memory operand offset.
5548 */
5549IEM_STATIC void iemFpuStoreResultWithMemOp(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iStReg,
5550 uint8_t iEffSeg, RTGCPTR GCPtrEff)
5551{
5552 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5553 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5554 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5555 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5556 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
5557}
5558
5559
5560/**
5561 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
5562 * FPUDP, and FPUDS, and then pops the stack.
5563 *
5564 * @param pIemCpu The IEM per CPU data.
5565 * @param pResult The result to store.
5566 * @param iStReg Which FPU register to store it in.
5567 * @param iEffSeg The effective memory operand selector register.
5568 * @param GCPtrEff The effective memory operand offset.
5569 */
5570IEM_STATIC void iemFpuStoreResultWithMemOpThenPop(PIEMCPU pIemCpu, PIEMFPURESULT pResult,
5571 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5572{
5573 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5574 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5575 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5576 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5577 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
5578 iemFpuMaybePopOne(pFpuCtx);
5579}
5580
5581
5582/**
5583 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
5584 *
5585 * @param pIemCpu The IEM per CPU data.
5586 */
5587IEM_STATIC void iemFpuUpdateOpcodeAndIp(PIEMCPU pIemCpu)
5588{
5589 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5590 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5591 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5592}
5593
5594
5595/**
5596 * Marks the specified stack register as free (for FFREE).
5597 *
5598 * @param pIemCpu The IEM per CPU data.
5599 * @param iStReg The register to free.
5600 */
5601IEM_STATIC void iemFpuStackFree(PIEMCPU pIemCpu, uint8_t iStReg)
5602{
5603 Assert(iStReg < 8);
5604 PX86FXSTATE pFpuCtx = &pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87;
5605 uint8_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
5606 pFpuCtx->FTW &= ~RT_BIT(iReg);
5607}
5608
5609
5610/**
5611 * Increments FSW.TOP, i.e. pops an item off the stack without freeing it.
5612 *
5613 * @param pIemCpu The IEM per CPU data.
5614 */
5615IEM_STATIC void iemFpuStackIncTop(PIEMCPU pIemCpu)
5616{
5617 PX86FXSTATE pFpuCtx = &pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87;
5618 uint16_t uFsw = pFpuCtx->FSW;
5619 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
5620 uTop = (uTop + (1 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
5621 uFsw &= ~X86_FSW_TOP_MASK;
5622 uFsw |= uTop;
5623 pFpuCtx->FSW = uFsw;
5624}
5625
5626
5627/**
5628 * Decrements FSW.TOP, i.e. pushes an item onto the stack without storing anything.
5629 *
5630 * @param pIemCpu The IEM per CPU data.
5631 */
5632IEM_STATIC void iemFpuStackDecTop(PIEMCPU pIemCpu)
5633{
5634 PX86FXSTATE pFpuCtx = &pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87;
5635 uint16_t uFsw = pFpuCtx->FSW;
5636 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
5637 uTop = (uTop + (7 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
5638 uFsw &= ~X86_FSW_TOP_MASK;
5639 uFsw |= uTop;
5640 pFpuCtx->FSW = uFsw;
5641}
5642
5643
5644/**
5645 * Updates the FSW, FOP, FPUIP, and FPUCS.
5646 *
5647 * @param pIemCpu The IEM per CPU data.
5648 * @param u16FSW The FSW from the current instruction.
5649 */
5650IEM_STATIC void iemFpuUpdateFSW(PIEMCPU pIemCpu, uint16_t u16FSW)
5651{
5652 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5653 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5654 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5655 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
5656}
5657
5658
5659/**
5660 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
5661 *
5662 * @param pIemCpu The IEM per CPU data.
5663 * @param u16FSW The FSW from the current instruction.
5664 */
5665IEM_STATIC void iemFpuUpdateFSWThenPop(PIEMCPU pIemCpu, uint16_t u16FSW)
5666{
5667 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5668 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5669 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5670 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
5671 iemFpuMaybePopOne(pFpuCtx);
5672}
5673
5674
5675/**
5676 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
5677 *
5678 * @param pIemCpu The IEM per CPU data.
5679 * @param u16FSW The FSW from the current instruction.
5680 * @param iEffSeg The effective memory operand selector register.
5681 * @param GCPtrEff The effective memory operand offset.
5682 */
5683IEM_STATIC void iemFpuUpdateFSWWithMemOp(PIEMCPU pIemCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5684{
5685 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5686 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5687 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5688 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5689 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
5690}
5691
5692
5693/**
5694 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
5695 *
5696 * @param pIemCpu The IEM per CPU data.
5697 * @param u16FSW The FSW from the current instruction.
5698 */
5699IEM_STATIC void iemFpuUpdateFSWThenPopPop(PIEMCPU pIemCpu, uint16_t u16FSW)
5700{
5701 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5702 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5703 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5704 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
5705 iemFpuMaybePopOne(pFpuCtx);
5706 iemFpuMaybePopOne(pFpuCtx);
5707}
5708
5709
5710/**
5711 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
5712 *
5713 * @param pIemCpu The IEM per CPU data.
5714 * @param u16FSW The FSW from the current instruction.
5715 * @param iEffSeg The effective memory operand selector register.
5716 * @param GCPtrEff The effective memory operand offset.
5717 */
5718IEM_STATIC void iemFpuUpdateFSWWithMemOpThenPop(PIEMCPU pIemCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5719{
5720 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5721 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5722 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5723 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5724 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
5725 iemFpuMaybePopOne(pFpuCtx);
5726}
5727
5728
5729/**
5730 * Worker routine for raising an FPU stack underflow exception.
5731 *
5732 * @param pIemCpu The IEM per CPU data.
5733 * @param pFpuCtx The FPU context.
5734 * @param iStReg The stack register being accessed.
5735 */
5736IEM_STATIC void iemFpuStackUnderflowOnly(PIEMCPU pIemCpu, PX86FXSTATE pFpuCtx, uint8_t iStReg)
5737{
5738 Assert(iStReg < 8 || iStReg == UINT8_MAX);
5739 if (pFpuCtx->FCW & X86_FCW_IM)
5740 {
5741 /* Masked underflow. */
5742 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5743 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5744 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
5745 if (iStReg != UINT8_MAX)
5746 {
5747 pFpuCtx->FTW |= RT_BIT(iReg);
5748 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
5749 }
5750 }
5751 else
5752 {
5753 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5754 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5755 }
5756}
5757
5758
5759/**
5760 * Raises a FPU stack underflow exception.
5761 *
5762 * @param pIemCpu The IEM per CPU data.
5763 * @param iStReg The destination register that should be loaded
5764 * with QNaN if \#IS is not masked. Specify
5765 * UINT8_MAX if none (like for fcom).
5766 */
5767DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflow(PIEMCPU pIemCpu, uint8_t iStReg)
5768{
5769 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5770 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5771 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5772 iemFpuStackUnderflowOnly(pIemCpu, pFpuCtx, iStReg);
5773}
5774
5775
5776DECL_NO_INLINE(IEM_STATIC, void)
5777iemFpuStackUnderflowWithMemOp(PIEMCPU pIemCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5778{
5779 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5780 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5781 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5782 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5783 iemFpuStackUnderflowOnly(pIemCpu, pFpuCtx, iStReg);
5784}
5785
5786
5787DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPop(PIEMCPU pIemCpu, uint8_t iStReg)
5788{
5789 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5790 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5791 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5792 iemFpuStackUnderflowOnly(pIemCpu, pFpuCtx, iStReg);
5793 iemFpuMaybePopOne(pFpuCtx);
5794}
5795
5796
5797DECL_NO_INLINE(IEM_STATIC, void)
5798iemFpuStackUnderflowWithMemOpThenPop(PIEMCPU pIemCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5799{
5800 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5801 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5802 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5803 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5804 iemFpuStackUnderflowOnly(pIemCpu, pFpuCtx, iStReg);
5805 iemFpuMaybePopOne(pFpuCtx);
5806}
5807
5808
5809DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPopPop(PIEMCPU pIemCpu)
5810{
5811 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5812 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5813 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5814 iemFpuStackUnderflowOnly(pIemCpu, pFpuCtx, UINT8_MAX);
5815 iemFpuMaybePopOne(pFpuCtx);
5816 iemFpuMaybePopOne(pFpuCtx);
5817}
5818
5819
5820DECL_NO_INLINE(IEM_STATIC, void)
5821iemFpuStackPushUnderflow(PIEMCPU pIemCpu)
5822{
5823 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5824 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5825 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5826
5827 if (pFpuCtx->FCW & X86_FCW_IM)
5828 {
5829 /* Masked underflow - Push QNaN. */
5830 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5831 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5832 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5833 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5834 pFpuCtx->FTW |= RT_BIT(iNewTop);
5835 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5836 iemFpuRotateStackPush(pFpuCtx);
5837 }
5838 else
5839 {
5840 /* Exception pending - don't change TOP or the register stack. */
5841 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5842 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5843 }
5844}
5845
5846
5847DECL_NO_INLINE(IEM_STATIC, void)
5848iemFpuStackPushUnderflowTwo(PIEMCPU pIemCpu)
5849{
5850 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5851 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5852 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5853
5854 if (pFpuCtx->FCW & X86_FCW_IM)
5855 {
5856 /* Masked underflow - Push QNaN. */
5857 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5858 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5859 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5860 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5861 pFpuCtx->FTW |= RT_BIT(iNewTop);
5862 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
5863 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5864 iemFpuRotateStackPush(pFpuCtx);
5865 }
5866 else
5867 {
5868 /* Exception pending - don't change TOP or the register stack. */
5869 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5870 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5871 }
5872}
5873
5874
5875/**
5876 * Worker routine for raising an FPU stack overflow exception on a push.
5877 *
5878 * @param pFpuCtx The FPU context.
5879 */
5880IEM_STATIC void iemFpuStackPushOverflowOnly(PX86FXSTATE pFpuCtx)
5881{
5882 if (pFpuCtx->FCW & X86_FCW_IM)
5883 {
5884 /* Masked overflow. */
5885 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5886 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5887 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
5888 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5889 pFpuCtx->FTW |= RT_BIT(iNewTop);
5890 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5891 iemFpuRotateStackPush(pFpuCtx);
5892 }
5893 else
5894 {
5895 /* Exception pending - don't change TOP or the register stack. */
5896 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5897 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5898 }
5899}
5900
5901
5902/**
5903 * Raises a FPU stack overflow exception on a push.
5904 *
5905 * @param pIemCpu The IEM per CPU data.
5906 */
5907DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackPushOverflow(PIEMCPU pIemCpu)
5908{
5909 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5910 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5911 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5912 iemFpuStackPushOverflowOnly(pFpuCtx);
5913}
5914
5915
5916/**
5917 * Raises a FPU stack overflow exception on a push with a memory operand.
5918 *
5919 * @param pIemCpu The IEM per CPU data.
5920 * @param iEffSeg The effective memory operand selector register.
5921 * @param GCPtrEff The effective memory operand offset.
5922 */
5923DECL_NO_INLINE(IEM_STATIC, void)
5924iemFpuStackPushOverflowWithMemOp(PIEMCPU pIemCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5925{
5926 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5927 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5928 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5929 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5930 iemFpuStackPushOverflowOnly(pFpuCtx);
5931}
5932
5933
5934IEM_STATIC int iemFpuStRegNotEmpty(PIEMCPU pIemCpu, uint8_t iStReg)
5935{
5936 PX86FXSTATE pFpuCtx = &pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87;
5937 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
5938 if (pFpuCtx->FTW & RT_BIT(iReg))
5939 return VINF_SUCCESS;
5940 return VERR_NOT_FOUND;
5941}
5942
5943
5944IEM_STATIC int iemFpuStRegNotEmptyRef(PIEMCPU pIemCpu, uint8_t iStReg, PCRTFLOAT80U *ppRef)
5945{
5946 PX86FXSTATE pFpuCtx = &pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87;
5947 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
5948 if (pFpuCtx->FTW & RT_BIT(iReg))
5949 {
5950 *ppRef = &pFpuCtx->aRegs[iStReg].r80;
5951 return VINF_SUCCESS;
5952 }
5953 return VERR_NOT_FOUND;
5954}
5955
5956
5957IEM_STATIC int iemFpu2StRegsNotEmptyRef(PIEMCPU pIemCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0,
5958 uint8_t iStReg1, PCRTFLOAT80U *ppRef1)
5959{
5960 PX86FXSTATE pFpuCtx = &pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87;
5961 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
5962 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
5963 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
5964 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
5965 {
5966 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
5967 *ppRef1 = &pFpuCtx->aRegs[iStReg1].r80;
5968 return VINF_SUCCESS;
5969 }
5970 return VERR_NOT_FOUND;
5971}
5972
5973
5974IEM_STATIC int iemFpu2StRegsNotEmptyRefFirst(PIEMCPU pIemCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0, uint8_t iStReg1)
5975{
5976 PX86FXSTATE pFpuCtx = &pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87;
5977 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
5978 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
5979 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
5980 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
5981 {
5982 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
5983 return VINF_SUCCESS;
5984 }
5985 return VERR_NOT_FOUND;
5986}
5987
5988
5989/**
5990 * Updates the FPU exception status after FCW is changed.
5991 *
5992 * @param pFpuCtx The FPU context.
5993 */
5994IEM_STATIC void iemFpuRecalcExceptionStatus(PX86FXSTATE pFpuCtx)
5995{
5996 uint16_t u16Fsw = pFpuCtx->FSW;
5997 if ((u16Fsw & X86_FSW_XCPT_MASK) & ~(pFpuCtx->FCW & X86_FCW_XCPT_MASK))
5998 u16Fsw |= X86_FSW_ES | X86_FSW_B;
5999 else
6000 u16Fsw &= ~(X86_FSW_ES | X86_FSW_B);
6001 pFpuCtx->FSW = u16Fsw;
6002}
6003
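/*
 * Note: ES is the exception summary flag; it must be set exactly when an
 * exception bit in FSW is left unmasked by FCW, which is what the
 * recalculation above restores after FCW changes (e.g. FLDCW).  B mirrors ES
 * on anything newer than the 8087, where it once reflected the BUSY pin.
 */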
6004
6005/**
6006 * Calculates the full FTW (FPU tag word) for use in FNSTENV and FNSAVE.
6007 *
6008 * @returns The full FTW.
6009 * @param pFpuCtx The FPU context.
6010 */
6011IEM_STATIC uint16_t iemFpuCalcFullFtw(PCX86FXSTATE pFpuCtx)
6012{
6013 uint8_t const u8Ftw = (uint8_t)pFpuCtx->FTW;
6014 uint16_t u16Ftw = 0;
6015 unsigned const iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
6016 for (unsigned iSt = 0; iSt < 8; iSt++)
6017 {
6018 unsigned const iReg = (iSt + iTop) & 7;
6019 if (!(u8Ftw & RT_BIT(iReg)))
6020 u16Ftw |= 3 << (iReg * 2); /* empty */
6021 else
6022 {
6023 uint16_t uTag;
6024 PCRTFLOAT80U const pr80Reg = &pFpuCtx->aRegs[iSt].r80;
6025 if (pr80Reg->s.uExponent == 0x7fff)
6026 uTag = 2; /* Exponent is all 1's => Special. */
6027 else if (pr80Reg->s.uExponent == 0x0000)
6028 {
6029 if (pr80Reg->s.u64Mantissa == 0x0000)
6030 uTag = 1; /* All bits are zero => Zero. */
6031 else
6032 uTag = 2; /* Must be special. */
6033 }
6034 else if (pr80Reg->s.u64Mantissa & RT_BIT_64(63)) /* The J bit. */
6035 uTag = 0; /* Valid. */
6036 else
6037 uTag = 2; /* Must be special. */
6038
6039 u16Ftw |= uTag << (iReg * 2); /* valid, zero or special */
6040 }
6041 }
6042
6043 return u16Ftw;
6044}
6045
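/*
 * Note: The full tag word uses two bits per physical register:
 *
 *      00 = valid, 01 = zero, 10 = special (NaN, infinity, denormal,
 *      unsupported), 11 = empty
 *
 * while the fxsave image only stores one bit per register (0 = empty,
 * 1 = occupied), so the expansion above has to re-derive the 2-bit class
 * from the register contents, and iemFpuCompressFtw below simply drops it
 * again.
 */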
6046
6047/**
6048 * Converts a full FTW to a compressed one (for use in FLDENV and FRSTOR).
6049 *
6050 * @returns The compressed FTW.
6051 * @param u16FullFtw The full FTW to convert.
6052 */
6053IEM_STATIC uint16_t iemFpuCompressFtw(uint16_t u16FullFtw)
6054{
6055 uint8_t u8Ftw = 0;
6056 for (unsigned i = 0; i < 8; i++)
6057 {
6058 if ((u16FullFtw & 3) != 3 /*empty*/)
6059 u8Ftw |= RT_BIT(i);
6060 u16FullFtw >>= 2;
6061 }
6062
6063 return u8Ftw;
6064}
6065
6066/** @} */
6067
6068
6069/** @name Memory access.
6070 *
6071 * @{
6072 */
6073
6074
6075/**
6076 * Updates the IEMCPU::cbWritten counter if applicable.
6077 *
6078 * @param pIemCpu The IEM per CPU data.
6079 * @param fAccess The access being accounted for.
6080 * @param cbMem The access size.
6081 */
6082DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PIEMCPU pIemCpu, uint32_t fAccess, size_t cbMem)
6083{
6084 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
6085 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
6086 pIemCpu->cbWritten += (uint32_t)cbMem;
6087}
6088
6089
6090/**
6091 * Checks if the given segment can be written to, raising the appropriate
6092 * exception if not.
6093 *
6094 * @returns VBox strict status code.
6095 *
6096 * @param pIemCpu The IEM per CPU data.
6097 * @param pHid Pointer to the hidden register.
6098 * @param iSegReg The register number.
6099 * @param pu64BaseAddr Where to return the base address to use for the
6100 * segment. (In 64-bit code it may differ from the
6101 * base in the hidden segment.)
6102 */
6103IEM_STATIC VBOXSTRICTRC
6104iemMemSegCheckWriteAccessEx(PIEMCPU pIemCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
6105{
6106 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
6107 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
6108 else
6109 {
6110 if (!pHid->Attr.n.u1Present)
6111 return iemRaiseSelectorNotPresentBySegReg(pIemCpu, iSegReg);
6112
6113 if ( ( (pHid->Attr.n.u4Type & X86_SEL_TYPE_CODE)
6114 || !(pHid->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
6115 && pIemCpu->enmCpuMode != IEMMODE_64BIT )
6116 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, IEM_ACCESS_DATA_W);
6117 *pu64BaseAddr = pHid->u64Base;
6118 }
6119 return VINF_SUCCESS;
6120}
6121
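/*
 * Note: In 64-bit mode segmentation is essentially flat: the ES, CS, SS and
 * DS bases are treated as zero and the present/type/limit checks are skipped,
 * hence the early path above; only FS and GS keep a meaningful base (loaded
 * e.g. via the IA32_FS_BASE / IA32_GS_BASE MSRs).
 */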
6122
6123/**
6124 * Checks if the given segment can be read from, raising the appropriate
6125 * exception if not.
6126 *
6127 * @returns VBox strict status code.
6128 *
6129 * @param pIemCpu The IEM per CPU data.
6130 * @param pHid Pointer to the hidden register.
6131 * @param iSegReg The register number.
6132 * @param pu64BaseAddr Where to return the base address to use for the
6133 * segment. (In 64-bit code it may differ from the
6134 * base in the hidden segment.)
6135 */
6136IEM_STATIC VBOXSTRICTRC
6137iemMemSegCheckReadAccessEx(PIEMCPU pIemCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
6138{
6139 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
6140 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
6141 else
6142 {
6143 if (!pHid->Attr.n.u1Present)
6144 return iemRaiseSelectorNotPresentBySegReg(pIemCpu, iSegReg);
6145
6146 if ((pHid->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
6147 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, IEM_ACCESS_DATA_R);
6148 *pu64BaseAddr = pHid->u64Base;
6149 }
6150 return VINF_SUCCESS;
6151}
6152
6153
6154/**
6155 * Applies the segment limit, base and attributes.
6156 *
6157 * This may raise a \#GP or \#SS.
6158 *
6159 * @returns VBox strict status code.
6160 *
6161 * @param pIemCpu The IEM per CPU data.
6162 * @param fAccess The kind of access which is being performed.
6163 * @param iSegReg The index of the segment register to apply.
6164 * This is UINT8_MAX if none (for IDT, GDT, LDT,
6165 * TSS, ++).
6166 * @param cbMem The access size.
6167 * @param pGCPtrMem Pointer to the guest memory address to apply
6168 * segmentation to. Input and output parameter.
6169 */
6170IEM_STATIC VBOXSTRICTRC
6171iemMemApplySegment(PIEMCPU pIemCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem)
6172{
6173 if (iSegReg == UINT8_MAX)
6174 return VINF_SUCCESS;
6175
6176 PCPUMSELREGHID pSel = iemSRegGetHid(pIemCpu, iSegReg);
6177 switch (pIemCpu->enmCpuMode)
6178 {
6179 case IEMMODE_16BIT:
6180 case IEMMODE_32BIT:
6181 {
6182 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
6183 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
6184
6185 Assert(pSel->Attr.n.u1Present);
6186 Assert(pSel->Attr.n.u1DescType);
6187 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
6188 {
6189 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
6190 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
6191 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, fAccess);
6192
6193 if (!IEM_IS_REAL_OR_V86_MODE(pIemCpu))
6194 {
6195 /** @todo CPL check. */
6196 }
6197
6198 /*
6199 * There are two kinds of data selectors, normal and expand down.
6200 */
6201 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
6202 {
6203 if ( GCPtrFirst32 > pSel->u32Limit
6204 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
6205 return iemRaiseSelectorBounds(pIemCpu, iSegReg, fAccess);
6206 }
6207 else
6208 {
6209 /*
6210 * The upper boundary is defined by the B bit, not the G bit!
6211 */
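/* Note: for expand-down segments the valid offsets are limit+1 up to
   0xFFFFFFFF (B=1) or 0xFFFF (B=0); e.g. limit=0x0FFF with B=1 gives
   the range 0x00001000..0xFFFFFFFF. */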
6212 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
6213 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
6214 return iemRaiseSelectorBounds(pIemCpu, iSegReg, fAccess);
6215 }
6216 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
6217 }
6218 else
6219 {
6220
6221 /*
6222 * Code selectors can usually be used to read through; writing is
6223 * only permitted in real and V8086 mode.
6224 */
6225 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
6226 || ( (fAccess & IEM_ACCESS_TYPE_READ)
6227 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
6228 && !IEM_IS_REAL_OR_V86_MODE(pIemCpu) )
6229 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, fAccess);
6230
6231 if ( GCPtrFirst32 > pSel->u32Limit
6232 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
6233 return iemRaiseSelectorBounds(pIemCpu, iSegReg, fAccess);
6234
6235 if (!IEM_IS_REAL_OR_V86_MODE(pIemCpu))
6236 {
6237 /** @todo CPL check. */
6238 }
6239
6240 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
6241 }
6242 return VINF_SUCCESS;
6243 }
6244
6245 case IEMMODE_64BIT:
6246 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
6247 *pGCPtrMem += pSel->u64Base;
6248 return VINF_SUCCESS;
6249
6250 default:
6251 AssertFailedReturn(VERR_IEM_IPE_7);
6252 }
6253}
6254
6255
6256/**
6257 * Translates a virtual address to a physical address and checks if we
6258 * can access the page as specified.
6259 *
6260 * @param pIemCpu The IEM per CPU data.
6261 * @param GCPtrMem The virtual address.
6262 * @param fAccess The intended access.
6263 * @param pGCPhysMem Where to return the physical address.
6264 */
6265IEM_STATIC VBOXSTRICTRC
6266iemMemPageTranslateAndCheckAccess(PIEMCPU pIemCpu, RTGCPTR GCPtrMem, uint32_t fAccess, PRTGCPHYS pGCPhysMem)
6267{
6268 /** @todo Need a different PGM interface here. We're currently using
6269 * generic / REM interfaces. this won't cut it for R0 & RC. */
6270 RTGCPHYS GCPhys;
6271 uint64_t fFlags;
6272 int rc = PGMGstGetPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrMem, &fFlags, &GCPhys);
6273 if (RT_FAILURE(rc))
6274 {
6275 /** @todo Check unassigned memory in unpaged mode. */
6276 /** @todo Reserved bits in page tables. Requires new PGM interface. */
6277 *pGCPhysMem = NIL_RTGCPHYS;
6278 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess, rc);
6279 }
6280
6281 /* If the page is writable and does not have the no-exec bit set, all
6282 access is allowed. Otherwise we'll have to check more carefully... */
6283 if ((fFlags & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
6284 {
6285 /* Write to read only memory? */
6286 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
6287 && !(fFlags & X86_PTE_RW)
6288 && ( pIemCpu->uCpl != 0
6289 || (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_WP)))
6290 {
6291 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
6292 *pGCPhysMem = NIL_RTGCPHYS;
6293 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
6294 }
6295
6296 /* Kernel memory accessed by userland? */
6297 if ( !(fFlags & X86_PTE_US)
6298 && pIemCpu->uCpl == 3
6299 && !(fAccess & IEM_ACCESS_WHAT_SYS))
6300 {
6301 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
6302 *pGCPhysMem = NIL_RTGCPHYS;
6303 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
6304 }
6305
6306 /* Executing non-executable memory? */
6307 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
6308 && (fFlags & X86_PTE_PAE_NX)
6309 && (pIemCpu->CTX_SUFF(pCtx)->msrEFER & MSR_K6_EFER_NXE) )
6310 {
6311 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
6312 *pGCPhysMem = NIL_RTGCPHYS;
6313 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
6314 VERR_ACCESS_DENIED);
6315 }
6316 }
6317
6318 /*
6319 * Set the dirty / access flags.
6320 * ASSUMES this is set when the address is translated rather than on commit...
6321 */
6322 /** @todo testcase: check when A and D bits are actually set by the CPU. */
6323 uint32_t fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
6324 if ((fFlags & fAccessedDirty) != fAccessedDirty)
6325 {
6326 int rc2 = PGMGstModifyPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
6327 AssertRC(rc2);
6328 }
6329
6330 GCPhys |= GCPtrMem & PAGE_OFFSET_MASK;
6331 *pGCPhysMem = GCPhys;
6332 return VINF_SUCCESS;
6333}
6334
6335
6336
6337/**
6338 * Maps a physical page.
6339 *
6340 * @returns VBox status code (see PGMR3PhysTlbGCPhys2Ptr).
6341 * @param pIemCpu The IEM per CPU data.
6342 * @param GCPhysMem The physical address.
6343 * @param fAccess The intended access.
6344 * @param ppvMem Where to return the mapping address.
6345 * @param pLock The PGM lock.
6346 */
6347IEM_STATIC int iemMemPageMap(PIEMCPU pIemCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, void **ppvMem, PPGMPAGEMAPLOCK pLock)
6348{
6349#ifdef IEM_VERIFICATION_MODE_FULL
6350 /* Force the alternative path so we can ignore writes. */
6351 if ((fAccess & IEM_ACCESS_TYPE_WRITE) && !pIemCpu->fNoRem)
6352 {
6353 if (IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
6354 {
6355 int rc2 = PGMPhysIemQueryAccess(IEMCPU_TO_VM(pIemCpu), GCPhysMem,
6356 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pIemCpu->fBypassHandlers);
6357 if (RT_FAILURE(rc2))
6358 pIemCpu->fProblematicMemory = true;
6359 }
6360 return VERR_PGM_PHYS_TLB_CATCH_ALL;
6361 }
6362#endif
6363#ifdef IEM_LOG_MEMORY_WRITES
6364 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6365 return VERR_PGM_PHYS_TLB_CATCH_ALL;
6366#endif
6367#ifdef IEM_VERIFICATION_MODE_MINIMAL
6368 return VERR_PGM_PHYS_TLB_CATCH_ALL;
6369#endif
6370
6371 /** @todo This API may require some improving later. A private deal with PGM
6372 * regarding locking and unlocking needs to be struck. A couple of TLBs
6373 * living in PGM, but with publicly accessible inlined access methods
6374 * could perhaps be an even better solution. */
6375 int rc = PGMPhysIemGCPhys2Ptr(IEMCPU_TO_VM(pIemCpu), IEMCPU_TO_VMCPU(pIemCpu),
6376 GCPhysMem,
6377 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE),
6378 pIemCpu->fBypassHandlers,
6379 ppvMem,
6380 pLock);
6381 /*Log(("PGMPhysIemGCPhys2Ptr %Rrc pLock=%.*Rhxs\n", rc, sizeof(*pLock), pLock));*/
6382 AssertMsg(rc == VINF_SUCCESS || RT_FAILURE_NP(rc), ("%Rrc\n", rc));
6383
6384#ifdef IEM_VERIFICATION_MODE_FULL
6385 if (RT_FAILURE(rc) && IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
6386 pIemCpu->fProblematicMemory = true;
6387#endif
6388 return rc;
6389}
6390
6391
6392/**
6393 * Unmap a page previously mapped by iemMemPageMap.
6394 *
6395 * @param pIemCpu The IEM per CPU data.
6396 * @param GCPhysMem The physical address.
6397 * @param fAccess The intended access.
6398 * @param pvMem What iemMemPageMap returned.
6399 * @param pLock The PGM lock.
6400 */
6401DECLINLINE(void) iemMemPageUnmap(PIEMCPU pIemCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, const void *pvMem, PPGMPAGEMAPLOCK pLock)
6402{
6403 NOREF(pIemCpu);
6404 NOREF(GCPhysMem);
6405 NOREF(fAccess);
6406 NOREF(pvMem);
6407 PGMPhysReleasePageMappingLock(IEMCPU_TO_VM(pIemCpu), pLock);
6408}
6409
6410
6411/**
6412 * Looks up a memory mapping entry.
6413 *
6414 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
6415 * @param pIemCpu The IEM per CPU data.
6416 * @param pvMem The memory address.
6417 * @param pvMem The memory address.
6418 * @param fAccess The access type to match (IEM_ACCESS_TYPE_XXX and IEM_ACCESS_WHAT_XXX bits).
6418 */
6419DECLINLINE(int) iemMapLookup(PIEMCPU pIemCpu, void *pvMem, uint32_t fAccess)
6420{
6421 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
6422 if ( pIemCpu->aMemMappings[0].pv == pvMem
6423 && (pIemCpu->aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
6424 return 0;
6425 if ( pIemCpu->aMemMappings[1].pv == pvMem
6426 && (pIemCpu->aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
6427 return 1;
6428 if ( pIemCpu->aMemMappings[2].pv == pvMem
6429 && (pIemCpu->aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
6430 return 2;
6431 return VERR_NOT_FOUND;
6432}
6433
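/* Note: there are only three aMemMappings slots (hence the unrolled checks above), and
 * an entry only matches when both the pointer and the type/what access bits agree, so a
 * lookup with mismatched fAccess yields VERR_NOT_FOUND. */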
6434
6435/**
6436 * Finds a free memmap entry when iNextMapping cannot be used directly.
6437 *
6438 * @returns Memory mapping index, 1024 on failure.
6439 * @param pIemCpu The IEM per CPU data.
6440 */
6441IEM_STATIC unsigned iemMemMapFindFree(PIEMCPU pIemCpu)
6442{
6443 /*
6444 * The easy case.
6445 */
6446 if (pIemCpu->cActiveMappings == 0)
6447 {
6448 pIemCpu->iNextMapping = 1;
6449 return 0;
6450 }
6451
6452 /* There should be enough mappings for all instructions. */
6453 AssertReturn(pIemCpu->cActiveMappings < RT_ELEMENTS(pIemCpu->aMemMappings), 1024);
6454
6455 for (unsigned i = 0; i < RT_ELEMENTS(pIemCpu->aMemMappings); i++)
6456 if (pIemCpu->aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
6457 return i;
6458
6459 AssertFailedReturn(1024);
6460}
6461
6462
6463/**
6464 * Commits a bounce buffer that needs writing back and unmaps it.
6465 *
6466 * @returns Strict VBox status code.
6467 * @param pIemCpu The IEM per CPU data.
6468 * @param iMemMap The index of the buffer to commit.
6469 */
6470IEM_STATIC VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PIEMCPU pIemCpu, unsigned iMemMap)
6471{
6472 Assert(pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
6473 Assert(pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
6474
6475 /*
6476 * Do the writing.
6477 */
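 /* Descriptive note: a bounce buffered write may straddle two physical pages, so the
  * data is pushed out in up to two writes (GCPhysFirst/cbFirst and GCPhysSecond/cbSecond),
  * merging any informational status codes from the access handlers along the way. */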
6478#ifndef IEM_VERIFICATION_MODE_MINIMAL
6479 PVM pVM = IEMCPU_TO_VM(pIemCpu);
6480 if ( !pIemCpu->aMemBbMappings[iMemMap].fUnassigned
6481 && !IEM_VERIFICATION_ENABLED(pIemCpu))
6482 {
6483 uint16_t const cbFirst = pIemCpu->aMemBbMappings[iMemMap].cbFirst;
6484 uint16_t const cbSecond = pIemCpu->aMemBbMappings[iMemMap].cbSecond;
6485 uint8_t const *pbBuf = &pIemCpu->aBounceBuffers[iMemMap].ab[0];
6486 if (!pIemCpu->fBypassHandlers)
6487 {
6488 /*
6489 * Carefully and efficiently dealing with access handler return
6490 * codes makes this a little bloated.
6491 */
6492 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
6493 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst,
6494 pbBuf,
6495 cbFirst,
6496 PGMACCESSORIGIN_IEM);
6497 if (rcStrict == VINF_SUCCESS)
6498 {
6499 if (cbSecond)
6500 {
6501 rcStrict = PGMPhysWrite(pVM,
6502 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond,
6503 pbBuf + cbFirst,
6504 cbSecond,
6505 PGMACCESSORIGIN_IEM);
6506 if (rcStrict == VINF_SUCCESS)
6507 { /* nothing */ }
6508 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6509 {
6510 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
6511 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
6512 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
6513 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
6514 }
6515 else
6516 {
6517 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
6518 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
6519 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
6520 return rcStrict;
6521 }
6522 }
6523 }
6524 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6525 {
6526 if (!cbSecond)
6527 {
6528 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
6529 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
6530 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
6531 }
6532 else
6533 {
6534 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
6535 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond,
6536 pbBuf + cbFirst,
6537 cbSecond,
6538 PGMACCESSORIGIN_IEM);
6539 if (rcStrict2 == VINF_SUCCESS)
6540 {
6541 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
6542 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
6543 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
6544 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
6545 }
6546 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
6547 {
6548 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
6549 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
6550 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
6551 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
6552 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
6553 }
6554 else
6555 {
6556 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
6557 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
6558 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
6559 return rcStrict2;
6560 }
6561 }
6562 }
6563 else
6564 {
6565 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
6566 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
6567 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
6568 return rcStrict;
6569 }
6570 }
6571 else
6572 {
6573 /*
6574 * No access handlers, much simpler.
6575 */
6576 int rc = PGMPhysSimpleWriteGCPhys(pVM, pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
6577 if (RT_SUCCESS(rc))
6578 {
6579 if (cbSecond)
6580 {
6581 rc = PGMPhysSimpleWriteGCPhys(pVM, pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
6582 if (RT_SUCCESS(rc))
6583 { /* likely */ }
6584 else
6585 {
6586 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
6587 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
6588 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
6589 return rc;
6590 }
6591 }
6592 }
6593 else
6594 {
6595 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
6596 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
6597 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
6598 return rc;
6599 }
6600 }
6601 }
6602#endif
6603
6604#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
6605 /*
6606 * Record the write(s).
6607 */
6608 if (!pIemCpu->fNoRem)
6609 {
6610 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
6611 if (pEvtRec)
6612 {
6613 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
6614 pEvtRec->u.RamWrite.GCPhys = pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst;
6615 pEvtRec->u.RamWrite.cb = pIemCpu->aMemBbMappings[iMemMap].cbFirst;
6616 memcpy(pEvtRec->u.RamWrite.ab, &pIemCpu->aBounceBuffers[iMemMap].ab[0], pIemCpu->aMemBbMappings[iMemMap].cbFirst);
6617 AssertCompile(sizeof(pEvtRec->u.RamWrite.ab) == sizeof(pIemCpu->aBounceBuffers[0].ab));
6618 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
6619 *pIemCpu->ppIemEvtRecNext = pEvtRec;
6620 }
6621 if (pIemCpu->aMemBbMappings[iMemMap].cbSecond)
6622 {
6623 pEvtRec = iemVerifyAllocRecord(pIemCpu);
6624 if (pEvtRec)
6625 {
6626 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
6627 pEvtRec->u.RamWrite.GCPhys = pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond;
6628 pEvtRec->u.RamWrite.cb = pIemCpu->aMemBbMappings[iMemMap].cbSecond;
6629 memcpy(pEvtRec->u.RamWrite.ab,
6630 &pIemCpu->aBounceBuffers[iMemMap].ab[pIemCpu->aMemBbMappings[iMemMap].cbFirst],
6631 pIemCpu->aMemBbMappings[iMemMap].cbSecond);
6632 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
6633 *pIemCpu->ppIemEvtRecNext = pEvtRec;
6634 }
6635 }
6636 }
6637#endif
6638#if defined(IEM_VERIFICATION_MODE_MINIMAL) || defined(IEM_LOG_MEMORY_WRITES)
6639 Log(("IEM Wrote %RGp: %.*Rhxs\n", pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst,
6640 RT_MAX(RT_MIN(pIemCpu->aMemBbMappings[iMemMap].cbFirst, 64), 1), &pIemCpu->aBounceBuffers[iMemMap].ab[0]));
6641 if (pIemCpu->aMemBbMappings[iMemMap].cbSecond)
6642 Log(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond,
6643 RT_MIN(pIemCpu->aMemBbMappings[iMemMap].cbSecond, 64),
6644 &pIemCpu->aBounceBuffers[iMemMap].ab[pIemCpu->aMemBbMappings[iMemMap].cbFirst]));
6645
6646 size_t cbWrote = pIemCpu->aMemBbMappings[iMemMap].cbFirst + pIemCpu->aMemBbMappings[iMemMap].cbSecond;
6647 g_cbIemWrote = cbWrote;
6648 memcpy(g_abIemWrote, &pIemCpu->aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
6649#endif
6650
6651 /*
6652 * Free the mapping entry.
6653 */
6654 pIemCpu->aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6655 Assert(pIemCpu->cActiveMappings != 0);
6656 pIemCpu->cActiveMappings--;
6657 return VINF_SUCCESS;
6658}
6659
6660
6661/**
6662 * iemMemMap worker that deals with a request crossing pages.
6663 */
6664IEM_STATIC VBOXSTRICTRC
6665iemMemBounceBufferMapCrossPage(PIEMCPU pIemCpu, int iMemMap, void **ppvMem, size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
6666{
6667 /*
6668 * Do the address translations.
6669 */
6670 RTGCPHYS GCPhysFirst;
6671 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, GCPtrFirst, fAccess, &GCPhysFirst);
6672 if (rcStrict != VINF_SUCCESS)
6673 return rcStrict;
6674
6675/** @todo Testcase & AMD-V/VT-x verification: Check if CR2 should really be the
6676 * last byte. */
6677 RTGCPHYS GCPhysSecond;
6678 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, GCPtrFirst + (cbMem - 1), fAccess, &GCPhysSecond);
6679 if (rcStrict != VINF_SUCCESS)
6680 return rcStrict;
6681 GCPhysSecond &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
6682
6683 PVM pVM = IEMCPU_TO_VM(pIemCpu);
6684#ifdef IEM_VERIFICATION_MODE_FULL
6685 /*
6686 * Detect problematic memory when verifying so we can select
6687 * the right execution engine. (TLB: Redo this.)
6688 */
6689 if (IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
6690 {
6691 int rc2 = PGMPhysIemQueryAccess(pVM, GCPhysFirst, RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pIemCpu->fBypassHandlers);
6692 if (RT_SUCCESS(rc2))
6693 rc2 = PGMPhysIemQueryAccess(pVM, GCPhysSecond, RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pIemCpu->fBypassHandlers);
6694 if (RT_FAILURE(rc2))
6695 pIemCpu->fProblematicMemory = true;
6696 }
6697#endif
6698
6699
6700 /*
6701 * Read in the current memory content if it's a read, execute or partial
6702 * write access.
6703 */
6704 uint8_t *pbBuf = &pIemCpu->aBounceBuffers[iMemMap].ab[0];
6705 uint32_t const cbFirstPage = PAGE_SIZE - (GCPhysFirst & PAGE_OFFSET_MASK);
6706 uint32_t const cbSecondPage = (uint32_t)(cbMem - cbFirstPage);
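 /* Example (illustrative): a 4 byte access at a guest address whose page offset is
  * 0xffe gives cbFirstPage = 2 and cbSecondPage = 2. */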
6707
6708 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
6709 {
6710 if (!pIemCpu->fBypassHandlers)
6711 {
6712 /*
6713 * Must carefully deal with access handler status codes here,
6714 * which makes the code a bit bloated.
6715 */
6716 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
6717 if (rcStrict == VINF_SUCCESS)
6718 {
6719 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
6720 if (rcStrict == VINF_SUCCESS)
6721 { /*likely */ }
6722 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6723 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
6724 else
6725 {
6726 Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysSecond=%RGp rcStrict=%Rrc (!!)\n",
6727 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
6728 return rcStrict;
6729 }
6730 }
6731 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6732 {
6733 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
6734 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
6735 {
6736 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
6737 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
6738 }
6739 else
6740 {
6741 Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
6742 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict) ));
6743 return rcStrict2;
6744 }
6745 }
6746 else
6747 {
6748 Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6749 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
6750 return rcStrict;
6751 }
6752 }
6753 else
6754 {
6755 /*
6756 * No informational status codes here, much more straightforward.
6757 */
6758 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
6759 if (RT_SUCCESS(rc))
6760 {
6761 Assert(rc == VINF_SUCCESS);
6762 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
6763 if (RT_SUCCESS(rc))
6764 Assert(rc == VINF_SUCCESS);
6765 else
6766 {
6767 Log(("iemMemBounceBufferMapCrossPage: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
6768 return rc;
6769 }
6770 }
6771 else
6772 {
6773 Log(("iemMemBounceBufferMapCrossPage: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
6774 return rc;
6775 }
6776 }
6777
6778#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
6779 if ( !pIemCpu->fNoRem
6780 && (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC)) )
6781 {
6782 /*
6783 * Record the reads.
6784 */
6785 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
6786 if (pEvtRec)
6787 {
6788 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
6789 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
6790 pEvtRec->u.RamRead.cb = cbFirstPage;
6791 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
6792 *pIemCpu->ppIemEvtRecNext = pEvtRec;
6793 }
6794 pEvtRec = iemVerifyAllocRecord(pIemCpu);
6795 if (pEvtRec)
6796 {
6797 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
6798 pEvtRec->u.RamRead.GCPhys = GCPhysSecond;
6799 pEvtRec->u.RamRead.cb = cbSecondPage;
6800 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
6801 *pIemCpu->ppIemEvtRecNext = pEvtRec;
6802 }
6803 }
6804#endif
6805 }
6806#ifdef VBOX_STRICT
6807 else
6808 memset(pbBuf, 0xcc, cbMem);
6809 if (cbMem < sizeof(pIemCpu->aBounceBuffers[iMemMap].ab))
6810 memset(pbBuf + cbMem, 0xaa, sizeof(pIemCpu->aBounceBuffers[iMemMap].ab) - cbMem);
6811#endif
6812
6813 /*
6814 * Commit the bounce buffer entry.
6815 */
6816 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
6817 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
6818 pIemCpu->aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
6819 pIemCpu->aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
6820 pIemCpu->aMemBbMappings[iMemMap].fUnassigned = false;
6821 pIemCpu->aMemMappings[iMemMap].pv = pbBuf;
6822 pIemCpu->aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
6823 pIemCpu->iNextMapping = iMemMap + 1;
6824 pIemCpu->cActiveMappings++;
6825
6826 iemMemUpdateWrittenCounter(pIemCpu, fAccess, cbMem);
6827 *ppvMem = pbBuf;
6828 return VINF_SUCCESS;
6829}
6830
6831
6832/**
6833 * iemMemMap worker that deals with iemMemPageMap failures.
6834 */
6835IEM_STATIC VBOXSTRICTRC iemMemBounceBufferMapPhys(PIEMCPU pIemCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
6836 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
6837{
6838 /*
6839 * Filter out conditions we can handle and the ones which shouldn't happen.
6840 */
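 /* Descriptive note: VERR_PGM_PHYS_TLB_CATCH_WRITE and VERR_PGM_PHYS_TLB_CATCH_ALL mean an
  * access handler (or similar) covers the page, while VERR_PGM_PHYS_TLB_UNASSIGNED means
  * nothing backs it, in which case reads below return all 0xff. */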
6841 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
6842 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
6843 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
6844 {
6845 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
6846 return rcMap;
6847 }
6848 pIemCpu->cPotentialExits++;
6849
6850 /*
6851 * Read in the current memory content if it's a read, execute or partial
6852 * write access.
6853 */
6854 uint8_t *pbBuf = &pIemCpu->aBounceBuffers[iMemMap].ab[0];
6855 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
6856 {
6857 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
6858 memset(pbBuf, 0xff, cbMem);
6859 else
6860 {
6861 int rc;
6862 if (!pIemCpu->fBypassHandlers)
6863 {
6864 VBOXSTRICTRC rcStrict = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
6865 if (rcStrict == VINF_SUCCESS)
6866 { /* nothing */ }
6867 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6868 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
6869 else
6870 {
6871 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6872 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
6873 return rcStrict;
6874 }
6875 }
6876 else
6877 {
6878 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), pbBuf, GCPhysFirst, cbMem);
6879 if (RT_SUCCESS(rc))
6880 { /* likely */ }
6881 else
6882 {
6883 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6884 GCPhysFirst, rc));
6885 return rc;
6886 }
6887 }
6888 }
6889
6890#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
6891 if ( !pIemCpu->fNoRem
6892 && (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC)) )
6893 {
6894 /*
6895 * Record the read.
6896 */
6897 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
6898 if (pEvtRec)
6899 {
6900 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
6901 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
6902 pEvtRec->u.RamRead.cb = (uint32_t)cbMem;
6903 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
6904 *pIemCpu->ppIemEvtRecNext = pEvtRec;
6905 }
6906 }
6907#endif
6908 }
6909#ifdef VBOX_STRICT
6910 else
6911 memset(pbBuf, 0xcc, cbMem);
6912#endif
6913#ifdef VBOX_STRICT
6914 if (cbMem < sizeof(pIemCpu->aBounceBuffers[iMemMap].ab))
6915 memset(pbBuf + cbMem, 0xaa, sizeof(pIemCpu->aBounceBuffers[iMemMap].ab) - cbMem);
6916#endif
6917
6918 /*
6919 * Commit the bounce buffer entry.
6920 */
6921 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
6922 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
6923 pIemCpu->aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
6924 pIemCpu->aMemBbMappings[iMemMap].cbSecond = 0;
6925 pIemCpu->aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
6926 pIemCpu->aMemMappings[iMemMap].pv = pbBuf;
6927 pIemCpu->aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
6928 pIemCpu->iNextMapping = iMemMap + 1;
6929 pIemCpu->cActiveMappings++;
6930
6931 iemMemUpdateWrittenCounter(pIemCpu, fAccess, cbMem);
6932 *ppvMem = pbBuf;
6933 return VINF_SUCCESS;
6934}
6935
6936
6937
6938/**
6939 * Maps the specified guest memory for the given kind of access.
6940 *
6941 * This may be using bounce buffering of the memory if it's crossing a page
6942 * boundary or if there is an access handler installed for any of it. Because
6943 * of lock prefix guarantees, we're in for some extra clutter when this
6944 * happens.
6945 *
6946 * This may raise a \#GP, \#SS, \#PF or \#AC.
6947 *
6948 * @returns VBox strict status code.
6949 *
6950 * @param pIemCpu The IEM per CPU data.
6951 * @param ppvMem Where to return the pointer to the mapped
6952 * memory.
6953 * @param cbMem The number of bytes to map. This is usually 1,
6954 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
6955 * string operations it can be up to a page.
6956 * @param iSegReg The index of the segment register to use for
6957 * this access. The base and limits are checked.
6958 * Use UINT8_MAX to indicate that no segmentation
6959 * is required (for IDT, GDT and LDT accesses).
6960 * @param GCPtrMem The address of the guest memory.
6961 * @param fAccess How the memory is being accessed. The
6962 * IEM_ACCESS_TYPE_XXX bit is used to figure out
6963 * how to map the memory, while the
6964 * IEM_ACCESS_WHAT_XXX bit is used when raising
6965 * exceptions.
6966 */
6967IEM_STATIC VBOXSTRICTRC
6968iemMemMap(PIEMCPU pIemCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
6969{
6970 /*
6971 * Check the input and figure out which mapping entry to use.
6972 */
6973 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
6974 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
6975
6976 unsigned iMemMap = pIemCpu->iNextMapping;
6977 if ( iMemMap >= RT_ELEMENTS(pIemCpu->aMemMappings)
6978 || pIemCpu->aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
6979 {
6980 iMemMap = iemMemMapFindFree(pIemCpu);
6981 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pIemCpu->aMemMappings),
6982 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pIemCpu->cActiveMappings,
6983 pIemCpu->aMemMappings[0].fAccess, pIemCpu->aMemMappings[1].fAccess,
6984 pIemCpu->aMemMappings[2].fAccess),
6985 VERR_IEM_IPE_9);
6986 }
6987
6988 /*
6989 * Map the memory, checking that we can actually access it. If something
6990 * slightly complicated happens, fall back on bounce buffering.
6991 */
6992 VBOXSTRICTRC rcStrict = iemMemApplySegment(pIemCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
6993 if (rcStrict != VINF_SUCCESS)
6994 return rcStrict;
6995
6996 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem > PAGE_SIZE) /* Crossing a page boundary? */
6997 return iemMemBounceBufferMapCrossPage(pIemCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
6998
6999 RTGCPHYS GCPhysFirst;
7000 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, GCPtrMem, fAccess, &GCPhysFirst);
7001 if (rcStrict != VINF_SUCCESS)
7002 return rcStrict;
7003
7004 void *pvMem;
7005 rcStrict = iemMemPageMap(pIemCpu, GCPhysFirst, fAccess, &pvMem, &pIemCpu->aMemMappingLocks[iMemMap].Lock);
7006 if (rcStrict != VINF_SUCCESS)
7007 return iemMemBounceBufferMapPhys(pIemCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
7008
7009 /*
7010 * Fill in the mapping table entry.
7011 */
7012 pIemCpu->aMemMappings[iMemMap].pv = pvMem;
7013 pIemCpu->aMemMappings[iMemMap].fAccess = fAccess;
7014 pIemCpu->iNextMapping = iMemMap + 1;
7015 pIemCpu->cActiveMappings++;
7016
7017 iemMemUpdateWrittenCounter(pIemCpu, fAccess, cbMem);
7018 *ppvMem = pvMem;
7019 return VINF_SUCCESS;
7020}
7021
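/* Typical usage sketch (illustrative; the fetch/store helpers below follow this pattern):
 *     uint32_t *pu32Dst;
 *     VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst),
 *                                       X86_SREG_DS, GCPtrMem, IEM_ACCESS_DATA_W);
 *     if (rcStrict == VINF_SUCCESS)
 *     {
 *         *pu32Dst = u32Value;
 *         rcStrict = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_DATA_W);
 *     }
 */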
7022
7023/**
7024 * Commits the guest memory if bounce buffered and unmaps it.
7025 *
7026 * @returns Strict VBox status code.
7027 * @param pIemCpu The IEM per CPU data.
7028 * @param pvMem The mapping.
7029 * @param fAccess The kind of access.
7030 */
7031IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PIEMCPU pIemCpu, void *pvMem, uint32_t fAccess)
7032{
7033 int iMemMap = iemMapLookup(pIemCpu, pvMem, fAccess);
7034 AssertReturn(iMemMap >= 0, iMemMap);
7035
7036 /* If it's bounce buffered, we may need to write back the buffer. */
7037 if (pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
7038 {
7039 if (pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
7040 return iemMemBounceBufferCommitAndUnmap(pIemCpu, iMemMap);
7041 }
7042 /* Otherwise unlock it. */
7043 else
7044 PGMPhysReleasePageMappingLock(IEMCPU_TO_VM(pIemCpu), &pIemCpu->aMemMappingLocks[iMemMap].Lock);
7045
7046 /* Free the entry. */
7047 pIemCpu->aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
7048 Assert(pIemCpu->cActiveMappings != 0);
7049 pIemCpu->cActiveMappings--;
7050 return VINF_SUCCESS;
7051}
7052
7053
7054/**
7055 * Rolls back mappings, releasing page locks and such.
7056 *
7057 * The caller shall only call this after checking cActiveMappings.
7058 *
7060 * @param pIemCpu The IEM per CPU data.
7061 */
7062IEM_STATIC void iemMemRollback(PIEMCPU pIemCpu)
7063{
7064 Assert(pIemCpu->cActiveMappings > 0);
7065
7066 uint32_t iMemMap = RT_ELEMENTS(pIemCpu->aMemMappings);
7067 while (iMemMap-- > 0)
7068 {
7069 uint32_t fAccess = pIemCpu->aMemMappings[iMemMap].fAccess;
7070 if (fAccess != IEM_ACCESS_INVALID)
7071 {
7072 pIemCpu->aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
7073 if (!(fAccess & IEM_ACCESS_BOUNCE_BUFFERED))
7074 PGMPhysReleasePageMappingLock(IEMCPU_TO_VM(pIemCpu), &pIemCpu->aMemMappingLocks[iMemMap].Lock);
7075 Assert(pIemCpu->cActiveMappings > 0);
7076 pIemCpu->cActiveMappings--;
7077 }
7078 }
7079}
7080
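/* Note: rolling back discards any bounce buffered data without writing it to guest
 * memory; only direct mappings hold a PGM page lock that needs releasing here. */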
7081
7082/**
7083 * Fetches a data byte.
7084 *
7085 * @returns Strict VBox status code.
7086 * @param pIemCpu The IEM per CPU data.
7087 * @param pu8Dst Where to return the byte.
7088 * @param iSegReg The index of the segment register to use for
7089 * this access. The base and limits are checked.
7090 * @param GCPtrMem The address of the guest memory.
7091 */
7092IEM_STATIC VBOXSTRICTRC iemMemFetchDataU8(PIEMCPU pIemCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7093{
7094 /* The lazy approach for now... */
7095 uint8_t const *pu8Src;
7096 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7097 if (rc == VINF_SUCCESS)
7098 {
7099 *pu8Dst = *pu8Src;
7100 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
7101 }
7102 return rc;
7103}
7104
7105
7106/**
7107 * Fetches a data word.
7108 *
7109 * @returns Strict VBox status code.
7110 * @param pIemCpu The IEM per CPU data.
7111 * @param pu16Dst Where to return the word.
7112 * @param iSegReg The index of the segment register to use for
7113 * this access. The base and limits are checked.
7114 * @param GCPtrMem The address of the guest memory.
7115 */
7116IEM_STATIC VBOXSTRICTRC iemMemFetchDataU16(PIEMCPU pIemCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7117{
7118 /* The lazy approach for now... */
7119 uint16_t const *pu16Src;
7120 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7121 if (rc == VINF_SUCCESS)
7122 {
7123 *pu16Dst = *pu16Src;
7124 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
7125 }
7126 return rc;
7127}
7128
7129
7130/**
7131 * Fetches a data dword.
7132 *
7133 * @returns Strict VBox status code.
7134 * @param pIemCpu The IEM per CPU data.
7135 * @param pu32Dst Where to return the dword.
7136 * @param iSegReg The index of the segment register to use for
7137 * this access. The base and limits are checked.
7138 * @param GCPtrMem The address of the guest memory.
7139 */
7140IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7141{
7142 /* The lazy approach for now... */
7143 uint32_t const *pu32Src;
7144 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7145 if (rc == VINF_SUCCESS)
7146 {
7147 *pu32Dst = *pu32Src;
7148 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
7149 }
7150 return rc;
7151}
7152
7153
7154#ifdef SOME_UNUSED_FUNCTION
7155/**
7156 * Fetches a data dword and sign extends it to a qword.
7157 *
7158 * @returns Strict VBox status code.
7159 * @param pIemCpu The IEM per CPU data.
7160 * @param pu64Dst Where to return the sign extended value.
7161 * @param iSegReg The index of the segment register to use for
7162 * this access. The base and limits are checked.
7163 * @param GCPtrMem The address of the guest memory.
7164 */
7165IEM_STATIC VBOXSTRICTRC iemMemFetchDataS32SxU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7166{
7167 /* The lazy approach for now... */
7168 int32_t const *pi32Src;
7169 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7170 if (rc == VINF_SUCCESS)
7171 {
7172 *pu64Dst = *pi32Src;
7173 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
7174 }
7175#ifdef __GNUC__ /* warning: GCC may be a royal pain */
7176 else
7177 *pu64Dst = 0;
7178#endif
7179 return rc;
7180}
7181#endif
7182
7183
7184/**
7185 * Fetches a data qword.
7186 *
7187 * @returns Strict VBox status code.
7188 * @param pIemCpu The IEM per CPU data.
7189 * @param pu64Dst Where to return the qword.
7190 * @param iSegReg The index of the segment register to use for
7191 * this access. The base and limits are checked.
7192 * @param GCPtrMem The address of the guest memory.
7193 */
7194IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7195{
7196 /* The lazy approach for now... */
7197 uint64_t const *pu64Src;
7198 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7199 if (rc == VINF_SUCCESS)
7200 {
7201 *pu64Dst = *pu64Src;
7202 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
7203 }
7204 return rc;
7205}
7206
7207
7208/**
7209 * Fetches a data qword, aligned at a 16 byte boundary (for SSE).
7210 *
7211 * @returns Strict VBox status code.
7212 * @param pIemCpu The IEM per CPU data.
7213 * @param pu64Dst Where to return the qword.
7214 * @param iSegReg The index of the segment register to use for
7215 * this access. The base and limits are checked.
7216 * @param GCPtrMem The address of the guest memory.
7217 */
7218IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64AlignedU128(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7219{
7220 /* The lazy approach for now... */
7221 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
7222 if (RT_UNLIKELY(GCPtrMem & 15))
7223 return iemRaiseGeneralProtectionFault0(pIemCpu);
7224
7225 uint64_t const *pu64Src;
7226 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7227 if (rc == VINF_SUCCESS)
7228 {
7229 *pu64Dst = *pu64Src;
7230 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
7231 }
7232 return rc;
7233}
7234
7235
7236/**
7237 * Fetches a data tword.
7238 *
7239 * @returns Strict VBox status code.
7240 * @param pIemCpu The IEM per CPU data.
7241 * @param pr80Dst Where to return the tword.
7242 * @param iSegReg The index of the segment register to use for
7243 * this access. The base and limits are checked.
7244 * @param GCPtrMem The address of the guest memory.
7245 */
7246IEM_STATIC VBOXSTRICTRC iemMemFetchDataR80(PIEMCPU pIemCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7247{
7248 /* The lazy approach for now... */
7249 PCRTFLOAT80U pr80Src;
7250 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pr80Src, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7251 if (rc == VINF_SUCCESS)
7252 {
7253 *pr80Dst = *pr80Src;
7254 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
7255 }
7256 return rc;
7257}
7258
7259
7260/**
7261 * Fetches a data dqword (double qword), generally SSE related.
7262 *
7263 * @returns Strict VBox status code.
7264 * @param pIemCpu The IEM per CPU data.
7265 * @param pu128Dst Where to return the dqword.
7266 * @param iSegReg The index of the segment register to use for
7267 * this access. The base and limits are checked.
7268 * @param GCPtrMem The address of the guest memory.
7269 */
7270IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128(PIEMCPU pIemCpu, uint128_t *pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7271{
7272 /* The lazy approach for now... */
7273 uint128_t const *pu128Src;
7274 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7275 if (rc == VINF_SUCCESS)
7276 {
7277 *pu128Dst = *pu128Src;
7278 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7279 }
7280 return rc;
7281}
7282
7283
7284/**
7285 * Fetches a data dqword (double qword) at an aligned address, generally SSE
7286 * related.
7287 *
7288 * Raises \#GP(0) if not aligned.
7289 *
7290 * @returns Strict VBox status code.
7291 * @param pIemCpu The IEM per CPU data.
7292 * @param pu128Dst Where to return the dqword.
7293 * @param iSegReg The index of the segment register to use for
7294 * this access. The base and limits are checked.
7295 * @param GCPtrMem The address of the guest memory.
7296 */
7297IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PIEMCPU pIemCpu, uint128_t *pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7298{
7299 /* The lazy approach for now... */
7300 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
7301 if ( (GCPtrMem & 15)
7302 && !(pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXSCR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
7303 return iemRaiseGeneralProtectionFault0(pIemCpu);
7304
7305 uint128_t const *pu128Src;
7306 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7307 if (rc == VINF_SUCCESS)
7308 {
7309 *pu128Dst = *pu128Src;
7310 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7311 }
7312 return rc;
7313}
7314
7315
7316
7317
7318/**
7319 * Fetches a descriptor register (lgdt, lidt).
7320 *
7321 * @returns Strict VBox status code.
7322 * @param pIemCpu The IEM per CPU data.
7323 * @param pcbLimit Where to return the limit.
7324 * @param pGCPtrBase Where to return the base.
7325 * @param iSegReg The index of the segment register to use for
7326 * this access. The base and limits are checked.
7327 * @param GCPtrMem The address of the guest memory.
7328 * @param enmOpSize The effective operand size.
7329 */
7330IEM_STATIC VBOXSTRICTRC iemMemFetchDataXdtr(PIEMCPU pIemCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
7331 RTGCPTR GCPtrMem, IEMMODE enmOpSize)
7332{
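 /* Descriptive note: the descriptor table image is a 16-bit limit followed by a 24-,
  * 32- or 64-bit base, hence the 2+3, 2+4 and 2+8 byte mappings below. */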
7333 uint8_t const *pu8Src;
7334 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu,
7335 (void **)&pu8Src,
7336 enmOpSize == IEMMODE_64BIT
7337 ? 2 + 8
7338 : enmOpSize == IEMMODE_32BIT
7339 ? 2 + 4
7340 : 2 + 3,
7341 iSegReg,
7342 GCPtrMem,
7343 IEM_ACCESS_DATA_R);
7344 if (rcStrict == VINF_SUCCESS)
7345 {
7346 *pcbLimit = RT_MAKE_U16(pu8Src[0], pu8Src[1]);
7347 switch (enmOpSize)
7348 {
7349 case IEMMODE_16BIT:
7350 *pGCPtrBase = RT_MAKE_U32_FROM_U8(pu8Src[2], pu8Src[3], pu8Src[4], 0);
7351 break;
7352 case IEMMODE_32BIT:
7353 *pGCPtrBase = RT_MAKE_U32_FROM_U8(pu8Src[2], pu8Src[3], pu8Src[4], pu8Src[5]);
7354 break;
7355 case IEMMODE_64BIT:
7356 *pGCPtrBase = RT_MAKE_U64_FROM_U8(pu8Src[2], pu8Src[3], pu8Src[4], pu8Src[5],
7357 pu8Src[6], pu8Src[7], pu8Src[8], pu8Src[9]);
7358 break;
7359
7360 IEM_NOT_REACHED_DEFAULT_CASE_RET();
7361 }
7362 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
7363 }
7364 return rcStrict;
7365}
7366
7367
7368
7369/**
7370 * Stores a data byte.
7371 *
7372 * @returns Strict VBox status code.
7373 * @param pIemCpu The IEM per CPU data.
7374 * @param iSegReg The index of the segment register to use for
7375 * this access. The base and limits are checked.
7376 * @param GCPtrMem The address of the guest memory.
7377 * @param u8Value The value to store.
7378 */
7379IEM_STATIC VBOXSTRICTRC iemMemStoreDataU8(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
7380{
7381 /* The lazy approach for now... */
7382 uint8_t *pu8Dst;
7383 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7384 if (rc == VINF_SUCCESS)
7385 {
7386 *pu8Dst = u8Value;
7387 rc = iemMemCommitAndUnmap(pIemCpu, pu8Dst, IEM_ACCESS_DATA_W);
7388 }
7389 return rc;
7390}
7391
7392
7393/**
7394 * Stores a data word.
7395 *
7396 * @returns Strict VBox status code.
7397 * @param pIemCpu The IEM per CPU data.
7398 * @param iSegReg The index of the segment register to use for
7399 * this access. The base and limits are checked.
7400 * @param GCPtrMem The address of the guest memory.
7401 * @param u16Value The value to store.
7402 */
7403IEM_STATIC VBOXSTRICTRC iemMemStoreDataU16(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
7404{
7405 /* The lazy approach for now... */
7406 uint16_t *pu16Dst;
7407 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7408 if (rc == VINF_SUCCESS)
7409 {
7410 *pu16Dst = u16Value;
7411 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_DATA_W);
7412 }
7413 return rc;
7414}
7415
7416
7417/**
7418 * Stores a data dword.
7419 *
7420 * @returns Strict VBox status code.
7421 * @param pIemCpu The IEM per CPU data.
7422 * @param iSegReg The index of the segment register to use for
7423 * this access. The base and limits are checked.
7424 * @param GCPtrMem The address of the guest memory.
7425 * @param u32Value The value to store.
7426 */
7427IEM_STATIC VBOXSTRICTRC iemMemStoreDataU32(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
7428{
7429 /* The lazy approach for now... */
7430 uint32_t *pu32Dst;
7431 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7432 if (rc == VINF_SUCCESS)
7433 {
7434 *pu32Dst = u32Value;
7435 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_DATA_W);
7436 }
7437 return rc;
7438}
7439
7440
7441/**
7442 * Stores a data qword.
7443 *
7444 * @returns Strict VBox status code.
7445 * @param pIemCpu The IEM per CPU data.
7446 * @param iSegReg The index of the segment register to use for
7447 * this access. The base and limits are checked.
7448 * @param GCPtrMem The address of the guest memory.
7449 * @param u64Value The value to store.
7450 */
7451IEM_STATIC VBOXSTRICTRC iemMemStoreDataU64(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
7452{
7453 /* The lazy approach for now... */
7454 uint64_t *pu64Dst;
7455 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7456 if (rc == VINF_SUCCESS)
7457 {
7458 *pu64Dst = u64Value;
7459 rc = iemMemCommitAndUnmap(pIemCpu, pu64Dst, IEM_ACCESS_DATA_W);
7460 }
7461 return rc;
7462}
7463
7464
7465/**
7466 * Stores a data dqword.
7467 *
7468 * @returns Strict VBox status code.
7469 * @param pIemCpu The IEM per CPU data.
7470 * @param iSegReg The index of the segment register to use for
7471 * this access. The base and limits are checked.
7472 * @param GCPtrMem The address of the guest memory.
7473 * @param u128Value The value to store.
7474 */
7475IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint128_t u128Value)
7476{
7477 /* The lazy approach for now... */
7478 uint128_t *pu128Dst;
7479 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7480 if (rc == VINF_SUCCESS)
7481 {
7482 *pu128Dst = u128Value;
7483 rc = iemMemCommitAndUnmap(pIemCpu, pu128Dst, IEM_ACCESS_DATA_W);
7484 }
7485 return rc;
7486}
7487
7488
7489/**
7490 * Stores a data dqword, SSE aligned.
7491 *
7492 * @returns Strict VBox status code.
7493 * @param pIemCpu The IEM per CPU data.
7494 * @param iSegReg The index of the segment register to use for
7495 * this access. The base and limits are checked.
7496 * @param GCPtrMem The address of the guest memory.
7497 * @param u128Value The value to store.
7498 */
7499IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint128_t u128Value)
7500{
7501 /* The lazy approach for now... */
7502 if ( (GCPtrMem & 15)
7503 && !(pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXSCR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
7504 return iemRaiseGeneralProtectionFault0(pIemCpu);
7505
7506 uint128_t *pu128Dst;
7507 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7508 if (rc == VINF_SUCCESS)
7509 {
7510 *pu128Dst = u128Value;
7511 rc = iemMemCommitAndUnmap(pIemCpu, pu128Dst, IEM_ACCESS_DATA_W);
7512 }
7513 return rc;
7514}
7515
7516
7517/**
7518 * Stores a descriptor register (sgdt, sidt).
7519 *
7520 * @returns Strict VBox status code.
7521 * @param pIemCpu The IEM per CPU data.
7522 * @param cbLimit The limit.
7523 * @param GCPtrBase The base address.
7524 * @param iSegReg The index of the segment register to use for
7525 * this access. The base and limits are checked.
7526 * @param GCPtrMem The address of the guest memory.
7527 * @param enmOpSize The effective operand size.
7528 */
7529IEM_STATIC VBOXSTRICTRC
7530iemMemStoreDataXdtr(PIEMCPU pIemCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem, IEMMODE enmOpSize)
7531{
7532 uint8_t *pu8Src;
7533 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu,
7534 (void **)&pu8Src,
7535 enmOpSize == IEMMODE_64BIT
7536 ? 2 + 8
7537 : enmOpSize == IEMMODE_32BIT
7538 ? 2 + 4
7539 : 2 + 3,
7540 iSegReg,
7541 GCPtrMem,
7542 IEM_ACCESS_DATA_W);
7543 if (rcStrict == VINF_SUCCESS)
7544 {
7545 pu8Src[0] = RT_BYTE1(cbLimit);
7546 pu8Src[1] = RT_BYTE2(cbLimit);
7547 pu8Src[2] = RT_BYTE1(GCPtrBase);
7548 pu8Src[3] = RT_BYTE2(GCPtrBase);
7549 pu8Src[4] = RT_BYTE3(GCPtrBase);
7550 if (enmOpSize == IEMMODE_16BIT)
7551 pu8Src[5] = 0; /* Note! the 286 stored 0xff here. */
7552 else
7553 {
7554 pu8Src[5] = RT_BYTE4(GCPtrBase);
7555 if (enmOpSize == IEMMODE_64BIT)
7556 {
7557 pu8Src[6] = RT_BYTE5(GCPtrBase);
7558 pu8Src[7] = RT_BYTE6(GCPtrBase);
7559 pu8Src[8] = RT_BYTE7(GCPtrBase);
7560 pu8Src[9] = RT_BYTE8(GCPtrBase);
7561 }
7562 }
7563 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pu8Src, IEM_ACCESS_DATA_W);
7564 }
7565 return rcStrict;
7566}
7567
7568
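/* All of the stack helpers below follow the same pattern: compute the new stack pointer
 * with iemRegGetRspForPush / iemRegGetRspForPop (or the *Ex variants for a temporary RSP),
 * map the stack memory through iemMemMap with X86_SREG_SS, do the actual read or write,
 * and only commit the new RSP once the commit/unmap call has succeeded. */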
7569/**
7570 * Pushes a word onto the stack.
7571 *
7572 * @returns Strict VBox status code.
7573 * @param pIemCpu The IEM per CPU data.
7574 * @param u16Value The value to push.
7575 */
7576IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PIEMCPU pIemCpu, uint16_t u16Value)
7577{
7578 /* Decrement the stack pointer. */
7579 uint64_t uNewRsp;
7580 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7581 RTGCPTR GCPtrTop = iemRegGetRspForPush(pIemCpu, pCtx, 2, &uNewRsp);
7582
7583 /* Write the word the lazy way. */
7584 uint16_t *pu16Dst;
7585 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7586 if (rc == VINF_SUCCESS)
7587 {
7588 *pu16Dst = u16Value;
7589 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_STACK_W);
7590 }
7591
7592 /* Commit the new RSP value unless an access handler made trouble. */
7593 if (rc == VINF_SUCCESS)
7594 pCtx->rsp = uNewRsp;
7595
7596 return rc;
7597}
7598
7599
7600/**
7601 * Pushes a dword onto the stack.
7602 *
7603 * @returns Strict VBox status code.
7604 * @param pIemCpu The IEM per CPU data.
7605 * @param u32Value The value to push.
7606 */
7607IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PIEMCPU pIemCpu, uint32_t u32Value)
7608{
7609 /* Decrement the stack pointer. */
7610 uint64_t uNewRsp;
7611 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7612 RTGCPTR GCPtrTop = iemRegGetRspForPush(pIemCpu, pCtx, 4, &uNewRsp);
7613
7614 /* Write the dword the lazy way. */
7615 uint32_t *pu32Dst;
7616 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7617 if (rc == VINF_SUCCESS)
7618 {
7619 *pu32Dst = u32Value;
7620 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_STACK_W);
7621 }
7622
7623 /* Commit the new RSP value unless an access handler made trouble. */
7624 if (rc == VINF_SUCCESS)
7625 pCtx->rsp = uNewRsp;
7626
7627 return rc;
7628}
7629
7630
7631/**
7632 * Pushes a dword segment register value onto the stack.
7633 *
7634 * @returns Strict VBox status code.
7635 * @param pIemCpu The IEM per CPU data.
7636 * @param u32Value The value to push.
7637 */
7638IEM_STATIC VBOXSTRICTRC iemMemStackPushU32SReg(PIEMCPU pIemCpu, uint32_t u32Value)
7639{
7640 /* Decrement the stack pointer. */
7641 uint64_t uNewRsp;
7642 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7643 RTGCPTR GCPtrTop = iemRegGetRspForPush(pIemCpu, pCtx, 4, &uNewRsp);
7644
7645 VBOXSTRICTRC rc;
7646 if (IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
7647 {
7648 /* The recompiler writes a full dword. */
7649 uint32_t *pu32Dst;
7650 rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7651 if (rc == VINF_SUCCESS)
7652 {
7653 *pu32Dst = u32Value;
7654 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_STACK_W);
7655 }
7656 }
7657 else
7658 {
7659 /* The Intel docs talk about zero extending the selector register
7660 value. My actual Intel CPU here might be zero extending the value,
7661 but it still only writes the lower word... */
7662 /** @todo Test this on new HW and on AMD and in 64-bit mode. Also test what
7663 * happens when crossing an electric page boundary: is the high word checked
7664 * for write accessibility or not? Probably it is. What about segment limits?
7665 * It appears this behavior is also shared with trap error codes.
7666 *
7667 * Docs indicate the behavior changed maybe in Pentium or Pentium Pro. Check
7668 * ancient hardware when it actually did change. */
7669 uint16_t *pu16Dst;
7670 rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(uint32_t), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_RW);
7671 if (rc == VINF_SUCCESS)
7672 {
7673 *pu16Dst = (uint16_t)u32Value;
7674 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_STACK_RW);
7675 }
7676 }
7677
7678 /* Commit the new RSP value unless an access handler made trouble. */
7679 if (rc == VINF_SUCCESS)
7680 pCtx->rsp = uNewRsp;
7681
7682 return rc;
7683}
7684
7685
7686/**
7687 * Pushes a qword onto the stack.
7688 *
7689 * @returns Strict VBox status code.
7690 * @param pIemCpu The IEM per CPU data.
7691 * @param u64Value The value to push.
7692 */
7693IEM_STATIC VBOXSTRICTRC iemMemStackPushU64(PIEMCPU pIemCpu, uint64_t u64Value)
7694{
7695 /* Decrement the stack pointer. */
7696 uint64_t uNewRsp;
7697 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7698 RTGCPTR GCPtrTop = iemRegGetRspForPush(pIemCpu, pCtx, 8, &uNewRsp);
7699
7700 /* Write the qword the lazy way. */
7701 uint64_t *pu64Dst;
7702 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7703 if (rc == VINF_SUCCESS)
7704 {
7705 *pu64Dst = u64Value;
7706 rc = iemMemCommitAndUnmap(pIemCpu, pu64Dst, IEM_ACCESS_STACK_W);
7707 }
7708
7709 /* Commit the new RSP value unless an access handler made trouble. */
7710 if (rc == VINF_SUCCESS)
7711 pCtx->rsp = uNewRsp;
7712
7713 return rc;
7714}
7715
7716
7717/**
7718 * Pops a word from the stack.
7719 *
7720 * @returns Strict VBox status code.
7721 * @param pIemCpu The IEM per CPU data.
7722 * @param pu16Value Where to store the popped value.
7723 */
7724IEM_STATIC VBOXSTRICTRC iemMemStackPopU16(PIEMCPU pIemCpu, uint16_t *pu16Value)
7725{
7726 /* Increment the stack pointer. */
7727 uint64_t uNewRsp;
7728 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7729 RTGCPTR GCPtrTop = iemRegGetRspForPop(pIemCpu, pCtx, 2, &uNewRsp);
7730
7731 /* Read the word the lazy way. */
7732 uint16_t const *pu16Src;
7733 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
7734 if (rc == VINF_SUCCESS)
7735 {
7736 *pu16Value = *pu16Src;
7737 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
7738
7739 /* Commit the new RSP value. */
7740 if (rc == VINF_SUCCESS)
7741 pCtx->rsp = uNewRsp;
7742 }
7743
7744 return rc;
7745}
7746
7747
7748/**
7749 * Pops a dword from the stack.
7750 *
7751 * @returns Strict VBox status code.
7752 * @param pIemCpu The IEM per CPU data.
7753 * @param pu32Value Where to store the popped value.
7754 */
7755IEM_STATIC VBOXSTRICTRC iemMemStackPopU32(PIEMCPU pIemCpu, uint32_t *pu32Value)
7756{
7757 /* Increment the stack pointer. */
7758 uint64_t uNewRsp;
7759 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7760 RTGCPTR GCPtrTop = iemRegGetRspForPop(pIemCpu, pCtx, 4, &uNewRsp);
7761
7762 /* Read the dword the lazy way. */
7763 uint32_t const *pu32Src;
7764 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
7765 if (rc == VINF_SUCCESS)
7766 {
7767 *pu32Value = *pu32Src;
7768 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
7769
7770 /* Commit the new RSP value. */
7771 if (rc == VINF_SUCCESS)
7772 pCtx->rsp = uNewRsp;
7773 }
7774
7775 return rc;
7776}
7777
7778
7779/**
7780 * Pops a qword from the stack.
7781 *
7782 * @returns Strict VBox status code.
7783 * @param pIemCpu The IEM per CPU data.
7784 * @param pu64Value Where to store the popped value.
7785 */
7786IEM_STATIC VBOXSTRICTRC iemMemStackPopU64(PIEMCPU pIemCpu, uint64_t *pu64Value)
7787{
7788 /* Increment the stack pointer. */
7789 uint64_t uNewRsp;
7790 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7791 RTGCPTR GCPtrTop = iemRegGetRspForPop(pIemCpu, pCtx, 8, &uNewRsp);
7792
7793 /* Read the qword the lazy way. */
7794 uint64_t const *pu64Src;
7795 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
7796 if (rc == VINF_SUCCESS)
7797 {
7798 *pu64Value = *pu64Src;
7799 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
7800
7801 /* Commit the new RSP value. */
7802 if (rc == VINF_SUCCESS)
7803 pCtx->rsp = uNewRsp;
7804 }
7805
7806 return rc;
7807}
7808
7809
7810/**
7811 * Pushes a word onto the stack, using a temporary stack pointer.
7812 *
7813 * @returns Strict VBox status code.
7814 * @param pIemCpu The IEM per CPU data.
7815 * @param u16Value The value to push.
7816 * @param pTmpRsp Pointer to the temporary stack pointer.
7817 */
7818IEM_STATIC VBOXSTRICTRC iemMemStackPushU16Ex(PIEMCPU pIemCpu, uint16_t u16Value, PRTUINT64U pTmpRsp)
7819{
7820 /* Decrement the stack pointer. */
7821 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7822 RTUINT64U NewRsp = *pTmpRsp;
7823 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pIemCpu, pCtx, &NewRsp, 2);
7824
7825 /* Write the word the lazy way. */
7826 uint16_t *pu16Dst;
7827 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7828 if (rc == VINF_SUCCESS)
7829 {
7830 *pu16Dst = u16Value;
7831 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_STACK_W);
7832 }
7833
7834 /* Commit the new RSP value unless an access handler made trouble. */
7835 if (rc == VINF_SUCCESS)
7836 *pTmpRsp = NewRsp;
7837
7838 return rc;
7839}
7840
7841
7842/**
7843 * Pushes a dword onto the stack, using a temporary stack pointer.
7844 *
7845 * @returns Strict VBox status code.
7846 * @param pIemCpu The IEM per CPU data.
7847 * @param u32Value The value to push.
7848 * @param pTmpRsp Pointer to the temporary stack pointer.
7849 */
7850IEM_STATIC VBOXSTRICTRC iemMemStackPushU32Ex(PIEMCPU pIemCpu, uint32_t u32Value, PRTUINT64U pTmpRsp)
7851{
7852 /* Decrement the stack pointer. */
7853 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7854 RTUINT64U NewRsp = *pTmpRsp;
7855 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pIemCpu, pCtx, &NewRsp, 4);
7856
7857 /* Write the dword the lazy way. */
7858 uint32_t *pu32Dst;
7859 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7860 if (rc == VINF_SUCCESS)
7861 {
7862 *pu32Dst = u32Value;
7863 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_STACK_W);
7864 }
7865
7866 /* Commit the new RSP value unless an access handler made trouble. */
7867 if (rc == VINF_SUCCESS)
7868 *pTmpRsp = NewRsp;
7869
7870 return rc;
7871}
7872
7873
7874/**
7875 * Pushes a qword onto the stack, using a temporary stack pointer.
7876 *
7877 * @returns Strict VBox status code.
7878 * @param pIemCpu The IEM per CPU data.
7879 * @param u64Value The value to push.
7880 * @param pTmpRsp Pointer to the temporary stack pointer.
7881 */
7882IEM_STATIC VBOXSTRICTRC iemMemStackPushU64Ex(PIEMCPU pIemCpu, uint64_t u64Value, PRTUINT64U pTmpRsp)
7883{
7884 /* Decrement the stack pointer. */
7885 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7886 RTUINT64U NewRsp = *pTmpRsp;
7887 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pIemCpu, pCtx, &NewRsp, 8);
7888
7889 /* Write the qword the lazy way. */
7890 uint64_t *pu64Dst;
7891 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7892 if (rc == VINF_SUCCESS)
7893 {
7894 *pu64Dst = u64Value;
7895 rc = iemMemCommitAndUnmap(pIemCpu, pu64Dst, IEM_ACCESS_STACK_W);
7896 }
7897
7898 /* Commit the new RSP value unless an access handler made trouble. */
7899 if (rc == VINF_SUCCESS)
7900 *pTmpRsp = NewRsp;
7901
7902 return rc;
7903}
7904
7905
7906/**
7907 * Pops a word from the stack, using a temporary stack pointer.
7908 *
7909 * @returns Strict VBox status code.
7910 * @param pIemCpu The IEM per CPU data.
7911 * @param pu16Value Where to store the popped value.
7912 * @param pTmpRsp Pointer to the temporary stack pointer.
7913 */
7914IEM_STATIC VBOXSTRICTRC iemMemStackPopU16Ex(PIEMCPU pIemCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp)
7915{
7916 /* Increment the stack pointer. */
7917 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7918 RTUINT64U NewRsp = *pTmpRsp;
7919 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pIemCpu, pCtx, &NewRsp, 2);
7920
7921 /* Read the word the lazy way. */
7922 uint16_t const *pu16Src;
7923 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
7924 if (rc == VINF_SUCCESS)
7925 {
7926 *pu16Value = *pu16Src;
7927 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
7928
7929 /* Commit the new RSP value. */
7930 if (rc == VINF_SUCCESS)
7931 *pTmpRsp = NewRsp;
7932 }
7933
7934 return rc;
7935}
7936
7937
7938/**
7939 * Pops a dword from the stack, using a temporary stack pointer.
7940 *
7941 * @returns Strict VBox status code.
7942 * @param pIemCpu The IEM per CPU data.
7943 * @param pu32Value Where to store the popped value.
7944 * @param pTmpRsp Pointer to the temporary stack pointer.
7945 */
7946IEM_STATIC VBOXSTRICTRC iemMemStackPopU32Ex(PIEMCPU pIemCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp)
7947{
7948 /* Increment the stack pointer. */
7949 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7950 RTUINT64U NewRsp = *pTmpRsp;
7951 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pIemCpu, pCtx, &NewRsp, 4);
7952
7953 /* Read the dword the lazy way. */
7954 uint32_t const *pu32Src;
7955 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
7956 if (rc == VINF_SUCCESS)
7957 {
7958 *pu32Value = *pu32Src;
7959 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
7960
7961 /* Commit the new RSP value. */
7962 if (rc == VINF_SUCCESS)
7963 *pTmpRsp = NewRsp;
7964 }
7965
7966 return rc;
7967}
7968
7969
7970/**
7971 * Pops a qword from the stack, using a temporary stack pointer.
7972 *
7973 * @returns Strict VBox status code.
7974 * @param pIemCpu The IEM per CPU data.
7975 * @param pu64Value Where to store the popped value.
7976 * @param pTmpRsp Pointer to the temporary stack pointer.
7977 */
7978IEM_STATIC VBOXSTRICTRC iemMemStackPopU64Ex(PIEMCPU pIemCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp)
7979{
7980 /* Increment the stack pointer. */
7981 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7982 RTUINT64U NewRsp = *pTmpRsp;
7983 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pIemCpu, pCtx, &NewRsp, 8);
7984
7985 /* Read the qword the lazy way. */
7986 uint64_t const *pu64Src;
7987 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
7988 if (rcStrict == VINF_SUCCESS)
7989 {
7990 *pu64Value = *pu64Src;
7991 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
7992
7993 /* Commit the new RSP value. */
7994 if (rcStrict == VINF_SUCCESS)
7995 *pTmpRsp = NewRsp;
7996 }
7997
7998 return rcStrict;
7999}
8000
8001
8002/**
8003 * Begin a special stack push (used by interrupts, exceptions and such).
8004 *
8005 * This will raise \#SS or \#PF if appropriate.
8006 *
8007 * @returns Strict VBox status code.
8008 * @param pIemCpu The IEM per CPU data.
8009 * @param cbMem The number of bytes to push onto the stack.
8010 * @param ppvMem Where to return the pointer to the stack memory.
8011 * As with the other memory functions this could be
8012 * direct access or bounce buffered access, so
8013 * don't commit registers until the commit call
8014 * succeeds.
8015 * @param puNewRsp Where to return the new RSP value. This must be
8016 * passed unchanged to
8017 * iemMemStackPushCommitSpecial().
8018 */
8019IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PIEMCPU pIemCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp)
8020{
8021 Assert(cbMem < UINT8_MAX);
8022 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
8023 RTGCPTR GCPtrTop = iemRegGetRspForPush(pIemCpu, pCtx, (uint8_t)cbMem, puNewRsp);
8024 return iemMemMap(pIemCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
8025}
8026
8027
8028/**
8029 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
8030 *
8031 * This will update the rSP.
8032 *
8033 * @returns Strict VBox status code.
8034 * @param pIemCpu The IEM per CPU data.
8035 * @param pvMem The pointer returned by
8036 * iemMemStackPushBeginSpecial().
8037 * @param uNewRsp The new RSP value returned by
8038 * iemMemStackPushBeginSpecial().
8039 */
8040IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PIEMCPU pIemCpu, void *pvMem, uint64_t uNewRsp)
8041{
8042 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pIemCpu, pvMem, IEM_ACCESS_STACK_W);
8043 if (rcStrict == VINF_SUCCESS)
8044 pIemCpu->CTX_SUFF(pCtx)->rsp = uNewRsp;
8045 return rcStrict;
8046}
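/*
 * Rough sketch of the begin/commit push protocol (illustrative; the real users
 * are the exception and interrupt dispatch paths): map the stack area, fill in
 * the frame, and only then commit both the memory and the new RSP.
 *
 * @code
 *  uint64_t  uNewRsp;
 *  uint64_t *pau64Frame;                   // illustrative frame of three qwords
 *  VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pIemCpu, 3 * sizeof(uint64_t),
 *                                                      (void **)&pau64Frame, &uNewRsp);
 *  if (rcStrict == VINF_SUCCESS)
 *  {
 *      pau64Frame[0] = 0;                  // ... fill in the frame here ...
 *      rcStrict = iemMemStackPushCommitSpecial(pIemCpu, pau64Frame, uNewRsp);
 *  }
 * @endcode
 */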
8047
8048
8049/**
8050 * Begin a special stack pop (used by iret, retf and such).
8051 *
8052 * This will raise \#SS or \#PF if appropriate.
8053 *
8054 * @returns Strict VBox status code.
8055 * @param pIemCpu The IEM per CPU data.
8056 * @param cbMem The number of bytes to pop off the stack.
8057 * @param ppvMem Where to return the pointer to the stack memory.
8058 * @param puNewRsp Where to return the new RSP value. This must be
8059 * passed unchanged to
8060 * iemMemStackPopCommitSpecial() or applied
8061 * manually if iemMemStackPopDoneSpecial() is used.
8062 */
8063IEM_STATIC VBOXSTRICTRC iemMemStackPopBeginSpecial(PIEMCPU pIemCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
8064{
8065 Assert(cbMem < UINT8_MAX);
8066 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
8067 RTGCPTR GCPtrTop = iemRegGetRspForPop(pIemCpu, pCtx, (uint8_t)cbMem, puNewRsp);
8068 return iemMemMap(pIemCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
8069}
8070
8071
8072/**
8073 * Continue a special stack pop (used by iret and retf).
8074 *
8075 * This will raise \#SS or \#PF if appropriate.
8076 *
8077 * @returns Strict VBox status code.
8078 * @param pIemCpu The IEM per CPU data.
8079 * @param cbMem The number of bytes to pop off the stack.
8080 * @param ppvMem Where to return the pointer to the stack memory.
8081 * @param puNewRsp Where to return the new RSP value. This must be
8082 * passed unchanged to
8083 * iemMemStackPopCommitSpecial() or applied
8084 * manually if iemMemStackPopDoneSpecial() is used.
8085 */
8086IEM_STATIC VBOXSTRICTRC iemMemStackPopContinueSpecial(PIEMCPU pIemCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
8087{
8088 Assert(cbMem < UINT8_MAX);
8089 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
8090 RTUINT64U NewRsp;
8091 NewRsp.u = *puNewRsp;
8092 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pIemCpu, pCtx, &NewRsp, 8);
8093 *puNewRsp = NewRsp.u;
8094 return iemMemMap(pIemCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
8095}
8096
8097
8098/**
8099 * Commits a special stack pop (started by iemMemStackPopBeginSpecial).
8100 *
8101 * This will update the rSP.
8102 *
8103 * @returns Strict VBox status code.
8104 * @param pIemCpu The IEM per CPU data.
8105 * @param pvMem The pointer returned by
8106 * iemMemStackPopBeginSpecial().
8107 * @param uNewRsp The new RSP value returned by
8108 * iemMemStackPopBeginSpecial().
8109 */
8110IEM_STATIC VBOXSTRICTRC iemMemStackPopCommitSpecial(PIEMCPU pIemCpu, void const *pvMem, uint64_t uNewRsp)
8111{
8112 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
8113 if (rcStrict == VINF_SUCCESS)
8114 pIemCpu->CTX_SUFF(pCtx)->rsp = uNewRsp;
8115 return rcStrict;
8116}
8117
8118
8119/**
8120 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
8121 * iemMemStackPopContinueSpecial).
8122 *
8123 * The caller will manually commit the rSP.
8124 *
8125 * @returns Strict VBox status code.
8126 * @param pIemCpu The IEM per CPU data.
8127 * @param pvMem The pointer returned by
8128 * iemMemStackPopBeginSpecial() or
8129 * iemMemStackPopContinueSpecial().
8130 */
8131IEM_STATIC VBOXSTRICTRC iemMemStackPopDoneSpecial(PIEMCPU pIemCpu, void const *pvMem)
8132{
8133 return iemMemCommitAndUnmap(pIemCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
8134}
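/*
 * Rough sketch of the special pop protocol (illustrative; iret and retf are
 * the real users): map the top of the stack, read what is needed, release the
 * mapping with the Done variant, and apply the new RSP manually once all the
 * checks have passed.
 *
 * @code
 *  uint64_t        uNewRsp;
 *  uint64_t const *pau64Frame;             // illustrative frame of two qwords
 *  VBOXSTRICTRC rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 2 * sizeof(uint64_t),
 *                                                     (void const **)&pau64Frame, &uNewRsp);
 *  if (rcStrict == VINF_SUCCESS)
 *  {
 *      uint64_t const uValue0 = pau64Frame[0];
 *      uint64_t const uValue1 = pau64Frame[1];
 *      rcStrict = iemMemStackPopDoneSpecial(pIemCpu, pau64Frame);
 *      if (rcStrict == VINF_SUCCESS)
 *          pIemCpu->CTX_SUFF(pCtx)->rsp = uNewRsp; // the manual RSP commit
 *  }
 * @endcode
 */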
8135
8136
8137/**
8138 * Fetches a system table byte.
8139 *
8140 * @returns Strict VBox status code.
8141 * @param pIemCpu The IEM per CPU data.
8142 * @param pbDst Where to return the byte.
8143 * @param iSegReg The index of the segment register to use for
8144 * this access. The base and limits are checked.
8145 * @param GCPtrMem The address of the guest memory.
8146 */
8147IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PIEMCPU pIemCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8148{
8149 /* The lazy approach for now... */
8150 uint8_t const *pbSrc;
8151 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pbSrc, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
8152 if (rc == VINF_SUCCESS)
8153 {
8154 *pbDst = *pbSrc;
8155 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pbSrc, IEM_ACCESS_SYS_R);
8156 }
8157 return rc;
8158}
8159
8160
8161/**
8162 * Fetches a system table word.
8163 *
8164 * @returns Strict VBox status code.
8165 * @param pIemCpu The IEM per CPU data.
8166 * @param pu16Dst Where to return the word.
8167 * @param iSegReg The index of the segment register to use for
8168 * this access. The base and limits are checked.
8169 * @param GCPtrMem The address of the guest memory.
8170 */
8171IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PIEMCPU pIemCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8172{
8173 /* The lazy approach for now... */
8174 uint16_t const *pu16Src;
8175 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
8176 if (rc == VINF_SUCCESS)
8177 {
8178 *pu16Dst = *pu16Src;
8179 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_SYS_R);
8180 }
8181 return rc;
8182}
8183
8184
8185/**
8186 * Fetches a system table dword.
8187 *
8188 * @returns Strict VBox status code.
8189 * @param pIemCpu The IEM per CPU data.
8190 * @param pu32Dst Where to return the dword.
8191 * @param iSegReg The index of the segment register to use for
8192 * this access. The base and limits are checked.
8193 * @param GCPtrMem The address of the guest memory.
8194 */
8195IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8196{
8197 /* The lazy approach for now... */
8198 uint32_t const *pu32Src;
8199 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
8200 if (rc == VINF_SUCCESS)
8201 {
8202 *pu32Dst = *pu32Src;
8203 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);
8204 }
8205 return rc;
8206}
8207
8208
8209/**
8210 * Fetches a system table qword.
8211 *
8212 * @returns Strict VBox status code.
8213 * @param pIemCpu The IEM per CPU data.
8214 * @param pu64Dst Where to return the qword.
8215 * @param iSegReg The index of the segment register to use for
8216 * this access. The base and limits are checked.
8217 * @param GCPtrMem The address of the guest memory.
8218 */
8219IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8220{
8221 /* The lazy approach for now... */
8222 uint64_t const *pu64Src;
8223 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
8224 if (rc == VINF_SUCCESS)
8225 {
8226 *pu64Dst = *pu64Src;
8227 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);
8228 }
8229 return rc;
8230}
8231
8232
8233/**
8234 * Fetches a descriptor table entry with caller specified error code.
8235 *
8236 * @returns Strict VBox status code.
8237 * @param pIemCpu The IEM per CPU.
8238 * @param pDesc Where to return the descriptor table entry.
8239 * @param uSel The selector which table entry to fetch.
8240 * @param uXcpt The exception to raise on table lookup error.
8241 * @param uErrorCode The error code associated with the exception.
8242 */
8243IEM_STATIC VBOXSTRICTRC
8244iemMemFetchSelDescWithErr(PIEMCPU pIemCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode)
8245{
8246 AssertPtr(pDesc);
8247 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
8248
8249 /** @todo did the 286 require all 8 bytes to be accessible? */
8250 /*
8251 * Get the selector table base and check bounds.
8252 */
8253 RTGCPTR GCPtrBase;
8254 if (uSel & X86_SEL_LDT)
8255 {
8256 if ( !pCtx->ldtr.Attr.n.u1Present
8257 || (uSel | X86_SEL_RPL_LDT) > pCtx->ldtr.u32Limit )
8258 {
8259 Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
8260 uSel, pCtx->ldtr.u32Limit, pCtx->ldtr.Sel));
8261 return iemRaiseXcptOrInt(pIemCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
8262 uErrorCode, 0);
8263 }
8264
8265 Assert(pCtx->ldtr.Attr.n.u1Present);
8266 GCPtrBase = pCtx->ldtr.u64Base;
8267 }
8268 else
8269 {
8270 if ((uSel | X86_SEL_RPL_LDT) > pCtx->gdtr.cbGdt)
8271 {
8272 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pCtx->gdtr.cbGdt));
8273 return iemRaiseXcptOrInt(pIemCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
8274 uErrorCode, 0);
8275 }
8276 GCPtrBase = pCtx->gdtr.pGdt;
8277 }
8278
8279 /*
8280 * Read the legacy descriptor and maybe the long mode extensions if
8281 * required.
8282 */
8283 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pIemCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
8284 if (rcStrict == VINF_SUCCESS)
8285 {
8286 if ( !IEM_IS_LONG_MODE(pIemCpu)
8287 || pDesc->Legacy.Gen.u1DescType)
8288 pDesc->Long.au64[1] = 0;
8289 else if ((uint32_t)(uSel | X86_SEL_RPL_LDT) + 8 <= (uSel & X86_SEL_LDT ? pCtx->ldtr.u32Limit : pCtx->gdtr.cbGdt))
8290 rcStrict = iemMemFetchSysU64(pIemCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
8291 else
8292 {
8293 Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
8294 /** @todo is this the right exception? */
8295 return iemRaiseXcptOrInt(pIemCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
8296 }
8297 }
8298 return rcStrict;
8299}
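/*
 * Address arithmetic note (a sketch with an illustrative selector value): the
 * descriptor starts at the table base plus the selector with the RPL and TI
 * bits masked off, and the long mode high half follows 8 bytes later. The
 * (uSel | X86_SEL_RPL_LDT) + 1 expression above is just a compact way of
 * writing (uSel & X86_SEL_MASK) + 8.
 *
 * @code
 *  uint16_t const uSel    = 0x002b;                          // RPL=3, GDT
 *  uint16_t const offLow  = uSel & X86_SEL_MASK;             // 0x0028: low 8 bytes
 *  uint16_t const offHigh = (uSel | X86_SEL_RPL_LDT) + 1;    // 0x0030 == offLow + 8
 * @endcode
 */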
8300
8301
8302/**
8303 * Fetches a descriptor table entry.
8304 *
8305 * @returns Strict VBox status code.
8306 * @param pIemCpu The IEM per CPU.
8307 * @param pDesc Where to return the descriptor table entry.
8308 * @param uSel The selector which table entry to fetch.
8309 * @param uXcpt The exception to raise on table lookup error.
8310 */
8311IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PIEMCPU pIemCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt)
8312{
8313 return iemMemFetchSelDescWithErr(pIemCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
8314}
8315
8316
8317/**
8318 * Fakes a long mode stack selector for SS = 0.
8319 *
8320 * @param pDescSs Where to return the fake stack descriptor.
8321 * @param uDpl The DPL we want.
8322 */
8323IEM_STATIC void iemMemFakeStackSelDesc(PIEMSELDESC pDescSs, uint32_t uDpl)
8324{
8325 pDescSs->Long.au64[0] = 0;
8326 pDescSs->Long.au64[1] = 0;
8327 pDescSs->Long.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
8328 pDescSs->Long.Gen.u1DescType = 1; /* 1 = code / data, 0 = system. */
8329 pDescSs->Long.Gen.u2Dpl = uDpl;
8330 pDescSs->Long.Gen.u1Present = 1;
8331 pDescSs->Long.Gen.u1Long = 1;
8332}
8333
8334
8335/**
8336 * Marks the selector descriptor as accessed (only non-system descriptors).
8337 *
8338 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
8339 * will therefore skip the limit checks.
8340 *
8341 * @returns Strict VBox status code.
8342 * @param pIemCpu The IEM per CPU.
8343 * @param uSel The selector.
8344 */
8345IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PIEMCPU pIemCpu, uint16_t uSel)
8346{
8347 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
8348
8349 /*
8350 * Get the selector table base and calculate the entry address.
8351 */
8352 RTGCPTR GCPtr = uSel & X86_SEL_LDT
8353 ? pCtx->ldtr.u64Base
8354 : pCtx->gdtr.pGdt;
8355 GCPtr += uSel & X86_SEL_MASK;
8356
8357 /*
8358 * ASMAtomicBitSet will assert if the address is misaligned, so do some
8359 * ugly stuff to avoid this. This will make sure it's an atomic access
8360 * as well as more or less remove any question about 8-bit or 32-bit accesses.
8361 */
8362 VBOXSTRICTRC rcStrict;
8363 uint32_t volatile *pu32;
8364 if ((GCPtr & 3) == 0)
8365 {
8366 /* The normal case, map the 32 bits around the accessed bit (40). */
8367 GCPtr += 2 + 2;
8368 rcStrict = iemMemMap(pIemCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
8369 if (rcStrict != VINF_SUCCESS)
8370 return rcStrict;
8371 ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
8372 }
8373 else
8374 {
8375 /* The misaligned GDT/LDT case, map the whole thing. */
8376 rcStrict = iemMemMap(pIemCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
8377 if (rcStrict != VINF_SUCCESS)
8378 return rcStrict;
8379 switch ((uintptr_t)pu32 & 3)
8380 {
8381 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
8382 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
8383 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
8384 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
8385 }
8386 }
8387
8388 return iemMemCommitAndUnmap(pIemCpu, (void *)pu32, IEM_ACCESS_SYS_RW);
8389}
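/*
 * Bit position note for the aligned case above (a sketch; like the code it
 * assumes a little endian host): the accessed flag is descriptor bit 40, i.e.
 * bit 0 of byte 5, which becomes bit 8 of the dword mapped at descriptor
 * offset 4.
 *
 * @code
 *  unsigned const iDescBit  = 40;                // accessed flag position within the descriptor
 *  unsigned const iDwordBit = iDescBit - 4 * 8;  // == 8, matching ASMAtomicBitSet(pu32, 8) above
 * @endcode
 */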
8390
8391/** @} */
8392
8393
8394/*
8395 * Include the C/C++ implementations of the instructions.
8396 */
8397#include "IEMAllCImpl.cpp.h"
8398
8399
8400
8401/** @name "Microcode" macros.
8402 *
8403 * The idea is that we should be able to use the same code to interpret
8404 * instructions as well as to feed a future recompiler. Thus this obfuscation.
8405 * A schematic usage example follows right after this comment.
8406 * @{
8407 */
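/*
 * A schematic example of how these blocks compose inside an opcode handler
 * (not an actual handler from the instruction tables; the register choice and
 * worker are illustrative):
 *
 * @code
 *  IEM_MC_BEGIN(3, 0);
 *  IEM_MC_ARG(uint16_t *, pu16Dst,  0);
 *  IEM_MC_ARG(uint16_t,   u16Src,   1);
 *  IEM_MC_ARG(uint32_t *, pEFlags,  2);
 *  IEM_MC_FETCH_GREG_U16(u16Src, X86_GREG_xCX);
 *  IEM_MC_REF_GREG_U16(pu16Dst, X86_GREG_xAX);
 *  IEM_MC_REF_EFLAGS(pEFlags);
 *  IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_add_u16, pu16Dst, u16Src, pEFlags);
 *  IEM_MC_ADVANCE_RIP();
 *  IEM_MC_END();
 * @endcode
 */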
8408#define IEM_MC_BEGIN(a_cArgs, a_cLocals) {
8409#define IEM_MC_END() }
8410#define IEM_MC_PAUSE() do {} while (0)
8411#define IEM_MC_CONTINUE() do {} while (0)
8412
8413/** Internal macro. */
8414#define IEM_MC_RETURN_ON_FAILURE(a_Expr) \
8415 do \
8416 { \
8417 VBOXSTRICTRC rcStrict2 = a_Expr; \
8418 if (rcStrict2 != VINF_SUCCESS) \
8419 return rcStrict2; \
8420 } while (0)
8421
8422#define IEM_MC_ADVANCE_RIP() iemRegUpdateRipAndClearRF(pIemCpu)
8423#define IEM_MC_REL_JMP_S8(a_i8) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS8(pIemCpu, a_i8))
8424#define IEM_MC_REL_JMP_S16(a_i16) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS16(pIemCpu, a_i16))
8425#define IEM_MC_REL_JMP_S32(a_i32) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS32(pIemCpu, a_i32))
8426#define IEM_MC_SET_RIP_U16(a_u16NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pIemCpu), (a_u16NewIP)))
8427#define IEM_MC_SET_RIP_U32(a_u32NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pIemCpu), (a_u32NewIP)))
8428#define IEM_MC_SET_RIP_U64(a_u64NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pIemCpu), (a_u64NewIP)))
8429
8430#define IEM_MC_RAISE_DIVIDE_ERROR() return iemRaiseDivideError(pIemCpu)
8431#define IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() \
8432 do { \
8433 if ((pIemCpu)->CTX_SUFF(pCtx)->cr0 & (X86_CR0_EM | X86_CR0_TS)) \
8434 return iemRaiseDeviceNotAvailable(pIemCpu); \
8435 } while (0)
8436#define IEM_MC_MAYBE_RAISE_FPU_XCPT() \
8437 do { \
8438 if ((pIemCpu)->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FSW & X86_FSW_ES) \
8439 return iemRaiseMathFault(pIemCpu); \
8440 } while (0)
8441#define IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT() \
8442 do { \
8443 if ( (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
8444 || !(pIemCpu->CTX_SUFF(pCtx)->cr4 & X86_CR4_OSFXSR) \
8445 || !IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fSse2) \
8446 return iemRaiseUndefinedOpcode(pIemCpu); \
8447 if (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_TS) \
8448 return iemRaiseDeviceNotAvailable(pIemCpu); \
8449 } while (0)
8450#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT() \
8451 do { \
8452 if ( ((pIemCpu)->CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
8453 || !IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fMmx) \
8454 return iemRaiseUndefinedOpcode(pIemCpu); \
8455 if (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_TS) \
8456 return iemRaiseDeviceNotAvailable(pIemCpu); \
8457 } while (0)
8458#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT_CHECK_SSE_OR_MMXEXT() \
8459 do { \
8460 if ( ((pIemCpu)->CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
8461 || ( !IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fSse \
8462 && !IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fAmdMmxExts) ) \
8463 return iemRaiseUndefinedOpcode(pIemCpu); \
8464 if (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_TS) \
8465 return iemRaiseDeviceNotAvailable(pIemCpu); \
8466 } while (0)
8467#define IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO() \
8468 do { \
8469 if (pIemCpu->uCpl != 0) \
8470 return iemRaiseGeneralProtectionFault0(pIemCpu); \
8471 } while (0)
8472
8473
8474#define IEM_MC_LOCAL(a_Type, a_Name) a_Type a_Name
8475#define IEM_MC_LOCAL_CONST(a_Type, a_Name, a_Value) a_Type const a_Name = (a_Value)
8476#define IEM_MC_REF_LOCAL(a_pRefArg, a_Local) (a_pRefArg) = &(a_Local)
8477#define IEM_MC_ARG(a_Type, a_Name, a_iArg) a_Type a_Name
8478#define IEM_MC_ARG_CONST(a_Type, a_Name, a_Value, a_iArg) a_Type const a_Name = (a_Value)
8479#define IEM_MC_ARG_LOCAL_REF(a_Type, a_Name, a_Local, a_iArg) a_Type const a_Name = &(a_Local)
8480#define IEM_MC_ARG_LOCAL_EFLAGS(a_pName, a_Name, a_iArg) \
8481 uint32_t a_Name; \
8482 uint32_t *a_pName = &a_Name
8483#define IEM_MC_COMMIT_EFLAGS(a_EFlags) \
8484 do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u = (a_EFlags); Assert((pIemCpu)->CTX_SUFF(pCtx)->eflags.u & X86_EFL_1); } while (0)
8485
8486#define IEM_MC_ASSIGN(a_VarOrArg, a_CVariableOrConst) (a_VarOrArg) = (a_CVariableOrConst)
8487#define IEM_MC_ASSIGN_TO_SMALLER IEM_MC_ASSIGN
8488
8489#define IEM_MC_FETCH_GREG_U8(a_u8Dst, a_iGReg) (a_u8Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
8490#define IEM_MC_FETCH_GREG_U8_ZX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
8491#define IEM_MC_FETCH_GREG_U8_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
8492#define IEM_MC_FETCH_GREG_U8_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
8493#define IEM_MC_FETCH_GREG_U8_SX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = (int8_t)iemGRegFetchU8(pIemCpu, (a_iGReg))
8494#define IEM_MC_FETCH_GREG_U8_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int8_t)iemGRegFetchU8(pIemCpu, (a_iGReg))
8495#define IEM_MC_FETCH_GREG_U8_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int8_t)iemGRegFetchU8(pIemCpu, (a_iGReg))
8496#define IEM_MC_FETCH_GREG_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU16(pIemCpu, (a_iGReg))
8497#define IEM_MC_FETCH_GREG_U16_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU16(pIemCpu, (a_iGReg))
8498#define IEM_MC_FETCH_GREG_U16_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU16(pIemCpu, (a_iGReg))
8499#define IEM_MC_FETCH_GREG_U16_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int16_t)iemGRegFetchU16(pIemCpu, (a_iGReg))
8500#define IEM_MC_FETCH_GREG_U16_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int16_t)iemGRegFetchU16(pIemCpu, (a_iGReg))
8501#define IEM_MC_FETCH_GREG_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU32(pIemCpu, (a_iGReg))
8502#define IEM_MC_FETCH_GREG_U32_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU32(pIemCpu, (a_iGReg))
8503#define IEM_MC_FETCH_GREG_U32_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int32_t)iemGRegFetchU32(pIemCpu, (a_iGReg))
8504#define IEM_MC_FETCH_GREG_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU64(pIemCpu, (a_iGReg))
8505#define IEM_MC_FETCH_GREG_U64_ZX_U64 IEM_MC_FETCH_GREG_U64
8506#define IEM_MC_FETCH_SREG_U16(a_u16Dst, a_iSReg) (a_u16Dst) = iemSRegFetchU16(pIemCpu, (a_iSReg))
8507#define IEM_MC_FETCH_SREG_ZX_U32(a_u32Dst, a_iSReg) (a_u32Dst) = iemSRegFetchU16(pIemCpu, (a_iSReg))
8508#define IEM_MC_FETCH_SREG_ZX_U64(a_u64Dst, a_iSReg) (a_u64Dst) = iemSRegFetchU16(pIemCpu, (a_iSReg))
8509#define IEM_MC_FETCH_CR0_U16(a_u16Dst) (a_u16Dst) = (uint16_t)(pIemCpu)->CTX_SUFF(pCtx)->cr0
8510#define IEM_MC_FETCH_CR0_U32(a_u32Dst) (a_u32Dst) = (uint32_t)(pIemCpu)->CTX_SUFF(pCtx)->cr0
8511#define IEM_MC_FETCH_CR0_U64(a_u64Dst) (a_u64Dst) = (pIemCpu)->CTX_SUFF(pCtx)->cr0
8512#define IEM_MC_FETCH_LDTR_U16(a_u16Dst) (a_u16Dst) = (pIemCpu)->CTX_SUFF(pCtx)->ldtr.Sel
8513#define IEM_MC_FETCH_LDTR_U32(a_u32Dst) (a_u32Dst) = (pIemCpu)->CTX_SUFF(pCtx)->ldtr.Sel
8514#define IEM_MC_FETCH_LDTR_U64(a_u64Dst) (a_u64Dst) = (pIemCpu)->CTX_SUFF(pCtx)->ldtr.Sel
8515#define IEM_MC_FETCH_TR_U16(a_u16Dst) (a_u16Dst) = (pIemCpu)->CTX_SUFF(pCtx)->tr.Sel
8516#define IEM_MC_FETCH_TR_U32(a_u32Dst) (a_u32Dst) = (pIemCpu)->CTX_SUFF(pCtx)->tr.Sel
8517#define IEM_MC_FETCH_TR_U64(a_u64Dst) (a_u64Dst) = (pIemCpu)->CTX_SUFF(pCtx)->tr.Sel
8518/** @note Not for IOPL or IF testing or modification. */
8519#define IEM_MC_FETCH_EFLAGS(a_EFlags) (a_EFlags) = (pIemCpu)->CTX_SUFF(pCtx)->eflags.u
8520#define IEM_MC_FETCH_EFLAGS_U8(a_EFlags) (a_EFlags) = (uint8_t)(pIemCpu)->CTX_SUFF(pCtx)->eflags.u
8521#define IEM_MC_FETCH_FSW(a_u16Fsw) (a_u16Fsw) = pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FSW
8522#define IEM_MC_FETCH_FCW(a_u16Fcw) (a_u16Fcw) = pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FCW
8523
8524#define IEM_MC_STORE_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8(pIemCpu, (a_iGReg)) = (a_u8Value)
8525#define IEM_MC_STORE_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) = (a_u16Value)
8526#define IEM_MC_STORE_GREG_U32(a_iGReg, a_u32Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) = (uint32_t)(a_u32Value) /* clear high bits. */
8527#define IEM_MC_STORE_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) = (a_u64Value)
8528#define IEM_MC_STORE_GREG_U8_CONST IEM_MC_STORE_GREG_U8
8529#define IEM_MC_STORE_GREG_U16_CONST IEM_MC_STORE_GREG_U16
8530#define IEM_MC_STORE_GREG_U32_CONST IEM_MC_STORE_GREG_U32
8531#define IEM_MC_STORE_GREG_U64_CONST IEM_MC_STORE_GREG_U64
8532#define IEM_MC_CLEAR_HIGH_GREG_U64(a_iGReg) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= UINT32_MAX
8533#define IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(a_pu32Dst) do { (a_pu32Dst)[1] = 0; } while (0)
8534#define IEM_MC_STORE_FPUREG_R80_SRC_REF(a_iSt, a_pr80Src) \
8535 do { pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[a_iSt].r80 = *(a_pr80Src); } while (0)
8536
8537#define IEM_MC_REF_GREG_U8(a_pu8Dst, a_iGReg) (a_pu8Dst) = iemGRegRefU8(pIemCpu, (a_iGReg))
8538#define IEM_MC_REF_GREG_U16(a_pu16Dst, a_iGReg) (a_pu16Dst) = (uint16_t *)iemGRegRef(pIemCpu, (a_iGReg))
8539/** @todo User of IEM_MC_REF_GREG_U32 needs to clear the high bits on commit.
8540 * Use IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF! */
8541#define IEM_MC_REF_GREG_U32(a_pu32Dst, a_iGReg) (a_pu32Dst) = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg))
8542#define IEM_MC_REF_GREG_U64(a_pu64Dst, a_iGReg) (a_pu64Dst) = (uint64_t *)iemGRegRef(pIemCpu, (a_iGReg))
8543/** @note Not for IOPL or IF testing or modification. */
8544#define IEM_MC_REF_EFLAGS(a_pEFlags) (a_pEFlags) = &(pIemCpu)->CTX_SUFF(pCtx)->eflags.u
8545
8546#define IEM_MC_ADD_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) += (a_u8Value)
8547#define IEM_MC_ADD_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) += (a_u16Value)
8548#define IEM_MC_ADD_GREG_U32(a_iGReg, a_u32Value) \
8549 do { \
8550 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
8551 *pu32Reg += (a_u32Value); \
8552 pu32Reg[1] = 0; /* implicitly clear the high half. */ \
8553 } while (0)
8554#define IEM_MC_ADD_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) += (a_u64Value)
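/*
 * Note on the pu32Reg[1] = 0 statements in the 32-bit variants here (a sketch
 * of the intent; like the macros it assumes a little endian host): a 32-bit
 * write to a general register in 64-bit mode zero extends into the whole
 * 64-bit register, so after updating the low dword through the 32-bit pointer
 * the high dword is cleared explicitly.
 *
 * @code
 *  uint64_t  u64Reg  = UINT64_C(0xdeadbeef00000001);    // illustrative register value
 *  uint32_t *pu32Reg = (uint32_t *)&u64Reg;
 *  *pu32Reg += 1;                                        // the low half update...
 *  pu32Reg[1] = 0;                                       // ...and the architectural zero extension
 *  // u64Reg is now UINT64_C(0x0000000000000002) on a little endian host.
 * @endcode
 */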
8555
8556#define IEM_MC_SUB_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) -= (a_u8Value)
8557#define IEM_MC_SUB_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) -= (a_u16Value)
8558#define IEM_MC_SUB_GREG_U32(a_iGReg, a_u32Value) \
8559 do { \
8560 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
8561 *pu32Reg -= (a_u32Value); \
8562 pu32Reg[1] = 0; /* implicitly clear the high half. */ \
8563 } while (0)
8564#define IEM_MC_SUB_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) -= (a_u64Value)
8565
8566#define IEM_MC_ADD_GREG_U8_TO_LOCAL(a_u8Value, a_iGReg) do { (a_u8Value) += iemGRegFetchU8( pIemCpu, (a_iGReg)); } while (0)
8567#define IEM_MC_ADD_GREG_U16_TO_LOCAL(a_u16Value, a_iGReg) do { (a_u16Value) += iemGRegFetchU16(pIemCpu, (a_iGReg)); } while (0)
8568#define IEM_MC_ADD_GREG_U32_TO_LOCAL(a_u32Value, a_iGReg) do { (a_u32Value) += iemGRegFetchU32(pIemCpu, (a_iGReg)); } while (0)
8569#define IEM_MC_ADD_GREG_U64_TO_LOCAL(a_u64Value, a_iGReg) do { (a_u64Value) += iemGRegFetchU64(pIemCpu, (a_iGReg)); } while (0)
8570#define IEM_MC_ADD_LOCAL_S16_TO_EFF_ADDR(a_EffAddr, a_i16) do { (a_EffAddr) += (a_i16); } while (0)
8571#define IEM_MC_ADD_LOCAL_S32_TO_EFF_ADDR(a_EffAddr, a_i32) do { (a_EffAddr) += (a_i32); } while (0)
8572#define IEM_MC_ADD_LOCAL_S64_TO_EFF_ADDR(a_EffAddr, a_i64) do { (a_EffAddr) += (a_i64); } while (0)
8573
8574#define IEM_MC_AND_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) &= (a_u8Mask); } while (0)
8575#define IEM_MC_AND_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) &= (a_u16Mask); } while (0)
8576#define IEM_MC_AND_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
8577#define IEM_MC_AND_LOCAL_U64(a_u64Local, a_u64Mask) do { (a_u64Local) &= (a_u64Mask); } while (0)
8578
8579#define IEM_MC_AND_ARG_U16(a_u16Arg, a_u16Mask) do { (a_u16Arg) &= (a_u16Mask); } while (0)
8580#define IEM_MC_AND_ARG_U32(a_u32Arg, a_u32Mask) do { (a_u32Arg) &= (a_u32Mask); } while (0)
8581#define IEM_MC_AND_ARG_U64(a_u64Arg, a_u64Mask) do { (a_u64Arg) &= (a_u64Mask); } while (0)
8582
8583#define IEM_MC_OR_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) |= (a_u8Mask); } while (0)
8584#define IEM_MC_OR_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
8585
8586#define IEM_MC_SAR_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) >>= (a_cShift); } while (0)
8587#define IEM_MC_SAR_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) >>= (a_cShift); } while (0)
8588#define IEM_MC_SAR_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) >>= (a_cShift); } while (0)
8589
8590#define IEM_MC_SHL_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) <<= (a_cShift); } while (0)
8591#define IEM_MC_SHL_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) <<= (a_cShift); } while (0)
8592#define IEM_MC_SHL_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) <<= (a_cShift); } while (0)
8593
8594#define IEM_MC_AND_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
8595
8596#define IEM_MC_OR_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
8597
8598#define IEM_MC_AND_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= (a_u8Value)
8599#define IEM_MC_AND_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= (a_u16Value)
8600#define IEM_MC_AND_GREG_U32(a_iGReg, a_u32Value) \
8601 do { \
8602 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
8603 *pu32Reg &= (a_u32Value); \
8604 pu32Reg[1] = 0; /* implicitly clear the high half. */ \
8605 } while (0)
8606#define IEM_MC_AND_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= (a_u64Value)
8607
8608#define IEM_MC_OR_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) |= (a_u8Value)
8609#define IEM_MC_OR_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) |= (a_u16Value)
8610#define IEM_MC_OR_GREG_U32(a_iGReg, a_u32Value) \
8611 do { \
8612 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
8613 *pu32Reg |= (a_u32Value); \
8614 pu32Reg[1] = 0; /* implicitly clear the high half. */ \
8615 } while (0)
8616#define IEM_MC_OR_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) |= (a_u64Value)
8617
8618
8619/** @note Not for IOPL or IF modification. */
8620#define IEM_MC_SET_EFL_BIT(a_fBit) do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u |= (a_fBit); } while (0)
8621/** @note Not for IOPL or IF modification. */
8622#define IEM_MC_CLEAR_EFL_BIT(a_fBit) do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u &= ~(a_fBit); } while (0)
8623/** @note Not for IOPL or IF modification. */
8624#define IEM_MC_FLIP_EFL_BIT(a_fBit) do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u ^= (a_fBit); } while (0)
8625
8626#define IEM_MC_CLEAR_FSW_EX() do { (pIemCpu)->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FSW &= X86_FSW_C_MASK | X86_FSW_TOP_MASK; } while (0)
8627
8628
8629#define IEM_MC_FETCH_MREG_U64(a_u64Value, a_iMReg) \
8630 do { (a_u64Value) = pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx; } while (0)
8631#define IEM_MC_FETCH_MREG_U32(a_u32Value, a_iMReg) \
8632 do { (a_u32Value) = pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[0]; } while (0)
8633#define IEM_MC_STORE_MREG_U64(a_iMReg, a_u64Value) \
8634 do { pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (a_u64Value); } while (0)
8635#define IEM_MC_STORE_MREG_U32_ZX_U64(a_iMReg, a_u32Value) \
8636 do { pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (uint32_t)(a_u32Value); } while (0)
8637#define IEM_MC_REF_MREG_U64(a_pu64Dst, a_iMReg) \
8638 (a_pu64Dst) = (&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
8639#define IEM_MC_REF_MREG_U64_CONST(a_pu64Dst, a_iMReg) \
8640 (a_pu64Dst) = ((uint64_t const *)&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
8641#define IEM_MC_REF_MREG_U32_CONST(a_pu32Dst, a_iMReg) \
8642 (a_pu32Dst) = ((uint32_t const *)&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
8643
8644#define IEM_MC_FETCH_XREG_U128(a_u128Value, a_iXReg) \
8645 do { (a_u128Value) = pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm; } while (0)
8646#define IEM_MC_FETCH_XREG_U64(a_u64Value, a_iXReg) \
8647 do { (a_u64Value) = pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0]; } while (0)
8648#define IEM_MC_FETCH_XREG_U32(a_u32Value, a_iXReg) \
8649 do { (a_u32Value) = pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au32[0]; } while (0)
8650#define IEM_MC_STORE_XREG_U128(a_iXReg, a_u128Value) \
8651 do { pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm = (a_u128Value); } while (0)
8652#define IEM_MC_STORE_XREG_U64_ZX_U128(a_iXReg, a_u64Value) \
8653 do { pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); \
8654 pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
8655 } while (0)
8656#define IEM_MC_STORE_XREG_U32_ZX_U128(a_iXReg, a_u32Value) \
8657 do { pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (uint32_t)(a_u32Value); \
8658 pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
8659 } while (0)
8660#define IEM_MC_REF_XREG_U128(a_pu128Dst, a_iXReg) \
8661 (a_pu128Dst) = (&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm)
8662#define IEM_MC_REF_XREG_U128_CONST(a_pu128Dst, a_iXReg) \
8663 (a_pu128Dst) = ((uint128_t const *)&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm)
8664#define IEM_MC_REF_XREG_U64_CONST(a_pu64Dst, a_iXReg) \
8665 (a_pu64Dst) = ((uint64_t const *)&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0])
8666
8667#define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
8668 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem)))
8669#define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
8670 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem16)))
8671#define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
8672 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem32)))
8673
8674#define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
8675 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem)))
8676#define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
8677 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
8678#define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
8679 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, (uint16_t *)&(a_i16Dst), (a_iSeg), (a_GCPtrMem)))
8680
8681#define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
8682 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem)))
8683#define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
8684 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
8685#define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
8686 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, (uint32_t *)&(a_i32Dst), (a_iSeg), (a_GCPtrMem)))
8687
8688#define IEM_MC_FETCH_MEM_S32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8689 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataS32SxU64(pIemCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
8690
8691#define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8692 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
8693#define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
8694 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
8695#define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
8696 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64AlignedU128(pIemCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
8697#define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
8698 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, (uint64_t *)&(a_i64Dst), (a_iSeg), (a_GCPtrMem)))
8699
8700#define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
8701 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &(a_r32Dst).u32, (a_iSeg), (a_GCPtrMem)))
8702#define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
8703 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, &(a_r64Dst).au64[0], (a_iSeg), (a_GCPtrMem)))
8704#define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
8705 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataR80(pIemCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem)))
8706
8707#define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
8708 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128(pIemCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
8709#define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
8710 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128AlignedSse(pIemCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
8711
8712
8713
8714#define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
8715 do { \
8716 uint8_t u8Tmp; \
8717 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
8718 (a_u16Dst) = u8Tmp; \
8719 } while (0)
8720#define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
8721 do { \
8722 uint8_t u8Tmp; \
8723 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
8724 (a_u32Dst) = u8Tmp; \
8725 } while (0)
8726#define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8727 do { \
8728 uint8_t u8Tmp; \
8729 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
8730 (a_u64Dst) = u8Tmp; \
8731 } while (0)
8732#define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
8733 do { \
8734 uint16_t u16Tmp; \
8735 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
8736 (a_u32Dst) = u16Tmp; \
8737 } while (0)
8738#define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8739 do { \
8740 uint16_t u16Tmp; \
8741 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
8742 (a_u64Dst) = u16Tmp; \
8743 } while (0)
8744#define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8745 do { \
8746 uint32_t u32Tmp; \
8747 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
8748 (a_u64Dst) = u32Tmp; \
8749 } while (0)
8750
8751#define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
8752 do { \
8753 uint8_t u8Tmp; \
8754 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
8755 (a_u16Dst) = (int8_t)u8Tmp; \
8756 } while (0)
8757#define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
8758 do { \
8759 uint8_t u8Tmp; \
8760 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
8761 (a_u32Dst) = (int8_t)u8Tmp; \
8762 } while (0)
8763#define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8764 do { \
8765 uint8_t u8Tmp; \
8766 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
8767 (a_u64Dst) = (int8_t)u8Tmp; \
8768 } while (0)
8769#define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
8770 do { \
8771 uint16_t u16Tmp; \
8772 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
8773 (a_u32Dst) = (int16_t)u16Tmp; \
8774 } while (0)
8775#define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8776 do { \
8777 uint16_t u16Tmp; \
8778 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
8779 (a_u64Dst) = (int16_t)u16Tmp; \
8780 } while (0)
8781#define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8782 do { \
8783 uint32_t u32Tmp; \
8784 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
8785 (a_u64Dst) = (int32_t)u32Tmp; \
8786 } while (0)
8787
8788#define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
8789 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value)))
8790#define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
8791 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value)))
8792#define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
8793 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value)))
8794#define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
8795 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value)))
8796
8797#define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
8798 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u8C)))
8799#define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
8800 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u16C)))
8801#define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
8802 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u32C)))
8803#define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
8804 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u64C)))
8805
8806#define IEM_MC_STORE_MEM_I8_CONST_BY_REF( a_pi8Dst, a_i8C) *(a_pi8Dst) = (a_i8C)
8807#define IEM_MC_STORE_MEM_I16_CONST_BY_REF(a_pi16Dst, a_i16C) *(a_pi16Dst) = (a_i16C)
8808#define IEM_MC_STORE_MEM_I32_CONST_BY_REF(a_pi32Dst, a_i32C) *(a_pi32Dst) = (a_i32C)
8809#define IEM_MC_STORE_MEM_I64_CONST_BY_REF(a_pi64Dst, a_i64C) *(a_pi64Dst) = (a_i64C)
8810#define IEM_MC_STORE_MEM_NEG_QNAN_R32_BY_REF(a_pr32Dst) (a_pr32Dst)->u32 = UINT32_C(0xffc00000)
8811#define IEM_MC_STORE_MEM_NEG_QNAN_R64_BY_REF(a_pr64Dst) (a_pr64Dst)->au64[0] = UINT64_C(0xfff8000000000000)
8812#define IEM_MC_STORE_MEM_NEG_QNAN_R80_BY_REF(a_pr80Dst) \
8813 do { \
8814 (a_pr80Dst)->au64[0] = UINT64_C(0xc000000000000000); \
8815 (a_pr80Dst)->au16[4] = UINT16_C(0xffff); \
8816 } while (0)
8817
8818#define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
8819 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
8820#define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
8821 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128AlignedSse(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
8822
8823
8824#define IEM_MC_PUSH_U16(a_u16Value) \
8825 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU16(pIemCpu, (a_u16Value)))
8826#define IEM_MC_PUSH_U32(a_u32Value) \
8827 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32(pIemCpu, (a_u32Value)))
8828#define IEM_MC_PUSH_U32_SREG(a_u32Value) \
8829 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32SReg(pIemCpu, (a_u32Value)))
8830#define IEM_MC_PUSH_U64(a_u64Value) \
8831 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU64(pIemCpu, (a_u64Value)))
8832
8833#define IEM_MC_POP_U16(a_pu16Value) \
8834 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU16(pIemCpu, (a_pu16Value)))
8835#define IEM_MC_POP_U32(a_pu32Value) \
8836 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU32(pIemCpu, (a_pu32Value)))
8837#define IEM_MC_POP_U64(a_pu64Value) \
8838 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU64(pIemCpu, (a_pu64Value)))
8839
8840/** Maps guest memory for direct or bounce buffered access.
8841 * The purpose is to pass it to an operand implementation, thus the a_iArg.
8842 * @remarks May return.
8843 */
8844#define IEM_MC_MEM_MAP(a_pMem, a_fAccess, a_iSeg, a_GCPtrMem, a_iArg) \
8845 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pIemCpu, (void **)&(a_pMem), sizeof(*(a_pMem)), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
8846
8847/** Maps guest memory for direct or bounce buffered access.
8848 * The purpose is to pass it to an operand implementation, thus the a_iArg.
8849 * @remarks May return.
8850 */
8851#define IEM_MC_MEM_MAP_EX(a_pvMem, a_fAccess, a_cbMem, a_iSeg, a_GCPtrMem, a_iArg) \
8852 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pIemCpu, (void **)&(a_pvMem), (a_cbMem), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
8853
8854/** Commits the memory and unmaps the guest memory.
8855 * @remarks May return.
8856 */
8857#define IEM_MC_MEM_COMMIT_AND_UNMAP(a_pvMem, a_fAccess) \
8858 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pIemCpu, (a_pvMem), (a_fAccess)))
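/*
 * Rough sketch of how the map/commit pair is used for a read-modify-write
 * memory operand (schematic, not an actual handler; bRm and the operand size
 * are illustrative):
 *
 * @code
 *  IEM_MC_ARG(uint16_t *, pu16Dst, 0);
 *  IEM_MC_LOCAL(RTGCPTR,  GCPtrEffDst);
 *  IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
 *  IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
 *  // ... have the operand worker update *pu16Dst ...
 *  IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
 * @endcode
 */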
8859
8860/** Commits the memory and unmaps the guest memory unless the FPU status word
8861 * (@a a_u16FSW) together with the FPU control word indicates a pending unmasked
8862 * exception that would cause the store not to happen.
8863 *
8864 * The current understanding is that \#O, \#U, \#IA and \#IS will prevent a
8865 * store, while \#P will not.
8866 *
8867 * @remarks May in theory return - for now.
8868 */
8869#define IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(a_pvMem, a_fAccess, a_u16FSW) \
8870 do { \
8871 if ( !(a_u16FSW & X86_FSW_ES) \
8872 || !( (a_u16FSW & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE)) \
8873 & ~(pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FCW & X86_FCW_MASK_ALL) ) ) \
8874 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pIemCpu, (a_pvMem), (a_fAccess))); \
8875 } while (0)
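/*
 * A worked example of the check above (illustrative values): an unmasked
 * overflow pending in the status word keeps the store from being committed,
 * whereas a lone precision exception would not.
 *
 * @code
 *  uint16_t const fFcw   = 0;                                 // nothing masked
 *  uint16_t const fFsw   = X86_FSW_ES | X86_FSW_OE;           // unmasked #O pending
 *  bool     const fStore = !(fFsw & X86_FSW_ES)
 *                       || !(  (fFsw & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE))
 *                            & ~(fFcw & X86_FCW_MASK_ALL));
 *  // fStore is false here; with only X86_FSW_PE pending it would be true.
 * @endcode
 */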
8876
8877/** Calculate effective address from R/M. */
8878#define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
8879 IEM_MC_RETURN_ON_FAILURE(iemOpHlpCalcRmEffAddr(pIemCpu, (bRm), (cbImm), &(a_GCPtrEff)))
8880
8881#define IEM_MC_CALL_VOID_AIMPL_0(a_pfn) (a_pfn)()
8882#define IEM_MC_CALL_VOID_AIMPL_1(a_pfn, a0) (a_pfn)((a0))
8883#define IEM_MC_CALL_VOID_AIMPL_2(a_pfn, a0, a1) (a_pfn)((a0), (a1))
8884#define IEM_MC_CALL_VOID_AIMPL_3(a_pfn, a0, a1, a2) (a_pfn)((a0), (a1), (a2))
8885#define IEM_MC_CALL_VOID_AIMPL_4(a_pfn, a0, a1, a2, a3) (a_pfn)((a0), (a1), (a2), (a3))
8886#define IEM_MC_CALL_AIMPL_3(a_rc, a_pfn, a0, a1, a2) (a_rc) = (a_pfn)((a0), (a1), (a2))
8887#define IEM_MC_CALL_AIMPL_4(a_rc, a_pfn, a0, a1, a2, a3) (a_rc) = (a_pfn)((a0), (a1), (a2), (a3))
8888
8889/**
8890 * Defers the rest of the instruction emulation to a C implementation routine
8891 * and returns, only taking the standard parameters.
8892 *
8893 * @param a_pfnCImpl The pointer to the C routine.
8894 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
8895 */
8896#define IEM_MC_CALL_CIMPL_0(a_pfnCImpl) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode)
8897
8898/**
8899 * Defers the rest of instruction emulation to a C implementation routine and
8900 * returns, taking one argument in addition to the standard ones.
8901 *
8902 * @param a_pfnCImpl The pointer to the C routine.
8903 * @param a0 The argument.
8904 */
8905#define IEM_MC_CALL_CIMPL_1(a_pfnCImpl, a0) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0)
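/*
 * Rough sketch of what such a C implementation routine looks like on the
 * receiving end (the name is illustrative; the real routines live in
 * IEMAllCImpl.cpp.h). The standard parameters forwarded by the macros above
 * are the per CPU data and the instruction length (pIemCpu->offOpcode at the
 * time of the call):
 *
 * @code
 *  IEM_CIMPL_DEF_1(iemCImpl_example_u16, uint16_t, u16Arg)
 *  {
 *      // ... emulate the rest of the instruction using pIemCpu, the hidden
 *      //     instruction length parameter and u16Arg, advance RIP on success ...
 *      return VINF_SUCCESS;
 *  }
 * @endcode
 */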
8906
8907/**
8908 * Defers the rest of the instruction emulation to a C implementation routine
8909 * and returns, taking two arguments in addition to the standard ones.
8910 *
8911 * @param a_pfnCImpl The pointer to the C routine.
8912 * @param a0 The first extra argument.
8913 * @param a1 The second extra argument.
8914 */
8915#define IEM_MC_CALL_CIMPL_2(a_pfnCImpl, a0, a1) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1)
8916
8917/**
8918 * Defers the rest of the instruction emulation to a C implementation routine
8919 * and returns, taking three arguments in addition to the standard ones.
8920 *
8921 * @param a_pfnCImpl The pointer to the C routine.
8922 * @param a0 The first extra argument.
8923 * @param a1 The second extra argument.
8924 * @param a2 The third extra argument.
8925 */
8926#define IEM_MC_CALL_CIMPL_3(a_pfnCImpl, a0, a1, a2) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2)
8927
8928/**
8929 * Defers the rest of the instruction emulation to a C implementation routine
8930 * and returns, taking four arguments in addition to the standard ones.
8931 *
8932 * @param a_pfnCImpl The pointer to the C routine.
8933 * @param a0 The first extra argument.
8934 * @param a1 The second extra argument.
8935 * @param a2 The third extra argument.
8936 * @param a3 The fourth extra argument.
8937 */
8938#define IEM_MC_CALL_CIMPL_4(a_pfnCImpl, a0, a1, a2, a3) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2, a3)
8939
8940/**
8941 * Defers the rest of the instruction emulation to a C implementation routine
8942 * and returns, taking five arguments in addition to the standard ones.
8943 *
8944 * @param a_pfnCImpl The pointer to the C routine.
8945 * @param a0 The first extra argument.
8946 * @param a1 The second extra argument.
8947 * @param a2 The third extra argument.
8948 * @param a3 The fourth extra argument.
8949 * @param a4 The fifth extra argument.
8950 */
8951#define IEM_MC_CALL_CIMPL_5(a_pfnCImpl, a0, a1, a2, a3, a4) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2, a3, a4)
8952
8953/**
8954 * Defers the entire instruction emulation to a C implementation routine and
8955 * returns, only taking the standard parameters.
8956 *
8957 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
8958 *
8959 * @param a_pfnCImpl The pointer to the C routine.
8960 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
8961 */
8962#define IEM_MC_DEFER_TO_CIMPL_0(a_pfnCImpl) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode)
8963
8964/**
8965 * Defers the entire instruction emulation to a C implementation routine and
8966 * returns, taking one argument in addition to the standard ones.
8967 *
8968 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
8969 *
8970 * @param a_pfnCImpl The pointer to the C routine.
8971 * @param a0 The argument.
8972 */
8973#define IEM_MC_DEFER_TO_CIMPL_1(a_pfnCImpl, a0) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0)
8974
8975/**
8976 * Defers the entire instruction emulation to a C implementation routine and
8977 * returns, taking two arguments in addition to the standard ones.
8978 *
8979 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
8980 *
8981 * @param a_pfnCImpl The pointer to the C routine.
8982 * @param a0 The first extra argument.
8983 * @param a1 The second extra argument.
8984 */
8985#define IEM_MC_DEFER_TO_CIMPL_2(a_pfnCImpl, a0, a1) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1)
8986
8987/**
8988 * Defers the entire instruction emulation to a C implementation routine and
8989 * returns, taking three arguments in addition to the standard ones.
8990 *
8991 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
8992 *
8993 * @param a_pfnCImpl The pointer to the C routine.
8994 * @param a0 The first extra argument.
8995 * @param a1 The second extra argument.
8996 * @param a2 The third extra argument.
8997 */
8998#define IEM_MC_DEFER_TO_CIMPL_3(a_pfnCImpl, a0, a1, a2) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2)
8999
9000/**
9001 * Calls a FPU assembly implementation taking one visible argument.
9002 *
9003 * @param a_pfnAImpl Pointer to the assembly FPU routine.
9004 * @param a0 The first extra argument.
9005 */
9006#define IEM_MC_CALL_FPU_AIMPL_1(a_pfnAImpl, a0) \
9007 do { \
9008 iemFpuPrepareUsage(pIemCpu); \
9009 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0)); \
9010 } while (0)
9011
9012/**
9013 * Calls a FPU assembly implementation taking two visible arguments.
9014 *
9015 * @param a_pfnAImpl Pointer to the assembly FPU routine.
9016 * @param a0 The first extra argument.
9017 * @param a1 The second extra argument.
9018 */
9019#define IEM_MC_CALL_FPU_AIMPL_2(a_pfnAImpl, a0, a1) \
9020 do { \
9021 iemFpuPrepareUsage(pIemCpu); \
9022 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
9023 } while (0)
9024
9025/**
9026 * Calls a FPU assembly implementation taking three visible arguments.
9027 *
9028 * @param a_pfnAImpl Pointer to the assembly FPU routine.
9029 * @param a0 The first extra argument.
9030 * @param a1 The second extra argument.
9031 * @param a2 The third extra argument.
9032 */
9033#define IEM_MC_CALL_FPU_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
9034 do { \
9035 iemFpuPrepareUsage(pIemCpu); \
9036 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
9037 } while (0)
9038
9039#define IEM_MC_SET_FPU_RESULT(a_FpuData, a_FSW, a_pr80Value) \
9040 do { \
9041 (a_FpuData).FSW = (a_FSW); \
9042 (a_FpuData).r80Result = *(a_pr80Value); \
9043 } while (0)
9044
9045/** Pushes FPU result onto the stack. */
9046#define IEM_MC_PUSH_FPU_RESULT(a_FpuData) \
9047 iemFpuPushResult(pIemCpu, &a_FpuData)
9048/** Pushes FPU result onto the stack and sets the FPUDP. */
9049#define IEM_MC_PUSH_FPU_RESULT_MEM_OP(a_FpuData, a_iEffSeg, a_GCPtrEff) \
9050 iemFpuPushResultWithMemOp(pIemCpu, &a_FpuData, a_iEffSeg, a_GCPtrEff)
9051
9052/** Replaces ST0 with the first result value and pushes the second result value onto the FPU stack. */
9053#define IEM_MC_PUSH_FPU_RESULT_TWO(a_FpuDataTwo) \
9054 iemFpuPushResultTwo(pIemCpu, &a_FpuDataTwo)
9055
9056/** Stores FPU result in a stack register. */
9057#define IEM_MC_STORE_FPU_RESULT(a_FpuData, a_iStReg) \
9058 iemFpuStoreResult(pIemCpu, &a_FpuData, a_iStReg)
9059/** Stores FPU result in a stack register and pops the stack. */
9060#define IEM_MC_STORE_FPU_RESULT_THEN_POP(a_FpuData, a_iStReg) \
9061 iemFpuStoreResultThenPop(pIemCpu, &a_FpuData, a_iStReg)
9062/** Stores FPU result in a stack register and sets the FPUDP. */
9063#define IEM_MC_STORE_FPU_RESULT_MEM_OP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
9064 iemFpuStoreResultWithMemOp(pIemCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
9065/** Stores FPU result in a stack register, sets the FPUDP, and pops the
9066 * stack. */
9067#define IEM_MC_STORE_FPU_RESULT_WITH_MEM_OP_THEN_POP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
9068 iemFpuStoreResultWithMemOpThenPop(pIemCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
9069
9070/** Only update the FOP, FPUIP, and FPUCS. (For FNOP.) */
9071#define IEM_MC_UPDATE_FPU_OPCODE_IP() \
9072 iemFpuUpdateOpcodeAndIp(pIemCpu)
9073/** Free a stack register (for FFREE and FFREEP). */
9074#define IEM_MC_FPU_STACK_FREE(a_iStReg) \
9075 iemFpuStackFree(pIemCpu, a_iStReg)
9076/** Increment the FPU stack pointer. */
9077#define IEM_MC_FPU_STACK_INC_TOP() \
9078 iemFpuStackIncTop(pIemCpu)
9079/** Decrement the FPU stack pointer. */
9080#define IEM_MC_FPU_STACK_DEC_TOP() \
9081 iemFpuStackDecTop(pIemCpu)
9082
9083/** Updates the FSW, FOP, FPUIP, and FPUCS. */
9084#define IEM_MC_UPDATE_FSW(a_u16FSW) \
9085 iemFpuUpdateFSW(pIemCpu, a_u16FSW)
9086/** Updates the FSW with a constant value as well as FOP, FPUIP, and FPUCS. */
9087#define IEM_MC_UPDATE_FSW_CONST(a_u16FSW) \
9088 iemFpuUpdateFSW(pIemCpu, a_u16FSW)
9089/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS. */
9090#define IEM_MC_UPDATE_FSW_WITH_MEM_OP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
9091 iemFpuUpdateFSWWithMemOp(pIemCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
9092/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack. */
9093#define IEM_MC_UPDATE_FSW_THEN_POP(a_u16FSW) \
9094 iemFpuUpdateFSWThenPop(pIemCpu, a_u16FSW)
9095/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP and FPUDS, and then pops the
9096 * stack. */
9097#define IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
9098 iemFpuUpdateFSWWithMemOpThenPop(pIemCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
9099/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack twice. */
9100#define IEM_MC_UPDATE_FSW_THEN_POP_POP(a_u16FSW) \
9101 iemFpuUpdateFSWThenPopPop(pIemCpu, a_u16FSW)
9102
9103/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. */
9104#define IEM_MC_FPU_STACK_UNDERFLOW(a_iStDst) \
9105 iemFpuStackUnderflow(pIemCpu, a_iStDst)
9106/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
9107 * stack. */
9108#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP(a_iStDst) \
9109 iemFpuStackUnderflowThenPop(pIemCpu, a_iStDst)
9110/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
9111 * FPUDS. */
9112#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
9113 iemFpuStackUnderflowWithMemOp(pIemCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
9114/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
9115 * FPUDS. Pops stack. */
9116#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
9117 iemFpuStackUnderflowWithMemOpThenPop(pIemCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
9118/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
9119 * stack twice. */
9120#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP_POP() \
9121 iemFpuStackUnderflowThenPopPop(pIemCpu)
9122/** Raises a FPU stack underflow exception for an instruction pushing a result
9123 * value onto the stack. Sets FPUIP, FPUCS and FOP. */
9124#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW() \
9125 iemFpuStackPushUnderflow(pIemCpu)
9126/** Raises a FPU stack underflow exception for an instruction pushing a result
9127 * value onto the stack and replacing ST0. Sets FPUIP, FPUCS and FOP. */
9128#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW_TWO() \
9129 iemFpuStackPushUnderflowTwo(pIemCpu)
9130
9131/** Raises a FPU stack overflow exception as part of a push attempt. Sets
9132 * FPUIP, FPUCS and FOP. */
9133#define IEM_MC_FPU_STACK_PUSH_OVERFLOW() \
9134 iemFpuStackPushOverflow(pIemCpu)
9135/** Raises a FPU stack overflow exception as part of a push attempt. Sets
9136 * FPUIP, FPUCS, FOP, FPUDP and FPUDS. */
9137#define IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(a_iEffSeg, a_GCPtrEff) \
9138 iemFpuStackPushOverflowWithMemOp(pIemCpu, a_iEffSeg, a_GCPtrEff)
9139/** Indicates that we (might) have modified the FPU state. */
9140#define IEM_MC_USED_FPU() \
9141 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_FPU_REM)
9142
9143/**
9144 * Calls a MMX assembly implementation taking two visible arguments.
9145 *
9146 * @param a_pfnAImpl Pointer to the assembly MMX routine.
9147 * @param a0 The first extra argument.
9148 * @param a1 The second extra argument.
9149 */
9150#define IEM_MC_CALL_MMX_AIMPL_2(a_pfnAImpl, a0, a1) \
9151 do { \
9152 iemFpuPrepareUsage(pIemCpu); \
9153 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
9154 } while (0)
9155
9156/**
9157 * Calls a MMX assembly implementation taking three visible arguments.
9158 *
9159 * @param a_pfnAImpl Pointer to the assembly MMX routine.
9160 * @param a0 The first extra argument.
9161 * @param a1 The second extra argument.
9162 * @param a2 The third extra argument.
9163 */
9164#define IEM_MC_CALL_MMX_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
9165 do { \
9166 iemFpuPrepareUsage(pIemCpu); \
9167 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
9168 } while (0)
9169
9170
9171/**
9172 * Calls a SSE assembly implementation taking two visible arguments.
9173 *
9174 * @param a_pfnAImpl Pointer to the assembly SSE routine.
9175 * @param a0 The first extra argument.
9176 * @param a1 The second extra argument.
9177 */
9178#define IEM_MC_CALL_SSE_AIMPL_2(a_pfnAImpl, a0, a1) \
9179 do { \
9180 iemFpuPrepareUsageSse(pIemCpu); \
9181 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
9182 } while (0)
9183
9184/**
9185 * Calls a SSE assembly implementation taking three visible arguments.
9186 *
9187 * @param a_pfnAImpl Pointer to the assembly SSE routine.
9188 * @param a0 The first extra argument.
9189 * @param a1 The second extra argument.
9190 * @param a2 The third extra argument.
9191 */
9192#define IEM_MC_CALL_SSE_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
9193 do { \
9194 iemFpuPrepareUsageSse(pIemCpu); \
9195 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
9196 } while (0)
9197
9198
9199/** @note Not for IOPL or IF testing. */
9200#define IEM_MC_IF_EFL_BIT_SET(a_fBit) if (pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit)) {
9201/** @note Not for IOPL or IF testing. */
9202#define IEM_MC_IF_EFL_BIT_NOT_SET(a_fBit) if (!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit))) {
9203/** @note Not for IOPL or IF testing. */
9204#define IEM_MC_IF_EFL_ANY_BITS_SET(a_fBits) if (pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBits)) {
9205/** @note Not for IOPL or IF testing. */
9206#define IEM_MC_IF_EFL_NO_BITS_SET(a_fBits) if (!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBits))) {
9207/** @note Not for IOPL or IF testing. */
9208#define IEM_MC_IF_EFL_BITS_NE(a_fBit1, a_fBit2) \
9209 if ( !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
9210 != !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
9211/** @note Not for IOPL or IF testing. */
9212#define IEM_MC_IF_EFL_BITS_EQ(a_fBit1, a_fBit2) \
9213 if ( !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
9214 == !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
9215/** @note Not for IOPL or IF testing. */
9216#define IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(a_fBit, a_fBit1, a_fBit2) \
9217 if ( (pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit)) \
9218 || !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
9219 != !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
9220/** @note Not for IOPL or IF testing. */
9221#define IEM_MC_IF_EFL_BIT_NOT_SET_AND_BITS_EQ(a_fBit, a_fBit1, a_fBit2) \
9222 if ( !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit)) \
9223 && !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
9224 == !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
9225#define IEM_MC_IF_CX_IS_NZ() if (pIemCpu->CTX_SUFF(pCtx)->cx != 0) {
9226#define IEM_MC_IF_ECX_IS_NZ() if (pIemCpu->CTX_SUFF(pCtx)->ecx != 0) {
9227#define IEM_MC_IF_RCX_IS_NZ() if (pIemCpu->CTX_SUFF(pCtx)->rcx != 0) {
9228/** @note Not for IOPL or IF testing. */
9229#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
9230 if ( pIemCpu->CTX_SUFF(pCtx)->cx != 0 \
9231 && (pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
9232/** @note Not for IOPL or IF testing. */
9233#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
9234 if ( pIemCpu->CTX_SUFF(pCtx)->ecx != 0 \
9235 && (pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
9236/** @note Not for IOPL or IF testing. */
9237#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
9238 if ( pIemCpu->CTX_SUFF(pCtx)->rcx != 0 \
9239 && (pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
9240/** @note Not for IOPL or IF testing. */
9241#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
9242 if ( pIemCpu->CTX_SUFF(pCtx)->cx != 0 \
9243 && !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
9244/** @note Not for IOPL or IF testing. */
9245#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
9246 if ( pIemCpu->CTX_SUFF(pCtx)->ecx != 0 \
9247 && !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
9248/** @note Not for IOPL or IF testing. */
9249#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
9250 if ( pIemCpu->CTX_SUFF(pCtx)->rcx != 0 \
9251 && !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
9252#define IEM_MC_IF_LOCAL_IS_Z(a_Local) if ((a_Local) == 0) {
9253#define IEM_MC_IF_GREG_BIT_SET(a_iGReg, a_iBitNo) if (*(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) & RT_BIT_64(a_iBitNo)) {
9254#define IEM_MC_IF_FPUREG_NOT_EMPTY(a_iSt) \
9255 if (iemFpuStRegNotEmpty(pIemCpu, (a_iSt)) == VINF_SUCCESS) {
9256#define IEM_MC_IF_FPUREG_IS_EMPTY(a_iSt) \
9257 if (iemFpuStRegNotEmpty(pIemCpu, (a_iSt)) != VINF_SUCCESS) {
9258#define IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(a_pr80Dst, a_iSt) \
9259 if (iemFpuStRegNotEmptyRef(pIemCpu, (a_iSt), &(a_pr80Dst)) == VINF_SUCCESS) {
9260#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(a_pr80Dst0, a_iSt0, a_pr80Dst1, a_iSt1) \
9261 if (iemFpu2StRegsNotEmptyRef(pIemCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1), &(a_pr80Dst1)) == VINF_SUCCESS) {
9262#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(a_pr80Dst0, a_iSt0, a_iSt1) \
9263 if (iemFpu2StRegsNotEmptyRefFirst(pIemCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1)) == VINF_SUCCESS) {
9264#define IEM_MC_IF_FCW_IM() \
9265 if (pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FCW & X86_FCW_IM) {
9266
9267#define IEM_MC_ELSE() } else {
9268#define IEM_MC_ENDIF() } do {} while (0)
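/* Illustrative only: the IEM_MC_IF_* / IEM_MC_ELSE / IEM_MC_ENDIF macros
 * expand to ordinary C if/else blocks (each IF opens a brace, ENDIF closes it
 * and the trailing do {} while (0) soaks up the statement semicolon), so a
 * conditional-jump body might look roughly like this (sketch, not verbatim):
 *
 *     IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF)
 *         IEM_MC_REL_JMP_S8(i8Imm);
 *     IEM_MC_ELSE()
 *         IEM_MC_ADVANCE_RIP();
 *     IEM_MC_ENDIF();
 */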
9269
9270/** @} */
9271
9272
9273/** @name Opcode Debug Helpers.
9274 * @{
9275 */
9276#ifdef DEBUG
9277# define IEMOP_MNEMONIC(a_szMnemonic) \
9278 Log4(("decode - %04x:%RGv %s%s [#%u]\n", pIemCpu->CTX_SUFF(pCtx)->cs.Sel, pIemCpu->CTX_SUFF(pCtx)->rip, \
9279 pIemCpu->fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, pIemCpu->cInstructions))
9280# define IEMOP_MNEMONIC2(a_szMnemonic, a_szOps) \
9281 Log4(("decode - %04x:%RGv %s%s %s [#%u]\n", pIemCpu->CTX_SUFF(pCtx)->cs.Sel, pIemCpu->CTX_SUFF(pCtx)->rip, \
9282 pIemCpu->fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, a_szOps, pIemCpu->cInstructions))
9283#else
9284# define IEMOP_MNEMONIC(a_szMnemonic) do { } while (0)
9285# define IEMOP_MNEMONIC2(a_szMnemonic, a_szOps) do { } while (0)
9286#endif
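/* Illustrative use at the top of an opcode decoder function (operand string
 * is hypothetical):
 *     IEMOP_MNEMONIC2("add", "Eb,Gb");
 * In non-DEBUG builds both macros compile away to nothing. */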
9287
9288/** @} */
9289
9290
9291/** @name Opcode Helpers.
9292 * @{
9293 */
9294
9295/** The instruction raises an \#UD in real and V8086 mode. */
9296#define IEMOP_HLP_NO_REAL_OR_V86_MODE() \
9297 do \
9298 { \
9299 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu)) \
9300 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
9301 } while (0)
9302
9303/** The instruction allows no lock prefixing (in this encoding), throw \#UD if
9304 * lock prefixed.
9305 * @deprecated IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX */
9306#define IEMOP_HLP_NO_LOCK_PREFIX() \
9307 do \
9308 { \
9309 if (pIemCpu->fPrefixes & IEM_OP_PRF_LOCK) \
9310 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
9311 } while (0)
9312
9313/** The instruction is not available in 64-bit mode, throw \#UD if we're in
9314 * 64-bit mode. */
9315#define IEMOP_HLP_NO_64BIT() \
9316 do \
9317 { \
9318 if (pIemCpu->enmCpuMode == IEMMODE_64BIT) \
9319 return IEMOP_RAISE_INVALID_OPCODE(); \
9320 } while (0)
9321
9322/** The instruction is only available in 64-bit mode, throw \#UD if we're not in
9323 * 64-bit mode. */
9324#define IEMOP_HLP_ONLY_64BIT() \
9325 do \
9326 { \
9327 if (pIemCpu->enmCpuMode != IEMMODE_64BIT) \
9328 return IEMOP_RAISE_INVALID_OPCODE(); \
9329 } while (0)
9330
9331/** The instruction defaults to 64-bit operand size if 64-bit mode. */
9332#define IEMOP_HLP_DEFAULT_64BIT_OP_SIZE() \
9333 do \
9334 { \
9335 if (pIemCpu->enmCpuMode == IEMMODE_64BIT) \
9336 iemRecalEffOpSize64Default(pIemCpu); \
9337 } while (0)
9338
9339/** The instruction has 64-bit operand size if 64-bit mode. */
9340#define IEMOP_HLP_64BIT_OP_SIZE() \
9341 do \
9342 { \
9343 if (pIemCpu->enmCpuMode == IEMMODE_64BIT) \
9344 pIemCpu->enmEffOpSize = pIemCpu->enmDefOpSize = IEMMODE_64BIT; \
9345 } while (0)
9346
9347/** Only a REX prefix immediately preceding the first opcode byte takes
9348 * effect. This macro helps ensure this as well as log bad guest code. */
9349#define IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE(a_szPrf) \
9350 do \
9351 { \
9352 if (RT_UNLIKELY(pIemCpu->fPrefixes & IEM_OP_PRF_REX)) \
9353 { \
9354 Log5((a_szPrf ": Overriding REX prefix at %RX16! fPrefixes=%#x\n", \
9355 pIemCpu->CTX_SUFF(pCtx)->rip, pIemCpu->fPrefixes)); \
9356 pIemCpu->fPrefixes &= ~IEM_OP_PRF_REX_MASK; \
9357 pIemCpu->uRexB = 0; \
9358 pIemCpu->uRexIndex = 0; \
9359 pIemCpu->uRexReg = 0; \
9360 iemRecalEffOpSize(pIemCpu); \
9361 } \
9362 } while (0)
9363
9364/**
9365 * Done decoding.
9366 */
9367#define IEMOP_HLP_DONE_DECODING() \
9368 do \
9369 { \
9370 /*nothing for now, maybe later... */ \
9371 } while (0)
9372
9373/**
9374 * Done decoding, raise \#UD exception if lock prefix present.
9375 */
9376#define IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX() \
9377 do \
9378 { \
9379 if (RT_LIKELY(!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))) \
9380 { /* likely */ } \
9381 else \
9382 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
9383 } while (0)
9384#define IEMOP_HLP_DECODED_NL_1(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_fDisOpType) \
9385 do \
9386 { \
9387 if (RT_LIKELY(!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))) \
9388 { /* likely */ } \
9389 else \
9390 { \
9391 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_fDisOpType); \
9392 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
9393 } \
9394 } while (0)
9395#define IEMOP_HLP_DECODED_NL_2(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_uDisParam1, a_fDisOpType) \
9396 do \
9397 { \
9398 if (RT_LIKELY(!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))) \
9399 { /* likely */ } \
9400 else \
9401 { \
9402 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_uDisParam1); NOREF(a_fDisOpType); \
9403 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
9404 } \
9405 } while (0)
9406/**
9407 * Done decoding, raise \#UD exception if any lock, repz or repnz prefixes
9408 * are present.
9409 */
9410#define IEMOP_HLP_DONE_DECODING_NO_LOCK_REPZ_OR_REPNZ_PREFIXES() \
9411 do \
9412 { \
9413 if (RT_LIKELY(!(pIemCpu->fPrefixes & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)))) \
9414 { /* likely */ } \
9415 else \
9416 return IEMOP_RAISE_INVALID_OPCODE(); \
9417 } while (0)
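/* Illustrative only: a typical decoder prologue combining the helpers above.
 * The mnemonic and exact helper mix depend on the instruction; this is just
 * a sketch, not a verbatim decoder from IEMAllInstructions.cpp.h:
 *     IEMOP_MNEMONIC("push Ev");
 *     IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
 *     ...fetch ModR/M, displacements and immediates...
 *     IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
 */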
9418
9419
9420/**
9421 * Calculates the effective address of a ModR/M memory operand.
9422 *
9423 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
9424 *
9425 * @return Strict VBox status code.
9426 * @param pIemCpu The IEM per CPU data.
9427 * @param bRm The ModRM byte.
9428 * @param cbImm The size of any immediate following the
9429 * effective address opcode bytes. Important for
9430 * RIP relative addressing.
9431 * @param pGCPtrEff Where to return the effective address.
9432 */
9433IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PIEMCPU pIemCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff)
9434{
9435 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
9436 PCCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
9437#define SET_SS_DEF() \
9438 do \
9439 { \
9440 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_SEG_MASK)) \
9441 pIemCpu->iEffSeg = X86_SREG_SS; \
9442 } while (0)
9443
9444 if (pIemCpu->enmCpuMode != IEMMODE_64BIT)
9445 {
9446/** @todo Check the effective address size crap! */
9447 if (pIemCpu->enmEffAddrMode == IEMMODE_16BIT)
9448 {
9449 uint16_t u16EffAddr;
9450
9451 /* Handle the disp16 form with no registers first. */
9452 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
9453 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
9454 else
9455 {
9456 /* Get the displacement. */
9457 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9458 {
9459 case 0: u16EffAddr = 0; break;
9460 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
9461 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
9462 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
9463 }
9464
9465 /* Add the base and index registers to the disp. */
9466 switch (bRm & X86_MODRM_RM_MASK)
9467 {
9468 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
9469 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
9470 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
9471 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
9472 case 4: u16EffAddr += pCtx->si; break;
9473 case 5: u16EffAddr += pCtx->di; break;
9474 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
9475 case 7: u16EffAddr += pCtx->bx; break;
9476 }
9477 }
9478
9479 *pGCPtrEff = u16EffAddr;
9480 }
9481 else
9482 {
9483 Assert(pIemCpu->enmEffAddrMode == IEMMODE_32BIT);
9484 uint32_t u32EffAddr;
9485
9486 /* Handle the disp32 form with no registers first. */
9487 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9488 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
9489 else
9490 {
9491 /* Get the register (or SIB) value. */
9492 switch ((bRm & X86_MODRM_RM_MASK))
9493 {
9494 case 0: u32EffAddr = pCtx->eax; break;
9495 case 1: u32EffAddr = pCtx->ecx; break;
9496 case 2: u32EffAddr = pCtx->edx; break;
9497 case 3: u32EffAddr = pCtx->ebx; break;
9498 case 4: /* SIB */
9499 {
9500 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9501
9502 /* Get the index and scale it. */
9503 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
9504 {
9505 case 0: u32EffAddr = pCtx->eax; break;
9506 case 1: u32EffAddr = pCtx->ecx; break;
9507 case 2: u32EffAddr = pCtx->edx; break;
9508 case 3: u32EffAddr = pCtx->ebx; break;
9509 case 4: u32EffAddr = 0; /*none */ break;
9510 case 5: u32EffAddr = pCtx->ebp; break;
9511 case 6: u32EffAddr = pCtx->esi; break;
9512 case 7: u32EffAddr = pCtx->edi; break;
9513 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9514 }
9515 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9516
9517 /* add base */
9518 switch (bSib & X86_SIB_BASE_MASK)
9519 {
9520 case 0: u32EffAddr += pCtx->eax; break;
9521 case 1: u32EffAddr += pCtx->ecx; break;
9522 case 2: u32EffAddr += pCtx->edx; break;
9523 case 3: u32EffAddr += pCtx->ebx; break;
9524 case 4: u32EffAddr += pCtx->esp; SET_SS_DEF(); break;
9525 case 5:
9526 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9527 {
9528 u32EffAddr += pCtx->ebp;
9529 SET_SS_DEF();
9530 }
9531 else
9532 {
9533 uint32_t u32Disp;
9534 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9535 u32EffAddr += u32Disp;
9536 }
9537 break;
9538 case 6: u32EffAddr += pCtx->esi; break;
9539 case 7: u32EffAddr += pCtx->edi; break;
9540 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9541 }
9542 break;
9543 }
9544 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
9545 case 6: u32EffAddr = pCtx->esi; break;
9546 case 7: u32EffAddr = pCtx->edi; break;
9547 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9548 }
9549
9550 /* Get and add the displacement. */
9551 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9552 {
9553 case 0:
9554 break;
9555 case 1:
9556 {
9557 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9558 u32EffAddr += i8Disp;
9559 break;
9560 }
9561 case 2:
9562 {
9563 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9564 u32EffAddr += u32Disp;
9565 break;
9566 }
9567 default:
9568 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
9569 }
9570
9571 }
9572 if (pIemCpu->enmEffAddrMode == IEMMODE_32BIT)
9573 *pGCPtrEff = u32EffAddr;
9574 else
9575 {
9576 Assert(pIemCpu->enmEffAddrMode == IEMMODE_16BIT);
9577 *pGCPtrEff = u32EffAddr & UINT16_MAX;
9578 }
9579 }
9580 }
9581 else
9582 {
9583 uint64_t u64EffAddr;
9584
9585 /* Handle the rip+disp32 form with no registers first. */
9586 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9587 {
9588 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
9589 u64EffAddr += pCtx->rip + pIemCpu->offOpcode + cbImm;
9590 }
9591 else
9592 {
9593 /* Get the register (or SIB) value. */
9594 switch ((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB)
9595 {
9596 case 0: u64EffAddr = pCtx->rax; break;
9597 case 1: u64EffAddr = pCtx->rcx; break;
9598 case 2: u64EffAddr = pCtx->rdx; break;
9599 case 3: u64EffAddr = pCtx->rbx; break;
9600 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
9601 case 6: u64EffAddr = pCtx->rsi; break;
9602 case 7: u64EffAddr = pCtx->rdi; break;
9603 case 8: u64EffAddr = pCtx->r8; break;
9604 case 9: u64EffAddr = pCtx->r9; break;
9605 case 10: u64EffAddr = pCtx->r10; break;
9606 case 11: u64EffAddr = pCtx->r11; break;
9607 case 13: u64EffAddr = pCtx->r13; break;
9608 case 14: u64EffAddr = pCtx->r14; break;
9609 case 15: u64EffAddr = pCtx->r15; break;
9610 /* SIB */
9611 case 4:
9612 case 12:
9613 {
9614 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9615
9616 /* Get the index and scale it. */
9617 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pIemCpu->uRexIndex)
9618 {
9619 case 0: u64EffAddr = pCtx->rax; break;
9620 case 1: u64EffAddr = pCtx->rcx; break;
9621 case 2: u64EffAddr = pCtx->rdx; break;
9622 case 3: u64EffAddr = pCtx->rbx; break;
9623 case 4: u64EffAddr = 0; /*none */ break;
9624 case 5: u64EffAddr = pCtx->rbp; break;
9625 case 6: u64EffAddr = pCtx->rsi; break;
9626 case 7: u64EffAddr = pCtx->rdi; break;
9627 case 8: u64EffAddr = pCtx->r8; break;
9628 case 9: u64EffAddr = pCtx->r9; break;
9629 case 10: u64EffAddr = pCtx->r10; break;
9630 case 11: u64EffAddr = pCtx->r11; break;
9631 case 12: u64EffAddr = pCtx->r12; break;
9632 case 13: u64EffAddr = pCtx->r13; break;
9633 case 14: u64EffAddr = pCtx->r14; break;
9634 case 15: u64EffAddr = pCtx->r15; break;
9635 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9636 }
9637 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9638
9639 /* add base */
9640 switch ((bSib & X86_SIB_BASE_MASK) | pIemCpu->uRexB)
9641 {
9642 case 0: u64EffAddr += pCtx->rax; break;
9643 case 1: u64EffAddr += pCtx->rcx; break;
9644 case 2: u64EffAddr += pCtx->rdx; break;
9645 case 3: u64EffAddr += pCtx->rbx; break;
9646 case 4: u64EffAddr += pCtx->rsp; SET_SS_DEF(); break;
9647 case 6: u64EffAddr += pCtx->rsi; break;
9648 case 7: u64EffAddr += pCtx->rdi; break;
9649 case 8: u64EffAddr += pCtx->r8; break;
9650 case 9: u64EffAddr += pCtx->r9; break;
9651 case 10: u64EffAddr += pCtx->r10; break;
9652 case 11: u64EffAddr += pCtx->r11; break;
9653 case 12: u64EffAddr += pCtx->r12; break;
9654 case 14: u64EffAddr += pCtx->r14; break;
9655 case 15: u64EffAddr += pCtx->r15; break;
9656 /* complicated encodings */
9657 case 5:
9658 case 13:
9659 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9660 {
9661 if (!pIemCpu->uRexB)
9662 {
9663 u64EffAddr += pCtx->rbp;
9664 SET_SS_DEF();
9665 }
9666 else
9667 u64EffAddr += pCtx->r13;
9668 }
9669 else
9670 {
9671 uint32_t u32Disp;
9672 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9673 u64EffAddr += (int32_t)u32Disp;
9674 }
9675 break;
9676 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9677 }
9678 break;
9679 }
9680 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9681 }
9682
9683 /* Get and add the displacement. */
9684 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9685 {
9686 case 0:
9687 break;
9688 case 1:
9689 {
9690 int8_t i8Disp;
9691 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9692 u64EffAddr += i8Disp;
9693 break;
9694 }
9695 case 2:
9696 {
9697 uint32_t u32Disp;
9698 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9699 u64EffAddr += (int32_t)u32Disp;
9700 break;
9701 }
9702 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
9703 }
9704
9705 }
9706
9707 if (pIemCpu->enmEffAddrMode == IEMMODE_64BIT)
9708 *pGCPtrEff = u64EffAddr;
9709 else
9710 {
9711 Assert(pIemCpu->enmEffAddrMode == IEMMODE_32BIT);
9712 *pGCPtrEff = u64EffAddr & UINT32_MAX;
9713 }
9714 }
9715
9716 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
9717 return VINF_SUCCESS;
9718}
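/* Worked examples (illustrative) for iemOpHlpCalcRmEffAddr:
 *  - 16-bit addressing, bRm=0x46 (mod=01, rm=110): u16EffAddr = BP + disp8
 *    and SET_SS_DEF() selects SS as the default segment, i.e. [bp+disp8].
 *  - 64-bit mode, bRm=0x05 (mod=00, rm=101): the rip+disp32 branch is taken,
 *    so the effective address is the RIP of the next instruction plus the
 *    sign-extended 32-bit displacement. */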
9719
9720/** @} */
9721
9722
9723
9724/*
9725 * Include the instructions
9726 */
9727#include "IEMAllInstructions.cpp.h"
9728
9729
9730
9731
9732#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
9733
9734/**
9735 * Sets up execution verification mode.
9736 */
9737IEM_STATIC void iemExecVerificationModeSetup(PIEMCPU pIemCpu)
9738{
9739 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
9740 PCPUMCTX pOrgCtx = pIemCpu->CTX_SUFF(pCtx);
9741
9742 /*
9743 * Always note down the address of the current instruction.
9744 */
9745 pIemCpu->uOldCs = pOrgCtx->cs.Sel;
9746 pIemCpu->uOldRip = pOrgCtx->rip;
9747
9748 /*
9749 * Enable verification and/or logging.
9750 */
9751 bool fNewNoRem = !LogIs6Enabled(); /* logging triggers the no-rem/rem verification stuff */
9752 if ( fNewNoRem
9753 && ( 0
9754#if 0 /* auto enable on first paged protected mode interrupt */
9755 || ( pOrgCtx->eflags.Bits.u1IF
9756 && (pOrgCtx->cr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG)
9757 && TRPMHasTrap(pVCpu)
9758 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip) )
9759#endif
9760#if 0
9761 || ( pOrgCtx->cs.Sel == 0x10
9762 && ( pOrgCtx->rip == 0x90119e3e
9763 || pOrgCtx->rip == 0x901d9810))
9764#endif
9765#if 0 /* Auto enable DSL - FPU stuff. */
9766 || ( pOrgCtx->cs.Sel == 0x10
9767 && (// pOrgCtx->rip == 0xc02ec07f
9768 //|| pOrgCtx->rip == 0xc02ec082
9769 //|| pOrgCtx->rip == 0xc02ec0c9
9770 0
9771 || pOrgCtx->rip == 0x0c010e7c4 /* fxsave */ ) )
9772#endif
9773#if 0 /* Auto enable DSL - fstp st0 stuff. */
9774 || (pOrgCtx->cs.Sel == 0x23 && pOrgCtx->rip == 0x804aff7)
9775#endif
9776#if 0
9777 || pOrgCtx->rip == 0x9022bb3a
9778#endif
9779#if 0
9780 || (pOrgCtx->cs.Sel == 0x58 && pOrgCtx->rip == 0x3be) /* NT4SP1 sidt/sgdt in early loader code */
9781#endif
9782#if 0
9783 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013ec28) /* NT4SP1 first str (early boot) */
9784 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x80119e3f) /* NT4SP1 second str (early boot) */
9785#endif
9786#if 0 /* NT4SP1 - later on the blue screen, things goes wrong... */
9787 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8010a5df)
9788 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013a7c4)
9789 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013a7d2)
9790#endif
9791#if 0 /* NT4SP1 - xadd early boot. */
9792 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8019cf0f)
9793#endif
9794#if 0 /* NT4SP1 - wrmsr (intel MSR). */
9795 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8011a6d4)
9796#endif
9797#if 0 /* NT4SP1 - cmpxchg (AMD). */
9798 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x801684c1)
9799#endif
9800#if 0 /* NT4SP1 - fnstsw + 2 (AMD). */
9801 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x801c6b88+2)
9802#endif
9803#if 0 /* NT4SP1 - iret to v8086 -- too generic a place? (N/A with GAs installed) */
9804 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013bd5d)
9805
9806#endif
9807#if 0 /* NT4SP1 - iret to v8086 (executing edlin) */
9808 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013b609)
9809
9810#endif
9811#if 0 /* NT4SP1 - frstor [ecx] */
9812 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013d11f)
9813#endif
9814#if 0 /* xxxxxx - All long mode code. */
9815 || (pOrgCtx->msrEFER & MSR_K6_EFER_LMA)
9816#endif
9817#if 0 /* rep movsq linux 3.7 64-bit boot. */
9818 || (pOrgCtx->rip == 0x0000000000100241)
9819#endif
9820#if 0 /* linux 3.7 64-bit boot - '000000000215e240'. */
9821 || (pOrgCtx->rip == 0x000000000215e240)
9822#endif
9823#if 0 /* DOS's size-overridden iret to v8086. */
9824 || (pOrgCtx->rip == 0x427 && pOrgCtx->cs.Sel == 0xb8)
9825#endif
9826 )
9827 )
9828 {
9829 RTLogGroupSettings(NULL, "iem.eo.l6.l2");
9830 RTLogFlags(NULL, "enabled");
9831 fNewNoRem = false;
9832 }
9833 if (fNewNoRem != pIemCpu->fNoRem)
9834 {
9835 pIemCpu->fNoRem = fNewNoRem;
9836 if (!fNewNoRem)
9837 {
9838 LogAlways(("Enabling verification mode!\n"));
9839 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
9840 }
9841 else
9842 LogAlways(("Disabling verification mode!\n"));
9843 }
9844
9845 /*
9846 * Switch state.
9847 */
9848 if (IEM_VERIFICATION_ENABLED(pIemCpu))
9849 {
9850 static CPUMCTX s_DebugCtx; /* Ugly! */
9851
9852 s_DebugCtx = *pOrgCtx;
9853 pIemCpu->CTX_SUFF(pCtx) = &s_DebugCtx;
9854 }
9855
9856 /*
9857 * See if there is an interrupt pending in TRPM and inject it if we can.
9858 */
9859 pIemCpu->uInjectCpl = UINT8_MAX;
9860 if ( pOrgCtx->eflags.Bits.u1IF
9861 && TRPMHasTrap(pVCpu)
9862 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip)
9863 {
9864 uint8_t u8TrapNo;
9865 TRPMEVENT enmType;
9866 RTGCUINT uErrCode;
9867 RTGCPTR uCr2;
9868 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
9869 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
9870 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
9871 TRPMResetTrap(pVCpu);
9872 pIemCpu->uInjectCpl = pIemCpu->uCpl;
9873 }
9874
9875 /*
9876 * Reset the counters.
9877 */
9878 pIemCpu->cIOReads = 0;
9879 pIemCpu->cIOWrites = 0;
9880 pIemCpu->fIgnoreRaxRdx = false;
9881 pIemCpu->fOverlappingMovs = false;
9882 pIemCpu->fProblematicMemory = false;
9883 pIemCpu->fUndefinedEFlags = 0;
9884
9885 if (IEM_VERIFICATION_ENABLED(pIemCpu))
9886 {
9887 /*
9888 * Free all verification records.
9889 */
9890 PIEMVERIFYEVTREC pEvtRec = pIemCpu->pIemEvtRecHead;
9891 pIemCpu->pIemEvtRecHead = NULL;
9892 pIemCpu->ppIemEvtRecNext = &pIemCpu->pIemEvtRecHead;
9893 do
9894 {
9895 while (pEvtRec)
9896 {
9897 PIEMVERIFYEVTREC pNext = pEvtRec->pNext;
9898 pEvtRec->pNext = pIemCpu->pFreeEvtRec;
9899 pIemCpu->pFreeEvtRec = pEvtRec;
9900 pEvtRec = pNext;
9901 }
9902 pEvtRec = pIemCpu->pOtherEvtRecHead;
9903 pIemCpu->pOtherEvtRecHead = NULL;
9904 pIemCpu->ppOtherEvtRecNext = &pIemCpu->pOtherEvtRecHead;
9905 } while (pEvtRec);
9906 }
9907}
9908
9909
9910/**
9911 * Allocate an event record.
9912 * @returns Pointer to a record.
9913 */
9914IEM_STATIC PIEMVERIFYEVTREC iemVerifyAllocRecord(PIEMCPU pIemCpu)
9915{
9916 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
9917 return NULL;
9918
9919 PIEMVERIFYEVTREC pEvtRec = pIemCpu->pFreeEvtRec;
9920 if (pEvtRec)
9921 pIemCpu->pFreeEvtRec = pEvtRec->pNext;
9922 else
9923 {
9924 if (!pIemCpu->ppIemEvtRecNext)
9925 return NULL; /* Too early (fake PCIBIOS), ignore notification. */
9926
9927 pEvtRec = (PIEMVERIFYEVTREC)MMR3HeapAlloc(IEMCPU_TO_VM(pIemCpu), MM_TAG_EM /* lazy bird*/, sizeof(*pEvtRec));
9928 if (!pEvtRec)
9929 return NULL;
9930 }
9931 pEvtRec->enmEvent = IEMVERIFYEVENT_INVALID;
9932 pEvtRec->pNext = NULL;
9933 return pEvtRec;
9934}
9935
9936
9937/**
9938 * IOMMMIORead notification.
9939 */
9940VMM_INT_DECL(void) IEMNotifyMMIORead(PVM pVM, RTGCPHYS GCPhys, size_t cbValue)
9941{
9942 PVMCPU pVCpu = VMMGetCpu(pVM);
9943 if (!pVCpu)
9944 return;
9945 PIEMCPU pIemCpu = &pVCpu->iem.s;
9946 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
9947 if (!pEvtRec)
9948 return;
9949 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
9950 pEvtRec->u.RamRead.GCPhys = GCPhys;
9951 pEvtRec->u.RamRead.cb = (uint32_t)cbValue;
9952 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
9953 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
9954}
9955
9956
9957/**
9958 * IOMMMIOWrite notification.
9959 */
9960VMM_INT_DECL(void) IEMNotifyMMIOWrite(PVM pVM, RTGCPHYS GCPhys, uint32_t u32Value, size_t cbValue)
9961{
9962 PVMCPU pVCpu = VMMGetCpu(pVM);
9963 if (!pVCpu)
9964 return;
9965 PIEMCPU pIemCpu = &pVCpu->iem.s;
9966 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
9967 if (!pEvtRec)
9968 return;
9969 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
9970 pEvtRec->u.RamWrite.GCPhys = GCPhys;
9971 pEvtRec->u.RamWrite.cb = (uint32_t)cbValue;
9972 pEvtRec->u.RamWrite.ab[0] = RT_BYTE1(u32Value);
9973 pEvtRec->u.RamWrite.ab[1] = RT_BYTE2(u32Value);
9974 pEvtRec->u.RamWrite.ab[2] = RT_BYTE3(u32Value);
9975 pEvtRec->u.RamWrite.ab[3] = RT_BYTE4(u32Value);
9976 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
9977 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
9978}
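/* Example (illustrative): a 2-byte MMIO write of 0xBEEF records ab[0]=0xEF
 * and ab[1]=0xBE, since RT_BYTE1 yields the least significant byte; only the
 * first cbValue bytes of ab[] are meaningful when the records are compared. */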
9979
9980
9981/**
9982 * IOMIOPortRead notification.
9983 */
9984VMM_INT_DECL(void) IEMNotifyIOPortRead(PVM pVM, RTIOPORT Port, size_t cbValue)
9985{
9986 PVMCPU pVCpu = VMMGetCpu(pVM);
9987 if (!pVCpu)
9988 return;
9989 PIEMCPU pIemCpu = &pVCpu->iem.s;
9990 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
9991 if (!pEvtRec)
9992 return;
9993 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
9994 pEvtRec->u.IOPortRead.Port = Port;
9995 pEvtRec->u.IOPortRead.cbValue = (uint32_t)cbValue;
9996 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
9997 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
9998}
9999
10000/**
10001 * IOMIOPortWrite notification.
10002 */
10003VMM_INT_DECL(void) IEMNotifyIOPortWrite(PVM pVM, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
10004{
10005 PVMCPU pVCpu = VMMGetCpu(pVM);
10006 if (!pVCpu)
10007 return;
10008 PIEMCPU pIemCpu = &pVCpu->iem.s;
10009 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
10010 if (!pEvtRec)
10011 return;
10012 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
10013 pEvtRec->u.IOPortWrite.Port = Port;
10014 pEvtRec->u.IOPortWrite.cbValue = (uint32_t)cbValue;
10015 pEvtRec->u.IOPortWrite.u32Value = u32Value;
10016 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
10017 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
10018}
10019
10020
10021VMM_INT_DECL(void) IEMNotifyIOPortReadString(PVM pVM, RTIOPORT Port, void *pvDst, RTGCUINTREG cTransfers, size_t cbValue)
10022{
10023 AssertFailed();
10024}
10025
10026
10027VMM_INT_DECL(void) IEMNotifyIOPortWriteString(PVM pVM, RTIOPORT Port, void const *pvSrc, RTGCUINTREG cTransfers, size_t cbValue)
10028{
10029 AssertFailed();
10030}
10031
10032
10033/**
10034 * Fakes and records an I/O port read.
10035 *
10036 * @returns VINF_SUCCESS.
10037 * @param pIemCpu The IEM per CPU data.
10038 * @param Port The I/O port.
10039 * @param pu32Value Where to store the fake value.
10040 * @param cbValue The size of the access.
10041 */
10042IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
10043{
10044 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
10045 if (pEvtRec)
10046 {
10047 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
10048 pEvtRec->u.IOPortRead.Port = Port;
10049 pEvtRec->u.IOPortRead.cbValue = (uint32_t)cbValue;
10050 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
10051 *pIemCpu->ppIemEvtRecNext = pEvtRec;
10052 }
10053 pIemCpu->cIOReads++;
10054 *pu32Value = 0xcccccccc;
10055 return VINF_SUCCESS;
10056}
10057
10058
10059/**
10060 * Fakes and records an I/O port write.
10061 *
10062 * @returns VINF_SUCCESS.
10063 * @param pIemCpu The IEM per CPU data.
10064 * @param Port The I/O port.
10065 * @param u32Value The value being written.
10066 * @param cbValue The size of the access.
10067 */
10068IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
10069{
10070 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
10071 if (pEvtRec)
10072 {
10073 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
10074 pEvtRec->u.IOPortWrite.Port = Port;
10075 pEvtRec->u.IOPortWrite.cbValue = (uint32_t)cbValue;
10076 pEvtRec->u.IOPortWrite.u32Value = u32Value;
10077 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
10078 *pIemCpu->ppIemEvtRecNext = pEvtRec;
10079 }
10080 pIemCpu->cIOWrites++;
10081 return VINF_SUCCESS;
10082}
10083
10084
10085/**
10086 * Used to add extra details about a stub case.
10087 * @param pIemCpu The IEM per CPU state.
10088 */
10089IEM_STATIC void iemVerifyAssertMsg2(PIEMCPU pIemCpu)
10090{
10091 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
10092 PVM pVM = IEMCPU_TO_VM(pIemCpu);
10093 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
10094 char szRegs[4096];
10095 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
10096 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
10097 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
10098 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
10099 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
10100 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
10101 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
10102 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
10103 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
10104 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
10105 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
10106 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
10107 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
10108 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
10109 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
10110 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
10111 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
10112 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
10113 " efer=%016VR{efer}\n"
10114 " pat=%016VR{pat}\n"
10115 " sf_mask=%016VR{sf_mask}\n"
10116 "krnl_gs_base=%016VR{krnl_gs_base}\n"
10117 " lstar=%016VR{lstar}\n"
10118 " star=%016VR{star} cstar=%016VR{cstar}\n"
10119 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
10120 );
10121
10122 char szInstr1[256];
10123 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pIemCpu->uOldCs, pIemCpu->uOldRip,
10124 DBGF_DISAS_FLAGS_DEFAULT_MODE,
10125 szInstr1, sizeof(szInstr1), NULL);
10126 char szInstr2[256];
10127 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
10128 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
10129 szInstr2, sizeof(szInstr2), NULL);
10130
10131 RTAssertMsg2Weak("%s%s\n%s\n", szRegs, szInstr1, szInstr2);
10132}
10133
10134
10135/**
10136 * Used by iemVerifyAssertRecord and iemVerifyAssertRecords to add a record
10137 * dump to the assertion info.
10138 *
10139 * @param pEvtRec The record to dump.
10140 */
10141IEM_STATIC void iemVerifyAssertAddRecordDump(PIEMVERIFYEVTREC pEvtRec)
10142{
10143 switch (pEvtRec->enmEvent)
10144 {
10145 case IEMVERIFYEVENT_IOPORT_READ:
10146 RTAssertMsg2Add("I/O PORT READ from %#6x, %d bytes\n",
10147 pEvtRec->u.IOPortRead.Port,
10148 pEvtRec->u.IOPortRead.cbValue);
10149 break;
10150 case IEMVERIFYEVENT_IOPORT_WRITE:
10151 RTAssertMsg2Add("I/O PORT WRITE to %#6x, %d bytes, value %#x\n",
10152 pEvtRec->u.IOPortWrite.Port,
10153 pEvtRec->u.IOPortWrite.cbValue,
10154 pEvtRec->u.IOPortWrite.u32Value);
10155 break;
10156 case IEMVERIFYEVENT_RAM_READ:
10157 RTAssertMsg2Add("RAM READ at %RGp, %#4zx bytes\n",
10158 pEvtRec->u.RamRead.GCPhys,
10159 pEvtRec->u.RamRead.cb);
10160 break;
10161 case IEMVERIFYEVENT_RAM_WRITE:
10162 RTAssertMsg2Add("RAM WRITE at %RGp, %#4zx bytes: %.*Rhxs\n",
10163 pEvtRec->u.RamWrite.GCPhys,
10164 pEvtRec->u.RamWrite.cb,
10165 (int)pEvtRec->u.RamWrite.cb,
10166 pEvtRec->u.RamWrite.ab);
10167 break;
10168 default:
10169 AssertMsgFailed(("Invalid event type %d\n", pEvtRec->enmEvent));
10170 break;
10171 }
10172}
10173
10174
10175/**
10176 * Raises an assertion on the specified records, showing the given message with
10177 * record dumps attached.
10178 *
10179 * @param pIemCpu The IEM per CPU data.
10180 * @param pEvtRec1 The first record.
10181 * @param pEvtRec2 The second record.
10182 * @param pszMsg The message explaining why we're asserting.
10183 */
10184IEM_STATIC void iemVerifyAssertRecords(PIEMCPU pIemCpu, PIEMVERIFYEVTREC pEvtRec1, PIEMVERIFYEVTREC pEvtRec2, const char *pszMsg)
10185{
10186 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
10187 iemVerifyAssertAddRecordDump(pEvtRec1);
10188 iemVerifyAssertAddRecordDump(pEvtRec2);
10189 iemVerifyAssertMsg2(pIemCpu);
10190 RTAssertPanic();
10191}
10192
10193
10194/**
10195 * Raises an assertion on the specified record, showing the given message with
10196 * a record dump attached.
10197 *
10198 * @param pIemCpu The IEM per CPU data.
10199 * @param pEvtRec The record.
10200 * @param pszMsg The message explaining why we're asserting.
10201 */
10202IEM_STATIC void iemVerifyAssertRecord(PIEMCPU pIemCpu, PIEMVERIFYEVTREC pEvtRec, const char *pszMsg)
10203{
10204 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
10205 iemVerifyAssertAddRecordDump(pEvtRec);
10206 iemVerifyAssertMsg2(pIemCpu);
10207 RTAssertPanic();
10208}
10209
10210
10211/**
10212 * Verifies a write record.
10213 *
10214 * @param pIemCpu The IEM per CPU data.
10215 * @param pEvtRec The write record.
10216 * @param fRem Set if REM was doing the other execution. If clear,
10217 * it was HM.
10218 */
10219IEM_STATIC void iemVerifyWriteRecord(PIEMCPU pIemCpu, PIEMVERIFYEVTREC pEvtRec, bool fRem)
10220{
10221 uint8_t abBuf[sizeof(pEvtRec->u.RamWrite.ab)]; RT_ZERO(abBuf);
10222 Assert(sizeof(abBuf) >= pEvtRec->u.RamWrite.cb);
10223 int rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), abBuf, pEvtRec->u.RamWrite.GCPhys, pEvtRec->u.RamWrite.cb);
10224 if ( RT_FAILURE(rc)
10225 || memcmp(abBuf, pEvtRec->u.RamWrite.ab, pEvtRec->u.RamWrite.cb) )
10226 {
10227 /* fend off ins */
10228 if ( !pIemCpu->cIOReads
10229 || pEvtRec->u.RamWrite.ab[0] != 0xcc
10230 || ( pEvtRec->u.RamWrite.cb != 1
10231 && pEvtRec->u.RamWrite.cb != 2
10232 && pEvtRec->u.RamWrite.cb != 4) )
10233 {
10234 /* fend off ROMs and MMIO */
10235 if ( pEvtRec->u.RamWrite.GCPhys - UINT32_C(0x000a0000) > UINT32_C(0x60000)
10236 && pEvtRec->u.RamWrite.GCPhys - UINT32_C(0xfffc0000) > UINT32_C(0x40000) )
10237 {
10238 /* fend off fxsave */
10239 if (pEvtRec->u.RamWrite.cb != 512)
10240 {
10241 const char *pszWho = fRem ? "rem" : HMR3IsVmxEnabled(IEMCPU_TO_VM(pIemCpu)->pUVM) ? "vmx" : "svm";
10242 RTAssertMsg1(NULL, __LINE__, __FILE__, __PRETTY_FUNCTION__);
10243 RTAssertMsg2Weak("Memory at %RGv differs\n", pEvtRec->u.RamWrite.GCPhys);
10244 RTAssertMsg2Add("%s: %.*Rhxs\n"
10245 "iem: %.*Rhxs\n",
10246 pszWho, pEvtRec->u.RamWrite.cb, abBuf,
10247 pEvtRec->u.RamWrite.cb, pEvtRec->u.RamWrite.ab);
10248 iemVerifyAssertAddRecordDump(pEvtRec);
10249 iemVerifyAssertMsg2(pIemCpu);
10250 RTAssertPanic();
10251 }
10252 }
10253 }
10254 }
10255
10256}
10257
10258/**
10259 * Performs the post-execution verification checks.
10260 */
10261IEM_STATIC void iemExecVerificationModeCheck(PIEMCPU pIemCpu)
10262{
10263 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
10264 return;
10265
10266 /*
10267 * Switch back the state.
10268 */
10269 PCPUMCTX pOrgCtx = CPUMQueryGuestCtxPtr(IEMCPU_TO_VMCPU(pIemCpu));
10270 PCPUMCTX pDebugCtx = pIemCpu->CTX_SUFF(pCtx);
10271 Assert(pOrgCtx != pDebugCtx);
10272 pIemCpu->CTX_SUFF(pCtx) = pOrgCtx;
10273
10274 /*
10275 * Execute the instruction in REM.
10276 */
10277 bool fRem = false;
10278 PVM pVM = IEMCPU_TO_VM(pIemCpu);
10279 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
10280 VBOXSTRICTRC rc = VERR_EM_CANNOT_EXEC_GUEST;
10281#ifdef IEM_VERIFICATION_MODE_FULL_HM
10282 if ( HMIsEnabled(pVM)
10283 && pIemCpu->cIOReads == 0
10284 && pIemCpu->cIOWrites == 0
10285 && !pIemCpu->fProblematicMemory)
10286 {
10287 uint64_t uStartRip = pOrgCtx->rip;
10288 unsigned iLoops = 0;
10289 do
10290 {
10291 rc = EMR3HmSingleInstruction(pVM, pVCpu, EM_ONE_INS_FLAGS_RIP_CHANGE);
10292 iLoops++;
10293 } while ( rc == VINF_SUCCESS
10294 || ( rc == VINF_EM_DBG_STEPPED
10295 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
10296 && EMGetInhibitInterruptsPC(pVCpu) == pOrgCtx->rip)
10297 || ( pOrgCtx->rip != pDebugCtx->rip
10298 && pIemCpu->uInjectCpl != UINT8_MAX
10299 && iLoops < 8) );
10300 if (rc == VINF_EM_RESCHEDULE && pOrgCtx->rip != uStartRip)
10301 rc = VINF_SUCCESS;
10302 }
10303#endif
10304 if ( rc == VERR_EM_CANNOT_EXEC_GUEST
10305 || rc == VINF_IOM_R3_IOPORT_READ
10306 || rc == VINF_IOM_R3_IOPORT_WRITE
10307 || rc == VINF_IOM_R3_MMIO_READ
10308 || rc == VINF_IOM_R3_MMIO_READ_WRITE
10309 || rc == VINF_IOM_R3_MMIO_WRITE
10310 || rc == VINF_CPUM_R3_MSR_READ
10311 || rc == VINF_CPUM_R3_MSR_WRITE
10312 || rc == VINF_EM_RESCHEDULE
10313 )
10314 {
10315 EMRemLock(pVM);
10316 rc = REMR3EmulateInstruction(pVM, pVCpu);
10317 AssertRC(rc);
10318 EMRemUnlock(pVM);
10319 fRem = true;
10320 }
10321
10322 /*
10323 * Compare the register states.
10324 */
10325 unsigned cDiffs = 0;
10326 if (memcmp(pOrgCtx, pDebugCtx, sizeof(*pDebugCtx)))
10327 {
10328 //Log(("REM and IEM ends up with different registers!\n"));
10329 const char *pszWho = fRem ? "rem" : HMR3IsVmxEnabled(pVM->pUVM) ? "vmx" : "svm";
10330
10331# define CHECK_FIELD(a_Field) \
10332 do \
10333 { \
10334 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
10335 { \
10336 switch (sizeof(pOrgCtx->a_Field)) \
10337 { \
10338 case 1: RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
10339 case 2: RTAssertMsg2Weak(" %8s differs - iem=%04x - %s=%04x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
10340 case 4: RTAssertMsg2Weak(" %8s differs - iem=%08x - %s=%08x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
10341 case 8: RTAssertMsg2Weak(" %8s differs - iem=%016llx - %s=%016llx\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
10342 default: RTAssertMsg2Weak(" %8s differs\n", #a_Field); break; \
10343 } \
10344 cDiffs++; \
10345 } \
10346 } while (0)
10347# define CHECK_XSTATE_FIELD(a_Field) \
10348 do \
10349 { \
10350 if (pOrgXState->a_Field != pDebugXState->a_Field) \
10351 { \
10352 switch (sizeof(pOrgXState->a_Field)) \
10353 { \
10354 case 1: RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
10355 case 2: RTAssertMsg2Weak(" %8s differs - iem=%04x - %s=%04x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
10356 case 4: RTAssertMsg2Weak(" %8s differs - iem=%08x - %s=%08x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
10357 case 8: RTAssertMsg2Weak(" %8s differs - iem=%016llx - %s=%016llx\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
10358 default: RTAssertMsg2Weak(" %8s differs\n", #a_Field); break; \
10359 } \
10360 cDiffs++; \
10361 } \
10362 } while (0)
10363
10364# define CHECK_BIT_FIELD(a_Field) \
10365 do \
10366 { \
10367 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
10368 { \
10369 RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); \
10370 cDiffs++; \
10371 } \
10372 } while (0)
10373
10374# define CHECK_SEL(a_Sel) \
10375 do \
10376 { \
10377 CHECK_FIELD(a_Sel.Sel); \
10378 CHECK_FIELD(a_Sel.Attr.u); \
10379 CHECK_FIELD(a_Sel.u64Base); \
10380 CHECK_FIELD(a_Sel.u32Limit); \
10381 CHECK_FIELD(a_Sel.fFlags); \
10382 } while (0)
10383
10384 PX86XSAVEAREA pOrgXState = pOrgCtx->CTX_SUFF(pXState);
10385 PX86XSAVEAREA pDebugXState = pDebugCtx->CTX_SUFF(pXState);
10386
10387#if 1 /* The recompiler doesn't update these the intel way. */
10388 if (fRem)
10389 {
10390 pOrgXState->x87.FOP = pDebugXState->x87.FOP;
10391 pOrgXState->x87.FPUIP = pDebugXState->x87.FPUIP;
10392 pOrgXState->x87.CS = pDebugXState->x87.CS;
10393 pOrgXState->x87.Rsrvd1 = pDebugXState->x87.Rsrvd1;
10394 pOrgXState->x87.FPUDP = pDebugXState->x87.FPUDP;
10395 pOrgXState->x87.DS = pDebugXState->x87.DS;
10396 pOrgXState->x87.Rsrvd2 = pDebugXState->x87.Rsrvd2;
10397 //pOrgXState->x87.MXCSR_MASK = pDebugXState->x87.MXCSR_MASK;
10398 if ((pOrgXState->x87.FSW & X86_FSW_TOP_MASK) == (pDebugXState->x87.FSW & X86_FSW_TOP_MASK))
10399 pOrgXState->x87.FSW = pDebugXState->x87.FSW;
10400 }
10401#endif
10402 if (memcmp(&pOrgXState->x87, &pDebugXState->x87, sizeof(pDebugXState->x87)))
10403 {
10404 RTAssertMsg2Weak(" the FPU state differs\n");
10405 cDiffs++;
10406 CHECK_XSTATE_FIELD(x87.FCW);
10407 CHECK_XSTATE_FIELD(x87.FSW);
10408 CHECK_XSTATE_FIELD(x87.FTW);
10409 CHECK_XSTATE_FIELD(x87.FOP);
10410 CHECK_XSTATE_FIELD(x87.FPUIP);
10411 CHECK_XSTATE_FIELD(x87.CS);
10412 CHECK_XSTATE_FIELD(x87.Rsrvd1);
10413 CHECK_XSTATE_FIELD(x87.FPUDP);
10414 CHECK_XSTATE_FIELD(x87.DS);
10415 CHECK_XSTATE_FIELD(x87.Rsrvd2);
10416 CHECK_XSTATE_FIELD(x87.MXCSR);
10417 CHECK_XSTATE_FIELD(x87.MXCSR_MASK);
10418 CHECK_XSTATE_FIELD(x87.aRegs[0].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[0].au64[1]);
10419 CHECK_XSTATE_FIELD(x87.aRegs[1].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[1].au64[1]);
10420 CHECK_XSTATE_FIELD(x87.aRegs[2].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[2].au64[1]);
10421 CHECK_XSTATE_FIELD(x87.aRegs[3].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[3].au64[1]);
10422 CHECK_XSTATE_FIELD(x87.aRegs[4].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[4].au64[1]);
10423 CHECK_XSTATE_FIELD(x87.aRegs[5].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[5].au64[1]);
10424 CHECK_XSTATE_FIELD(x87.aRegs[6].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[6].au64[1]);
10425 CHECK_XSTATE_FIELD(x87.aRegs[7].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[7].au64[1]);
10426 CHECK_XSTATE_FIELD(x87.aXMM[ 0].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 0].au64[1]);
10427 CHECK_XSTATE_FIELD(x87.aXMM[ 1].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 1].au64[1]);
10428 CHECK_XSTATE_FIELD(x87.aXMM[ 2].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 2].au64[1]);
10429 CHECK_XSTATE_FIELD(x87.aXMM[ 3].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 3].au64[1]);
10430 CHECK_XSTATE_FIELD(x87.aXMM[ 4].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 4].au64[1]);
10431 CHECK_XSTATE_FIELD(x87.aXMM[ 5].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 5].au64[1]);
10432 CHECK_XSTATE_FIELD(x87.aXMM[ 6].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 6].au64[1]);
10433 CHECK_XSTATE_FIELD(x87.aXMM[ 7].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 7].au64[1]);
10434 CHECK_XSTATE_FIELD(x87.aXMM[ 8].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 8].au64[1]);
10435 CHECK_XSTATE_FIELD(x87.aXMM[ 9].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 9].au64[1]);
10436 CHECK_XSTATE_FIELD(x87.aXMM[10].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[10].au64[1]);
10437 CHECK_XSTATE_FIELD(x87.aXMM[11].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[11].au64[1]);
10438 CHECK_XSTATE_FIELD(x87.aXMM[12].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[12].au64[1]);
10439 CHECK_XSTATE_FIELD(x87.aXMM[13].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[13].au64[1]);
10440 CHECK_XSTATE_FIELD(x87.aXMM[14].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[14].au64[1]);
10441 CHECK_XSTATE_FIELD(x87.aXMM[15].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[15].au64[1]);
10442 for (unsigned i = 0; i < RT_ELEMENTS(pOrgXState->x87.au32RsrvdRest); i++)
10443 CHECK_XSTATE_FIELD(x87.au32RsrvdRest[i]);
10444 }
10445 CHECK_FIELD(rip);
10446 uint32_t fFlagsMask = UINT32_MAX & ~pIemCpu->fUndefinedEFlags;
10447 if ((pOrgCtx->rflags.u & fFlagsMask) != (pDebugCtx->rflags.u & fFlagsMask))
10448 {
10449 RTAssertMsg2Weak(" rflags differs - iem=%08llx %s=%08llx\n", pDebugCtx->rflags.u, pszWho, pOrgCtx->rflags.u);
10450 CHECK_BIT_FIELD(rflags.Bits.u1CF);
10451 CHECK_BIT_FIELD(rflags.Bits.u1Reserved0);
10452 CHECK_BIT_FIELD(rflags.Bits.u1PF);
10453 CHECK_BIT_FIELD(rflags.Bits.u1Reserved1);
10454 CHECK_BIT_FIELD(rflags.Bits.u1AF);
10455 CHECK_BIT_FIELD(rflags.Bits.u1Reserved2);
10456 CHECK_BIT_FIELD(rflags.Bits.u1ZF);
10457 CHECK_BIT_FIELD(rflags.Bits.u1SF);
10458 CHECK_BIT_FIELD(rflags.Bits.u1TF);
10459 CHECK_BIT_FIELD(rflags.Bits.u1IF);
10460 CHECK_BIT_FIELD(rflags.Bits.u1DF);
10461 CHECK_BIT_FIELD(rflags.Bits.u1OF);
10462 CHECK_BIT_FIELD(rflags.Bits.u2IOPL);
10463 CHECK_BIT_FIELD(rflags.Bits.u1NT);
10464 CHECK_BIT_FIELD(rflags.Bits.u1Reserved3);
10465 if (0 && !fRem) /** @todo debug the occasional cleared RF flag when running against VT-x. */
10466 CHECK_BIT_FIELD(rflags.Bits.u1RF);
10467 CHECK_BIT_FIELD(rflags.Bits.u1VM);
10468 CHECK_BIT_FIELD(rflags.Bits.u1AC);
10469 CHECK_BIT_FIELD(rflags.Bits.u1VIF);
10470 CHECK_BIT_FIELD(rflags.Bits.u1VIP);
10471 CHECK_BIT_FIELD(rflags.Bits.u1ID);
10472 }
10473
10474 if (pIemCpu->cIOReads != 1 && !pIemCpu->fIgnoreRaxRdx)
10475 CHECK_FIELD(rax);
10476 CHECK_FIELD(rcx);
10477 if (!pIemCpu->fIgnoreRaxRdx)
10478 CHECK_FIELD(rdx);
10479 CHECK_FIELD(rbx);
10480 CHECK_FIELD(rsp);
10481 CHECK_FIELD(rbp);
10482 CHECK_FIELD(rsi);
10483 CHECK_FIELD(rdi);
10484 CHECK_FIELD(r8);
10485 CHECK_FIELD(r9);
10486 CHECK_FIELD(r10);
10487 CHECK_FIELD(r11);
10488 CHECK_FIELD(r12);
10489 CHECK_FIELD(r13);
10490 CHECK_SEL(cs);
10491 CHECK_SEL(ss);
10492 CHECK_SEL(ds);
10493 CHECK_SEL(es);
10494 CHECK_SEL(fs);
10495 CHECK_SEL(gs);
10496 CHECK_FIELD(cr0);
10497
10498 /* Kludge #1: REM fetches code across the page boundary and faults on the next page, while we execute
10499 the faulting instruction first: 001b:77f61ff3 66 8b 42 02 mov ax, word [edx+002h] (NT4SP1) */
10500 /* Kludge #2: CR2 differs slightly on cross-page-boundary faults; we report the last address of the access,
10501 while REM reports the address of the first byte on the page. Pending investigation as to which is correct. */
10502 if (pOrgCtx->cr2 != pDebugCtx->cr2)
10503 {
10504 if (pIemCpu->uOldCs == 0x1b && pIemCpu->uOldRip == 0x77f61ff3 && fRem)
10505 { /* ignore */ }
10506 else if ( (pOrgCtx->cr2 & ~(uint64_t)3) == (pDebugCtx->cr2 & ~(uint64_t)3)
10507 && (pOrgCtx->cr2 & PAGE_OFFSET_MASK) == 0
10508 && fRem)
10509 { /* ignore */ }
10510 else
10511 CHECK_FIELD(cr2);
10512 }
10513 CHECK_FIELD(cr3);
10514 CHECK_FIELD(cr4);
10515 CHECK_FIELD(dr[0]);
10516 CHECK_FIELD(dr[1]);
10517 CHECK_FIELD(dr[2]);
10518 CHECK_FIELD(dr[3]);
10519 CHECK_FIELD(dr[6]);
10520 if (!fRem || (pOrgCtx->dr[7] & ~X86_DR7_RA1_MASK) != (pDebugCtx->dr[7] & ~X86_DR7_RA1_MASK)) /* REM 'mov drX,greg' bug.*/
10521 CHECK_FIELD(dr[7]);
10522 CHECK_FIELD(gdtr.cbGdt);
10523 CHECK_FIELD(gdtr.pGdt);
10524 CHECK_FIELD(idtr.cbIdt);
10525 CHECK_FIELD(idtr.pIdt);
10526 CHECK_SEL(ldtr);
10527 CHECK_SEL(tr);
10528 CHECK_FIELD(SysEnter.cs);
10529 CHECK_FIELD(SysEnter.eip);
10530 CHECK_FIELD(SysEnter.esp);
10531 CHECK_FIELD(msrEFER);
10532 CHECK_FIELD(msrSTAR);
10533 CHECK_FIELD(msrPAT);
10534 CHECK_FIELD(msrLSTAR);
10535 CHECK_FIELD(msrCSTAR);
10536 CHECK_FIELD(msrSFMASK);
10537 CHECK_FIELD(msrKERNELGSBASE);
10538
10539 if (cDiffs != 0)
10540 {
10541 DBGFR3Info(pVM->pUVM, "cpumguest", "verbose", NULL);
10542 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__);
10543 iemVerifyAssertMsg2(pIemCpu);
10544 RTAssertPanic();
10545 }
10546# undef CHECK_FIELD
10547# undef CHECK_BIT_FIELD
10548 }
10549
10550 /*
10551 * If the register state compared fine, check the verification event
10552 * records.
10553 */
10554 if (cDiffs == 0 && !pIemCpu->fOverlappingMovs)
10555 {
10556 /*
10557 * Compare verification event records.
10558 * - I/O port accesses should be a 1:1 match.
10559 */
10560 PIEMVERIFYEVTREC pIemRec = pIemCpu->pIemEvtRecHead;
10561 PIEMVERIFYEVTREC pOtherRec = pIemCpu->pOtherEvtRecHead;
10562 while (pIemRec && pOtherRec)
10563 {
10564 /* Since we might miss RAM writes and reads, ignore reads and verify
10565 any extra IEM write records against guest memory before comparing. */
10566 while ( IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent)
10567 && !IEMVERIFYEVENT_IS_RAM(pOtherRec->enmEvent)
10568 && pIemRec->pNext)
10569 {
10570 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
10571 iemVerifyWriteRecord(pIemCpu, pIemRec, fRem);
10572 pIemRec = pIemRec->pNext;
10573 }
10574
10575 /* Do the compare. */
10576 if (pIemRec->enmEvent != pOtherRec->enmEvent)
10577 {
10578 iemVerifyAssertRecords(pIemCpu, pIemRec, pOtherRec, "Type mismatches");
10579 break;
10580 }
10581 bool fEquals;
10582 switch (pIemRec->enmEvent)
10583 {
10584 case IEMVERIFYEVENT_IOPORT_READ:
10585 fEquals = pIemRec->u.IOPortRead.Port == pOtherRec->u.IOPortRead.Port
10586 && pIemRec->u.IOPortRead.cbValue == pOtherRec->u.IOPortRead.cbValue;
10587 break;
10588 case IEMVERIFYEVENT_IOPORT_WRITE:
10589 fEquals = pIemRec->u.IOPortWrite.Port == pOtherRec->u.IOPortWrite.Port
10590 && pIemRec->u.IOPortWrite.cbValue == pOtherRec->u.IOPortWrite.cbValue
10591 && pIemRec->u.IOPortWrite.u32Value == pOtherRec->u.IOPortWrite.u32Value;
10592 break;
10593 case IEMVERIFYEVENT_RAM_READ:
10594 fEquals = pIemRec->u.RamRead.GCPhys == pOtherRec->u.RamRead.GCPhys
10595 && pIemRec->u.RamRead.cb == pOtherRec->u.RamRead.cb;
10596 break;
10597 case IEMVERIFYEVENT_RAM_WRITE:
10598 fEquals = pIemRec->u.RamWrite.GCPhys == pOtherRec->u.RamWrite.GCPhys
10599 && pIemRec->u.RamWrite.cb == pOtherRec->u.RamWrite.cb
10600 && !memcmp(pIemRec->u.RamWrite.ab, pOtherRec->u.RamWrite.ab, pIemRec->u.RamWrite.cb);
10601 break;
10602 default:
10603 fEquals = false;
10604 break;
10605 }
10606 if (!fEquals)
10607 {
10608 iemVerifyAssertRecords(pIemCpu, pIemRec, pOtherRec, "Mismatch");
10609 break;
10610 }
10611
10612 /* advance */
10613 pIemRec = pIemRec->pNext;
10614 pOtherRec = pOtherRec->pNext;
10615 }
10616
10617 /* Ignore extra writes and reads. */
10618 while (pIemRec && IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent))
10619 {
10620 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
10621 iemVerifyWriteRecord(pIemCpu, pIemRec, fRem);
10622 pIemRec = pIemRec->pNext;
10623 }
10624 if (pIemRec != NULL)
10625 iemVerifyAssertRecord(pIemCpu, pIemRec, "Extra IEM record!");
10626 else if (pOtherRec != NULL)
10627 iemVerifyAssertRecord(pIemCpu, pOtherRec, "Extra Other record!");
10628 }
10629 pIemCpu->CTX_SUFF(pCtx) = pOrgCtx;
10630}
10631
10632#else /* !IEM_VERIFICATION_MODE_FULL || !IN_RING3 */
10633
10634/* stubs */
10635IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
10636{
10637 NOREF(pIemCpu); NOREF(Port); NOREF(pu32Value); NOREF(cbValue);
10638 return VERR_INTERNAL_ERROR;
10639}
10640
10641IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
10642{
10643 NOREF(pIemCpu); NOREF(Port); NOREF(u32Value); NOREF(cbValue);
10644 return VERR_INTERNAL_ERROR;
10645}
10646
10647#endif /* !IEM_VERIFICATION_MODE_FULL || !IN_RING3 */
10648
10649
10650#ifdef LOG_ENABLED
10651/**
10652 * Logs the current instruction.
10653 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10654 * @param pCtx The current CPU context.
10655 * @param fSameCtx Set if we have the same context information as the VMM,
10656 * clear if we may have already executed an instruction in
10657 * our debug context. When clear, we assume IEMCPU holds
10658 * valid CPU mode info.
10659 */
10660IEM_STATIC void iemLogCurInstr(PVMCPU pVCpu, PCPUMCTX pCtx, bool fSameCtx)
10661{
10662# ifdef IN_RING3
10663 if (LogIs2Enabled())
10664 {
10665 char szInstr[256];
10666 uint32_t cbInstr = 0;
10667 if (fSameCtx)
10668 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
10669 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
10670 szInstr, sizeof(szInstr), &cbInstr);
10671 else
10672 {
10673 uint32_t fFlags = 0;
10674 switch (pVCpu->iem.s.enmCpuMode)
10675 {
10676 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
10677 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
10678 case IEMMODE_16BIT:
10679 if (!(pCtx->cr0 & X86_CR0_PE) || pCtx->eflags.Bits.u1VM)
10680 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
10681 else
10682 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
10683 break;
10684 }
10685 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pCtx->cs.Sel, pCtx->rip, fFlags,
10686 szInstr, sizeof(szInstr), &cbInstr);
10687 }
10688
10689 PCX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
10690 Log2(("****\n"
10691 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
10692 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
10693 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
10694 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
10695 " %s\n"
10696 ,
10697 pCtx->eax, pCtx->ebx, pCtx->ecx, pCtx->edx, pCtx->esi, pCtx->edi,
10698 pCtx->eip, pCtx->esp, pCtx->ebp, pCtx->eflags.Bits.u2IOPL, pCtx->tr.Sel,
10699 pCtx->cs.Sel, pCtx->ss.Sel, pCtx->ds.Sel, pCtx->es.Sel,
10700 pCtx->fs.Sel, pCtx->gs.Sel, pCtx->eflags.u,
10701 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
10702 szInstr));
10703
10704 if (LogIs3Enabled())
10705 DBGFR3Info(pVCpu->pVMR3->pUVM, "cpumguest", "verbose", NULL);
10706 }
10707 else
10708# endif
10709 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n",
10710 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u));
10711}
10712#endif
10713
10714
10715/**
10716 * Makes status code adjustments (pass up from I/O and access handlers)
10717 * as well as maintaining statistics.
10718 *
10719 * @returns Strict VBox status code to pass up.
10720 * @param pIemCpu The IEM per CPU data.
10721 * @param rcStrict The status from executing an instruction.
10722 */
10723DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecStatusCodeFiddling(PIEMCPU pIemCpu, VBOXSTRICTRC rcStrict)
10724{
10725 if (rcStrict != VINF_SUCCESS)
10726 {
10727 if (RT_SUCCESS(rcStrict))
10728 {
10729 AssertMsg( (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
10730 || rcStrict == VINF_IOM_R3_IOPORT_READ
10731 || rcStrict == VINF_IOM_R3_IOPORT_WRITE
10732 || rcStrict == VINF_IOM_R3_MMIO_READ
10733 || rcStrict == VINF_IOM_R3_MMIO_READ_WRITE
10734 || rcStrict == VINF_IOM_R3_MMIO_WRITE
10735 || rcStrict == VINF_CPUM_R3_MSR_READ
10736 || rcStrict == VINF_CPUM_R3_MSR_WRITE
10737 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
10738 || rcStrict == VINF_EM_RAW_TO_R3
10739 || rcStrict == VINF_EM_RAW_EMULATE_IO_BLOCK
10740 /* raw-mode / virt handlers only: */
10741 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT
10742 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT
10743 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT
10744 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT
10745 || rcStrict == VINF_SELM_SYNC_GDT
10746 || rcStrict == VINF_CSAM_PENDING_ACTION
10747 || rcStrict == VINF_PATM_CHECK_PATCH_PAGE
10748 , ("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
10749/** @todo adjust for VINF_EM_RAW_EMULATE_INSTR */
10750 int32_t const rcPassUp = pIemCpu->rcPassUp;
10751 if (rcPassUp == VINF_SUCCESS)
10752 pIemCpu->cRetInfStatuses++;
10753 else if ( rcPassUp < VINF_EM_FIRST
10754 || rcPassUp > VINF_EM_LAST
10755 || rcPassUp < VBOXSTRICTRC_VAL(rcStrict))
10756 {
10757 Log(("IEM: rcPassUp=%Rrc! rcStrict=%Rrc\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
10758 pIemCpu->cRetPassUpStatus++;
10759 rcStrict = rcPassUp;
10760 }
10761 else
10762 {
10763 Log(("IEM: rcPassUp=%Rrc rcStrict=%Rrc!\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
10764 pIemCpu->cRetInfStatuses++;
10765 }
10766 }
10767 else if (rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED)
10768 pIemCpu->cRetAspectNotImplemented++;
10769 else if (rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
10770 pIemCpu->cRetInstrNotImplemented++;
10771#ifdef IEM_VERIFICATION_MODE_FULL
10772 else if (rcStrict == VERR_IEM_RESTART_INSTRUCTION)
10773 rcStrict = VINF_SUCCESS;
10774#endif
10775 else
10776 pIemCpu->cRetErrStatuses++;
10777 }
10778 else if (pIemCpu->rcPassUp != VINF_SUCCESS)
10779 {
10780 pIemCpu->cRetPassUpStatus++;
10781 rcStrict = pIemCpu->rcPassUp;
10782 }
10783
10784 return rcStrict;
10785}
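/* Illustrative sketch, not part of the original file: how a hypothetical helper
 * (iemExampleInstrHelper is made up) would be wrapped so that informational
 * statuses get merged with pIemCpu->rcPassUp and the cRet* statistics stay
 * accurate. A stricter pass-up status wins over a weaker informational one. */
#if 0
static VBOXSTRICTRC iemExampleExecuteAndFiddle(PIEMCPU pIemCpu)
{
    VBOXSTRICTRC rcStrict = iemExampleInstrHelper(pIemCpu); /* hypothetical instruction helper */
    return iemExecStatusCodeFiddling(pIemCpu, rcStrict);    /* merge with rcPassUp + update stats */
}
#endif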
10786
10787
10788/**
10789 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
10790 * IEMExecOneWithPrefetchedByPC.
10791 *
10792 * @return Strict VBox status code.
10793 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10794 * @param pIemCpu The IEM per CPU data.
10795 * @param fExecuteInhibit If set, execute the instruction following CLI,
10796 * POP SS and MOV SS,GR.
10797 */
10798DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPU pVCpu, PIEMCPU pIemCpu, bool fExecuteInhibit)
10799{
10800 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
10801 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
10802 if (rcStrict == VINF_SUCCESS)
10803 pIemCpu->cInstructions++;
10804 if (pIemCpu->cActiveMappings > 0)
10805 iemMemRollback(pIemCpu);
10806//#ifdef DEBUG
10807// AssertMsg(pIemCpu->offOpcode == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", pIemCpu->offOpcode, cbInstr));
10808//#endif
10809
10810 /* Execute the next instruction as well if a cli, pop ss or
10811 mov ss, Gr has just completed successfully. */
10812 if ( fExecuteInhibit
10813 && rcStrict == VINF_SUCCESS
10814 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
10815 && EMGetInhibitInterruptsPC(pVCpu) == pIemCpu->CTX_SUFF(pCtx)->rip )
10816 {
10817 rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, pIemCpu->fBypassHandlers);
10818 if (rcStrict == VINF_SUCCESS)
10819 {
10820# ifdef LOG_ENABLED
10821 iemLogCurInstr(IEMCPU_TO_VMCPU(pIemCpu), pIemCpu->CTX_SUFF(pCtx), false);
10822# endif
10823 IEM_OPCODE_GET_NEXT_U8(&b);
10824 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
10825 if (rcStrict == VINF_SUCCESS)
10826 pIemCpu->cInstructions++;
10827 if (pIemCpu->cActiveMappings > 0)
10828 iemMemRollback(pIemCpu);
10829 }
10830 EMSetInhibitInterruptsPC(pVCpu, UINT64_C(0x7777555533331111));
10831 }
10832
10833 /*
10834 * Return value fiddling, statistics and sanity assertions.
10835 */
10836 rcStrict = iemExecStatusCodeFiddling(pIemCpu, rcStrict);
10837
10838 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->cs));
10839 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->ss));
10840#if defined(IEM_VERIFICATION_MODE_FULL)
10841 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->es));
10842 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->ds));
10843 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->fs));
10844 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->gs));
10845#endif
10846 return rcStrict;
10847}
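/* Illustrative note (assumption, not in the original file): with fExecuteInhibit
 * set, a guest stack-switch sequence such as
 *     mov   ss, ax
 *     mov   esp, ebp
 * is executed as a pair by iemExecOneInner above, so no interrupt can be
 * injected between the SS load and the following instruction, matching the
 * interrupt shadow behaviour of real CPUs. */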
10848
10849
10850#ifdef IN_RC
10851/**
10852 * Re-enters raw-mode or ensures we return to ring-3.
10853 *
10854 * @returns rcStrict, maybe modified.
10855 * @param pIemCpu The IEM CPU structure.
10856 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10857 * @param pCtx The current CPU context.
10858 * @param rcStrict The status code returned by the interpreter.
10859 */
10860DECLINLINE(VBOXSTRICTRC) iemRCRawMaybeReenter(PIEMCPU pIemCpu, PVMCPU pVCpu, PCPUMCTX pCtx, VBOXSTRICTRC rcStrict)
10861{
10862 if (!pIemCpu->fInPatchCode)
10863 CPUMRawEnter(pVCpu);
10864 return rcStrict;
10865}
10866#endif
10867
10868
10869/**
10870 * Execute one instruction.
10871 *
10872 * @return Strict VBox status code.
10873 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10874 */
10875VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPU pVCpu)
10876{
10877 PIEMCPU pIemCpu = &pVCpu->iem.s;
10878
10879#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
10880 iemExecVerificationModeSetup(pIemCpu);
10881#endif
10882#ifdef LOG_ENABLED
10883 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
10884 iemLogCurInstr(pVCpu, pCtx, true);
10885#endif
10886
10887 /*
10888 * Do the decoding and emulation.
10889 */
10890 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, false);
10891 if (rcStrict == VINF_SUCCESS)
10892 rcStrict = iemExecOneInner(pVCpu, pIemCpu, true);
10893
10894#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
10895 /*
10896 * Assert some sanity.
10897 */
10898 iemExecVerificationModeCheck(pIemCpu);
10899#endif
10900#ifdef IN_RC
10901 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pIemCpu->CTX_SUFF(pCtx), rcStrict);
10902#endif
10903 if (rcStrict != VINF_SUCCESS)
10904 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
10905 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
10906 return rcStrict;
10907}
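/* Illustrative sketch, not part of the original file: how a ring-3 caller
 * (EM-style; the function name is made up) might drive single-instruction
 * emulation until IEM returns something other than plain success. */
#if 0
static VBOXSTRICTRC exampleRunSomeInstructions(PVMCPU pVCpu, uint32_t cMaxInstructions)
{
    VBOXSTRICTRC rcStrict = VINF_SUCCESS;
    for (uint32_t i = 0; i < cMaxInstructions && rcStrict == VINF_SUCCESS; i++)
        rcStrict = IEMExecOne(pVCpu);   /* decode + execute exactly one guest instruction */
    return rcStrict;                    /* informational / error statuses are left to the caller */
}
#endif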
10908
10909
10910VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
10911{
10912 PIEMCPU pIemCpu = &pVCpu->iem.s;
10913 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
10914 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
10915
10916 uint32_t const cbOldWritten = pIemCpu->cbWritten;
10917 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, false);
10918 if (rcStrict == VINF_SUCCESS)
10919 {
10920 rcStrict = iemExecOneInner(pVCpu, pIemCpu, true);
10921 if (pcbWritten)
10922 *pcbWritten = pIemCpu->cbWritten - cbOldWritten;
10923 }
10924
10925#ifdef IN_RC
10926 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pCtx, rcStrict);
10927#endif
10928 return rcStrict;
10929}
10930
10931
10932VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
10933 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
10934{
10935 PIEMCPU pIemCpu = &pVCpu->iem.s;
10936 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
10937 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
10938
10939 VBOXSTRICTRC rcStrict;
10940 if ( cbOpcodeBytes
10941 && pCtx->rip == OpcodeBytesPC)
10942 {
10943 iemInitDecoder(pIemCpu, false);
10944 pIemCpu->cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pIemCpu->abOpcode));
10945 memcpy(pIemCpu->abOpcode, pvOpcodeBytes, pIemCpu->cbOpcode);
10946 rcStrict = VINF_SUCCESS;
10947 }
10948 else
10949 rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, false);
10950 if (rcStrict == VINF_SUCCESS)
10951 {
10952 rcStrict = iemExecOneInner(pVCpu, pIemCpu, true);
10953 }
10954
10955#ifdef IN_RC
10956 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pCtx, rcStrict);
10957#endif
10958 return rcStrict;
10959}
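/* Illustrative sketch (assumption, not in the original file): a caller that
 * already captured the opcode bytes at the current RIP (e.g. from an exit-info
 * buffer) can pass them in and skip the guest-memory prefetch; if RIP no longer
 * matches, the call transparently falls back to the normal prefetch path.
 * All names below are made up for illustration. */
#if 0
static VBOXSTRICTRC exampleExecWithCapturedBytes(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t GCPtrInstr,
                                                 uint8_t const *pabInstr, size_t cbInstr)
{
    return IEMExecOneWithPrefetchedByPC(pVCpu, pCtxCore, GCPtrInstr, pabInstr, cbInstr);
}
#endif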
10960
10961
10962VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
10963{
10964 PIEMCPU pIemCpu = &pVCpu->iem.s;
10965 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
10966 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
10967
10968 uint32_t const cbOldWritten = pIemCpu->cbWritten;
10969 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, true);
10970 if (rcStrict == VINF_SUCCESS)
10971 {
10972 rcStrict = iemExecOneInner(pVCpu, pIemCpu, false);
10973 if (pcbWritten)
10974 *pcbWritten = pIemCpu->cbWritten - cbOldWritten;
10975 }
10976
10977#ifdef IN_RC
10978 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pCtx, rcStrict);
10979#endif
10980 return rcStrict;
10981}
10982
10983
10984VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
10985 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
10986{
10987 PIEMCPU pIemCpu = &pVCpu->iem.s;
10988 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
10989 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
10990
10991 VBOXSTRICTRC rcStrict;
10992 if ( cbOpcodeBytes
10993 && pCtx->rip == OpcodeBytesPC)
10994 {
10995 iemInitDecoder(pIemCpu, true);
10996 pIemCpu->cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pIemCpu->abOpcode));
10997 memcpy(pIemCpu->abOpcode, pvOpcodeBytes, pIemCpu->cbOpcode);
10998 rcStrict = VINF_SUCCESS;
10999 }
11000 else
11001 rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, true);
11002 if (rcStrict == VINF_SUCCESS)
11003 rcStrict = iemExecOneInner(pVCpu, pIemCpu, false);
11004
11005#ifdef IN_RC
11006 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pCtx, rcStrict);
11007#endif
11008 return rcStrict;
11009}
11010
11011
11012VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPU pVCpu)
11013{
11014 PIEMCPU pIemCpu = &pVCpu->iem.s;
11015
11016 /*
11017 * See if there is an interrupt pending in TRPM and inject it if we can.
11018 */
11019#if !defined(IEM_VERIFICATION_MODE_FULL) || !defined(IN_RING3)
11020 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
11021# ifdef IEM_VERIFICATION_MODE_FULL
11022 pIemCpu->uInjectCpl = UINT8_MAX;
11023# endif
11024 if ( pCtx->eflags.Bits.u1IF
11025 && TRPMHasTrap(pVCpu)
11026 && EMGetInhibitInterruptsPC(pVCpu) != pCtx->rip)
11027 {
11028 uint8_t u8TrapNo;
11029 TRPMEVENT enmType;
11030 RTGCUINT uErrCode;
11031 RTGCPTR uCr2;
11032 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
11033 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
11034 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
11035 TRPMResetTrap(pVCpu);
11036 }
11037#else
11038 iemExecVerificationModeSetup(pIemCpu);
11039 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
11040#endif
11041
11042 /*
11043 * Log the state.
11044 */
11045#ifdef LOG_ENABLED
11046 iemLogCurInstr(pVCpu, pCtx, true);
11047#endif
11048
11049 /*
11050 * Do the decoding and emulation.
11051 */
11052 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, false);
11053 if (rcStrict == VINF_SUCCESS)
11054 rcStrict = iemExecOneInner(pVCpu, pIemCpu, true);
11055
11056#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
11057 /*
11058 * Assert some sanity.
11059 */
11060 iemExecVerificationModeCheck(pIemCpu);
11061#endif
11062
11063 /*
11064 * Maybe re-enter raw-mode and log.
11065 */
11066#ifdef IN_RC
11067 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pIemCpu->CTX_SUFF(pCtx), rcStrict);
11068#endif
11069 if (rcStrict != VINF_SUCCESS)
11070 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
11071 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
11072 return rcStrict;
11073}
11074
11075
11076
11077/**
11078 * Injects a trap, fault, abort, software interrupt or external interrupt.
11079 *
11080 * The parameter list matches TRPMQueryTrapAll pretty closely.
11081 *
11082 * @returns Strict VBox status code.
11083 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11084 * @param u8TrapNo The trap number.
11085 * @param enmType What type is it (trap/fault/abort), software
11086 * interrupt or hardware interrupt.
11087 * @param uErrCode The error code if applicable.
11088 * @param uCr2 The CR2 value if applicable.
11089 * @param cbInstr The instruction length (only relevant for
11090 * software interrupts).
11091 */
11092VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPU pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
11093 uint8_t cbInstr)
11094{
11095 iemInitDecoder(&pVCpu->iem.s, false);
11096#ifdef DBGFTRACE_ENABLED
11097 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
11098 u8TrapNo, enmType, uErrCode, uCr2);
11099#endif
11100
11101 uint32_t fFlags;
11102 switch (enmType)
11103 {
11104 case TRPM_HARDWARE_INT:
11105 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
11106 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
11107 uErrCode = uCr2 = 0;
11108 break;
11109
11110 case TRPM_SOFTWARE_INT:
11111 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
11112 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
11113 uErrCode = uCr2 = 0;
11114 break;
11115
11116 case TRPM_TRAP:
11117 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
11118 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
11119 if (u8TrapNo == X86_XCPT_PF)
11120 fFlags |= IEM_XCPT_FLAGS_CR2;
11121 switch (u8TrapNo)
11122 {
11123 case X86_XCPT_DF:
11124 case X86_XCPT_TS:
11125 case X86_XCPT_NP:
11126 case X86_XCPT_SS:
11127 case X86_XCPT_PF:
11128 case X86_XCPT_AC:
11129 fFlags |= IEM_XCPT_FLAGS_ERR;
11130 break;
11131
11132 case X86_XCPT_NMI:
11133 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
11134 break;
11135 }
11136 break;
11137
11138 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11139 }
11140
11141 return iemRaiseXcptOrInt(&pVCpu->iem.s, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
11142}
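/* Illustrative sketch (assumption, not in the original file): injecting a page
 * fault the way a TRPM-based caller would. The error code bits and CR2 value
 * are made up; X86_TRAP_PF_P and X86_TRAP_PF_RW are assumed from x86.h. */
#if 0
static VBOXSTRICTRC exampleInjectPageFault(PVMCPU pVCpu)
{
    uint16_t const uErrCode = X86_TRAP_PF_P | X86_TRAP_PF_RW;  /* present + write access */
    RTGCPTR  const uCr2     = 0x00401000;                      /* faulting linear address */
    return IEMInjectTrap(pVCpu, X86_XCPT_PF, TRPM_TRAP, uErrCode, uCr2, 0 /*cbInstr*/);
}
#endif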
11143
11144
11145/**
11146 * Injects the active TRPM event.
11147 *
11148 * @returns Strict VBox status code.
11149 * @param pVCpu The cross context virtual CPU structure.
11150 */
11151VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPU pVCpu)
11152{
11153#ifndef IEM_IMPLEMENTS_TASKSWITCH
11154 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
11155#else
11156 uint8_t u8TrapNo;
11157 TRPMEVENT enmType;
11158 RTGCUINT uErrCode;
11159 RTGCUINTPTR uCr2;
11160 uint8_t cbInstr;
11161 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr);
11162 if (RT_FAILURE(rc))
11163 return rc;
11164
11165 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
11166
11167 /** @todo Are there any other codes that imply the event was successfully
11168 * delivered to the guest? See @bugref{6607}. */
11169 if ( rcStrict == VINF_SUCCESS
11170 || rcStrict == VINF_IEM_RAISED_XCPT)
11171 {
11172 TRPMResetTrap(pVCpu);
11173 }
11174 return rcStrict;
11175#endif
11176}
11177
11178
11179VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
11180{
11181 return VERR_NOT_IMPLEMENTED;
11182}
11183
11184
11185VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
11186{
11187 return VERR_NOT_IMPLEMENTED;
11188}
11189
11190
11191#if 0 /* The IRET-to-v8086 mode in PATM is very optimistic, so I don't dare do this yet. */
11192/**
11193 * Executes an IRET instruction with the default operand size.
11194 *
11195 * This is for PATM.
11196 *
11197 * @returns VBox status code.
11198 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11199 * @param pCtxCore The register frame.
11200 */
11201VMM_INT_DECL(int) IEMExecInstr_iret(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
11202{
11203 PIEMCPU pIemCpu = &pVCpu->iem.s;
11204 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
11205
11206 iemCtxCoreToCtx(pCtx, pCtxCore);
11207 iemInitDecoder(pIemCpu);
11208 VBOXSTRICTRC rcStrict = iemCImpl_iret(pIemCpu, 1, pIemCpu->enmDefOpSize);
11209 if (rcStrict == VINF_SUCCESS)
11210 iemCtxToCtxCore(pCtxCore, pCtx);
11211 else
11212 LogFlow(("IEMExecInstr_iret: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
11213 pCtx->cs, pCtx->rip, pCtx->ss, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
11214 return rcStrict;
11215}
11216#endif
11217
11218
11219/**
11220 * Macro used by the IEMExec* methods to check the given instruction length.
11221 *
11222 * Will return on failure!
11223 *
11224 * @param a_cbInstr The given instruction length.
11225 * @param a_cbMin The minimum length.
11226 */
11227#define IEMEXEC_ASSERT_INSTR_LEN_RETURN(a_cbInstr, a_cbMin) \
11228 AssertMsgReturn((unsigned)(a_cbInstr) - (unsigned)(a_cbMin) <= (unsigned)15 - (unsigned)(a_cbMin), \
11229 ("cbInstr=%u cbMin=%u\n", (a_cbInstr), (a_cbMin)), VERR_IEM_INVALID_INSTR_LENGTH)
11230
11231
11232/**
11233 * Interface for HM and EM for executing string I/O OUT (write) instructions.
11234 *
11235 * This API ASSUMES that the caller has already verified that the guest code is
11236 * allowed to access the I/O port. (The I/O port is in the DX register in the
11237 * guest state.)
11238 *
11239 * @returns Strict VBox status code.
11240 * @param pVCpu The cross context virtual CPU structure.
11241 * @param cbValue The size of the I/O port access (1, 2, or 4).
11242 * @param enmAddrMode The addressing mode.
11243 * @param fRepPrefix Indicates whether a repeat prefix is used
11244 * (doesn't matter which for this instruction).
11245 * @param cbInstr The instruction length in bytes.
11246 * @param iEffSeg The effective segment register number.
11247 */
11248VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
11249 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg)
11250{
11251 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
11252 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
11253
11254 /*
11255 * State init.
11256 */
11257 PIEMCPU pIemCpu = &pVCpu->iem.s;
11258 iemInitExec(pIemCpu, false /*fBypassHandlers*/);
11259
11260 /*
11261 * Switch orgy for getting to the right handler.
11262 */
11263 VBOXSTRICTRC rcStrict;
11264 if (fRepPrefix)
11265 {
11266 switch (enmAddrMode)
11267 {
11268 case IEMMODE_16BIT:
11269 switch (cbValue)
11270 {
11271 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11272 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11273 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11274 default:
11275 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11276 }
11277 break;
11278
11279 case IEMMODE_32BIT:
11280 switch (cbValue)
11281 {
11282 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11283 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11284 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11285 default:
11286 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11287 }
11288 break;
11289
11290 case IEMMODE_64BIT:
11291 switch (cbValue)
11292 {
11293 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11294 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11295 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11296 default:
11297 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11298 }
11299 break;
11300
11301 default:
11302 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
11303 }
11304 }
11305 else
11306 {
11307 switch (enmAddrMode)
11308 {
11309 case IEMMODE_16BIT:
11310 switch (cbValue)
11311 {
11312 case 1: rcStrict = iemCImpl_outs_op8_addr16(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11313 case 2: rcStrict = iemCImpl_outs_op16_addr16(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11314 case 4: rcStrict = iemCImpl_outs_op32_addr16(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11315 default:
11316 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11317 }
11318 break;
11319
11320 case IEMMODE_32BIT:
11321 switch (cbValue)
11322 {
11323 case 1: rcStrict = iemCImpl_outs_op8_addr32(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11324 case 2: rcStrict = iemCImpl_outs_op16_addr32(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11325 case 4: rcStrict = iemCImpl_outs_op32_addr32(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11326 default:
11327 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11328 }
11329 break;
11330
11331 case IEMMODE_64BIT:
11332 switch (cbValue)
11333 {
11334 case 1: rcStrict = iemCImpl_outs_op8_addr64(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11335 case 2: rcStrict = iemCImpl_outs_op16_addr64(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11336 case 4: rcStrict = iemCImpl_outs_op32_addr64(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11337 default:
11338 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11339 }
11340 break;
11341
11342 default:
11343 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
11344 }
11345 }
11346
11347 return iemExecStatusCodeFiddling(pIemCpu, rcStrict);
11348}
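/* Illustrative sketch (assumption, not in the original file): how an HM exit
 * handler might forward a 'rep outsb' (1-byte writes from DS:ESI, 32-bit
 * addressing, 2-byte instruction F3 6E) after it has already checked the I/O
 * permission bitmap. X86_SREG_DS is assumed from x86.h. */
#if 0
static VBOXSTRICTRC exampleForwardRepOutsb(PVMCPU pVCpu)
{
    return IEMExecStringIoWrite(pVCpu, 1 /*cbValue*/, IEMMODE_32BIT, true /*fRepPrefix*/,
                                2 /*cbInstr*/, X86_SREG_DS);
}
#endif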
11349
11350
11351/**
11352 * Interface for HM and EM for executing string I/O IN (read) instructions.
11353 *
11354 * This API ASSUMES that the caller has already verified that the guest code is
11355 * allowed to access the I/O port. (The I/O port is in the DX register in the
11356 * guest state.)
11357 *
11358 * @returns Strict VBox status code.
11359 * @param pVCpu The cross context virtual CPU structure.
11360 * @param cbValue The size of the I/O port access (1, 2, or 4).
11361 * @param enmAddrMode The addressing mode.
11362 * @param fRepPrefix Indicates whether a repeat prefix is used
11363 * (doesn't matter which for this instruction).
11364 * @param cbInstr The instruction length in bytes.
11365 */
11366VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
11367 bool fRepPrefix, uint8_t cbInstr)
11368{
11369 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
11370
11371 /*
11372 * State init.
11373 */
11374 PIEMCPU pIemCpu = &pVCpu->iem.s;
11375 iemInitExec(pIemCpu, false /*fBypassHandlers*/);
11376
11377 /*
11378 * Switch orgy for getting to the right handler.
11379 */
11380 VBOXSTRICTRC rcStrict;
11381 if (fRepPrefix)
11382 {
11383 switch (enmAddrMode)
11384 {
11385 case IEMMODE_16BIT:
11386 switch (cbValue)
11387 {
11388 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11389 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11390 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11391 default:
11392 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11393 }
11394 break;
11395
11396 case IEMMODE_32BIT:
11397 switch (cbValue)
11398 {
11399 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11400 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11401 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11402 default:
11403 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11404 }
11405 break;
11406
11407 case IEMMODE_64BIT:
11408 switch (cbValue)
11409 {
11410 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11411 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11412 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11413 default:
11414 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11415 }
11416 break;
11417
11418 default:
11419 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
11420 }
11421 }
11422 else
11423 {
11424 switch (enmAddrMode)
11425 {
11426 case IEMMODE_16BIT:
11427 switch (cbValue)
11428 {
11429 case 1: rcStrict = iemCImpl_ins_op8_addr16(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11430 case 2: rcStrict = iemCImpl_ins_op16_addr16(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11431 case 4: rcStrict = iemCImpl_ins_op32_addr16(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11432 default:
11433 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11434 }
11435 break;
11436
11437 case IEMMODE_32BIT:
11438 switch (cbValue)
11439 {
11440 case 1: rcStrict = iemCImpl_ins_op8_addr32(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11441 case 2: rcStrict = iemCImpl_ins_op16_addr32(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11442 case 4: rcStrict = iemCImpl_ins_op32_addr32(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11443 default:
11444 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11445 }
11446 break;
11447
11448 case IEMMODE_64BIT:
11449 switch (cbValue)
11450 {
11451 case 1: rcStrict = iemCImpl_ins_op8_addr64(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11452 case 2: rcStrict = iemCImpl_ins_op16_addr64(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11453 case 4: rcStrict = iemCImpl_ins_op32_addr64(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11454 default:
11455 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11456 }
11457 break;
11458
11459 default:
11460 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
11461 }
11462 }
11463
11464 return iemExecStatusCodeFiddling(pIemCpu, rcStrict);
11465}
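/* Illustrative sketch (assumption, not in the original file): the IN (read)
 * counterpart, forwarding a 'rep insd' (4-byte reads into ES:EDI, 32-bit
 * addressing, 2-byte instruction F3 6D); no segment parameter is needed since
 * INS always targets ES. */
#if 0
static VBOXSTRICTRC exampleForwardRepInsd(PVMCPU pVCpu)
{
    return IEMExecStringIoRead(pVCpu, 4 /*cbValue*/, IEMMODE_32BIT, true /*fRepPrefix*/, 2 /*cbInstr*/);
}
#endif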
11466
11467
11468
11469/**
11470 * Interface for HM and EM to write to a CRx register.
11471 *
11472 * @returns Strict VBox status code.
11473 * @param pVCpu The cross context virtual CPU structure.
11474 * @param cbInstr The instruction length in bytes.
11475 * @param iCrReg The control register number (destination).
11476 * @param iGReg The general purpose register number (source).
11477 *
11478 * @remarks In ring-0 not all of the state needs to be synced in.
11479 */
11480VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
11481{
11482 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11483 Assert(iCrReg < 16);
11484 Assert(iGReg < 16);
11485
11486 PIEMCPU pIemCpu = &pVCpu->iem.s;
11487 iemInitExec(pIemCpu, false /*fBypassHandlers*/);
11488 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
11489 return iemExecStatusCodeFiddling(pIemCpu, rcStrict);
11490}
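/* Illustrative sketch (assumption, not in the original file): handling an
 * intercepted 'mov cr3, rax' (3 bytes, 0F 22 D8). CR3 is control register 3 and
 * RAX is general register 0; X86_GREG_xAX is assumed from x86.h. */
#if 0
static VBOXSTRICTRC exampleMovToCr3FromRax(PVMCPU pVCpu)
{
    return IEMExecDecodedMovCRxWrite(pVCpu, 3 /*cbInstr*/, 3 /*iCrReg=CR3*/, X86_GREG_xAX);
}
#endif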
11491
11492
11493/**
11494 * Interface for HM and EM to read from a CRx register.
11495 *
11496 * @returns Strict VBox status code.
11497 * @param pVCpu The cross context virtual CPU structure.
11498 * @param cbInstr The instruction length in bytes.
11499 * @param iGReg The general purpose register number (destination).
11500 * @param iCrReg The control register number (source).
11501 *
11502 * @remarks In ring-0 not all of the state needs to be synced in.
11503 */
11504VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
11505{
11506 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11507 Assert(iCrReg < 16);
11508 Assert(iGReg < 16);
11509
11510 PIEMCPU pIemCpu = &pVCpu->iem.s;
11511 iemInitExec(pIemCpu, false /*fBypassHandlers*/);
11512 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
11513 return iemExecStatusCodeFiddling(pIemCpu, rcStrict);
11514}
11515
11516
11517/**
11518 * Interface for HM and EM to clear the CR0[TS] bit.
11519 *
11520 * @returns Strict VBox status code.
11521 * @param pVCpu The cross context virtual CPU structure.
11522 * @param cbInstr The instruction length in bytes.
11523 *
11524 * @remarks In ring-0 not all of the state needs to be synced in.
11525 */
11526VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPU pVCpu, uint8_t cbInstr)
11527{
11528 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11529
11530 PIEMCPU pIemCpu = &pVCpu->iem.s;
11531 iemInitExec(pIemCpu, false /*fBypassHandlers*/);
11532 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
11533 return iemExecStatusCodeFiddling(pIemCpu, rcStrict);
11534}
11535
11536
11537/**
11538 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
11539 *
11540 * @returns Strict VBox status code.
11541 * @param pVCpu The cross context virtual CPU structure.
11542 * @param cbInstr The instruction length in bytes.
11543 * @param uValue The value to load into CR0.
11544 *
11545 * @remarks In ring-0 not all of the state needs to be synced in.
11546 */
11547VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPU pVCpu, uint8_t cbInstr, uint16_t uValue)
11548{
11549 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11550
11551 PIEMCPU pIemCpu = &pVCpu->iem.s;
11552 iemInitExec(pIemCpu, false /*fBypassHandlers*/);
11553 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_lmsw, uValue);
11554 return iemExecStatusCodeFiddling(pIemCpu, rcStrict);
11555}
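/* Illustrative sketch (assumption, not in the original file): forwarding an
 * intercepted 'lmsw ax' (3 bytes, 0F 01 F0); uGuestAx would normally be pulled
 * from the decoded guest register state by the caller. */
#if 0
static VBOXSTRICTRC exampleForwardLmsw(PVMCPU pVCpu, uint16_t uGuestAx)
{
    return IEMExecDecodedLmsw(pVCpu, 3 /*cbInstr*/, uGuestAx);
}
#endif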
11556
11557
11558/**
11559 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
11560 *
11561 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
11562 *
11563 * @returns Strict VBox status code.
11564 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11565 * @param cbInstr The instruction length in bytes.
11566 * @remarks In ring-0 not all of the state needs to be synced in.
11567 * @thread EMT(pVCpu)
11568 */
11569VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPU pVCpu, uint8_t cbInstr)
11570{
11571 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11572
11573 PIEMCPU pIemCpu = &pVCpu->iem.s;
11574 iemInitExec(pIemCpu, false /*fBypassHandlers*/);
11575 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
11576 return iemExecStatusCodeFiddling(pIemCpu, rcStrict);
11577}
11578
11579#ifdef IN_RING3
11580
11581/**
11582 * Called by force-flag handling code when VMCPU_FF_IEM is set.
11583 *
11584 * @returns Merge between @a rcStrict and what the commit operation returned.
11585 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11586 * @param rcStrict The status code returned by ring-0 or raw-mode.
11587 */
11588VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3DoPendingAction(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
11589{
11590 PIEMCPU pIemCpu = &pVCpu->iem.s;
11591
11592 /*
11593 * Retrieve and reset the pending commit.
11594 */
11595 IEMCOMMIT const enmFn = pIemCpu->PendingCommit.enmFn;
11596 pIemCpu->PendingCommit.enmFn = IEMCOMMIT_INVALID;
11597 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);
11598
11599 /*
11600 * Must reset pass-up status code.
11601 */
11602 pIemCpu->rcPassUp = VINF_SUCCESS;
11603
11604 /*
11605 * Call the function. Currently using a switch here instead of a function
11606 * pointer table, as a switch won't get skewed.
11607 */
11608 VBOXSTRICTRC rcStrictCommit;
11609 switch (enmFn)
11610 {
11611 case IEMCOMMIT_INS_OP8_ADDR16: rcStrictCommit = iemR3CImpl_commit_ins_op8_addr16(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11612 case IEMCOMMIT_INS_OP8_ADDR32: rcStrictCommit = iemR3CImpl_commit_ins_op8_addr32(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11613 case IEMCOMMIT_INS_OP8_ADDR64: rcStrictCommit = iemR3CImpl_commit_ins_op8_addr64(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11614 case IEMCOMMIT_INS_OP16_ADDR16: rcStrictCommit = iemR3CImpl_commit_ins_op16_addr16(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11615 case IEMCOMMIT_INS_OP16_ADDR32: rcStrictCommit = iemR3CImpl_commit_ins_op16_addr32(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11616 case IEMCOMMIT_INS_OP16_ADDR64: rcStrictCommit = iemR3CImpl_commit_ins_op16_addr64(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11617 case IEMCOMMIT_INS_OP32_ADDR16: rcStrictCommit = iemR3CImpl_commit_ins_op32_addr16(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11618 case IEMCOMMIT_INS_OP32_ADDR32: rcStrictCommit = iemR3CImpl_commit_ins_op32_addr32(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11619 case IEMCOMMIT_INS_OP32_ADDR64: rcStrictCommit = iemR3CImpl_commit_ins_op32_addr64(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11620 case IEMCOMMIT_REP_INS_OP8_ADDR16: rcStrictCommit = iemR3CImpl_commit_rep_ins_op8_addr16(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11621 case IEMCOMMIT_REP_INS_OP8_ADDR32: rcStrictCommit = iemR3CImpl_commit_rep_ins_op8_addr32(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11622 case IEMCOMMIT_REP_INS_OP8_ADDR64: rcStrictCommit = iemR3CImpl_commit_rep_ins_op8_addr64(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11623 case IEMCOMMIT_REP_INS_OP16_ADDR16: rcStrictCommit = iemR3CImpl_commit_rep_ins_op16_addr16(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11624 case IEMCOMMIT_REP_INS_OP16_ADDR32: rcStrictCommit = iemR3CImpl_commit_rep_ins_op16_addr32(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11625 case IEMCOMMIT_REP_INS_OP16_ADDR64: rcStrictCommit = iemR3CImpl_commit_rep_ins_op16_addr64(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11626 case IEMCOMMIT_REP_INS_OP32_ADDR16: rcStrictCommit = iemR3CImpl_commit_rep_ins_op32_addr16(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11627 case IEMCOMMIT_REP_INS_OP32_ADDR32: rcStrictCommit = iemR3CImpl_commit_rep_ins_op32_addr32(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11628 case IEMCOMMIT_REP_INS_OP32_ADDR64: rcStrictCommit = iemR3CImpl_commit_rep_ins_op32_addr64(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11629 default:
11630 AssertLogRelMsgFailedReturn(("enmFn=%#x (%d)\n", pIemCpu->PendingCommit.enmFn, pIemCpu->PendingCommit.enmFn), VERR_IEM_IPE_2);
11631 }
11632
11633 /*
11634 * Merge status code (if any) with the incoming one.
11635 */
11636 rcStrictCommit = iemExecStatusCodeFiddling(pIemCpu, rcStrictCommit);
11637 if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
11638 return rcStrict;
11639 if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
11640 return rcStrictCommit;
11641
11642 /* Complicated. */
11643 if (RT_FAILURE(rcStrict))
11644 return rcStrict;
11645 if (RT_FAILURE(rcStrictCommit))
11646 return rcStrictCommit;
11647 if ( rcStrict >= VINF_EM_FIRST
11648 && rcStrict <= VINF_EM_LAST)
11649 {
11650 if ( rcStrictCommit >= VINF_EM_FIRST
11651 && rcStrictCommit <= VINF_EM_LAST)
11652 return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
11653
11654 /* This really shouldn't happen. Check PGM + handler code! */
11655 AssertLogRelMsgFailedReturn(("rcStrictCommit=%Rrc rcStrict=%Rrc enmFn=%d\n", VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), enmFn), VERR_IEM_IPE_1);
11656 }
11657 /* This shouldn't really happen either, see IOM_SUCCESS. */
11658 AssertLogRelMsgFailedReturn(("rcStrictCommit=%Rrc rcStrict=%Rrc enmFn=%d\n", VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), enmFn), VERR_IEM_IPE_2);
11659}
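/* Illustrative sketch (assumption, not in the original file): how ring-3
 * force-flag processing might pick up a pending IEM commit after returning
 * from ring-0 or raw-mode execution. */
#if 0
static VBOXSTRICTRC exampleHandlePendingIemCommit(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
{
    if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
        rcStrict = IEMR3DoPendingAction(pVCpu, rcStrict);
    return rcStrict;
}
#endif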
11660
11661#endif /* IN_RING3 */
11662