VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@ 60716

Last change on this file since 60716 was 60671, checked in by vboxsync, 9 years ago

IEM: Made SIDT+SGDT store 0xff in high base byte when emulating a 286.

1/* $Id: IEMAll.cpp 60671 2016-04-23 00:07:17Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2015 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @page pg_iem IEM - Interpreted Execution Manager
20 *
21 * The interpreted execution manager (IEM) is for executing short guest code
22 * sequences that are causing too many exits / virtualization traps. It will
23 * also be used to interpret single instructions, thus replacing the selective
24 * interpreters in EM and IOM.
25 *
26 * Design goals:
27 * - Relatively small footprint, although we favour speed and correctness
28 * over size.
29 * - Reasonably fast.
30 * - Correctly handle lock prefixed instructions.
31 * - Complete instruction set - eventually.
32 * - Refactorable into a recompiler, maybe.
33 * - Replace EMInterpret*.
34 *
35 * Using the existing disassembler has been considered; however, it is thought
36 * to conflict with the speed goal, as the disassembler chews things a bit too
37 * much and leaves us with a somewhat complicated state to interpret afterwards.
38 *
39 *
40 * The current code is very much work in progress. You've been warned!
41 *
42 *
43 * @section sec_iem_fpu_instr FPU Instructions
44 *
45 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
46 * same or equivalent instructions on the host FPU. To make life easy, we also
47 * let the FPU prioritize the unmasked exceptions for us. This, however, only
48 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
49 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
50 * can trigger spurious FPU exceptions.
51 *
52 * The guest FPU state is not loaded into the host CPU and kept there until we
53 * leave IEM, because the calling conventions have declared an all-year open
54 * season on much of the FPU state. For instance an innocent looking call to
55 * memcpy might end up using a whole bunch of XMM or MM registers if the
56 * particular implementation finds it worthwhile.
57 *
58 *
59 * @section sec_iem_logging Logging
60 *
61 * The IEM code uses the \"IEM\" log group for the main logging. The different
62 * logging levels/flags are generally used for the following purposes:
63 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
64 * - Flow (LogFlow): Basic enter/exit IEM state info.
65 * - Level 2 (Log2): ?
66 * - Level 3 (Log3): More detailed enter/exit IEM state info.
67 * - Level 4 (Log4): Decoding mnemonics w/ EIP.
68 * - Level 5 (Log5): Decoding details.
69 * - Level 6 (Log6): Enables/disables the lockstep comparison with REM.
70 * - Level 7 (Log7): iret++ execution logging.
71 *
72 */
73
74/** @def IEM_VERIFICATION_MODE_MINIMAL
75 * Use for pitting IEM against EM or something else in ring-0 or raw-mode
76 * context. */
77#if defined(DOXYGEN_RUNNING)
78# define IEM_VERIFICATION_MODE_MINIMAL
79#endif
80//#define IEM_LOG_MEMORY_WRITES
81#define IEM_IMPLEMENTS_TASKSWITCH
82
83
84/*********************************************************************************************************************************
85* Header Files *
86*********************************************************************************************************************************/
87#define LOG_GROUP LOG_GROUP_IEM
88#include <VBox/vmm/iem.h>
89#include <VBox/vmm/cpum.h>
90#include <VBox/vmm/pdm.h>
91#include <VBox/vmm/pgm.h>
92#include <internal/pgm.h>
93#include <VBox/vmm/iom.h>
94#include <VBox/vmm/em.h>
95#include <VBox/vmm/hm.h>
96#include <VBox/vmm/tm.h>
97#include <VBox/vmm/dbgf.h>
98#include <VBox/vmm/dbgftrace.h>
99#ifdef VBOX_WITH_RAW_MODE_NOT_R0
100# include <VBox/vmm/patm.h>
101# if defined(VBOX_WITH_CALL_RECORD) || defined(REM_MONITOR_CODE_PAGES)
102# include <VBox/vmm/csam.h>
103# endif
104#endif
105#include "IEMInternal.h"
106#ifdef IEM_VERIFICATION_MODE_FULL
107# include <VBox/vmm/rem.h>
108# include <VBox/vmm/mm.h>
109#endif
110#include <VBox/vmm/vm.h>
111#include <VBox/log.h>
112#include <VBox/err.h>
113#include <VBox/param.h>
114#include <VBox/dis.h>
115#include <VBox/disopcode.h>
116#include <iprt/assert.h>
117#include <iprt/string.h>
118#include <iprt/x86.h>
119
120
121
122/*********************************************************************************************************************************
123* Structures and Typedefs *
124*********************************************************************************************************************************/
125/** @typedef PFNIEMOP
126 * Pointer to an opcode decoder function.
127 */
128
129/** @def FNIEMOP_DEF
130 * Define an opcode decoder function.
131 *
132 * We're using macros for this so that adding and removing parameters as well as
133 * tweaking compiler-specific attributes becomes easier. See FNIEMOP_CALL
134 *
135 * @param a_Name The function name.
136 */
137
138
139#if defined(__GNUC__) && defined(RT_ARCH_X86)
140typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOP)(PIEMCPU pIemCpu);
141# define FNIEMOP_DEF(a_Name) \
142 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PIEMCPU pIemCpu)
143# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
144 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0)
145# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
146 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1)
147
148#elif defined(_MSC_VER) && defined(RT_ARCH_X86)
149typedef VBOXSTRICTRC (__fastcall * PFNIEMOP)(PIEMCPU pIemCpu);
150# define FNIEMOP_DEF(a_Name) \
151 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PIEMCPU pIemCpu) RT_NO_THROW_DEF
152# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
153 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
154# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
155 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
156
157#elif defined(__GNUC__)
158typedef VBOXSTRICTRC (* PFNIEMOP)(PIEMCPU pIemCpu);
159# define FNIEMOP_DEF(a_Name) \
160 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PIEMCPU pIemCpu)
161# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
162 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0)
163# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
164 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1)
165
166#else
167typedef VBOXSTRICTRC (* PFNIEMOP)(PIEMCPU pIemCpu);
168# define FNIEMOP_DEF(a_Name) \
169 IEM_STATIC VBOXSTRICTRC a_Name(PIEMCPU pIemCpu) RT_NO_THROW_DEF
170# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
171 IEM_STATIC VBOXSTRICTRC a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
172# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
173 IEM_STATIC VBOXSTRICTRC a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
174
175#endif
176
177
178/**
179 * Selector descriptor table entry as fetched by iemMemFetchSelDesc.
180 */
181typedef union IEMSELDESC
182{
183 /** The legacy view. */
184 X86DESC Legacy;
185 /** The long mode view. */
186 X86DESC64 Long;
187} IEMSELDESC;
188/** Pointer to a selector descriptor table entry. */
189typedef IEMSELDESC *PIEMSELDESC;
190
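/**
 * Illustrative sketch, added here and not part of the original source: a
 * typical inspection of a descriptor fetched into an IEMSELDESC.  The generic
 * bit-field names (Gen.u1Present etc.) follow the X86DESC layout in
 * iprt/x86.h and should be treated as an assumption in this sketch.
 *
 * @code
 *      IEMSELDESC Desc;
 *      VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uSel, X86_XCPT_GP);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *      if (!Desc.Legacy.Gen.u1Present)     // segment not present?
 *          return iemRaiseSelectorNotPresentBySelector(pIemCpu, uSel);
 *      // In long mode, system descriptors span 16 bytes and are read via Desc.Long.
 * @endcode
 */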
191
192/*********************************************************************************************************************************
193* Defined Constants And Macros *
194*********************************************************************************************************************************/
195/** Temporary hack to disable the double execution. Will be removed in favor
196 * of a dedicated execution mode in EM. */
197//#define IEM_VERIFICATION_MODE_NO_REM
198
199/** Used to shut up GCC warnings about variables that 'may be used uninitialized'
200 * due to GCC lacking knowledge about the value range of a switch. */
201#define IEM_NOT_REACHED_DEFAULT_CASE_RET() default: AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE)
202
203/**
204 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
205 * occasion.
206 */
207#ifdef LOG_ENABLED
208# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
209 do { \
210 /*Log*/ LogAlways(("%s: returning IEM_RETURN_ASPECT_NOT_IMPLEMENTED (line %d)\n", __FUNCTION__, __LINE__)); \
211 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
212 } while (0)
213#else
214# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
215 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
216#endif
217
218/**
219 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
220 * occasion using the supplied logger statement.
221 *
222 * @param a_LoggerArgs What to log on failure.
223 */
224#ifdef LOG_ENABLED
225# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
226 do { \
227 LogAlways((LOG_FN_FMT ": ", __PRETTY_FUNCTION__)); LogAlways(a_LoggerArgs); \
228 /*LogFunc(a_LoggerArgs);*/ \
229 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
230 } while (0)
231#else
232# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
233 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
234#endif
235
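/**
 * Usage sketch added for illustration (not in the original source): an
 * unimplemented decoder typically bails out like this, so callers see
 * VERR_IEM_ASPECT_NOT_IMPLEMENTED and can fall back to another execution
 * engine.  The mnemonic below is hypothetical.
 *
 * @code
 *      FNIEMOP_DEF(iemOp_frobnicate)   // hypothetical, not yet implemented
 *      {
 *          IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("frobnicate emulation not implemented\n"));
 *      }
 * @endcode
 */
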
236/**
237 * Call an opcode decoder function.
238 *
239 * We're using macros for this so that adding and removing parameters can be
240 * done as we please. See FNIEMOP_DEF.
241 */
242#define FNIEMOP_CALL(a_pfn) (a_pfn)(pIemCpu)
243
244/**
245 * Call a common opcode decoder function taking one extra argument.
246 *
247 * We're using macros for this so that adding and removing parameters can be
248 * done as we please. See FNIEMOP_DEF_1.
249 */
250#define FNIEMOP_CALL_1(a_pfn, a0) (a_pfn)(pIemCpu, a0)
251
252/**
253 * Call a common opcode decoder function taking two extra arguments.
254 *
255 * We're using macros for this so that adding and removing parameters can be
256 * done as we please. See FNIEMOP_DEF_2.
257 */
258#define FNIEMOP_CALL_2(a_pfn, a0, a1) (a_pfn)(pIemCpu, a0, a1)
259
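/**
 * Sketch added for illustration (not part of the original source): how the
 * FNIEMOP_DEF_* and FNIEMOP_CALL_* macro pairs fit together.  The opcode and
 * worker names are made up; only the macro shapes come from the definitions
 * above.
 *
 * @code
 *      FNIEMOP_DEF_1(iemOp_Grp9999_worker, uint8_t, bRm)   // hypothetical worker
 *      {
 *          NOREF(pIemCpu); NOREF(bRm);
 *          return VINF_SUCCESS;
 *      }
 *
 *      FNIEMOP_DEF(iemOp_Grp9999)                          // hypothetical decoder
 *      {
 *          uint8_t bRm;
 *          IEM_OPCODE_GET_NEXT_U8(&bRm);                   // defined further down
 *          return FNIEMOP_CALL_1(iemOp_Grp9999_worker, bRm);
 *      }
 * @endcode
 */
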
260/**
261 * Check if we're currently executing in real or virtual 8086 mode.
262 *
263 * @returns @c true if it is, @c false if not.
264 * @param a_pIemCpu The IEM state of the current CPU.
265 */
266#define IEM_IS_REAL_OR_V86_MODE(a_pIemCpu) (CPUMIsGuestInRealOrV86ModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
267
268/**
269 * Check if we're currently executing in virtual 8086 mode.
270 *
271 * @returns @c true if it is, @c false if not.
272 * @param a_pIemCpu The IEM state of the current CPU.
273 */
274#define IEM_IS_V86_MODE(a_pIemCpu) (CPUMIsGuestInV86ModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
275
276/**
277 * Check if we're currently executing in long mode.
278 *
279 * @returns @c true if it is, @c false if not.
280 * @param a_pIemCpu The IEM state of the current CPU.
281 */
282#define IEM_IS_LONG_MODE(a_pIemCpu) (CPUMIsGuestInLongModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
283
284/**
285 * Check if we're currently executing in real mode.
286 *
287 * @returns @c true if it is, @c false if not.
288 * @param a_pIemCpu The IEM state of the current CPU.
289 */
290#define IEM_IS_REAL_MODE(a_pIemCpu) (CPUMIsGuestInRealModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
291
292/**
293 * Returns a (const) pointer to the CPUMFEATURES for the guest CPU.
294 * @returns PCCPUMFEATURES
295 * @param a_pIemCpu The IEM state of the current CPU.
296 */
297#define IEM_GET_GUEST_CPU_FEATURES(a_pIemCpu) (&(IEMCPU_TO_VM(a_pIemCpu)->cpum.ro.GuestFeatures))
298
299/**
300 * Returns a (const) pointer to the CPUMFEATURES for the host CPU.
301 * @returns PCCPUMFEATURES
302 * @param a_pIemCpu The IEM state of the current CPU.
303 */
304#define IEM_GET_HOST_CPU_FEATURES(a_pIemCpu) (&(IEMCPU_TO_VM(a_pIemCpu)->cpum.ro.HostFeatures))
305
306/**
307 * Evaluates to true if we're presenting an Intel CPU to the guest.
308 */
309#define IEM_IS_GUEST_CPU_INTEL(a_pIemCpu) ( (a_pIemCpu)->enmCpuVendor == CPUMCPUVENDOR_INTEL )
310
311/**
312 * Evaluates to true if we're presenting an AMD CPU to the guest.
313 */
314#define IEM_IS_GUEST_CPU_AMD(a_pIemCpu) ( (a_pIemCpu)->enmCpuVendor == CPUMCPUVENDOR_AMD )
315
316/**
317 * Check if the address is canonical.
318 */
319#define IEM_IS_CANONICAL(a_u64Addr) X86_IS_CANONICAL(a_u64Addr)
320
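/**
 * Guard sketch added for illustration (not in the original source): the mode
 * and feature macros above are meant to be used as early checks in
 * instruction implementations.  The instruction is hypothetical and the
 * fLongMode feature flag name is an assumption about CPUMFEATURES.
 *
 * @code
 *      FNIEMOP_DEF(iemOp_hypothetical_sysinsn)
 *      {
 *          if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))   // not valid outside protected mode
 *              return iemRaiseUndefinedOpcode(pIemCpu);
 *          if (!IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fLongMode)
 *              return iemRaiseUndefinedOpcode(pIemCpu);
 *          IEM_RETURN_ASPECT_NOT_IMPLEMENTED();
 *      }
 * @endcode
 */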
321
322/*********************************************************************************************************************************
323* Global Variables *
324*********************************************************************************************************************************/
325extern const PFNIEMOP g_apfnOneByteMap[256]; /* not static since we need to forward declare it. */
326
327
328/** Function table for the ADD instruction. */
329IEM_STATIC const IEMOPBINSIZES g_iemAImpl_add =
330{
331 iemAImpl_add_u8, iemAImpl_add_u8_locked,
332 iemAImpl_add_u16, iemAImpl_add_u16_locked,
333 iemAImpl_add_u32, iemAImpl_add_u32_locked,
334 iemAImpl_add_u64, iemAImpl_add_u64_locked
335};
336
337/** Function table for the ADC instruction. */
338IEM_STATIC const IEMOPBINSIZES g_iemAImpl_adc =
339{
340 iemAImpl_adc_u8, iemAImpl_adc_u8_locked,
341 iemAImpl_adc_u16, iemAImpl_adc_u16_locked,
342 iemAImpl_adc_u32, iemAImpl_adc_u32_locked,
343 iemAImpl_adc_u64, iemAImpl_adc_u64_locked
344};
345
346/** Function table for the SUB instruction. */
347IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sub =
348{
349 iemAImpl_sub_u8, iemAImpl_sub_u8_locked,
350 iemAImpl_sub_u16, iemAImpl_sub_u16_locked,
351 iemAImpl_sub_u32, iemAImpl_sub_u32_locked,
352 iemAImpl_sub_u64, iemAImpl_sub_u64_locked
353};
354
355/** Function table for the SBB instruction. */
356IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sbb =
357{
358 iemAImpl_sbb_u8, iemAImpl_sbb_u8_locked,
359 iemAImpl_sbb_u16, iemAImpl_sbb_u16_locked,
360 iemAImpl_sbb_u32, iemAImpl_sbb_u32_locked,
361 iemAImpl_sbb_u64, iemAImpl_sbb_u64_locked
362};
363
364/** Function table for the OR instruction. */
365IEM_STATIC const IEMOPBINSIZES g_iemAImpl_or =
366{
367 iemAImpl_or_u8, iemAImpl_or_u8_locked,
368 iemAImpl_or_u16, iemAImpl_or_u16_locked,
369 iemAImpl_or_u32, iemAImpl_or_u32_locked,
370 iemAImpl_or_u64, iemAImpl_or_u64_locked
371};
372
373/** Function table for the XOR instruction. */
374IEM_STATIC const IEMOPBINSIZES g_iemAImpl_xor =
375{
376 iemAImpl_xor_u8, iemAImpl_xor_u8_locked,
377 iemAImpl_xor_u16, iemAImpl_xor_u16_locked,
378 iemAImpl_xor_u32, iemAImpl_xor_u32_locked,
379 iemAImpl_xor_u64, iemAImpl_xor_u64_locked
380};
381
382/** Function table for the AND instruction. */
383IEM_STATIC const IEMOPBINSIZES g_iemAImpl_and =
384{
385 iemAImpl_and_u8, iemAImpl_and_u8_locked,
386 iemAImpl_and_u16, iemAImpl_and_u16_locked,
387 iemAImpl_and_u32, iemAImpl_and_u32_locked,
388 iemAImpl_and_u64, iemAImpl_and_u64_locked
389};
390
391/** Function table for the CMP instruction.
392 * @remarks Making operand order ASSUMPTIONS.
393 */
394IEM_STATIC const IEMOPBINSIZES g_iemAImpl_cmp =
395{
396 iemAImpl_cmp_u8, NULL,
397 iemAImpl_cmp_u16, NULL,
398 iemAImpl_cmp_u32, NULL,
399 iemAImpl_cmp_u64, NULL
400};
401
402/** Function table for the TEST instruction.
403 * @remarks Making operand order ASSUMPTIONS.
404 */
405IEM_STATIC const IEMOPBINSIZES g_iemAImpl_test =
406{
407 iemAImpl_test_u8, NULL,
408 iemAImpl_test_u16, NULL,
409 iemAImpl_test_u32, NULL,
410 iemAImpl_test_u64, NULL
411};
412
413/** Function table for the BT instruction. */
414IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bt =
415{
416 NULL, NULL,
417 iemAImpl_bt_u16, NULL,
418 iemAImpl_bt_u32, NULL,
419 iemAImpl_bt_u64, NULL
420};
421
422/** Function table for the BTC instruction. */
423IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btc =
424{
425 NULL, NULL,
426 iemAImpl_btc_u16, iemAImpl_btc_u16_locked,
427 iemAImpl_btc_u32, iemAImpl_btc_u32_locked,
428 iemAImpl_btc_u64, iemAImpl_btc_u64_locked
429};
430
431/** Function table for the BTR instruction. */
432IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btr =
433{
434 NULL, NULL,
435 iemAImpl_btr_u16, iemAImpl_btr_u16_locked,
436 iemAImpl_btr_u32, iemAImpl_btr_u32_locked,
437 iemAImpl_btr_u64, iemAImpl_btr_u64_locked
438};
439
440/** Function table for the BTS instruction. */
441IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bts =
442{
443 NULL, NULL,
444 iemAImpl_bts_u16, iemAImpl_bts_u16_locked,
445 iemAImpl_bts_u32, iemAImpl_bts_u32_locked,
446 iemAImpl_bts_u64, iemAImpl_bts_u64_locked
447};
448
449/** Function table for the BSF instruction. */
450IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsf =
451{
452 NULL, NULL,
453 iemAImpl_bsf_u16, NULL,
454 iemAImpl_bsf_u32, NULL,
455 iemAImpl_bsf_u64, NULL
456};
457
458/** Function table for the BSR instruction. */
459IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsr =
460{
461 NULL, NULL,
462 iemAImpl_bsr_u16, NULL,
463 iemAImpl_bsr_u32, NULL,
464 iemAImpl_bsr_u64, NULL
465};
466
467/** Function table for the IMUL instruction. */
468IEM_STATIC const IEMOPBINSIZES g_iemAImpl_imul_two =
469{
470 NULL, NULL,
471 iemAImpl_imul_two_u16, NULL,
472 iemAImpl_imul_two_u32, NULL,
473 iemAImpl_imul_two_u64, NULL
474};
475
476/** Group 1 /r lookup table. */
477IEM_STATIC const PCIEMOPBINSIZES g_apIemImplGrp1[8] =
478{
479 &g_iemAImpl_add,
480 &g_iemAImpl_or,
481 &g_iemAImpl_adc,
482 &g_iemAImpl_sbb,
483 &g_iemAImpl_and,
484 &g_iemAImpl_sub,
485 &g_iemAImpl_xor,
486 &g_iemAImpl_cmp
487};
488
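/**
 * Added illustration (not from the original source): the group 1 table is
 * indexed by the reg field (bits 3 through 5) of the ModR/M byte, selecting
 * between ADD, OR, ADC, SBB, AND, SUB, XOR and CMP; the locked entries are
 * picked when a LOCK prefix is in effect.
 *
 * @code
 *      uint8_t bRm;
 *      IEM_OPCODE_GET_NEXT_U8(&bRm);
 *      PCIEMOPBINSIZES pImpl = g_apIemImplGrp1[(bRm >> 3) & 7];
 * @endcode
 */
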
489/** Function table for the INC instruction. */
490IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_inc =
491{
492 iemAImpl_inc_u8, iemAImpl_inc_u8_locked,
493 iemAImpl_inc_u16, iemAImpl_inc_u16_locked,
494 iemAImpl_inc_u32, iemAImpl_inc_u32_locked,
495 iemAImpl_inc_u64, iemAImpl_inc_u64_locked
496};
497
498/** Function table for the DEC instruction. */
499IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_dec =
500{
501 iemAImpl_dec_u8, iemAImpl_dec_u8_locked,
502 iemAImpl_dec_u16, iemAImpl_dec_u16_locked,
503 iemAImpl_dec_u32, iemAImpl_dec_u32_locked,
504 iemAImpl_dec_u64, iemAImpl_dec_u64_locked
505};
506
507/** Function table for the NEG instruction. */
508IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_neg =
509{
510 iemAImpl_neg_u8, iemAImpl_neg_u8_locked,
511 iemAImpl_neg_u16, iemAImpl_neg_u16_locked,
512 iemAImpl_neg_u32, iemAImpl_neg_u32_locked,
513 iemAImpl_neg_u64, iemAImpl_neg_u64_locked
514};
515
516/** Function table for the NOT instruction. */
517IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_not =
518{
519 iemAImpl_not_u8, iemAImpl_not_u8_locked,
520 iemAImpl_not_u16, iemAImpl_not_u16_locked,
521 iemAImpl_not_u32, iemAImpl_not_u32_locked,
522 iemAImpl_not_u64, iemAImpl_not_u64_locked
523};
524
525
526/** Function table for the ROL instruction. */
527IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rol =
528{
529 iemAImpl_rol_u8,
530 iemAImpl_rol_u16,
531 iemAImpl_rol_u32,
532 iemAImpl_rol_u64
533};
534
535/** Function table for the ROR instruction. */
536IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_ror =
537{
538 iemAImpl_ror_u8,
539 iemAImpl_ror_u16,
540 iemAImpl_ror_u32,
541 iemAImpl_ror_u64
542};
543
544/** Function table for the RCL instruction. */
545IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcl =
546{
547 iemAImpl_rcl_u8,
548 iemAImpl_rcl_u16,
549 iemAImpl_rcl_u32,
550 iemAImpl_rcl_u64
551};
552
553/** Function table for the RCR instruction. */
554IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcr =
555{
556 iemAImpl_rcr_u8,
557 iemAImpl_rcr_u16,
558 iemAImpl_rcr_u32,
559 iemAImpl_rcr_u64
560};
561
562/** Function table for the SHL instruction. */
563IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shl =
564{
565 iemAImpl_shl_u8,
566 iemAImpl_shl_u16,
567 iemAImpl_shl_u32,
568 iemAImpl_shl_u64
569};
570
571/** Function table for the SHR instruction. */
572IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shr =
573{
574 iemAImpl_shr_u8,
575 iemAImpl_shr_u16,
576 iemAImpl_shr_u32,
577 iemAImpl_shr_u64
578};
579
580/** Function table for the SAR instruction. */
581IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_sar =
582{
583 iemAImpl_sar_u8,
584 iemAImpl_sar_u16,
585 iemAImpl_sar_u32,
586 iemAImpl_sar_u64
587};
588
589
590/** Function table for the MUL instruction. */
591IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_mul =
592{
593 iemAImpl_mul_u8,
594 iemAImpl_mul_u16,
595 iemAImpl_mul_u32,
596 iemAImpl_mul_u64
597};
598
599/** Function table for the IMUL instruction working implicitly on rAX. */
600IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_imul =
601{
602 iemAImpl_imul_u8,
603 iemAImpl_imul_u16,
604 iemAImpl_imul_u32,
605 iemAImpl_imul_u64
606};
607
608/** Function table for the DIV instruction. */
609IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_div =
610{
611 iemAImpl_div_u8,
612 iemAImpl_div_u16,
613 iemAImpl_div_u32,
614 iemAImpl_div_u64
615};
616
617/** Function table for the IDIV instruction. */
618IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_idiv =
619{
620 iemAImpl_idiv_u8,
621 iemAImpl_idiv_u16,
622 iemAImpl_idiv_u32,
623 iemAImpl_idiv_u64
624};
625
626/** Function table for the SHLD instruction */
627IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shld =
628{
629 iemAImpl_shld_u16,
630 iemAImpl_shld_u32,
631 iemAImpl_shld_u64,
632};
633
634/** Function table for the SHRD instruction */
635IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shrd =
636{
637 iemAImpl_shrd_u16,
638 iemAImpl_shrd_u32,
639 iemAImpl_shrd_u64,
640};
641
642
643/** Function table for the PUNPCKLBW instruction */
644IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklbw = { iemAImpl_punpcklbw_u64, iemAImpl_punpcklbw_u128 };
645/** Function table for the PUNPCKLWD instruction */
646IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklwd = { iemAImpl_punpcklwd_u64, iemAImpl_punpcklwd_u128 };
647/** Function table for the PUNPCKLDQ instruction */
648IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpckldq = { iemAImpl_punpckldq_u64, iemAImpl_punpckldq_u128 };
649/** Function table for the PUNPCKLQDQ instruction */
650IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklqdq = { NULL, iemAImpl_punpcklqdq_u128 };
651
652/** Function table for the PUNPCKHBW instruction */
653IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhbw = { iemAImpl_punpckhbw_u64, iemAImpl_punpckhbw_u128 };
654/** Function table for the PUNPCKHWD instruction */
655IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhwd = { iemAImpl_punpckhwd_u64, iemAImpl_punpckhwd_u128 };
656/** Function table for the PUNPCKHDQ instruction */
657IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhdq = { iemAImpl_punpckhdq_u64, iemAImpl_punpckhdq_u128 };
658/** Function table for the PUNPCKHQDQ instruction */
659IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhqdq = { NULL, iemAImpl_punpckhqdq_u128 };
660
661/** Function table for the PXOR instruction */
662IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pxor = { iemAImpl_pxor_u64, iemAImpl_pxor_u128 };
663/** Function table for the PCMPEQB instruction */
664IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqb = { iemAImpl_pcmpeqb_u64, iemAImpl_pcmpeqb_u128 };
665/** Function table for the PCMPEQW instruction */
666IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqw = { iemAImpl_pcmpeqw_u64, iemAImpl_pcmpeqw_u128 };
667/** Function table for the PCMPEQD instruction */
668IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqd = { iemAImpl_pcmpeqd_u64, iemAImpl_pcmpeqd_u128 };
669
670
671#if defined(IEM_VERIFICATION_MODE_MINIMAL) || defined(IEM_LOG_MEMORY_WRITES)
672/** What IEM just wrote. */
673uint8_t g_abIemWrote[256];
674/** How much IEM just wrote. */
675size_t g_cbIemWrote;
676#endif
677
678
679/*********************************************************************************************************************************
680* Internal Functions *
681*********************************************************************************************************************************/
682IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PIEMCPU pIemCpu, uint16_t uErr);
683IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PIEMCPU pIemCpu);
684IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFault0(PIEMCPU pIemCpu);
685IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PIEMCPU pIemCpu, uint16_t uSel);
686/*IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresent(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess);*/
687IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel);
688IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PIEMCPU pIemCpu, uint16_t uErr);
689IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel);
690IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PIEMCPU pIemCpu, uint16_t uErr);
691IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault(PIEMCPU pIemCpu, uint16_t uErr);
692IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PIEMCPU pIemCpu);
693IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PIEMCPU pIemCpu, RTSEL uSel);
694IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBounds(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess);
695IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PIEMCPU pIemCpu, RTSEL Sel);
696IEM_STATIC VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess);
697IEM_STATIC VBOXSTRICTRC iemRaisePageFault(PIEMCPU pIemCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
698IEM_STATIC VBOXSTRICTRC iemRaiseAlignmentCheckException(PIEMCPU pIemCpu);
699IEM_STATIC VBOXSTRICTRC iemMemMap(PIEMCPU pIemCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess);
700IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PIEMCPU pIemCpu, void *pvMem, uint32_t fAccess);
701IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
702IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
703IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
704IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
705IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
706IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
707IEM_STATIC VBOXSTRICTRC iemMemFetchSelDescWithErr(PIEMCPU pIemCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode);
708IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PIEMCPU pIemCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt);
709IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PIEMCPU pIemCpu, void *pvMem, uint64_t uNewRsp);
710IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PIEMCPU pIemCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp);
711IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PIEMCPU pIemCpu, uint32_t u32Value);
712IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PIEMCPU pIemCpu, uint16_t u16Value);
713IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PIEMCPU pIemCpu, uint16_t uSel);
714IEM_STATIC uint16_t iemSRegFetchU16(PIEMCPU pIemCpu, uint8_t iSegReg);
715
716#if defined(IEM_VERIFICATION_MODE_FULL) && !defined(IEM_VERIFICATION_MODE_MINIMAL)
717IEM_STATIC PIEMVERIFYEVTREC iemVerifyAllocRecord(PIEMCPU pIemCpu);
718#endif
719IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue);
720IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue);
721
722
723
724/**
725 * Sets the pass up status.
726 *
727 * @returns VINF_SUCCESS.
728 * @param pIemCpu The per CPU IEM state of the calling thread.
729 * @param rcPassUp The pass up status. Must be informational.
730 * VINF_SUCCESS is not allowed.
731 */
732IEM_STATIC int iemSetPassUpStatus(PIEMCPU pIemCpu, VBOXSTRICTRC rcPassUp)
733{
734 AssertRC(VBOXSTRICTRC_VAL(rcPassUp)); Assert(rcPassUp != VINF_SUCCESS);
735
736 int32_t const rcOldPassUp = pIemCpu->rcPassUp;
737 if (rcOldPassUp == VINF_SUCCESS)
738 pIemCpu->rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
739 /* If both are EM scheduling codes, use EM priority rules. */
740 else if ( rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST
741 && rcPassUp >= VINF_EM_FIRST && rcPassUp <= VINF_EM_LAST)
742 {
743 if (rcPassUp < rcOldPassUp)
744 {
745 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
746 pIemCpu->rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
747 }
748 else
749 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
750 }
751 /* Override EM scheduling with specific status code. */
752 else if (rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST)
753 {
754 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
755 pIemCpu->rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
756 }
757 /* Don't override specific status code, first come first served. */
758 else
759 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
760 return VINF_SUCCESS;
761}
762
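/*
 * Worked illustration of the rules above (comment added, not part of the
 * original source):
 *  - No status recorded yet (VINF_SUCCESS): the new status is simply stored.
 *  - Both recorded and new status are EM scheduling codes
 *    (VINF_EM_FIRST..VINF_EM_LAST): the numerically smaller one wins, as
 *    lower EM codes carry higher scheduling priority.
 *  - A recorded EM scheduling code is overridden by any other informational
 *    status, while a recorded non-EM status is kept (first come, first
 *    served).
 */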
763
764/**
765 * Calculates the CPU mode.
766 *
767 * This is mainly for updating IEMCPU::enmCpuMode.
768 *
769 * @returns CPU mode.
770 * @param pCtx The register context for the CPU.
771 */
772DECLINLINE(IEMMODE) iemCalcCpuMode(PCPUMCTX pCtx)
773{
774 if (CPUMIsGuestIn64BitCodeEx(pCtx))
775 return IEMMODE_64BIT;
776 if (pCtx->cs.Attr.n.u1DefBig) /** @todo check if this is correct... */
777 return IEMMODE_32BIT;
778 return IEMMODE_16BIT;
779}
780
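/*
 * Added note (not from the original source): the three outcomes correspond to
 * the usual x86 cases - 64-bit code (EFER.LMA with CS.L set) yields
 * IEMMODE_64BIT, a set CS.D/B bit yields IEMMODE_32BIT, and everything else
 * (real mode, V86 mode and 16-bit protected mode) falls back to IEMMODE_16BIT.
 */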
781
782/**
783 * Initializes the execution state.
784 *
785 * @param pIemCpu The per CPU IEM state.
786 * @param fBypassHandlers Whether to bypass access handlers.
787 *
788 * @remarks Callers of this must call iemUninitExec() to undo potentially fatal
789 * side-effects in strict builds.
790 */
791DECLINLINE(void) iemInitExec(PIEMCPU pIemCpu, bool fBypassHandlers)
792{
793 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
794 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
795
796 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
797 Assert(pIemCpu->PendingCommit.enmFn == IEMCOMMIT_INVALID);
798
799#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
800 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
801 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
802 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
803 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
804 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
805 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
806 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
807 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
808#endif
809
810#ifdef VBOX_WITH_RAW_MODE_NOT_R0
811 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
812#endif
813 pIemCpu->uCpl = CPUMGetGuestCPL(pVCpu);
814 pIemCpu->enmCpuMode = iemCalcCpuMode(pCtx);
815#ifdef VBOX_STRICT
816 pIemCpu->enmDefAddrMode = (IEMMODE)0xc0fe;
817 pIemCpu->enmEffAddrMode = (IEMMODE)0xc0fe;
818 pIemCpu->enmDefOpSize = (IEMMODE)0xc0fe;
819 pIemCpu->enmEffOpSize = (IEMMODE)0xc0fe;
820 pIemCpu->fPrefixes = (IEMMODE)0xfeedbeef;
821 pIemCpu->uRexReg = 127;
822 pIemCpu->uRexB = 127;
823 pIemCpu->uRexIndex = 127;
824 pIemCpu->iEffSeg = 127;
825 pIemCpu->offOpcode = 127;
826 pIemCpu->cbOpcode = 127;
827#endif
828
829 pIemCpu->cActiveMappings = 0;
830 pIemCpu->iNextMapping = 0;
831 pIemCpu->rcPassUp = VINF_SUCCESS;
832 pIemCpu->fBypassHandlers = fBypassHandlers;
833#ifdef VBOX_WITH_RAW_MODE_NOT_R0
834 pIemCpu->fInPatchCode = pIemCpu->uCpl == 0
835 && pCtx->cs.u64Base == 0
836 && pCtx->cs.u32Limit == UINT32_MAX
837 && PATMIsPatchGCAddr(IEMCPU_TO_VM(pIemCpu), pCtx->eip);
838 if (!pIemCpu->fInPatchCode)
839 CPUMRawLeave(pVCpu, VINF_SUCCESS);
840#endif
841
842#ifdef IEM_VERIFICATION_MODE_FULL
843 pIemCpu->fNoRemSavedByExec = pIemCpu->fNoRem;
844 pIemCpu->fNoRem = true;
845#endif
846}
847
848
849/**
850 * Counterpart to #iemInitExec that undoes evil strict-build stuff.
851 *
852 * @param pIemCpu The per CPU IEM state.
853 */
854DECLINLINE(void) iemUninitExec(PIEMCPU pIemCpu)
855{
856#ifdef IEM_VERIFICATION_MODE_FULL
857 pIemCpu->fNoRem = pIemCpu->fNoRemSavedByExec;
858#endif
859#ifdef VBOX_STRICT
860 pIemCpu->cbOpcode = 0;
861#else
862 NOREF(pIemCpu);
863#endif
864}
865
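/**
 * Pairing sketch added for illustration (not part of the original source):
 * iemInitExec and iemUninitExec are meant to bracket an emulation attempt so
 * the strict-build poisoning is always undone.  The caller below is
 * hypothetical.
 *
 * @code
 *      VBOXSTRICTRC iemExampleExecuteOne(PIEMCPU pIemCpu)
 *      {
 *          iemInitExec(pIemCpu, false);    // fBypassHandlers = false
 *          VBOXSTRICTRC rcStrict = VINF_SUCCESS;
 *          // ... decode and execute one instruction here, updating rcStrict ...
 *          iemUninitExec(pIemCpu);         // undo the strict-build poisoning
 *          return rcStrict;
 *      }
 * @endcode
 */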
866
867/**
868 * Initializes the decoder state.
869 *
870 * @param pIemCpu The per CPU IEM state.
871 * @param fBypassHandlers Whether to bypass access handlers.
872 */
873DECLINLINE(void) iemInitDecoder(PIEMCPU pIemCpu, bool fBypassHandlers)
874{
875 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
876 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
877
878 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
879 Assert(pIemCpu->PendingCommit.enmFn == IEMCOMMIT_INVALID);
880
881#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
882 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
883 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
884 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
885 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
886 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
887 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
888 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
889 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
890#endif
891
892#ifdef VBOX_WITH_RAW_MODE_NOT_R0
893 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
894#endif
895 pIemCpu->uCpl = CPUMGetGuestCPL(pVCpu);
896#ifdef IEM_VERIFICATION_MODE_FULL
897 if (pIemCpu->uInjectCpl != UINT8_MAX)
898 pIemCpu->uCpl = pIemCpu->uInjectCpl;
899#endif
900 IEMMODE enmMode = iemCalcCpuMode(pCtx);
901 pIemCpu->enmCpuMode = enmMode;
902 pIemCpu->enmDefAddrMode = enmMode; /** @todo check if this is correct... */
903 pIemCpu->enmEffAddrMode = enmMode;
904 if (enmMode != IEMMODE_64BIT)
905 {
906 pIemCpu->enmDefOpSize = enmMode; /** @todo check if this is correct... */
907 pIemCpu->enmEffOpSize = enmMode;
908 }
909 else
910 {
911 pIemCpu->enmDefOpSize = IEMMODE_32BIT;
912 pIemCpu->enmEffOpSize = IEMMODE_32BIT;
913 }
914 pIemCpu->fPrefixes = 0;
915 pIemCpu->uRexReg = 0;
916 pIemCpu->uRexB = 0;
917 pIemCpu->uRexIndex = 0;
918 pIemCpu->iEffSeg = X86_SREG_DS;
919 pIemCpu->offOpcode = 0;
920 pIemCpu->cbOpcode = 0;
921 pIemCpu->cActiveMappings = 0;
922 pIemCpu->iNextMapping = 0;
923 pIemCpu->rcPassUp = VINF_SUCCESS;
924 pIemCpu->fBypassHandlers = fBypassHandlers;
925#ifdef VBOX_WITH_RAW_MODE_NOT_R0
926 pIemCpu->fInPatchCode = pIemCpu->uCpl == 0
927 && pCtx->cs.u64Base == 0
928 && pCtx->cs.u32Limit == UINT32_MAX
929 && PATMIsPatchGCAddr(IEMCPU_TO_VM(pIemCpu), pCtx->eip);
930 if (!pIemCpu->fInPatchCode)
931 CPUMRawLeave(pVCpu, VINF_SUCCESS);
932#endif
933
934#ifdef DBGFTRACE_ENABLED
935 switch (enmMode)
936 {
937 case IEMMODE_64BIT:
938 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pIemCpu->uCpl, pCtx->rip);
939 break;
940 case IEMMODE_32BIT:
941 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pIemCpu->uCpl, pCtx->cs.Sel, pCtx->eip);
942 break;
943 case IEMMODE_16BIT:
944 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pIemCpu->uCpl, pCtx->cs.Sel, pCtx->eip);
945 break;
946 }
947#endif
948}
949
950
951/**
952 * Prefetches opcodes when first starting execution.
953 *
954 * @returns Strict VBox status code.
955 * @param pIemCpu The IEM state.
956 * @param fBypassHandlers Whether to bypass access handlers.
957 */
958IEM_STATIC VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PIEMCPU pIemCpu, bool fBypassHandlers)
959{
960#ifdef IEM_VERIFICATION_MODE_FULL
961 uint8_t const cbOldOpcodes = pIemCpu->cbOpcode;
962#endif
963 iemInitDecoder(pIemCpu, fBypassHandlers);
964
965 /*
966 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
967 *
968 * First translate CS:rIP to a physical address.
969 */
970 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
971 uint32_t cbToTryRead;
972 RTGCPTR GCPtrPC;
973 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
974 {
975 cbToTryRead = PAGE_SIZE;
976 GCPtrPC = pCtx->rip;
977 if (!IEM_IS_CANONICAL(GCPtrPC))
978 return iemRaiseGeneralProtectionFault0(pIemCpu);
979 cbToTryRead = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
980 }
981 else
982 {
983 uint32_t GCPtrPC32 = pCtx->eip;
984 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || pIemCpu->enmCpuMode == IEMMODE_32BIT, ("%04x:%RX64\n", pCtx->cs.Sel, pCtx->rip));
985 if (GCPtrPC32 > pCtx->cs.u32Limit)
986 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
987 cbToTryRead = pCtx->cs.u32Limit - GCPtrPC32 + 1;
988 if (!cbToTryRead) /* overflowed */
989 {
990 Assert(GCPtrPC32 == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
991 cbToTryRead = UINT32_MAX;
992 }
993 GCPtrPC = (uint32_t)pCtx->cs.u64Base + GCPtrPC32;
994 Assert(GCPtrPC <= UINT32_MAX);
995 }
996
997#ifdef VBOX_WITH_RAW_MODE_NOT_R0
998 /* Allow interpretation of patch manager code blocks since they can for
999 instance throw #PFs for perfectly good reasons. */
1000 if (pIemCpu->fInPatchCode)
1001 {
1002 size_t cbRead = 0;
1003 int rc = PATMReadPatchCode(IEMCPU_TO_VM(pIemCpu), GCPtrPC, pIemCpu->abOpcode, sizeof(pIemCpu->abOpcode), &cbRead);
1004 AssertRCReturn(rc, rc);
1005 pIemCpu->cbOpcode = (uint8_t)cbRead; Assert(pIemCpu->cbOpcode == cbRead); Assert(cbRead > 0);
1006 return VINF_SUCCESS;
1007 }
1008#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1009
1010 RTGCPHYS GCPhys;
1011 uint64_t fFlags;
1012 int rc = PGMGstGetPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrPC, &fFlags, &GCPhys);
1013 if (RT_FAILURE(rc))
1014 {
1015 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
1016 return iemRaisePageFault(pIemCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, rc);
1017 }
1018 if (!(fFlags & X86_PTE_US) && pIemCpu->uCpl == 3)
1019 {
1020 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
1021 return iemRaisePageFault(pIemCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1022 }
1023 if ((fFlags & X86_PTE_PAE_NX) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
1024 {
1025 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
1026 return iemRaisePageFault(pIemCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1027 }
1028 GCPhys |= GCPtrPC & PAGE_OFFSET_MASK;
1029 /** @todo Check reserved bits and such stuff. PGM is better at doing
1030 * that, so do it when implementing the guest virtual address
1031 * TLB... */
1032
1033#ifdef IEM_VERIFICATION_MODE_FULL
1034 /*
1035 * Optimistic optimization: Use unconsumed opcode bytes from the previous
1036 * instruction.
1037 */
1038 /** @todo optimize this differently by not using PGMPhysRead. */
1039 RTGCPHYS const offPrevOpcodes = GCPhys - pIemCpu->GCPhysOpcodes;
1040 pIemCpu->GCPhysOpcodes = GCPhys;
1041 if ( offPrevOpcodes < cbOldOpcodes
1042 && PAGE_SIZE - (GCPhys & PAGE_OFFSET_MASK) > sizeof(pIemCpu->abOpcode))
1043 {
1044 uint8_t cbNew = cbOldOpcodes - (uint8_t)offPrevOpcodes;
1045 Assert(cbNew <= RT_ELEMENTS(pIemCpu->abOpcode));
1046 memmove(&pIemCpu->abOpcode[0], &pIemCpu->abOpcode[offPrevOpcodes], cbNew);
1047 pIemCpu->cbOpcode = cbNew;
1048 return VINF_SUCCESS;
1049 }
1050#endif
1051
1052 /*
1053 * Read the bytes at this address.
1054 */
1055 PVM pVM = IEMCPU_TO_VM(pIemCpu);
1056#if defined(IN_RING3) && defined(VBOX_WITH_RAW_MODE_NOT_R0)
1057 size_t cbActual;
1058 if ( PATMIsEnabled(pVM)
1059 && RT_SUCCESS(PATMR3ReadOrgInstr(pVM, GCPtrPC, pIemCpu->abOpcode, sizeof(pIemCpu->abOpcode), &cbActual)))
1060 {
1061 Log4(("decode - Read %u unpatched bytes at %RGv\n", cbActual, GCPtrPC));
1062 Assert(cbActual > 0);
1063 pIemCpu->cbOpcode = (uint8_t)cbActual;
1064 }
1065 else
1066#endif
1067 {
1068 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1069 if (cbToTryRead > cbLeftOnPage)
1070 cbToTryRead = cbLeftOnPage;
1071 if (cbToTryRead > sizeof(pIemCpu->abOpcode))
1072 cbToTryRead = sizeof(pIemCpu->abOpcode);
1073
1074 if (!pIemCpu->fBypassHandlers)
1075 {
1076 VBOXSTRICTRC rcStrict = PGMPhysRead(pVM, GCPhys, pIemCpu->abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
1077 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1078 { /* likely */ }
1079 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1080 {
1081 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1082 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1083 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
1084 }
1085 else
1086 {
1087 Log((RT_SUCCESS(rcStrict)
1088 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1089 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1090 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1091 return rcStrict;
1092 }
1093 }
1094 else
1095 {
1096 rc = PGMPhysSimpleReadGCPhys(pVM, pIemCpu->abOpcode, GCPhys, cbToTryRead);
1097 if (RT_SUCCESS(rc))
1098 { /* likely */ }
1099 else
1100 {
1101 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
1102 GCPtrPC, GCPhys, cbToTryRead, rc));
1103 return rc;
1104 }
1105 }
1106 pIemCpu->cbOpcode = cbToTryRead;
1107 }
1108
1109 return VINF_SUCCESS;
1110}
1111
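/*
 * Worked example for the 16/32-bit limit arithmetic above (comment added, not
 * from the original source): with cs.u32Limit = 0x0000ffff and eip = 0xfff0,
 * cbToTryRead = 0xffff - 0xfff0 + 1 = 0x10, i.e. at most 16 bytes may be read
 * before running off the end of the code segment.  The + 1 only wraps to zero
 * when the limit is UINT32_MAX and eip is zero, which the "overflowed" branch
 * turns back into UINT32_MAX.
 */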
1112
1113/**
1114 * Tries to fetch at least @a cbMin bytes more opcodes, raising the appropriate
1115 * exception if it fails.
1116 *
1117 * @returns Strict VBox status code.
1118 * @param pIemCpu The IEM state.
1119 * @param cbMin The minimum number of bytes relative to offOpcode
1120 * that must be read.
1121 */
1122IEM_STATIC VBOXSTRICTRC iemOpcodeFetchMoreBytes(PIEMCPU pIemCpu, size_t cbMin)
1123{
1124 /*
1125 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1126 *
1127 * First translate CS:rIP to a physical address.
1128 */
1129 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1130 uint8_t cbLeft = pIemCpu->cbOpcode - pIemCpu->offOpcode; Assert(cbLeft < cbMin);
1131 uint32_t cbToTryRead;
1132 RTGCPTR GCPtrNext;
1133 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
1134 {
1135 cbToTryRead = PAGE_SIZE;
1136 GCPtrNext = pCtx->rip + pIemCpu->cbOpcode;
1137 if (!IEM_IS_CANONICAL(GCPtrNext))
1138 return iemRaiseGeneralProtectionFault0(pIemCpu);
1139 }
1140 else
1141 {
1142 uint32_t GCPtrNext32 = pCtx->eip;
1143 Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || pIemCpu->enmCpuMode == IEMMODE_32BIT);
1144 GCPtrNext32 += pIemCpu->cbOpcode;
1145 if (GCPtrNext32 > pCtx->cs.u32Limit)
1146 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1147 cbToTryRead = pCtx->cs.u32Limit - GCPtrNext32 + 1;
1148 if (!cbToTryRead) /* overflowed */
1149 {
1150 Assert(GCPtrNext32 == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
1151 cbToTryRead = UINT32_MAX;
1152 /** @todo check out wrapping around the code segment. */
1153 }
1154 if (cbToTryRead < cbMin - cbLeft)
1155 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1156 GCPtrNext = (uint32_t)pCtx->cs.u64Base + GCPtrNext32;
1157 }
1158
1159 /* Only read up to the end of the page, and make sure we don't read more
1160 than the opcode buffer can hold. */
1161 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrNext & PAGE_OFFSET_MASK);
1162 if (cbToTryRead > cbLeftOnPage)
1163 cbToTryRead = cbLeftOnPage;
1164 if (cbToTryRead > sizeof(pIemCpu->abOpcode) - pIemCpu->cbOpcode)
1165 cbToTryRead = sizeof(pIemCpu->abOpcode) - pIemCpu->cbOpcode;
1166/** @todo r=bird: Convert assertion into undefined opcode exception? */
1167 Assert(cbToTryRead >= cbMin - cbLeft); /* ASSUMPTION based on iemInitDecoderAndPrefetchOpcodes. */
1168
1169#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1170 /* Allow interpretation of patch manager code blocks since they can for
1171 instance throw #PFs for perfectly good reasons. */
1172 if (pIemCpu->fInPatchCode)
1173 {
1174 size_t cbRead = 0;
1175 int rc = PATMReadPatchCode(IEMCPU_TO_VM(pIemCpu), GCPtrNext, pIemCpu->abOpcode, cbToTryRead, &cbRead);
1176 AssertRCReturn(rc, rc);
1177 pIemCpu->cbOpcode = (uint8_t)cbRead; Assert(pIemCpu->cbOpcode == cbRead); Assert(cbRead > 0);
1178 return VINF_SUCCESS;
1179 }
1180#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1181
1182 RTGCPHYS GCPhys;
1183 uint64_t fFlags;
1184 int rc = PGMGstGetPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrNext, &fFlags, &GCPhys);
1185 if (RT_FAILURE(rc))
1186 {
1187 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
1188 return iemRaisePageFault(pIemCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, rc);
1189 }
1190 if (!(fFlags & X86_PTE_US) && pIemCpu->uCpl == 3)
1191 {
1192 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
1193 return iemRaisePageFault(pIemCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1194 }
1195 if ((fFlags & X86_PTE_PAE_NX) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
1196 {
1197 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
1198 return iemRaisePageFault(pIemCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1199 }
1200 GCPhys |= GCPtrNext & PAGE_OFFSET_MASK;
1201 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, pIemCpu->cbOpcode));
1202 /** @todo Check reserved bits and such stuff. PGM is better at doing
1203 * that, so do it when implementing the guest virtual address
1204 * TLB... */
1205
1206 /*
1207 * Read the bytes at this address.
1208 *
1209 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
1210 * and since PATM should only patch the start of an instruction there
1211 * should be no need to check again here.
1212 */
1213 if (!pIemCpu->fBypassHandlers)
1214 {
1215 VBOXSTRICTRC rcStrict = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhys, &pIemCpu->abOpcode[pIemCpu->cbOpcode],
1216 cbToTryRead, PGMACCESSORIGIN_IEM);
1217 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1218 { /* likely */ }
1219 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1220 {
1221 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1222 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1223 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
1224 }
1225 else
1226 {
1227 Log((RT_SUCCESS(rcStrict)
1228 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1229 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1230 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1231 return rcStrict;
1232 }
1233 }
1234 else
1235 {
1236 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), &pIemCpu->abOpcode[pIemCpu->cbOpcode], GCPhys, cbToTryRead);
1237 if (RT_SUCCESS(rc))
1238 { /* likely */ }
1239 else
1240 {
1241 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
1242 return rc;
1243 }
1244 }
1245 pIemCpu->cbOpcode += cbToTryRead;
1246 Log5(("%.*Rhxs\n", pIemCpu->cbOpcode, pIemCpu->abOpcode));
1247
1248 return VINF_SUCCESS;
1249}
1250
1251
1252/**
1253 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
1254 *
1255 * @returns Strict VBox status code.
1256 * @param pIemCpu The IEM state.
1257 * @param pb Where to return the opcode byte.
1258 */
1259DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU8Slow(PIEMCPU pIemCpu, uint8_t *pb)
1260{
1261 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 1);
1262 if (rcStrict == VINF_SUCCESS)
1263 {
1264 uint8_t offOpcode = pIemCpu->offOpcode;
1265 *pb = pIemCpu->abOpcode[offOpcode];
1266 pIemCpu->offOpcode = offOpcode + 1;
1267 }
1268 else
1269 *pb = 0;
1270 return rcStrict;
1271}
1272
1273
1274/**
1275 * Fetches the next opcode byte.
1276 *
1277 * @returns Strict VBox status code.
1278 * @param pIemCpu The IEM state.
1279 * @param pu8 Where to return the opcode byte.
1280 */
1281DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU8(PIEMCPU pIemCpu, uint8_t *pu8)
1282{
1283 uint8_t const offOpcode = pIemCpu->offOpcode;
1284 if (RT_LIKELY(offOpcode < pIemCpu->cbOpcode))
1285 {
1286 *pu8 = pIemCpu->abOpcode[offOpcode];
1287 pIemCpu->offOpcode = offOpcode + 1;
1288 return VINF_SUCCESS;
1289 }
1290 return iemOpcodeGetNextU8Slow(pIemCpu, pu8);
1291}
1292
1293
1294/**
1295 * Fetches the next opcode byte, returns automatically on failure.
1296 *
1297 * @param a_pu8 Where to return the opcode byte.
1298 * @remark Implicitly references pIemCpu.
1299 */
1300#define IEM_OPCODE_GET_NEXT_U8(a_pu8) \
1301 do \
1302 { \
1303 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU8(pIemCpu, (a_pu8)); \
1304 if (rcStrict2 != VINF_SUCCESS) \
1305 return rcStrict2; \
1306 } while (0)
1307
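/**
 * Illustration added here (not in the original source): the GET_NEXT macros
 * are written to be used directly in decoder bodies, where a failed opcode
 * fetch must immediately propagate the strict status code.  The opcode name
 * is hypothetical.
 *
 * @code
 *      FNIEMOP_DEF(iemOp_hypothetical_AL_Ib)
 *      {
 *          uint8_t u8Imm;
 *          IEM_OPCODE_GET_NEXT_U8(&u8Imm);     // returns from the decoder on failure
 *          // ... use u8Imm ...
 *          return VINF_SUCCESS;
 *      }
 * @endcode
 */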
1308
1309/**
1310 * Fetches the next signed byte from the opcode stream.
1311 *
1312 * @returns Strict VBox status code.
1313 * @param pIemCpu The IEM state.
1314 * @param pi8 Where to return the signed byte.
1315 */
1316DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8(PIEMCPU pIemCpu, int8_t *pi8)
1317{
1318 return iemOpcodeGetNextU8(pIemCpu, (uint8_t *)pi8);
1319}
1320
1321
1322/**
1323 * Fetches the next signed byte from the opcode stream, returning automatically
1324 * on failure.
1325 *
1326 * @param a_pi8 Where to return the signed byte.
1327 * @remark Implicitly references pIemCpu.
1328 */
1329#define IEM_OPCODE_GET_NEXT_S8(a_pi8) \
1330 do \
1331 { \
1332 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8(pIemCpu, (a_pi8)); \
1333 if (rcStrict2 != VINF_SUCCESS) \
1334 return rcStrict2; \
1335 } while (0)
1336
1337
1338/**
1339 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
1340 *
1341 * @returns Strict VBox status code.
1342 * @param pIemCpu The IEM state.
1343 * @param pu16 Where to return the opcode word.
1344 */
1345DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU16Slow(PIEMCPU pIemCpu, uint16_t *pu16)
1346{
1347 uint8_t u8;
1348 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pIemCpu, &u8);
1349 if (rcStrict == VINF_SUCCESS)
1350 *pu16 = (int8_t)u8;
1351 return rcStrict;
1352}
1353
1354
1355/**
1356 * Fetches the next signed byte from the opcode stream, extending it to
1357 * unsigned 16-bit.
1358 *
1359 * @returns Strict VBox status code.
1360 * @param pIemCpu The IEM state.
1361 * @param pu16 Where to return the unsigned word.
1362 */
1363DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU16(PIEMCPU pIemCpu, uint16_t *pu16)
1364{
1365 uint8_t const offOpcode = pIemCpu->offOpcode;
1366 if (RT_UNLIKELY(offOpcode >= pIemCpu->cbOpcode))
1367 return iemOpcodeGetNextS8SxU16Slow(pIemCpu, pu16);
1368
1369 *pu16 = (int8_t)pIemCpu->abOpcode[offOpcode];
1370 pIemCpu->offOpcode = offOpcode + 1;
1371 return VINF_SUCCESS;
1372}
1373
1374
1375/**
1376 * Fetches the next signed byte from the opcode stream and sign-extends it to
1377 * a word, returning automatically on failure.
1378 *
1379 * @param a_pu16 Where to return the word.
1380 * @remark Implicitly references pIemCpu.
1381 */
1382#define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) \
1383 do \
1384 { \
1385 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU16(pIemCpu, (a_pu16)); \
1386 if (rcStrict2 != VINF_SUCCESS) \
1387 return rcStrict2; \
1388 } while (0)
1389
1390
1391/**
1392 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
1393 *
1394 * @returns Strict VBox status code.
1395 * @param pIemCpu The IEM state.
1396 * @param pu32 Where to return the opcode dword.
1397 */
1398DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU32Slow(PIEMCPU pIemCpu, uint32_t *pu32)
1399{
1400 uint8_t u8;
1401 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pIemCpu, &u8);
1402 if (rcStrict == VINF_SUCCESS)
1403 *pu32 = (int8_t)u8;
1404 return rcStrict;
1405}
1406
1407
1408/**
1409 * Fetches the next signed byte from the opcode stream, extending it to
1410 * unsigned 32-bit.
1411 *
1412 * @returns Strict VBox status code.
1413 * @param pIemCpu The IEM state.
1414 * @param pu32 Where to return the unsigned dword.
1415 */
1416DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU32(PIEMCPU pIemCpu, uint32_t *pu32)
1417{
1418 uint8_t const offOpcode = pIemCpu->offOpcode;
1419 if (RT_UNLIKELY(offOpcode >= pIemCpu->cbOpcode))
1420 return iemOpcodeGetNextS8SxU32Slow(pIemCpu, pu32);
1421
1422 *pu32 = (int8_t)pIemCpu->abOpcode[offOpcode];
1423 pIemCpu->offOpcode = offOpcode + 1;
1424 return VINF_SUCCESS;
1425}
1426
1427
1428/**
1429 * Fetches the next signed byte from the opcode stream and sign-extends it to
1430 * a double word, returning automatically on failure.
1431 *
1432 * @param a_pu32 Where to return the double word.
1433 * @remark Implicitly references pIemCpu.
1434 */
1435#define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) \
1436 do \
1437 { \
1438 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU32(pIemCpu, (a_pu32)); \
1439 if (rcStrict2 != VINF_SUCCESS) \
1440 return rcStrict2; \
1441 } while (0)
1442
1443
1444/**
1445 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
1446 *
1447 * @returns Strict VBox status code.
1448 * @param pIemCpu The IEM state.
1449 * @param pu64 Where to return the opcode qword.
1450 */
1451DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1452{
1453 uint8_t u8;
1454 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pIemCpu, &u8);
1455 if (rcStrict == VINF_SUCCESS)
1456 *pu64 = (int8_t)u8;
1457 return rcStrict;
1458}
1459
1460
1461/**
1462 * Fetches the next signed byte from the opcode stream, extending it to
1463 * unsigned 64-bit.
1464 *
1465 * @returns Strict VBox status code.
1466 * @param pIemCpu The IEM state.
1467 * @param pu64 Where to return the unsigned qword.
1468 */
1469DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1470{
1471 uint8_t const offOpcode = pIemCpu->offOpcode;
1472 if (RT_UNLIKELY(offOpcode >= pIemCpu->cbOpcode))
1473 return iemOpcodeGetNextS8SxU64Slow(pIemCpu, pu64);
1474
1475 *pu64 = (int8_t)pIemCpu->abOpcode[offOpcode];
1476 pIemCpu->offOpcode = offOpcode + 1;
1477 return VINF_SUCCESS;
1478}
1479
1480
1481/**
1482 * Fetches the next signed byte from the opcode stream and sign-extends it to
1483 * a quad word, returning automatically on failure.
1484 *
1485 * @param a_pu64 Where to return the quad word.
1486 * @remark Implicitly references pIemCpu.
1487 */
1488#define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) \
1489 do \
1490 { \
1491 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU64(pIemCpu, (a_pu64)); \
1492 if (rcStrict2 != VINF_SUCCESS) \
1493 return rcStrict2; \
1494 } while (0)
1495
1496
1497/**
1498 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
1499 *
1500 * @returns Strict VBox status code.
1501 * @param pIemCpu The IEM state.
1502 * @param pu16 Where to return the opcode word.
1503 */
1504DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16Slow(PIEMCPU pIemCpu, uint16_t *pu16)
1505{
1506 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 2);
1507 if (rcStrict == VINF_SUCCESS)
1508 {
1509 uint8_t offOpcode = pIemCpu->offOpcode;
1510 *pu16 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1511 pIemCpu->offOpcode = offOpcode + 2;
1512 }
1513 else
1514 *pu16 = 0;
1515 return rcStrict;
1516}
1517
1518
1519/**
1520 * Fetches the next opcode word.
1521 *
1522 * @returns Strict VBox status code.
1523 * @param pIemCpu The IEM state.
1524 * @param pu16 Where to return the opcode word.
1525 */
1526DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16(PIEMCPU pIemCpu, uint16_t *pu16)
1527{
1528 uint8_t const offOpcode = pIemCpu->offOpcode;
1529 if (RT_UNLIKELY(offOpcode + 2 > pIemCpu->cbOpcode))
1530 return iemOpcodeGetNextU16Slow(pIemCpu, pu16);
1531
1532 *pu16 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1533 pIemCpu->offOpcode = offOpcode + 2;
1534 return VINF_SUCCESS;
1535}
1536
1537
1538/**
1539 * Fetches the next opcode word, returns automatically on failure.
1540 *
1541 * @param a_pu16 Where to return the opcode word.
1542 * @remark Implicitly references pIemCpu.
1543 */
1544#define IEM_OPCODE_GET_NEXT_U16(a_pu16) \
1545 do \
1546 { \
1547 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16(pIemCpu, (a_pu16)); \
1548 if (rcStrict2 != VINF_SUCCESS) \
1549 return rcStrict2; \
1550 } while (0)
1551
1552
1553/**
1554 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
1555 *
1556 * @returns Strict VBox status code.
1557 * @param pIemCpu The IEM state.
1558 * @param pu32 Where to return the opcode double word.
1559 */
1560DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32Slow(PIEMCPU pIemCpu, uint32_t *pu32)
1561{
1562 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 2);
1563 if (rcStrict == VINF_SUCCESS)
1564 {
1565 uint8_t offOpcode = pIemCpu->offOpcode;
1566 *pu32 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1567 pIemCpu->offOpcode = offOpcode + 2;
1568 }
1569 else
1570 *pu32 = 0;
1571 return rcStrict;
1572}
1573
1574
1575/**
1576 * Fetches the next opcode word, zero extending it to a double word.
1577 *
1578 * @returns Strict VBox status code.
1579 * @param pIemCpu The IEM state.
1580 * @param pu32 Where to return the opcode double word.
1581 */
1582DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32(PIEMCPU pIemCpu, uint32_t *pu32)
1583{
1584 uint8_t const offOpcode = pIemCpu->offOpcode;
1585 if (RT_UNLIKELY(offOpcode + 2 > pIemCpu->cbOpcode))
1586 return iemOpcodeGetNextU16ZxU32Slow(pIemCpu, pu32);
1587
1588 *pu32 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1589 pIemCpu->offOpcode = offOpcode + 2;
1590 return VINF_SUCCESS;
1591}
1592
1593
1594/**
1595 * Fetches the next opcode word and zero extends it to a double word, returns
1596 * automatically on failure.
1597 *
1598 * @param a_pu32 Where to return the opcode double word.
1599 * @remark Implicitly references pIemCpu.
1600 */
1601#define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) \
1602 do \
1603 { \
1604 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU32(pIemCpu, (a_pu32)); \
1605 if (rcStrict2 != VINF_SUCCESS) \
1606 return rcStrict2; \
1607 } while (0)
1608
1609
1610/**
1611 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
1612 *
1613 * @returns Strict VBox status code.
1614 * @param pIemCpu The IEM state.
1615 * @param pu64 Where to return the opcode quad word.
1616 */
1617DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1618{
1619 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 2);
1620 if (rcStrict == VINF_SUCCESS)
1621 {
1622 uint8_t offOpcode = pIemCpu->offOpcode;
1623 *pu64 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1624 pIemCpu->offOpcode = offOpcode + 2;
1625 }
1626 else
1627 *pu64 = 0;
1628 return rcStrict;
1629}
1630
1631
1632/**
1633 * Fetches the next opcode word, zero extending it to a quad word.
1634 *
1635 * @returns Strict VBox status code.
1636 * @param pIemCpu The IEM state.
1637 * @param pu64 Where to return the opcode quad word.
1638 */
1639DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1640{
1641 uint8_t const offOpcode = pIemCpu->offOpcode;
1642 if (RT_UNLIKELY(offOpcode + 2 > pIemCpu->cbOpcode))
1643 return iemOpcodeGetNextU16ZxU64Slow(pIemCpu, pu64);
1644
1645 *pu64 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1646 pIemCpu->offOpcode = offOpcode + 2;
1647 return VINF_SUCCESS;
1648}
1649
1650
1651/**
1652 * Fetches the next opcode word and zero extends it to a quad word, returns
1653 * automatically on failure.
1654 *
1655 * @param a_pu64 Where to return the opcode quad word.
1656 * @remark Implicitly references pIemCpu.
1657 */
1658#define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) \
1659 do \
1660 { \
1661 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU64(pIemCpu, (a_pu64)); \
1662 if (rcStrict2 != VINF_SUCCESS) \
1663 return rcStrict2; \
1664 } while (0)
1665
1666
1667/**
1668 * Fetches the next signed word from the opcode stream.
1669 *
1670 * @returns Strict VBox status code.
1671 * @param pIemCpu The IEM state.
1672 * @param pi16 Where to return the signed word.
1673 */
1674DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS16(PIEMCPU pIemCpu, int16_t *pi16)
1675{
1676 return iemOpcodeGetNextU16(pIemCpu, (uint16_t *)pi16);
1677}
1678
1679
1680/**
1681 * Fetches the next signed word from the opcode stream, returning automatically
1682 * on failure.
1683 *
1684 * @param a_pi16 Where to return the signed word.
1685 * @remark Implicitly references pIemCpu.
1686 */
1687#define IEM_OPCODE_GET_NEXT_S16(a_pi16) \
1688 do \
1689 { \
1690 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS16(pIemCpu, (a_pi16)); \
1691 if (rcStrict2 != VINF_SUCCESS) \
1692 return rcStrict2; \
1693 } while (0)
1694
1695
1696/**
1697 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
1698 *
1699 * @returns Strict VBox status code.
1700 * @param pIemCpu The IEM state.
1701 * @param pu32 Where to return the opcode dword.
1702 */
1703DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32Slow(PIEMCPU pIemCpu, uint32_t *pu32)
1704{
1705 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 4);
1706 if (rcStrict == VINF_SUCCESS)
1707 {
1708 uint8_t offOpcode = pIemCpu->offOpcode;
1709 *pu32 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1710 pIemCpu->abOpcode[offOpcode + 1],
1711 pIemCpu->abOpcode[offOpcode + 2],
1712 pIemCpu->abOpcode[offOpcode + 3]);
1713 pIemCpu->offOpcode = offOpcode + 4;
1714 }
1715 else
1716 *pu32 = 0;
1717 return rcStrict;
1718}
1719
1720
1721/**
1722 * Fetches the next opcode dword.
1723 *
1724 * @returns Strict VBox status code.
1725 * @param pIemCpu The IEM state.
1726 * @param pu32 Where to return the opcode double word.
1727 */
1728DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32(PIEMCPU pIemCpu, uint32_t *pu32)
1729{
1730 uint8_t const offOpcode = pIemCpu->offOpcode;
1731 if (RT_UNLIKELY(offOpcode + 4 > pIemCpu->cbOpcode))
1732 return iemOpcodeGetNextU32Slow(pIemCpu, pu32);
1733
1734 *pu32 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1735 pIemCpu->abOpcode[offOpcode + 1],
1736 pIemCpu->abOpcode[offOpcode + 2],
1737 pIemCpu->abOpcode[offOpcode + 3]);
1738 pIemCpu->offOpcode = offOpcode + 4;
1739 return VINF_SUCCESS;
1740}
1741
1742
1743/**
1744 * Fetches the next opcode dword, returns automatically on failure.
1745 *
1746 * @param a_pu32 Where to return the opcode dword.
1747 * @remark Implicitly references pIemCpu.
1748 */
1749#define IEM_OPCODE_GET_NEXT_U32(a_pu32) \
1750 do \
1751 { \
1752 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32(pIemCpu, (a_pu32)); \
1753 if (rcStrict2 != VINF_SUCCESS) \
1754 return rcStrict2; \
1755 } while (0)
1756
1757
1758/**
1759 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
1760 *
1761 * @returns Strict VBox status code.
1762 * @param pIemCpu The IEM state.
1763 * @param pu64 Where to return the opcode quad word.
1764 */
1765DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1766{
1767 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 4);
1768 if (rcStrict == VINF_SUCCESS)
1769 {
1770 uint8_t offOpcode = pIemCpu->offOpcode;
1771 *pu64 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1772 pIemCpu->abOpcode[offOpcode + 1],
1773 pIemCpu->abOpcode[offOpcode + 2],
1774 pIemCpu->abOpcode[offOpcode + 3]);
1775 pIemCpu->offOpcode = offOpcode + 4;
1776 }
1777 else
1778 *pu64 = 0;
1779 return rcStrict;
1780}
1781
1782
1783/**
1784 * Fetches the next opcode dword, zero extending it to a quad word.
1785 *
1786 * @returns Strict VBox status code.
1787 * @param pIemCpu The IEM state.
1788 * @param pu64 Where to return the opcode quad word.
1789 */
1790DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1791{
1792 uint8_t const offOpcode = pIemCpu->offOpcode;
1793 if (RT_UNLIKELY(offOpcode + 4 > pIemCpu->cbOpcode))
1794 return iemOpcodeGetNextU32ZxU64Slow(pIemCpu, pu64);
1795
1796 *pu64 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1797 pIemCpu->abOpcode[offOpcode + 1],
1798 pIemCpu->abOpcode[offOpcode + 2],
1799 pIemCpu->abOpcode[offOpcode + 3]);
1800 pIemCpu->offOpcode = offOpcode + 4;
1801 return VINF_SUCCESS;
1802}
1803
1804
1805/**
1806 * Fetches the next opcode dword and zero extends it to a quad word, returns
1807 * automatically on failure.
1808 *
1809 * @param a_pu64 Where to return the opcode quad word.
1810 * @remark Implicitly references pIemCpu.
1811 */
1812#define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) \
1813 do \
1814 { \
1815 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32ZxU64(pIemCpu, (a_pu64)); \
1816 if (rcStrict2 != VINF_SUCCESS) \
1817 return rcStrict2; \
1818 } while (0)
1819
1820
1821/**
1822 * Fetches the next signed double word from the opcode stream.
1823 *
1824 * @returns Strict VBox status code.
1825 * @param pIemCpu The IEM state.
1826 * @param pi32 Where to return the signed double word.
1827 */
1828DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32(PIEMCPU pIemCpu, int32_t *pi32)
1829{
1830 return iemOpcodeGetNextU32(pIemCpu, (uint32_t *)pi32);
1831}
1832
1833/**
1834 * Fetches the next signed double word from the opcode stream, returning
1835 * automatically on failure.
1836 *
1837 * @param a_pi32 Where to return the signed double word.
1838 * @remark Implicitly references pIemCpu.
1839 */
1840#define IEM_OPCODE_GET_NEXT_S32(a_pi32) \
1841 do \
1842 { \
1843 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32(pIemCpu, (a_pi32)); \
1844 if (rcStrict2 != VINF_SUCCESS) \
1845 return rcStrict2; \
1846 } while (0)
1847
1848
1849/**
1850 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
1851 *
1852 * @returns Strict VBox status code.
1853 * @param pIemCpu The IEM state.
1854 * @param pu64 Where to return the opcode qword.
1855 */
1856DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS32SxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1857{
1858 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 4);
1859 if (rcStrict == VINF_SUCCESS)
1860 {
1861 uint8_t offOpcode = pIemCpu->offOpcode;
1862 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1863 pIemCpu->abOpcode[offOpcode + 1],
1864 pIemCpu->abOpcode[offOpcode + 2],
1865 pIemCpu->abOpcode[offOpcode + 3]);
1866 pIemCpu->offOpcode = offOpcode + 4;
1867 }
1868 else
1869 *pu64 = 0;
1870 return rcStrict;
1871}
1872
1873
1874/**
1875 * Fetches the next opcode dword, sign extending it into a quad word.
1876 *
1877 * @returns Strict VBox status code.
1878 * @param pIemCpu The IEM state.
1879 * @param pu64 Where to return the opcode quad word.
1880 */
1881DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32SxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1882{
1883 uint8_t const offOpcode = pIemCpu->offOpcode;
1884 if (RT_UNLIKELY(offOpcode + 4 > pIemCpu->cbOpcode))
1885 return iemOpcodeGetNextS32SxU64Slow(pIemCpu, pu64);
1886
1887 int32_t i32 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1888 pIemCpu->abOpcode[offOpcode + 1],
1889 pIemCpu->abOpcode[offOpcode + 2],
1890 pIemCpu->abOpcode[offOpcode + 3]);
1891 *pu64 = i32;
1892 pIemCpu->offOpcode = offOpcode + 4;
1893 return VINF_SUCCESS;
1894}
1895
1896
1897/**
1898 * Fetches the next opcode double word and sign extends it to a quad word,
1899 * returns automatically on failure.
1900 *
1901 * @param a_pu64 Where to return the opcode quad word.
1902 * @remark Implicitly references pIemCpu.
1903 */
1904#define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) \
1905 do \
1906 { \
1907 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32SxU64(pIemCpu, (a_pu64)); \
1908 if (rcStrict2 != VINF_SUCCESS) \
1909 return rcStrict2; \
1910 } while (0)
1911
1912
1913/**
1914 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
1915 *
1916 * @returns Strict VBox status code.
1917 * @param pIemCpu The IEM state.
1918 * @param pu64 Where to return the opcode qword.
1919 */
1920DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1921{
1922 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 8);
1923 if (rcStrict == VINF_SUCCESS)
1924 {
1925 uint8_t offOpcode = pIemCpu->offOpcode;
1926 *pu64 = RT_MAKE_U64_FROM_U8(pIemCpu->abOpcode[offOpcode],
1927 pIemCpu->abOpcode[offOpcode + 1],
1928 pIemCpu->abOpcode[offOpcode + 2],
1929 pIemCpu->abOpcode[offOpcode + 3],
1930 pIemCpu->abOpcode[offOpcode + 4],
1931 pIemCpu->abOpcode[offOpcode + 5],
1932 pIemCpu->abOpcode[offOpcode + 6],
1933 pIemCpu->abOpcode[offOpcode + 7]);
1934 pIemCpu->offOpcode = offOpcode + 8;
1935 }
1936 else
1937 *pu64 = 0;
1938 return rcStrict;
1939}
1940
1941
1942/**
1943 * Fetches the next opcode qword.
1944 *
1945 * @returns Strict VBox status code.
1946 * @param pIemCpu The IEM state.
1947 * @param pu64 Where to return the opcode qword.
1948 */
1949DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU64(PIEMCPU pIemCpu, uint64_t *pu64)
1950{
1951 uint8_t const offOpcode = pIemCpu->offOpcode;
1952 if (RT_UNLIKELY(offOpcode + 8 > pIemCpu->cbOpcode))
1953 return iemOpcodeGetNextU64Slow(pIemCpu, pu64);
1954
1955 *pu64 = RT_MAKE_U64_FROM_U8(pIemCpu->abOpcode[offOpcode],
1956 pIemCpu->abOpcode[offOpcode + 1],
1957 pIemCpu->abOpcode[offOpcode + 2],
1958 pIemCpu->abOpcode[offOpcode + 3],
1959 pIemCpu->abOpcode[offOpcode + 4],
1960 pIemCpu->abOpcode[offOpcode + 5],
1961 pIemCpu->abOpcode[offOpcode + 6],
1962 pIemCpu->abOpcode[offOpcode + 7]);
1963 pIemCpu->offOpcode = offOpcode + 8;
1964 return VINF_SUCCESS;
1965}
1966
1967
1968/**
1969 * Fetches the next opcode quad word, returns automatically on failure.
1970 *
1971 * @param a_pu64 Where to return the opcode quad word.
1972 * @remark Implicitly references pIemCpu.
1973 */
1974#define IEM_OPCODE_GET_NEXT_U64(a_pu64) \
1975 do \
1976 { \
1977 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU64(pIemCpu, (a_pu64)); \
1978 if (rcStrict2 != VINF_SUCCESS) \
1979 return rcStrict2; \
1980 } while (0)
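
/*
 * Illustrative sketch, not part of the original source: the IEM_OPCODE_GET_NEXT_XXX
 * macros above are meant for use in decoder/instruction functions that return a
 * VBOXSTRICTRC and have pIemCpu in scope, since they return on failure. Assuming
 * the effective operand size is available as pIemCpu->enmEffOpSize, fetching an
 * immediate could look like this hypothetical helper:
 *
 *     IEM_STATIC VBOXSTRICTRC iemOpHlpFetchImmExample(PIEMCPU pIemCpu, uint32_t *pu32Imm)
 *     {
 *         if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
 *             IEM_OPCODE_GET_NEXT_U16_ZX_U32(pu32Imm);
 *         else
 *             IEM_OPCODE_GET_NEXT_U32(pu32Imm);
 *         return VINF_SUCCESS;
 *     }
 */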
1981
1982
1983/** @name Misc Worker Functions.
1984 * @{
1985 */
1986
1987
1988/**
1989 * Validates a new SS segment.
1990 *
1991 * @returns VBox strict status code.
1992 * @param pIemCpu The IEM per CPU instance data.
1993 * @param pCtx The CPU context.
1994 * @param NewSS The new SS selector.
1995 * @param uCpl The CPL to load the stack for.
1996 * @param pDesc Where to return the descriptor.
1997 */
1998IEM_STATIC VBOXSTRICTRC iemMiscValidateNewSS(PIEMCPU pIemCpu, PCCPUMCTX pCtx, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc)
1999{
2000 NOREF(pCtx);
2001
2002 /* Null selectors are not allowed (we're not called for dispatching
2003 interrupts with SS=0 in long mode). */
2004 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
2005 {
2006 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #TS(0)\n", NewSS));
2007 return iemRaiseTaskSwitchFault0(pIemCpu);
2008 }
2009
2010 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
2011 if ((NewSS & X86_SEL_RPL) != uCpl)
2012 {
2013 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differs -> #TS\n", NewSS, uCpl));
2014 return iemRaiseTaskSwitchFaultBySelector(pIemCpu, NewSS);
2015 }
2016
2017 /*
2018 * Read the descriptor.
2019 */
2020 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, pDesc, NewSS, X86_XCPT_TS);
2021 if (rcStrict != VINF_SUCCESS)
2022 return rcStrict;
2023
2024 /*
2025 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
2026 */
2027 if (!pDesc->Legacy.Gen.u1DescType)
2028 {
2029 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
2030 return iemRaiseTaskSwitchFaultBySelector(pIemCpu, NewSS);
2031 }
2032
2033 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
2034 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
2035 {
2036 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
2037 return iemRaiseTaskSwitchFaultBySelector(pIemCpu, NewSS);
2038 }
2039 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
2040 {
2041 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differs -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
2042 return iemRaiseTaskSwitchFaultBySelector(pIemCpu, NewSS);
2043 }
2044
2045 /* Is it there? */
2046 /** @todo testcase: Is this checked before the canonical / limit check below? */
2047 if (!pDesc->Legacy.Gen.u1Present)
2048 {
2049 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
2050 return iemRaiseSelectorNotPresentBySelector(pIemCpu, NewSS);
2051 }
2052
2053 return VINF_SUCCESS;
2054}
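
/*
 * Illustrative call pattern, not from the original source: stack-load style
 * callers (LSS, POP SS, MOV SS and the like) are expected to validate the new
 * selector first and only commit SS afterwards using the descriptor returned
 * here, e.g.:
 *
 *     IEMSELDESC DescSS;
 *     VBOXSTRICTRC rcStrict = iemMiscValidateNewSS(pIemCpu, pCtx, uNewSS, pIemCpu->uCpl, &DescSS);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;
 */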
2055
2056
2057/**
2058 * Gets the correct EFLAGS regardless of whether PATM stores parts of them or
2059 * not.
2060 *
2061 * @param a_pIemCpu The IEM per CPU data.
2062 * @param a_pCtx The CPU context.
2063 */
2064#ifdef VBOX_WITH_RAW_MODE_NOT_R0
2065# define IEMMISC_GET_EFL(a_pIemCpu, a_pCtx) \
2066 ( IEM_VERIFICATION_ENABLED(a_pIemCpu) \
2067 ? (a_pCtx)->eflags.u \
2068 : CPUMRawGetEFlags(IEMCPU_TO_VMCPU(a_pIemCpu)) )
2069#else
2070# define IEMMISC_GET_EFL(a_pIemCpu, a_pCtx) \
2071 ( (a_pCtx)->eflags.u )
2072#endif
2073
2074/**
2075 * Updates the EFLAGS in the correct manner wrt. PATM.
2076 *
2077 * @param a_pIemCpu The IEM per CPU data.
2078 * @param a_pCtx The CPU context.
2079 * @param a_fEfl The new EFLAGS.
2080 */
2081#ifdef VBOX_WITH_RAW_MODE_NOT_R0
2082# define IEMMISC_SET_EFL(a_pIemCpu, a_pCtx, a_fEfl) \
2083 do { \
2084 if (IEM_VERIFICATION_ENABLED(a_pIemCpu)) \
2085 (a_pCtx)->eflags.u = (a_fEfl); \
2086 else \
2087 CPUMRawSetEFlags(IEMCPU_TO_VMCPU(a_pIemCpu), a_fEfl); \
2088 } while (0)
2089#else
2090# define IEMMISC_SET_EFL(a_pIemCpu, a_pCtx, a_fEfl) \
2091 do { \
2092 (a_pCtx)->eflags.u = (a_fEfl); \
2093 } while (0)
2094#endif
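
/*
 * Illustrative sketch, not part of the original source: the two macros above are
 * intended as a read-modify-write pair so that raw-mode/PATM guests see
 * consistent flags. Clearing IF, for instance, would look like:
 *
 *     uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx);
 *     fEfl &= ~X86_EFL_IF;
 *     IEMMISC_SET_EFL(pIemCpu, pCtx, fEfl);
 */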
2095
2096
2097/** @} */
2098
2099/** @name Raising Exceptions.
2100 *
2101 * @{
2102 */
2103
2104/** @name IEM_XCPT_FLAGS_XXX - flags for iemRaiseXcptOrInt.
2105 * @{ */
2106/** CPU exception. */
2107#define IEM_XCPT_FLAGS_T_CPU_XCPT RT_BIT_32(0)
2108/** External interrupt (from PIC, APIC, whatever). */
2109#define IEM_XCPT_FLAGS_T_EXT_INT RT_BIT_32(1)
2110/** Software interrupt (int or into, not bound).
2111 * Returns to the following instruction */
2112#define IEM_XCPT_FLAGS_T_SOFT_INT RT_BIT_32(2)
2113/** Takes an error code. */
2114#define IEM_XCPT_FLAGS_ERR RT_BIT_32(3)
2115/** Takes a CR2. */
2116#define IEM_XCPT_FLAGS_CR2 RT_BIT_32(4)
2117/** Generated by the breakpoint instruction. */
2118#define IEM_XCPT_FLAGS_BP_INSTR RT_BIT_32(5)
2119/** Generated by a DRx instruction breakpoint and RF should be cleared. */
2120#define IEM_XCPT_FLAGS_DRx_INSTR_BP RT_BIT_32(6)
2121/** @} */
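
/*
 * Illustrative examples, not from the original source, of how these flags are
 * typically combined by the exception raising helpers:
 *   - #GP(err):         IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR
 *   - #PF(err) w/ CR2:  IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2
 *   - INT n:            IEM_XCPT_FLAGS_T_SOFT_INT
 *   - External IRQ:     IEM_XCPT_FLAGS_T_EXT_INT
 */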
2122
2123
2124/**
2125 * Loads the specified stack far pointer from the TSS.
2126 *
2127 * @returns VBox strict status code.
2128 * @param pIemCpu The IEM per CPU instance data.
2129 * @param pCtx The CPU context.
2130 * @param uCpl The CPL to load the stack for.
2131 * @param pSelSS Where to return the new stack segment.
2132 * @param puEsp Where to return the new stack pointer.
2133 */
2134IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PIEMCPU pIemCpu, PCCPUMCTX pCtx, uint8_t uCpl,
2135 PRTSEL pSelSS, uint32_t *puEsp)
2136{
2137 VBOXSTRICTRC rcStrict;
2138 Assert(uCpl < 4);
2139
2140 switch (pCtx->tr.Attr.n.u4Type)
2141 {
2142 /*
2143 * 16-bit TSS (X86TSS16).
2144 */
2145 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed();
2146 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
2147 {
2148 uint32_t off = uCpl * 4 + 2;
2149 if (off + 4 <= pCtx->tr.u32Limit)
2150 {
2151 /** @todo check actual access pattern here. */
2152 uint32_t u32Tmp = 0; /* gcc maybe... */
2153 rcStrict = iemMemFetchSysU32(pIemCpu, &u32Tmp, UINT8_MAX, pCtx->tr.u64Base + off);
2154 if (rcStrict == VINF_SUCCESS)
2155 {
2156 *puEsp = RT_LOWORD(u32Tmp);
2157 *pSelSS = RT_HIWORD(u32Tmp);
2158 return VINF_SUCCESS;
2159 }
2160 }
2161 else
2162 {
2163 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pCtx->tr.u32Limit));
2164 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pIemCpu);
2165 }
2166 break;
2167 }
2168
2169 /*
2170 * 32-bit TSS (X86TSS32).
2171 */
2172 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed();
2173 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
2174 {
2175 uint32_t off = uCpl * 8 + 4;
2176 if (off + 7 <= pCtx->tr.u32Limit)
2177 {
2178/** @todo check actual access pattern here. */
2179 uint64_t u64Tmp;
2180 rcStrict = iemMemFetchSysU64(pIemCpu, &u64Tmp, UINT8_MAX, pCtx->tr.u64Base + off);
2181 if (rcStrict == VINF_SUCCESS)
2182 {
2183 *puEsp = u64Tmp & UINT32_MAX;
2184 *pSelSS = (RTSEL)(u64Tmp >> 32);
2185 return VINF_SUCCESS;
2186 }
2187 }
2188 else
2189 {
2190 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS32\n", uCpl, pCtx->tr.u32Limit));
2191 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pIemCpu);
2192 }
2193 break;
2194 }
2195
2196 default:
2197 AssertFailed();
2198 rcStrict = VERR_IEM_IPE_4;
2199 break;
2200 }
2201
2202 *puEsp = 0; /* make gcc happy */
2203 *pSelSS = 0; /* make gcc happy */
2204 return rcStrict;
2205}
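
/*
 * Layout note added for clarity (standard architectural TSS layout, not from the
 * original source), explaining the offsets used above:
 *   - 16-bit TSS: {SP0,SS0}, {SP1,SS1}, {SP2,SS2} as 2-byte fields starting at
 *     offset 2, hence off = uCpl * 4 + 2.
 *   - 32-bit TSS: {ESP0,SS0}, {ESP1,SS1}, {ESP2,SS2} as 4-byte ESP plus 4-byte
 *     SS slots (selector + padding) starting at offset 4, hence off = uCpl * 8 + 4.
 */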
2206
2207
2208/**
2209 * Loads the specified stack pointer from the 64-bit TSS.
2210 *
2211 * @returns VBox strict status code.
2212 * @param pIemCpu The IEM per CPU instance data.
2213 * @param pCtx The CPU context.
2214 * @param uCpl The CPL to load the stack for.
2215 * @param uIst The interrupt stack table index, 0 if to use uCpl.
2216 * @param puRsp Where to return the new stack pointer.
2217 */
2218IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss64(PIEMCPU pIemCpu, PCCPUMCTX pCtx, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp)
2219{
2220 Assert(uCpl < 4);
2221 Assert(uIst < 8);
2222 *puRsp = 0; /* make gcc happy */
2223
2224 AssertReturn(pCtx->tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
2225
2226 uint32_t off;
2227 if (uIst)
2228 off = (uIst - 1) * sizeof(uint64_t) + RT_OFFSETOF(X86TSS64, ist1);
2229 else
2230 off = uCpl * sizeof(uint64_t) + RT_OFFSETOF(X86TSS64, rsp0);
2231 if (off + sizeof(uint64_t) > pCtx->tr.u32Limit)
2232 {
2233 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pCtx->tr.u32Limit));
2234 return iemRaiseTaskSwitchFaultCurrentTSS(pIemCpu);
2235 }
2236
2237 return iemMemFetchSysU64(pIemCpu, puRsp, UINT8_MAX, pCtx->tr.u64Base + off);
2238}
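
/*
 * Layout note added for clarity (standard architectural layout, not from the
 * original source): the 64-bit TSS holds RSP0..RSP2 starting at offset 0x04 and
 * IST1..IST7 starting at offset 0x24, each entry 8 bytes wide, which is what the
 * RT_OFFSETOF() arithmetic above resolves to.
 */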
2239
2240
2241/**
2242 * Adjust the CPU state according to the exception being raised.
2243 *
2244 * @param pCtx The CPU context.
2245 * @param u8Vector The exception that has been raised.
2246 */
2247DECLINLINE(void) iemRaiseXcptAdjustState(PCPUMCTX pCtx, uint8_t u8Vector)
2248{
2249 switch (u8Vector)
2250 {
2251 case X86_XCPT_DB:
2252 pCtx->dr[7] &= ~X86_DR7_GD;
2253 break;
2254 /** @todo Read the AMD and Intel exception reference... */
2255 }
2256}
2257
2258
2259/**
2260 * Implements exceptions and interrupts for real mode.
2261 *
2262 * @returns VBox strict status code.
2263 * @param pIemCpu The IEM per CPU instance data.
2264 * @param pCtx The CPU context.
2265 * @param cbInstr The number of bytes to offset rIP by in the return
2266 * address.
2267 * @param u8Vector The interrupt / exception vector number.
2268 * @param fFlags The flags.
2269 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2270 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2271 */
2272IEM_STATIC VBOXSTRICTRC
2273iemRaiseXcptOrIntInRealMode(PIEMCPU pIemCpu,
2274 PCPUMCTX pCtx,
2275 uint8_t cbInstr,
2276 uint8_t u8Vector,
2277 uint32_t fFlags,
2278 uint16_t uErr,
2279 uint64_t uCr2)
2280{
2281 AssertReturn(pIemCpu->enmCpuMode == IEMMODE_16BIT, VERR_IEM_IPE_6);
2282 NOREF(uErr); NOREF(uCr2);
2283
2284 /*
2285 * Read the IDT entry.
2286 */
2287 if (pCtx->idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
2288 {
2289 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
2290 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2291 }
2292 RTFAR16 Idte;
2293 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pIemCpu, (uint32_t *)&Idte, UINT8_MAX,
2294 pCtx->idtr.pIdt + UINT32_C(4) * u8Vector);
2295 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2296 return rcStrict;
2297
2298 /*
2299 * Push the stack frame.
2300 */
2301 uint16_t *pu16Frame;
2302 uint64_t uNewRsp;
2303 rcStrict = iemMemStackPushBeginSpecial(pIemCpu, 6, (void **)&pu16Frame, &uNewRsp);
2304 if (rcStrict != VINF_SUCCESS)
2305 return rcStrict;
2306
2307 uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx);
2308#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
2309 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
2310 if (pIemCpu->uTargetCpu <= IEMTARGETCPU_186)
2311 fEfl |= UINT16_C(0xf000);
2312#endif
2313 pu16Frame[2] = (uint16_t)fEfl;
2314 pu16Frame[1] = (uint16_t)pCtx->cs.Sel;
2315 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->ip + cbInstr : pCtx->ip;
2316 rcStrict = iemMemStackPushCommitSpecial(pIemCpu, pu16Frame, uNewRsp);
2317 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2318 return rcStrict;
2319
2320 /*
2321 * Load the vector address into cs:ip and make exception specific state
2322 * adjustments.
2323 */
2324 pCtx->cs.Sel = Idte.sel;
2325 pCtx->cs.ValidSel = Idte.sel;
2326 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
2327 pCtx->cs.u64Base = (uint32_t)Idte.sel << 4;
2328 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
2329 pCtx->rip = Idte.off;
2330 fEfl &= ~X86_EFL_IF;
2331 IEMMISC_SET_EFL(pIemCpu, pCtx, fEfl);
2332
2333 /** @todo do we actually do this in real mode? */
2334 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
2335 iemRaiseXcptAdjustState(pCtx, u8Vector);
2336
2337 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
2338}
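
/*
 * Layout note added for clarity (standard real-mode behaviour, not from the
 * original source): each IVT entry is a 4-byte offset:segment pair at linear
 * address IDTR.base + 4 * vector, and the frame pushed above is the usual
 * 3-word IRET frame:
 *
 *     old SP - 2: FLAGS
 *     old SP - 4: CS
 *     old SP - 6: IP   (points past the INT/INTO instruction for software interrupts)
 */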
2339
2340
2341/**
2342 * Loads a NULL data selector into a segment register when coming from V8086 mode.
2343 *
2344 * @param pIemCpu The IEM per CPU instance data.
2345 * @param pSReg Pointer to the segment register.
2346 */
2347IEM_STATIC void iemHlpLoadNullDataSelectorOnV86Xcpt(PIEMCPU pIemCpu, PCPUMSELREG pSReg)
2348{
2349 pSReg->Sel = 0;
2350 pSReg->ValidSel = 0;
2351 if (IEM_IS_GUEST_CPU_INTEL(pIemCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
2352 {
2353 /* VT-x (Intel 3960x) doesn't change the base and limit, clears and sets the following attributes */
2354 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
2355 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
2356 }
2357 else
2358 {
2359 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2360 /** @todo check this on AMD-V */
2361 pSReg->u64Base = 0;
2362 pSReg->u32Limit = 0;
2363 }
2364}
2365
2366
2367/**
2368 * Loads a segment selector during a task switch in V8086 mode.
2369 *
2370 * @param pIemCpu The IEM per CPU instance data.
2371 * @param pSReg Pointer to the segment register.
2372 * @param uSel The selector value to load.
2373 */
2374IEM_STATIC void iemHlpLoadSelectorInV86Mode(PIEMCPU pIemCpu, PCPUMSELREG pSReg, uint16_t uSel)
2375{
2376 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
2377 pSReg->Sel = uSel;
2378 pSReg->ValidSel = uSel;
2379 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2380 pSReg->u64Base = uSel << 4;
2381 pSReg->u32Limit = 0xffff;
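    /* 0xf3 = present, DPL 3, accessed read/write data segment - the fixed V8086 attributes. */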
2382 pSReg->Attr.u = 0xf3;
2383}
2384
2385
2386/**
2387 * Loads a NULL data selector into a selector register, both the hidden and
2388 * visible parts, in protected mode.
2389 *
2390 * @param pIemCpu The IEM state of the calling EMT.
2391 * @param pSReg Pointer to the segment register.
2392 * @param uRpl The RPL.
2393 */
2394IEM_STATIC void iemHlpLoadNullDataSelectorProt(PIEMCPU pIemCpu, PCPUMSELREG pSReg, RTSEL uRpl)
2395{
2396 /** @todo Testcase: write a testcase checking what happens when loading a NULL
2397 * data selector in protected mode. */
2398 pSReg->Sel = uRpl;
2399 pSReg->ValidSel = uRpl;
2400 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2401 if (IEM_IS_GUEST_CPU_INTEL(pIemCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
2402 {
2403 /* VT-x (Intel 3960x) observed doing something like this. */
2404 pSReg->Attr.u = X86DESCATTR_UNUSABLE | X86DESCATTR_G | X86DESCATTR_D | (pIemCpu->uCpl << X86DESCATTR_DPL_SHIFT);
2405 pSReg->u32Limit = UINT32_MAX;
2406 pSReg->u64Base = 0;
2407 }
2408 else
2409 {
2410 pSReg->Attr.u = X86DESCATTR_UNUSABLE;
2411 pSReg->u32Limit = 0;
2412 pSReg->u64Base = 0;
2413 }
2414}
2415
2416
2417/**
2418 * Loads a segment selector during a task switch in protected mode.
2419 *
2420 * In this task switch scenario, we would throw \#TS exceptions rather than
2421 * \#GPs.
2422 *
2423 * @returns VBox strict status code.
2424 * @param pIemCpu The IEM per CPU instance data.
2425 * @param pSReg Pointer to the segment register.
2426 * @param uSel The new selector value.
2427 *
2428 * @remarks This does _not_ handle CS or SS.
2429 * @remarks This expects pIemCpu->uCpl to be up to date.
2430 */
2431IEM_STATIC VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PIEMCPU pIemCpu, PCPUMSELREG pSReg, uint16_t uSel)
2432{
2433 Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
2434
2435 /* Null data selector. */
2436 if (!(uSel & X86_SEL_MASK_OFF_RPL))
2437 {
2438 iemHlpLoadNullDataSelectorProt(pIemCpu, pSReg, uSel);
2439 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pSReg));
2440 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_HIDDEN_SEL_REGS);
2441 return VINF_SUCCESS;
2442 }
2443
2444 /* Fetch the descriptor. */
2445 IEMSELDESC Desc;
2446 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uSel, X86_XCPT_TS);
2447 if (rcStrict != VINF_SUCCESS)
2448 {
2449 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
2450 VBOXSTRICTRC_VAL(rcStrict)));
2451 return rcStrict;
2452 }
2453
2454 /* Must be a data segment or readable code segment. */
2455 if ( !Desc.Legacy.Gen.u1DescType
2456 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
2457 {
2458 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
2459 Desc.Legacy.Gen.u4Type));
2460 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uSel & X86_SEL_MASK_OFF_RPL);
2461 }
2462
2463 /* Check privileges for data segments and non-conforming code segments. */
2464 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2465 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2466 {
2467 /* The RPL and the new CPL must be less than or equal to the DPL. */
2468 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
2469 || (pIemCpu->uCpl > Desc.Legacy.Gen.u2Dpl))
2470 {
2471 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
2472 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
2473 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uSel & X86_SEL_MASK_OFF_RPL);
2474 }
2475 }
2476
2477 /* Is it there? */
2478 if (!Desc.Legacy.Gen.u1Present)
2479 {
2480 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
2481 return iemRaiseSelectorNotPresentWithErr(pIemCpu, uSel & X86_SEL_MASK_OFF_RPL);
2482 }
2483
2484 /* The base and limit. */
2485 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
2486 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
2487
2488 /*
2489 * Ok, everything checked out fine. Now set the accessed bit before
2490 * committing the result into the registers.
2491 */
2492 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2493 {
2494 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uSel);
2495 if (rcStrict != VINF_SUCCESS)
2496 return rcStrict;
2497 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2498 }
2499
2500 /* Commit */
2501 pSReg->Sel = uSel;
2502 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
2503 pSReg->u32Limit = cbLimit;
2504 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
2505 pSReg->ValidSel = uSel;
2506 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2507 if (IEM_IS_GUEST_CPU_INTEL(pIemCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
2508 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
2509
2510 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pSReg));
2511 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_HIDDEN_SEL_REGS);
2512 return VINF_SUCCESS;
2513}
2514
2515
2516/**
2517 * Performs a task switch.
2518 *
2519 * If the task switch is the result of a JMP, CALL or IRET instruction, the
2520 * caller is responsible for performing the necessary checks (like DPL, TSS
2521 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
2522 * reference for JMP, CALL, IRET.
2523 *
2524 * If the task switch is due to a software interrupt or hardware exception,
2525 * the caller is responsible for validating the TSS selector and descriptor. See
2526 * Intel Instruction reference for INT n.
2527 *
2528 * @returns VBox strict status code.
2529 * @param pIemCpu The IEM per CPU instance data.
2530 * @param pCtx The CPU context.
2531 * @param enmTaskSwitch What caused this task switch.
2532 * @param uNextEip The EIP effective after the task switch.
2533 * @param fFlags The flags.
2534 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2535 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2536 * @param SelTSS The TSS selector of the new task.
2537 * @param pNewDescTSS Pointer to the new TSS descriptor.
2538 */
2539IEM_STATIC VBOXSTRICTRC
2540iemTaskSwitch(PIEMCPU pIemCpu,
2541 PCPUMCTX pCtx,
2542 IEMTASKSWITCH enmTaskSwitch,
2543 uint32_t uNextEip,
2544 uint32_t fFlags,
2545 uint16_t uErr,
2546 uint64_t uCr2,
2547 RTSEL SelTSS,
2548 PIEMSELDESC pNewDescTSS)
2549{
2550 Assert(!IEM_IS_REAL_MODE(pIemCpu));
2551 Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
2552
2553 uint32_t const uNewTSSType = pNewDescTSS->Legacy.Gate.u4Type;
2554 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2555 || uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2556 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2557 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2558
2559 bool const fIsNewTSS386 = ( uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2560 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2561
2562 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTSS=%#x fIsNewTSS386=%RTbool EIP=%#RGv uNextEip=%#RGv\n", enmTaskSwitch, SelTSS,
2563 fIsNewTSS386, pCtx->eip, uNextEip));
2564
2565 /* Update CR2 in case it's a page-fault. */
2566 /** @todo This should probably be done much earlier in IEM/PGM. See
2567 * @bugref{5653#c49}. */
2568 if (fFlags & IEM_XCPT_FLAGS_CR2)
2569 pCtx->cr2 = uCr2;
2570
2571 /*
2572 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
2573 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
2574 */
2575 uint32_t const uNewTSSLimit = pNewDescTSS->Legacy.Gen.u16LimitLow | (pNewDescTSS->Legacy.Gen.u4LimitHigh << 16);
2576 uint32_t const uNewTSSLimitMin = fIsNewTSS386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
2577 if (uNewTSSLimit < uNewTSSLimitMin)
2578 {
2579 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTSSLimit=%#x uNewTSSLimitMin=%#x -> #TS\n",
2580 enmTaskSwitch, uNewTSSLimit, uNewTSSLimitMin));
2581 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
2582 }
2583
2584 /*
2585 * Check the current TSS limit. The last written byte to the current TSS during the
2586 * task switch will be 2 bytes at offset 0x5C (32-bit) and 1 byte at offset 0x28 (16-bit).
2587 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2588 *
2589 * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
2590 * end up with smaller than "legal" TSS limits.
2591 */
2592 uint32_t const uCurTSSLimit = pCtx->tr.u32Limit;
2593 uint32_t const uCurTSSLimitMin = fIsNewTSS386 ? 0x5F : 0x29;
2594 if (uCurTSSLimit < uCurTSSLimitMin)
2595 {
2596 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTSSLimit=%#x uCurTSSLimitMin=%#x -> #TS\n",
2597 enmTaskSwitch, uCurTSSLimit, uCurTSSLimitMin));
2598 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
2599 }
2600
2601 /*
2602 * Verify that the new TSS can be accessed and map it. Map only the required contents
2603 * and not the entire TSS.
2604 */
2605 void *pvNewTSS;
2606 uint32_t cbNewTSS = uNewTSSLimitMin + 1;
2607 RTGCPTR GCPtrNewTSS = X86DESC_BASE(&pNewDescTSS->Legacy);
2608 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
2609 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
2610 * not perform correct translation if this happens. See Intel spec. 7.2.1
2611 * "Task-State Segment" */
2612 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, &pvNewTSS, cbNewTSS, UINT8_MAX, GCPtrNewTSS, IEM_ACCESS_SYS_RW);
2613 if (rcStrict != VINF_SUCCESS)
2614 {
2615 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTSS=%u uNewTSSLimit=%u rc=%Rrc\n", enmTaskSwitch,
2616 cbNewTSS, uNewTSSLimit, VBOXSTRICTRC_VAL(rcStrict)));
2617 return rcStrict;
2618 }
2619
2620 /*
2621 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
2622 */
2623 uint32_t u32EFlags = pCtx->eflags.u32;
2624 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
2625 || enmTaskSwitch == IEMTASKSWITCH_IRET)
2626 {
2627 PX86DESC pDescCurTSS;
2628 rcStrict = iemMemMap(pIemCpu, (void **)&pDescCurTSS, sizeof(*pDescCurTSS), UINT8_MAX,
2629 pCtx->gdtr.pGdt + (pCtx->tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
2630 if (rcStrict != VINF_SUCCESS)
2631 {
2632 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2633 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2634 return rcStrict;
2635 }
2636
2637 pDescCurTSS->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2638 rcStrict = iemMemCommitAndUnmap(pIemCpu, pDescCurTSS, IEM_ACCESS_SYS_RW);
2639 if (rcStrict != VINF_SUCCESS)
2640 {
2641 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2642 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2643 return rcStrict;
2644 }
2645
2646 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
2647 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
2648 {
2649 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2650 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2651 u32EFlags &= ~X86_EFL_NT;
2652 }
2653 }
2654
2655 /*
2656 * Save the CPU state into the current TSS.
2657 */
2658 RTGCPTR GCPtrCurTSS = pCtx->tr.u64Base;
2659 if (GCPtrNewTSS == GCPtrCurTSS)
2660 {
2661 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTSS));
2662 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
2663 pCtx->cr3, pCtx->eip, pCtx->eflags.u32, pCtx->eax, pCtx->esp, pCtx->ebp, pCtx->cs.Sel, pCtx->ss.Sel, pCtx->ldtr.Sel));
2664 }
2665 if (fIsNewTSS386)
2666 {
2667 /*
2668 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
2669 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2670 */
2671 void *pvCurTSS32;
2672 uint32_t offCurTSS = RT_OFFSETOF(X86TSS32, eip);
2673 uint32_t cbCurTSS = RT_OFFSETOF(X86TSS32, selLdt) - RT_OFFSETOF(X86TSS32, eip);
2674 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
2675 rcStrict = iemMemMap(pIemCpu, &pvCurTSS32, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
2676 if (rcStrict != VINF_SUCCESS)
2677 {
2678 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
2679 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
2680 return rcStrict;
2681 }
2682
2683 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS, offCurTSS + cbCurTSS). */
2684 PX86TSS32 pCurTSS32 = (PX86TSS32)((uintptr_t)pvCurTSS32 - offCurTSS);
2685 pCurTSS32->eip = uNextEip;
2686 pCurTSS32->eflags = u32EFlags;
2687 pCurTSS32->eax = pCtx->eax;
2688 pCurTSS32->ecx = pCtx->ecx;
2689 pCurTSS32->edx = pCtx->edx;
2690 pCurTSS32->ebx = pCtx->ebx;
2691 pCurTSS32->esp = pCtx->esp;
2692 pCurTSS32->ebp = pCtx->ebp;
2693 pCurTSS32->esi = pCtx->esi;
2694 pCurTSS32->edi = pCtx->edi;
2695 pCurTSS32->es = pCtx->es.Sel;
2696 pCurTSS32->cs = pCtx->cs.Sel;
2697 pCurTSS32->ss = pCtx->ss.Sel;
2698 pCurTSS32->ds = pCtx->ds.Sel;
2699 pCurTSS32->fs = pCtx->fs.Sel;
2700 pCurTSS32->gs = pCtx->gs.Sel;
2701
2702 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvCurTSS32, IEM_ACCESS_SYS_RW);
2703 if (rcStrict != VINF_SUCCESS)
2704 {
2705 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2706 VBOXSTRICTRC_VAL(rcStrict)));
2707 return rcStrict;
2708 }
2709 }
2710 else
2711 {
2712 /*
2713 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
2714 */
2715 void *pvCurTSS16;
2716 uint32_t offCurTSS = RT_OFFSETOF(X86TSS16, ip);
2717 uint32_t cbCurTSS = RT_OFFSETOF(X86TSS16, selLdt) - RT_OFFSETOF(X86TSS16, ip);
2718 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
2719 rcStrict = iemMemMap(pIemCpu, &pvCurTSS16, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
2720 if (rcStrict != VINF_SUCCESS)
2721 {
2722 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
2723 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
2724 return rcStrict;
2725 }
2726
2727 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS, offCurTSS + cbCurTSS). */
2728 PX86TSS16 pCurTSS16 = (PX86TSS16)((uintptr_t)pvCurTSS16 - offCurTSS);
2729 pCurTSS16->ip = uNextEip;
2730 pCurTSS16->flags = u32EFlags;
2731 pCurTSS16->ax = pCtx->ax;
2732 pCurTSS16->cx = pCtx->cx;
2733 pCurTSS16->dx = pCtx->dx;
2734 pCurTSS16->bx = pCtx->bx;
2735 pCurTSS16->sp = pCtx->sp;
2736 pCurTSS16->bp = pCtx->bp;
2737 pCurTSS16->si = pCtx->si;
2738 pCurTSS16->di = pCtx->di;
2739 pCurTSS16->es = pCtx->es.Sel;
2740 pCurTSS16->cs = pCtx->cs.Sel;
2741 pCurTSS16->ss = pCtx->ss.Sel;
2742 pCurTSS16->ds = pCtx->ds.Sel;
2743
2744 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvCurTSS16, IEM_ACCESS_SYS_RW);
2745 if (rcStrict != VINF_SUCCESS)
2746 {
2747 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2748 VBOXSTRICTRC_VAL(rcStrict)));
2749 return rcStrict;
2750 }
2751 }
2752
2753 /*
2754 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
2755 */
2756 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2757 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2758 {
2759 /* 16 or 32-bit TSS doesn't matter, we only access the first, common 16-bit field (selPrev) here. */
2760 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTSS;
2761 pNewTSS->selPrev = pCtx->tr.Sel;
2762 }
2763
2764 /*
2765 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky,
2766 * it's done further below with error handling (e.g. CR3 changes will go through PGM).
2767 */
2768 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
2769 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
2770 bool fNewDebugTrap;
2771 if (fIsNewTSS386)
2772 {
2773 PX86TSS32 pNewTSS32 = (PX86TSS32)pvNewTSS;
2774 uNewCr3 = (pCtx->cr0 & X86_CR0_PG) ? pNewTSS32->cr3 : 0;
2775 uNewEip = pNewTSS32->eip;
2776 uNewEflags = pNewTSS32->eflags;
2777 uNewEax = pNewTSS32->eax;
2778 uNewEcx = pNewTSS32->ecx;
2779 uNewEdx = pNewTSS32->edx;
2780 uNewEbx = pNewTSS32->ebx;
2781 uNewEsp = pNewTSS32->esp;
2782 uNewEbp = pNewTSS32->ebp;
2783 uNewEsi = pNewTSS32->esi;
2784 uNewEdi = pNewTSS32->edi;
2785 uNewES = pNewTSS32->es;
2786 uNewCS = pNewTSS32->cs;
2787 uNewSS = pNewTSS32->ss;
2788 uNewDS = pNewTSS32->ds;
2789 uNewFS = pNewTSS32->fs;
2790 uNewGS = pNewTSS32->gs;
2791 uNewLdt = pNewTSS32->selLdt;
2792 fNewDebugTrap = RT_BOOL(pNewTSS32->fDebugTrap);
2793 }
2794 else
2795 {
2796 PX86TSS16 pNewTSS16 = (PX86TSS16)pvNewTSS;
2797 uNewCr3 = 0;
2798 uNewEip = pNewTSS16->ip;
2799 uNewEflags = pNewTSS16->flags;
2800 uNewEax = UINT32_C(0xffff0000) | pNewTSS16->ax;
2801 uNewEcx = UINT32_C(0xffff0000) | pNewTSS16->cx;
2802 uNewEdx = UINT32_C(0xffff0000) | pNewTSS16->dx;
2803 uNewEbx = UINT32_C(0xffff0000) | pNewTSS16->bx;
2804 uNewEsp = UINT32_C(0xffff0000) | pNewTSS16->sp;
2805 uNewEbp = UINT32_C(0xffff0000) | pNewTSS16->bp;
2806 uNewEsi = UINT32_C(0xffff0000) | pNewTSS16->si;
2807 uNewEdi = UINT32_C(0xffff0000) | pNewTSS16->di;
2808 uNewES = pNewTSS16->es;
2809 uNewCS = pNewTSS16->cs;
2810 uNewSS = pNewTSS16->ss;
2811 uNewDS = pNewTSS16->ds;
2812 uNewFS = 0;
2813 uNewGS = 0;
2814 uNewLdt = pNewTSS16->selLdt;
2815 fNewDebugTrap = false;
2816 }
2817
2818 if (GCPtrNewTSS == GCPtrCurTSS)
2819 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
2820 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
2821
2822 /*
2823 * We're done accessing the new TSS.
2824 */
2825 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvNewTSS, IEM_ACCESS_SYS_RW);
2826 if (rcStrict != VINF_SUCCESS)
2827 {
2828 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
2829 return rcStrict;
2830 }
2831
2832 /*
2833 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
2834 */
2835 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
2836 {
2837 rcStrict = iemMemMap(pIemCpu, (void **)&pNewDescTSS, sizeof(*pNewDescTSS), UINT8_MAX,
2838 pCtx->gdtr.pGdt + (SelTSS & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
2839 if (rcStrict != VINF_SUCCESS)
2840 {
2841 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2842 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2843 return rcStrict;
2844 }
2845
2846 /* Check that the descriptor indicates the new TSS is available (not busy). */
2847 AssertMsg( pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2848 || pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
2849 ("Invalid TSS descriptor type=%#x", pNewDescTSS->Legacy.Gate.u4Type));
2850
2851 pNewDescTSS->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2852 rcStrict = iemMemCommitAndUnmap(pIemCpu, pNewDescTSS, IEM_ACCESS_SYS_RW);
2853 if (rcStrict != VINF_SUCCESS)
2854 {
2855 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2856 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2857 return rcStrict;
2858 }
2859 }
2860
2861 /*
2862 * From this point on, we're technically in the new task. We will defer exceptions
2863 * until the completion of the task switch but before executing any instructions in the new task.
2864 */
2865 pCtx->tr.Sel = SelTSS;
2866 pCtx->tr.ValidSel = SelTSS;
2867 pCtx->tr.fFlags = CPUMSELREG_FLAGS_VALID;
2868 pCtx->tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTSS->Legacy);
2869 pCtx->tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTSS->Legacy);
2870 pCtx->tr.u64Base = X86DESC_BASE(&pNewDescTSS->Legacy);
2871 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_TR);
2872
2873 /* Set the busy bit in TR. */
2874 pCtx->tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2875 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
2876 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2877 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2878 {
2879 uNewEflags |= X86_EFL_NT;
2880 }
2881
2882 pCtx->dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
2883 pCtx->cr0 |= X86_CR0_TS;
2884 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_CR0);
2885
2886 pCtx->eip = uNewEip;
2887 pCtx->eax = uNewEax;
2888 pCtx->ecx = uNewEcx;
2889 pCtx->edx = uNewEdx;
2890 pCtx->ebx = uNewEbx;
2891 pCtx->esp = uNewEsp;
2892 pCtx->ebp = uNewEbp;
2893 pCtx->esi = uNewEsi;
2894 pCtx->edi = uNewEdi;
2895
2896 uNewEflags &= X86_EFL_LIVE_MASK;
2897 uNewEflags |= X86_EFL_RA1_MASK;
2898 IEMMISC_SET_EFL(pIemCpu, pCtx, uNewEflags);
2899
2900 /*
2901 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
2902 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3
2903 * due to the hidden part data originating from the guest LDT/GDT which is accessed through paging.
2904 */
2905 pCtx->es.Sel = uNewES;
2906 pCtx->es.fFlags = CPUMSELREG_FLAGS_STALE;
2907 pCtx->es.Attr.u &= ~X86DESCATTR_P;
2908
2909 pCtx->cs.Sel = uNewCS;
2910 pCtx->cs.fFlags = CPUMSELREG_FLAGS_STALE;
2911 pCtx->cs.Attr.u &= ~X86DESCATTR_P;
2912
2913 pCtx->ss.Sel = uNewSS;
2914 pCtx->ss.fFlags = CPUMSELREG_FLAGS_STALE;
2915 pCtx->ss.Attr.u &= ~X86DESCATTR_P;
2916
2917 pCtx->ds.Sel = uNewDS;
2918 pCtx->ds.fFlags = CPUMSELREG_FLAGS_STALE;
2919 pCtx->ds.Attr.u &= ~X86DESCATTR_P;
2920
2921 pCtx->fs.Sel = uNewFS;
2922 pCtx->fs.fFlags = CPUMSELREG_FLAGS_STALE;
2923 pCtx->fs.Attr.u &= ~X86DESCATTR_P;
2924
2925 pCtx->gs.Sel = uNewGS;
2926 pCtx->gs.fFlags = CPUMSELREG_FLAGS_STALE;
2927 pCtx->gs.Attr.u &= ~X86DESCATTR_P;
2928 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_HIDDEN_SEL_REGS);
2929
2930 pCtx->ldtr.Sel = uNewLdt;
2931 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
2932 pCtx->ldtr.Attr.u &= ~X86DESCATTR_P;
2933 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_LDTR);
2934
2935 if (IEM_IS_GUEST_CPU_INTEL(pIemCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
2936 {
2937 pCtx->es.Attr.u |= X86DESCATTR_UNUSABLE;
2938 pCtx->cs.Attr.u |= X86DESCATTR_UNUSABLE;
2939 pCtx->ss.Attr.u |= X86DESCATTR_UNUSABLE;
2940 pCtx->ds.Attr.u |= X86DESCATTR_UNUSABLE;
2941 pCtx->fs.Attr.u |= X86DESCATTR_UNUSABLE;
2942 pCtx->gs.Attr.u |= X86DESCATTR_UNUSABLE;
2943 pCtx->ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
2944 }
2945
2946 /*
2947 * Switch CR3 for the new task.
2948 */
2949 if ( fIsNewTSS386
2950 && (pCtx->cr0 & X86_CR0_PG))
2951 {
2952 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
2953 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
2954 {
2955 int rc = CPUMSetGuestCR3(IEMCPU_TO_VMCPU(pIemCpu), uNewCr3);
2956 AssertRCSuccessReturn(rc, rc);
2957 }
2958 else
2959 pCtx->cr3 = uNewCr3;
2960
2961 /* Inform PGM. */
2962 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
2963 {
2964 int rc = PGMFlushTLB(IEMCPU_TO_VMCPU(pIemCpu), pCtx->cr3, !(pCtx->cr4 & X86_CR4_PGE));
2965 AssertRCReturn(rc, rc);
2966 /* ignore informational status codes */
2967 }
2968 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_CR3);
2969 }
2970
2971 /*
2972 * Switch LDTR for the new task.
2973 */
2974 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
2975 iemHlpLoadNullDataSelectorProt(pIemCpu, &pCtx->ldtr, uNewLdt);
2976 else
2977 {
2978 Assert(!pCtx->ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
2979
2980 IEMSELDESC DescNewLdt;
2981 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
2982 if (rcStrict != VINF_SUCCESS)
2983 {
2984 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
2985 uNewLdt, pCtx->gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
2986 return rcStrict;
2987 }
2988 if ( !DescNewLdt.Legacy.Gen.u1Present
2989 || DescNewLdt.Legacy.Gen.u1DescType
2990 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
2991 {
2992 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
2993 uNewLdt, DescNewLdt.Legacy.u));
2994 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
2995 }
2996
2997 pCtx->ldtr.ValidSel = uNewLdt;
2998 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
2999 pCtx->ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
3000 pCtx->ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
3001 pCtx->ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
3002 if (IEM_IS_GUEST_CPU_INTEL(pIemCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
3003 pCtx->ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
3004 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), &pCtx->ldtr));
3005 }
3006
3007 IEMSELDESC DescSS;
3008 if (IEM_IS_V86_MODE(pIemCpu))
3009 {
3010 pIemCpu->uCpl = 3;
3011 iemHlpLoadSelectorInV86Mode(pIemCpu, &pCtx->es, uNewES);
3012 iemHlpLoadSelectorInV86Mode(pIemCpu, &pCtx->cs, uNewCS);
3013 iemHlpLoadSelectorInV86Mode(pIemCpu, &pCtx->ss, uNewSS);
3014 iemHlpLoadSelectorInV86Mode(pIemCpu, &pCtx->ds, uNewDS);
3015 iemHlpLoadSelectorInV86Mode(pIemCpu, &pCtx->fs, uNewFS);
3016 iemHlpLoadSelectorInV86Mode(pIemCpu, &pCtx->gs, uNewGS);
3017 }
3018 else
3019 {
3020 uint8_t uNewCpl = (uNewCS & X86_SEL_RPL);
3021
3022 /*
3023 * Load the stack segment for the new task.
3024 */
3025 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
3026 {
3027 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
3028 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
3029 }
3030
3031 /* Fetch the descriptor. */
3032 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescSS, uNewSS, X86_XCPT_TS);
3033 if (rcStrict != VINF_SUCCESS)
3034 {
3035 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
3036 VBOXSTRICTRC_VAL(rcStrict)));
3037 return rcStrict;
3038 }
3039
3040 /* SS must be a data segment and writable. */
3041 if ( !DescSS.Legacy.Gen.u1DescType
3042 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
3043 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
3044 {
3045 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
3046 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
3047 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
3048 }
3049
3050 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
3051 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
3052 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
3053 {
3054 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
3055 uNewCpl));
3056 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
3057 }
3058
3059 /* Is it there? */
3060 if (!DescSS.Legacy.Gen.u1Present)
3061 {
3062 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
3063 return iemRaiseSelectorNotPresentWithErr(pIemCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
3064 }
3065
3066 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
3067 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
3068
3069 /* Set the accessed bit before committing the result into SS. */
3070 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3071 {
3072 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewSS);
3073 if (rcStrict != VINF_SUCCESS)
3074 return rcStrict;
3075 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3076 }
3077
3078 /* Commit SS. */
3079 pCtx->ss.Sel = uNewSS;
3080 pCtx->ss.ValidSel = uNewSS;
3081 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
3082 pCtx->ss.u32Limit = cbLimit;
3083 pCtx->ss.u64Base = u64Base;
3084 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
3085 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), &pCtx->ss));
3086
3087 /* CPL has changed, update IEM before loading rest of segments. */
3088 pIemCpu->uCpl = uNewCpl;
3089
3090 /*
3091 * Load the data segments for the new task.
3092 */
3093 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pIemCpu, &pCtx->es, uNewES);
3094 if (rcStrict != VINF_SUCCESS)
3095 return rcStrict;
3096 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pIemCpu, &pCtx->ds, uNewDS);
3097 if (rcStrict != VINF_SUCCESS)
3098 return rcStrict;
3099 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pIemCpu, &pCtx->fs, uNewFS);
3100 if (rcStrict != VINF_SUCCESS)
3101 return rcStrict;
3102 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pIemCpu, &pCtx->gs, uNewGS);
3103 if (rcStrict != VINF_SUCCESS)
3104 return rcStrict;
3105
3106 /*
3107 * Load the code segment for the new task.
3108 */
3109 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
3110 {
3111 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
3112 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3113 }
3114
3115 /* Fetch the descriptor. */
3116 IEMSELDESC DescCS;
3117 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCS, uNewCS, X86_XCPT_TS);
3118 if (rcStrict != VINF_SUCCESS)
3119 {
3120 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
3121 return rcStrict;
3122 }
3123
3124 /* CS must be a code segment. */
3125 if ( !DescCS.Legacy.Gen.u1DescType
3126 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
3127 {
3128 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
3129 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
3130 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3131 }
3132
3133 /* For conforming CS, DPL must be less than or equal to the RPL. */
3134 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
3135 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
3136 {
3137             Log(("iemTaskSwitch: conforming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
3138 DescCS.Legacy.Gen.u2Dpl));
3139 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3140 }
3141
3142 /* For non-conforming CS, DPL must match RPL. */
3143 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
3144 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
3145 {
3146             Log(("iemTaskSwitch: non-conforming CS DPL/RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
3147 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
3148 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3149 }
3150
3151 /* Is it there? */
3152 if (!DescCS.Legacy.Gen.u1Present)
3153 {
3154 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
3155 return iemRaiseSelectorNotPresentWithErr(pIemCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3156 }
3157
3158 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
3159 u64Base = X86DESC_BASE(&DescCS.Legacy);
3160
3161 /* Set the accessed bit before committing the result into CS. */
3162 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3163 {
3164 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewCS);
3165 if (rcStrict != VINF_SUCCESS)
3166 return rcStrict;
3167 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3168 }
3169
3170 /* Commit CS. */
3171 pCtx->cs.Sel = uNewCS;
3172 pCtx->cs.ValidSel = uNewCS;
3173 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3174 pCtx->cs.u32Limit = cbLimit;
3175 pCtx->cs.u64Base = u64Base;
3176 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3177 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), &pCtx->cs));
3178 }
3179
3180 /** @todo Debug trap. */
3181 if (fIsNewTSS386 && fNewDebugTrap)
3182 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
3183
3184     /*
3185      * Construct the error code masks based on what caused this task switch: the EXT bit is set
3186      * unless the event was triggered by a software interrupt (INT n / INT3 / INTO).  See the Intel instruction reference for INT.
3187      */
3188 uint16_t uExt;
3189 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
3190 && !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT))
3191 {
3192 uExt = 1;
3193 }
3194 else
3195 uExt = 0;
3196
3197 /*
3198 * Push any error code on to the new stack.
3199 */
3200 if (fFlags & IEM_XCPT_FLAGS_ERR)
3201 {
3202 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
3203 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
3204 uint8_t const cbStackFrame = fIsNewTSS386 ? 4 : 2;
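        /* A 386 TSS pushes the error code as a 32-bit value, a 286 TSS as a 16-bit value. */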
3205
3206 /* Check that there is sufficient space on the stack. */
3207 /** @todo Factor out segment limit checking for normal/expand down segments
3208 * into a separate function. */
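        /* For an expand-up segment the valid offsets are [0, limit]; for an expand-down
           segment they are (limit, 0xffff] or (limit, 0xffffffff] depending on the D/B bit. */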
3209 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
3210 {
3211 if ( pCtx->esp - 1 > cbLimitSS
3212 || pCtx->esp < cbStackFrame)
3213 {
3214 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
3215 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n", pCtx->ss.Sel, pCtx->esp,
3216 cbStackFrame));
3217 return iemRaiseStackSelectorNotPresentWithErr(pIemCpu, uExt);
3218 }
3219 }
3220 else
3221 {
3222             if ( pCtx->esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
3223 || pCtx->esp - cbStackFrame < cbLimitSS + UINT32_C(1))
3224 {
3225 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n", pCtx->ss.Sel, pCtx->esp,
3226 cbStackFrame));
3227 return iemRaiseStackSelectorNotPresentWithErr(pIemCpu, uExt);
3228 }
3229 }
3230
3231
3232 if (fIsNewTSS386)
3233 rcStrict = iemMemStackPushU32(pIemCpu, uErr);
3234 else
3235 rcStrict = iemMemStackPushU16(pIemCpu, uErr);
3236 if (rcStrict != VINF_SUCCESS)
3237 {
3238 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n", fIsNewTSS386 ? "32" : "16",
3239 VBOXSTRICTRC_VAL(rcStrict)));
3240 return rcStrict;
3241 }
3242 }
3243
3244 /* Check the new EIP against the new CS limit. */
3245 if (pCtx->eip > pCtx->cs.u32Limit)
3246 {
3247         Log(("iemTaskSwitch: New EIP exceeds CS limit. uNewEIP=%#RGv CS limit=%u -> #GP(0)\n",
3248 pCtx->eip, pCtx->cs.u32Limit));
3249 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
3250 return iemRaiseGeneralProtectionFault(pIemCpu, uExt);
3251 }
3252
3253 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pCtx->cs.Sel, pCtx->eip, pCtx->ss.Sel));
3254 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3255}
3256
3257
3258/**
3259 * Implements exceptions and interrupts for protected mode.
3260 *
3261 * @returns VBox strict status code.
3262 * @param pIemCpu The IEM per CPU instance data.
3263 * @param pCtx The CPU context.
3264 * @param cbInstr The number of bytes to offset rIP by in the return
3265 * address.
3266 * @param u8Vector The interrupt / exception vector number.
3267 * @param fFlags The flags.
3268 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3269 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3270 */
3271IEM_STATIC VBOXSTRICTRC
3272iemRaiseXcptOrIntInProtMode(PIEMCPU pIemCpu,
3273 PCPUMCTX pCtx,
3274 uint8_t cbInstr,
3275 uint8_t u8Vector,
3276 uint32_t fFlags,
3277 uint16_t uErr,
3278 uint64_t uCr2)
3279{
3280 /*
3281 * Read the IDT entry.
3282 */
3283 if (pCtx->idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
3284 {
3285 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
3286 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3287 }
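    /* Each protected mode IDT entry is 8 bytes, hence the '8 * vector + 7' bounds check above. */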
3288 X86DESC Idte;
3289 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pIemCpu, &Idte.u, UINT8_MAX,
3290 pCtx->idtr.pIdt + UINT32_C(8) * u8Vector);
3291 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3292 return rcStrict;
3293 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x\n",
3294 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
3295 Idte.Gate.u4ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
3296
3297 /*
3298 * Check the descriptor type, DPL and such.
3299 * ASSUMES this is done in the same order as described for call-gate calls.
3300 */
3301 if (Idte.Gate.u1DescType)
3302 {
3303 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3304 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3305 }
3306 bool fTaskGate = false;
3307     uint8_t f32BitGate = true;  /* Also used as a shift count (0 or 1) when sizing the stack frame below. */
3308 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
3309 switch (Idte.Gate.u4Type)
3310 {
3311 case X86_SEL_TYPE_SYS_UNDEFINED:
3312 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
3313 case X86_SEL_TYPE_SYS_LDT:
3314 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
3315 case X86_SEL_TYPE_SYS_286_CALL_GATE:
3316 case X86_SEL_TYPE_SYS_UNDEFINED2:
3317 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
3318 case X86_SEL_TYPE_SYS_UNDEFINED3:
3319 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
3320 case X86_SEL_TYPE_SYS_386_CALL_GATE:
3321 case X86_SEL_TYPE_SYS_UNDEFINED4:
3322 {
3323 /** @todo check what actually happens when the type is wrong...
3324 * esp. call gates. */
3325 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3326 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3327 }
3328
3329 case X86_SEL_TYPE_SYS_286_INT_GATE:
3330             f32BitGate = false;   /* fall thru */
3331 case X86_SEL_TYPE_SYS_386_INT_GATE:
3332             fEflToClear |= X86_EFL_IF;   /* Interrupt gates also clear IF; trap gates leave it set. */
3333 break;
3334
3335 case X86_SEL_TYPE_SYS_TASK_GATE:
3336 fTaskGate = true;
3337#ifndef IEM_IMPLEMENTS_TASKSWITCH
3338 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
3339#endif
3340 break;
3341
3342 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
3343             f32BitGate = false;   /* fall thru */
3344 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
3345 break;
3346
3347 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3348 }
3349
3350 /* Check DPL against CPL if applicable. */
3351 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3352 {
3353 if (pIemCpu->uCpl > Idte.Gate.u2Dpl)
3354 {
3355 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pIemCpu->uCpl, Idte.Gate.u2Dpl));
3356 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3357 }
3358 }
3359
3360 /* Is it there? */
3361 if (!Idte.Gate.u1Present)
3362 {
3363 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
3364 return iemRaiseSelectorNotPresentWithErr(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3365 }
3366
3367 /* Is it a task-gate? */
3368 if (fTaskGate)
3369 {
3370 /*
3371 * Construct the error code masks based on what caused this task switch.
3372 * See Intel Instruction reference for INT.
3373 */
3374 uint16_t const uExt = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? 0 : 1;
3375 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
3376 RTSEL SelTSS = Idte.Gate.u16Sel;
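        /* Faults raised while fetching or validating the TSS below report the TSS selector
           (sans RPL) together with the EXT bit as their error code. */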
3377
3378 /*
3379 * Fetch the TSS descriptor in the GDT.
3380 */
3381 IEMSELDESC DescTSS;
3382 rcStrict = iemMemFetchSelDescWithErr(pIemCpu, &DescTSS, SelTSS, X86_XCPT_GP, (SelTSS & uSelMask) | uExt);
3383 if (rcStrict != VINF_SUCCESS)
3384 {
3385 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTSS,
3386 VBOXSTRICTRC_VAL(rcStrict)));
3387 return rcStrict;
3388 }
3389
3390 /* The TSS descriptor must be a system segment and be available (not busy). */
3391 if ( DescTSS.Legacy.Gen.u1DescType
3392 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
3393 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
3394 {
3395 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
3396 u8Vector, SelTSS, DescTSS.Legacy.au64));
3397 return iemRaiseGeneralProtectionFault(pIemCpu, (SelTSS & uSelMask) | uExt);
3398 }
3399
3400 /* The TSS must be present. */
3401 if (!DescTSS.Legacy.Gen.u1Present)
3402 {
3403 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTSS, DescTSS.Legacy.au64));
3404 return iemRaiseSelectorNotPresentWithErr(pIemCpu, (SelTSS & uSelMask) | uExt);
3405 }
3406
3407 /* Do the actual task switch. */
3408 return iemTaskSwitch(pIemCpu, pCtx, IEMTASKSWITCH_INT_XCPT, pCtx->eip, fFlags, uErr, uCr2, SelTSS, &DescTSS);
3409 }
3410
3411 /* A null CS is bad. */
3412 RTSEL NewCS = Idte.Gate.u16Sel;
3413 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3414 {
3415 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3416 return iemRaiseGeneralProtectionFault0(pIemCpu);
3417 }
3418
3419 /* Fetch the descriptor for the new CS. */
3420 IEMSELDESC DescCS;
3421 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
3422 if (rcStrict != VINF_SUCCESS)
3423 {
3424 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3425 return rcStrict;
3426 }
3427
3428 /* Must be a code segment. */
3429 if (!DescCS.Legacy.Gen.u1DescType)
3430 {
3431 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3432 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3433 }
3434 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
3435 {
3436 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3437 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3438 }
3439
3440 /* Don't allow lowering the privilege level. */
3441 /** @todo Does the lowering of privileges apply to software interrupts
3442      *        only? This has a bearing on the more-privileged or
3443 * same-privilege stack behavior further down. A testcase would
3444 * be nice. */
3445 if (DescCS.Legacy.Gen.u2Dpl > pIemCpu->uCpl)
3446 {
3447 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3448 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
3449 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3450 }
3451
3452 /* Make sure the selector is present. */
3453 if (!DescCS.Legacy.Gen.u1Present)
3454 {
3455 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3456 return iemRaiseSelectorNotPresentBySelector(pIemCpu, NewCS);
3457 }
3458
3459 /* Check the new EIP against the new CS limit. */
3460 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
3461 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
3462 ? Idte.Gate.u16OffsetLow
3463 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
3464 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
3465 if (uNewEip > cbLimitCS)
3466 {
3467 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
3468 u8Vector, uNewEip, cbLimitCS, NewCS));
3469 return iemRaiseGeneralProtectionFault(pIemCpu, 0);
3470 }
3471
3472 /* Calc the flag image to push. */
3473 uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx);
3474 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3475 fEfl &= ~X86_EFL_RF;
3476 else if (!IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
3477 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3478
3479 /* From V8086 mode only go to CPL 0. */
3480 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3481 ? pIemCpu->uCpl : DescCS.Legacy.Gen.u2Dpl;
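    /* (A conforming CS leaves the CPL unchanged; a non-conforming CS runs the handler at CS.DPL.) */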
3482 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
3483 {
3484 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
3485 return iemRaiseGeneralProtectionFault(pIemCpu, 0);
3486 }
3487
3488 /*
3489 * If the privilege level changes, we need to get a new stack from the TSS.
3490 * This in turns means validating the new SS and ESP...
3491 */
3492 if (uNewCpl != pIemCpu->uCpl)
3493 {
3494 RTSEL NewSS;
3495 uint32_t uNewEsp;
3496 rcStrict = iemRaiseLoadStackFromTss32Or16(pIemCpu, pCtx, uNewCpl, &NewSS, &uNewEsp);
3497 if (rcStrict != VINF_SUCCESS)
3498 return rcStrict;
3499
3500 IEMSELDESC DescSS;
3501 rcStrict = iemMiscValidateNewSS(pIemCpu, pCtx, NewSS, uNewCpl, &DescSS);
3502 if (rcStrict != VINF_SUCCESS)
3503 return rcStrict;
3504
3505 /* Check that there is sufficient space for the stack frame. */
3506 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
3507 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
3508 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
3509 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
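        /* That is 5 entries (6 with an error code) of 2 or 4 bytes each - EIP, CS, EFLAGS, ESP
           and SS - plus ES, DS, FS and GS when interrupting V8086 code. */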
3510
3511 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
3512 {
3513 if ( uNewEsp - 1 > cbLimitSS
3514 || uNewEsp < cbStackFrame)
3515 {
3516 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
3517 u8Vector, NewSS, uNewEsp, cbStackFrame));
3518 return iemRaiseSelectorBoundsBySelector(pIemCpu, NewSS);
3519 }
3520 }
3521 else
3522 {
3523             if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
3524 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
3525 {
3526 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
3527 u8Vector, NewSS, uNewEsp, cbStackFrame));
3528 return iemRaiseSelectorBoundsBySelector(pIemCpu, NewSS);
3529 }
3530 }
3531
3532 /*
3533 * Start making changes.
3534 */
3535
3536 /* Create the stack frame. */
3537 RTPTRUNION uStackFrame;
3538 rcStrict = iemMemMap(pIemCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
3539 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy), IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
3540 if (rcStrict != VINF_SUCCESS)
3541 return rcStrict;
3542 void * const pvStackFrame = uStackFrame.pv;
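        /* The frame is written lowest address first: [error code,] EIP, CS, EFLAGS, old ESP and
           old SS, followed by the old ES, DS, FS and GS when interrupting V8086 code. */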
3543 if (f32BitGate)
3544 {
3545 if (fFlags & IEM_XCPT_FLAGS_ERR)
3546 *uStackFrame.pu32++ = uErr;
3547 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->eip + cbInstr : pCtx->eip;
3548 uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl;
3549 uStackFrame.pu32[2] = fEfl;
3550 uStackFrame.pu32[3] = pCtx->esp;
3551 uStackFrame.pu32[4] = pCtx->ss.Sel;
3552 if (fEfl & X86_EFL_VM)
3553 {
3554 uStackFrame.pu32[1] = pCtx->cs.Sel;
3555 uStackFrame.pu32[5] = pCtx->es.Sel;
3556 uStackFrame.pu32[6] = pCtx->ds.Sel;
3557 uStackFrame.pu32[7] = pCtx->fs.Sel;
3558 uStackFrame.pu32[8] = pCtx->gs.Sel;
3559 }
3560 }
3561 else
3562 {
3563 if (fFlags & IEM_XCPT_FLAGS_ERR)
3564 *uStackFrame.pu16++ = uErr;
3565 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->ip + cbInstr : pCtx->ip;
3566 uStackFrame.pu16[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl;
3567 uStackFrame.pu16[2] = fEfl;
3568 uStackFrame.pu16[3] = pCtx->sp;
3569 uStackFrame.pu16[4] = pCtx->ss.Sel;
3570 if (fEfl & X86_EFL_VM)
3571 {
3572 uStackFrame.pu16[1] = pCtx->cs.Sel;
3573 uStackFrame.pu16[5] = pCtx->es.Sel;
3574 uStackFrame.pu16[6] = pCtx->ds.Sel;
3575 uStackFrame.pu16[7] = pCtx->fs.Sel;
3576 uStackFrame.pu16[8] = pCtx->gs.Sel;
3577 }
3578 }
3579 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
3580 if (rcStrict != VINF_SUCCESS)
3581 return rcStrict;
3582
3583 /* Mark the selectors 'accessed' (hope this is the correct time). */
3584         /** @todo testcase: exactly _when_ are the accessed bits set - before or
3585 * after pushing the stack frame? (Write protect the gdt + stack to
3586 * find out.) */
3587 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3588 {
3589 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewCS);
3590 if (rcStrict != VINF_SUCCESS)
3591 return rcStrict;
3592 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3593 }
3594
3595 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3596 {
3597 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewSS);
3598 if (rcStrict != VINF_SUCCESS)
3599 return rcStrict;
3600 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3601 }
3602
3603 /*
3604          * Start committing the register changes (joins with the DPL=CPL branch).
3605 */
3606 pCtx->ss.Sel = NewSS;
3607 pCtx->ss.ValidSel = NewSS;
3608 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
3609 pCtx->ss.u32Limit = cbLimitSS;
3610 pCtx->ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
3611 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
3612 /** @todo When coming from 32-bit code and operating with a 16-bit TSS and
3613 * 16-bit handler, the high word of ESP remains unchanged (i.e. only
3614 * SP is loaded).
3615 * Need to check the other combinations too:
3616 * - 16-bit TSS, 32-bit handler
3617 * - 32-bit TSS, 16-bit handler */
3618 if (!pCtx->ss.Attr.n.u1DefBig)
3619 pCtx->sp = (uint16_t)(uNewEsp - cbStackFrame);
3620 else
3621 pCtx->rsp = uNewEsp - cbStackFrame;
3622 pIemCpu->uCpl = uNewCpl;
3623
3624 if (fEfl & X86_EFL_VM)
3625 {
3626 iemHlpLoadNullDataSelectorOnV86Xcpt(pIemCpu, &pCtx->gs);
3627 iemHlpLoadNullDataSelectorOnV86Xcpt(pIemCpu, &pCtx->fs);
3628 iemHlpLoadNullDataSelectorOnV86Xcpt(pIemCpu, &pCtx->es);
3629 iemHlpLoadNullDataSelectorOnV86Xcpt(pIemCpu, &pCtx->ds);
3630 }
3631 }
3632 /*
3633 * Same privilege, no stack change and smaller stack frame.
3634 */
3635 else
3636 {
3637 uint64_t uNewRsp;
3638 RTPTRUNION uStackFrame;
3639 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
3640 rcStrict = iemMemStackPushBeginSpecial(pIemCpu, cbStackFrame, &uStackFrame.pv, &uNewRsp);
3641 if (rcStrict != VINF_SUCCESS)
3642 return rcStrict;
3643 void * const pvStackFrame = uStackFrame.pv;
3644
3645 if (f32BitGate)
3646 {
3647 if (fFlags & IEM_XCPT_FLAGS_ERR)
3648 *uStackFrame.pu32++ = uErr;
3649 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->eip + cbInstr : pCtx->eip;
3650 uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl;
3651 uStackFrame.pu32[2] = fEfl;
3652 }
3653 else
3654 {
3655 if (fFlags & IEM_XCPT_FLAGS_ERR)
3656 *uStackFrame.pu16++ = uErr;
3657 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->eip + cbInstr : pCtx->eip;
3658 uStackFrame.pu16[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl;
3659 uStackFrame.pu16[2] = fEfl;
3660 }
3661 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */
3662 if (rcStrict != VINF_SUCCESS)
3663 return rcStrict;
3664
3665 /* Mark the CS selector as 'accessed'. */
3666 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3667 {
3668 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewCS);
3669 if (rcStrict != VINF_SUCCESS)
3670 return rcStrict;
3671 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3672 }
3673
3674 /*
3675 * Start committing the register changes (joins with the other branch).
3676 */
3677 pCtx->rsp = uNewRsp;
3678 }
3679
3680 /* ... register committing continues. */
3681 pCtx->cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3682 pCtx->cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3683 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3684 pCtx->cs.u32Limit = cbLimitCS;
3685 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3686 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3687
3688 pCtx->rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */
3689 fEfl &= ~fEflToClear;
3690 IEMMISC_SET_EFL(pIemCpu, pCtx, fEfl);
3691
3692 if (fFlags & IEM_XCPT_FLAGS_CR2)
3693 pCtx->cr2 = uCr2;
3694
3695 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3696 iemRaiseXcptAdjustState(pCtx, u8Vector);
3697
3698 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3699}
3700
3701
3702/**
3703 * Implements exceptions and interrupts for long mode.
3704 *
3705 * @returns VBox strict status code.
3706 * @param pIemCpu The IEM per CPU instance data.
3707 * @param pCtx The CPU context.
3708 * @param cbInstr The number of bytes to offset rIP by in the return
3709 * address.
3710 * @param u8Vector The interrupt / exception vector number.
3711 * @param fFlags The flags.
3712 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3713 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3714 */
3715IEM_STATIC VBOXSTRICTRC
3716iemRaiseXcptOrIntInLongMode(PIEMCPU pIemCpu,
3717 PCPUMCTX pCtx,
3718 uint8_t cbInstr,
3719 uint8_t u8Vector,
3720 uint32_t fFlags,
3721 uint16_t uErr,
3722 uint64_t uCr2)
3723{
3724 /*
3725 * Read the IDT entry.
3726 */
3727 uint16_t offIdt = (uint16_t)u8Vector << 4;
3728 if (pCtx->idtr.cbIdt < offIdt + 7)
3729 {
3730 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
3731 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3732 }
3733 X86DESC64 Idte;
3734 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pIemCpu, &Idte.au64[0], UINT8_MAX, pCtx->idtr.pIdt + offIdt);
3735 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
3736 rcStrict = iemMemFetchSysU64(pIemCpu, &Idte.au64[1], UINT8_MAX, pCtx->idtr.pIdt + offIdt + 8);
3737 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3738 return rcStrict;
3739 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
3740 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
3741 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
3742
3743 /*
3744 * Check the descriptor type, DPL and such.
3745 * ASSUMES this is done in the same order as described for call-gate calls.
3746 */
3747 if (Idte.Gate.u1DescType)
3748 {
3749 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3750 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3751 }
3752 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
3753 switch (Idte.Gate.u4Type)
3754 {
3755 case AMD64_SEL_TYPE_SYS_INT_GATE:
3756 fEflToClear |= X86_EFL_IF;
3757 break;
3758 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
3759 break;
3760
3761 default:
3762 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3763 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3764 }
3765
3766 /* Check DPL against CPL if applicable. */
3767 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3768 {
3769 if (pIemCpu->uCpl > Idte.Gate.u2Dpl)
3770 {
3771 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pIemCpu->uCpl, Idte.Gate.u2Dpl));
3772 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3773 }
3774 }
3775
3776 /* Is it there? */
3777 if (!Idte.Gate.u1Present)
3778 {
3779 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
3780 return iemRaiseSelectorNotPresentWithErr(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3781 }
3782
3783 /* A null CS is bad. */
3784 RTSEL NewCS = Idte.Gate.u16Sel;
3785 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3786 {
3787 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3788 return iemRaiseGeneralProtectionFault0(pIemCpu);
3789 }
3790
3791 /* Fetch the descriptor for the new CS. */
3792 IEMSELDESC DescCS;
3793 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCS, NewCS, X86_XCPT_GP);
3794 if (rcStrict != VINF_SUCCESS)
3795 {
3796 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3797 return rcStrict;
3798 }
3799
3800 /* Must be a 64-bit code segment. */
3801 if (!DescCS.Long.Gen.u1DescType)
3802 {
3803 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3804 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3805 }
3806 if ( !DescCS.Long.Gen.u1Long
3807 || DescCS.Long.Gen.u1DefBig
3808 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
3809 {
3810 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
3811 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
3812 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3813 }
3814
3815 /* Don't allow lowering the privilege level. For non-conforming CS
3816 selectors, the CS.DPL sets the privilege level the trap/interrupt
3817 handler runs at. For conforming CS selectors, the CPL remains
3818 unchanged, but the CS.DPL must be <= CPL. */
3819 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
3820 * when CPU in Ring-0. Result \#GP? */
3821 if (DescCS.Legacy.Gen.u2Dpl > pIemCpu->uCpl)
3822 {
3823 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3824 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
3825 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3826 }
3827
3828
3829 /* Make sure the selector is present. */
3830 if (!DescCS.Legacy.Gen.u1Present)
3831 {
3832 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3833 return iemRaiseSelectorNotPresentBySelector(pIemCpu, NewCS);
3834 }
3835
3836 /* Check that the new RIP is canonical. */
3837 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
3838 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
3839 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
3840 if (!IEM_IS_CANONICAL(uNewRip))
3841 {
3842 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
3843 return iemRaiseGeneralProtectionFault0(pIemCpu);
3844 }
3845
3846 /*
3847 * If the privilege level changes or if the IST isn't zero, we need to get
3848 * a new stack from the TSS.
3849 */
3850 uint64_t uNewRsp;
3851 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3852 ? pIemCpu->uCpl : DescCS.Legacy.Gen.u2Dpl;
3853 if ( uNewCpl != pIemCpu->uCpl
3854 || Idte.Gate.u3IST != 0)
3855 {
3856 rcStrict = iemRaiseLoadStackFromTss64(pIemCpu, pCtx, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
3857 if (rcStrict != VINF_SUCCESS)
3858 return rcStrict;
3859 }
3860 else
3861 uNewRsp = pCtx->rsp;
3862 uNewRsp &= ~(uint64_t)0xf;
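    /* In 64-bit mode the CPU aligns the new RSP down to a 16 byte boundary before pushing the frame. */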
3863
3864 /*
3865 * Calc the flag image to push.
3866 */
3867 uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx);
3868 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3869 fEfl &= ~X86_EFL_RF;
3870 else if (!IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
3871 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3872
3873 /*
3874 * Start making changes.
3875 */
3876
3877 /* Create the stack frame. */
3878 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
3879 RTPTRUNION uStackFrame;
3880 rcStrict = iemMemMap(pIemCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
3881 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
3882 if (rcStrict != VINF_SUCCESS)
3883 return rcStrict;
3884 void * const pvStackFrame = uStackFrame.pv;
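    /* The long mode frame always contains SS, RSP, RFLAGS, CS and RIP (five qwords), plus the
       error code qword when applicable; it is written lowest address first below. */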
3885
3886 if (fFlags & IEM_XCPT_FLAGS_ERR)
3887 *uStackFrame.pu64++ = uErr;
3888 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->rip + cbInstr : pCtx->rip;
3889 uStackFrame.pu64[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl; /* CPL paranoia */
3890 uStackFrame.pu64[2] = fEfl;
3891 uStackFrame.pu64[3] = pCtx->rsp;
3892 uStackFrame.pu64[4] = pCtx->ss.Sel;
3893 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
3894 if (rcStrict != VINF_SUCCESS)
3895 return rcStrict;
3896
3897     /* Mark the CS selector as 'accessed' (hope this is the correct time). */
3898     /** @todo testcase: exactly _when_ are the accessed bits set - before or
3899 * after pushing the stack frame? (Write protect the gdt + stack to
3900 * find out.) */
3901 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3902 {
3903 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewCS);
3904 if (rcStrict != VINF_SUCCESS)
3905 return rcStrict;
3906 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3907 }
3908
3909 /*
3910      * Start committing the register changes.
3911 */
3912 /** @todo research/testcase: Figure out what VT-x and AMD-V loads into the
3913 * hidden registers when interrupting 32-bit or 16-bit code! */
3914 if (uNewCpl != pIemCpu->uCpl)
3915 {
3916 pCtx->ss.Sel = 0 | uNewCpl;
3917 pCtx->ss.ValidSel = 0 | uNewCpl;
3918 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
3919 pCtx->ss.u32Limit = UINT32_MAX;
3920 pCtx->ss.u64Base = 0;
3921 pCtx->ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
3922 }
3923 pCtx->rsp = uNewRsp - cbStackFrame;
3924 pCtx->cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3925 pCtx->cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3926 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3927 pCtx->cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
3928 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3929 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3930 pCtx->rip = uNewRip;
3931 pIemCpu->uCpl = uNewCpl;
3932
3933 fEfl &= ~fEflToClear;
3934 IEMMISC_SET_EFL(pIemCpu, pCtx, fEfl);
3935
3936 if (fFlags & IEM_XCPT_FLAGS_CR2)
3937 pCtx->cr2 = uCr2;
3938
3939 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3940 iemRaiseXcptAdjustState(pCtx, u8Vector);
3941
3942 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3943}
3944
3945
3946/**
3947 * Implements exceptions and interrupts.
3948 *
3949 * All exceptions and interrupts go through this function!
3950 *
3951 * @returns VBox strict status code.
3952 * @param pIemCpu The IEM per CPU instance data.
3953 * @param cbInstr The number of bytes to offset rIP by in the return
3954 * address.
3955 * @param u8Vector The interrupt / exception vector number.
3956 * @param fFlags The flags.
3957 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3958 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3959 */
3960DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC)
3961iemRaiseXcptOrInt(PIEMCPU pIemCpu,
3962 uint8_t cbInstr,
3963 uint8_t u8Vector,
3964 uint32_t fFlags,
3965 uint16_t uErr,
3966 uint64_t uCr2)
3967{
3968 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3969#ifdef IN_RING0
3970 int rc = HMR0EnsureCompleteBasicContext(IEMCPU_TO_VMCPU(pIemCpu), pCtx);
3971 AssertRCReturn(rc, rc);
3972#endif
3973
3974 /*
3975 * Perform the V8086 IOPL check and upgrade the fault without nesting.
3976 */
3977 if ( pCtx->eflags.Bits.u1VM
3978 && pCtx->eflags.Bits.u2IOPL != 3
3979 && (fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
3980 && (pCtx->cr0 & X86_CR0_PE) )
3981 {
3982 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
3983 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
3984 u8Vector = X86_XCPT_GP;
3985 uErr = 0;
3986 }
3987#ifdef DBGFTRACE_ENABLED
3988 RTTraceBufAddMsgF(IEMCPU_TO_VM(pIemCpu)->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
3989 pIemCpu->cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
3990 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp);
3991#endif
3992
3993 /*
3994 * Do recursion accounting.
3995 */
3996 uint8_t const uPrevXcpt = pIemCpu->uCurXcpt;
3997 uint32_t const fPrevXcpt = pIemCpu->fCurXcpt;
3998 if (pIemCpu->cXcptRecursions == 0)
3999 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
4000 u8Vector, pCtx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2));
4001 else
4002 {
4003 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
4004 u8Vector, pCtx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2, pIemCpu->uCurXcpt, pIemCpu->cXcptRecursions + 1, fPrevXcpt));
4005
4006         /** @todo double and triple faults. */
4007 if (pIemCpu->cXcptRecursions >= 3)
4008 {
4009#ifdef DEBUG_bird
4010 AssertFailed();
4011#endif
4012 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
4013 }
4014
4015 /** @todo set X86_TRAP_ERR_EXTERNAL when appropriate.
4016 if (fPrevXcpt & IEM_XCPT_FLAGS_T_EXT_INT)
4017 {
4018 ....
4019 } */
4020 }
4021 pIemCpu->cXcptRecursions++;
4022 pIemCpu->uCurXcpt = u8Vector;
4023 pIemCpu->fCurXcpt = fFlags;
4024
4025 /*
4026 * Extensive logging.
4027 */
4028#if defined(LOG_ENABLED) && defined(IN_RING3)
4029 if (LogIs3Enabled())
4030 {
4031 PVM pVM = IEMCPU_TO_VM(pIemCpu);
4032 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
4033 char szRegs[4096];
4034 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
4035 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
4036 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
4037 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
4038 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
4039 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
4040 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
4041 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
4042 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
4043 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
4044 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
4045 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
4046 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
4047 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
4048 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
4049 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
4050 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
4051 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
4052 " efer=%016VR{efer}\n"
4053 " pat=%016VR{pat}\n"
4054 " sf_mask=%016VR{sf_mask}\n"
4055 "krnl_gs_base=%016VR{krnl_gs_base}\n"
4056 " lstar=%016VR{lstar}\n"
4057 " star=%016VR{star} cstar=%016VR{cstar}\n"
4058 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
4059 );
4060
4061 char szInstr[256];
4062 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
4063 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
4064 szInstr, sizeof(szInstr), NULL);
4065 Log3(("%s%s\n", szRegs, szInstr));
4066 }
4067#endif /* LOG_ENABLED */
4068
4069 /*
4070 * Call the mode specific worker function.
4071 */
4072 VBOXSTRICTRC rcStrict;
4073 if (!(pCtx->cr0 & X86_CR0_PE))
4074 rcStrict = iemRaiseXcptOrIntInRealMode( pIemCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
4075 else if (pCtx->msrEFER & MSR_K6_EFER_LMA)
4076 rcStrict = iemRaiseXcptOrIntInLongMode( pIemCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
4077 else
4078 rcStrict = iemRaiseXcptOrIntInProtMode( pIemCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
4079
4080 /*
4081 * Unwind.
4082 */
4083 pIemCpu->cXcptRecursions--;
4084 pIemCpu->uCurXcpt = uPrevXcpt;
4085 pIemCpu->fCurXcpt = fPrevXcpt;
4086 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u\n",
4087 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->esp, pIemCpu->uCpl));
4088 return rcStrict;
4089}
4090
4091
4092/** \#DE - 00. */
4093DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDivideError(PIEMCPU pIemCpu)
4094{
4095 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4096}
4097
4098
4099/** \#DB - 01.
4100 * @note This automatically clears DR7.GD. */
4101DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDebugException(PIEMCPU pIemCpu)
4102{
4103 /** @todo set/clear RF. */
4104 pIemCpu->CTX_SUFF(pCtx)->dr[7] &= ~X86_DR7_GD;
4105 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4106}
4107
4108
4109/** \#UD - 06. */
4110DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseUndefinedOpcode(PIEMCPU pIemCpu)
4111{
4112 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4113}
4114
4115
4116/** \#NM - 07. */
4117DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDeviceNotAvailable(PIEMCPU pIemCpu)
4118{
4119 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4120}
4121
4122
4123/** \#TS(err) - 0a. */
4124DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultWithErr(PIEMCPU pIemCpu, uint16_t uErr)
4125{
4126 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4127}
4128
4129
4130/** \#TS(tr) - 0a. */
4131DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultCurrentTSS(PIEMCPU pIemCpu)
4132{
4133 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4134 pIemCpu->CTX_SUFF(pCtx)->tr.Sel, 0);
4135}
4136
4137
4138/** \#TS(0) - 0a. */
4139DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFault0(PIEMCPU pIemCpu)
4140{
4141 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4142 0, 0);
4143}
4144
4145
4146/** \#TS(err) - 0a. */
4147DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultBySelector(PIEMCPU pIemCpu, uint16_t uSel)
4148{
4149 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4150 uSel & X86_SEL_MASK_OFF_RPL, 0);
4151}
4152
4153
4154/** \#NP(err) - 0b. */
4155DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentWithErr(PIEMCPU pIemCpu, uint16_t uErr)
4156{
4157 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4158}
4159
4160
4161/** \#NP(seg) - 0b. */
4162DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySegReg(PIEMCPU pIemCpu, uint32_t iSegReg)
4163{
4164 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4165 iemSRegFetchU16(pIemCpu, iSegReg) & ~X86_SEL_RPL, 0);
4166}
4167
4168
4169/** \#NP(sel) - 0b. */
4170DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel)
4171{
4172 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4173 uSel & ~X86_SEL_RPL, 0);
4174}
4175
4176
4177/** \#SS(seg) - 0c. */
4178DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel)
4179{
4180 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4181 uSel & ~X86_SEL_RPL, 0);
4182}
4183
4184
4185/** \#SS(err) - 0c. */
4186DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentWithErr(PIEMCPU pIemCpu, uint16_t uErr)
4187{
4188 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4189}
4190
4191
4192/** \#GP(n) - 0d. */
4193DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault(PIEMCPU pIemCpu, uint16_t uErr)
4194{
4195 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4196}
4197
4198
4199/** \#GP(0) - 0d. */
4200DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault0(PIEMCPU pIemCpu)
4201{
4202 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4203}
4204
4205
4206/** \#GP(sel) - 0d. */
4207DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFaultBySelector(PIEMCPU pIemCpu, RTSEL Sel)
4208{
4209 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4210 Sel & ~X86_SEL_RPL, 0);
4211}
4212
4213
4214/** \#GP(0) - 0d. */
4215DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseNotCanonical(PIEMCPU pIemCpu)
4216{
4217 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4218}
4219
4220
4221/** \#GP(sel) - 0d. */
4222DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBounds(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess)
4223{
4224 NOREF(iSegReg); NOREF(fAccess);
4225 return iemRaiseXcptOrInt(pIemCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
4226 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4227}
4228
4229
4230/** \#GP(sel) - 0d. */
4231DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBoundsBySelector(PIEMCPU pIemCpu, RTSEL Sel)
4232{
4233 NOREF(Sel);
4234 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4235}
4236
4237
4238/** \#GP(sel) - 0d. */
4239DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorInvalidAccess(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess)
4240{
4241 NOREF(iSegReg); NOREF(fAccess);
4242 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4243}
4244
4245
4246/** \#PF(n) - 0e. */
4247DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaisePageFault(PIEMCPU pIemCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
4248{
4249 uint16_t uErr;
4250 switch (rc)
4251 {
4252 case VERR_PAGE_NOT_PRESENT:
4253 case VERR_PAGE_TABLE_NOT_PRESENT:
4254 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
4255 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
4256 uErr = 0;
4257 break;
4258
4259 default:
4260             AssertMsgFailed(("%Rrc\n", rc)); /* fall thru */
4261 case VERR_ACCESS_DENIED:
4262 uErr = X86_TRAP_PF_P;
4263 break;
4264
4265 /** @todo reserved */
4266 }
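    /* The remaining #PF error code bits are ORed in below: US (bit 2) for CPL 3 accesses,
       ID (bit 4) for instruction fetches with NX enabled and RW (bit 1) for writes. */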
4267
4268 if (pIemCpu->uCpl == 3)
4269 uErr |= X86_TRAP_PF_US;
4270
4271 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
4272 && ( (pIemCpu->CTX_SUFF(pCtx)->cr4 & X86_CR4_PAE)
4273 && (pIemCpu->CTX_SUFF(pCtx)->msrEFER & MSR_K6_EFER_NXE) ) )
4274 uErr |= X86_TRAP_PF_ID;
4275
4276#if 0 /* This is so much non-sense, really. Why was it done like that? */
4277 /* Note! RW access callers reporting a WRITE protection fault, will clear
4278 the READ flag before calling. So, read-modify-write accesses (RW)
4279 can safely be reported as READ faults. */
4280 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
4281 uErr |= X86_TRAP_PF_RW;
4282#else
4283 if (fAccess & IEM_ACCESS_TYPE_WRITE)
4284 {
4285 if (!IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu) || !(fAccess & IEM_ACCESS_TYPE_READ))
4286 uErr |= X86_TRAP_PF_RW;
4287 }
4288#endif
4289
4290 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
4291 uErr, GCPtrWhere);
4292}
4293
4294
4295/** \#MF(0) - 10. */
4296DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseMathFault(PIEMCPU pIemCpu)
4297{
4298 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4299}
4300
4301
4302/** \#AC(0) - 11. */
4303DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseAlignmentCheckException(PIEMCPU pIemCpu)
4304{
4305 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4306}
4307
4308
4309/**
4310 * Macro for calling iemCImplRaiseDivideError().
4311 *
4312 * This enables us to add/remove arguments and force different levels of
4313 * inlining as we wish.
4314 *
4315 * @return Strict VBox status code.
4316 */
4317#define IEMOP_RAISE_DIVIDE_ERROR() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseDivideError)
4318IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
4319{
4320 NOREF(cbInstr);
4321 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4322}
4323
4324
4325/**
4326 * Macro for calling iemCImplRaiseInvalidLockPrefix().
4327 *
4328 * This enables us to add/remove arguments and force different levels of
4329 * inlining as we wish.
4330 *
4331 * @return Strict VBox status code.
4332 */
4333#define IEMOP_RAISE_INVALID_LOCK_PREFIX() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidLockPrefix)
4334IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
4335{
4336 NOREF(cbInstr);
4337 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4338}
4339
4340
4341/**
4342 * Macro for calling iemCImplRaiseInvalidOpcode().
4343 *
4344 * This enables us to add/remove arguments and force different levels of
4345 * inlining as we wish.
4346 *
4347 * @return Strict VBox status code.
4348 */
4349#define IEMOP_RAISE_INVALID_OPCODE() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidOpcode)
4350IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
4351{
4352 NOREF(cbInstr);
4353 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4354}
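
/* Illustrative sketch only (hypothetical handler name): an opcode decoder that cannot accept
   the current encoding would typically just defer to one of the IEMOP_RAISE_* macros above:

       FNIEMOP_DEF(iemOp_SomeUnimplementedOpcode)
       {
           return IEMOP_RAISE_INVALID_OPCODE();
       }
*/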
4355
4356
4357/** @} */
4358
4359
4360/*
4361 *
4362 * Helper routines.
4363 * Helper routines.
4364 * Helper routines.
4365 *
4366 */
4367
4368/**
4369 * Recalculates the effective operand size.
4370 *
4371 * @param pIemCpu The IEM state.
4372 */
4373IEM_STATIC void iemRecalEffOpSize(PIEMCPU pIemCpu)
4374{
4375 switch (pIemCpu->enmCpuMode)
4376 {
4377 case IEMMODE_16BIT:
4378 pIemCpu->enmEffOpSize = pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_32BIT : IEMMODE_16BIT;
4379 break;
4380 case IEMMODE_32BIT:
4381 pIemCpu->enmEffOpSize = pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_16BIT : IEMMODE_32BIT;
4382 break;
4383 case IEMMODE_64BIT:
4384 switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP))
4385 {
4386 case 0:
4387 pIemCpu->enmEffOpSize = pIemCpu->enmDefOpSize;
4388 break;
4389 case IEM_OP_PRF_SIZE_OP:
4390 pIemCpu->enmEffOpSize = IEMMODE_16BIT;
4391 break;
4392 case IEM_OP_PRF_SIZE_REX_W:
4393 case IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP:
4394 pIemCpu->enmEffOpSize = IEMMODE_64BIT;
4395 break;
4396 }
4397 break;
4398 default:
4399 AssertFailed();
4400 }
4401}
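
/* Summary of the 64-bit mode rules implemented above: with neither prefix the default operand
   size (normally 32-bit) applies, 0x66 alone selects 16-bit, and REX.W selects 64-bit and
   takes precedence over 0x66. */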
4402
4403
4404/**
4405 * Sets the default operand size to 64-bit and recalculates the effective
4406 * operand size.
4407 *
4408 * @param pIemCpu The IEM state.
4409 */
4410IEM_STATIC void iemRecalEffOpSize64Default(PIEMCPU pIemCpu)
4411{
4412 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
4413 pIemCpu->enmDefOpSize = IEMMODE_64BIT;
4414 if ((pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP)) != IEM_OP_PRF_SIZE_OP)
4415 pIemCpu->enmEffOpSize = IEMMODE_64BIT;
4416 else
4417 pIemCpu->enmEffOpSize = IEMMODE_16BIT;
4418}
4419
4420
4421/*
4422 *
4423 * Common opcode decoders.
4424 * Common opcode decoders.
4425 * Common opcode decoders.
4426 *
4427 */
4428//#include <iprt/mem.h>
4429
4430/**
4431 * Used to add extra details about a stub case.
4432 * @param pIemCpu The IEM per CPU state.
4433 */
4434IEM_STATIC void iemOpStubMsg2(PIEMCPU pIemCpu)
4435{
4436#if defined(LOG_ENABLED) && defined(IN_RING3)
4437 PVM pVM = IEMCPU_TO_VM(pIemCpu);
4438 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
4439 char szRegs[4096];
4440 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
4441 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
4442 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
4443 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
4444 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
4445 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
4446 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
4447 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
4448 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
4449 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
4450 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
4451 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
4452 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
4453 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
4454 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
4455 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
4456 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
4457 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
4458 " efer=%016VR{efer}\n"
4459 " pat=%016VR{pat}\n"
4460 " sf_mask=%016VR{sf_mask}\n"
4461 "krnl_gs_base=%016VR{krnl_gs_base}\n"
4462 " lstar=%016VR{lstar}\n"
4463 " star=%016VR{star} cstar=%016VR{cstar}\n"
4464 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
4465 );
4466
4467 char szInstr[256];
4468 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
4469 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
4470 szInstr, sizeof(szInstr), NULL);
4471
4472 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
4473#else
4474     RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", pIemCpu->CTX_SUFF(pCtx)->cs.Sel, pIemCpu->CTX_SUFF(pCtx)->rip);
4475#endif
4476}
4477
4478/**
4479 * Complains about a stub.
4480 *
4481 * Providing two versions of this macro, one for daily use and one for use when
4482 * working on IEM.
4483 */
4484#if 0
4485# define IEMOP_BITCH_ABOUT_STUB() \
4486 do { \
4487 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__); \
4488 iemOpStubMsg2(pIemCpu); \
4489 RTAssertPanic(); \
4490 } while (0)
4491#else
4492# define IEMOP_BITCH_ABOUT_STUB() Log(("Stub: %s (line %d)\n", __FUNCTION__, __LINE__));
4493#endif
4494
4495/** Stubs an opcode. */
4496#define FNIEMOP_STUB(a_Name) \
4497 FNIEMOP_DEF(a_Name) \
4498 { \
4499 IEMOP_BITCH_ABOUT_STUB(); \
4500 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
4501 } \
4502 typedef int ignore_semicolon
4503
4504/** Stubs an opcode. */
4505#define FNIEMOP_STUB_1(a_Name, a_Type0, a_Name0) \
4506 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
4507 { \
4508 IEMOP_BITCH_ABOUT_STUB(); \
4509 NOREF(a_Name0); \
4510 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
4511 } \
4512 typedef int ignore_semicolon
4513
4514/** Stubs an opcode which currently should raise \#UD. */
4515#define FNIEMOP_UD_STUB(a_Name) \
4516 FNIEMOP_DEF(a_Name) \
4517 { \
4518 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
4519 return IEMOP_RAISE_INVALID_OPCODE(); \
4520 } \
4521 typedef int ignore_semicolon
4522
4523/** Stubs an opcode which currently should raise \#UD. */
4524#define FNIEMOP_UD_STUB_1(a_Name, a_Type0, a_Name0) \
4525 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
4526 { \
4527 NOREF(a_Name0); \
4528 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
4529 return IEMOP_RAISE_INVALID_OPCODE(); \
4530 } \
4531 typedef int ignore_semicolon
4532
4533
4534
4535/** @name Register Access.
4536 * @{
4537 */
4538
4539/**
4540 * Gets a reference (pointer) to the specified hidden segment register.
4541 *
4542 * @returns Hidden register reference.
4543 * @param pIemCpu The per CPU data.
4544 * @param iSegReg The segment register.
4545 */
4546IEM_STATIC PCPUMSELREG iemSRegGetHid(PIEMCPU pIemCpu, uint8_t iSegReg)
4547{
4548 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4549 PCPUMSELREG pSReg;
4550 switch (iSegReg)
4551 {
4552 case X86_SREG_ES: pSReg = &pCtx->es; break;
4553 case X86_SREG_CS: pSReg = &pCtx->cs; break;
4554 case X86_SREG_SS: pSReg = &pCtx->ss; break;
4555 case X86_SREG_DS: pSReg = &pCtx->ds; break;
4556 case X86_SREG_FS: pSReg = &pCtx->fs; break;
4557 case X86_SREG_GS: pSReg = &pCtx->gs; break;
4558 default:
4559 AssertFailedReturn(NULL);
4560 }
4561#ifdef VBOX_WITH_RAW_MODE_NOT_R0
4562 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pSReg))
4563 CPUMGuestLazyLoadHiddenSelectorReg(IEMCPU_TO_VMCPU(pIemCpu), pSReg);
4564#else
4565 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pSReg));
4566#endif
4567 return pSReg;
4568}
4569
4570
4571/**
4572 * Gets a reference (pointer) to the specified segment register (the selector
4573 * value).
4574 *
4575 * @returns Pointer to the selector variable.
4576 * @param pIemCpu The per CPU data.
4577 * @param iSegReg The segment register.
4578 */
4579IEM_STATIC uint16_t *iemSRegRef(PIEMCPU pIemCpu, uint8_t iSegReg)
4580{
4581 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4582 switch (iSegReg)
4583 {
4584 case X86_SREG_ES: return &pCtx->es.Sel;
4585 case X86_SREG_CS: return &pCtx->cs.Sel;
4586 case X86_SREG_SS: return &pCtx->ss.Sel;
4587 case X86_SREG_DS: return &pCtx->ds.Sel;
4588 case X86_SREG_FS: return &pCtx->fs.Sel;
4589 case X86_SREG_GS: return &pCtx->gs.Sel;
4590 }
4591 AssertFailedReturn(NULL);
4592}
4593
4594
4595/**
4596 * Fetches the selector value of a segment register.
4597 *
4598 * @returns The selector value.
4599 * @param pIemCpu The per CPU data.
4600 * @param iSegReg The segment register.
4601 */
4602IEM_STATIC uint16_t iemSRegFetchU16(PIEMCPU pIemCpu, uint8_t iSegReg)
4603{
4604 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4605 switch (iSegReg)
4606 {
4607 case X86_SREG_ES: return pCtx->es.Sel;
4608 case X86_SREG_CS: return pCtx->cs.Sel;
4609 case X86_SREG_SS: return pCtx->ss.Sel;
4610 case X86_SREG_DS: return pCtx->ds.Sel;
4611 case X86_SREG_FS: return pCtx->fs.Sel;
4612 case X86_SREG_GS: return pCtx->gs.Sel;
4613 }
4614 AssertFailedReturn(0xffff);
4615}
4616
4617
4618/**
4619 * Gets a reference (pointer) to the specified general register.
4620 *
4621 * @returns Register reference.
4622 * @param pIemCpu The per CPU data.
4623 * @param iReg The general register.
4624 */
4625IEM_STATIC void *iemGRegRef(PIEMCPU pIemCpu, uint8_t iReg)
4626{
4627 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4628 switch (iReg)
4629 {
4630 case X86_GREG_xAX: return &pCtx->rax;
4631 case X86_GREG_xCX: return &pCtx->rcx;
4632 case X86_GREG_xDX: return &pCtx->rdx;
4633 case X86_GREG_xBX: return &pCtx->rbx;
4634 case X86_GREG_xSP: return &pCtx->rsp;
4635 case X86_GREG_xBP: return &pCtx->rbp;
4636 case X86_GREG_xSI: return &pCtx->rsi;
4637 case X86_GREG_xDI: return &pCtx->rdi;
4638 case X86_GREG_x8: return &pCtx->r8;
4639 case X86_GREG_x9: return &pCtx->r9;
4640 case X86_GREG_x10: return &pCtx->r10;
4641 case X86_GREG_x11: return &pCtx->r11;
4642 case X86_GREG_x12: return &pCtx->r12;
4643 case X86_GREG_x13: return &pCtx->r13;
4644 case X86_GREG_x14: return &pCtx->r14;
4645 case X86_GREG_x15: return &pCtx->r15;
4646 }
4647 AssertFailedReturn(NULL);
4648}
4649
4650
4651/**
4652 * Gets a reference (pointer) to the specified 8-bit general register.
4653 *
4654 * Because of AH, CH, DH and BH we cannot use iemGRegRef directly here.
4655 *
4656 * @returns Register reference.
4657 * @param pIemCpu The per CPU data.
4658 * @param iReg The register.
4659 */
4660IEM_STATIC uint8_t *iemGRegRefU8(PIEMCPU pIemCpu, uint8_t iReg)
4661{
4662 if (pIemCpu->fPrefixes & IEM_OP_PRF_REX)
4663 return (uint8_t *)iemGRegRef(pIemCpu, iReg);
4664
4665 uint8_t *pu8Reg = (uint8_t *)iemGRegRef(pIemCpu, iReg & 3);
4666 if (iReg >= 4)
4667 pu8Reg++;
4668 return pu8Reg;
4669}
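
/*
 * Illustrative sketch (not built): how the legacy high byte registers (AH, CH,
 * DH, BH) map onto the GPRs on a little-endian host when no REX prefix has
 * been decoded.  The iemExample* helper and the register value are made up
 * for illustration only.
 */
#if 0
static void iemExampleHighByteRegMapping(PIEMCPU pIemCpu)
{
    PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
    pCtx->rax = UINT64_C(0x1122334455667788);          /* hypothetical value */
    uint8_t *pbAl = iemGRegRefU8(pIemCpu, 0 /*AL*/);   /* low byte: 0x88 */
    uint8_t *pbAh = iemGRegRefU8(pIemCpu, 4 /*AH*/);   /* next byte up: 0x77 (no REX) */
    Assert(pbAh == pbAl + 1);                          /* AH overlays bits 8..15 of RAX */
}
#endif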
4670
4671
4672/**
4673 * Fetches the value of an 8-bit general register.
4674 *
4675 * @returns The register value.
4676 * @param pIemCpu The per CPU data.
4677 * @param iReg The register.
4678 */
4679IEM_STATIC uint8_t iemGRegFetchU8(PIEMCPU pIemCpu, uint8_t iReg)
4680{
4681 uint8_t const *pbSrc = iemGRegRefU8(pIemCpu, iReg);
4682 return *pbSrc;
4683}
4684
4685
4686/**
4687 * Fetches the value of a 16-bit general register.
4688 *
4689 * @returns The register value.
4690 * @param pIemCpu The per CPU data.
4691 * @param iReg The register.
4692 */
4693IEM_STATIC uint16_t iemGRegFetchU16(PIEMCPU pIemCpu, uint8_t iReg)
4694{
4695 return *(uint16_t *)iemGRegRef(pIemCpu, iReg);
4696}
4697
4698
4699/**
4700 * Fetches the value of a 32-bit general register.
4701 *
4702 * @returns The register value.
4703 * @param pIemCpu The per CPU data.
4704 * @param iReg The register.
4705 */
4706IEM_STATIC uint32_t iemGRegFetchU32(PIEMCPU pIemCpu, uint8_t iReg)
4707{
4708 return *(uint32_t *)iemGRegRef(pIemCpu, iReg);
4709}
4710
4711
4712/**
4713 * Fetches the value of a 64-bit general register.
4714 *
4715 * @returns The register value.
4716 * @param pIemCpu The per CPU data.
4717 * @param iReg The register.
4718 */
4719IEM_STATIC uint64_t iemGRegFetchU64(PIEMCPU pIemCpu, uint8_t iReg)
4720{
4721 return *(uint64_t *)iemGRegRef(pIemCpu, iReg);
4722}
4723
4724
4725/**
4726 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
4727 *
4728 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4729 * segment limit.
4730 *
4731 * @param pIemCpu The per CPU data.
4732 * @param offNextInstr The offset of the next instruction.
4733 */
4734IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS8(PIEMCPU pIemCpu, int8_t offNextInstr)
4735{
4736 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4737 switch (pIemCpu->enmEffOpSize)
4738 {
4739 case IEMMODE_16BIT:
4740 {
4741 uint16_t uNewIp = pCtx->ip + offNextInstr + pIemCpu->offOpcode;
4742 if ( uNewIp > pCtx->cs.u32Limit
4743 && pIemCpu->enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
4744 return iemRaiseGeneralProtectionFault0(pIemCpu);
4745 pCtx->rip = uNewIp;
4746 break;
4747 }
4748
4749 case IEMMODE_32BIT:
4750 {
4751 Assert(pCtx->rip <= UINT32_MAX);
4752 Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
4753
4754 uint32_t uNewEip = pCtx->eip + offNextInstr + pIemCpu->offOpcode;
4755 if (uNewEip > pCtx->cs.u32Limit)
4756 return iemRaiseGeneralProtectionFault0(pIemCpu);
4757 pCtx->rip = uNewEip;
4758 break;
4759 }
4760
4761 case IEMMODE_64BIT:
4762 {
4763 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
4764
4765 uint64_t uNewRip = pCtx->rip + offNextInstr + pIemCpu->offOpcode;
4766 if (!IEM_IS_CANONICAL(uNewRip))
4767 return iemRaiseGeneralProtectionFault0(pIemCpu);
4768 pCtx->rip = uNewRip;
4769 break;
4770 }
4771
4772 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4773 }
4774
4775 pCtx->eflags.Bits.u1RF = 0;
4776 return VINF_SUCCESS;
4777}
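
/*
 * Illustrative sketch (not built): 16-bit IP arithmetic wraps modulo 64K before
 * the CS limit check above.  The instruction length and displacement below are
 * hypothetical example data.
 */
#if 0
static void iemExampleShortJumpWrap(void)
{
    uint16_t const uIp          = 0x0000;  /* hypothetical IP */
    int8_t   const offNextInstr = -3;      /* hypothetical displacement */
    uint8_t  const cbInstr      = 2;       /* opcode + imm8 */
    uint16_t const uNewIp       = uIp + offNextInstr + cbInstr;  /* wraps to 0xffff */
    /* iemRegRipRelativeJumpS8 only raises #GP(0) if uNewIp exceeds CS.limit
       (and the CPU isn't in 64-bit mode). */
    NOREF(uNewIp);
}
#endif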
4778
4779
4780/**
4781 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
4782 *
4783 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4784 * segment limit.
4785 *
4786 * @returns Strict VBox status code.
4787 * @param pIemCpu The per CPU data.
4788 * @param offNextInstr The offset of the next instruction.
4789 */
4790IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS16(PIEMCPU pIemCpu, int16_t offNextInstr)
4791{
4792 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4793 Assert(pIemCpu->enmEffOpSize == IEMMODE_16BIT);
4794
4795 uint16_t uNewIp = pCtx->ip + offNextInstr + pIemCpu->offOpcode;
4796 if ( uNewIp > pCtx->cs.u32Limit
4797 && pIemCpu->enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
4798 return iemRaiseGeneralProtectionFault0(pIemCpu);
4799 /** @todo Test 16-bit jump in 64-bit mode. possible? */
4800 pCtx->rip = uNewIp;
4801 pCtx->eflags.Bits.u1RF = 0;
4802
4803 return VINF_SUCCESS;
4804}
4805
4806
4807/**
4808 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
4809 *
4810 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4811 * segment limit.
4812 *
4813 * @returns Strict VBox status code.
4814 * @param pIemCpu The per CPU data.
4815 * @param offNextInstr The offset of the next instruction.
4816 */
4817IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS32(PIEMCPU pIemCpu, int32_t offNextInstr)
4818{
4819 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4820 Assert(pIemCpu->enmEffOpSize != IEMMODE_16BIT);
4821
4822 if (pIemCpu->enmEffOpSize == IEMMODE_32BIT)
4823 {
4824 Assert(pCtx->rip <= UINT32_MAX); Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
4825
4826 uint32_t uNewEip = pCtx->eip + offNextInstr + pIemCpu->offOpcode;
4827 if (uNewEip > pCtx->cs.u32Limit)
4828 return iemRaiseGeneralProtectionFault0(pIemCpu);
4829 pCtx->rip = uNewEip;
4830 }
4831 else
4832 {
4833 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
4834
4835 uint64_t uNewRip = pCtx->rip + offNextInstr + pIemCpu->offOpcode;
4836 if (!IEM_IS_CANONICAL(uNewRip))
4837 return iemRaiseGeneralProtectionFault0(pIemCpu);
4838 pCtx->rip = uNewRip;
4839 }
4840 pCtx->eflags.Bits.u1RF = 0;
4841 return VINF_SUCCESS;
4842}
4843
4844
4845/**
4846 * Performs a near jump to the specified address.
4847 *
4848 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4849 * segment limit.
4850 *
4851 * @param pIemCpu The per CPU data.
4852 * @param uNewRip The new RIP value.
4853 */
4854IEM_STATIC VBOXSTRICTRC iemRegRipJump(PIEMCPU pIemCpu, uint64_t uNewRip)
4855{
4856 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4857 switch (pIemCpu->enmEffOpSize)
4858 {
4859 case IEMMODE_16BIT:
4860 {
4861 Assert(uNewRip <= UINT16_MAX);
4862 if ( uNewRip > pCtx->cs.u32Limit
4863 && pIemCpu->enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
4864 return iemRaiseGeneralProtectionFault0(pIemCpu);
4865 /** @todo Test 16-bit jump in 64-bit mode. */
4866 pCtx->rip = uNewRip;
4867 break;
4868 }
4869
4870 case IEMMODE_32BIT:
4871 {
4872 Assert(uNewRip <= UINT32_MAX);
4873 Assert(pCtx->rip <= UINT32_MAX);
4874 Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
4875
4876 if (uNewRip > pCtx->cs.u32Limit)
4877 return iemRaiseGeneralProtectionFault0(pIemCpu);
4878 pCtx->rip = uNewRip;
4879 break;
4880 }
4881
4882 case IEMMODE_64BIT:
4883 {
4884 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
4885
4886 if (!IEM_IS_CANONICAL(uNewRip))
4887 return iemRaiseGeneralProtectionFault0(pIemCpu);
4888 pCtx->rip = uNewRip;
4889 break;
4890 }
4891
4892 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4893 }
4894
4895 pCtx->eflags.Bits.u1RF = 0;
4896 return VINF_SUCCESS;
4897}
4898
4899
4900/**
4901 * Get the address of the top of the stack.
4902 *
4903 * @param pIemCpu The per CPU data.
4904 * @param pCtx The CPU context which SP/ESP/RSP should be
4905 * read.
4906 */
4907DECLINLINE(RTGCPTR) iemRegGetEffRsp(PCIEMCPU pIemCpu, PCCPUMCTX pCtx)
4908{
4909 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
4910 return pCtx->rsp;
4911 if (pCtx->ss.Attr.n.u1DefBig)
4912 return pCtx->esp;
4913 return pCtx->sp;
4914}
4915
4916
4917/**
4918 * Updates the RIP/EIP/IP to point to the next instruction.
4919 *
4920 * This function leaves the EFLAGS.RF flag alone.
4921 *
4922 * @param pIemCpu The per CPU data.
4923 * @param cbInstr The number of bytes to add.
4924 */
4925IEM_STATIC void iemRegAddToRipKeepRF(PIEMCPU pIemCpu, uint8_t cbInstr)
4926{
4927 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4928 switch (pIemCpu->enmCpuMode)
4929 {
4930 case IEMMODE_16BIT:
4931 Assert(pCtx->rip <= UINT16_MAX);
4932 pCtx->eip += cbInstr;
4933 pCtx->eip &= UINT32_C(0xffff);
4934 break;
4935
4936 case IEMMODE_32BIT:
4937 pCtx->eip += cbInstr;
4938 Assert(pCtx->rip <= UINT32_MAX);
4939 break;
4940
4941 case IEMMODE_64BIT:
4942 pCtx->rip += cbInstr;
4943 break;
4944 default: AssertFailed();
4945 }
4946}
4947
4948
4949#if 0
4950/**
4951 * Updates the RIP/EIP/IP to point to the next instruction.
4952 *
4953 * @param pIemCpu The per CPU data.
4954 */
4955IEM_STATIC void iemRegUpdateRipKeepRF(PIEMCPU pIemCpu)
4956{
4957 return iemRegAddToRipKeepRF(pIemCpu, pIemCpu->offOpcode);
4958}
4959#endif
4960
4961
4962
4963/**
4964 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
4965 *
4966 * @param pIemCpu The per CPU data.
4967 * @param cbInstr The number of bytes to add.
4968 */
4969IEM_STATIC void iemRegAddToRipAndClearRF(PIEMCPU pIemCpu, uint8_t cbInstr)
4970{
4971 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4972
4973 pCtx->eflags.Bits.u1RF = 0;
4974
4975 /* NB: Must be kept in sync with HM (xxxAdvanceGuestRip). */
4976 switch (pIemCpu->enmCpuMode)
4977 {
4978 /** @todo investigate if EIP or RIP is really incremented. */
4979 case IEMMODE_16BIT:
4980 case IEMMODE_32BIT:
4981 pCtx->eip += cbInstr;
4982 Assert(pCtx->rip <= UINT32_MAX);
4983 break;
4984
4985 case IEMMODE_64BIT:
4986 pCtx->rip += cbInstr;
4987 break;
4988 default: AssertFailed();
4989 }
4990}
4991
4992
4993/**
4994 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
4995 *
4996 * @param pIemCpu The per CPU data.
4997 */
4998IEM_STATIC void iemRegUpdateRipAndClearRF(PIEMCPU pIemCpu)
4999{
5000 return iemRegAddToRipAndClearRF(pIemCpu, pIemCpu->offOpcode);
5001}
5002
5003
5004/**
5005 * Adds to the stack pointer.
5006 *
5007 * @param pIemCpu The per CPU data.
5008 * @param pCtx The CPU context which SP/ESP/RSP should be
5009 * updated.
5010 * @param cbToAdd The number of bytes to add.
5011 */
5012DECLINLINE(void) iemRegAddToRsp(PCIEMCPU pIemCpu, PCPUMCTX pCtx, uint8_t cbToAdd)
5013{
5014 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5015 pCtx->rsp += cbToAdd;
5016 else if (pCtx->ss.Attr.n.u1DefBig)
5017 pCtx->esp += cbToAdd;
5018 else
5019 pCtx->sp += cbToAdd;
5020}
5021
5022
5023/**
5024 * Subtracts from the stack pointer.
5025 *
5026 * @param pIemCpu The per CPU data.
5027 * @param pCtx The CPU context which SP/ESP/RSP should be
5028 * updated.
5029 * @param cbToSub The number of bytes to subtract.
5030 */
5031DECLINLINE(void) iemRegSubFromRsp(PCIEMCPU pIemCpu, PCPUMCTX pCtx, uint8_t cbToSub)
5032{
5033 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5034 pCtx->rsp -= cbToSub;
5035 else if (pCtx->ss.Attr.n.u1DefBig)
5036 pCtx->esp -= cbToSub;
5037 else
5038 pCtx->sp -= cbToSub;
5039}
5040
5041
5042/**
5043 * Adds to the temporary stack pointer.
5044 *
5045 * @param pIemCpu The per CPU data.
5046 * @param pCtx Where to get the current stack mode.
5047 * @param pTmpRsp The temporary SP/ESP/RSP to update.
5048 * @param cbToAdd The number of bytes to add.
5049 */
5050DECLINLINE(void) iemRegAddToRspEx(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint16_t cbToAdd)
5051{
5052 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5053 pTmpRsp->u += cbToAdd;
5054 else if (pCtx->ss.Attr.n.u1DefBig)
5055 pTmpRsp->DWords.dw0 += cbToAdd;
5056 else
5057 pTmpRsp->Words.w0 += cbToAdd;
5058}
5059
5060
5061/**
5062 * Subtracts from the temporary stack pointer.
5063 *
5064 * @param pIemCpu The per CPU data.
5065 * @param pCtx Where to get the current stack mode.
5066 * @param pTmpRsp The temporary SP/ESP/RSP to update.
5067 * @param cbToSub The number of bytes to subtract.
5068 * @remarks The @a cbToSub argument *MUST* be 16-bit, iemCImpl_enter is
5069 * expecting that.
5070 */
5071DECLINLINE(void) iemRegSubFromRspEx(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint16_t cbToSub)
5072{
5073 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5074 pTmpRsp->u -= cbToSub;
5075 else if (pCtx->ss.Attr.n.u1DefBig)
5076 pTmpRsp->DWords.dw0 -= cbToSub;
5077 else
5078 pTmpRsp->Words.w0 -= cbToSub;
5079}
5080
5081
5082/**
5083 * Calculates the effective stack address for a push of the specified size as
5084 * well as the new RSP value (upper bits may be masked).
5085 *
5086 * @returns Effective stack address for the push.
5087 * @param pIemCpu The IEM per CPU data.
5088 * @param pCtx Where to get the current stack mode.
5089 * @param cbItem The size of the stack item to push.
5090 * @param puNewRsp Where to return the new RSP value.
5091 */
5092DECLINLINE(RTGCPTR) iemRegGetRspForPush(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
5093{
5094 RTUINT64U uTmpRsp;
5095 RTGCPTR GCPtrTop;
5096 uTmpRsp.u = pCtx->rsp;
5097
5098 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5099 GCPtrTop = uTmpRsp.u -= cbItem;
5100 else if (pCtx->ss.Attr.n.u1DefBig)
5101 GCPtrTop = uTmpRsp.DWords.dw0 -= cbItem;
5102 else
5103 GCPtrTop = uTmpRsp.Words.w0 -= cbItem;
5104 *puNewRsp = uTmpRsp.u;
5105 return GCPtrTop;
5106}
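
/*
 * Illustrative sketch (not built): pushing a 4 byte item with a 16-bit stack
 * (SS.D=0).  Only the low word of RSP participates and wraps; the upper bits
 * are preserved in the value handed back via puNewRsp.  The RSP value below
 * is hypothetical example data.
 */
#if 0
static void iemExamplePush16BitStack(void)
{
    RTUINT64U uTmpRsp;
    uTmpRsp.u = UINT64_C(0x0000000000010002);  /* hypothetical RSP */
    uTmpRsp.Words.w0 -= 4;                     /* SP: 0x0002 -> 0xfffe */
    Assert(uTmpRsp.u == UINT64_C(0x000000000001fffe));
    /* iemRegGetRspForPush would return 0xfffe as the effective (pre-segmentation)
       stack address and 0x1fffe as the new RSP. */
}
#endif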
5107
5108
5109/**
5110 * Gets the current stack pointer and calculates the value after a pop of the
5111 * specified size.
5112 *
5113 * @returns Current stack pointer.
5114 * @param pIemCpu The per CPU data.
5115 * @param pCtx Where to get the current stack mode.
5116 * @param cbItem The size of the stack item to pop.
5117 * @param puNewRsp Where to return the new RSP value.
5118 */
5119DECLINLINE(RTGCPTR) iemRegGetRspForPop(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
5120{
5121 RTUINT64U uTmpRsp;
5122 RTGCPTR GCPtrTop;
5123 uTmpRsp.u = pCtx->rsp;
5124
5125 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5126 {
5127 GCPtrTop = uTmpRsp.u;
5128 uTmpRsp.u += cbItem;
5129 }
5130 else if (pCtx->ss.Attr.n.u1DefBig)
5131 {
5132 GCPtrTop = uTmpRsp.DWords.dw0;
5133 uTmpRsp.DWords.dw0 += cbItem;
5134 }
5135 else
5136 {
5137 GCPtrTop = uTmpRsp.Words.w0;
5138 uTmpRsp.Words.w0 += cbItem;
5139 }
5140 *puNewRsp = uTmpRsp.u;
5141 return GCPtrTop;
5142}
5143
5144
5145/**
5146 * Calculates the effective stack address for a push of the specified size as
5147 * well as the new temporary RSP value (upper bits may be masked).
5148 *
5149 * @returns Effective stack address for the push.
5150 * @param pIemCpu The per CPU data.
5151 * @param pCtx Where to get the current stack mode.
5152 * @param pTmpRsp The temporary stack pointer. This is updated.
5153 * @param cbItem The size of the stack item to push.
5154 */
5155DECLINLINE(RTGCPTR) iemRegGetRspForPushEx(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint8_t cbItem)
5156{
5157 RTGCPTR GCPtrTop;
5158
5159 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5160 GCPtrTop = pTmpRsp->u -= cbItem;
5161 else if (pCtx->ss.Attr.n.u1DefBig)
5162 GCPtrTop = pTmpRsp->DWords.dw0 -= cbItem;
5163 else
5164 GCPtrTop = pTmpRsp->Words.w0 -= cbItem;
5165 return GCPtrTop;
5166}
5167
5168
5169/**
5170 * Gets the effective stack address for a pop of the specified size and
5171 * calculates and updates the temporary RSP.
5172 *
5173 * @returns Current stack pointer.
5174 * @param pIemCpu The per CPU data.
5175 * @param pCtx Where to get the current stack mode.
5176 * @param pTmpRsp The temporary stack pointer. This is updated.
5177 * @param cbItem The size of the stack item to pop.
5178 */
5179DECLINLINE(RTGCPTR) iemRegGetRspForPopEx(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint8_t cbItem)
5180{
5181 RTGCPTR GCPtrTop;
5182 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5183 {
5184 GCPtrTop = pTmpRsp->u;
5185 pTmpRsp->u += cbItem;
5186 }
5187 else if (pCtx->ss.Attr.n.u1DefBig)
5188 {
5189 GCPtrTop = pTmpRsp->DWords.dw0;
5190 pTmpRsp->DWords.dw0 += cbItem;
5191 }
5192 else
5193 {
5194 GCPtrTop = pTmpRsp->Words.w0;
5195 pTmpRsp->Words.w0 += cbItem;
5196 }
5197 return GCPtrTop;
5198}
5199
5200/** @} */
5201
5202
5203/** @name FPU access and helpers.
5204 *
5205 * @{
5206 */
5207
5208
5209/**
5210 * Hook for preparing to use the host FPU.
5211 *
5212 * This is necessary in ring-0 and raw-mode context.
5213 *
5214 * @param pIemCpu The IEM per CPU data.
5215 */
5216DECLINLINE(void) iemFpuPrepareUsage(PIEMCPU pIemCpu)
5217{
5218#ifdef IN_RING3
5219 NOREF(pIemCpu);
5220#else
5221/** @todo RZ: FIXME */
5222//# error "Implement me"
5223#endif
5224}
5225
5226
5227/**
5228 * Hook for preparing to use the host FPU for SSE instructions.
5229 *
5230 * This is necessary in ring-0 and raw-mode context.
5231 *
5232 * @param pIemCpu The IEM per CPU data.
5233 */
5234DECLINLINE(void) iemFpuPrepareUsageSse(PIEMCPU pIemCpu)
5235{
5236 iemFpuPrepareUsage(pIemCpu);
5237}
5238
5239
5240/**
5241 * Stores a QNaN value into a FPU register.
5242 *
5243 * @param pReg Pointer to the register.
5244 */
5245DECLINLINE(void) iemFpuStoreQNan(PRTFLOAT80U pReg)
5246{
5247 pReg->au32[0] = UINT32_C(0x00000000);
5248 pReg->au32[1] = UINT32_C(0xc0000000);
5249 pReg->au16[4] = UINT16_C(0xffff);
5250}
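
/*
 * Illustrative sketch (not built): the three stores above yield the x87 "real
 * indefinite" QNaN, i.e. sign=1, exponent=0x7fff, mantissa=0xc000000000000000.
 */
#if 0
static void iemExampleQNanPattern(void)
{
    RTFLOAT80U r80;
    iemFpuStoreQNan(&r80);
    Assert(r80.s.uExponent   == 0x7fff);
    Assert(r80.s.u64Mantissa == UINT64_C(0xc000000000000000));
}
#endif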
5251
5252
5253/**
5254 * Updates the FOP, FPU.CS and FPUIP registers.
5255 *
5256 * @param pIemCpu The IEM per CPU data.
5257 * @param pCtx The CPU context.
5258 * @param pFpuCtx The FPU context.
5259 */
5260DECLINLINE(void) iemFpuUpdateOpcodeAndIpWorker(PIEMCPU pIemCpu, PCPUMCTX pCtx, PX86FXSTATE pFpuCtx)
5261{
5262 pFpuCtx->FOP = pIemCpu->abOpcode[pIemCpu->offFpuOpcode]
5263 | ((uint16_t)(pIemCpu->abOpcode[pIemCpu->offFpuOpcode - 1] & 0x7) << 8);
5264 /** @todo x87.CS and FPUIP need to be kept separately. */
5265 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
5266 {
5267 /** @todo Testcase: we make assumptions here about how FPUIP and FPUDP are
5268 * handled in real mode, based on the fnsave and fnstenv images. */
5269 pFpuCtx->CS = 0;
5270 pFpuCtx->FPUIP = pCtx->eip | ((uint32_t)pCtx->cs.Sel << 4);
5271 }
5272 else
5273 {
5274 pFpuCtx->CS = pCtx->cs.Sel;
5275 pFpuCtx->FPUIP = pCtx->rip;
5276 }
5277}
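
/*
 * Illustrative sketch (not built): in real mode the FPUIP image stored above is
 * the 20-bit linear address of the instruction.  The CS:IP values below are
 * hypothetical example data chosen so the OR equals the usual CS*16+IP sum.
 */
#if 0
static void iemExampleRealModeFpuIp(void)
{
    uint32_t const uFpuIp = UINT32_C(0x0005) | (UINT32_C(0x1234) << 4);  /* 0x12345 */
    Assert(uFpuIp == UINT32_C(0x1234) * 16 + UINT32_C(0x0005));
    NOREF(uFpuIp);
}
#endif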
5278
5279
5280/**
5281 * Updates the x87.DS and FPUDP registers.
5282 *
5283 * @param pIemCpu The IEM per CPU data.
5284 * @param pCtx The CPU context.
5285 * @param pFpuCtx The FPU context.
5286 * @param iEffSeg The effective segment register.
5287 * @param GCPtrEff The effective address relative to @a iEffSeg.
5288 */
5289DECLINLINE(void) iemFpuUpdateDP(PIEMCPU pIemCpu, PCPUMCTX pCtx, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5290{
5291 RTSEL sel;
5292 switch (iEffSeg)
5293 {
5294 case X86_SREG_DS: sel = pCtx->ds.Sel; break;
5295 case X86_SREG_SS: sel = pCtx->ss.Sel; break;
5296 case X86_SREG_CS: sel = pCtx->cs.Sel; break;
5297 case X86_SREG_ES: sel = pCtx->es.Sel; break;
5298 case X86_SREG_FS: sel = pCtx->fs.Sel; break;
5299 case X86_SREG_GS: sel = pCtx->gs.Sel; break;
5300 default:
5301 AssertMsgFailed(("%d\n", iEffSeg));
5302 sel = pCtx->ds.Sel;
5303 }
5304 /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
5305 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
5306 {
5307 pFpuCtx->DS = 0;
5308 pFpuCtx->FPUDP = (uint32_t)GCPtrEff | ((uint32_t)sel << 4);
5309 }
5310 else
5311 {
5312 pFpuCtx->DS = sel;
5313 pFpuCtx->FPUDP = GCPtrEff;
5314 }
5315}
5316
5317
5318/**
5319 * Rotates the stack registers in the push direction.
5320 *
5321 * @param pFpuCtx The FPU context.
5322 * @remarks This is a complete waste of time, but fxsave stores the registers in
5323 * stack order.
5324 */
5325DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
5326{
5327 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
5328 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
5329 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
5330 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
5331 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
5332 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
5333 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
5334 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
5335 pFpuCtx->aRegs[0].r80 = r80Tmp;
5336}
5337
5338
5339/**
5340 * Rotates the stack registers in the pop direction.
5341 *
5342 * @param pFpuCtx The FPU context.
5343 * @remarks This is a complete waste of time, but fxsave stores the registers in
5344 * stack order.
5345 */
5346DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
5347{
5348 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
5349 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
5350 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
5351 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
5352 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
5353 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
5354 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
5355 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
5356 pFpuCtx->aRegs[7].r80 = r80Tmp;
5357}
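
/*
 * Illustrative sketch (not built): aRegs[] is kept in ST(i) order, so aRegs[0]
 * is always ST0.  The physical x87 register backing ST(i) is (TOP + i) & 7,
 * which is why every TOP change is paired with one of the rotations above.
 * The TOP value below is hypothetical example data.
 */
#if 0
static void iemExampleStRegToPhysReg(void)
{
    uint16_t const iTop = 6;                                     /* hypothetical TOP */
    uint16_t const iReg = (iTop + 1 /*ST(1)*/) & X86_FSW_TOP_SMASK;
    Assert(iReg == 7);                                           /* physical register 7 */
}
#endif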
5358
5359
5360/**
5361 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
5362 * exception prevents it.
5363 *
5364 * @param pIemCpu The IEM per CPU data.
5365 * @param pResult The FPU operation result to push.
5366 * @param pFpuCtx The FPU context.
5367 */
5368IEM_STATIC void iemFpuMaybePushResult(PIEMCPU pIemCpu, PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx)
5369{
5370 /* Update FSW and bail if there are pending exceptions afterwards. */
5371 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
5372 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
5373 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
5374 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
5375 {
5376 pFpuCtx->FSW = fFsw;
5377 return;
5378 }
5379
5380 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
5381 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
5382 {
5383 /* All is fine, push the actual value. */
5384 pFpuCtx->FTW |= RT_BIT(iNewTop);
5385 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
5386 }
5387 else if (pFpuCtx->FCW & X86_FCW_IM)
5388 {
5389 /* Masked stack overflow, push QNaN. */
5390 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
5391 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5392 }
5393 else
5394 {
5395 /* Raise stack overflow, don't push anything. */
5396 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
5397 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
5398 return;
5399 }
5400
5401 fFsw &= ~X86_FSW_TOP_MASK;
5402 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
5403 pFpuCtx->FSW = fFsw;
5404
5405 iemFpuRotateStackPush(pFpuCtx);
5406}
5407
5408
5409/**
5410 * Stores a result in a FPU register and updates the FSW and FTW.
5411 *
5412 * @param pFpuCtx The FPU context.
5413 * @param pResult The result to store.
5414 * @param iStReg Which FPU register to store it in.
5415 */
5416IEM_STATIC void iemFpuStoreResultOnly(PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg)
5417{
5418 Assert(iStReg < 8);
5419 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
5420 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5421 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_TOP_MASK;
5422 pFpuCtx->FTW |= RT_BIT(iReg);
5423 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
5424}
5425
5426
5427/**
5428 * Only updates the FPU status word (FSW) with the result of the current
5429 * instruction.
5430 *
5431 * @param pFpuCtx The FPU context.
5432 * @param u16FSW The FSW output of the current instruction.
5433 */
5434IEM_STATIC void iemFpuUpdateFSWOnly(PX86FXSTATE pFpuCtx, uint16_t u16FSW)
5435{
5436 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5437 pFpuCtx->FSW |= u16FSW & ~X86_FSW_TOP_MASK;
5438}
5439
5440
5441/**
5442 * Pops one item off the FPU stack if no pending exception prevents it.
5443 *
5444 * @param pFpuCtx The FPU context.
5445 */
5446IEM_STATIC void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx)
5447{
5448 /* Check pending exceptions. */
5449 uint16_t uFSW = pFpuCtx->FSW;
5450 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
5451 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
5452 return;
5453
5454 /* TOP++ (pop). */
5455 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
5456 uFSW &= ~X86_FSW_TOP_MASK;
5457 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
5458 pFpuCtx->FSW = uFSW;
5459
5460 /* Mark the previous ST0 as empty. */
5461 iOldTop >>= X86_FSW_TOP_SHIFT;
5462 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
5463
5464 /* Rotate the registers. */
5465 iemFpuRotateStackPop(pFpuCtx);
5466}
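
/*
 * Illustrative sketch (not built): adding 9 inside the 3-bit TOP field is the
 * same as adding 1 modulo 8, which is how the pop above advances TOP.  The TOP
 * value below is hypothetical example data.
 */
#if 0
static void iemExampleTopIncrement(void)
{
    uint16_t uFsw = UINT16_C(7) << X86_FSW_TOP_SHIFT;                        /* TOP=7 */
    uFsw = (uFsw + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;   /* TOP=0 */
    Assert(X86_FSW_TOP_GET(uFsw) == 0);
}
#endif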
5467
5468
5469/**
5470 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
5471 *
5472 * @param pIemCpu The IEM per CPU data.
5473 * @param pResult The FPU operation result to push.
5474 */
5475IEM_STATIC void iemFpuPushResult(PIEMCPU pIemCpu, PIEMFPURESULT pResult)
5476{
5477 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5478 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5479 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5480 iemFpuMaybePushResult(pIemCpu, pResult, pFpuCtx);
5481}
5482
5483
5484/**
5485 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
5486 * and sets FPUDP and FPUDS.
5487 *
5488 * @param pIemCpu The IEM per CPU data.
5489 * @param pResult The FPU operation result to push.
5490 * @param iEffSeg The effective segment register.
5491 * @param GCPtrEff The effective address relative to @a iEffSeg.
5492 */
5493IEM_STATIC void iemFpuPushResultWithMemOp(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5494{
5495 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5496 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5497 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5498 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5499 iemFpuMaybePushResult(pIemCpu, pResult, pFpuCtx);
5500}
5501
5502
5503/**
5504 * Replaces ST0 with the first value and pushes the second onto the FPU stack,
5505 * unless a pending exception prevents it.
5506 *
5507 * @param pIemCpu The IEM per CPU data.
5508 * @param pResult The FPU operation result to store and push.
5509 */
5510IEM_STATIC void iemFpuPushResultTwo(PIEMCPU pIemCpu, PIEMFPURESULTTWO pResult)
5511{
5512 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5513 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5514 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5515
5516 /* Update FSW and bail if there are pending exceptions afterwards. */
5517 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
5518 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
5519 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
5520 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
5521 {
5522 pFpuCtx->FSW = fFsw;
5523 return;
5524 }
5525
5526 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
5527 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
5528 {
5529 /* All is fine, push the actual value. */
5530 pFpuCtx->FTW |= RT_BIT(iNewTop);
5531 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
5532 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
5533 }
5534 else if (pFpuCtx->FCW & X86_FCW_IM)
5535 {
5536 /* Masked stack overflow, push QNaN. */
5537 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
5538 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
5539 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5540 }
5541 else
5542 {
5543 /* Raise stack overflow, don't push anything. */
5544 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
5545 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
5546 return;
5547 }
5548
5549 fFsw &= ~X86_FSW_TOP_MASK;
5550 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
5551 pFpuCtx->FSW = fFsw;
5552
5553 iemFpuRotateStackPush(pFpuCtx);
5554}
5555
5556
5557/**
5558 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
5559 * FOP.
5560 *
5561 * @param pIemCpu The IEM per CPU data.
5562 * @param pResult The result to store.
5563 * @param iStReg Which FPU register to store it in.
5564 */
5565IEM_STATIC void iemFpuStoreResult(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iStReg)
5566{
5567 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5568 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5569 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5570 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
5571}
5572
5573
5574/**
5575 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
5576 * FOP, and then pops the stack.
5577 *
5578 * @param pIemCpu The IEM per CPU data.
5579 * @param pResult The result to store.
5580 * @param iStReg Which FPU register to store it in.
5581 */
5582IEM_STATIC void iemFpuStoreResultThenPop(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iStReg)
5583{
5584 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5585 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5586 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5587 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
5588 iemFpuMaybePopOne(pFpuCtx);
5589}
5590
5591
5592/**
5593 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
5594 * FPUDP, and FPUDS.
5595 *
5596 * @param pIemCpu The IEM per CPU data.
5597 * @param pResult The result to store.
5598 * @param iStReg Which FPU register to store it in.
5599 * @param iEffSeg The effective memory operand selector register.
5600 * @param GCPtrEff The effective memory operand offset.
5601 */
5602IEM_STATIC void iemFpuStoreResultWithMemOp(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iStReg,
5603 uint8_t iEffSeg, RTGCPTR GCPtrEff)
5604{
5605 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5606 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5607 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5608 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5609 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
5610}
5611
5612
5613/**
5614 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
5615 * FPUDP, and FPUDS, and then pops the stack.
5616 *
5617 * @param pIemCpu The IEM per CPU data.
5618 * @param pResult The result to store.
5619 * @param iStReg Which FPU register to store it in.
5620 * @param iEffSeg The effective memory operand selector register.
5621 * @param GCPtrEff The effective memory operand offset.
5622 */
5623IEM_STATIC void iemFpuStoreResultWithMemOpThenPop(PIEMCPU pIemCpu, PIEMFPURESULT pResult,
5624 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5625{
5626 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5627 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5628 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5629 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5630 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
5631 iemFpuMaybePopOne(pFpuCtx);
5632}
5633
5634
5635/**
5636 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
5637 *
5638 * @param pIemCpu The IEM per CPU data.
5639 */
5640IEM_STATIC void iemFpuUpdateOpcodeAndIp(PIEMCPU pIemCpu)
5641{
5642 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5643 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5644 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5645}
5646
5647
5648/**
5649 * Marks the specified stack register as free (for FFREE).
5650 *
5651 * @param pIemCpu The IEM per CPU data.
5652 * @param iStReg The register to free.
5653 */
5654IEM_STATIC void iemFpuStackFree(PIEMCPU pIemCpu, uint8_t iStReg)
5655{
5656 Assert(iStReg < 8);
5657 PX86FXSTATE pFpuCtx = &pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87;
5658 uint8_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
5659 pFpuCtx->FTW &= ~RT_BIT(iReg);
5660}
5661
5662
5663/**
5664 * Increments FSW.TOP, i.e. pops an item off the stack without freeing it.
5665 *
5666 * @param pIemCpu The IEM per CPU data.
5667 */
5668IEM_STATIC void iemFpuStackIncTop(PIEMCPU pIemCpu)
5669{
5670 PX86FXSTATE pFpuCtx = &pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87;
5671 uint16_t uFsw = pFpuCtx->FSW;
5672 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
5673 uTop = (uTop + (1 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
5674 uFsw &= ~X86_FSW_TOP_MASK;
5675 uFsw |= uTop;
5676 pFpuCtx->FSW = uFsw;
5677}
5678
5679
5680/**
5681 * Decrements FSW.TOP, i.e. pushes an item onto the stack without storing anything.
5682 *
5683 * @param pIemCpu The IEM per CPU data.
5684 */
5685IEM_STATIC void iemFpuStackDecTop(PIEMCPU pIemCpu)
5686{
5687 PX86FXSTATE pFpuCtx = &pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87;
5688 uint16_t uFsw = pFpuCtx->FSW;
5689 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
5690 uTop = (uTop + (7 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
5691 uFsw &= ~X86_FSW_TOP_MASK;
5692 uFsw |= uTop;
5693 pFpuCtx->FSW = uFsw;
5694}
5695
5696
5697/**
5698 * Updates the FSW, FOP, FPUIP, and FPUCS.
5699 *
5700 * @param pIemCpu The IEM per CPU data.
5701 * @param u16FSW The FSW from the current instruction.
5702 */
5703IEM_STATIC void iemFpuUpdateFSW(PIEMCPU pIemCpu, uint16_t u16FSW)
5704{
5705 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5706 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5707 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5708 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
5709}
5710
5711
5712/**
5713 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
5714 *
5715 * @param pIemCpu The IEM per CPU data.
5716 * @param u16FSW The FSW from the current instruction.
5717 */
5718IEM_STATIC void iemFpuUpdateFSWThenPop(PIEMCPU pIemCpu, uint16_t u16FSW)
5719{
5720 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5721 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5722 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5723 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
5724 iemFpuMaybePopOne(pFpuCtx);
5725}
5726
5727
5728/**
5729 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
5730 *
5731 * @param pIemCpu The IEM per CPU data.
5732 * @param u16FSW The FSW from the current instruction.
5733 * @param iEffSeg The effective memory operand selector register.
5734 * @param GCPtrEff The effective memory operand offset.
5735 */
5736IEM_STATIC void iemFpuUpdateFSWWithMemOp(PIEMCPU pIemCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5737{
5738 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5739 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5740 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5741 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5742 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
5743}
5744
5745
5746/**
5747 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
5748 *
5749 * @param pIemCpu The IEM per CPU data.
5750 * @param u16FSW The FSW from the current instruction.
5751 */
5752IEM_STATIC void iemFpuUpdateFSWThenPopPop(PIEMCPU pIemCpu, uint16_t u16FSW)
5753{
5754 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5755 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5756 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5757 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
5758 iemFpuMaybePopOne(pFpuCtx);
5759 iemFpuMaybePopOne(pFpuCtx);
5760}
5761
5762
5763/**
5764 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
5765 *
5766 * @param pIemCpu The IEM per CPU data.
5767 * @param u16FSW The FSW from the current instruction.
5768 * @param iEffSeg The effective memory operand selector register.
5769 * @param GCPtrEff The effective memory operand offset.
5770 */
5771IEM_STATIC void iemFpuUpdateFSWWithMemOpThenPop(PIEMCPU pIemCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5772{
5773 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5774 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5775 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5776 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5777 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
5778 iemFpuMaybePopOne(pFpuCtx);
5779}
5780
5781
5782/**
5783 * Worker routine for raising an FPU stack underflow exception.
5784 *
5785 * @param pIemCpu The IEM per CPU data.
5786 * @param pFpuCtx The FPU context.
5787 * @param iStReg The stack register being accessed.
5788 */
5789IEM_STATIC void iemFpuStackUnderflowOnly(PIEMCPU pIemCpu, PX86FXSTATE pFpuCtx, uint8_t iStReg)
5790{
5791 Assert(iStReg < 8 || iStReg == UINT8_MAX);
5792 if (pFpuCtx->FCW & X86_FCW_IM)
5793 {
5794 /* Masked underflow. */
5795 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5796 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5797 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
5798 if (iStReg != UINT8_MAX)
5799 {
5800 pFpuCtx->FTW |= RT_BIT(iReg);
5801 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
5802 }
5803 }
5804 else
5805 {
5806 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5807 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5808 }
5809}
5810
5811
5812/**
5813 * Raises a FPU stack underflow exception.
5814 *
5815 * @param pIemCpu The IEM per CPU data.
5816 * @param iStReg The destination register that should be loaded
5817 * with QNaN if \#IS is not masked. Specify
5818 * UINT8_MAX if none (like for fcom).
5819 */
5820DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflow(PIEMCPU pIemCpu, uint8_t iStReg)
5821{
5822 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5823 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5824 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5825 iemFpuStackUnderflowOnly(pIemCpu, pFpuCtx, iStReg);
5826}
5827
5828
5829DECL_NO_INLINE(IEM_STATIC, void)
5830iemFpuStackUnderflowWithMemOp(PIEMCPU pIemCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5831{
5832 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5833 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5834 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5835 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5836 iemFpuStackUnderflowOnly(pIemCpu, pFpuCtx, iStReg);
5837}
5838
5839
5840DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPop(PIEMCPU pIemCpu, uint8_t iStReg)
5841{
5842 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5843 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5844 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5845 iemFpuStackUnderflowOnly(pIemCpu, pFpuCtx, iStReg);
5846 iemFpuMaybePopOne(pFpuCtx);
5847}
5848
5849
5850DECL_NO_INLINE(IEM_STATIC, void)
5851iemFpuStackUnderflowWithMemOpThenPop(PIEMCPU pIemCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5852{
5853 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5854 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5855 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5856 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5857 iemFpuStackUnderflowOnly(pIemCpu, pFpuCtx, iStReg);
5858 iemFpuMaybePopOne(pFpuCtx);
5859}
5860
5861
5862DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPopPop(PIEMCPU pIemCpu)
5863{
5864 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5865 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5866 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5867 iemFpuStackUnderflowOnly(pIemCpu, pFpuCtx, UINT8_MAX);
5868 iemFpuMaybePopOne(pFpuCtx);
5869 iemFpuMaybePopOne(pFpuCtx);
5870}
5871
5872
5873DECL_NO_INLINE(IEM_STATIC, void)
5874iemFpuStackPushUnderflow(PIEMCPU pIemCpu)
5875{
5876 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5877 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5878 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5879
5880 if (pFpuCtx->FCW & X86_FCW_IM)
5881 {
5882 /* Masked stack underflow - Push QNaN. */
5883 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5884 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5885 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5886 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5887 pFpuCtx->FTW |= RT_BIT(iNewTop);
5888 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5889 iemFpuRotateStackPush(pFpuCtx);
5890 }
5891 else
5892 {
5893 /* Exception pending - don't change TOP or the register stack. */
5894 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5895 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5896 }
5897}
5898
5899
5900DECL_NO_INLINE(IEM_STATIC, void)
5901iemFpuStackPushUnderflowTwo(PIEMCPU pIemCpu)
5902{
5903 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5904 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5905 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5906
5907 if (pFpuCtx->FCW & X86_FCW_IM)
5908 {
5909 /* Masked stack underflow - Push QNaN. */
5910 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5911 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5912 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5913 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5914 pFpuCtx->FTW |= RT_BIT(iNewTop);
5915 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
5916 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5917 iemFpuRotateStackPush(pFpuCtx);
5918 }
5919 else
5920 {
5921 /* Exception pending - don't change TOP or the register stack. */
5922 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5923 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5924 }
5925}
5926
5927
5928/**
5929 * Worker routine for raising an FPU stack overflow exception on a push.
5930 *
5931 * @param pFpuCtx The FPU context.
5932 */
5933IEM_STATIC void iemFpuStackPushOverflowOnly(PX86FXSTATE pFpuCtx)
5934{
5935 if (pFpuCtx->FCW & X86_FCW_IM)
5936 {
5937 /* Masked overflow. */
5938 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5939 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5940 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
5941 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5942 pFpuCtx->FTW |= RT_BIT(iNewTop);
5943 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5944 iemFpuRotateStackPush(pFpuCtx);
5945 }
5946 else
5947 {
5948 /* Exception pending - don't change TOP or the register stack. */
5949 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5950 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5951 }
5952}
5953
5954
5955/**
5956 * Raises a FPU stack overflow exception on a push.
5957 *
5958 * @param pIemCpu The IEM per CPU data.
5959 */
5960DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackPushOverflow(PIEMCPU pIemCpu)
5961{
5962 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5963 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5964 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5965 iemFpuStackPushOverflowOnly(pFpuCtx);
5966}
5967
5968
5969/**
5970 * Raises a FPU stack overflow exception on a push with a memory operand.
5971 *
5972 * @param pIemCpu The IEM per CPU data.
5973 * @param iEffSeg The effective memory operand selector register.
5974 * @param GCPtrEff The effective memory operand offset.
5975 */
5976DECL_NO_INLINE(IEM_STATIC, void)
5977iemFpuStackPushOverflowWithMemOp(PIEMCPU pIemCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5978{
5979 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5980 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5981 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5982 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5983 iemFpuStackPushOverflowOnly(pFpuCtx);
5984}
5985
5986
5987IEM_STATIC int iemFpuStRegNotEmpty(PIEMCPU pIemCpu, uint8_t iStReg)
5988{
5989 PX86FXSTATE pFpuCtx = &pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87;
5990 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
5991 if (pFpuCtx->FTW & RT_BIT(iReg))
5992 return VINF_SUCCESS;
5993 return VERR_NOT_FOUND;
5994}
5995
5996
5997IEM_STATIC int iemFpuStRegNotEmptyRef(PIEMCPU pIemCpu, uint8_t iStReg, PCRTFLOAT80U *ppRef)
5998{
5999 PX86FXSTATE pFpuCtx = &pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87;
6000 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
6001 if (pFpuCtx->FTW & RT_BIT(iReg))
6002 {
6003 *ppRef = &pFpuCtx->aRegs[iStReg].r80;
6004 return VINF_SUCCESS;
6005 }
6006 return VERR_NOT_FOUND;
6007}
6008
6009
6010IEM_STATIC int iemFpu2StRegsNotEmptyRef(PIEMCPU pIemCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0,
6011 uint8_t iStReg1, PCRTFLOAT80U *ppRef1)
6012{
6013 PX86FXSTATE pFpuCtx = &pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87;
6014 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
6015 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
6016 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
6017 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
6018 {
6019 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
6020 *ppRef1 = &pFpuCtx->aRegs[iStReg1].r80;
6021 return VINF_SUCCESS;
6022 }
6023 return VERR_NOT_FOUND;
6024}
6025
6026
6027IEM_STATIC int iemFpu2StRegsNotEmptyRefFirst(PIEMCPU pIemCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0, uint8_t iStReg1)
6028{
6029 PX86FXSTATE pFpuCtx = &pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87;
6030 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
6031 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
6032 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
6033 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
6034 {
6035 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
6036 return VINF_SUCCESS;
6037 }
6038 return VERR_NOT_FOUND;
6039}
6040
6041
6042/**
6043 * Updates the FPU exception status after FCW is changed.
6044 *
6045 * @param pFpuCtx The FPU context.
6046 */
6047IEM_STATIC void iemFpuRecalcExceptionStatus(PX86FXSTATE pFpuCtx)
6048{
6049 uint16_t u16Fsw = pFpuCtx->FSW;
6050 if ((u16Fsw & X86_FSW_XCPT_MASK) & ~(pFpuCtx->FCW & X86_FCW_XCPT_MASK))
6051 u16Fsw |= X86_FSW_ES | X86_FSW_B;
6052 else
6053 u16Fsw &= ~(X86_FSW_ES | X86_FSW_B);
6054 pFpuCtx->FSW = u16Fsw;
6055}
6056
6057
6058/**
6059 * Calculates the full FTW (FPU tag word) for use in FNSTENV and FNSAVE.
6060 *
6061 * @returns The full FTW.
6062 * @param pFpuCtx The FPU context.
6063 */
6064IEM_STATIC uint16_t iemFpuCalcFullFtw(PCX86FXSTATE pFpuCtx)
6065{
6066 uint8_t const u8Ftw = (uint8_t)pFpuCtx->FTW;
6067 uint16_t u16Ftw = 0;
6068 unsigned const iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
6069 for (unsigned iSt = 0; iSt < 8; iSt++)
6070 {
6071 unsigned const iReg = (iSt + iTop) & 7;
6072 if (!(u8Ftw & RT_BIT(iReg)))
6073 u16Ftw |= 3 << (iReg * 2); /* empty */
6074 else
6075 {
6076 uint16_t uTag;
6077 PCRTFLOAT80U const pr80Reg = &pFpuCtx->aRegs[iSt].r80;
6078 if (pr80Reg->s.uExponent == 0x7fff)
6079 uTag = 2; /* Exponent is all 1's => Special. */
6080 else if (pr80Reg->s.uExponent == 0x0000)
6081 {
6082 if (pr80Reg->s.u64Mantissa == 0x0000)
6083 uTag = 1; /* All bits are zero => Zero. */
6084 else
6085 uTag = 2; /* Must be special. */
6086 }
6087 else if (pr80Reg->s.u64Mantissa & RT_BIT_64(63)) /* The J bit. */
6088 uTag = 0; /* Valid. */
6089 else
6090 uTag = 2; /* Must be special. */
6091
6092 u16Ftw |= uTag << (iReg * 2);
6093 }
6094 }
6095
6096 return u16Ftw;
6097}
6098
6099
6100/**
6101 * Converts a full FTW to a compressed one (for use in FLDENV and FRSTOR).
6102 *
6103 * @returns The compressed FTW.
6104 * @param u16FullFtw The full FTW to convert.
6105 */
6106IEM_STATIC uint16_t iemFpuCompressFtw(uint16_t u16FullFtw)
6107{
6108 uint8_t u8Ftw = 0;
6109 for (unsigned i = 0; i < 8; i++)
6110 {
6111 if ((u16FullFtw & 3) != 3 /*empty*/)
6112 u8Ftw |= RT_BIT(i);
6113 u16FullFtw >>= 2;
6114 }
6115
6116 return u8Ftw;
6117}
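
/*
 * Illustrative sketch (not built): the full tag values are 0=valid, 1=zero,
 * 2=special and 3=empty.  With only one occupied register (physical register 7,
 * e.g. ST0 when TOP=7) the full FTW is 0x3fff and compresses to a single bit.
 * The values below are hypothetical example data.
 */
#if 0
static void iemExampleFtwCompression(void)
{
    uint16_t const u16FullFtw = UINT16_C(0x3fff);    /* regs 0..6 empty, reg 7 valid */
    Assert(iemFpuCompressFtw(u16FullFtw) == 0x80);   /* only bit 7 set */
}
#endif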
6118
6119/** @} */
6120
6121
6122/** @name Memory access.
6123 *
6124 * @{
6125 */
6126
6127
6128/**
6129 * Updates the IEMCPU::cbWritten counter if applicable.
6130 *
6131 * @param pIemCpu The IEM per CPU data.
6132 * @param fAccess The access being accounted for.
6133 * @param cbMem The access size.
6134 */
6135DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PIEMCPU pIemCpu, uint32_t fAccess, size_t cbMem)
6136{
6137 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
6138 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
6139 pIemCpu->cbWritten += (uint32_t)cbMem;
6140}
6141
6142
6143/**
6144 * Checks if the given segment can be written to, raising the appropriate
6145 * exception if not.
6146 *
6147 * @returns VBox strict status code.
6148 *
6149 * @param pIemCpu The IEM per CPU data.
6150 * @param pHid Pointer to the hidden register.
6151 * @param iSegReg The register number.
6152 * @param pu64BaseAddr Where to return the base address to use for the
6153 * segment. (In 64-bit code it may differ from the
6154 * base in the hidden segment.)
6155 */
6156IEM_STATIC VBOXSTRICTRC
6157iemMemSegCheckWriteAccessEx(PIEMCPU pIemCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
6158{
6159 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
6160 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
6161 else
6162 {
6163 if (!pHid->Attr.n.u1Present)
6164 return iemRaiseSelectorNotPresentBySegReg(pIemCpu, iSegReg);
6165
6166 if ( ( (pHid->Attr.n.u4Type & X86_SEL_TYPE_CODE)
6167 || !(pHid->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
6168 && pIemCpu->enmCpuMode != IEMMODE_64BIT )
6169 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, IEM_ACCESS_DATA_W);
6170 *pu64BaseAddr = pHid->u64Base;
6171 }
6172 return VINF_SUCCESS;
6173}
6174
6175
6176/**
6177 * Checks if the given segment can be read from, raising the appropriate
6178 * exception if not.
6179 *
6180 * @returns VBox strict status code.
6181 *
6182 * @param pIemCpu The IEM per CPU data.
6183 * @param pHid Pointer to the hidden register.
6184 * @param iSegReg The register number.
6185 * @param pu64BaseAddr Where to return the base address to use for the
6186 * segment. (In 64-bit code it may differ from the
6187 * base in the hidden segment.)
6188 */
6189IEM_STATIC VBOXSTRICTRC
6190iemMemSegCheckReadAccessEx(PIEMCPU pIemCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
6191{
6192 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
6193 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
6194 else
6195 {
6196 if (!pHid->Attr.n.u1Present)
6197 return iemRaiseSelectorNotPresentBySegReg(pIemCpu, iSegReg);
6198
6199 if ((pHid->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
6200 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, IEM_ACCESS_DATA_R);
6201 *pu64BaseAddr = pHid->u64Base;
6202 }
6203 return VINF_SUCCESS;
6204}
6205
6206
6207/**
6208 * Applies the segment limit, base and attributes.
6209 *
6210 * This may raise a \#GP or \#SS.
6211 *
6212 * @returns VBox strict status code.
6213 *
6214 * @param pIemCpu The IEM per CPU data.
6215 * @param fAccess The kind of access which is being performed.
6216 * @param iSegReg The index of the segment register to apply.
6217 * This is UINT8_MAX if none (for IDT, GDT, LDT,
6218 * TSS, ++).
6219 * @param cbMem The access size.
6220 * @param pGCPtrMem Pointer to the guest memory address to apply
6221 * segmentation to. Input and output parameter.
6222 */
6223IEM_STATIC VBOXSTRICTRC
6224iemMemApplySegment(PIEMCPU pIemCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem)
6225{
6226 if (iSegReg == UINT8_MAX)
6227 return VINF_SUCCESS;
6228
6229 PCPUMSELREGHID pSel = iemSRegGetHid(pIemCpu, iSegReg);
6230 switch (pIemCpu->enmCpuMode)
6231 {
6232 case IEMMODE_16BIT:
6233 case IEMMODE_32BIT:
6234 {
6235 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
6236 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
6237
6238 Assert(pSel->Attr.n.u1Present);
6239 Assert(pSel->Attr.n.u1DescType);
6240 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
6241 {
6242 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
6243 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
6244 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, fAccess);
6245
6246 if (!IEM_IS_REAL_OR_V86_MODE(pIemCpu))
6247 {
6248 /** @todo CPL check. */
6249 }
6250
6251 /*
6252 * There are two kinds of data selectors, normal and expand down.
6253 */
6254 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
6255 {
6256 if ( GCPtrFirst32 > pSel->u32Limit
6257 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
6258 return iemRaiseSelectorBounds(pIemCpu, iSegReg, fAccess);
6259 }
6260 else
6261 {
6262 /*
6263 * The upper boundary is defined by the B bit, not the G bit!
6264 */
6265 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
6266 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
6267 return iemRaiseSelectorBounds(pIemCpu, iSegReg, fAccess);
6268 }
6269 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
6270 }
6271 else
6272 {
6273
6274 /*
6275 * Code selector and usually be used to read thru, writing is
6276 * only permitted in real and V8086 mode.
6277 */
6278 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
6279 || ( (fAccess & IEM_ACCESS_TYPE_READ)
6280 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
6281 && !IEM_IS_REAL_OR_V86_MODE(pIemCpu) )
6282 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, fAccess);
6283
6284 if ( GCPtrFirst32 > pSel->u32Limit
6285 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
6286 return iemRaiseSelectorBounds(pIemCpu, iSegReg, fAccess);
6287
6288 if (!IEM_IS_REAL_OR_V86_MODE(pIemCpu))
6289 {
6290 /** @todo CPL check. */
6291 }
6292
6293 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
6294 }
6295 return VINF_SUCCESS;
6296 }
6297
6298 case IEMMODE_64BIT:
6299 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
6300 *pGCPtrMem += pSel->u64Base;
6301 return VINF_SUCCESS;
6302
6303 default:
6304 AssertFailedReturn(VERR_IEM_IPE_7);
6305 }
6306}
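
/*
 * Illustrative sketch (not built): applying a hypothetical expand-up 32-bit data
 * segment with base 0x10000 and limit 0xffff to a 4 byte access at offset
 * 0xfffc.  Both the first and last byte fall within the limit, so the linear
 * address simply becomes base + offset; one byte further up would trip the
 * selector bounds check instead.
 */
#if 0
static void iemExampleApplyDataSegment(void)
{
    uint32_t const uLimit      = UINT32_C(0x0000ffff);   /* hypothetical limit */
    uint32_t const offFirst    = UINT32_C(0x0000fffc);   /* hypothetical offset */
    uint32_t const offLast     = offFirst + 4 - 1;       /* 0xffff */
    Assert(offFirst <= uLimit && offLast <= uLimit);     /* no #GP/#SS */
    uint32_t const GCPtrLinear = offFirst + UINT32_C(0x00010000) /* hypothetical base */;
    Assert(GCPtrLinear == UINT32_C(0x0001fffc));
}
#endif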
6307
6308
6309/**
6310 * Translates a virtual address to a physical address and checks if we
6311 * can access the page as specified.
6312 *
6313 * @param pIemCpu The IEM per CPU data.
6314 * @param GCPtrMem The virtual address.
6315 * @param fAccess The intended access.
6316 * @param pGCPhysMem Where to return the physical address.
6317 */
6318IEM_STATIC VBOXSTRICTRC
6319iemMemPageTranslateAndCheckAccess(PIEMCPU pIemCpu, RTGCPTR GCPtrMem, uint32_t fAccess, PRTGCPHYS pGCPhysMem)
6320{
6321 /** @todo Need a different PGM interface here. We're currently using
6322 * generic / REM interfaces. This won't cut it for R0 & RC. */
6323 RTGCPHYS GCPhys;
6324 uint64_t fFlags;
6325 int rc = PGMGstGetPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrMem, &fFlags, &GCPhys);
6326 if (RT_FAILURE(rc))
6327 {
6328 /** @todo Check unassigned memory in unpaged mode. */
6329 /** @todo Reserved bits in page tables. Requires new PGM interface. */
6330 *pGCPhysMem = NIL_RTGCPHYS;
6331 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess, rc);
6332 }
6333
6334 /* If the page is writable and does not have the no-exec bit set, all
6335 access is allowed. Otherwise we'll have to check more carefully... */
6336 if ((fFlags & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
6337 {
6338 /* Write to read only memory? */
6339 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
6340 && !(fFlags & X86_PTE_RW)
6341 && ( pIemCpu->uCpl != 0
6342 || (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_WP)))
6343 {
6344 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
6345 *pGCPhysMem = NIL_RTGCPHYS;
6346 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
6347 }
6348
6349 /* Kernel memory accessed by userland? */
6350 if ( !(fFlags & X86_PTE_US)
6351 && pIemCpu->uCpl == 3
6352 && !(fAccess & IEM_ACCESS_WHAT_SYS))
6353 {
6354 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
6355 *pGCPhysMem = NIL_RTGCPHYS;
6356 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
6357 }
6358
6359 /* Executing non-executable memory? */
6360 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
6361 && (fFlags & X86_PTE_PAE_NX)
6362 && (pIemCpu->CTX_SUFF(pCtx)->msrEFER & MSR_K6_EFER_NXE) )
6363 {
6364 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
6365 *pGCPhysMem = NIL_RTGCPHYS;
6366 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
6367 VERR_ACCESS_DENIED);
6368 }
6369 }
6370
6371 /*
6372 * Set the dirty / access flags.
6373 * ASSUMES this is set when the address is translated rather than on commit...
6374 */
6375 /** @todo testcase: check when A and D bits are actually set by the CPU. */
6376 uint32_t fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
6377 if ((fFlags & fAccessedDirty) != fAccessedDirty)
6378 {
6379 int rc2 = PGMGstModifyPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
6380 AssertRC(rc2);
6381 }
6382
6383 GCPhys |= GCPtrMem & PAGE_OFFSET_MASK;
6384 *pGCPhysMem = GCPhys;
6385 return VINF_SUCCESS;
6386}
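
/*
 * Illustrative sketch (not built): the read-only check above means a supervisor
 * (CPL 0) write to a non-writable page only faults when CR0.WP is set, while a
 * user (CPL 3) write faults regardless.  The values below are hypothetical
 * example data.
 */
#if 0
static void iemExampleWriteProtectCheck(void)
{
    uint8_t  const uCpl    = 0;                          /* hypothetical ring-0 access */
    uint64_t const cr0     = X86_CR0_WP;                 /* hypothetical CR0 with WP set */
    bool     const fFaults = uCpl != 0 || (cr0 & X86_CR0_WP) != 0;
    Assert(fFaults);                                     /* would raise #PF on a RO page */
}
#endif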
6387
6388
6389
6390/**
6391 * Maps a physical page.
6392 *
6393 * @returns VBox status code (see PGMR3PhysTlbGCPhys2Ptr).
6394 * @param pIemCpu The IEM per CPU data.
6395 * @param GCPhysMem The physical address.
6396 * @param fAccess The intended access.
6397 * @param ppvMem Where to return the mapping address.
6398 * @param pLock The PGM lock.
6399 */
6400IEM_STATIC int iemMemPageMap(PIEMCPU pIemCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, void **ppvMem, PPGMPAGEMAPLOCK pLock)
6401{
6402#ifdef IEM_VERIFICATION_MODE_FULL
6403 /* Force the alternative path so we can ignore writes. */
6404 if ((fAccess & IEM_ACCESS_TYPE_WRITE) && !pIemCpu->fNoRem)
6405 {
6406 if (IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
6407 {
6408 int rc2 = PGMPhysIemQueryAccess(IEMCPU_TO_VM(pIemCpu), GCPhysMem,
6409 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pIemCpu->fBypassHandlers);
6410 if (RT_FAILURE(rc2))
6411 pIemCpu->fProblematicMemory = true;
6412 }
6413 return VERR_PGM_PHYS_TLB_CATCH_ALL;
6414 }
6415#endif
6416#ifdef IEM_LOG_MEMORY_WRITES
6417 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6418 return VERR_PGM_PHYS_TLB_CATCH_ALL;
6419#endif
6420#ifdef IEM_VERIFICATION_MODE_MINIMAL
6421 return VERR_PGM_PHYS_TLB_CATCH_ALL;
6422#endif
6423
6424 /** @todo This API may require some improving later. A private deal with PGM
6425 *          regarding locking and unlocking needs to be struck. A couple of TLBs
6426 * living in PGM, but with publicly accessible inlined access methods
6427 * could perhaps be an even better solution. */
6428 int rc = PGMPhysIemGCPhys2Ptr(IEMCPU_TO_VM(pIemCpu), IEMCPU_TO_VMCPU(pIemCpu),
6429 GCPhysMem,
6430 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE),
6431 pIemCpu->fBypassHandlers,
6432 ppvMem,
6433 pLock);
6434 /*Log(("PGMPhysIemGCPhys2Ptr %Rrc pLock=%.*Rhxs\n", rc, sizeof(*pLock), pLock));*/
6435 AssertMsg(rc == VINF_SUCCESS || RT_FAILURE_NP(rc), ("%Rrc\n", rc));
6436
6437#ifdef IEM_VERIFICATION_MODE_FULL
6438 if (RT_FAILURE(rc) && IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
6439 pIemCpu->fProblematicMemory = true;
6440#endif
6441 return rc;
6442}
6443
6444
6445/**
6446 * Unmap a page previously mapped by iemMemPageMap.
6447 *
6448 * @param pIemCpu The IEM per CPU data.
6449 * @param GCPhysMem The physical address.
6450 * @param fAccess The intended access.
6451 * @param pvMem What iemMemPageMap returned.
6452 * @param pLock The PGM lock.
6453 */
6454DECLINLINE(void) iemMemPageUnmap(PIEMCPU pIemCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, const void *pvMem, PPGMPAGEMAPLOCK pLock)
6455{
6456 NOREF(pIemCpu);
6457 NOREF(GCPhysMem);
6458 NOREF(fAccess);
6459 NOREF(pvMem);
6460 PGMPhysReleasePageMappingLock(IEMCPU_TO_VM(pIemCpu), pLock);
6461}
6462
6463
6464/**
6465 * Looks up a memory mapping entry.
6466 *
6467 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
6468 * @param pIemCpu The IEM per CPU data.
6469 * @param pvMem The memory address.
6470 * @param   fAccess             The intended access.
6471 */
6472DECLINLINE(int) iemMapLookup(PIEMCPU pIemCpu, void *pvMem, uint32_t fAccess)
6473{
6474 Assert(pIemCpu->cActiveMappings < RT_ELEMENTS(pIemCpu->aMemMappings));
6475 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
6476 if ( pIemCpu->aMemMappings[0].pv == pvMem
6477 && (pIemCpu->aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
6478 return 0;
6479 if ( pIemCpu->aMemMappings[1].pv == pvMem
6480 && (pIemCpu->aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
6481 return 1;
6482 if ( pIemCpu->aMemMappings[2].pv == pvMem
6483 && (pIemCpu->aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
6484 return 2;
6485 return VERR_NOT_FOUND;
6486}
6487
6488
6489/**
6490 * Finds a free memmap entry when using iNextMapping doesn't work.
6491 *
6492 * @returns Memory mapping index, 1024 on failure.
6493 * @param pIemCpu The IEM per CPU data.
6494 */
6495IEM_STATIC unsigned iemMemMapFindFree(PIEMCPU pIemCpu)
6496{
6497 /*
6498 * The easy case.
6499 */
6500 if (pIemCpu->cActiveMappings == 0)
6501 {
6502 pIemCpu->iNextMapping = 1;
6503 return 0;
6504 }
6505
6506 /* There should be enough mappings for all instructions. */
6507 AssertReturn(pIemCpu->cActiveMappings < RT_ELEMENTS(pIemCpu->aMemMappings), 1024);
6508
6509 for (unsigned i = 0; i < RT_ELEMENTS(pIemCpu->aMemMappings); i++)
6510 if (pIemCpu->aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
6511 return i;
6512
6513 AssertFailedReturn(1024);
6514}
6515
6516
6517/**
6518 * Commits a bounce buffer that needs writing back and unmaps it.
6519 *
6520 * @returns Strict VBox status code.
6521 * @param pIemCpu The IEM per CPU data.
6522 * @param iMemMap The index of the buffer to commit.
6523 */
6524IEM_STATIC VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PIEMCPU pIemCpu, unsigned iMemMap)
6525{
6526 Assert(pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
6527 Assert(pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
6528
6529 /*
6530 * Do the writing.
6531 */
6532#ifndef IEM_VERIFICATION_MODE_MINIMAL
6533 PVM pVM = IEMCPU_TO_VM(pIemCpu);
6534 if ( !pIemCpu->aMemBbMappings[iMemMap].fUnassigned
6535 && !IEM_VERIFICATION_ENABLED(pIemCpu))
6536 {
6537 uint16_t const cbFirst = pIemCpu->aMemBbMappings[iMemMap].cbFirst;
6538 uint16_t const cbSecond = pIemCpu->aMemBbMappings[iMemMap].cbSecond;
6539 uint8_t const *pbBuf = &pIemCpu->aBounceBuffers[iMemMap].ab[0];
6540 if (!pIemCpu->fBypassHandlers)
6541 {
6542 /*
6543 * Carefully and efficiently dealing with access handler return
6544             * codes makes this a little bloated.
6545 */
6546 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
6547 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst,
6548 pbBuf,
6549 cbFirst,
6550 PGMACCESSORIGIN_IEM);
6551 if (rcStrict == VINF_SUCCESS)
6552 {
6553 if (cbSecond)
6554 {
6555 rcStrict = PGMPhysWrite(pVM,
6556 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond,
6557 pbBuf + cbFirst,
6558 cbSecond,
6559 PGMACCESSORIGIN_IEM);
6560 if (rcStrict == VINF_SUCCESS)
6561 { /* nothing */ }
6562 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6563 {
6564 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
6565 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
6566 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
6567 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
6568 }
6569 else
6570 {
6571 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
6572 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
6573 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
6574 return rcStrict;
6575 }
6576 }
6577 }
6578 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6579 {
6580 if (!cbSecond)
6581 {
6582 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
6583 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
6584 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
6585 }
6586 else
6587 {
6588 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
6589 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond,
6590 pbBuf + cbFirst,
6591 cbSecond,
6592 PGMACCESSORIGIN_IEM);
6593 if (rcStrict2 == VINF_SUCCESS)
6594 {
6595 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
6596 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
6597 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
6598 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
6599 }
6600 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
6601 {
6602 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
6603 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
6604 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
6605 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
6606 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
6607 }
6608 else
6609 {
6610 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
6611 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
6612 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
6613 return rcStrict2;
6614 }
6615 }
6616 }
6617 else
6618 {
6619 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
6620 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
6621 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
6622 return rcStrict;
6623 }
6624 }
6625 else
6626 {
6627 /*
6628 * No access handlers, much simpler.
6629 */
6630 int rc = PGMPhysSimpleWriteGCPhys(pVM, pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
6631 if (RT_SUCCESS(rc))
6632 {
6633 if (cbSecond)
6634 {
6635 rc = PGMPhysSimpleWriteGCPhys(pVM, pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
6636 if (RT_SUCCESS(rc))
6637 { /* likely */ }
6638 else
6639 {
6640 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
6641 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
6642 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
6643 return rc;
6644 }
6645 }
6646 }
6647 else
6648 {
6649 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
6650 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
6651 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
6652 return rc;
6653 }
6654 }
6655 }
6656#endif
6657
6658#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
6659 /*
6660 * Record the write(s).
6661 */
6662 if (!pIemCpu->fNoRem)
6663 {
6664 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
6665 if (pEvtRec)
6666 {
6667 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
6668 pEvtRec->u.RamWrite.GCPhys = pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst;
6669 pEvtRec->u.RamWrite.cb = pIemCpu->aMemBbMappings[iMemMap].cbFirst;
6670 memcpy(pEvtRec->u.RamWrite.ab, &pIemCpu->aBounceBuffers[iMemMap].ab[0], pIemCpu->aMemBbMappings[iMemMap].cbFirst);
6671 AssertCompile(sizeof(pEvtRec->u.RamWrite.ab) == sizeof(pIemCpu->aBounceBuffers[0].ab));
6672 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
6673 *pIemCpu->ppIemEvtRecNext = pEvtRec;
6674 }
6675 if (pIemCpu->aMemBbMappings[iMemMap].cbSecond)
6676 {
6677 pEvtRec = iemVerifyAllocRecord(pIemCpu);
6678 if (pEvtRec)
6679 {
6680 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
6681 pEvtRec->u.RamWrite.GCPhys = pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond;
6682 pEvtRec->u.RamWrite.cb = pIemCpu->aMemBbMappings[iMemMap].cbSecond;
6683 memcpy(pEvtRec->u.RamWrite.ab,
6684 &pIemCpu->aBounceBuffers[iMemMap].ab[pIemCpu->aMemBbMappings[iMemMap].cbFirst],
6685 pIemCpu->aMemBbMappings[iMemMap].cbSecond);
6686 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
6687 *pIemCpu->ppIemEvtRecNext = pEvtRec;
6688 }
6689 }
6690 }
6691#endif
6692#if defined(IEM_VERIFICATION_MODE_MINIMAL) || defined(IEM_LOG_MEMORY_WRITES)
6693 Log(("IEM Wrote %RGp: %.*Rhxs\n", pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst,
6694 RT_MAX(RT_MIN(pIemCpu->aMemBbMappings[iMemMap].cbFirst, 64), 1), &pIemCpu->aBounceBuffers[iMemMap].ab[0]));
6695 if (pIemCpu->aMemBbMappings[iMemMap].cbSecond)
6696 Log(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond,
6697 RT_MIN(pIemCpu->aMemBbMappings[iMemMap].cbSecond, 64),
6698 &pIemCpu->aBounceBuffers[iMemMap].ab[pIemCpu->aMemBbMappings[iMemMap].cbFirst]));
6699
6700 size_t cbWrote = pIemCpu->aMemBbMappings[iMemMap].cbFirst + pIemCpu->aMemBbMappings[iMemMap].cbSecond;
6701 g_cbIemWrote = cbWrote;
6702 memcpy(g_abIemWrote, &pIemCpu->aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
6703#endif
6704
6705 /*
6706 * Free the mapping entry.
6707 */
6708 pIemCpu->aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6709 Assert(pIemCpu->cActiveMappings != 0);
6710 pIemCpu->cActiveMappings--;
6711 return VINF_SUCCESS;
6712}
6713
6714
6715/**
6716 * iemMemMap worker that deals with a request crossing pages.
6717 */
6718IEM_STATIC VBOXSTRICTRC
6719iemMemBounceBufferMapCrossPage(PIEMCPU pIemCpu, int iMemMap, void **ppvMem, size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
6720{
6721 /*
6722 * Do the address translations.
6723 */
6724 RTGCPHYS GCPhysFirst;
6725 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, GCPtrFirst, fAccess, &GCPhysFirst);
6726 if (rcStrict != VINF_SUCCESS)
6727 return rcStrict;
6728
6729/** @todo Testcase & AMD-V/VT-x verification: Check if CR2 should really be the
6730 * last byte. */
6731 RTGCPHYS GCPhysSecond;
6732 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, GCPtrFirst + (cbMem - 1), fAccess, &GCPhysSecond);
6733 if (rcStrict != VINF_SUCCESS)
6734 return rcStrict;
6735 GCPhysSecond &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
6736
6737 PVM pVM = IEMCPU_TO_VM(pIemCpu);
6738#ifdef IEM_VERIFICATION_MODE_FULL
6739 /*
6740 * Detect problematic memory when verifying so we can select
6741 * the right execution engine. (TLB: Redo this.)
6742 */
6743 if (IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
6744 {
6745 int rc2 = PGMPhysIemQueryAccess(pVM, GCPhysFirst, RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pIemCpu->fBypassHandlers);
6746 if (RT_SUCCESS(rc2))
6747 rc2 = PGMPhysIemQueryAccess(pVM, GCPhysSecond, RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pIemCpu->fBypassHandlers);
6748 if (RT_FAILURE(rc2))
6749 pIemCpu->fProblematicMemory = true;
6750 }
6751#endif
6752
6753
6754 /*
6755 * Read in the current memory content if it's a read, execute or partial
6756 * write access.
6757 */
6758 uint8_t *pbBuf = &pIemCpu->aBounceBuffers[iMemMap].ab[0];
6759 uint32_t const cbFirstPage = PAGE_SIZE - (GCPhysFirst & PAGE_OFFSET_MASK);
6760 uint32_t const cbSecondPage = (uint32_t)(cbMem - cbFirstPage);
6761
6762 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
6763 {
6764 if (!pIemCpu->fBypassHandlers)
6765 {
6766 /*
6767 * Must carefully deal with access handler status codes here,
6768 * makes the code a bit bloated.
6769 */
6770 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
6771 if (rcStrict == VINF_SUCCESS)
6772 {
6773 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
6774 if (rcStrict == VINF_SUCCESS)
6775 { /*likely */ }
6776 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6777 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
6778 else
6779 {
6780 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (!!)\n",
6781 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
6782 return rcStrict;
6783 }
6784 }
6785 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6786 {
6787 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
6788 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
6789 {
6790 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
6791 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
6792 }
6793 else
6794 {
6795 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
6796                              GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict) ));
6797 return rcStrict2;
6798 }
6799 }
6800 else
6801 {
6802 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6803 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
6804 return rcStrict;
6805 }
6806 }
6807 else
6808 {
6809 /*
6810             * No informational status codes here, much more straightforward.
6811 */
6812 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
6813 if (RT_SUCCESS(rc))
6814 {
6815 Assert(rc == VINF_SUCCESS);
6816 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
6817 if (RT_SUCCESS(rc))
6818 Assert(rc == VINF_SUCCESS);
6819 else
6820 {
6821 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
6822 return rc;
6823 }
6824 }
6825 else
6826 {
6827 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
6828 return rc;
6829 }
6830 }
6831
6832#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
6833 if ( !pIemCpu->fNoRem
6834 && (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC)) )
6835 {
6836 /*
6837 * Record the reads.
6838 */
6839 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
6840 if (pEvtRec)
6841 {
6842 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
6843 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
6844 pEvtRec->u.RamRead.cb = cbFirstPage;
6845 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
6846 *pIemCpu->ppIemEvtRecNext = pEvtRec;
6847 }
6848 pEvtRec = iemVerifyAllocRecord(pIemCpu);
6849 if (pEvtRec)
6850 {
6851 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
6852 pEvtRec->u.RamRead.GCPhys = GCPhysSecond;
6853 pEvtRec->u.RamRead.cb = cbSecondPage;
6854 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
6855 *pIemCpu->ppIemEvtRecNext = pEvtRec;
6856 }
6857 }
6858#endif
6859 }
6860#ifdef VBOX_STRICT
6861 else
6862 memset(pbBuf, 0xcc, cbMem);
6863 if (cbMem < sizeof(pIemCpu->aBounceBuffers[iMemMap].ab))
6864 memset(pbBuf + cbMem, 0xaa, sizeof(pIemCpu->aBounceBuffers[iMemMap].ab) - cbMem);
6865#endif
6866
6867 /*
6868 * Commit the bounce buffer entry.
6869 */
6870 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
6871 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
6872 pIemCpu->aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
6873 pIemCpu->aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
6874 pIemCpu->aMemBbMappings[iMemMap].fUnassigned = false;
6875 pIemCpu->aMemMappings[iMemMap].pv = pbBuf;
6876 pIemCpu->aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
6877 pIemCpu->iNextMapping = iMemMap + 1;
6878 pIemCpu->cActiveMappings++;
6879
6880 iemMemUpdateWrittenCounter(pIemCpu, fAccess, cbMem);
6881 *ppvMem = pbBuf;
6882 return VINF_SUCCESS;
6883}
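/*
 * Illustrative sketch (not part of the build): how iemMemBounceBufferMapCrossPage
 * above splits a straddling access into the two chunks it bounces.  PAGE_SIZE and
 * PAGE_OFFSET_MASK are the usual 4 KiB page constants; iemSketchSplit is a made-up
 * name.
 *
 *    void iemSketchSplit(RTGCPTR GCPtrFirst, size_t cbMem, size_t *pcbFirst, size_t *pcbSecond)
 *    {
 *        *pcbFirst  = PAGE_SIZE - (GCPtrFirst & PAGE_OFFSET_MASK); // bytes left on the first page
 *        *pcbSecond = cbMem - *pcbFirst;                           // remainder lands on the second page
 *    }
 *    // E.g. GCPtrFirst=0x1ffe and cbMem=4 gives cbFirst=2 and cbSecond=2.
 */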
6884
6885
6886/**
6887 * iemMemMap worker that deals with iemMemPageMap failures.
6888 */
6889IEM_STATIC VBOXSTRICTRC iemMemBounceBufferMapPhys(PIEMCPU pIemCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
6890 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
6891{
6892 /*
6893 * Filter out conditions we can handle and the ones which shouldn't happen.
6894 */
6895 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
6896 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
6897 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
6898 {
6899 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
6900 return rcMap;
6901 }
6902 pIemCpu->cPotentialExits++;
6903
6904 /*
6905 * Read in the current memory content if it's a read, execute or partial
6906 * write access.
6907 */
6908 uint8_t *pbBuf = &pIemCpu->aBounceBuffers[iMemMap].ab[0];
6909 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
6910 {
6911 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
6912 memset(pbBuf, 0xff, cbMem);
6913 else
6914 {
6915 int rc;
6916 if (!pIemCpu->fBypassHandlers)
6917 {
6918 VBOXSTRICTRC rcStrict = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
6919 if (rcStrict == VINF_SUCCESS)
6920 { /* nothing */ }
6921 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6922 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
6923 else
6924 {
6925 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6926 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
6927 return rcStrict;
6928 }
6929 }
6930 else
6931 {
6932 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), pbBuf, GCPhysFirst, cbMem);
6933 if (RT_SUCCESS(rc))
6934 { /* likely */ }
6935 else
6936 {
6937 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6938 GCPhysFirst, rc));
6939 return rc;
6940 }
6941 }
6942 }
6943
6944#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
6945 if ( !pIemCpu->fNoRem
6946 && (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC)) )
6947 {
6948 /*
6949 * Record the read.
6950 */
6951 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
6952 if (pEvtRec)
6953 {
6954 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
6955 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
6956 pEvtRec->u.RamRead.cb = (uint32_t)cbMem;
6957 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
6958 *pIemCpu->ppIemEvtRecNext = pEvtRec;
6959 }
6960 }
6961#endif
6962 }
6963#ifdef VBOX_STRICT
6964 else
6965 memset(pbBuf, 0xcc, cbMem);
6966#endif
6967#ifdef VBOX_STRICT
6968 if (cbMem < sizeof(pIemCpu->aBounceBuffers[iMemMap].ab))
6969 memset(pbBuf + cbMem, 0xaa, sizeof(pIemCpu->aBounceBuffers[iMemMap].ab) - cbMem);
6970#endif
6971
6972 /*
6973 * Commit the bounce buffer entry.
6974 */
6975 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
6976 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
6977 pIemCpu->aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
6978 pIemCpu->aMemBbMappings[iMemMap].cbSecond = 0;
6979 pIemCpu->aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
6980 pIemCpu->aMemMappings[iMemMap].pv = pbBuf;
6981 pIemCpu->aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
6982 pIemCpu->iNextMapping = iMemMap + 1;
6983 pIemCpu->cActiveMappings++;
6984
6985 iemMemUpdateWrittenCounter(pIemCpu, fAccess, cbMem);
6986 *ppvMem = pbBuf;
6987 return VINF_SUCCESS;
6988}
6989
6990
6991
6992/**
6993 * Maps the specified guest memory for the given kind of access.
6994 *
6995 * This may be using bounce buffering of the memory if it's crossing a page
6996 * boundary or if there is an access handler installed for any of it. Because
6997 * of lock prefix guarantees, we're in for some extra clutter when this
6998 * happens.
6999 *
7000 * This may raise a \#GP, \#SS, \#PF or \#AC.
7001 *
7002 * @returns VBox strict status code.
7003 *
7004 * @param pIemCpu The IEM per CPU data.
7005 * @param ppvMem Where to return the pointer to the mapped
7006 * memory.
7007 * @param cbMem The number of bytes to map. This is usually 1,
7008 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
7009 * string operations it can be up to a page.
7010 * @param iSegReg The index of the segment register to use for
7011 * this access. The base and limits are checked.
7012 * Use UINT8_MAX to indicate that no segmentation
7013 * is required (for IDT, GDT and LDT accesses).
7014 * @param GCPtrMem The address of the guest memory.
7015 * @param fAccess How the memory is being accessed. The
7016 * IEM_ACCESS_TYPE_XXX bit is used to figure out
7017 * how to map the memory, while the
7018 * IEM_ACCESS_WHAT_XXX bit is used when raising
7019 * exceptions.
7020 */
7021IEM_STATIC VBOXSTRICTRC
7022iemMemMap(PIEMCPU pIemCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
7023{
7024 /*
7025 * Check the input and figure out which mapping entry to use.
7026 */
7027 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
7028    Assert(!(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
7029 Assert(pIemCpu->cActiveMappings < RT_ELEMENTS(pIemCpu->aMemMappings));
7030
7031 unsigned iMemMap = pIemCpu->iNextMapping;
7032 if ( iMemMap >= RT_ELEMENTS(pIemCpu->aMemMappings)
7033 || pIemCpu->aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
7034 {
7035 iMemMap = iemMemMapFindFree(pIemCpu);
7036 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pIemCpu->aMemMappings),
7037 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pIemCpu->cActiveMappings,
7038 pIemCpu->aMemMappings[0].fAccess, pIemCpu->aMemMappings[1].fAccess,
7039 pIemCpu->aMemMappings[2].fAccess),
7040 VERR_IEM_IPE_9);
7041 }
7042
7043 /*
7044 * Map the memory, checking that we can actually access it. If something
7045 * slightly complicated happens, fall back on bounce buffering.
7046 */
7047 VBOXSTRICTRC rcStrict = iemMemApplySegment(pIemCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
7048 if (rcStrict != VINF_SUCCESS)
7049 return rcStrict;
7050
7051 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem > PAGE_SIZE) /* Crossing a page boundary? */
7052 return iemMemBounceBufferMapCrossPage(pIemCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
7053
7054 RTGCPHYS GCPhysFirst;
7055 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, GCPtrMem, fAccess, &GCPhysFirst);
7056 if (rcStrict != VINF_SUCCESS)
7057 return rcStrict;
7058
7059 void *pvMem;
7060 rcStrict = iemMemPageMap(pIemCpu, GCPhysFirst, fAccess, &pvMem, &pIemCpu->aMemMappingLocks[iMemMap].Lock);
7061 if (rcStrict != VINF_SUCCESS)
7062 return iemMemBounceBufferMapPhys(pIemCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
7063
7064 /*
7065 * Fill in the mapping table entry.
7066 */
7067 pIemCpu->aMemMappings[iMemMap].pv = pvMem;
7068 pIemCpu->aMemMappings[iMemMap].fAccess = fAccess;
7069 pIemCpu->iNextMapping = iMemMap + 1;
7070 pIemCpu->cActiveMappings++;
7071
7072 iemMemUpdateWrittenCounter(pIemCpu, fAccess, cbMem);
7073 *ppvMem = pvMem;
7074 return VINF_SUCCESS;
7075}
7076
7077
7078/**
7079 * Commits the guest memory if bounce buffered and unmaps it.
7080 *
7081 * @returns Strict VBox status code.
7082 * @param pIemCpu The IEM per CPU data.
7083 * @param pvMem The mapping.
7084 * @param fAccess The kind of access.
7085 */
7086IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PIEMCPU pIemCpu, void *pvMem, uint32_t fAccess)
7087{
7088 int iMemMap = iemMapLookup(pIemCpu, pvMem, fAccess);
7089 AssertReturn(iMemMap >= 0, iMemMap);
7090
7091 /* If it's bounce buffered, we may need to write back the buffer. */
7092 if (pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
7093 {
7094 if (pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
7095 return iemMemBounceBufferCommitAndUnmap(pIemCpu, iMemMap);
7096 }
7097 /* Otherwise unlock it. */
7098 else
7099 PGMPhysReleasePageMappingLock(IEMCPU_TO_VM(pIemCpu), &pIemCpu->aMemMappingLocks[iMemMap].Lock);
7100
7101 /* Free the entry. */
7102 pIemCpu->aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
7103 Assert(pIemCpu->cActiveMappings != 0);
7104 pIemCpu->cActiveMappings--;
7105 return VINF_SUCCESS;
7106}
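/*
 * Illustrative sketch (not part of the build) of the iemMemMap /
 * iemMemCommitAndUnmap pairing for a read-modify-write operand, which is
 * roughly what a locked "inc byte [mem]" style operation boils down to.
 * iemSketchIncByte is a made-up name.
 *
 *    IEM_STATIC VBOXSTRICTRC iemSketchIncByte(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
 *    {
 *        uint8_t *pbData;
 *        VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, (void **)&pbData, sizeof(*pbData),
 *                                          iSegReg, GCPtrMem, IEM_ACCESS_DATA_RW);
 *        if (rcStrict == VINF_SUCCESS)
 *        {
 *            *pbData += 1;   // modify the directly mapped or bounce buffered byte
 *            rcStrict = iemMemCommitAndUnmap(pIemCpu, pbData, IEM_ACCESS_DATA_RW);
 *        }
 *        return rcStrict;    // any bounce buffer write back happened in the commit call
 *    }
 */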
7107
7108
7109/**
7110 * Rolls back mappings, releasing page locks and such.
7111 *
7112 * The caller shall only call this after checking cActiveMappings.
7113 *
7115 * @param pIemCpu The IEM per CPU data.
7116 */
7117IEM_STATIC void iemMemRollback(PIEMCPU pIemCpu)
7118{
7119 Assert(pIemCpu->cActiveMappings > 0);
7120
7121 uint32_t iMemMap = RT_ELEMENTS(pIemCpu->aMemMappings);
7122 while (iMemMap-- > 0)
7123 {
7124 uint32_t fAccess = pIemCpu->aMemMappings[iMemMap].fAccess;
7125 if (fAccess != IEM_ACCESS_INVALID)
7126 {
7127 AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
7128 pIemCpu->aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
7129 if (!(fAccess & IEM_ACCESS_BOUNCE_BUFFERED))
7130 PGMPhysReleasePageMappingLock(IEMCPU_TO_VM(pIemCpu), &pIemCpu->aMemMappingLocks[iMemMap].Lock);
7131 Assert(pIemCpu->cActiveMappings > 0);
7132 pIemCpu->cActiveMappings--;
7133 }
7134 }
7135}
7136
7137
7138/**
7139 * Fetches a data byte.
7140 *
7141 * @returns Strict VBox status code.
7142 * @param pIemCpu The IEM per CPU data.
7143 * @param pu8Dst Where to return the byte.
7144 * @param iSegReg The index of the segment register to use for
7145 * this access. The base and limits are checked.
7146 * @param GCPtrMem The address of the guest memory.
7147 */
7148IEM_STATIC VBOXSTRICTRC iemMemFetchDataU8(PIEMCPU pIemCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7149{
7150 /* The lazy approach for now... */
7151 uint8_t const *pu8Src;
7152 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7153 if (rc == VINF_SUCCESS)
7154 {
7155 *pu8Dst = *pu8Src;
7156 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
7157 }
7158 return rc;
7159}
7160
7161
7162/**
7163 * Fetches a data word.
7164 *
7165 * @returns Strict VBox status code.
7166 * @param pIemCpu The IEM per CPU data.
7167 * @param pu16Dst Where to return the word.
7168 * @param iSegReg The index of the segment register to use for
7169 * this access. The base and limits are checked.
7170 * @param GCPtrMem The address of the guest memory.
7171 */
7172IEM_STATIC VBOXSTRICTRC iemMemFetchDataU16(PIEMCPU pIemCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7173{
7174 /* The lazy approach for now... */
7175 uint16_t const *pu16Src;
7176 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7177 if (rc == VINF_SUCCESS)
7178 {
7179 *pu16Dst = *pu16Src;
7180 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
7181 }
7182 return rc;
7183}
7184
7185
7186/**
7187 * Fetches a data dword.
7188 *
7189 * @returns Strict VBox status code.
7190 * @param pIemCpu The IEM per CPU data.
7191 * @param pu32Dst Where to return the dword.
7192 * @param iSegReg The index of the segment register to use for
7193 * this access. The base and limits are checked.
7194 * @param GCPtrMem The address of the guest memory.
7195 */
7196IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7197{
7198 /* The lazy approach for now... */
7199 uint32_t const *pu32Src;
7200 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7201 if (rc == VINF_SUCCESS)
7202 {
7203 *pu32Dst = *pu32Src;
7204 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
7205 }
7206 return rc;
7207}
7208
7209
7210#ifdef SOME_UNUSED_FUNCTION
7211/**
7212 * Fetches a data dword and sign extends it to a qword.
7213 *
7214 * @returns Strict VBox status code.
7215 * @param pIemCpu The IEM per CPU data.
7216 * @param pu64Dst Where to return the sign extended value.
7217 * @param iSegReg The index of the segment register to use for
7218 * this access. The base and limits are checked.
7219 * @param GCPtrMem The address of the guest memory.
7220 */
7221IEM_STATIC VBOXSTRICTRC iemMemFetchDataS32SxU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7222{
7223 /* The lazy approach for now... */
7224 int32_t const *pi32Src;
7225 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7226 if (rc == VINF_SUCCESS)
7227 {
7228 *pu64Dst = *pi32Src;
7229 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
7230 }
7231#ifdef __GNUC__ /* warning: GCC may be a royal pain */
7232 else
7233 *pu64Dst = 0;
7234#endif
7235 return rc;
7236}
7237#endif
7238
7239
7240/**
7241 * Fetches a data qword.
7242 *
7243 * @returns Strict VBox status code.
7244 * @param pIemCpu The IEM per CPU data.
7245 * @param pu64Dst Where to return the qword.
7246 * @param iSegReg The index of the segment register to use for
7247 * this access. The base and limits are checked.
7248 * @param GCPtrMem The address of the guest memory.
7249 */
7250IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7251{
7252 /* The lazy approach for now... */
7253 uint64_t const *pu64Src;
7254 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7255 if (rc == VINF_SUCCESS)
7256 {
7257 *pu64Dst = *pu64Src;
7258 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
7259 }
7260 return rc;
7261}
7262
7263
7264/**
7265 * Fetches a data qword, aligned at a 16 byte boundary (for SSE).
7266 *
7267 * @returns Strict VBox status code.
7268 * @param pIemCpu The IEM per CPU data.
7269 * @param pu64Dst Where to return the qword.
7270 * @param iSegReg The index of the segment register to use for
7271 * this access. The base and limits are checked.
7272 * @param GCPtrMem The address of the guest memory.
7273 */
7274IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64AlignedU128(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7275{
7276 /* The lazy approach for now... */
7277 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
7278 if (RT_UNLIKELY(GCPtrMem & 15))
7279 return iemRaiseGeneralProtectionFault0(pIemCpu);
7280
7281 uint64_t const *pu64Src;
7282 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7283 if (rc == VINF_SUCCESS)
7284 {
7285 *pu64Dst = *pu64Src;
7286 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
7287 }
7288 return rc;
7289}
7290
7291
7292/**
7293 * Fetches a data tword.
7294 *
7295 * @returns Strict VBox status code.
7296 * @param pIemCpu The IEM per CPU data.
7297 * @param pr80Dst Where to return the tword.
7298 * @param iSegReg The index of the segment register to use for
7299 * this access. The base and limits are checked.
7300 * @param GCPtrMem The address of the guest memory.
7301 */
7302IEM_STATIC VBOXSTRICTRC iemMemFetchDataR80(PIEMCPU pIemCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7303{
7304 /* The lazy approach for now... */
7305 PCRTFLOAT80U pr80Src;
7306 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pr80Src, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7307 if (rc == VINF_SUCCESS)
7308 {
7309 *pr80Dst = *pr80Src;
7310 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
7311 }
7312 return rc;
7313}
7314
7315
7316/**
7317 * Fetches a data dqword (double qword), generally SSE related.
7318 *
7319 * @returns Strict VBox status code.
7320 * @param pIemCpu The IEM per CPU data.
7321 * @param   pu128Dst            Where to return the dqword.
7322 * @param iSegReg The index of the segment register to use for
7323 * this access. The base and limits are checked.
7324 * @param GCPtrMem The address of the guest memory.
7325 */
7326IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128(PIEMCPU pIemCpu, uint128_t *pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7327{
7328 /* The lazy approach for now... */
7329 uint128_t const *pu128Src;
7330 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7331 if (rc == VINF_SUCCESS)
7332 {
7333 *pu128Dst = *pu128Src;
7334 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7335 }
7336 return rc;
7337}
7338
7339
7340/**
7341 * Fetches a data dqword (double qword) at an aligned address, generally SSE
7342 * related.
7343 *
7344 * Raises \#GP(0) if not aligned.
7345 *
7346 * @returns Strict VBox status code.
7347 * @param pIemCpu The IEM per CPU data.
7348 * @param   pu128Dst            Where to return the dqword.
7349 * @param iSegReg The index of the segment register to use for
7350 * this access. The base and limits are checked.
7351 * @param GCPtrMem The address of the guest memory.
7352 */
7353IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PIEMCPU pIemCpu, uint128_t *pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7354{
7355 /* The lazy approach for now... */
7356 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
7357 if ( (GCPtrMem & 15)
7358 && !(pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXSCR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
7359 return iemRaiseGeneralProtectionFault0(pIemCpu);
7360
7361 uint128_t const *pu128Src;
7362 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7363 if (rc == VINF_SUCCESS)
7364 {
7365 *pu128Dst = *pu128Src;
7366 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7367 }
7368 return rc;
7369}
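/*
 * Illustrative sketch (not part of the build): the alignment rule that
 * iemMemFetchDataU128AlignedSse above enforces.  A 16-byte access must be
 * 16-byte aligned unless MXCSR.MM permits misaligned accesses;
 * iemSketchSseAlignmentFaults is a made-up name.
 *
 *    bool iemSketchSseAlignmentFaults(RTGCPTR GCPtrMem, uint32_t fMxCsr)
 *    {
 *        return (GCPtrMem & 15) != 0         // not on a 16 byte boundary
 *            && !(fMxCsr & X86_MXSCR_MM);    // and misaligned accesses are not allowed
 *    }
 *    // A true result corresponds to the iemRaiseGeneralProtectionFault0() path above.
 */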
7370
7371
7372
7373
7374/**
7375 * Fetches a descriptor register (lgdt, lidt).
7376 *
7377 * @returns Strict VBox status code.
7378 * @param pIemCpu The IEM per CPU data.
7379 * @param pcbLimit Where to return the limit.
7380 * @param pGCPtrBase Where to return the base.
7381 * @param iSegReg The index of the segment register to use for
7382 * this access. The base and limits are checked.
7383 * @param GCPtrMem The address of the guest memory.
7384 * @param enmOpSize The effective operand size.
7385 */
7386IEM_STATIC VBOXSTRICTRC iemMemFetchDataXdtr(PIEMCPU pIemCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
7387 RTGCPTR GCPtrMem, IEMMODE enmOpSize)
7388{
7389 uint8_t const *pu8Src;
7390 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu,
7391 (void **)&pu8Src,
7392 enmOpSize == IEMMODE_64BIT
7393 ? 2 + 8
7394 : enmOpSize == IEMMODE_32BIT
7395 ? 2 + 4
7396 : 2 + 3,
7397 iSegReg,
7398 GCPtrMem,
7399 IEM_ACCESS_DATA_R);
7400 if (rcStrict == VINF_SUCCESS)
7401 {
7402 *pcbLimit = RT_MAKE_U16(pu8Src[0], pu8Src[1]);
7403 switch (enmOpSize)
7404 {
7405 case IEMMODE_16BIT:
7406 *pGCPtrBase = RT_MAKE_U32_FROM_U8(pu8Src[2], pu8Src[3], pu8Src[4], 0);
7407 break;
7408 case IEMMODE_32BIT:
7409 *pGCPtrBase = RT_MAKE_U32_FROM_U8(pu8Src[2], pu8Src[3], pu8Src[4], pu8Src[5]);
7410 break;
7411 case IEMMODE_64BIT:
7412 *pGCPtrBase = RT_MAKE_U64_FROM_U8(pu8Src[2], pu8Src[3], pu8Src[4], pu8Src[5],
7413 pu8Src[6], pu8Src[7], pu8Src[8], pu8Src[9]);
7414 break;
7415
7416 IEM_NOT_REACHED_DEFAULT_CASE_RET();
7417 }
7418 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
7419 }
7420 return rcStrict;
7421}
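/*
 * Illustrative sketch (not part of the build): the pseudo-descriptor layout
 * iemMemFetchDataXdtr above decodes, shown as a packed struct.  The limit
 * word always comes first; the base is 24, 32 or 64 bits wide depending on
 * the effective operand size (2+3, 2+4 or 2+8 bytes read).
 *
 *    #pragma pack(1)
 *    typedef struct IEMSKETCHXDTR
 *    {
 *        uint16_t cbLimit;   // bytes 0..1: table limit
 *        uint64_t uBase;     // bytes 2..9: linear base; only the low 3 or 4 bytes
 *                            //             are read for 16-bit/32-bit operand sizes
 *    } IEMSKETCHXDTR;
 *    #pragma pack()
 */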
7422
7423
7424
7425/**
7426 * Stores a data byte.
7427 *
7428 * @returns Strict VBox status code.
7429 * @param pIemCpu The IEM per CPU data.
7430 * @param iSegReg The index of the segment register to use for
7431 * this access. The base and limits are checked.
7432 * @param GCPtrMem The address of the guest memory.
7433 * @param u8Value The value to store.
7434 */
7435IEM_STATIC VBOXSTRICTRC iemMemStoreDataU8(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
7436{
7437 /* The lazy approach for now... */
7438 uint8_t *pu8Dst;
7439 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7440 if (rc == VINF_SUCCESS)
7441 {
7442 *pu8Dst = u8Value;
7443 rc = iemMemCommitAndUnmap(pIemCpu, pu8Dst, IEM_ACCESS_DATA_W);
7444 }
7445 return rc;
7446}
7447
7448
7449/**
7450 * Stores a data word.
7451 *
7452 * @returns Strict VBox status code.
7453 * @param pIemCpu The IEM per CPU data.
7454 * @param iSegReg The index of the segment register to use for
7455 * this access. The base and limits are checked.
7456 * @param GCPtrMem The address of the guest memory.
7457 * @param u16Value The value to store.
7458 */
7459IEM_STATIC VBOXSTRICTRC iemMemStoreDataU16(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
7460{
7461 /* The lazy approach for now... */
7462 uint16_t *pu16Dst;
7463 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7464 if (rc == VINF_SUCCESS)
7465 {
7466 *pu16Dst = u16Value;
7467 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_DATA_W);
7468 }
7469 return rc;
7470}
7471
7472
7473/**
7474 * Stores a data dword.
7475 *
7476 * @returns Strict VBox status code.
7477 * @param pIemCpu The IEM per CPU data.
7478 * @param iSegReg The index of the segment register to use for
7479 * this access. The base and limits are checked.
7480 * @param GCPtrMem The address of the guest memory.
7481 * @param u32Value The value to store.
7482 */
7483IEM_STATIC VBOXSTRICTRC iemMemStoreDataU32(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
7484{
7485 /* The lazy approach for now... */
7486 uint32_t *pu32Dst;
7487 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7488 if (rc == VINF_SUCCESS)
7489 {
7490 *pu32Dst = u32Value;
7491 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_DATA_W);
7492 }
7493 return rc;
7494}
7495
7496
7497/**
7498 * Stores a data qword.
7499 *
7500 * @returns Strict VBox status code.
7501 * @param pIemCpu The IEM per CPU data.
7502 * @param iSegReg The index of the segment register to use for
7503 * this access. The base and limits are checked.
7504 * @param GCPtrMem The address of the guest memory.
7505 * @param u64Value The value to store.
7506 */
7507IEM_STATIC VBOXSTRICTRC iemMemStoreDataU64(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
7508{
7509 /* The lazy approach for now... */
7510 uint64_t *pu64Dst;
7511 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7512 if (rc == VINF_SUCCESS)
7513 {
7514 *pu64Dst = u64Value;
7515 rc = iemMemCommitAndUnmap(pIemCpu, pu64Dst, IEM_ACCESS_DATA_W);
7516 }
7517 return rc;
7518}
7519
7520
7521/**
7522 * Stores a data dqword.
7523 *
7524 * @returns Strict VBox status code.
7525 * @param pIemCpu The IEM per CPU data.
7526 * @param iSegReg The index of the segment register to use for
7527 * this access. The base and limits are checked.
7528 * @param GCPtrMem The address of the guest memory.
7529 * @param u128Value The value to store.
7530 */
7531IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint128_t u128Value)
7532{
7533 /* The lazy approach for now... */
7534 uint128_t *pu128Dst;
7535 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7536 if (rc == VINF_SUCCESS)
7537 {
7538 *pu128Dst = u128Value;
7539 rc = iemMemCommitAndUnmap(pIemCpu, pu128Dst, IEM_ACCESS_DATA_W);
7540 }
7541 return rc;
7542}
7543
7544
7545/**
7546 * Stores a data dqword, SSE aligned.
7547 *
7548 * @returns Strict VBox status code.
7549 * @param pIemCpu The IEM per CPU data.
7550 * @param iSegReg The index of the segment register to use for
7551 * this access. The base and limits are checked.
7552 * @param GCPtrMem The address of the guest memory.
7553 * @param u128Value The value to store.
7554 */
7555IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint128_t u128Value)
7556{
7557 /* The lazy approach for now... */
7558 if ( (GCPtrMem & 15)
7559 && !(pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXSCR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
7560 return iemRaiseGeneralProtectionFault0(pIemCpu);
7561
7562 uint128_t *pu128Dst;
7563 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7564 if (rc == VINF_SUCCESS)
7565 {
7566 *pu128Dst = u128Value;
7567 rc = iemMemCommitAndUnmap(pIemCpu, pu128Dst, IEM_ACCESS_DATA_W);
7568 }
7569 return rc;
7570}
7571
7572
7573/**
7574 * Stores a descriptor register (sgdt, sidt).
7575 *
7576 * @returns Strict VBox status code.
7577 * @param pIemCpu The IEM per CPU data.
7578 * @param cbLimit The limit.
7579 * @param GCPtrBase The base address.
7580 * @param iSegReg The index of the segment register to use for
7581 * this access. The base and limits are checked.
7582 * @param GCPtrMem The address of the guest memory.
7583 * @param enmOpSize The effective operand size.
7584 */
7585IEM_STATIC VBOXSTRICTRC
7586iemMemStoreDataXdtr(PIEMCPU pIemCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem, IEMMODE enmOpSize)
7587{
7588 /** @todo Looks like SIDT and SGDT perform two separate writes here,
7589 * first the limit, then base. Should the base write hit a segment
7590 * limit, the first write isn't rolled back. */
7591 uint8_t *pu8Src;
7592 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu,
7593 (void **)&pu8Src,
7594 enmOpSize == IEMMODE_64BIT
7595 ? 2 + 8
7596 : enmOpSize == IEMMODE_32BIT
7597 ? 2 + 4
7598                                       : 2 + 4,
7599 iSegReg,
7600 GCPtrMem,
7601 IEM_ACCESS_DATA_W);
7602 if (rcStrict == VINF_SUCCESS)
7603 {
7604 pu8Src[0] = RT_BYTE1(cbLimit);
7605 pu8Src[1] = RT_BYTE2(cbLimit);
7606 pu8Src[2] = RT_BYTE1(GCPtrBase);
7607 pu8Src[3] = RT_BYTE2(GCPtrBase);
7608 pu8Src[4] = RT_BYTE3(GCPtrBase);
7609 if (enmOpSize == IEMMODE_16BIT)
7610 pu8Src[5] = IEM_GET_TARGET_CPU(pIemCpu) <= IEMTARGETCPU_286 ? 0xff : 0x00;
7611 else
7612 {
7613 pu8Src[5] = RT_BYTE4(GCPtrBase);
7614 if (enmOpSize == IEMMODE_64BIT)
7615 {
7616 pu8Src[6] = RT_BYTE5(GCPtrBase);
7617 pu8Src[7] = RT_BYTE6(GCPtrBase);
7618 pu8Src[8] = RT_BYTE7(GCPtrBase);
7619 pu8Src[9] = RT_BYTE8(GCPtrBase);
7620 }
7621 }
7622 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pu8Src, IEM_ACCESS_DATA_W);
7623 }
7624 return rcStrict;
7625}
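/*
 * Illustrative sketch (not part of the build): the value the 16-bit operand
 * size path of iemMemStoreDataXdtr above puts in the sixth byte (the high
 * base byte it cannot otherwise produce).  The helper name is made up.
 *
 *    uint8_t iemSketchXdtrHighByte(uint8_t uTargetCpu)
 *    {
 *        // A 286 target stores 0xff here, later CPU targets store 0x00.
 *        return uTargetCpu <= IEMTARGETCPU_286 ? 0xff : 0x00;
 *    }
 */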
7626
7627
7628/**
7629 * Pushes a word onto the stack.
7630 *
7631 * @returns Strict VBox status code.
7632 * @param pIemCpu The IEM per CPU data.
7633 * @param u16Value The value to push.
7634 */
7635IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PIEMCPU pIemCpu, uint16_t u16Value)
7636{
7637 /* Increment the stack pointer. */
7638 uint64_t uNewRsp;
7639 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7640 RTGCPTR GCPtrTop = iemRegGetRspForPush(pIemCpu, pCtx, 2, &uNewRsp);
7641
7642 /* Write the word the lazy way. */
7643 uint16_t *pu16Dst;
7644 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7645 if (rc == VINF_SUCCESS)
7646 {
7647 *pu16Dst = u16Value;
7648 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_STACK_W);
7649 }
7650
7651    /* Commit the new RSP value unless an access handler made trouble. */
7652 if (rc == VINF_SUCCESS)
7653 pCtx->rsp = uNewRsp;
7654
7655 return rc;
7656}
7657
7658
7659/**
7660 * Pushes a dword onto the stack.
7661 *
7662 * @returns Strict VBox status code.
7663 * @param pIemCpu The IEM per CPU data.
7664 * @param u32Value The value to push.
7665 */
7666IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PIEMCPU pIemCpu, uint32_t u32Value)
7667{
7668 /* Increment the stack pointer. */
7669 uint64_t uNewRsp;
7670 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7671 RTGCPTR GCPtrTop = iemRegGetRspForPush(pIemCpu, pCtx, 4, &uNewRsp);
7672
7673 /* Write the dword the lazy way. */
7674 uint32_t *pu32Dst;
7675 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7676 if (rc == VINF_SUCCESS)
7677 {
7678 *pu32Dst = u32Value;
7679 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_STACK_W);
7680 }
7681
7682    /* Commit the new RSP value unless an access handler made trouble. */
7683 if (rc == VINF_SUCCESS)
7684 pCtx->rsp = uNewRsp;
7685
7686 return rc;
7687}
7688
7689
7690/**
7691 * Pushes a dword segment register value onto the stack.
7692 *
7693 * @returns Strict VBox status code.
7694 * @param pIemCpu The IEM per CPU data.
7695 * @param u32Value The value to push.
7696 */
7697IEM_STATIC VBOXSTRICTRC iemMemStackPushU32SReg(PIEMCPU pIemCpu, uint32_t u32Value)
7698{
7699 /* Increment the stack pointer. */
7700 uint64_t uNewRsp;
7701 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7702 RTGCPTR GCPtrTop = iemRegGetRspForPush(pIemCpu, pCtx, 4, &uNewRsp);
7703
7704 VBOXSTRICTRC rc;
7705 if (IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
7706 {
7707 /* The recompiler writes a full dword. */
7708 uint32_t *pu32Dst;
7709 rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7710 if (rc == VINF_SUCCESS)
7711 {
7712 *pu32Dst = u32Value;
7713 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_STACK_W);
7714 }
7715 }
7716 else
7717 {
7718        /* The Intel docs talk about zero extending the selector register
7719           value.  My actual Intel CPU here might be zero extending the value,
7720           but it still only writes the lower word... */
7721 /** @todo Test this on new HW and on AMD and in 64-bit mode. Also test what
7722         * happens when crossing a page boundary, is the high word checked
7723 * for write accessibility or not? Probably it is. What about segment limits?
7724 * It appears this behavior is also shared with trap error codes.
7725 *
7726 * Docs indicate the behavior changed maybe in Pentium or Pentium Pro. Check
7727 * ancient hardware when it actually did change. */
7728 uint16_t *pu16Dst;
7729 rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(uint32_t), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_RW);
7730 if (rc == VINF_SUCCESS)
7731 {
7732 *pu16Dst = (uint16_t)u32Value;
7733 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_STACK_RW);
7734 }
7735 }
7736
7737    /* Commit the new RSP value unless an access handler made trouble. */
7738 if (rc == VINF_SUCCESS)
7739 pCtx->rsp = uNewRsp;
7740
7741 return rc;
7742}
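/*
 * Illustrative worked example (not part of the build) for the non-recompiler
 * path of iemMemStackPushU32SReg above: pushing a selector with a 32-bit
 * operand size reserves a dword on the stack but only writes the low word.
 * The addresses and values below are made up.
 *
 *    // before:  ESP = 0x1000, dword at 0x0FFC = 0xDDCCBBAA
 *    // push ds  (ds = 0x0023, 32-bit operand size)
 *    // after:   ESP = 0x0FFC, dword at 0x0FFC = 0xDDCC0023  (high word untouched)
 */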
7743
7744
7745/**
7746 * Pushes a qword onto the stack.
7747 *
7748 * @returns Strict VBox status code.
7749 * @param pIemCpu The IEM per CPU data.
7750 * @param u64Value The value to push.
7751 */
7752IEM_STATIC VBOXSTRICTRC iemMemStackPushU64(PIEMCPU pIemCpu, uint64_t u64Value)
7753{
7754 /* Increment the stack pointer. */
7755 uint64_t uNewRsp;
7756 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7757 RTGCPTR GCPtrTop = iemRegGetRspForPush(pIemCpu, pCtx, 8, &uNewRsp);
7758
7759 /* Write the word the lazy way. */
7760 uint64_t *pu64Dst;
7761 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7762 if (rc == VINF_SUCCESS)
7763 {
7764 *pu64Dst = u64Value;
7765 rc = iemMemCommitAndUnmap(pIemCpu, pu64Dst, IEM_ACCESS_STACK_W);
7766 }
7767
7768    /* Commit the new RSP value unless an access handler made trouble. */
7769 if (rc == VINF_SUCCESS)
7770 pCtx->rsp = uNewRsp;
7771
7772 return rc;
7773}
7774
7775
7776/**
7777 * Pops a word from the stack.
7778 *
7779 * @returns Strict VBox status code.
7780 * @param pIemCpu The IEM per CPU data.
7781 * @param pu16Value Where to store the popped value.
7782 */
7783IEM_STATIC VBOXSTRICTRC iemMemStackPopU16(PIEMCPU pIemCpu, uint16_t *pu16Value)
7784{
7785 /* Increment the stack pointer. */
7786 uint64_t uNewRsp;
7787 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7788 RTGCPTR GCPtrTop = iemRegGetRspForPop(pIemCpu, pCtx, 2, &uNewRsp);
7789
7790 /* Write the word the lazy way. */
7791 uint16_t const *pu16Src;
7792 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
7793 if (rc == VINF_SUCCESS)
7794 {
7795 *pu16Value = *pu16Src;
7796 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
7797
7798 /* Commit the new RSP value. */
7799 if (rc == VINF_SUCCESS)
7800 pCtx->rsp = uNewRsp;
7801 }
7802
7803 return rc;
7804}
7805
7806
7807/**
7808 * Pops a dword from the stack.
7809 *
7810 * @returns Strict VBox status code.
7811 * @param pIemCpu The IEM per CPU data.
7812 * @param pu32Value Where to store the popped value.
7813 */
7814IEM_STATIC VBOXSTRICTRC iemMemStackPopU32(PIEMCPU pIemCpu, uint32_t *pu32Value)
7815{
7816 /* Increment the stack pointer. */
7817 uint64_t uNewRsp;
7818 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7819 RTGCPTR GCPtrTop = iemRegGetRspForPop(pIemCpu, pCtx, 4, &uNewRsp);
7820
7821 /* Write the word the lazy way. */
7822 uint32_t const *pu32Src;
7823 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
7824 if (rc == VINF_SUCCESS)
7825 {
7826 *pu32Value = *pu32Src;
7827 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
7828
7829 /* Commit the new RSP value. */
7830 if (rc == VINF_SUCCESS)
7831 pCtx->rsp = uNewRsp;
7832 }
7833
7834 return rc;
7835}
7836
7837
7838/**
7839 * Pops a qword from the stack.
7840 *
7841 * @returns Strict VBox status code.
7842 * @param pIemCpu The IEM per CPU data.
7843 * @param pu64Value Where to store the popped value.
7844 */
7845IEM_STATIC VBOXSTRICTRC iemMemStackPopU64(PIEMCPU pIemCpu, uint64_t *pu64Value)
7846{
7847 /* Increment the stack pointer. */
7848 uint64_t uNewRsp;
7849 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7850 RTGCPTR GCPtrTop = iemRegGetRspForPop(pIemCpu, pCtx, 8, &uNewRsp);
7851
7852 /* Write the word the lazy way. */
7853 uint64_t const *pu64Src;
7854 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
7855 if (rc == VINF_SUCCESS)
7856 {
7857 *pu64Value = *pu64Src;
7858 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
7859
7860 /* Commit the new RSP value. */
7861 if (rc == VINF_SUCCESS)
7862 pCtx->rsp = uNewRsp;
7863 }
7864
7865 return rc;
7866}
7867
7868
7869/**
7870 * Pushes a word onto the stack, using a temporary stack pointer.
7871 *
7872 * @returns Strict VBox status code.
7873 * @param pIemCpu The IEM per CPU data.
7874 * @param u16Value The value to push.
7875 * @param pTmpRsp Pointer to the temporary stack pointer.
7876 */
7877IEM_STATIC VBOXSTRICTRC iemMemStackPushU16Ex(PIEMCPU pIemCpu, uint16_t u16Value, PRTUINT64U pTmpRsp)
7878{
7879 /* Increment the stack pointer. */
7880 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7881 RTUINT64U NewRsp = *pTmpRsp;
7882 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pIemCpu, pCtx, &NewRsp, 2);
7883
7884 /* Write the word the lazy way. */
7885 uint16_t *pu16Dst;
7886 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7887 if (rc == VINF_SUCCESS)
7888 {
7889 *pu16Dst = u16Value;
7890 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_STACK_W);
7891 }
7892
7893    /* Commit the new RSP value unless an access handler made trouble. */
7894 if (rc == VINF_SUCCESS)
7895 *pTmpRsp = NewRsp;
7896
7897 return rc;
7898}
7899
7900
7901/**
7902 * Pushes a dword onto the stack, using a temporary stack pointer.
7903 *
7904 * @returns Strict VBox status code.
7905 * @param pIemCpu The IEM per CPU data.
7906 * @param u32Value The value to push.
7907 * @param pTmpRsp Pointer to the temporary stack pointer.
7908 */
7909IEM_STATIC VBOXSTRICTRC iemMemStackPushU32Ex(PIEMCPU pIemCpu, uint32_t u32Value, PRTUINT64U pTmpRsp)
7910{
7911 /* Increment the stack pointer. */
7912 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7913 RTUINT64U NewRsp = *pTmpRsp;
7914 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pIemCpu, pCtx, &NewRsp, 4);
7915
7916 /* Write the word the lazy way. */
7917 uint32_t *pu32Dst;
7918 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7919 if (rc == VINF_SUCCESS)
7920 {
7921 *pu32Dst = u32Value;
7922 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_STACK_W);
7923 }
7924
7925    /* Commit the new RSP value unless an access handler made trouble. */
7926 if (rc == VINF_SUCCESS)
7927 *pTmpRsp = NewRsp;
7928
7929 return rc;
7930}
7931
7932
7933/**
7934 * Pushes a qword onto the stack, using a temporary stack pointer.
7935 *
7936 * @returns Strict VBox status code.
7937 * @param pIemCpu The IEM per CPU data.
7938 * @param u64Value The value to push.
7939 * @param pTmpRsp Pointer to the temporary stack pointer.
7940 */
7941IEM_STATIC VBOXSTRICTRC iemMemStackPushU64Ex(PIEMCPU pIemCpu, uint64_t u64Value, PRTUINT64U pTmpRsp)
7942{
7943 /* Increment the stack pointer. */
7944 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7945 RTUINT64U NewRsp = *pTmpRsp;
7946 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pIemCpu, pCtx, &NewRsp, 8);
7947
7948 /* Write the word the lazy way. */
7949 uint64_t *pu64Dst;
7950 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7951 if (rc == VINF_SUCCESS)
7952 {
7953 *pu64Dst = u64Value;
7954 rc = iemMemCommitAndUnmap(pIemCpu, pu64Dst, IEM_ACCESS_STACK_W);
7955 }
7956
7957    /* Commit the new RSP value unless an access handler made trouble. */
7958 if (rc == VINF_SUCCESS)
7959 *pTmpRsp = NewRsp;
7960
7961 return rc;
7962}
7963
7964
7965/**
7966 * Pops a word from the stack, using a temporary stack pointer.
7967 *
7968 * @returns Strict VBox status code.
7969 * @param pIemCpu The IEM per CPU data.
7970 * @param pu16Value Where to store the popped value.
7971 * @param pTmpRsp Pointer to the temporary stack pointer.
7972 */
7973IEM_STATIC VBOXSTRICTRC iemMemStackPopU16Ex(PIEMCPU pIemCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp)
7974{
7975 /* Increment the stack pointer. */
7976 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7977 RTUINT64U NewRsp = *pTmpRsp;
7978 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pIemCpu, pCtx, &NewRsp, 2);
7979
7980 /* Write the word the lazy way. */
7981 uint16_t const *pu16Src;
7982 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
7983 if (rc == VINF_SUCCESS)
7984 {
7985 *pu16Value = *pu16Src;
7986 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
7987
7988 /* Commit the new RSP value. */
7989 if (rc == VINF_SUCCESS)
7990 *pTmpRsp = NewRsp;
7991 }
7992
7993 return rc;
7994}
7995
7996
7997/**
7998 * Pops a dword from the stack, using a temporary stack pointer.
7999 *
8000 * @returns Strict VBox status code.
8001 * @param pIemCpu The IEM per CPU data.
8002 * @param pu32Value Where to store the popped value.
8003 * @param pTmpRsp Pointer to the temporary stack pointer.
8004 */
8005IEM_STATIC VBOXSTRICTRC iemMemStackPopU32Ex(PIEMCPU pIemCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp)
8006{
8007 /* Increment the stack pointer. */
8008 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
8009 RTUINT64U NewRsp = *pTmpRsp;
8010 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pIemCpu, pCtx, &NewRsp, 4);
8011
8012 /* Write the word the lazy way. */
8013 uint32_t const *pu32Src;
8014 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
8015 if (rc == VINF_SUCCESS)
8016 {
8017 *pu32Value = *pu32Src;
8018 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
8019
8020 /* Commit the new RSP value. */
8021 if (rc == VINF_SUCCESS)
8022 *pTmpRsp = NewRsp;
8023 }
8024
8025 return rc;
8026}
8027
8028
8029/**
8030 * Pops a qword from the stack, using a temporary stack pointer.
8031 *
8032 * @returns Strict VBox status code.
8033 * @param pIemCpu The IEM per CPU data.
8034 * @param pu64Value Where to store the popped value.
8035 * @param pTmpRsp Pointer to the temporary stack pointer.
8036 */
8037IEM_STATIC VBOXSTRICTRC iemMemStackPopU64Ex(PIEMCPU pIemCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp)
8038{
8039 /* Increment the stack pointer. */
8040 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
8041 RTUINT64U NewRsp = *pTmpRsp;
8042 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pIemCpu, pCtx, &NewRsp, 8);
8043
8044 /* Read the qword the lazy way. */
8045 uint64_t const *pu64Src;
8046 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
8047 if (rcStrict == VINF_SUCCESS)
8048 {
8049 *pu64Value = *pu64Src;
8050 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
8051
8052 /* Commit the new RSP value. */
8053 if (rcStrict == VINF_SUCCESS)
8054 *pTmpRsp = NewRsp;
8055 }
8056
8057 return rcStrict;
8058}
8059
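/*
 * Illustrative sketch (not part of the build): the *Ex push/pop helpers operate on a
 * caller-provided RSP copy, so a multi-step stack operation only commits the real
 * stack pointer once every access has succeeded.  The variable names below are
 * hypothetical.
 *
 *      RTUINT64U    TmpRsp;
 *      TmpRsp.u = pCtx->rsp;                                // work on a copy of RSP
 *      VBOXSTRICTRC rcStrict = iemMemStackPushU64Ex(pIemCpu, u64First, &TmpRsp);
 *      if (rcStrict == VINF_SUCCESS)
 *          rcStrict = iemMemStackPushU64Ex(pIemCpu, u64Second, &TmpRsp);
 *      if (rcStrict == VINF_SUCCESS)
 *          pCtx->rsp = TmpRsp.u;                            // commit only when all pushes worked
 */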
8060
8061/**
8062 * Begin a special stack push (used by interrupts, exceptions and such).
8063 *
8064 * This will raise \#SS or \#PF if appropriate.
8065 *
8066 * @returns Strict VBox status code.
8067 * @param pIemCpu The IEM per CPU data.
8068 * @param cbMem The number of bytes to push onto the stack.
8069 * @param ppvMem Where to return the pointer to the stack memory.
8070 * As with the other memory functions this could be
8071 * direct access or bounce buffered access, so
8072 * don't commit registers until the commit call
8073 * succeeds.
8074 * @param puNewRsp Where to return the new RSP value. This must be
8075 * passed unchanged to
8076 * iemMemStackPushCommitSpecial().
8077 */
8078IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PIEMCPU pIemCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp)
8079{
8080 Assert(cbMem < UINT8_MAX);
8081 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
8082 RTGCPTR GCPtrTop = iemRegGetRspForPush(pIemCpu, pCtx, (uint8_t)cbMem, puNewRsp);
8083 return iemMemMap(pIemCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
8084}
8085
8086
8087/**
8088 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
8089 *
8090 * This will update the rSP.
8091 *
8092 * @returns Strict VBox status code.
8093 * @param pIemCpu The IEM per CPU data.
8094 * @param pvMem The pointer returned by
8095 * iemMemStackPushBeginSpecial().
8096 * @param uNewRsp The new RSP value returned by
8097 * iemMemStackPushBeginSpecial().
8098 */
8099IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PIEMCPU pIemCpu, void *pvMem, uint64_t uNewRsp)
8100{
8101 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pIemCpu, pvMem, IEM_ACCESS_STACK_W);
8102 if (rcStrict == VINF_SUCCESS)
8103 pIemCpu->CTX_SUFF(pCtx)->rsp = uNewRsp;
8104 return rcStrict;
8105}
8106
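/*
 * Illustrative sketch (not part of the build): the special push helpers are used as a
 * begin/commit pair, e.g. when building a real-mode exception/interrupt stack frame.
 * The frame layout and local names below are hypothetical.
 *
 *      uint64_t     uNewRsp;
 *      uint16_t    *pu16Frame;
 *      VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pIemCpu, 6, (void **)&pu16Frame, &uNewRsp);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *      pu16Frame[2] = (uint16_t)pCtx->eflags.u;             // fill in the frame first...
 *      pu16Frame[1] = pCtx->cs.Sel;
 *      pu16Frame[0] = (uint16_t)pCtx->rip;
 *      rcStrict = iemMemStackPushCommitSpecial(pIemCpu, pu16Frame, uNewRsp);
 *      // ...RSP is only updated by the commit call above.
 */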
8107
8108/**
8109 * Begin a special stack pop (used by iret, retf and such).
8110 *
8111 * This will raise \#SS or \#PF if appropriate.
8112 *
8113 * @returns Strict VBox status code.
8114 * @param pIemCpu The IEM per CPU data.
8115 * @param cbMem The number of bytes to pop off the stack.
8116 * @param ppvMem Where to return the pointer to the stack memory.
8117 * @param puNewRsp Where to return the new RSP value. This must be
8118 * passed unchanged to
8119 * iemMemStackPopCommitSpecial() or applied
8120 * manually if iemMemStackPopDoneSpecial() is used.
8121 */
8122IEM_STATIC VBOXSTRICTRC iemMemStackPopBeginSpecial(PIEMCPU pIemCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
8123{
8124 Assert(cbMem < UINT8_MAX);
8125 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
8126 RTGCPTR GCPtrTop = iemRegGetRspForPop(pIemCpu, pCtx, (uint8_t)cbMem, puNewRsp);
8127 return iemMemMap(pIemCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
8128}
8129
8130
8131/**
8132 * Continue a special stack pop (used by iret and retf).
8133 *
8134 * This will raise \#SS or \#PF if appropriate.
8135 *
8136 * @returns Strict VBox status code.
8137 * @param pIemCpu The IEM per CPU data.
8138 * @param cbMem The number of bytes to pop off the stack.
8139 * @param ppvMem Where to return the pointer to the stack memory.
8140 * @param puNewRsp Where to return the new RSP value. This must be
8141 * passed unchanged to
8142 * iemMemStackPopCommitSpecial() or applied
8143 * manually if iemMemStackPopDoneSpecial() is used.
8144 */
8145IEM_STATIC VBOXSTRICTRC iemMemStackPopContinueSpecial(PIEMCPU pIemCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
8146{
8147 Assert(cbMem < UINT8_MAX);
8148 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
8149 RTUINT64U NewRsp;
8150 NewRsp.u = *puNewRsp;
8151 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pIemCpu, pCtx, &NewRsp, 8);
8152 *puNewRsp = NewRsp.u;
8153 return iemMemMap(pIemCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
8154}
8155
8156
8157/**
8158 * Commits a special stack pop (started by iemMemStackPopBeginSpecial).
8159 *
8160 * This will update the rSP.
8161 *
8162 * @returns Strict VBox status code.
8163 * @param pIemCpu The IEM per CPU data.
8164 * @param pvMem The pointer returned by
8165 * iemMemStackPopBeginSpecial().
8166 * @param uNewRsp The new RSP value returned by
8167 * iemMemStackPopBeginSpecial().
8168 */
8169IEM_STATIC VBOXSTRICTRC iemMemStackPopCommitSpecial(PIEMCPU pIemCpu, void const *pvMem, uint64_t uNewRsp)
8170{
8171 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
8172 if (rcStrict == VINF_SUCCESS)
8173 pIemCpu->CTX_SUFF(pCtx)->rsp = uNewRsp;
8174 return rcStrict;
8175}
8176
8177
8178/**
8179 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
8180 * iemMemStackPopContinueSpecial).
8181 *
8182 * The caller will manually commit the rSP.
8183 *
8184 * @returns Strict VBox status code.
8185 * @param pIemCpu The IEM per CPU data.
8186 * @param pvMem The pointer returned by
8187 * iemMemStackPopBeginSpecial() or
8188 * iemMemStackPopContinueSpecial().
8189 */
8190IEM_STATIC VBOXSTRICTRC iemMemStackPopDoneSpecial(PIEMCPU pIemCpu, void const *pvMem)
8191{
8192 return iemMemCommitAndUnmap(pIemCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
8193}
8194
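/*
 * Illustrative sketch (not part of the build): a typical iret-style sequence pops the
 * frame with the Begin/Done pair and commits RSP by hand once the popped values have
 * been validated.  Local names are hypothetical.
 *
 *      uint64_t        uNewRsp;
 *      uint16_t const *pu16Frame;
 *      VBOXSTRICTRC    rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 6, (void const **)&pu16Frame, &uNewRsp);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *      uint16_t uNewIp    = pu16Frame[0];
 *      uint16_t uNewCs    = pu16Frame[1];
 *      uint16_t uNewFlags = pu16Frame[2];
 *      rcStrict = iemMemStackPopDoneSpecial(pIemCpu, pu16Frame);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *      // ...validate uNewCs/uNewIp/uNewFlags, then commit manually: pCtx->rsp = uNewRsp;
 */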
8195
8196/**
8197 * Fetches a system table byte.
8198 *
8199 * @returns Strict VBox status code.
8200 * @param pIemCpu The IEM per CPU data.
8201 * @param pbDst Where to return the byte.
8202 * @param iSegReg The index of the segment register to use for
8203 * this access. The base and limits are checked.
8204 * @param GCPtrMem The address of the guest memory.
8205 */
8206IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PIEMCPU pIemCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8207{
8208 /* The lazy approach for now... */
8209 uint8_t const *pbSrc;
8210 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pbSrc, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
8211 if (rc == VINF_SUCCESS)
8212 {
8213 *pbDst = *pbSrc;
8214 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pbSrc, IEM_ACCESS_SYS_R);
8215 }
8216 return rc;
8217}
8218
8219
8220/**
8221 * Fetches a system table word.
8222 *
8223 * @returns Strict VBox status code.
8224 * @param pIemCpu The IEM per CPU data.
8225 * @param pu16Dst Where to return the word.
8226 * @param iSegReg The index of the segment register to use for
8227 * this access. The base and limits are checked.
8228 * @param GCPtrMem The address of the guest memory.
8229 */
8230IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PIEMCPU pIemCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8231{
8232 /* The lazy approach for now... */
8233 uint16_t const *pu16Src;
8234 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
8235 if (rc == VINF_SUCCESS)
8236 {
8237 *pu16Dst = *pu16Src;
8238 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_SYS_R);
8239 }
8240 return rc;
8241}
8242
8243
8244/**
8245 * Fetches a system table dword.
8246 *
8247 * @returns Strict VBox status code.
8248 * @param pIemCpu The IEM per CPU data.
8249 * @param pu32Dst Where to return the dword.
8250 * @param iSegReg The index of the segment register to use for
8251 * this access. The base and limits are checked.
8252 * @param GCPtrMem The address of the guest memory.
8253 */
8254IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8255{
8256 /* The lazy approach for now... */
8257 uint32_t const *pu32Src;
8258 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
8259 if (rc == VINF_SUCCESS)
8260 {
8261 *pu32Dst = *pu32Src;
8262 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);
8263 }
8264 return rc;
8265}
8266
8267
8268/**
8269 * Fetches a system table qword.
8270 *
8271 * @returns Strict VBox status code.
8272 * @param pIemCpu The IEM per CPU data.
8273 * @param pu64Dst Where to return the qword.
8274 * @param iSegReg The index of the segment register to use for
8275 * this access. The base and limits are checked.
8276 * @param GCPtrMem The address of the guest memory.
8277 */
8278IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8279{
8280 /* The lazy approach for now... */
8281 uint64_t const *pu64Src;
8282 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
8283 if (rc == VINF_SUCCESS)
8284 {
8285 *pu64Dst = *pu64Src;
8286 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);
8287 }
8288 return rc;
8289}
8290
8291
8292/**
8293 * Fetches a descriptor table entry with caller specified error code.
8294 *
8295 * @returns Strict VBox status code.
8296 * @param pIemCpu The IEM per CPU.
8297 * @param pDesc Where to return the descriptor table entry.
8298 * @param uSel The selector which table entry to fetch.
8299 * @param uXcpt The exception to raise on table lookup error.
8300 * @param uErrorCode The error code associated with the exception.
8301 */
8302IEM_STATIC VBOXSTRICTRC
8303iemMemFetchSelDescWithErr(PIEMCPU pIemCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode)
8304{
8305 AssertPtr(pDesc);
8306 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
8307
8308 /** @todo did the 286 require all 8 bytes to be accessible? */
8309 /*
8310 * Get the selector table base and check bounds.
8311 */
8312 RTGCPTR GCPtrBase;
8313 if (uSel & X86_SEL_LDT)
8314 {
8315 if ( !pCtx->ldtr.Attr.n.u1Present
8316 || (uSel | X86_SEL_RPL_LDT) > pCtx->ldtr.u32Limit )
8317 {
8318 Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
8319 uSel, pCtx->ldtr.u32Limit, pCtx->ldtr.Sel));
8320 return iemRaiseXcptOrInt(pIemCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
8321 uErrorCode, 0);
8322 }
8323
8324 Assert(pCtx->ldtr.Attr.n.u1Present);
8325 GCPtrBase = pCtx->ldtr.u64Base;
8326 }
8327 else
8328 {
8329 if ((uSel | X86_SEL_RPL_LDT) > pCtx->gdtr.cbGdt)
8330 {
8331 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pCtx->gdtr.cbGdt));
8332 return iemRaiseXcptOrInt(pIemCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
8333 uErrorCode, 0);
8334 }
8335 GCPtrBase = pCtx->gdtr.pGdt;
8336 }
8337
8338 /*
8339 * Read the legacy descriptor and maybe the long mode extensions if
8340 * required.
8341 */
8342 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pIemCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
8343 if (rcStrict == VINF_SUCCESS)
8344 {
8345 if ( !IEM_IS_LONG_MODE(pIemCpu)
8346 || pDesc->Legacy.Gen.u1DescType)
8347 pDesc->Long.au64[1] = 0;
8348 else if ((uint32_t)(uSel | X86_SEL_RPL_LDT) + 8 <= (uSel & X86_SEL_LDT ? pCtx->ldtr.u32Limit : pCtx->gdtr.cbGdt))
8349 rcStrict = iemMemFetchSysU64(pIemCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
8350 else
8351 {
8352 Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
8353 /** @todo is this the right exception? */
8354 return iemRaiseXcptOrInt(pIemCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
8355 }
8356 }
8357 return rcStrict;
8358}
8359
8360
8361/**
8362 * Fetches a descriptor table entry.
8363 *
8364 * @returns Strict VBox status code.
8365 * @param pIemCpu The IEM per CPU.
8366 * @param pDesc Where to return the descriptor table entry.
8367 * @param uSel The selector which table entry to fetch.
8368 * @param uXcpt The exception to raise on table lookup error.
8369 */
8370IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PIEMCPU pIemCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt)
8371{
8372 return iemMemFetchSelDescWithErr(pIemCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
8373}
8374
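/*
 * Illustrative sketch (not part of the build): a typical caller fetches the descriptor,
 * validates it, and finally sets the accessed bit via iemMemMarkSelDescAccessed() (see
 * below).  The checks are reduced to a minimum here and the names are hypothetical.
 *
 *      IEMSELDESC   Desc;
 *      VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uSel, X86_XCPT_GP);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *      // ...check descriptor type, DPL and present bit...
 *      if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
 *      {
 *          rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uSel);
 *          if (rcStrict != VINF_SUCCESS)
 *              return rcStrict;
 *      }
 */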
8375
8376/**
8377 * Fakes a long mode stack selector for SS = 0.
8378 *
8379 * @param pDescSs Where to return the fake stack descriptor.
8380 * @param uDpl The DPL we want.
8381 */
8382IEM_STATIC void iemMemFakeStackSelDesc(PIEMSELDESC pDescSs, uint32_t uDpl)
8383{
8384 pDescSs->Long.au64[0] = 0;
8385 pDescSs->Long.au64[1] = 0;
8386 pDescSs->Long.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
8387 pDescSs->Long.Gen.u1DescType = 1; /* 1 = code / data, 0 = system. */
8388 pDescSs->Long.Gen.u2Dpl = uDpl;
8389 pDescSs->Long.Gen.u1Present = 1;
8390 pDescSs->Long.Gen.u1Long = 1;
8391}
8392
8393
8394/**
8395 * Marks the selector descriptor as accessed (only non-system descriptors).
8396 *
8397 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
8398 * will therefore skip the limit checks.
8399 *
8400 * @returns Strict VBox status code.
8401 * @param pIemCpu The IEM per CPU.
8402 * @param uSel The selector.
8403 */
8404IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PIEMCPU pIemCpu, uint16_t uSel)
8405{
8406 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
8407
8408 /*
8409 * Get the selector table base and calculate the entry address.
8410 */
8411 RTGCPTR GCPtr = uSel & X86_SEL_LDT
8412 ? pCtx->ldtr.u64Base
8413 : pCtx->gdtr.pGdt;
8414 GCPtr += uSel & X86_SEL_MASK;
8415
8416 /*
8417 * ASMAtomicBitSet will assert if the address is misaligned, so do some
8418 * ugly stuff to avoid this. This will make sure it's an atomic access
8419 * as well as more or less remove any question about 8-bit or 32-bit accesses.
8420 */
8421 VBOXSTRICTRC rcStrict;
8422 uint32_t volatile *pu32;
8423 if ((GCPtr & 3) == 0)
8424 {
8425 /* The normal case, map the dword containing the accessed bit (bit 40). */
8426 GCPtr += 2 + 2;
8427 rcStrict = iemMemMap(pIemCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
8428 if (rcStrict != VINF_SUCCESS)
8429 return rcStrict;
8430 ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
8431 }
8432 else
8433 {
8434 /* The misaligned GDT/LDT case, map the whole thing. */
8435 rcStrict = iemMemMap(pIemCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
8436 if (rcStrict != VINF_SUCCESS)
8437 return rcStrict;
8438 switch ((uintptr_t)pu32 & 3)
8439 {
8440 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
8441 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
8442 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
8443 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
8444 }
8445 }
8446
8447 return iemMemCommitAndUnmap(pIemCpu, (void *)pu32, IEM_ACCESS_SYS_RW);
8448}
8449
8450/** @} */
8451
8452
8453/*
8454 * Include the C/C++ implementation of the instructions.
8455 */
8456#include "IEMAllCImpl.cpp.h"
8457
8458
8459
8460/** @name "Microcode" macros.
8461 *
8462 * The idea is that we should be able to use the same code to interpret
8463 * instructions as well as to recompile them. Thus this obfuscation.
8464 *
8465 * @{
8466 */
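/*
 * Illustrative sketch (not part of the build): in the instruction decoder (a separate
 * include) an opcode handler strings these macros together roughly like this, here for
 * a hypothetical 16-bit register push:
 *
 *      IEM_MC_BEGIN(0, 1);
 *      IEM_MC_LOCAL(uint16_t, u16Value);
 *      IEM_MC_FETCH_GREG_U16(u16Value, X86_GREG_xAX);
 *      IEM_MC_PUSH_U16(u16Value);
 *      IEM_MC_ADVANCE_RIP();
 *      IEM_MC_END();
 *      return VINF_SUCCESS;
 */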
8467#define IEM_MC_BEGIN(a_cArgs, a_cLocals) {
8468#define IEM_MC_END() }
8469#define IEM_MC_PAUSE() do {} while (0)
8470#define IEM_MC_CONTINUE() do {} while (0)
8471
8472/** Internal macro. */
8473#define IEM_MC_RETURN_ON_FAILURE(a_Expr) \
8474 do \
8475 { \
8476 VBOXSTRICTRC rcStrict2 = a_Expr; \
8477 if (rcStrict2 != VINF_SUCCESS) \
8478 return rcStrict2; \
8479 } while (0)
8480
8481#define IEM_MC_ADVANCE_RIP() iemRegUpdateRipAndClearRF(pIemCpu)
8482#define IEM_MC_REL_JMP_S8(a_i8) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS8(pIemCpu, a_i8))
8483#define IEM_MC_REL_JMP_S16(a_i16) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS16(pIemCpu, a_i16))
8484#define IEM_MC_REL_JMP_S32(a_i32) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS32(pIemCpu, a_i32))
8485#define IEM_MC_SET_RIP_U16(a_u16NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pIemCpu), (a_u16NewIP)))
8486#define IEM_MC_SET_RIP_U32(a_u32NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pIemCpu), (a_u32NewIP)))
8487#define IEM_MC_SET_RIP_U64(a_u64NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pIemCpu), (a_u64NewIP)))
8488
8489#define IEM_MC_RAISE_DIVIDE_ERROR() return iemRaiseDivideError(pIemCpu)
8490#define IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() \
8491 do { \
8492 if ((pIemCpu)->CTX_SUFF(pCtx)->cr0 & (X86_CR0_EM | X86_CR0_TS)) \
8493 return iemRaiseDeviceNotAvailable(pIemCpu); \
8494 } while (0)
8495#define IEM_MC_MAYBE_RAISE_FPU_XCPT() \
8496 do { \
8497 if ((pIemCpu)->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FSW & X86_FSW_ES) \
8498 return iemRaiseMathFault(pIemCpu); \
8499 } while (0)
8500#define IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT() \
8501 do { \
8502 if ( (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
8503 || !(pIemCpu->CTX_SUFF(pCtx)->cr4 & X86_CR4_OSFXSR) \
8504 || !IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fSse2) \
8505 return iemRaiseUndefinedOpcode(pIemCpu); \
8506 if (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_TS) \
8507 return iemRaiseDeviceNotAvailable(pIemCpu); \
8508 } while (0)
8509#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT() \
8510 do { \
8511 if ( ((pIemCpu)->CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
8512 || !IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fMmx) \
8513 return iemRaiseUndefinedOpcode(pIemCpu); \
8514 if (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_TS) \
8515 return iemRaiseDeviceNotAvailable(pIemCpu); \
8516 } while (0)
8517#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT_CHECK_SSE_OR_MMXEXT() \
8518 do { \
8519 if ( ((pIemCpu)->CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
8520 || ( !IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fSse \
8521 && !IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fAmdMmxExts) ) \
8522 return iemRaiseUndefinedOpcode(pIemCpu); \
8523 if (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_TS) \
8524 return iemRaiseDeviceNotAvailable(pIemCpu); \
8525 } while (0)
8526#define IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO() \
8527 do { \
8528 if (pIemCpu->uCpl != 0) \
8529 return iemRaiseGeneralProtectionFault0(pIemCpu); \
8530 } while (0)
8531
8532
8533#define IEM_MC_LOCAL(a_Type, a_Name) a_Type a_Name
8534#define IEM_MC_LOCAL_CONST(a_Type, a_Name, a_Value) a_Type const a_Name = (a_Value)
8535#define IEM_MC_REF_LOCAL(a_pRefArg, a_Local) (a_pRefArg) = &(a_Local)
8536#define IEM_MC_ARG(a_Type, a_Name, a_iArg) a_Type a_Name
8537#define IEM_MC_ARG_CONST(a_Type, a_Name, a_Value, a_iArg) a_Type const a_Name = (a_Value)
8538#define IEM_MC_ARG_LOCAL_REF(a_Type, a_Name, a_Local, a_iArg) a_Type const a_Name = &(a_Local)
8539#define IEM_MC_ARG_LOCAL_EFLAGS(a_pName, a_Name, a_iArg) \
8540 uint32_t a_Name; \
8541 uint32_t *a_pName = &a_Name
8542#define IEM_MC_COMMIT_EFLAGS(a_EFlags) \
8543 do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u = (a_EFlags); Assert((pIemCpu)->CTX_SUFF(pCtx)->eflags.u & X86_EFL_1); } while (0)
8544
8545#define IEM_MC_ASSIGN(a_VarOrArg, a_CVariableOrConst) (a_VarOrArg) = (a_CVariableOrConst)
8546#define IEM_MC_ASSIGN_TO_SMALLER IEM_MC_ASSIGN
8547
8548#define IEM_MC_FETCH_GREG_U8(a_u8Dst, a_iGReg) (a_u8Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
8549#define IEM_MC_FETCH_GREG_U8_ZX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
8550#define IEM_MC_FETCH_GREG_U8_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
8551#define IEM_MC_FETCH_GREG_U8_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
8552#define IEM_MC_FETCH_GREG_U8_SX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = (int8_t)iemGRegFetchU8(pIemCpu, (a_iGReg))
8553#define IEM_MC_FETCH_GREG_U8_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int8_t)iemGRegFetchU8(pIemCpu, (a_iGReg))
8554#define IEM_MC_FETCH_GREG_U8_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int8_t)iemGRegFetchU8(pIemCpu, (a_iGReg))
8555#define IEM_MC_FETCH_GREG_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU16(pIemCpu, (a_iGReg))
8556#define IEM_MC_FETCH_GREG_U16_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU16(pIemCpu, (a_iGReg))
8557#define IEM_MC_FETCH_GREG_U16_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU16(pIemCpu, (a_iGReg))
8558#define IEM_MC_FETCH_GREG_U16_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int16_t)iemGRegFetchU16(pIemCpu, (a_iGReg))
8559#define IEM_MC_FETCH_GREG_U16_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int16_t)iemGRegFetchU16(pIemCpu, (a_iGReg))
8560#define IEM_MC_FETCH_GREG_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU32(pIemCpu, (a_iGReg))
8561#define IEM_MC_FETCH_GREG_U32_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU32(pIemCpu, (a_iGReg))
8562#define IEM_MC_FETCH_GREG_U32_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int32_t)iemGRegFetchU32(pIemCpu, (a_iGReg))
8563#define IEM_MC_FETCH_GREG_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU64(pIemCpu, (a_iGReg))
8564#define IEM_MC_FETCH_GREG_U64_ZX_U64 IEM_MC_FETCH_GREG_U64
8565#define IEM_MC_FETCH_SREG_U16(a_u16Dst, a_iSReg) (a_u16Dst) = iemSRegFetchU16(pIemCpu, (a_iSReg))
8566#define IEM_MC_FETCH_SREG_ZX_U32(a_u32Dst, a_iSReg) (a_u32Dst) = iemSRegFetchU16(pIemCpu, (a_iSReg))
8567#define IEM_MC_FETCH_SREG_ZX_U64(a_u64Dst, a_iSReg) (a_u64Dst) = iemSRegFetchU16(pIemCpu, (a_iSReg))
8568#define IEM_MC_FETCH_CR0_U16(a_u16Dst) (a_u16Dst) = (uint16_t)(pIemCpu)->CTX_SUFF(pCtx)->cr0
8569#define IEM_MC_FETCH_CR0_U32(a_u32Dst) (a_u32Dst) = (uint32_t)(pIemCpu)->CTX_SUFF(pCtx)->cr0
8570#define IEM_MC_FETCH_CR0_U64(a_u64Dst) (a_u64Dst) = (pIemCpu)->CTX_SUFF(pCtx)->cr0
8571#define IEM_MC_FETCH_LDTR_U16(a_u16Dst) (a_u16Dst) = (pIemCpu)->CTX_SUFF(pCtx)->ldtr.Sel
8572#define IEM_MC_FETCH_LDTR_U32(a_u32Dst) (a_u32Dst) = (pIemCpu)->CTX_SUFF(pCtx)->ldtr.Sel
8573#define IEM_MC_FETCH_LDTR_U64(a_u64Dst) (a_u64Dst) = (pIemCpu)->CTX_SUFF(pCtx)->ldtr.Sel
8574#define IEM_MC_FETCH_TR_U16(a_u16Dst) (a_u16Dst) = (pIemCpu)->CTX_SUFF(pCtx)->tr.Sel
8575#define IEM_MC_FETCH_TR_U32(a_u32Dst) (a_u32Dst) = (pIemCpu)->CTX_SUFF(pCtx)->tr.Sel
8576#define IEM_MC_FETCH_TR_U64(a_u64Dst) (a_u64Dst) = (pIemCpu)->CTX_SUFF(pCtx)->tr.Sel
8577/** @note Not for IOPL or IF testing or modification. */
8578#define IEM_MC_FETCH_EFLAGS(a_EFlags) (a_EFlags) = (pIemCpu)->CTX_SUFF(pCtx)->eflags.u
8579#define IEM_MC_FETCH_EFLAGS_U8(a_EFlags) (a_EFlags) = (uint8_t)(pIemCpu)->CTX_SUFF(pCtx)->eflags.u
8580#define IEM_MC_FETCH_FSW(a_u16Fsw) (a_u16Fsw) = pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FSW
8581#define IEM_MC_FETCH_FCW(a_u16Fcw) (a_u16Fcw) = pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FCW
8582
8583#define IEM_MC_STORE_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8(pIemCpu, (a_iGReg)) = (a_u8Value)
8584#define IEM_MC_STORE_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) = (a_u16Value)
8585#define IEM_MC_STORE_GREG_U32(a_iGReg, a_u32Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) = (uint32_t)(a_u32Value) /* clear high bits. */
8586#define IEM_MC_STORE_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) = (a_u64Value)
8587#define IEM_MC_STORE_GREG_U8_CONST IEM_MC_STORE_GREG_U8
8588#define IEM_MC_STORE_GREG_U16_CONST IEM_MC_STORE_GREG_U16
8589#define IEM_MC_STORE_GREG_U32_CONST IEM_MC_STORE_GREG_U32
8590#define IEM_MC_STORE_GREG_U64_CONST IEM_MC_STORE_GREG_U64
8591#define IEM_MC_CLEAR_HIGH_GREG_U64(a_iGReg) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= UINT32_MAX
8592#define IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(a_pu32Dst) do { (a_pu32Dst)[1] = 0; } while (0)
8593#define IEM_MC_STORE_FPUREG_R80_SRC_REF(a_iSt, a_pr80Src) \
8594 do { pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[a_iSt].r80 = *(a_pr80Src); } while (0)
8595
8596#define IEM_MC_REF_GREG_U8(a_pu8Dst, a_iGReg) (a_pu8Dst) = iemGRegRefU8(pIemCpu, (a_iGReg))
8597#define IEM_MC_REF_GREG_U16(a_pu16Dst, a_iGReg) (a_pu16Dst) = (uint16_t *)iemGRegRef(pIemCpu, (a_iGReg))
8598/** @todo User of IEM_MC_REF_GREG_U32 needs to clear the high bits on commit.
8599 * Use IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF! */
8600#define IEM_MC_REF_GREG_U32(a_pu32Dst, a_iGReg) (a_pu32Dst) = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg))
8601#define IEM_MC_REF_GREG_U64(a_pu64Dst, a_iGReg) (a_pu64Dst) = (uint64_t *)iemGRegRef(pIemCpu, (a_iGReg))
8602/** @note Not for IOPL or IF testing or modification. */
8603#define IEM_MC_REF_EFLAGS(a_pEFlags) (a_pEFlags) = &(pIemCpu)->CTX_SUFF(pCtx)->eflags.u
8604
8605#define IEM_MC_ADD_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) += (a_u8Value)
8606#define IEM_MC_ADD_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) += (a_u16Value)
8607#define IEM_MC_ADD_GREG_U32(a_iGReg, a_u32Value) \
8608 do { \
8609 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
8610 *pu32Reg += (a_u32Value); \
8611 pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
8612 } while (0)
8613#define IEM_MC_ADD_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) += (a_u64Value)
8614
8615#define IEM_MC_SUB_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) -= (a_u8Value)
8616#define IEM_MC_SUB_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) -= (a_u16Value)
8617#define IEM_MC_SUB_GREG_U32(a_iGReg, a_u32Value) \
8618 do { \
8619 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
8620 *pu32Reg -= (a_u32Value); \
8621 pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
8622 } while (0)
8623#define IEM_MC_SUB_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) -= (a_u64Value)
8624#define IEM_MC_SUB_LOCAL_U16(a_u16Value, a_u16Const) do { (a_u16Value) -= a_u16Const; } while (0)
8625
8626#define IEM_MC_ADD_GREG_U8_TO_LOCAL(a_u8Value, a_iGReg) do { (a_u8Value) += iemGRegFetchU8( pIemCpu, (a_iGReg)); } while (0)
8627#define IEM_MC_ADD_GREG_U16_TO_LOCAL(a_u16Value, a_iGReg) do { (a_u16Value) += iemGRegFetchU16(pIemCpu, (a_iGReg)); } while (0)
8628#define IEM_MC_ADD_GREG_U32_TO_LOCAL(a_u32Value, a_iGReg) do { (a_u32Value) += iemGRegFetchU32(pIemCpu, (a_iGReg)); } while (0)
8629#define IEM_MC_ADD_GREG_U64_TO_LOCAL(a_u64Value, a_iGReg) do { (a_u64Value) += iemGRegFetchU64(pIemCpu, (a_iGReg)); } while (0)
8630#define IEM_MC_ADD_LOCAL_S16_TO_EFF_ADDR(a_EffAddr, a_i16) do { (a_EffAddr) += (a_i16); } while (0)
8631#define IEM_MC_ADD_LOCAL_S32_TO_EFF_ADDR(a_EffAddr, a_i32) do { (a_EffAddr) += (a_i32); } while (0)
8632#define IEM_MC_ADD_LOCAL_S64_TO_EFF_ADDR(a_EffAddr, a_i64) do { (a_EffAddr) += (a_i64); } while (0)
8633
8634#define IEM_MC_AND_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) &= (a_u8Mask); } while (0)
8635#define IEM_MC_AND_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) &= (a_u16Mask); } while (0)
8636#define IEM_MC_AND_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
8637#define IEM_MC_AND_LOCAL_U64(a_u64Local, a_u64Mask) do { (a_u64Local) &= (a_u64Mask); } while (0)
8638
8639#define IEM_MC_AND_ARG_U16(a_u16Arg, a_u16Mask) do { (a_u16Arg) &= (a_u16Mask); } while (0)
8640#define IEM_MC_AND_ARG_U32(a_u32Arg, a_u32Mask) do { (a_u32Arg) &= (a_u32Mask); } while (0)
8641#define IEM_MC_AND_ARG_U64(a_u64Arg, a_u64Mask) do { (a_u64Arg) &= (a_u64Mask); } while (0)
8642
8643#define IEM_MC_OR_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) |= (a_u8Mask); } while (0)
8644#define IEM_MC_OR_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) |= (a_u16Mask); } while (0)
8645#define IEM_MC_OR_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
8646
8647#define IEM_MC_SAR_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) >>= (a_cShift); } while (0)
8648#define IEM_MC_SAR_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) >>= (a_cShift); } while (0)
8649#define IEM_MC_SAR_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) >>= (a_cShift); } while (0)
8650
8651#define IEM_MC_SHL_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) <<= (a_cShift); } while (0)
8652#define IEM_MC_SHL_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) <<= (a_cShift); } while (0)
8653#define IEM_MC_SHL_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) <<= (a_cShift); } while (0)
8654
8655#define IEM_MC_AND_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
8656
8657#define IEM_MC_OR_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
8658
8659#define IEM_MC_AND_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= (a_u8Value)
8660#define IEM_MC_AND_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= (a_u16Value)
8661#define IEM_MC_AND_GREG_U32(a_iGReg, a_u32Value) \
8662 do { \
8663 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
8664 *pu32Reg &= (a_u32Value); \
8665 pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
8666 } while (0)
8667#define IEM_MC_AND_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= (a_u64Value)
8668
8669#define IEM_MC_OR_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) |= (a_u8Value)
8670#define IEM_MC_OR_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) |= (a_u16Value)
8671#define IEM_MC_OR_GREG_U32(a_iGReg, a_u32Value) \
8672 do { \
8673 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
8674 *pu32Reg |= (a_u32Value); \
8675 pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
8676 } while (0)
8677#define IEM_MC_OR_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) |= (a_u64Value)
8678
8679
8680/** @note Not for IOPL or IF modification. */
8681#define IEM_MC_SET_EFL_BIT(a_fBit) do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u |= (a_fBit); } while (0)
8682/** @note Not for IOPL or IF modification. */
8683#define IEM_MC_CLEAR_EFL_BIT(a_fBit) do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u &= ~(a_fBit); } while (0)
8684/** @note Not for IOPL or IF modification. */
8685#define IEM_MC_FLIP_EFL_BIT(a_fBit) do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u ^= (a_fBit); } while (0)
8686
8687#define IEM_MC_CLEAR_FSW_EX() do { (pIemCpu)->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FSW &= X86_FSW_C_MASK | X86_FSW_TOP_MASK; } while (0)
8688
8689
8690#define IEM_MC_FETCH_MREG_U64(a_u64Value, a_iMReg) \
8691 do { (a_u64Value) = pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx; } while (0)
8692#define IEM_MC_FETCH_MREG_U32(a_u32Value, a_iMReg) \
8693 do { (a_u32Value) = pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[0]; } while (0)
8694#define IEM_MC_STORE_MREG_U64(a_iMReg, a_u64Value) \
8695 do { pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (a_u64Value); } while (0)
8696#define IEM_MC_STORE_MREG_U32_ZX_U64(a_iMReg, a_u32Value) \
8697 do { pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (uint32_t)(a_u32Value); } while (0)
8698#define IEM_MC_REF_MREG_U64(a_pu64Dst, a_iMReg) \
8699 (a_pu64Dst) = (&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
8700#define IEM_MC_REF_MREG_U64_CONST(a_pu64Dst, a_iMReg) \
8701 (a_pu64Dst) = ((uint64_t const *)&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
8702#define IEM_MC_REF_MREG_U32_CONST(a_pu32Dst, a_iMReg) \
8703 (a_pu32Dst) = ((uint32_t const *)&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
8704
8705#define IEM_MC_FETCH_XREG_U128(a_u128Value, a_iXReg) \
8706 do { (a_u128Value) = pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm; } while (0)
8707#define IEM_MC_FETCH_XREG_U64(a_u64Value, a_iXReg) \
8708 do { (a_u64Value) = pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0]; } while (0)
8709#define IEM_MC_FETCH_XREG_U32(a_u32Value, a_iXReg) \
8710 do { (a_u32Value) = pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au32[0]; } while (0)
8711#define IEM_MC_STORE_XREG_U128(a_iXReg, a_u128Value) \
8712 do { pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm = (a_u128Value); } while (0)
8713#define IEM_MC_STORE_XREG_U64_ZX_U128(a_iXReg, a_u64Value) \
8714 do { pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); \
8715 pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
8716 } while (0)
8717#define IEM_MC_STORE_XREG_U32_ZX_U128(a_iXReg, a_u32Value) \
8718 do { pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (uint32_t)(a_u32Value); \
8719 pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
8720 } while (0)
8721#define IEM_MC_REF_XREG_U128(a_pu128Dst, a_iXReg) \
8722 (a_pu128Dst) = (&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm)
8723#define IEM_MC_REF_XREG_U128_CONST(a_pu128Dst, a_iXReg) \
8724 (a_pu128Dst) = ((uint128_t const *)&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm)
8725#define IEM_MC_REF_XREG_U64_CONST(a_pu64Dst, a_iXReg) \
8726 (a_pu64Dst) = ((uint64_t const *)&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0])
8727
8728#define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
8729 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem)))
8730#define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
8731 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem16)))
8732#define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
8733 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem32)))
8734
8735#define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
8736 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem)))
8737#define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
8738 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
8739#define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
8740 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, (uint16_t *)&(a_i16Dst), (a_iSeg), (a_GCPtrMem)))
8741
8742#define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
8743 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem)))
8744#define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
8745 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
8746#define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
8747 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, (uint32_t *)&(a_i32Dst), (a_iSeg), (a_GCPtrMem)))
8748
8749#define IEM_MC_FETCH_MEM_S32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8750 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataS32SxU64(pIemCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
8751
8752#define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8753 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
8754#define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
8755 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
8756#define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
8757 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64AlignedU128(pIemCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
8758#define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
8759 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, (uint64_t *)&(a_i64Dst), (a_iSeg), (a_GCPtrMem)))
8760
8761#define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
8762 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &(a_r32Dst).u32, (a_iSeg), (a_GCPtrMem)))
8763#define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
8764 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, &(a_r64Dst).au64[0], (a_iSeg), (a_GCPtrMem)))
8765#define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
8766 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataR80(pIemCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem)))
8767
8768#define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
8769 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128(pIemCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
8770#define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
8771 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128AlignedSse(pIemCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
8772
8773
8774
8775#define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
8776 do { \
8777 uint8_t u8Tmp; \
8778 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
8779 (a_u16Dst) = u8Tmp; \
8780 } while (0)
8781#define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
8782 do { \
8783 uint8_t u8Tmp; \
8784 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
8785 (a_u32Dst) = u8Tmp; \
8786 } while (0)
8787#define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8788 do { \
8789 uint8_t u8Tmp; \
8790 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
8791 (a_u64Dst) = u8Tmp; \
8792 } while (0)
8793#define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
8794 do { \
8795 uint16_t u16Tmp; \
8796 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
8797 (a_u32Dst) = u16Tmp; \
8798 } while (0)
8799#define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8800 do { \
8801 uint16_t u16Tmp; \
8802 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
8803 (a_u64Dst) = u16Tmp; \
8804 } while (0)
8805#define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8806 do { \
8807 uint32_t u32Tmp; \
8808 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
8809 (a_u64Dst) = u32Tmp; \
8810 } while (0)
8811
8812#define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
8813 do { \
8814 uint8_t u8Tmp; \
8815 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
8816 (a_u16Dst) = (int8_t)u8Tmp; \
8817 } while (0)
8818#define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
8819 do { \
8820 uint8_t u8Tmp; \
8821 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
8822 (a_u32Dst) = (int8_t)u8Tmp; \
8823 } while (0)
8824#define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8825 do { \
8826 uint8_t u8Tmp; \
8827 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
8828 (a_u64Dst) = (int8_t)u8Tmp; \
8829 } while (0)
8830#define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
8831 do { \
8832 uint16_t u16Tmp; \
8833 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
8834 (a_u32Dst) = (int16_t)u16Tmp; \
8835 } while (0)
8836#define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8837 do { \
8838 uint16_t u16Tmp; \
8839 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
8840 (a_u64Dst) = (int16_t)u16Tmp; \
8841 } while (0)
8842#define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8843 do { \
8844 uint32_t u32Tmp; \
8845 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
8846 (a_u64Dst) = (int32_t)u32Tmp; \
8847 } while (0)
8848
8849#define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
8850 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value)))
8851#define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
8852 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value)))
8853#define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
8854 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value)))
8855#define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
8856 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value)))
8857
8858#define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
8859 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u8C)))
8860#define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
8861 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u16C)))
8862#define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
8863 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u32C)))
8864#define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
8865 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u64C)))
8866
8867#define IEM_MC_STORE_MEM_I8_CONST_BY_REF( a_pi8Dst, a_i8C) *(a_pi8Dst) = (a_i8C)
8868#define IEM_MC_STORE_MEM_I16_CONST_BY_REF(a_pi16Dst, a_i16C) *(a_pi16Dst) = (a_i16C)
8869#define IEM_MC_STORE_MEM_I32_CONST_BY_REF(a_pi32Dst, a_i32C) *(a_pi32Dst) = (a_i32C)
8870#define IEM_MC_STORE_MEM_I64_CONST_BY_REF(a_pi64Dst, a_i64C) *(a_pi64Dst) = (a_i64C)
8871#define IEM_MC_STORE_MEM_NEG_QNAN_R32_BY_REF(a_pr32Dst) (a_pr32Dst)->u32 = UINT32_C(0xffc00000)
8872#define IEM_MC_STORE_MEM_NEG_QNAN_R64_BY_REF(a_pr64Dst) (a_pr64Dst)->au64[0] = UINT64_C(0xfff8000000000000)
8873#define IEM_MC_STORE_MEM_NEG_QNAN_R80_BY_REF(a_pr80Dst) \
8874 do { \
8875 (a_pr80Dst)->au64[0] = UINT64_C(0xc000000000000000); \
8876 (a_pr80Dst)->au16[4] = UINT16_C(0xffff); \
8877 } while (0)
8878
8879#define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
8880 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
8881#define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
8882 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128AlignedSse(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
8883
8884
8885#define IEM_MC_PUSH_U16(a_u16Value) \
8886 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU16(pIemCpu, (a_u16Value)))
8887#define IEM_MC_PUSH_U32(a_u32Value) \
8888 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32(pIemCpu, (a_u32Value)))
8889#define IEM_MC_PUSH_U32_SREG(a_u32Value) \
8890 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32SReg(pIemCpu, (a_u32Value)))
8891#define IEM_MC_PUSH_U64(a_u64Value) \
8892 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU64(pIemCpu, (a_u64Value)))
8893
8894#define IEM_MC_POP_U16(a_pu16Value) \
8895 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU16(pIemCpu, (a_pu16Value)))
8896#define IEM_MC_POP_U32(a_pu32Value) \
8897 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU32(pIemCpu, (a_pu32Value)))
8898#define IEM_MC_POP_U64(a_pu64Value) \
8899 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU64(pIemCpu, (a_pu64Value)))
8900
8901/** Maps guest memory for direct or bounce buffered access.
8902 * The purpose is to pass it to an operand implementation, thus the a_iArg.
8903 * @remarks May return.
8904 */
8905#define IEM_MC_MEM_MAP(a_pMem, a_fAccess, a_iSeg, a_GCPtrMem, a_iArg) \
8906 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pIemCpu, (void **)&(a_pMem), sizeof(*(a_pMem)), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
8907
8908/** Maps guest memory for direct or bounce buffered access.
8909 * The purpose is to pass it to an operand implementation, thus the a_iArg.
8910 * @remarks May return.
8911 */
8912#define IEM_MC_MEM_MAP_EX(a_pvMem, a_fAccess, a_cbMem, a_iSeg, a_GCPtrMem, a_iArg) \
8913 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pIemCpu, (void **)&(a_pvMem), (a_cbMem), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
8914
8915/** Commits the memory and unmaps the guest memory.
8916 * @remarks May return.
8917 */
8918#define IEM_MC_MEM_COMMIT_AND_UNMAP(a_pvMem, a_fAccess) \
8919 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pIemCpu, (a_pvMem), (a_fAccess)))
8920
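/*
 * Illustrative sketch (not part of the build): a read-modify-write memory operand is
 * mapped, handed to the operand implementation and then committed, roughly like this
 * (names hypothetical, ModR/M decoding and locking details elided):
 *
 *      IEM_MC_ARG(uint16_t *,       pu16Dst,          0);
 *      IEM_MC_ARG(uint16_t,         u16Src,           1);
 *      IEM_MC_ARG_LOCAL_EFLAGS(     pEFlags, EFlags,  2);
 *      IEM_MC_LOCAL(RTGCPTR,        GCPtrEffDst);
 *      IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
 *      IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
 *      IEM_MC_FETCH_GREG_U16(u16Src, iGRegSrc);             // iGRegSrc decoded from ModR/M
 *      IEM_MC_FETCH_EFLAGS(EFlags);
 *      IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_add_u16, pu16Dst, u16Src, pEFlags);
 *      IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
 *      IEM_MC_COMMIT_EFLAGS(EFlags);
 */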
8921/** Commits the memory and unmaps the guest memory unless the FPU status word
8922 * (@a a_u16FSW) and the FPU control word indicate a pending exception that
8923 * would prevent the store from taking place.
8924 *
8925 * The current understanding is that \#O, \#U, \#IA and \#IS will prevent a
8926 * store, while \#P will not.
8927 *
8928 * @remarks May in theory return - for now.
8929 */
8930#define IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(a_pvMem, a_fAccess, a_u16FSW) \
8931 do { \
8932 if ( !(a_u16FSW & X86_FSW_ES) \
8933 || !( (a_u16FSW & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE)) \
8934 & ~(pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FCW & X86_FCW_MASK_ALL) ) ) \
8935 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pIemCpu, (a_pvMem), (a_fAccess))); \
8936 } while (0)
8937
8938/** Calculate the effective address from R/M. */
8939#define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
8940 IEM_MC_RETURN_ON_FAILURE(iemOpHlpCalcRmEffAddr(pIemCpu, (bRm), (cbImm), &(a_GCPtrEff)))
8941
8942#define IEM_MC_CALL_VOID_AIMPL_0(a_pfn) (a_pfn)()
8943#define IEM_MC_CALL_VOID_AIMPL_1(a_pfn, a0) (a_pfn)((a0))
8944#define IEM_MC_CALL_VOID_AIMPL_2(a_pfn, a0, a1) (a_pfn)((a0), (a1))
8945#define IEM_MC_CALL_VOID_AIMPL_3(a_pfn, a0, a1, a2) (a_pfn)((a0), (a1), (a2))
8946#define IEM_MC_CALL_VOID_AIMPL_4(a_pfn, a0, a1, a2, a3) (a_pfn)((a0), (a1), (a2), (a3))
8947#define IEM_MC_CALL_AIMPL_3(a_rc, a_pfn, a0, a1, a2) (a_rc) = (a_pfn)((a0), (a1), (a2))
8948#define IEM_MC_CALL_AIMPL_4(a_rc, a_pfn, a0, a1, a2, a3) (a_rc) = (a_pfn)((a0), (a1), (a2), (a3))
8949
8950/**
8951 * Defers the rest of the instruction emulation to a C implementation routine
8952 * and returns, only taking the standard parameters.
8953 *
8954 * @param a_pfnCImpl The pointer to the C routine.
8955 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
8956 */
8957#define IEM_MC_CALL_CIMPL_0(a_pfnCImpl) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode)
8958
8959/**
8960 * Defers the rest of instruction emulation to a C implementation routine and
8961 * returns, taking one argument in addition to the standard ones.
8962 *
8963 * @param a_pfnCImpl The pointer to the C routine.
8964 * @param a0 The argument.
8965 */
8966#define IEM_MC_CALL_CIMPL_1(a_pfnCImpl, a0) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0)
8967
8968/**
8969 * Defers the rest of the instruction emulation to a C implementation routine
8970 * and returns, taking two arguments in addition to the standard ones.
8971 *
8972 * @param a_pfnCImpl The pointer to the C routine.
8973 * @param a0 The first extra argument.
8974 * @param a1 The second extra argument.
8975 */
8976#define IEM_MC_CALL_CIMPL_2(a_pfnCImpl, a0, a1) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1)
8977
8978/**
8979 * Defers the rest of the instruction emulation to a C implementation routine
8980 * and returns, taking three arguments in addition to the standard ones.
8981 *
8982 * @param a_pfnCImpl The pointer to the C routine.
8983 * @param a0 The first extra argument.
8984 * @param a1 The second extra argument.
8985 * @param a2 The third extra argument.
8986 */
8987#define IEM_MC_CALL_CIMPL_3(a_pfnCImpl, a0, a1, a2) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2)
8988
8989/**
8990 * Defers the rest of the instruction emulation to a C implementation routine
8991 * and returns, taking four arguments in addition to the standard ones.
8992 *
8993 * @param a_pfnCImpl The pointer to the C routine.
8994 * @param a0 The first extra argument.
8995 * @param a1 The second extra argument.
8996 * @param a2 The third extra argument.
8997 * @param a3 The fourth extra argument.
8998 */
8999#define IEM_MC_CALL_CIMPL_4(a_pfnCImpl, a0, a1, a2, a3) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2, a3)
9000
9001/**
9002 * Defers the rest of the instruction emulation to a C implementation routine
9003 * and returns, taking five arguments in addition to the standard ones.
9004 *
9005 * @param a_pfnCImpl The pointer to the C routine.
9006 * @param a0 The first extra argument.
9007 * @param a1 The second extra argument.
9008 * @param a2 The third extra argument.
9009 * @param a3 The fourth extra argument.
9010 * @param a4 The fifth extra argument.
9011 */
9012#define IEM_MC_CALL_CIMPL_5(a_pfnCImpl, a0, a1, a2, a3, a4) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2, a3, a4)
9013
9014/**
9015 * Defers the entire instruction emulation to a C implementation routine and
9016 * returns, only taking the standard parameters.
9017 *
9018 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
9019 *
9020 * @param a_pfnCImpl The pointer to the C routine.
9021 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
9022 */
9023#define IEM_MC_DEFER_TO_CIMPL_0(a_pfnCImpl) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode)
9024
9025/**
9026 * Defers the entire instruction emulation to a C implementation routine and
9027 * returns, taking one argument in addition to the standard ones.
9028 *
9029 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
9030 *
9031 * @param a_pfnCImpl The pointer to the C routine.
9032 * @param a0 The argument.
9033 */
9034#define IEM_MC_DEFER_TO_CIMPL_1(a_pfnCImpl, a0) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0)
9035
9036/**
9037 * Defers the entire instruction emulation to a C implementation routine and
9038 * returns, taking two arguments in addition to the standard ones.
9039 *
9040 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
9041 *
9042 * @param a_pfnCImpl The pointer to the C routine.
9043 * @param a0 The first extra argument.
9044 * @param a1 The second extra argument.
9045 */
9046#define IEM_MC_DEFER_TO_CIMPL_2(a_pfnCImpl, a0, a1) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1)
9047
9048/**
9049 * Defers the entire instruction emulation to a C implementation routine and
9050 * returns, taking three arguments in addition to the standard ones.
9051 *
9052 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
9053 *
9054 * @param a_pfnCImpl The pointer to the C routine.
9055 * @param a0 The first extra argument.
9056 * @param a1 The second extra argument.
9057 * @param a2 The third extra argument.
9058 */
9059#define IEM_MC_DEFER_TO_CIMPL_3(a_pfnCImpl, a0, a1, a2) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2)
9060
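/*
 * Illustrative sketch (not part of the build): an instruction that needs the full C
 * worker treatment decodes its operands with IEM_MC_* statements and then tail-calls
 * the worker, e.g. a far jump:
 *
 *      IEM_MC_CALL_CIMPL_3(iemCImpl_FarJmp, uSel, offSeg, enmEffOpSize);
 *
 * while a trivial instruction can skip the IEM_MC_BEGIN/IEM_MC_END block altogether:
 *
 *      return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_hlt);
 *
 * (Both worker routines live in IEMAllCImpl.cpp.h, included above.)
 */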
9061/**
9062 * Calls a FPU assembly implementation taking one visible argument.
9063 *
9064 * @param a_pfnAImpl Pointer to the assembly FPU routine.
9065 * @param a0 The first extra argument.
9066 */
9067#define IEM_MC_CALL_FPU_AIMPL_1(a_pfnAImpl, a0) \
9068 do { \
9069 iemFpuPrepareUsage(pIemCpu); \
9070 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0)); \
9071 } while (0)
9072
9073/**
9074 * Calls a FPU assembly implementation taking two visible arguments.
9075 *
9076 * @param a_pfnAImpl Pointer to the assembly FPU routine.
9077 * @param a0 The first extra argument.
9078 * @param a1 The second extra argument.
9079 */
9080#define IEM_MC_CALL_FPU_AIMPL_2(a_pfnAImpl, a0, a1) \
9081 do { \
9082 iemFpuPrepareUsage(pIemCpu); \
9083 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
9084 } while (0)
9085
9086/**
9087 * Calls a FPU assembly implementation taking three visible arguments.
9088 *
9089 * @param a_pfnAImpl Pointer to the assembly FPU routine.
9090 * @param a0 The first extra argument.
9091 * @param a1 The second extra argument.
9092 * @param a2 The third extra argument.
9093 */
9094#define IEM_MC_CALL_FPU_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
9095 do { \
9096 iemFpuPrepareUsage(pIemCpu); \
9097 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
9098 } while (0)
9099
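/*
 * Illustrative sketch (not part of the build): an FPU instruction typically prepares an
 * IEMFPURESULT local, calls the assembly worker through one of the macros above and then
 * commits it with the IEM_MC_STORE_FPU_RESULT* / IEM_MC_PUSH_FPU_RESULT* macros defined
 * below.  The worker name and register indexes are only examples.
 *
 *      IEM_MC_LOCAL(IEMFPURESULT,          FpuRes);
 *      IEM_MC_ARG_LOCAL_REF(PIEMFPURESULT, pFpuRes, FpuRes,  0);
 *      IEM_MC_ARG(PCRTFLOAT80U,            pr80Value1,       1);
 *      IEM_MC_ARG(PCRTFLOAT80U,            pr80Value2,       2);
 *      // ...fetch the two stack register references...
 *      IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_fadd_r80_by_r80, pFpuRes, pr80Value1, pr80Value2);
 *      IEM_MC_STORE_FPU_RESULT(FpuRes, 0);
 */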
9100#define IEM_MC_SET_FPU_RESULT(a_FpuData, a_FSW, a_pr80Value) \
9101 do { \
9102 (a_FpuData).FSW = (a_FSW); \
9103 (a_FpuData).r80Result = *(a_pr80Value); \
9104 } while (0)
9105
9106/** Pushes FPU result onto the stack. */
9107#define IEM_MC_PUSH_FPU_RESULT(a_FpuData) \
9108 iemFpuPushResult(pIemCpu, &a_FpuData)
9109/** Pushes FPU result onto the stack and sets the FPUDP. */
9110#define IEM_MC_PUSH_FPU_RESULT_MEM_OP(a_FpuData, a_iEffSeg, a_GCPtrEff) \
9111 iemFpuPushResultWithMemOp(pIemCpu, &a_FpuData, a_iEffSeg, a_GCPtrEff)
9112
9113/** Replaces ST0 with the first result value and pushes the second onto the FPU stack. */
9114#define IEM_MC_PUSH_FPU_RESULT_TWO(a_FpuDataTwo) \
9115 iemFpuPushResultTwo(pIemCpu, &a_FpuDataTwo)
9116
9117/** Stores FPU result in a stack register. */
9118#define IEM_MC_STORE_FPU_RESULT(a_FpuData, a_iStReg) \
9119 iemFpuStoreResult(pIemCpu, &a_FpuData, a_iStReg)
9120/** Stores FPU result in a stack register and pops the stack. */
9121#define IEM_MC_STORE_FPU_RESULT_THEN_POP(a_FpuData, a_iStReg) \
9122 iemFpuStoreResultThenPop(pIemCpu, &a_FpuData, a_iStReg)
9123/** Stores FPU result in a stack register and sets the FPUDP. */
9124#define IEM_MC_STORE_FPU_RESULT_MEM_OP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
9125 iemFpuStoreResultWithMemOp(pIemCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
9126/** Stores FPU result in a stack register, sets the FPUDP, and pops the
9127 * stack. */
9128#define IEM_MC_STORE_FPU_RESULT_WITH_MEM_OP_THEN_POP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
9129 iemFpuStoreResultWithMemOpThenPop(pIemCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
9130
9131/** Only update the FOP, FPUIP, and FPUCS. (For FNOP.) */
9132#define IEM_MC_UPDATE_FPU_OPCODE_IP() \
9133 iemFpuUpdateOpcodeAndIp(pIemCpu)
9134/** Free a stack register (for FFREE and FFREEP). */
9135#define IEM_MC_FPU_STACK_FREE(a_iStReg) \
9136 iemFpuStackFree(pIemCpu, a_iStReg)
9137/** Increment the FPU stack pointer. */
9138#define IEM_MC_FPU_STACK_INC_TOP() \
9139 iemFpuStackIncTop(pIemCpu)
9140/** Decrement the FPU stack pointer. */
9141#define IEM_MC_FPU_STACK_DEC_TOP() \
9142 iemFpuStackDecTop(pIemCpu)
9143
9144/** Updates the FSW, FOP, FPUIP, and FPUCS. */
9145#define IEM_MC_UPDATE_FSW(a_u16FSW) \
9146 iemFpuUpdateFSW(pIemCpu, a_u16FSW)
9147/** Updates the FSW with a constant value as well as FOP, FPUIP, and FPUCS. */
9148#define IEM_MC_UPDATE_FSW_CONST(a_u16FSW) \
9149 iemFpuUpdateFSW(pIemCpu, a_u16FSW)
9150/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS. */
9151#define IEM_MC_UPDATE_FSW_WITH_MEM_OP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
9152 iemFpuUpdateFSWWithMemOp(pIemCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
9153/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack. */
9154#define IEM_MC_UPDATE_FSW_THEN_POP(a_u16FSW) \
9155 iemFpuUpdateFSWThenPop(pIemCpu, a_u16FSW)
9156/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP and FPUDS, and then pops the
9157 * stack. */
9158#define IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
9159 iemFpuUpdateFSWWithMemOpThenPop(pIemCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
9160/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack twice. */
9161#define IEM_MC_UPDATE_FSW_THEN_POP_POP(a_u16FSW) \
9162 iemFpuUpdateFSWThenPopPop(pIemCpu, a_u16FSW)
9163
9164/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. */
9165#define IEM_MC_FPU_STACK_UNDERFLOW(a_iStDst) \
9166 iemFpuStackUnderflow(pIemCpu, a_iStDst)
9167/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
9168 * stack. */
9169#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP(a_iStDst) \
9170 iemFpuStackUnderflowThenPop(pIemCpu, a_iStDst)
9171/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
9172 * FPUDS. */
9173#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
9174 iemFpuStackUnderflowWithMemOp(pIemCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
9175/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
9176 * FPUDS. Pops stack. */
9177#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
9178 iemFpuStackUnderflowWithMemOpThenPop(pIemCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
9179/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
9180 * stack twice. */
9181#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP_POP() \
9182 iemFpuStackUnderflowThenPopPop(pIemCpu)
9183/** Raises a FPU stack underflow exception for an instruction pushing a result
9184 * value onto the stack. Sets FPUIP, FPUCS and FOP. */
9185#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW() \
9186 iemFpuStackPushUnderflow(pIemCpu)
9187/** Raises a FPU stack underflow exception for an instruction pushing a result
9188 * value onto the stack and replacing ST0. Sets FPUIP, FPUCS and FOP. */
9189#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW_TWO() \
9190 iemFpuStackPushUnderflowTwo(pIemCpu)
9191
9192/** Raises a FPU stack overflow exception as part of a push attempt. Sets
9193 * FPUIP, FPUCS and FOP. */
9194#define IEM_MC_FPU_STACK_PUSH_OVERFLOW() \
9195 iemFpuStackPushOverflow(pIemCpu)
9196/** Raises a FPU stack overflow exception as part of a push attempt. Sets
9197 * FPUIP, FPUCS, FOP, FPUDP and FPUDS. */
9198#define IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(a_iEffSeg, a_GCPtrEff) \
9199 iemFpuStackPushOverflowWithMemOp(pIemCpu, a_iEffSeg, a_GCPtrEff)
9200/** Indicates that we (might) have modified the FPU state. */
9201#define IEM_MC_USED_FPU() \
9202 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_FPU_REM)
9203
9204/**
9205 * Calls a MMX assembly implementation taking two visible arguments.
9206 *
9207 * @param a_pfnAImpl Pointer to the assembly MMX routine.
9208 * @param a0 The first extra argument.
9209 * @param a1 The second extra argument.
9210 */
9211#define IEM_MC_CALL_MMX_AIMPL_2(a_pfnAImpl, a0, a1) \
9212 do { \
9213 iemFpuPrepareUsage(pIemCpu); \
9214 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
9215 } while (0)
9216
9217/**
9218 * Calls a MMX assembly implementation taking three visible arguments.
9219 *
9220 * @param a_pfnAImpl Pointer to the assembly MMX routine.
9221 * @param a0 The first extra argument.
9222 * @param a1 The second extra argument.
9223 * @param a2 The third extra argument.
9224 */
9225#define IEM_MC_CALL_MMX_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
9226 do { \
9227 iemFpuPrepareUsage(pIemCpu); \
9228 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
9229 } while (0)
9230
9231
9232/**
9233 * Calls a SSE assembly implementation taking two visible arguments.
9234 *
9235 * @param a_pfnAImpl Pointer to the assembly SSE routine.
9236 * @param a0 The first extra argument.
9237 * @param a1 The second extra argument.
9238 */
9239#define IEM_MC_CALL_SSE_AIMPL_2(a_pfnAImpl, a0, a1) \
9240 do { \
9241 iemFpuPrepareUsageSse(pIemCpu); \
9242 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
9243 } while (0)
9244
9245/**
9246 * Calls a SSE assembly implementation taking three visible arguments.
9247 *
9248 * @param a_pfnAImpl Pointer to the assembly SSE routine.
9249 * @param a0 The first extra argument.
9250 * @param a1 The second extra argument.
9251 * @param a2 The third extra argument.
9252 */
9253#define IEM_MC_CALL_SSE_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
9254 do { \
9255 iemFpuPrepareUsageSse(pIemCpu); \
9256 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
9257 } while (0)
9258
9259
9260/** @note Not for IOPL or IF testing. */
9261#define IEM_MC_IF_EFL_BIT_SET(a_fBit) if (pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit)) {
9262/** @note Not for IOPL or IF testing. */
9263#define IEM_MC_IF_EFL_BIT_NOT_SET(a_fBit) if (!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit))) {
9264/** @note Not for IOPL or IF testing. */
9265#define IEM_MC_IF_EFL_ANY_BITS_SET(a_fBits) if (pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBits)) {
9266/** @note Not for IOPL or IF testing. */
9267#define IEM_MC_IF_EFL_NO_BITS_SET(a_fBits) if (!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBits))) {
9268/** @note Not for IOPL or IF testing. */
9269#define IEM_MC_IF_EFL_BITS_NE(a_fBit1, a_fBit2) \
9270 if ( !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
9271 != !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
9272/** @note Not for IOPL or IF testing. */
9273#define IEM_MC_IF_EFL_BITS_EQ(a_fBit1, a_fBit2) \
9274 if ( !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
9275 == !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
9276/** @note Not for IOPL or IF testing. */
9277#define IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(a_fBit, a_fBit1, a_fBit2) \
9278 if ( (pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit)) \
9279 || !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
9280 != !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
9281/** @note Not for IOPL or IF testing. */
9282#define IEM_MC_IF_EFL_BIT_NOT_SET_AND_BITS_EQ(a_fBit, a_fBit1, a_fBit2) \
9283 if ( !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit)) \
9284 && !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
9285 == !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
9286#define IEM_MC_IF_CX_IS_NZ() if (pIemCpu->CTX_SUFF(pCtx)->cx != 0) {
9287#define IEM_MC_IF_ECX_IS_NZ() if (pIemCpu->CTX_SUFF(pCtx)->ecx != 0) {
9288#define IEM_MC_IF_RCX_IS_NZ() if (pIemCpu->CTX_SUFF(pCtx)->rcx != 0) {
9289/** @note Not for IOPL or IF testing. */
9290#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
9291 if ( pIemCpu->CTX_SUFF(pCtx)->cx != 0 \
9292 && (pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
9293/** @note Not for IOPL or IF testing. */
9294#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
9295 if ( pIemCpu->CTX_SUFF(pCtx)->ecx != 0 \
9296 && (pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
9297/** @note Not for IOPL or IF testing. */
9298#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
9299 if ( pIemCpu->CTX_SUFF(pCtx)->rcx != 0 \
9300 && (pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
9301/** @note Not for IOPL or IF testing. */
9302#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
9303 if ( pIemCpu->CTX_SUFF(pCtx)->cx != 0 \
9304 && !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
9305/** @note Not for IOPL or IF testing. */
9306#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
9307 if ( pIemCpu->CTX_SUFF(pCtx)->ecx != 0 \
9308 && !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
9309/** @note Not for IOPL or IF testing. */
9310#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
9311 if ( pIemCpu->CTX_SUFF(pCtx)->rcx != 0 \
9312 && !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
9313#define IEM_MC_IF_LOCAL_IS_Z(a_Local) if ((a_Local) == 0) {
9314#define IEM_MC_IF_GREG_BIT_SET(a_iGReg, a_iBitNo) if (*(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) & RT_BIT_64(a_iBitNo)) {
9315#define IEM_MC_IF_FPUREG_NOT_EMPTY(a_iSt) \
9316 if (iemFpuStRegNotEmpty(pIemCpu, (a_iSt)) == VINF_SUCCESS) {
9317#define IEM_MC_IF_FPUREG_IS_EMPTY(a_iSt) \
9318 if (iemFpuStRegNotEmpty(pIemCpu, (a_iSt)) != VINF_SUCCESS) {
9319#define IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(a_pr80Dst, a_iSt) \
9320 if (iemFpuStRegNotEmptyRef(pIemCpu, (a_iSt), &(a_pr80Dst)) == VINF_SUCCESS) {
9321#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(a_pr80Dst0, a_iSt0, a_pr80Dst1, a_iSt1) \
9322 if (iemFpu2StRegsNotEmptyRef(pIemCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1), &(a_pr80Dst1)) == VINF_SUCCESS) {
9323#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(a_pr80Dst0, a_iSt0, a_iSt1) \
9324 if (iemFpu2StRegsNotEmptyRefFirst(pIemCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1)) == VINF_SUCCESS) {
9325#define IEM_MC_IF_FCW_IM() \
9326 if (pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FCW & X86_FCW_IM) {
9327
9328#define IEM_MC_ELSE() } else {
9329#define IEM_MC_ENDIF() } do {} while (0)
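/*
 * Illustrative sketch, not part of IEM: the IEM_MC_IF_* / IEM_MC_ELSE /
 * IEM_MC_ENDIF macros above deliberately leave a brace open so that an
 * instruction body can assemble a complete if/else statement from several
 * macro invocations, while the !!(x) double negation normalises two flag
 * bits at different positions to 0/1 before comparing them (e.g. SF != OF
 * for the 'less' condition).  A self-contained model using made-up MY_*
 * names:
 *
 *     #include <stdint.h>
 *     #include <stdio.h>
 *
 *     #define MY_EFL_SF   UINT32_C(0x0080)
 *     #define MY_EFL_OF   UINT32_C(0x0800)
 *
 *     #define MY_IF_EFL_BITS_NE(a_fEfl, a_fBit1, a_fBit2) \
 *         if (!!((a_fEfl) & (a_fBit1)) != !!((a_fEfl) & (a_fBit2))) {
 *     #define MY_ELSE()   } else {
 *     #define MY_ENDIF()  } do {} while (0) // swallows the trailing ';'
 *
 *     static const char *myJlTaken(uint32_t fEfl)
 *     {
 *         const char *pszRet;
 *         MY_IF_EFL_BITS_NE(fEfl, MY_EFL_SF, MY_EFL_OF)
 *             pszRet = "jl taken";      // SF != OF
 *         MY_ELSE()
 *             pszRet = "jl not taken";  // SF == OF
 *         MY_ENDIF();
 *         return pszRet;
 *     }
 *
 *     int main(void)
 *     {
 *         printf("%s\n", myJlTaken(MY_EFL_SF));             // taken
 *         printf("%s\n", myJlTaken(MY_EFL_SF | MY_EFL_OF)); // not taken
 *         return 0;
 *     }
 */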
9330
9331/** @} */
9332
9333
9334/** @name Opcode Debug Helpers.
9335 * @{
9336 */
9337#ifdef DEBUG
9338# define IEMOP_MNEMONIC(a_szMnemonic) \
9339 Log4(("decode - %04x:%RGv %s%s [#%u]\n", pIemCpu->CTX_SUFF(pCtx)->cs.Sel, pIemCpu->CTX_SUFF(pCtx)->rip, \
9340 pIemCpu->fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, pIemCpu->cInstructions))
9341# define IEMOP_MNEMONIC2(a_szMnemonic, a_szOps) \
9342 Log4(("decode - %04x:%RGv %s%s %s [#%u]\n", pIemCpu->CTX_SUFF(pCtx)->cs.Sel, pIemCpu->CTX_SUFF(pCtx)->rip, \
9343 pIemCpu->fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, a_szOps, pIemCpu->cInstructions))
9344#else
9345# define IEMOP_MNEMONIC(a_szMnemonic) do { } while (0)
9346# define IEMOP_MNEMONIC2(a_szMnemonic, a_szOps) do { } while (0)
9347#endif
9348
9349/** @} */
9350
9351
9352/** @name Opcode Helpers.
9353 * @{
9354 */
9355
9356#ifdef IN_RING3
9357# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
9358 do { \
9359 if (IEM_GET_TARGET_CPU(pIemCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
9360 else \
9361 { \
9362 DBGFSTOP(IEMCPU_TO_VM(pIemCpu)); \
9363 return IEMOP_RAISE_INVALID_OPCODE(); \
9364 } \
9365 } while (0)
9366#else
9367# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
9368 do { \
9369 if (IEM_GET_TARGET_CPU(pIemCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
9370 else return IEMOP_RAISE_INVALID_OPCODE(); \
9371 } while (0)
9372#endif
9373
9374/** The instruction requires a 186 or later. */
9375#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_186
9376# define IEMOP_HLP_MIN_186() do { } while (0)
9377#else
9378# define IEMOP_HLP_MIN_186() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_186, true)
9379#endif
9380
9381/** The instruction requires a 286 or later. */
9382#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_286
9383# define IEMOP_HLP_MIN_286() do { } while (0)
9384#else
9385# define IEMOP_HLP_MIN_286() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_286, true)
9386#endif
9387
9388/** The instruction requires a 386 or later. */
9389#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
9390# define IEMOP_HLP_MIN_386() do { } while (0)
9391#else
9392# define IEMOP_HLP_MIN_386() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, true)
9393#endif
9394
9395/** The instruction requires a 386 or later if the given expression is true. */
9396#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
9397# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) do { } while (0)
9398#else
9399# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, a_fOnlyIf)
9400#endif
9401
9402/** The instruction requires a 486 or later. */
9403#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_486
9404# define IEMOP_HLP_MIN_486() do { } while (0)
9405#else
9406# define IEMOP_HLP_MIN_486() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_486, true)
9407#endif
9408
9409/** The instruction requires a Pentium (586) or later. */
9410#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_586
9411# define IEMOP_HLP_MIN_586() do { } while (0)
9412#else
9413# define IEMOP_HLP_MIN_586() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_586, true)
9414#endif
9415
9416/** The instruction requires a PentiumPro (686) or later. */
9417#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_686
9418# define IEMOP_HLP_MIN_686() do { } while (0)
9419#else
9420# define IEMOP_HLP_MIN_686() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_686, true)
9421#endif
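/*
 * Illustrative sketch, not part of IEM: the IEMOP_HLP_MIN_* helpers above
 * compile to nothing when the build-time target CPU already guarantees the
 * feature, and otherwise fall back to a run-time check that rejects the
 * instruction.  A stand-alone model of that pattern with made-up MY_* names,
 * where returning -1 stands in for IEMOP_RAISE_INVALID_OPCODE():
 *
 *     #include <stdio.h>
 *
 *     #define MY_TARGETCPU_186    2
 *     #define MY_TARGETCPU_286    3
 *     #define MY_CFG_TARGET_CPU   MY_TARGETCPU_186   // build-time minimum
 *
 *     static int g_uTargetCpu = MY_TARGETCPU_186;    // run-time (VM config) value
 *
 *     #define MY_HLP_MIN_CPU(a_uMinCpu) \
 *         do { \
 *             if (g_uTargetCpu >= (a_uMinCpu)) { } \
 *             else return -1; \
 *         } while (0)
 *
 *     #if MY_CFG_TARGET_CPU >= MY_TARGETCPU_286
 *     # define MY_HLP_MIN_286()   do { } while (0)   // statically satisfied
 *     #else
 *     # define MY_HLP_MIN_286()   MY_HLP_MIN_CPU(MY_TARGETCPU_286)
 *     #endif
 *
 *     static int myDecodeArpl(void)   // ARPL needs a 286 or later
 *     {
 *         MY_HLP_MIN_286();
 *         return 0;                   // ...decode the instruction...
 *     }
 *
 *     int main(void)
 *     {
 *         printf("arpl on 186: %d\n", myDecodeArpl());    // -1
 *         g_uTargetCpu = MY_TARGETCPU_286;
 *         printf("arpl on 286: %d\n", myDecodeArpl());    // 0
 *         return 0;
 *     }
 */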
9422
9423
9424/** The instruction raises an \#UD in real and V8086 mode. */
9425#define IEMOP_HLP_NO_REAL_OR_V86_MODE() \
9426 do \
9427 { \
9428 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu)) \
9429 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
9430 } while (0)
9431
9432/** The instruction allows no lock prefixing (in this encoding), throw \#UD if
9433 * lock prefixed.
9434 * @deprecated IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX */
9435#define IEMOP_HLP_NO_LOCK_PREFIX() \
9436 do \
9437 { \
9438 if (pIemCpu->fPrefixes & IEM_OP_PRF_LOCK) \
9439 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
9440 } while (0)
9441
9442/** The instruction is not available in 64-bit mode, throw \#UD if we're in
9443 * 64-bit mode. */
9444#define IEMOP_HLP_NO_64BIT() \
9445 do \
9446 { \
9447 if (pIemCpu->enmCpuMode == IEMMODE_64BIT) \
9448 return IEMOP_RAISE_INVALID_OPCODE(); \
9449 } while (0)
9450
9451/** The instruction is only available in 64-bit mode, throw \#UD if we're not in
9452 * 64-bit mode. */
9453#define IEMOP_HLP_ONLY_64BIT() \
9454 do \
9455 { \
9456 if (pIemCpu->enmCpuMode != IEMMODE_64BIT) \
9457 return IEMOP_RAISE_INVALID_OPCODE(); \
9458 } while (0)
9459
9460/** The instruction defaults to 64-bit operand size in 64-bit mode. */
9461#define IEMOP_HLP_DEFAULT_64BIT_OP_SIZE() \
9462 do \
9463 { \
9464 if (pIemCpu->enmCpuMode == IEMMODE_64BIT) \
9465 iemRecalEffOpSize64Default(pIemCpu); \
9466 } while (0)
9467
9468/** The instruction has 64-bit operand size in 64-bit mode. */
9469#define IEMOP_HLP_64BIT_OP_SIZE() \
9470 do \
9471 { \
9472 if (pIemCpu->enmCpuMode == IEMMODE_64BIT) \
9473 pIemCpu->enmEffOpSize = pIemCpu->enmDefOpSize = IEMMODE_64BIT; \
9474 } while (0)
9475
9476/** Only a REX prefix immediately preceding the first opcode byte takes
9477 * effect. This macro helps ensure this and logs bad guest code. */
9478#define IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE(a_szPrf) \
9479 do \
9480 { \
9481 if (RT_UNLIKELY(pIemCpu->fPrefixes & IEM_OP_PRF_REX)) \
9482 { \
9483 Log5((a_szPrf ": Overriding REX prefix at %RX16! fPrefixes=%#x\n", \
9484 pIemCpu->CTX_SUFF(pCtx)->rip, pIemCpu->fPrefixes)); \
9485 pIemCpu->fPrefixes &= ~IEM_OP_PRF_REX_MASK; \
9486 pIemCpu->uRexB = 0; \
9487 pIemCpu->uRexIndex = 0; \
9488 pIemCpu->uRexReg = 0; \
9489 iemRecalEffOpSize(pIemCpu); \
9490 } \
9491 } while (0)
9492
9493/**
9494 * Done decoding.
9495 */
9496#define IEMOP_HLP_DONE_DECODING() \
9497 do \
9498 { \
9499 /*nothing for now, maybe later... */ \
9500 } while (0)
9501
9502/**
9503 * Done decoding, raise \#UD exception if lock prefix present.
9504 */
9505#define IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX() \
9506 do \
9507 { \
9508 if (RT_LIKELY(!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))) \
9509 { /* likely */ } \
9510 else \
9511 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
9512 } while (0)
9513#define IEMOP_HLP_DECODED_NL_1(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_fDisOpType) \
9514 do \
9515 { \
9516 if (RT_LIKELY(!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))) \
9517 { /* likely */ } \
9518 else \
9519 { \
9520 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_fDisOpType); \
9521 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
9522 } \
9523 } while (0)
9524#define IEMOP_HLP_DECODED_NL_2(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_uDisParam1, a_fDisOpType) \
9525 do \
9526 { \
9527 if (RT_LIKELY(!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))) \
9528 { /* likely */ } \
9529 else \
9530 { \
9531 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_uDisParam1); NOREF(a_fDisOpType); \
9532 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
9533 } \
9534 } while (0)
9535/**
9536 * Done decoding, raise \#UD exception if any lock, repz or repnz prefixes
9537 * are present.
9538 */
9539#define IEMOP_HLP_DONE_DECODING_NO_LOCK_REPZ_OR_REPNZ_PREFIXES() \
9540 do \
9541 { \
9542 if (RT_LIKELY(!(pIemCpu->fPrefixes & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)))) \
9543 { /* likely */ } \
9544 else \
9545 return IEMOP_RAISE_INVALID_OPCODE(); \
9546 } while (0)
9547
9548
9549/**
9550 * Calculates the effective address of a ModR/M memory operand.
9551 *
9552 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
9553 *
9554 * @return Strict VBox status code.
9555 * @param pIemCpu The IEM per CPU data.
9556 * @param bRm The ModRM byte.
9557 * @param cbImm The size of any immediate following the
9558 * effective address opcode bytes. Important for
9559 * RIP relative addressing.
9560 * @param pGCPtrEff Where to return the effective address.
9561 */
9562IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PIEMCPU pIemCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff)
9563{
9564 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
9565 PCCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
9566#define SET_SS_DEF() \
9567 do \
9568 { \
9569 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_SEG_MASK)) \
9570 pIemCpu->iEffSeg = X86_SREG_SS; \
9571 } while (0)
9572
9573 if (pIemCpu->enmCpuMode != IEMMODE_64BIT)
9574 {
9575/** @todo Check the effective address size crap! */
9576 if (pIemCpu->enmEffAddrMode == IEMMODE_16BIT)
9577 {
9578 uint16_t u16EffAddr;
9579
9580 /* Handle the disp16 form with no registers first. */
9581 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
9582 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
9583 else
9584 {
9585 /* Get the displacement. */
9586 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9587 {
9588 case 0: u16EffAddr = 0; break;
9589 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
9590 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
9591 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
9592 }
9593
9594 /* Add the base and index registers to the disp. */
9595 switch (bRm & X86_MODRM_RM_MASK)
9596 {
9597 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
9598 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
9599 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
9600 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
9601 case 4: u16EffAddr += pCtx->si; break;
9602 case 5: u16EffAddr += pCtx->di; break;
9603 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
9604 case 7: u16EffAddr += pCtx->bx; break;
9605 }
9606 }
9607
9608 *pGCPtrEff = u16EffAddr;
9609 }
9610 else
9611 {
9612 Assert(pIemCpu->enmEffAddrMode == IEMMODE_32BIT);
9613 uint32_t u32EffAddr;
9614
9615 /* Handle the disp32 form with no registers first. */
9616 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9617 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
9618 else
9619 {
9620 /* Get the register (or SIB) value. */
9621 switch ((bRm & X86_MODRM_RM_MASK))
9622 {
9623 case 0: u32EffAddr = pCtx->eax; break;
9624 case 1: u32EffAddr = pCtx->ecx; break;
9625 case 2: u32EffAddr = pCtx->edx; break;
9626 case 3: u32EffAddr = pCtx->ebx; break;
9627 case 4: /* SIB */
9628 {
9629 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9630
9631 /* Get the index and scale it. */
9632 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
9633 {
9634 case 0: u32EffAddr = pCtx->eax; break;
9635 case 1: u32EffAddr = pCtx->ecx; break;
9636 case 2: u32EffAddr = pCtx->edx; break;
9637 case 3: u32EffAddr = pCtx->ebx; break;
9638 case 4: u32EffAddr = 0; /*none */ break;
9639 case 5: u32EffAddr = pCtx->ebp; break;
9640 case 6: u32EffAddr = pCtx->esi; break;
9641 case 7: u32EffAddr = pCtx->edi; break;
9642 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9643 }
9644 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9645
9646 /* add base */
9647 switch (bSib & X86_SIB_BASE_MASK)
9648 {
9649 case 0: u32EffAddr += pCtx->eax; break;
9650 case 1: u32EffAddr += pCtx->ecx; break;
9651 case 2: u32EffAddr += pCtx->edx; break;
9652 case 3: u32EffAddr += pCtx->ebx; break;
9653 case 4: u32EffAddr += pCtx->esp; SET_SS_DEF(); break;
9654 case 5:
9655 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9656 {
9657 u32EffAddr += pCtx->ebp;
9658 SET_SS_DEF();
9659 }
9660 else
9661 {
9662 uint32_t u32Disp;
9663 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9664 u32EffAddr += u32Disp;
9665 }
9666 break;
9667 case 6: u32EffAddr += pCtx->esi; break;
9668 case 7: u32EffAddr += pCtx->edi; break;
9669 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9670 }
9671 break;
9672 }
9673 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
9674 case 6: u32EffAddr = pCtx->esi; break;
9675 case 7: u32EffAddr = pCtx->edi; break;
9676 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9677 }
9678
9679 /* Get and add the displacement. */
9680 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9681 {
9682 case 0:
9683 break;
9684 case 1:
9685 {
9686 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9687 u32EffAddr += i8Disp;
9688 break;
9689 }
9690 case 2:
9691 {
9692 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9693 u32EffAddr += u32Disp;
9694 break;
9695 }
9696 default:
9697 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
9698 }
9699
9700 }
9701 if (pIemCpu->enmEffAddrMode == IEMMODE_32BIT)
9702 *pGCPtrEff = u32EffAddr;
9703 else
9704 {
9705 Assert(pIemCpu->enmEffAddrMode == IEMMODE_16BIT);
9706 *pGCPtrEff = u32EffAddr & UINT16_MAX;
9707 }
9708 }
9709 }
9710 else
9711 {
9712 uint64_t u64EffAddr;
9713
9714 /* Handle the rip+disp32 form with no registers first. */
9715 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9716 {
9717 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
9718 u64EffAddr += pCtx->rip + pIemCpu->offOpcode + cbImm;
9719 }
9720 else
9721 {
9722 /* Get the register (or SIB) value. */
9723 switch ((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB)
9724 {
9725 case 0: u64EffAddr = pCtx->rax; break;
9726 case 1: u64EffAddr = pCtx->rcx; break;
9727 case 2: u64EffAddr = pCtx->rdx; break;
9728 case 3: u64EffAddr = pCtx->rbx; break;
9729 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
9730 case 6: u64EffAddr = pCtx->rsi; break;
9731 case 7: u64EffAddr = pCtx->rdi; break;
9732 case 8: u64EffAddr = pCtx->r8; break;
9733 case 9: u64EffAddr = pCtx->r9; break;
9734 case 10: u64EffAddr = pCtx->r10; break;
9735 case 11: u64EffAddr = pCtx->r11; break;
9736 case 13: u64EffAddr = pCtx->r13; break;
9737 case 14: u64EffAddr = pCtx->r14; break;
9738 case 15: u64EffAddr = pCtx->r15; break;
9739 /* SIB */
9740 case 4:
9741 case 12:
9742 {
9743 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9744
9745 /* Get the index and scale it. */
9746 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pIemCpu->uRexIndex)
9747 {
9748 case 0: u64EffAddr = pCtx->rax; break;
9749 case 1: u64EffAddr = pCtx->rcx; break;
9750 case 2: u64EffAddr = pCtx->rdx; break;
9751 case 3: u64EffAddr = pCtx->rbx; break;
9752 case 4: u64EffAddr = 0; /*none */ break;
9753 case 5: u64EffAddr = pCtx->rbp; break;
9754 case 6: u64EffAddr = pCtx->rsi; break;
9755 case 7: u64EffAddr = pCtx->rdi; break;
9756 case 8: u64EffAddr = pCtx->r8; break;
9757 case 9: u64EffAddr = pCtx->r9; break;
9758 case 10: u64EffAddr = pCtx->r10; break;
9759 case 11: u64EffAddr = pCtx->r11; break;
9760 case 12: u64EffAddr = pCtx->r12; break;
9761 case 13: u64EffAddr = pCtx->r13; break;
9762 case 14: u64EffAddr = pCtx->r14; break;
9763 case 15: u64EffAddr = pCtx->r15; break;
9764 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9765 }
9766 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9767
9768 /* add base */
9769 switch ((bSib & X86_SIB_BASE_MASK) | pIemCpu->uRexB)
9770 {
9771 case 0: u64EffAddr += pCtx->rax; break;
9772 case 1: u64EffAddr += pCtx->rcx; break;
9773 case 2: u64EffAddr += pCtx->rdx; break;
9774 case 3: u64EffAddr += pCtx->rbx; break;
9775 case 4: u64EffAddr += pCtx->rsp; SET_SS_DEF(); break;
9776 case 6: u64EffAddr += pCtx->rsi; break;
9777 case 7: u64EffAddr += pCtx->rdi; break;
9778 case 8: u64EffAddr += pCtx->r8; break;
9779 case 9: u64EffAddr += pCtx->r9; break;
9780 case 10: u64EffAddr += pCtx->r10; break;
9781 case 11: u64EffAddr += pCtx->r11; break;
9782 case 12: u64EffAddr += pCtx->r12; break;
9783 case 14: u64EffAddr += pCtx->r14; break;
9784 case 15: u64EffAddr += pCtx->r15; break;
9785 /* complicated encodings */
9786 case 5:
9787 case 13:
9788 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9789 {
9790 if (!pIemCpu->uRexB)
9791 {
9792 u64EffAddr += pCtx->rbp;
9793 SET_SS_DEF();
9794 }
9795 else
9796 u64EffAddr += pCtx->r13;
9797 }
9798 else
9799 {
9800 uint32_t u32Disp;
9801 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9802 u64EffAddr += (int32_t)u32Disp;
9803 }
9804 break;
9805 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9806 }
9807 break;
9808 }
9809 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9810 }
9811
9812 /* Get and add the displacement. */
9813 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9814 {
9815 case 0:
9816 break;
9817 case 1:
9818 {
9819 int8_t i8Disp;
9820 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9821 u64EffAddr += i8Disp;
9822 break;
9823 }
9824 case 2:
9825 {
9826 uint32_t u32Disp;
9827 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9828 u64EffAddr += (int32_t)u32Disp;
9829 break;
9830 }
9831 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
9832 }
9833
9834 }
9835
9836 if (pIemCpu->enmEffAddrMode == IEMMODE_64BIT)
9837 *pGCPtrEff = u64EffAddr;
9838 else
9839 {
9840 Assert(pIemCpu->enmEffAddrMode == IEMMODE_32BIT);
9841 *pGCPtrEff = u64EffAddr & UINT32_MAX;
9842 }
9843 }
9844
9845 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
9846 return VINF_SUCCESS;
9847}
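/*
 * Illustrative sketch, not part of IEM: a stripped-down, stand-alone version
 * of the 16-bit branch above.  mod=00 rm=110 is a bare disp16, everything
 * else is a base/index pair plus displacement (the displacement is assumed
 * to be already sign-extended, as IEM_OPCODE_GET_NEXT_S8_SX_U16 does it):
 *
 *     #include <stdint.h>
 *     #include <stdio.h>
 *
 *     typedef struct { uint16_t bx, bp, si, di; } MYREGS;
 *
 *     static uint16_t myCalcEa16(MYREGS const *pRegs, uint8_t bRm, uint16_t uDisp)
 *     {
 *         uint8_t const iMod = bRm >> 6;
 *         uint8_t const iRm  = bRm & 7;
 *         if (iMod == 0 && iRm == 6)
 *             return uDisp;                                // [disp16]
 *         uint16_t uEa = iMod == 0 ? 0 : uDisp;
 *         switch (iRm)
 *         {
 *             case 0: uEa += pRegs->bx + pRegs->si; break; // [bx+si]
 *             case 1: uEa += pRegs->bx + pRegs->di; break; // [bx+di]
 *             case 2: uEa += pRegs->bp + pRegs->si; break; // [bp+si] (SS default)
 *             case 3: uEa += pRegs->bp + pRegs->di; break; // [bp+di] (SS default)
 *             case 4: uEa += pRegs->si;             break; // [si]
 *             case 5: uEa += pRegs->di;             break; // [di]
 *             case 6: uEa += pRegs->bp;             break; // [bp+disp] (SS default)
 *             case 7: uEa += pRegs->bx;             break; // [bx]
 *         }
 *         return uEa;                                      // wraps at 64K, like above
 *     }
 *
 *     int main(void)
 *     {
 *         MYREGS Regs = { 0x1000, 0x2000, 0x0010, 0x0020 };
 *         // mov ax, [bx+si+12h]: ModRM 0x40 (mod=01 reg=000 rm=000), disp8=0x12
 *         printf("%#x\n", myCalcEa16(&Regs, 0x40, 0x12));  // 0x1022
 *         return 0;
 *     }
 */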
9848
9849/** @} */
9850
9851
9852
9853/*
9854 * Include the instructions
9855 */
9856#include "IEMAllInstructions.cpp.h"
9857
9858
9859
9860
9861#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
9862
9863/**
9864 * Sets up execution verification mode.
9865 */
9866IEM_STATIC void iemExecVerificationModeSetup(PIEMCPU pIemCpu)
9867{
9868 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
9869 PCPUMCTX pOrgCtx = pIemCpu->CTX_SUFF(pCtx);
9870
9871 /*
9872 * Always note down the address of the current instruction.
9873 */
9874 pIemCpu->uOldCs = pOrgCtx->cs.Sel;
9875 pIemCpu->uOldRip = pOrgCtx->rip;
9876
9877 /*
9878 * Enable verification and/or logging.
9879 */
9880 bool fNewNoRem = !LogIs6Enabled(); /* logging triggers the no-rem/rem verification stuff */
9881 if ( fNewNoRem
9882 && ( 0
9883#if 0 /* auto enable on first paged protected mode interrupt */
9884 || ( pOrgCtx->eflags.Bits.u1IF
9885 && (pOrgCtx->cr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG)
9886 && TRPMHasTrap(pVCpu)
9887 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip) )
9888#endif
9889#if 0
9890 || ( pOrgCtx->cs.Sel == 0x10
9891 && ( pOrgCtx->rip == 0x90119e3e
9892 || pOrgCtx->rip == 0x901d9810))
9893#endif
9894#if 0 /* Auto enable DSL - FPU stuff. */
9895 || ( pOrgCtx->cs.Sel == 0x10
9896 && (// pOrgCtx->rip == 0xc02ec07f
9897 //|| pOrgCtx->rip == 0xc02ec082
9898 //|| pOrgCtx->rip == 0xc02ec0c9
9899 0
9900 || pOrgCtx->rip == 0x0c010e7c4 /* fxsave */ ) )
9901#endif
9902#if 0 /* Auto enable DSL - fstp st0 stuff. */
9903 || (pOrgCtx->cs.Sel == 0x23 && pOrgCtx->rip == 0x804aff7)
9904#endif
9905#if 0
9906 || pOrgCtx->rip == 0x9022bb3a
9907#endif
9908#if 0
9909 || (pOrgCtx->cs.Sel == 0x58 && pOrgCtx->rip == 0x3be) /* NT4SP1 sidt/sgdt in early loader code */
9910#endif
9911#if 0
9912 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013ec28) /* NT4SP1 first str (early boot) */
9913 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x80119e3f) /* NT4SP1 second str (early boot) */
9914#endif
9915#if 0 /* NT4SP1 - later on the blue screen, things goes wrong... */
9916 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8010a5df)
9917 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013a7c4)
9918 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013a7d2)
9919#endif
9920#if 0 /* NT4SP1 - xadd early boot. */
9921 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8019cf0f)
9922#endif
9923#if 0 /* NT4SP1 - wrmsr (intel MSR). */
9924 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8011a6d4)
9925#endif
9926#if 0 /* NT4SP1 - cmpxchg (AMD). */
9927 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x801684c1)
9928#endif
9929#if 0 /* NT4SP1 - fnstsw + 2 (AMD). */
9930 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x801c6b88+2)
9931#endif
9932#if 0 /* NT4SP1 - iret to v8086 -- too generic a place? (N/A with GAs installed) */
9933 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013bd5d)
9934
9935#endif
9936#if 0 /* NT4SP1 - iret to v8086 (executing edlin) */
9937 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013b609)
9938
9939#endif
9940#if 0 /* NT4SP1 - frstor [ecx] */
9941 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013d11f)
9942#endif
9943#if 0 /* xxxxxx - All long mode code. */
9944 || (pOrgCtx->msrEFER & MSR_K6_EFER_LMA)
9945#endif
9946#if 0 /* rep movsq linux 3.7 64-bit boot. */
9947 || (pOrgCtx->rip == 0x0000000000100241)
9948#endif
9949#if 0 /* linux 3.7 64-bit boot - '000000000215e240'. */
9950 || (pOrgCtx->rip == 0x000000000215e240)
9951#endif
9952#if 0 /* DOS's size-overridden iret to v8086. */
9953 || (pOrgCtx->rip == 0x427 && pOrgCtx->cs.Sel == 0xb8)
9954#endif
9955 )
9956 )
9957 {
9958 RTLogGroupSettings(NULL, "iem.eo.l6.l2");
9959 RTLogFlags(NULL, "enabled");
9960 fNewNoRem = false;
9961 }
9962 if (fNewNoRem != pIemCpu->fNoRem)
9963 {
9964 pIemCpu->fNoRem = fNewNoRem;
9965 if (!fNewNoRem)
9966 {
9967 LogAlways(("Enabling verification mode!\n"));
9968 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
9969 }
9970 else
9971 LogAlways(("Disabling verification mode!\n"));
9972 }
9973
9974 /*
9975 * Switch state.
9976 */
9977 if (IEM_VERIFICATION_ENABLED(pIemCpu))
9978 {
9979 static CPUMCTX s_DebugCtx; /* Ugly! */
9980
9981 s_DebugCtx = *pOrgCtx;
9982 pIemCpu->CTX_SUFF(pCtx) = &s_DebugCtx;
9983 }
9984
9985 /*
9986 * See if there is an interrupt pending in TRPM and inject it if we can.
9987 */
9988 pIemCpu->uInjectCpl = UINT8_MAX;
9989 if ( pOrgCtx->eflags.Bits.u1IF
9990 && TRPMHasTrap(pVCpu)
9991 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip)
9992 {
9993 uint8_t u8TrapNo;
9994 TRPMEVENT enmType;
9995 RTGCUINT uErrCode;
9996 RTGCPTR uCr2;
9997 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
9998 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
9999 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
10000 TRPMResetTrap(pVCpu);
10001 pIemCpu->uInjectCpl = pIemCpu->uCpl;
10002 }
10003
10004 /*
10005 * Reset the counters.
10006 */
10007 pIemCpu->cIOReads = 0;
10008 pIemCpu->cIOWrites = 0;
10009 pIemCpu->fIgnoreRaxRdx = false;
10010 pIemCpu->fOverlappingMovs = false;
10011 pIemCpu->fProblematicMemory = false;
10012 pIemCpu->fUndefinedEFlags = 0;
10013
10014 if (IEM_VERIFICATION_ENABLED(pIemCpu))
10015 {
10016 /*
10017 * Free all verification records.
10018 */
10019 PIEMVERIFYEVTREC pEvtRec = pIemCpu->pIemEvtRecHead;
10020 pIemCpu->pIemEvtRecHead = NULL;
10021 pIemCpu->ppIemEvtRecNext = &pIemCpu->pIemEvtRecHead;
10022 do
10023 {
10024 while (pEvtRec)
10025 {
10026 PIEMVERIFYEVTREC pNext = pEvtRec->pNext;
10027 pEvtRec->pNext = pIemCpu->pFreeEvtRec;
10028 pIemCpu->pFreeEvtRec = pEvtRec;
10029 pEvtRec = pNext;
10030 }
10031 pEvtRec = pIemCpu->pOtherEvtRecHead;
10032 pIemCpu->pOtherEvtRecHead = NULL;
10033 pIemCpu->ppOtherEvtRecNext = &pIemCpu->pOtherEvtRecHead;
10034 } while (pEvtRec);
10035 }
10036}
10037
10038
10039/**
10040 * Allocate an event record.
10041 * @returns Pointer to a record.
10042 */
10043IEM_STATIC PIEMVERIFYEVTREC iemVerifyAllocRecord(PIEMCPU pIemCpu)
10044{
10045 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
10046 return NULL;
10047
10048 PIEMVERIFYEVTREC pEvtRec = pIemCpu->pFreeEvtRec;
10049 if (pEvtRec)
10050 pIemCpu->pFreeEvtRec = pEvtRec->pNext;
10051 else
10052 {
10053 if (!pIemCpu->ppIemEvtRecNext)
10054 return NULL; /* Too early (fake PCIBIOS), ignore notification. */
10055
10056 pEvtRec = (PIEMVERIFYEVTREC)MMR3HeapAlloc(IEMCPU_TO_VM(pIemCpu), MM_TAG_EM /* lazy bird*/, sizeof(*pEvtRec));
10057 if (!pEvtRec)
10058 return NULL;
10059 }
10060 pEvtRec->enmEvent = IEMVERIFYEVENT_INVALID;
10061 pEvtRec->pNext = NULL;
10062 return pEvtRec;
10063}
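/*
 * Illustrative sketch, not part of IEM: iemVerifyAllocRecord above recycles
 * event records through a singly linked free list, so the MMR3 heap is only
 * hit when the free list runs dry.  A minimal stand-alone model of that
 * allocation pattern, using made-up my*/MYREC names:
 *
 *     #include <stdlib.h>
 *
 *     typedef struct MYREC { struct MYREC *pNext; int iPayload; } MYREC;
 *     static MYREC *g_pFreeList = NULL;       // records returned for reuse
 *
 *     static MYREC *myAllocRec(void)
 *     {
 *         MYREC *pRec = g_pFreeList;
 *         if (pRec)
 *             g_pFreeList = pRec->pNext;      // pop a recycled record
 *         else
 *         {
 *             pRec = (MYREC *)malloc(sizeof(*pRec));
 *             if (!pRec)
 *                 return NULL;
 *         }
 *         pRec->pNext    = NULL;
 *         pRec->iPayload = 0;
 *         return pRec;
 *     }
 *
 *     static void myRecycleRec(MYREC *pRec)   // what the setup code does in bulk
 *     {
 *         pRec->pNext = g_pFreeList;
 *         g_pFreeList = pRec;
 *     }
 */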
10064
10065
10066/**
10067 * IOMMMIORead notification.
10068 */
10069VMM_INT_DECL(void) IEMNotifyMMIORead(PVM pVM, RTGCPHYS GCPhys, size_t cbValue)
10070{
10071 PVMCPU pVCpu = VMMGetCpu(pVM);
10072 if (!pVCpu)
10073 return;
10074 PIEMCPU pIemCpu = &pVCpu->iem.s;
10075 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
10076 if (!pEvtRec)
10077 return;
10078 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
10079 pEvtRec->u.RamRead.GCPhys = GCPhys;
10080 pEvtRec->u.RamRead.cb = (uint32_t)cbValue;
10081 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
10082 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
10083}
10084
10085
10086/**
10087 * IOMMMIOWrite notification.
10088 */
10089VMM_INT_DECL(void) IEMNotifyMMIOWrite(PVM pVM, RTGCPHYS GCPhys, uint32_t u32Value, size_t cbValue)
10090{
10091 PVMCPU pVCpu = VMMGetCpu(pVM);
10092 if (!pVCpu)
10093 return;
10094 PIEMCPU pIemCpu = &pVCpu->iem.s;
10095 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
10096 if (!pEvtRec)
10097 return;
10098 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
10099 pEvtRec->u.RamWrite.GCPhys = GCPhys;
10100 pEvtRec->u.RamWrite.cb = (uint32_t)cbValue;
10101 pEvtRec->u.RamWrite.ab[0] = RT_BYTE1(u32Value);
10102 pEvtRec->u.RamWrite.ab[1] = RT_BYTE2(u32Value);
10103 pEvtRec->u.RamWrite.ab[2] = RT_BYTE3(u32Value);
10104 pEvtRec->u.RamWrite.ab[3] = RT_BYTE4(u32Value);
10105 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
10106 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
10107}
10108
10109
10110/**
10111 * IOMIOPortRead notification.
10112 */
10113VMM_INT_DECL(void) IEMNotifyIOPortRead(PVM pVM, RTIOPORT Port, size_t cbValue)
10114{
10115 PVMCPU pVCpu = VMMGetCpu(pVM);
10116 if (!pVCpu)
10117 return;
10118 PIEMCPU pIemCpu = &pVCpu->iem.s;
10119 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
10120 if (!pEvtRec)
10121 return;
10122 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
10123 pEvtRec->u.IOPortRead.Port = Port;
10124 pEvtRec->u.IOPortRead.cbValue = (uint8_t)cbValue;
10125 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
10126 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
10127}
10128
10129/**
10130 * IOMIOPortWrite notification.
10131 */
10132VMM_INT_DECL(void) IEMNotifyIOPortWrite(PVM pVM, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
10133{
10134 PVMCPU pVCpu = VMMGetCpu(pVM);
10135 if (!pVCpu)
10136 return;
10137 PIEMCPU pIemCpu = &pVCpu->iem.s;
10138 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
10139 if (!pEvtRec)
10140 return;
10141 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
10142 pEvtRec->u.IOPortWrite.Port = Port;
10143 pEvtRec->u.IOPortWrite.cbValue = (uint8_t)cbValue;
10144 pEvtRec->u.IOPortWrite.u32Value = u32Value;
10145 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
10146 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
10147}
10148
10149
10150VMM_INT_DECL(void) IEMNotifyIOPortReadString(PVM pVM, RTIOPORT Port, void *pvDst, RTGCUINTREG cTransfers, size_t cbValue)
10151{
10152 PVMCPU pVCpu = VMMGetCpu(pVM);
10153 if (!pVCpu)
10154 return;
10155 PIEMCPU pIemCpu = &pVCpu->iem.s;
10156 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
10157 if (!pEvtRec)
10158 return;
10159 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_STR_READ;
10160 pEvtRec->u.IOPortStrRead.Port = Port;
10161 pEvtRec->u.IOPortStrRead.cbValue = (uint8_t)cbValue;
10162 pEvtRec->u.IOPortStrRead.cTransfers = cTransfers;
10163 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
10164 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
10165}
10166
10167
10168VMM_INT_DECL(void) IEMNotifyIOPortWriteString(PVM pVM, RTIOPORT Port, void const *pvSrc, RTGCUINTREG cTransfers, size_t cbValue)
10169{
10170 PVMCPU pVCpu = VMMGetCpu(pVM);
10171 if (!pVCpu)
10172 return;
10173 PIEMCPU pIemCpu = &pVCpu->iem.s;
10174 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
10175 if (!pEvtRec)
10176 return;
10177 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_STR_WRITE;
10178 pEvtRec->u.IOPortStrWrite.Port = Port;
10179 pEvtRec->u.IOPortStrWrite.cbValue = (uint8_t)cbValue;
10180 pEvtRec->u.IOPortStrWrite.cTransfers = cTransfers;
10181 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
10182 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
10183}
10184
10185
10186/**
10187 * Fakes and records an I/O port read.
10188 *
10189 * @returns VINF_SUCCESS.
10190 * @param pIemCpu The IEM per CPU data.
10191 * @param Port The I/O port.
10192 * @param pu32Value Where to store the fake value.
10193 * @param cbValue The size of the access.
10194 */
10195IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
10196{
10197 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
10198 if (pEvtRec)
10199 {
10200 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
10201 pEvtRec->u.IOPortRead.Port = Port;
10202 pEvtRec->u.IOPortRead.cbValue = (uint8_t)cbValue;
10203 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
10204 *pIemCpu->ppIemEvtRecNext = pEvtRec;
10205 }
10206 pIemCpu->cIOReads++;
10207 *pu32Value = 0xcccccccc;
10208 return VINF_SUCCESS;
10209}
10210
10211
10212/**
10213 * Fakes and records an I/O port write.
10214 *
10215 * @returns VINF_SUCCESS.
10216 * @param pIemCpu The IEM per CPU data.
10217 * @param Port The I/O port.
10218 * @param u32Value The value being written.
10219 * @param cbValue The size of the access.
10220 */
10221IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
10222{
10223 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
10224 if (pEvtRec)
10225 {
10226 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
10227 pEvtRec->u.IOPortWrite.Port = Port;
10228 pEvtRec->u.IOPortWrite.cbValue = (uint8_t)cbValue;
10229 pEvtRec->u.IOPortWrite.u32Value = u32Value;
10230 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
10231 *pIemCpu->ppIemEvtRecNext = pEvtRec;
10232 }
10233 pIemCpu->cIOWrites++;
10234 return VINF_SUCCESS;
10235}
10236
10237
10238/**
10239 * Used to add extra details about a stub case.
10240 * @param pIemCpu The IEM per CPU state.
10241 */
10242IEM_STATIC void iemVerifyAssertMsg2(PIEMCPU pIemCpu)
10243{
10244 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
10245 PVM pVM = IEMCPU_TO_VM(pIemCpu);
10246 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
10247 char szRegs[4096];
10248 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
10249 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
10250 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
10251 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
10252 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
10253 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
10254 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
10255 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
10256 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
10257 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
10258 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
10259 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
10260 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
10261 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
10262 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
10263 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
10264 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
10265 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
10266 " efer=%016VR{efer}\n"
10267 " pat=%016VR{pat}\n"
10268 " sf_mask=%016VR{sf_mask}\n"
10269 "krnl_gs_base=%016VR{krnl_gs_base}\n"
10270 " lstar=%016VR{lstar}\n"
10271 " star=%016VR{star} cstar=%016VR{cstar}\n"
10272 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
10273 );
10274
10275 char szInstr1[256];
10276 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pIemCpu->uOldCs, pIemCpu->uOldRip,
10277 DBGF_DISAS_FLAGS_DEFAULT_MODE,
10278 szInstr1, sizeof(szInstr1), NULL);
10279 char szInstr2[256];
10280 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
10281 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
10282 szInstr2, sizeof(szInstr2), NULL);
10283
10284 RTAssertMsg2Weak("%s%s\n%s\n", szRegs, szInstr1, szInstr2);
10285}
10286
10287
10288/**
10289 * Used by iemVerifyAssertRecord and iemVerifyAssertRecords to add a record
10290 * dump to the assertion info.
10291 *
10292 * @param pEvtRec The record to dump.
10293 */
10294IEM_STATIC void iemVerifyAssertAddRecordDump(PIEMVERIFYEVTREC pEvtRec)
10295{
10296 switch (pEvtRec->enmEvent)
10297 {
10298 case IEMVERIFYEVENT_IOPORT_READ:
10299 RTAssertMsg2Add("I/O PORT READ from %#6x, %d bytes\n",
10300 pEvtRec->u.IOPortRead.Port,
10301 pEvtRec->u.IOPortRead.cbValue);
10302 break;
10303 case IEMVERIFYEVENT_IOPORT_WRITE:
10304 RTAssertMsg2Add("I/O PORT WRITE to %#6x, %d bytes, value %#x\n",
10305 pEvtRec->u.IOPortWrite.Port,
10306 pEvtRec->u.IOPortWrite.cbValue,
10307 pEvtRec->u.IOPortWrite.u32Value);
10308 break;
10309 case IEMVERIFYEVENT_IOPORT_STR_READ:
10310 RTAssertMsg2Add("I/O PORT STRING READ from %#6x, %d bytes, %#x times\n",
10311 pEvtRec->u.IOPortStrRead.Port,
10312 pEvtRec->u.IOPortStrRead.cbValue,
10313 pEvtRec->u.IOPortStrRead.cTransfers);
10314 break;
10315 case IEMVERIFYEVENT_IOPORT_STR_WRITE:
10316 RTAssertMsg2Add("I/O PORT STRING WRITE to %#6x, %d bytes, %#x times\n",
10317 pEvtRec->u.IOPortStrWrite.Port,
10318 pEvtRec->u.IOPortStrWrite.cbValue,
10319 pEvtRec->u.IOPortStrWrite.cTransfers);
10320 break;
10321 case IEMVERIFYEVENT_RAM_READ:
10322 RTAssertMsg2Add("RAM READ at %RGp, %#4zx bytes\n",
10323 pEvtRec->u.RamRead.GCPhys,
10324 pEvtRec->u.RamRead.cb);
10325 break;
10326 case IEMVERIFYEVENT_RAM_WRITE:
10327 RTAssertMsg2Add("RAM WRITE at %RGp, %#4zx bytes: %.*Rhxs\n",
10328 pEvtRec->u.RamWrite.GCPhys,
10329 pEvtRec->u.RamWrite.cb,
10330 (int)pEvtRec->u.RamWrite.cb,
10331 pEvtRec->u.RamWrite.ab);
10332 break;
10333 default:
10334 AssertMsgFailed(("Invalid event type %d\n", pEvtRec->enmEvent));
10335 break;
10336 }
10337}
10338
10339
10340/**
10341 * Raises an assertion on the two specified records, showing the given message
10342 * with both record dumps attached.
10343 *
10344 * @param pIemCpu The IEM per CPU data.
10345 * @param pEvtRec1 The first record.
10346 * @param pEvtRec2 The second record.
10347 * @param pszMsg The message explaining why we're asserting.
10348 */
10349IEM_STATIC void iemVerifyAssertRecords(PIEMCPU pIemCpu, PIEMVERIFYEVTREC pEvtRec1, PIEMVERIFYEVTREC pEvtRec2, const char *pszMsg)
10350{
10351 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
10352 iemVerifyAssertAddRecordDump(pEvtRec1);
10353 iemVerifyAssertAddRecordDump(pEvtRec2);
10354 iemVerifyAssertMsg2(pIemCpu);
10355 RTAssertPanic();
10356}
10357
10358
10359/**
10360 * Raises an assertion on the specified record, showing the given message with
10361 * a record dump attached.
10362 *
10363 * @param pIemCpu The IEM per CPU data.
10364 * @param pEvtRec The record to dump.
10365 * @param pszMsg The message explaining why we're asserting.
10366 */
10367IEM_STATIC void iemVerifyAssertRecord(PIEMCPU pIemCpu, PIEMVERIFYEVTREC pEvtRec, const char *pszMsg)
10368{
10369 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
10370 iemVerifyAssertAddRecordDump(pEvtRec);
10371 iemVerifyAssertMsg2(pIemCpu);
10372 RTAssertPanic();
10373}
10374
10375
10376/**
10377 * Verifies a write record.
10378 *
10379 * @param pIemCpu The IEM per CPU data.
10380 * @param pEvtRec The write record.
10381 * @param fRem Set if REM did the other execution; clear if it
10382 * was HM.
10383 */
10384IEM_STATIC void iemVerifyWriteRecord(PIEMCPU pIemCpu, PIEMVERIFYEVTREC pEvtRec, bool fRem)
10385{
10386 uint8_t abBuf[sizeof(pEvtRec->u.RamWrite.ab)]; RT_ZERO(abBuf);
10387 Assert(sizeof(abBuf) >= pEvtRec->u.RamWrite.cb);
10388 int rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), abBuf, pEvtRec->u.RamWrite.GCPhys, pEvtRec->u.RamWrite.cb);
10389 if ( RT_FAILURE(rc)
10390 || memcmp(abBuf, pEvtRec->u.RamWrite.ab, pEvtRec->u.RamWrite.cb) )
10391 {
10392 /* fend off ins */
10393 if ( !pIemCpu->cIOReads
10394 || pEvtRec->u.RamWrite.ab[0] != 0xcc
10395 || ( pEvtRec->u.RamWrite.cb != 1
10396 && pEvtRec->u.RamWrite.cb != 2
10397 && pEvtRec->u.RamWrite.cb != 4) )
10398 {
10399 /* fend off ROMs and MMIO */
10400 if ( pEvtRec->u.RamWrite.GCPhys - UINT32_C(0x000a0000) > UINT32_C(0x60000)
10401 && pEvtRec->u.RamWrite.GCPhys - UINT32_C(0xfffc0000) > UINT32_C(0x40000) )
10402 {
10403 /* fend off fxsave */
10404 if (pEvtRec->u.RamWrite.cb != 512)
10405 {
10406 const char *pszWho = fRem ? "rem" : HMR3IsVmxEnabled(IEMCPU_TO_VM(pIemCpu)->pUVM) ? "vmx" : "svm";
10407 RTAssertMsg1(NULL, __LINE__, __FILE__, __PRETTY_FUNCTION__);
10408 RTAssertMsg2Weak("Memory at %RGv differs\n", pEvtRec->u.RamWrite.GCPhys);
10409 RTAssertMsg2Add("%s: %.*Rhxs\n"
10410 "iem: %.*Rhxs\n",
10411 pszWho, pEvtRec->u.RamWrite.cb, abBuf,
10412 pEvtRec->u.RamWrite.cb, pEvtRec->u.RamWrite.ab);
10413 iemVerifyAssertAddRecordDump(pEvtRec);
10414 iemVerifyAssertMsg2(pIemCpu);
10415 RTAssertPanic();
10416 }
10417 }
10418 }
10419 }
10420
10421}
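/*
 * Illustrative note, not part of IEM: the ROM/MMIO fend-off above relies on
 * the unsigned "subtract and compare" range check, where addresses below the
 * range start wrap around to huge values so a single comparison excludes the
 * whole window.  A tiny stand-alone model (myIsOutside is a made-up name):
 *
 *     #include <stdbool.h>
 *     #include <stdint.h>
 *     #include <stdio.h>
 *
 *     // True when uAddr lies outside [uStart, uStart + cbWindow].
 *     static bool myIsOutside(uint64_t uAddr, uint64_t uStart, uint64_t cbWindow)
 *     {
 *         return uAddr - uStart > cbWindow;   // wraps for uAddr < uStart
 *     }
 *
 *     int main(void)
 *     {
 *         printf("%d\n", myIsOutside(0x000b8000, 0x000a0000, 0x60000)); // 0: VGA window
 *         printf("%d\n", myIsOutside(0x00200000, 0x000a0000, 0x60000)); // 1: plain RAM
 *         return 0;
 *     }
 */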
10422
10423/**
10424 * Performs the post-execution verification checks.
10425 */
10426IEM_STATIC void iemExecVerificationModeCheck(PIEMCPU pIemCpu)
10427{
10428 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
10429 return;
10430
10431 /*
10432 * Switch back the state.
10433 */
10434 PCPUMCTX pOrgCtx = CPUMQueryGuestCtxPtr(IEMCPU_TO_VMCPU(pIemCpu));
10435 PCPUMCTX pDebugCtx = pIemCpu->CTX_SUFF(pCtx);
10436 Assert(pOrgCtx != pDebugCtx);
10437 pIemCpu->CTX_SUFF(pCtx) = pOrgCtx;
10438
10439 /*
10440 * Execute the instruction in REM.
10441 */
10442 bool fRem = false;
10443 PVM pVM = IEMCPU_TO_VM(pIemCpu);
10444 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
10445 VBOXSTRICTRC rc = VERR_EM_CANNOT_EXEC_GUEST;
10446#ifdef IEM_VERIFICATION_MODE_FULL_HM
10447 if ( HMIsEnabled(pVM)
10448 && pIemCpu->cIOReads == 0
10449 && pIemCpu->cIOWrites == 0
10450 && !pIemCpu->fProblematicMemory)
10451 {
10452 uint64_t uStartRip = pOrgCtx->rip;
10453 unsigned iLoops = 0;
10454 do
10455 {
10456 rc = EMR3HmSingleInstruction(pVM, pVCpu, EM_ONE_INS_FLAGS_RIP_CHANGE);
10457 iLoops++;
10458 } while ( rc == VINF_SUCCESS
10459 || ( rc == VINF_EM_DBG_STEPPED
10460 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
10461 && EMGetInhibitInterruptsPC(pVCpu) == pOrgCtx->rip)
10462 || ( pOrgCtx->rip != pDebugCtx->rip
10463 && pIemCpu->uInjectCpl != UINT8_MAX
10464 && iLoops < 8) );
10465 if (rc == VINF_EM_RESCHEDULE && pOrgCtx->rip != uStartRip)
10466 rc = VINF_SUCCESS;
10467 }
10468#endif
10469 if ( rc == VERR_EM_CANNOT_EXEC_GUEST
10470 || rc == VINF_IOM_R3_IOPORT_READ
10471 || rc == VINF_IOM_R3_IOPORT_WRITE
10472 || rc == VINF_IOM_R3_MMIO_READ
10473 || rc == VINF_IOM_R3_MMIO_READ_WRITE
10474 || rc == VINF_IOM_R3_MMIO_WRITE
10475 || rc == VINF_CPUM_R3_MSR_READ
10476 || rc == VINF_CPUM_R3_MSR_WRITE
10477 || rc == VINF_EM_RESCHEDULE
10478 )
10479 {
10480 EMRemLock(pVM);
10481 rc = REMR3EmulateInstruction(pVM, pVCpu);
10482 AssertRC(rc);
10483 EMRemUnlock(pVM);
10484 fRem = true;
10485 }
10486
10487 /*
10488 * Compare the register states.
10489 */
10490 unsigned cDiffs = 0;
10491 if (memcmp(pOrgCtx, pDebugCtx, sizeof(*pDebugCtx)))
10492 {
10493 //Log(("REM and IEM ends up with different registers!\n"));
10494 const char *pszWho = fRem ? "rem" : HMR3IsVmxEnabled(pVM->pUVM) ? "vmx" : "svm";
10495
10496# define CHECK_FIELD(a_Field) \
10497 do \
10498 { \
10499 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
10500 { \
10501 switch (sizeof(pOrgCtx->a_Field)) \
10502 { \
10503 case 1: RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
10504 case 2: RTAssertMsg2Weak(" %8s differs - iem=%04x - %s=%04x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
10505 case 4: RTAssertMsg2Weak(" %8s differs - iem=%08x - %s=%08x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
10506 case 8: RTAssertMsg2Weak(" %8s differs - iem=%016llx - %s=%016llx\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
10507 default: RTAssertMsg2Weak(" %8s differs\n", #a_Field); break; \
10508 } \
10509 cDiffs++; \
10510 } \
10511 } while (0)
10512# define CHECK_XSTATE_FIELD(a_Field) \
10513 do \
10514 { \
10515 if (pOrgXState->a_Field != pDebugXState->a_Field) \
10516 { \
10517 switch (sizeof(pOrgXState->a_Field)) \
10518 { \
10519 case 1: RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
10520 case 2: RTAssertMsg2Weak(" %8s differs - iem=%04x - %s=%04x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
10521 case 4: RTAssertMsg2Weak(" %8s differs - iem=%08x - %s=%08x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
10522 case 8: RTAssertMsg2Weak(" %8s differs - iem=%016llx - %s=%016llx\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
10523 default: RTAssertMsg2Weak(" %8s differs\n", #a_Field); break; \
10524 } \
10525 cDiffs++; \
10526 } \
10527 } while (0)
10528
10529# define CHECK_BIT_FIELD(a_Field) \
10530 do \
10531 { \
10532 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
10533 { \
10534 RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); \
10535 cDiffs++; \
10536 } \
10537 } while (0)
10538
10539# define CHECK_SEL(a_Sel) \
10540 do \
10541 { \
10542 CHECK_FIELD(a_Sel.Sel); \
10543 CHECK_FIELD(a_Sel.Attr.u); \
10544 CHECK_FIELD(a_Sel.u64Base); \
10545 CHECK_FIELD(a_Sel.u32Limit); \
10546 CHECK_FIELD(a_Sel.fFlags); \
10547 } while (0)
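/* Illustrative note, not generated code: CHECK_FIELD(rip) above expands into
   a compare plus a switch on sizeof(pOrgCtx->rip); only the matching case can
   fire, so for the 8 byte rip member the effective code is essentially

       if (pOrgCtx->rip != pDebugCtx->rip)
       {
           RTAssertMsg2Weak(" %8s differs - iem=%016llx - %s=%016llx\n",
                            "rip", pDebugCtx->rip, pszWho, pOrgCtx->rip);
           cDiffs++;
       }

   with the field name supplied by the #a_Field stringification. */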
10548
10549 PX86XSAVEAREA pOrgXState = pOrgCtx->CTX_SUFF(pXState);
10550 PX86XSAVEAREA pDebugXState = pDebugCtx->CTX_SUFF(pXState);
10551
10552#if 1 /* The recompiler doesn't update these the intel way. */
10553 if (fRem)
10554 {
10555 pOrgXState->x87.FOP = pDebugXState->x87.FOP;
10556 pOrgXState->x87.FPUIP = pDebugXState->x87.FPUIP;
10557 pOrgXState->x87.CS = pDebugXState->x87.CS;
10558 pOrgXState->x87.Rsrvd1 = pDebugXState->x87.Rsrvd1;
10559 pOrgXState->x87.FPUDP = pDebugXState->x87.FPUDP;
10560 pOrgXState->x87.DS = pDebugXState->x87.DS;
10561 pOrgXState->x87.Rsrvd2 = pDebugXState->x87.Rsrvd2;
10562 //pOrgXState->x87.MXCSR_MASK = pDebugXState->x87.MXCSR_MASK;
10563 if ((pOrgXState->x87.FSW & X86_FSW_TOP_MASK) == (pDebugXState->x87.FSW & X86_FSW_TOP_MASK))
10564 pOrgXState->x87.FSW = pDebugXState->x87.FSW;
10565 }
10566#endif
10567 if (memcmp(&pOrgXState->x87, &pDebugXState->x87, sizeof(pDebugXState->x87)))
10568 {
10569 RTAssertMsg2Weak(" the FPU state differs\n");
10570 cDiffs++;
10571 CHECK_XSTATE_FIELD(x87.FCW);
10572 CHECK_XSTATE_FIELD(x87.FSW);
10573 CHECK_XSTATE_FIELD(x87.FTW);
10574 CHECK_XSTATE_FIELD(x87.FOP);
10575 CHECK_XSTATE_FIELD(x87.FPUIP);
10576 CHECK_XSTATE_FIELD(x87.CS);
10577 CHECK_XSTATE_FIELD(x87.Rsrvd1);
10578 CHECK_XSTATE_FIELD(x87.FPUDP);
10579 CHECK_XSTATE_FIELD(x87.DS);
10580 CHECK_XSTATE_FIELD(x87.Rsrvd2);
10581 CHECK_XSTATE_FIELD(x87.MXCSR);
10582 CHECK_XSTATE_FIELD(x87.MXCSR_MASK);
10583 CHECK_XSTATE_FIELD(x87.aRegs[0].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[0].au64[1]);
10584 CHECK_XSTATE_FIELD(x87.aRegs[1].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[1].au64[1]);
10585 CHECK_XSTATE_FIELD(x87.aRegs[2].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[2].au64[1]);
10586 CHECK_XSTATE_FIELD(x87.aRegs[3].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[3].au64[1]);
10587 CHECK_XSTATE_FIELD(x87.aRegs[4].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[4].au64[1]);
10588 CHECK_XSTATE_FIELD(x87.aRegs[5].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[5].au64[1]);
10589 CHECK_XSTATE_FIELD(x87.aRegs[6].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[6].au64[1]);
10590 CHECK_XSTATE_FIELD(x87.aRegs[7].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[7].au64[1]);
10591 CHECK_XSTATE_FIELD(x87.aXMM[ 0].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 0].au64[1]);
10592 CHECK_XSTATE_FIELD(x87.aXMM[ 1].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 1].au64[1]);
10593 CHECK_XSTATE_FIELD(x87.aXMM[ 2].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 2].au64[1]);
10594 CHECK_XSTATE_FIELD(x87.aXMM[ 3].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 3].au64[1]);
10595 CHECK_XSTATE_FIELD(x87.aXMM[ 4].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 4].au64[1]);
10596 CHECK_XSTATE_FIELD(x87.aXMM[ 5].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 5].au64[1]);
10597 CHECK_XSTATE_FIELD(x87.aXMM[ 6].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 6].au64[1]);
10598 CHECK_XSTATE_FIELD(x87.aXMM[ 7].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 7].au64[1]);
10599 CHECK_XSTATE_FIELD(x87.aXMM[ 8].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 8].au64[1]);
10600 CHECK_XSTATE_FIELD(x87.aXMM[ 9].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 9].au64[1]);
10601 CHECK_XSTATE_FIELD(x87.aXMM[10].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[10].au64[1]);
10602 CHECK_XSTATE_FIELD(x87.aXMM[11].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[11].au64[1]);
10603 CHECK_XSTATE_FIELD(x87.aXMM[12].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[12].au64[1]);
10604 CHECK_XSTATE_FIELD(x87.aXMM[13].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[13].au64[1]);
10605 CHECK_XSTATE_FIELD(x87.aXMM[14].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[14].au64[1]);
10606 CHECK_XSTATE_FIELD(x87.aXMM[15].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[15].au64[1]);
10607 for (unsigned i = 0; i < RT_ELEMENTS(pOrgXState->x87.au32RsrvdRest); i++)
10608 CHECK_XSTATE_FIELD(x87.au32RsrvdRest[i]);
10609 }
10610 CHECK_FIELD(rip);
10611 uint32_t fFlagsMask = UINT32_MAX & ~pIemCpu->fUndefinedEFlags;
10612 if ((pOrgCtx->rflags.u & fFlagsMask) != (pDebugCtx->rflags.u & fFlagsMask))
10613 {
10614 RTAssertMsg2Weak(" rflags differs - iem=%08llx %s=%08llx\n", pDebugCtx->rflags.u, pszWho, pOrgCtx->rflags.u);
10615 CHECK_BIT_FIELD(rflags.Bits.u1CF);
10616 CHECK_BIT_FIELD(rflags.Bits.u1Reserved0);
10617 CHECK_BIT_FIELD(rflags.Bits.u1PF);
10618 CHECK_BIT_FIELD(rflags.Bits.u1Reserved1);
10619 CHECK_BIT_FIELD(rflags.Bits.u1AF);
10620 CHECK_BIT_FIELD(rflags.Bits.u1Reserved2);
10621 CHECK_BIT_FIELD(rflags.Bits.u1ZF);
10622 CHECK_BIT_FIELD(rflags.Bits.u1SF);
10623 CHECK_BIT_FIELD(rflags.Bits.u1TF);
10624 CHECK_BIT_FIELD(rflags.Bits.u1IF);
10625 CHECK_BIT_FIELD(rflags.Bits.u1DF);
10626 CHECK_BIT_FIELD(rflags.Bits.u1OF);
10627 CHECK_BIT_FIELD(rflags.Bits.u2IOPL);
10628 CHECK_BIT_FIELD(rflags.Bits.u1NT);
10629 CHECK_BIT_FIELD(rflags.Bits.u1Reserved3);
10630 if (0 && !fRem) /** @todo debug the occasionally cleared RF flag when running against VT-x. */
10631 CHECK_BIT_FIELD(rflags.Bits.u1RF);
10632 CHECK_BIT_FIELD(rflags.Bits.u1VM);
10633 CHECK_BIT_FIELD(rflags.Bits.u1AC);
10634 CHECK_BIT_FIELD(rflags.Bits.u1VIF);
10635 CHECK_BIT_FIELD(rflags.Bits.u1VIP);
10636 CHECK_BIT_FIELD(rflags.Bits.u1ID);
10637 }
10638
10639 if (pIemCpu->cIOReads != 1 && !pIemCpu->fIgnoreRaxRdx)
10640 CHECK_FIELD(rax);
10641 CHECK_FIELD(rcx);
10642 if (!pIemCpu->fIgnoreRaxRdx)
10643 CHECK_FIELD(rdx);
10644 CHECK_FIELD(rbx);
10645 CHECK_FIELD(rsp);
10646 CHECK_FIELD(rbp);
10647 CHECK_FIELD(rsi);
10648 CHECK_FIELD(rdi);
10649 CHECK_FIELD(r8);
10650 CHECK_FIELD(r9);
10651 CHECK_FIELD(r10);
10652 CHECK_FIELD(r11);
10653 CHECK_FIELD(r12);
10654 CHECK_FIELD(r13);
10655 CHECK_SEL(cs);
10656 CHECK_SEL(ss);
10657 CHECK_SEL(ds);
10658 CHECK_SEL(es);
10659 CHECK_SEL(fs);
10660 CHECK_SEL(gs);
10661 CHECK_FIELD(cr0);
10662
10663 /* Kludge #1: REM fetches code across the page boundary and faults on the next page, while we execute
10664 the faulting instruction first: 001b:77f61ff3 66 8b 42 02 mov ax, word [edx+002h] (NT4SP1) */
10665 /* Kludge #2: CR2 differs slightly on cross-page-boundary faults; we report the last address of the access
10666 while REM reports the address of the first byte on the page. Pending investigation as to which is correct. */
10667 if (pOrgCtx->cr2 != pDebugCtx->cr2)
10668 {
10669 if (pIemCpu->uOldCs == 0x1b && pIemCpu->uOldRip == 0x77f61ff3 && fRem)
10670 { /* ignore */ }
10671 else if ( (pOrgCtx->cr2 & ~(uint64_t)3) == (pDebugCtx->cr2 & ~(uint64_t)3)
10672 && (pOrgCtx->cr2 & PAGE_OFFSET_MASK) == 0
10673 && fRem)
10674 { /* ignore */ }
10675 else
10676 CHECK_FIELD(cr2);
10677 }
10678 CHECK_FIELD(cr3);
10679 CHECK_FIELD(cr4);
10680 CHECK_FIELD(dr[0]);
10681 CHECK_FIELD(dr[1]);
10682 CHECK_FIELD(dr[2]);
10683 CHECK_FIELD(dr[3]);
10684 CHECK_FIELD(dr[6]);
10685 if (!fRem || (pOrgCtx->dr[7] & ~X86_DR7_RA1_MASK) != (pDebugCtx->dr[7] & ~X86_DR7_RA1_MASK)) /* REM 'mov drX,greg' bug.*/
10686 CHECK_FIELD(dr[7]);
10687 CHECK_FIELD(gdtr.cbGdt);
10688 CHECK_FIELD(gdtr.pGdt);
10689 CHECK_FIELD(idtr.cbIdt);
10690 CHECK_FIELD(idtr.pIdt);
10691 CHECK_SEL(ldtr);
10692 CHECK_SEL(tr);
10693 CHECK_FIELD(SysEnter.cs);
10694 CHECK_FIELD(SysEnter.eip);
10695 CHECK_FIELD(SysEnter.esp);
10696 CHECK_FIELD(msrEFER);
10697 CHECK_FIELD(msrSTAR);
10698 CHECK_FIELD(msrPAT);
10699 CHECK_FIELD(msrLSTAR);
10700 CHECK_FIELD(msrCSTAR);
10701 CHECK_FIELD(msrSFMASK);
10702 CHECK_FIELD(msrKERNELGSBASE);
10703
10704 if (cDiffs != 0)
10705 {
10706 DBGFR3Info(pVM->pUVM, "cpumguest", "verbose", NULL);
10707 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__);
10708 iemVerifyAssertMsg2(pIemCpu);
10709 RTAssertPanic();
10710 }
10711# undef CHECK_FIELD
10712# undef CHECK_BIT_FIELD
10713 }
10714
10715 /*
10716 * If the register state compared fine, check the verification event
10717 * records.
10718 */
10719 if (cDiffs == 0 && !pIemCpu->fOverlappingMovs)
10720 {
10721 /*
10722 * Compare verification event records.
10723 * - I/O port accesses should be a 1:1 match.
10724 */
10725 PIEMVERIFYEVTREC pIemRec = pIemCpu->pIemEvtRecHead;
10726 PIEMVERIFYEVTREC pOtherRec = pIemCpu->pOtherEvtRecHead;
10727 while (pIemRec && pOtherRec)
10728 {
10729 /* Since we might miss RAM writes and reads, ignore reads and verify
10730 any extra IEM write records against the actual guest memory. */
10731 while ( IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent)
10732 && !IEMVERIFYEVENT_IS_RAM(pOtherRec->enmEvent)
10733 && pIemRec->pNext)
10734 {
10735 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
10736 iemVerifyWriteRecord(pIemCpu, pIemRec, fRem);
10737 pIemRec = pIemRec->pNext;
10738 }
10739
10740 /* Do the compare. */
10741 if (pIemRec->enmEvent != pOtherRec->enmEvent)
10742 {
10743 iemVerifyAssertRecords(pIemCpu, pIemRec, pOtherRec, "Type mismatches");
10744 break;
10745 }
10746 bool fEquals;
10747 switch (pIemRec->enmEvent)
10748 {
10749 case IEMVERIFYEVENT_IOPORT_READ:
10750 fEquals = pIemRec->u.IOPortRead.Port == pOtherRec->u.IOPortRead.Port
10751 && pIemRec->u.IOPortRead.cbValue == pOtherRec->u.IOPortRead.cbValue;
10752 break;
10753 case IEMVERIFYEVENT_IOPORT_WRITE:
10754 fEquals = pIemRec->u.IOPortWrite.Port == pOtherRec->u.IOPortWrite.Port
10755 && pIemRec->u.IOPortWrite.cbValue == pOtherRec->u.IOPortWrite.cbValue
10756 && pIemRec->u.IOPortWrite.u32Value == pOtherRec->u.IOPortWrite.u32Value;
10757 break;
10758 case IEMVERIFYEVENT_IOPORT_STR_READ:
10759 fEquals = pIemRec->u.IOPortStrRead.Port == pOtherRec->u.IOPortStrRead.Port
10760 && pIemRec->u.IOPortStrRead.cbValue == pOtherRec->u.IOPortStrRead.cbValue
10761 && pIemRec->u.IOPortStrRead.cTransfers == pOtherRec->u.IOPortStrRead.cTransfers;
10762 break;
10763 case IEMVERIFYEVENT_IOPORT_STR_WRITE:
10764 fEquals = pIemRec->u.IOPortStrWrite.Port == pOtherRec->u.IOPortStrWrite.Port
10765 && pIemRec->u.IOPortStrWrite.cbValue == pOtherRec->u.IOPortStrWrite.cbValue
10766 && pIemRec->u.IOPortStrWrite.cTransfers == pOtherRec->u.IOPortStrWrite.cTransfers;
10767 break;
10768 case IEMVERIFYEVENT_RAM_READ:
10769 fEquals = pIemRec->u.RamRead.GCPhys == pOtherRec->u.RamRead.GCPhys
10770 && pIemRec->u.RamRead.cb == pOtherRec->u.RamRead.cb;
10771 break;
10772 case IEMVERIFYEVENT_RAM_WRITE:
10773 fEquals = pIemRec->u.RamWrite.GCPhys == pOtherRec->u.RamWrite.GCPhys
10774 && pIemRec->u.RamWrite.cb == pOtherRec->u.RamWrite.cb
10775 && !memcmp(pIemRec->u.RamWrite.ab, pOtherRec->u.RamWrite.ab, pIemRec->u.RamWrite.cb);
10776 break;
10777 default:
10778 fEquals = false;
10779 break;
10780 }
10781 if (!fEquals)
10782 {
10783 iemVerifyAssertRecords(pIemCpu, pIemRec, pOtherRec, "Mismatch");
10784 break;
10785 }
10786
10787 /* advance */
10788 pIemRec = pIemRec->pNext;
10789 pOtherRec = pOtherRec->pNext;
10790 }
10791
10792 /* Ignore extra writes and reads. */
10793 while (pIemRec && IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent))
10794 {
10795 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
10796 iemVerifyWriteRecord(pIemCpu, pIemRec, fRem);
10797 pIemRec = pIemRec->pNext;
10798 }
10799 if (pIemRec != NULL)
10800 iemVerifyAssertRecord(pIemCpu, pIemRec, "Extra IEM record!");
10801 else if (pOtherRec != NULL)
10802 iemVerifyAssertRecord(pIemCpu, pOtherRec, "Extra Other record!");
10803 }
10804 pIemCpu->CTX_SUFF(pCtx) = pOrgCtx;
10805}
10806
10807#else /* !IEM_VERIFICATION_MODE_FULL || !IN_RING3 */
10808
10809/* stubs */
10810IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
10811{
10812 NOREF(pIemCpu); NOREF(Port); NOREF(pu32Value); NOREF(cbValue);
10813 return VERR_INTERNAL_ERROR;
10814}
10815
10816IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
10817{
10818 NOREF(pIemCpu); NOREF(Port); NOREF(u32Value); NOREF(cbValue);
10819 return VERR_INTERNAL_ERROR;
10820}
10821
10822#endif /* !IEM_VERIFICATION_MODE_FULL || !IN_RING3 */
10823
10824
10825#ifdef LOG_ENABLED
10826/**
10827 * Logs the current instruction.
10828 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10829 * @param pCtx The current CPU context.
10830 * @param fSameCtx Set if we have the same context information as the VMM,
10831 * clear if we may have already executed an instruction in
10832 * our debug context. When clear, we assume IEMCPU holds
10833 * valid CPU mode info.
10834 */
10835IEM_STATIC void iemLogCurInstr(PVMCPU pVCpu, PCPUMCTX pCtx, bool fSameCtx)
10836{
10837# ifdef IN_RING3
10838 if (LogIs2Enabled())
10839 {
10840 char szInstr[256];
10841 uint32_t cbInstr = 0;
10842 if (fSameCtx)
10843 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
10844 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
10845 szInstr, sizeof(szInstr), &cbInstr);
10846 else
10847 {
10848 uint32_t fFlags = 0;
10849 switch (pVCpu->iem.s.enmCpuMode)
10850 {
10851 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
10852 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
10853 case IEMMODE_16BIT:
10854 if (!(pCtx->cr0 & X86_CR0_PE) || pCtx->eflags.Bits.u1VM)
10855 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
10856 else
10857 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
10858 break;
10859 }
10860 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pCtx->cs.Sel, pCtx->rip, fFlags,
10861 szInstr, sizeof(szInstr), &cbInstr);
10862 }
10863
10864 PCX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
10865 Log2(("****\n"
10866 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
10867 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
10868 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
10869 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
10870 " %s\n"
10871 ,
10872 pCtx->eax, pCtx->ebx, pCtx->ecx, pCtx->edx, pCtx->esi, pCtx->edi,
10873 pCtx->eip, pCtx->esp, pCtx->ebp, pCtx->eflags.Bits.u2IOPL, pCtx->tr.Sel,
10874 pCtx->cs.Sel, pCtx->ss.Sel, pCtx->ds.Sel, pCtx->es.Sel,
10875 pCtx->fs.Sel, pCtx->gs.Sel, pCtx->eflags.u,
10876 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
10877 szInstr));
10878
10879 if (LogIs3Enabled())
10880 DBGFR3Info(pVCpu->pVMR3->pUVM, "cpumguest", "verbose", NULL);
10881 }
10882 else
10883# endif
10884 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n",
10885 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u));
10886}
10887#endif
10888
10889
10890/**
10891 * Makes status code adjustments (pass up from I/O and access handlers)
10892 * and maintains statistics.
10893 *
10894 * @returns Strict VBox status code to pass up.
10895 * @param pIemCpu The IEM per CPU data.
10896 * @param rcStrict The status from executing an instruction.
10897 */
10898DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecStatusCodeFiddling(PIEMCPU pIemCpu, VBOXSTRICTRC rcStrict)
10899{
10900 if (rcStrict != VINF_SUCCESS)
10901 {
10902 if (RT_SUCCESS(rcStrict))
10903 {
10904 AssertMsg( (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
10905 || rcStrict == VINF_IOM_R3_IOPORT_READ
10906 || rcStrict == VINF_IOM_R3_IOPORT_WRITE
10907 || rcStrict == VINF_IOM_R3_MMIO_READ
10908 || rcStrict == VINF_IOM_R3_MMIO_READ_WRITE
10909 || rcStrict == VINF_IOM_R3_MMIO_WRITE
10910 || rcStrict == VINF_CPUM_R3_MSR_READ
10911 || rcStrict == VINF_CPUM_R3_MSR_WRITE
10912 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
10913 || rcStrict == VINF_EM_RAW_TO_R3
10914 || rcStrict == VINF_EM_RAW_EMULATE_IO_BLOCK
10915 /* raw-mode / virt handlers only: */
10916 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT
10917 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT
10918 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT
10919 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT
10920 || rcStrict == VINF_SELM_SYNC_GDT
10921 || rcStrict == VINF_CSAM_PENDING_ACTION
10922 || rcStrict == VINF_PATM_CHECK_PATCH_PAGE
10923 , ("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
10924/** @todo adjust for VINF_EM_RAW_EMULATE_INSTR */
10925 int32_t const rcPassUp = pIemCpu->rcPassUp;
10926 if (rcPassUp == VINF_SUCCESS)
10927 pIemCpu->cRetInfStatuses++;
10928 else if ( rcPassUp < VINF_EM_FIRST
10929 || rcPassUp > VINF_EM_LAST
10930 || rcPassUp < VBOXSTRICTRC_VAL(rcStrict))
10931 {
10932 Log(("IEM: rcPassUp=%Rrc! rcStrict=%Rrc\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
10933 pIemCpu->cRetPassUpStatus++;
10934 rcStrict = rcPassUp;
10935 }
10936 else
10937 {
10938 Log(("IEM: rcPassUp=%Rrc rcStrict=%Rrc!\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
10939 pIemCpu->cRetInfStatuses++;
10940 }
10941 }
10942 else if (rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED)
10943 pIemCpu->cRetAspectNotImplemented++;
10944 else if (rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
10945 pIemCpu->cRetInstrNotImplemented++;
10946#ifdef IEM_VERIFICATION_MODE_FULL
10947 else if (rcStrict == VERR_IEM_RESTART_INSTRUCTION)
10948 rcStrict = VINF_SUCCESS;
10949#endif
10950 else
10951 pIemCpu->cRetErrStatuses++;
10952 }
10953 else if (pIemCpu->rcPassUp != VINF_SUCCESS)
10954 {
10955 pIemCpu->cRetPassUpStatus++;
10956 rcStrict = pIemCpu->rcPassUp;
10957 }
10958
10959 return rcStrict;
10960}
10961
10962
10963/**
10964 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
10965 * IEMExecOneWithPrefetchedByPC.
10966 *
10967 * @return Strict VBox status code.
10968 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10969 * @param pIemCpu The IEM per CPU data.
10970 * @param fExecuteInhibit If set, execute the instruction following CLI,
10971 * POP SS and MOV SS,GR.
10972 */
10973DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPU pVCpu, PIEMCPU pIemCpu, bool fExecuteInhibit)
10974{
10975 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
10976 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
10977 if (rcStrict == VINF_SUCCESS)
10978 pIemCpu->cInstructions++;
10979 if (pIemCpu->cActiveMappings > 0)
10980 iemMemRollback(pIemCpu);
10981//#ifdef DEBUG
10982// AssertMsg(pIemCpu->offOpcode == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", pIemCpu->offOpcode, cbInstr));
10983//#endif
10984
10985 /* Execute the next instruction as well if a cli, pop ss or
10986 mov ss, Gr has just completed successfully. */
10987 if ( fExecuteInhibit
10988 && rcStrict == VINF_SUCCESS
10989 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
10990 && EMGetInhibitInterruptsPC(pVCpu) == pIemCpu->CTX_SUFF(pCtx)->rip )
10991 {
10992 rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, pIemCpu->fBypassHandlers);
10993 if (rcStrict == VINF_SUCCESS)
10994 {
10995# ifdef LOG_ENABLED
10996 iemLogCurInstr(IEMCPU_TO_VMCPU(pIemCpu), pIemCpu->CTX_SUFF(pCtx), false);
10997# endif
10998 IEM_OPCODE_GET_NEXT_U8(&b);
10999 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
11000 if (rcStrict == VINF_SUCCESS)
11001 pIemCpu->cInstructions++;
11002 if (pIemCpu->cActiveMappings > 0)
11003 iemMemRollback(pIemCpu);
11004 }
11005 EMSetInhibitInterruptsPC(pVCpu, UINT64_C(0x7777555533331111));
11006 }
11007
11008 /*
11009 * Return value fiddling, statistics and sanity assertions.
11010 */
11011 rcStrict = iemExecStatusCodeFiddling(pIemCpu, rcStrict);
11012
11013 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->cs));
11014 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->ss));
11015#if defined(IEM_VERIFICATION_MODE_FULL)
11016 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->es));
11017 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->ds));
11018 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->fs));
11019 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->gs));
11020#endif
11021 return rcStrict;
11022}
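/*
 * Worked example of the fExecuteInhibit path above (a sketch of guest code,
 * not host code): after an instruction that loads SS, the very next
 * instruction is executed within the same iemExecOneInner call so the
 * one-instruction interrupt shadow cannot be lost between calls, e.g.
 *
 *      mov  ss, ax      ; sets VMCPU_FF_INHIBIT_INTERRUPTS for the next insn
 *      mov  esp, ebx    ; executed back-to-back, before any interrupt delivery
 */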
11023
11024
11025#ifdef IN_RC
11026/**
11027 * Re-enters raw-mode or ensures we return to ring-3.
11028 *
11029 * @returns rcStrict, maybe modified.
11030 * @param pIemCpu The IEM CPU structure.
11031 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11032 * @param pCtx The current CPU context.
11033 * @param   rcStrict    The status code returned by the interpreter.
11034 */
11035DECLINLINE(VBOXSTRICTRC) iemRCRawMaybeReenter(PIEMCPU pIemCpu, PVMCPU pVCpu, PCPUMCTX pCtx, VBOXSTRICTRC rcStrict)
11036{
11037 if (!pIemCpu->fInPatchCode)
11038 CPUMRawEnter(pVCpu);
11039 return rcStrict;
11040}
11041#endif
11042
11043
11044/**
11045 * Execute one instruction.
11046 *
11047 * @return Strict VBox status code.
11048 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11049 */
11050VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPU pVCpu)
11051{
11052 PIEMCPU pIemCpu = &pVCpu->iem.s;
11053
11054#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
11055 iemExecVerificationModeSetup(pIemCpu);
11056#endif
11057#ifdef LOG_ENABLED
11058 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
11059 iemLogCurInstr(pVCpu, pCtx, true);
11060#endif
11061
11062 /*
11063 * Do the decoding and emulation.
11064 */
11065 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, false);
11066 if (rcStrict == VINF_SUCCESS)
11067 rcStrict = iemExecOneInner(pVCpu, pIemCpu, true);
11068
11069#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
11070 /*
11071 * Assert some sanity.
11072 */
11073 iemExecVerificationModeCheck(pIemCpu);
11074#endif
11075#ifdef IN_RC
11076 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pIemCpu->CTX_SUFF(pCtx), rcStrict);
11077#endif
11078 if (rcStrict != VINF_SUCCESS)
11079 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
11080 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
11081 return rcStrict;
11082}
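/*
 * A minimal usage sketch for IEMExecOne, assuming a ring-3 EMT caller; the
 * emHandleRcExample helper is hypothetical and only stands in for whatever
 * status code handling the caller already has.
 */
#if 0 /* illustrative sketch only */
static VBOXSTRICTRC emInterpretAFewInstructionsExample(PVMCPU pVCpu)
{
    for (unsigned i = 0; i < 8; i++)
    {
        VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
        if (rcStrict != VINF_SUCCESS)
            return emHandleRcExample(pVCpu, rcStrict); /* hypothetical helper */
    }
    return VINF_SUCCESS;
}
#endif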
11083
11084
11085VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
11086{
11087 PIEMCPU pIemCpu = &pVCpu->iem.s;
11088 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
11089 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
11090
11091 uint32_t const cbOldWritten = pIemCpu->cbWritten;
11092 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, false);
11093 if (rcStrict == VINF_SUCCESS)
11094 {
11095 rcStrict = iemExecOneInner(pVCpu, pIemCpu, true);
11096 if (pcbWritten)
11097 *pcbWritten = pIemCpu->cbWritten - cbOldWritten;
11098 }
11099
11100#ifdef IN_RC
11101 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pCtx, rcStrict);
11102#endif
11103 return rcStrict;
11104}
11105
11106
11107VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
11108 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
11109{
11110 PIEMCPU pIemCpu = &pVCpu->iem.s;
11111 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
11112 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
11113
11114 VBOXSTRICTRC rcStrict;
11115 if ( cbOpcodeBytes
11116 && pCtx->rip == OpcodeBytesPC)
11117 {
11118 iemInitDecoder(pIemCpu, false);
11119 pIemCpu->cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pIemCpu->abOpcode));
11120 memcpy(pIemCpu->abOpcode, pvOpcodeBytes, pIemCpu->cbOpcode);
11121 rcStrict = VINF_SUCCESS;
11122 }
11123 else
11124 rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, false);
11125 if (rcStrict == VINF_SUCCESS)
11126 {
11127 rcStrict = iemExecOneInner(pVCpu, pIemCpu, true);
11128 }
11129
11130#ifdef IN_RC
11131 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pCtx, rcStrict);
11132#endif
11133 return rcStrict;
11134}
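/*
 * A minimal sketch of the prefetched-bytes variant, assuming the caller (for
 * instance a page-fault handler that already fetched the faulting instruction)
 * has the opcode bytes and the corresponding guest RIP at hand.
 */
#if 0 /* illustrative sketch only */
static VBOXSTRICTRC emReplayFetchedInstructionExample(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore,
                                                      uint64_t GCPtrInstr,
                                                      uint8_t const *pabOpcodes, size_t cbOpcodes)
{
    /* If GCPtrInstr no longer matches the current RIP, the supplied bytes are
       ignored and a normal opcode prefetch is done instead (see code above). */
    return IEMExecOneWithPrefetchedByPC(pVCpu, pCtxCore, GCPtrInstr, pabOpcodes, cbOpcodes);
}
#endif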
11135
11136
11137VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
11138{
11139 PIEMCPU pIemCpu = &pVCpu->iem.s;
11140 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
11141 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
11142
11143 uint32_t const cbOldWritten = pIemCpu->cbWritten;
11144 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, true);
11145 if (rcStrict == VINF_SUCCESS)
11146 {
11147 rcStrict = iemExecOneInner(pVCpu, pIemCpu, false);
11148 if (pcbWritten)
11149 *pcbWritten = pIemCpu->cbWritten - cbOldWritten;
11150 }
11151
11152#ifdef IN_RC
11153 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pCtx, rcStrict);
11154#endif
11155 return rcStrict;
11156}
11157
11158
11159VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
11160 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
11161{
11162 PIEMCPU pIemCpu = &pVCpu->iem.s;
11163 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
11164 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
11165
11166 VBOXSTRICTRC rcStrict;
11167 if ( cbOpcodeBytes
11168 && pCtx->rip == OpcodeBytesPC)
11169 {
11170 iemInitDecoder(pIemCpu, true);
11171 pIemCpu->cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pIemCpu->abOpcode));
11172 memcpy(pIemCpu->abOpcode, pvOpcodeBytes, pIemCpu->cbOpcode);
11173 rcStrict = VINF_SUCCESS;
11174 }
11175 else
11176 rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, true);
11177 if (rcStrict == VINF_SUCCESS)
11178 rcStrict = iemExecOneInner(pVCpu, pIemCpu, false);
11179
11180#ifdef IN_RC
11181 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pCtx, rcStrict);
11182#endif
11183 return rcStrict;
11184}
11185
11186
11187VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPU pVCpu)
11188{
11189 PIEMCPU pIemCpu = &pVCpu->iem.s;
11190
11191 /*
11192 * See if there is an interrupt pending in TRPM and inject it if we can.
11193 */
11194#if !defined(IEM_VERIFICATION_MODE_FULL) || !defined(IN_RING3)
11195 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
11196# ifdef IEM_VERIFICATION_MODE_FULL
11197 pIemCpu->uInjectCpl = UINT8_MAX;
11198# endif
11199 if ( pCtx->eflags.Bits.u1IF
11200 && TRPMHasTrap(pVCpu)
11201 && EMGetInhibitInterruptsPC(pVCpu) != pCtx->rip)
11202 {
11203 uint8_t u8TrapNo;
11204 TRPMEVENT enmType;
11205 RTGCUINT uErrCode;
11206 RTGCPTR uCr2;
11207 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
11208 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
11209 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
11210 TRPMResetTrap(pVCpu);
11211 }
11212#else
11213 iemExecVerificationModeSetup(pIemCpu);
11214 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
11215#endif
11216
11217 /*
11218 * Log the state.
11219 */
11220#ifdef LOG_ENABLED
11221 iemLogCurInstr(pVCpu, pCtx, true);
11222#endif
11223
11224 /*
11225 * Do the decoding and emulation.
11226 */
11227 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, false);
11228 if (rcStrict == VINF_SUCCESS)
11229 rcStrict = iemExecOneInner(pVCpu, pIemCpu, true);
11230
11231#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
11232 /*
11233 * Assert some sanity.
11234 */
11235 iemExecVerificationModeCheck(pIemCpu);
11236#endif
11237
11238 /*
11239 * Maybe re-enter raw-mode and log.
11240 */
11241#ifdef IN_RC
11242 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pIemCpu->CTX_SUFF(pCtx), rcStrict);
11243#endif
11244 if (rcStrict != VINF_SUCCESS)
11245 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
11246 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
11247 return rcStrict;
11248}
11249
11250
11251
11252/**
11253 * Injects a trap, fault, abort, software interrupt or external interrupt.
11254 *
11255 * The parameter list matches TRPMQueryTrapAll pretty closely.
11256 *
11257 * @returns Strict VBox status code.
11258 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11259 * @param u8TrapNo The trap number.
11260 * @param enmType What type is it (trap/fault/abort), software
11261 * interrupt or hardware interrupt.
11262 * @param uErrCode The error code if applicable.
11263 * @param uCr2 The CR2 value if applicable.
11264 * @param cbInstr The instruction length (only relevant for
11265 * software interrupts).
11266 */
11267VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPU pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
11268 uint8_t cbInstr)
11269{
11270 iemInitDecoder(&pVCpu->iem.s, false);
11271#ifdef DBGFTRACE_ENABLED
11272 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
11273 u8TrapNo, enmType, uErrCode, uCr2);
11274#endif
11275
11276 uint32_t fFlags;
11277 switch (enmType)
11278 {
11279 case TRPM_HARDWARE_INT:
11280 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
11281 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
11282 uErrCode = uCr2 = 0;
11283 break;
11284
11285 case TRPM_SOFTWARE_INT:
11286 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
11287 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
11288 uErrCode = uCr2 = 0;
11289 break;
11290
11291 case TRPM_TRAP:
11292 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
11293 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
11294 if (u8TrapNo == X86_XCPT_PF)
11295 fFlags |= IEM_XCPT_FLAGS_CR2;
11296 switch (u8TrapNo)
11297 {
11298 case X86_XCPT_DF:
11299 case X86_XCPT_TS:
11300 case X86_XCPT_NP:
11301 case X86_XCPT_SS:
11302 case X86_XCPT_PF:
11303 case X86_XCPT_AC:
11304 fFlags |= IEM_XCPT_FLAGS_ERR;
11305 break;
11306
11307 case X86_XCPT_NMI:
11308 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
11309 break;
11310 }
11311 break;
11312
11313 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11314 }
11315
11316 return iemRaiseXcptOrInt(&pVCpu->iem.s, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
11317}
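/*
 * A minimal sketch of injecting a guest page fault through IEMInjectTrap; the
 * error code and faulting address are caller-supplied placeholders.
 */
#if 0 /* illustrative sketch only */
static VBOXSTRICTRC emInjectPageFaultExample(PVMCPU pVCpu, RTGCPTR GCPtrFault, uint16_t uErrCode)
{
    /* #PF is a CPU exception, so TRPM_TRAP is used, CR2 carries the faulting
       address, and the instruction length is irrelevant. */
    return IEMInjectTrap(pVCpu, X86_XCPT_PF, TRPM_TRAP, uErrCode, GCPtrFault, 0 /*cbInstr*/);
}
#endif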
11318
11319
11320/**
11321 * Injects the active TRPM event.
11322 *
11323 * @returns Strict VBox status code.
11324 * @param pVCpu The cross context virtual CPU structure.
11325 */
11326VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPU pVCpu)
11327{
11328#ifndef IEM_IMPLEMENTS_TASKSWITCH
11329 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
11330#else
11331 uint8_t u8TrapNo;
11332 TRPMEVENT enmType;
11333 RTGCUINT uErrCode;
11334 RTGCUINTPTR uCr2;
11335 uint8_t cbInstr;
11336 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr);
11337 if (RT_FAILURE(rc))
11338 return rc;
11339
11340 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
11341
11342 /** @todo Are there any other codes that imply the event was successfully
11343 * delivered to the guest? See @bugref{6607}. */
11344 if ( rcStrict == VINF_SUCCESS
11345 || rcStrict == VINF_IEM_RAISED_XCPT)
11346 {
11347 TRPMResetTrap(pVCpu);
11348 }
11349 return rcStrict;
11350#endif
11351}
11352
11353
11354VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
11355{
11356 return VERR_NOT_IMPLEMENTED;
11357}
11358
11359
11360VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
11361{
11362 return VERR_NOT_IMPLEMENTED;
11363}
11364
11365
11366#if 0 /* The IRET-to-v8086 mode in PATM is very optimistic, so I don't dare do this yet. */
11367/**
11368 * Executes an IRET instruction with the default operand size.
11369 *
11370 * This is for PATM.
11371 *
11372 * @returns VBox status code.
11373 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11374 * @param pCtxCore The register frame.
11375 */
11376VMM_INT_DECL(int) IEMExecInstr_iret(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
11377{
11378 PIEMCPU pIemCpu = &pVCpu->iem.s;
11379 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
11380
11381 iemCtxCoreToCtx(pCtx, pCtxCore);
11382 iemInitDecoder(pIemCpu);
11383 VBOXSTRICTRC rcStrict = iemCImpl_iret(pIemCpu, 1, pIemCpu->enmDefOpSize);
11384 if (rcStrict == VINF_SUCCESS)
11385 iemCtxToCtxCore(pCtxCore, pCtx);
11386 else
11387 LogFlow(("IEMExecInstr_iret: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
11388 pCtx->cs, pCtx->rip, pCtx->ss, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
11389 return rcStrict;
11390}
11391#endif
11392
11393
11394/**
11395 * Macro used by the IEMExec* method to check the given instruction length.
11396 *
11397 * Will return on failure!
11398 *
11399 * @param a_cbInstr The given instruction length.
11400 * @param a_cbMin The minimum length.
11401 */
11402#define IEMEXEC_ASSERT_INSTR_LEN_RETURN(a_cbInstr, a_cbMin) \
11403 AssertMsgReturn((unsigned)(a_cbInstr) - (unsigned)(a_cbMin) <= (unsigned)15 - (unsigned)(a_cbMin), \
11404 ("cbInstr=%u cbMin=%u\n", (a_cbInstr), (a_cbMin)), VERR_IEM_INVALID_INSTR_LENGTH)
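/*
 * The unsigned subtraction above folds the range check
 * a_cbMin <= a_cbInstr <= 15 into a single comparison: any value below
 * a_cbMin wraps around to a huge unsigned number and fails the upper-bound
 * test. A stand-alone sketch of the same idiom:
 */
#if 0 /* illustrative sketch only */
static bool emIsValidInstrLengthExample(unsigned cbInstr, unsigned cbMin)
{
    /* Equivalent to: cbMin <= cbInstr && cbInstr <= 15, but with one branch. */
    return cbInstr - cbMin <= 15u - cbMin;
}
#endif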
11405
11406
11407/**
11408 * Interface for HM and EM for executing string I/O OUT (write) instructions.
11409 *
11410 * This API ASSUMES that the caller has already verified that the guest code is
11411 * allowed to access the I/O port. (The I/O port is in the DX register in the
11412 * guest state.)
11413 *
11414 * @returns Strict VBox status code.
11415 * @param pVCpu The cross context virtual CPU structure.
11416 * @param cbValue The size of the I/O port access (1, 2, or 4).
11417 * @param enmAddrMode The addressing mode.
11418 * @param fRepPrefix Indicates whether a repeat prefix is used
11419 * (doesn't matter which for this instruction).
11420 * @param cbInstr The instruction length in bytes.
11421 * @param   iEffSeg     The effective segment register number.
11422 */
11423VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
11424 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg)
11425{
11426 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
11427 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
11428
11429 /*
11430 * State init.
11431 */
11432 PIEMCPU pIemCpu = &pVCpu->iem.s;
11433 iemInitExec(pIemCpu, false /*fBypassHandlers*/);
11434
11435 /*
11436 * Switch orgy for getting to the right handler.
11437 */
11438 VBOXSTRICTRC rcStrict;
11439 if (fRepPrefix)
11440 {
11441 switch (enmAddrMode)
11442 {
11443 case IEMMODE_16BIT:
11444 switch (cbValue)
11445 {
11446 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11447 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11448 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11449 default:
11450 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11451 }
11452 break;
11453
11454 case IEMMODE_32BIT:
11455 switch (cbValue)
11456 {
11457 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11458 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11459 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11460 default:
11461 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11462 }
11463 break;
11464
11465 case IEMMODE_64BIT:
11466 switch (cbValue)
11467 {
11468 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11469 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11470 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11471 default:
11472 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11473 }
11474 break;
11475
11476 default:
11477 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
11478 }
11479 }
11480 else
11481 {
11482 switch (enmAddrMode)
11483 {
11484 case IEMMODE_16BIT:
11485 switch (cbValue)
11486 {
11487 case 1: rcStrict = iemCImpl_outs_op8_addr16(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11488 case 2: rcStrict = iemCImpl_outs_op16_addr16(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11489 case 4: rcStrict = iemCImpl_outs_op32_addr16(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11490 default:
11491 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11492 }
11493 break;
11494
11495 case IEMMODE_32BIT:
11496 switch (cbValue)
11497 {
11498 case 1: rcStrict = iemCImpl_outs_op8_addr32(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11499 case 2: rcStrict = iemCImpl_outs_op16_addr32(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11500 case 4: rcStrict = iemCImpl_outs_op32_addr32(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11501 default:
11502 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11503 }
11504 break;
11505
11506 case IEMMODE_64BIT:
11507 switch (cbValue)
11508 {
11509 case 1: rcStrict = iemCImpl_outs_op8_addr64(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11510 case 2: rcStrict = iemCImpl_outs_op16_addr64(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11511 case 4: rcStrict = iemCImpl_outs_op32_addr64(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11512 default:
11513 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11514 }
11515 break;
11516
11517 default:
11518 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
11519 }
11520 }
11521
11522 iemUninitExec(pIemCpu);
11523 return iemExecStatusCodeFiddling(pIemCpu, rcStrict);
11524}
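/*
 * A minimal sketch of forwarding a "rep outsb" style exit to the interface
 * above; the instruction length, address size and segment would normally come
 * from the hypervisor's exit information and are placeholders here.
 */
#if 0 /* illustrative sketch only */
static VBOXSTRICTRC hmHandleRepOutsExitExample(PVMCPU pVCpu, uint8_t cbInstr)
{
    /* Byte-sized writes from DS:ESI with a REP prefix and 32-bit addressing. */
    return IEMExecStringIoWrite(pVCpu, 1 /*cbValue*/, IEMMODE_32BIT,
                                true /*fRepPrefix*/, cbInstr, X86_SREG_DS);
}
#endif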
11525
11526
11527/**
11528 * Interface for HM and EM for executing string I/O IN (read) instructions.
11529 *
11530 * This API ASSUMES that the caller has already verified that the guest code is
11531 * allowed to access the I/O port. (The I/O port is in the DX register in the
11532 * guest state.)
11533 *
11534 * @returns Strict VBox status code.
11535 * @param pVCpu The cross context virtual CPU structure.
11536 * @param cbValue The size of the I/O port access (1, 2, or 4).
11537 * @param enmAddrMode The addressing mode.
11538 * @param fRepPrefix Indicates whether a repeat prefix is used
11539 * (doesn't matter which for this instruction).
11540 * @param cbInstr The instruction length in bytes.
11541 */
11542VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
11543 bool fRepPrefix, uint8_t cbInstr)
11544{
11545 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
11546
11547 /*
11548 * State init.
11549 */
11550 PIEMCPU pIemCpu = &pVCpu->iem.s;
11551 iemInitExec(pIemCpu, false /*fBypassHandlers*/);
11552
11553 /*
11554 * Switch orgy for getting to the right handler.
11555 */
11556 VBOXSTRICTRC rcStrict;
11557 if (fRepPrefix)
11558 {
11559 switch (enmAddrMode)
11560 {
11561 case IEMMODE_16BIT:
11562 switch (cbValue)
11563 {
11564 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11565 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11566 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11567 default:
11568 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11569 }
11570 break;
11571
11572 case IEMMODE_32BIT:
11573 switch (cbValue)
11574 {
11575 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11576 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11577 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11578 default:
11579 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11580 }
11581 break;
11582
11583 case IEMMODE_64BIT:
11584 switch (cbValue)
11585 {
11586 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11587 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11588 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11589 default:
11590 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11591 }
11592 break;
11593
11594 default:
11595 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
11596 }
11597 }
11598 else
11599 {
11600 switch (enmAddrMode)
11601 {
11602 case IEMMODE_16BIT:
11603 switch (cbValue)
11604 {
11605 case 1: rcStrict = iemCImpl_ins_op8_addr16(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11606 case 2: rcStrict = iemCImpl_ins_op16_addr16(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11607 case 4: rcStrict = iemCImpl_ins_op32_addr16(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11608 default:
11609 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11610 }
11611 break;
11612
11613 case IEMMODE_32BIT:
11614 switch (cbValue)
11615 {
11616 case 1: rcStrict = iemCImpl_ins_op8_addr32(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11617 case 2: rcStrict = iemCImpl_ins_op16_addr32(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11618 case 4: rcStrict = iemCImpl_ins_op32_addr32(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11619 default:
11620 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11621 }
11622 break;
11623
11624 case IEMMODE_64BIT:
11625 switch (cbValue)
11626 {
11627 case 1: rcStrict = iemCImpl_ins_op8_addr64(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11628 case 2: rcStrict = iemCImpl_ins_op16_addr64(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11629 case 4: rcStrict = iemCImpl_ins_op32_addr64(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11630 default:
11631 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11632 }
11633 break;
11634
11635 default:
11636 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
11637 }
11638 }
11639
11640 iemUninitExec(pIemCpu);
11641 return iemExecStatusCodeFiddling(pIemCpu, rcStrict);
11642}
11643
11644
11645
11646/**
11647 * Interface for HM and EM to write to a CRx register.
11648 *
11649 * @returns Strict VBox status code.
11650 * @param pVCpu The cross context virtual CPU structure.
11651 * @param cbInstr The instruction length in bytes.
11652 * @param iCrReg The control register number (destination).
11653 * @param iGReg The general purpose register number (source).
11654 *
11655 * @remarks In ring-0 not all of the state needs to be synced in.
11656 */
11657VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
11658{
11659 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11660 Assert(iCrReg < 16);
11661 Assert(iGReg < 16);
11662
11663 PIEMCPU pIemCpu = &pVCpu->iem.s;
11664 iemInitExec(pIemCpu, false /*fBypassHandlers*/);
11665 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
11666 iemUninitExec(pIemCpu);
11667 return iemExecStatusCodeFiddling(pIemCpu, rcStrict);
11668}
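/*
 * A minimal sketch of using the decoded CRx write interface for a
 * "mov cr3, rax" style exit; register numbers are passed as plain indices.
 */
#if 0 /* illustrative sketch only */
static VBOXSTRICTRC hmHandleMovToCr3Example(PVMCPU pVCpu, uint8_t cbInstr)
{
    return IEMExecDecodedMovCRxWrite(pVCpu, cbInstr, 3 /*iCrReg=CR3*/, 0 /*iGReg=xAX*/);
}
#endif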
11669
11670
11671/**
11672 * Interface for HM and EM to read from a CRx register.
11673 *
11674 * @returns Strict VBox status code.
11675 * @param pVCpu The cross context virtual CPU structure.
11676 * @param cbInstr The instruction length in bytes.
11677 * @param iGReg The general purpose register number (destination).
11678 * @param iCrReg The control register number (source).
11679 *
11680 * @remarks In ring-0 not all of the state needs to be synced in.
11681 */
11682VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
11683{
11684 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11685 Assert(iCrReg < 16);
11686 Assert(iGReg < 16);
11687
11688 PIEMCPU pIemCpu = &pVCpu->iem.s;
11689 iemInitExec(pIemCpu, false /*fBypassHandlers*/);
11690 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
11691 iemUninitExec(pIemCpu);
11692 return iemExecStatusCodeFiddling(pIemCpu, rcStrict);
11693}
11694
11695
11696/**
11697 * Interface for HM and EM to clear the CR0[TS] bit.
11698 *
11699 * @returns Strict VBox status code.
11700 * @param pVCpu The cross context virtual CPU structure.
11701 * @param cbInstr The instruction length in bytes.
11702 *
11703 * @remarks In ring-0 not all of the state needs to be synced in.
11704 */
11705VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPU pVCpu, uint8_t cbInstr)
11706{
11707 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11708
11709 PIEMCPU pIemCpu = &pVCpu->iem.s;
11710 iemInitExec(pIemCpu, false /*fBypassHandlers*/);
11711 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
11712 iemUninitExec(pIemCpu);
11713 return iemExecStatusCodeFiddling(pIemCpu, rcStrict);
11714}
11715
11716
11717/**
11718 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
11719 *
11720 * @returns Strict VBox status code.
11721 * @param pVCpu The cross context virtual CPU structure.
11722 * @param cbInstr The instruction length in bytes.
11723 * @param uValue The value to load into CR0.
11724 *
11725 * @remarks In ring-0 not all of the state needs to be synced in.
11726 */
11727VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPU pVCpu, uint8_t cbInstr, uint16_t uValue)
11728{
11729 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11730
11731 PIEMCPU pIemCpu = &pVCpu->iem.s;
11732 iemInitExec(pIemCpu, false /*fBypassHandlers*/);
11733 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_lmsw, uValue);
11734 iemUninitExec(pIemCpu);
11735 return iemExecStatusCodeFiddling(pIemCpu, rcStrict);
11736}
11737
11738
11739/**
11740 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
11741 *
11742 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
11743 *
11744 * @returns Strict VBox status code.
11745 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11746 * @param cbInstr The instruction length in bytes.
11747 * @remarks In ring-0 not all of the state needs to be synced in.
11748 * @thread EMT(pVCpu)
11749 */
11750VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPU pVCpu, uint8_t cbInstr)
11751{
11752 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11753
11754 PIEMCPU pIemCpu = &pVCpu->iem.s;
11755 iemInitExec(pIemCpu, false /*fBypassHandlers*/);
11756 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
11757 iemUninitExec(pIemCpu);
11758 return iemExecStatusCodeFiddling(pIemCpu, rcStrict);
11759}
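/*
 * A minimal sketch covering the three decoded CR0/XCR0 helpers above; the
 * instruction lengths match the shortest encodings (CLTS is 0F 06, LMSW and
 * XSETBV are 0F 01 based) and the LMSW operand is a placeholder.
 */
#if 0 /* illustrative sketch only */
static VBOXSTRICTRC hmHandleCr0XcrExitsExample(PVMCPU pVCpu, unsigned uWhich, uint16_t u16Lmsw)
{
    switch (uWhich)
    {
        case 0:  return IEMExecDecodedClts(pVCpu, 2 /*cbInstr*/);
        case 1:  return IEMExecDecodedLmsw(pVCpu, 3 /*cbInstr*/, u16Lmsw);
        default: return IEMExecDecodedXsetbv(pVCpu, 3 /*cbInstr*/);
    }
}
#endif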
11760
11761#ifdef IN_RING3
11762
11763/**
11764 * Called by force-flag handling code when VMCPU_FF_IEM is set.
11765 *
11766 * @returns Merge between @a rcStrict and what the commit operation returned.
11767 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11768 * @param rcStrict The status code returned by ring-0 or raw-mode.
11769 */
11770VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3DoPendingAction(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
11771{
11772 PIEMCPU pIemCpu = &pVCpu->iem.s;
11773
11774 /*
11775 * Retrieve and reset the pending commit.
11776 */
11777 IEMCOMMIT const enmFn = pIemCpu->PendingCommit.enmFn;
11778 pIemCpu->PendingCommit.enmFn = IEMCOMMIT_INVALID;
11779 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);
11780
11781 /*
11782 * Must reset pass-up status code.
11783 */
11784 pIemCpu->rcPassUp = VINF_SUCCESS;
11785
11786 /*
11787 * Call the function. Currently using switch here instead of function
11788 * pointer table as a switch won't get skewed.
11789 */
11790 VBOXSTRICTRC rcStrictCommit;
11791 switch (enmFn)
11792 {
11793 case IEMCOMMIT_INS_OP8_ADDR16: rcStrictCommit = iemR3CImpl_commit_ins_op8_addr16(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11794 case IEMCOMMIT_INS_OP8_ADDR32: rcStrictCommit = iemR3CImpl_commit_ins_op8_addr32(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11795 case IEMCOMMIT_INS_OP8_ADDR64: rcStrictCommit = iemR3CImpl_commit_ins_op8_addr64(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11796 case IEMCOMMIT_INS_OP16_ADDR16: rcStrictCommit = iemR3CImpl_commit_ins_op16_addr16(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11797 case IEMCOMMIT_INS_OP16_ADDR32: rcStrictCommit = iemR3CImpl_commit_ins_op16_addr32(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11798 case IEMCOMMIT_INS_OP16_ADDR64: rcStrictCommit = iemR3CImpl_commit_ins_op16_addr64(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11799 case IEMCOMMIT_INS_OP32_ADDR16: rcStrictCommit = iemR3CImpl_commit_ins_op32_addr16(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11800 case IEMCOMMIT_INS_OP32_ADDR32: rcStrictCommit = iemR3CImpl_commit_ins_op32_addr32(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11801 case IEMCOMMIT_INS_OP32_ADDR64: rcStrictCommit = iemR3CImpl_commit_ins_op32_addr64(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11802 case IEMCOMMIT_REP_INS_OP8_ADDR16: rcStrictCommit = iemR3CImpl_commit_rep_ins_op8_addr16(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11803 case IEMCOMMIT_REP_INS_OP8_ADDR32: rcStrictCommit = iemR3CImpl_commit_rep_ins_op8_addr32(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11804 case IEMCOMMIT_REP_INS_OP8_ADDR64: rcStrictCommit = iemR3CImpl_commit_rep_ins_op8_addr64(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11805 case IEMCOMMIT_REP_INS_OP16_ADDR16: rcStrictCommit = iemR3CImpl_commit_rep_ins_op16_addr16(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11806 case IEMCOMMIT_REP_INS_OP16_ADDR32: rcStrictCommit = iemR3CImpl_commit_rep_ins_op16_addr32(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11807 case IEMCOMMIT_REP_INS_OP16_ADDR64: rcStrictCommit = iemR3CImpl_commit_rep_ins_op16_addr64(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11808 case IEMCOMMIT_REP_INS_OP32_ADDR16: rcStrictCommit = iemR3CImpl_commit_rep_ins_op32_addr16(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11809 case IEMCOMMIT_REP_INS_OP32_ADDR32: rcStrictCommit = iemR3CImpl_commit_rep_ins_op32_addr32(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11810 case IEMCOMMIT_REP_INS_OP32_ADDR64: rcStrictCommit = iemR3CImpl_commit_rep_ins_op32_addr64(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11811 default:
11812 AssertLogRelMsgFailedReturn(("enmFn=%#x (%d)\n", pIemCpu->PendingCommit.enmFn, pIemCpu->PendingCommit.enmFn), VERR_IEM_IPE_2);
11813 }
11814
11815 /*
11816 * Merge status code (if any) with the incoming one.
11817 */
11818 rcStrictCommit = iemExecStatusCodeFiddling(pIemCpu, rcStrictCommit);
11819 if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
11820 return rcStrict;
11821 if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
11822 return rcStrictCommit;
11823
11824 /* Complicated. */
11825 if (RT_FAILURE(rcStrict))
11826 return rcStrict;
11827 if (RT_FAILURE(rcStrictCommit))
11828 return rcStrictCommit;
11829 if ( rcStrict >= VINF_EM_FIRST
11830 && rcStrict <= VINF_EM_LAST)
11831 {
11832 if ( rcStrictCommit >= VINF_EM_FIRST
11833 && rcStrictCommit <= VINF_EM_LAST)
11834 return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
11835
11836 /* This really shouldn't happen. Check PGM + handler code! */
11837 AssertLogRelMsgFailedReturn(("rcStrictCommit=%Rrc rcStrict=%Rrc enmFn=%d\n", VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), enmFn), VERR_IEM_IPE_1);
11838 }
11839 /* This shouldn't really happen either, see IOM_SUCCESS. */
11840 AssertLogRelMsgFailedReturn(("rcStrictCommit=%Rrc rcStrict=%Rrc enmFn=%d\n", VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), enmFn), VERR_IEM_IPE_2);
11841}
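/*
 * A minimal sketch of the ring-3 caller side, assuming the usual force-flag
 * processing loop: once VMCPU_FF_IEM is seen, the pending commit is completed
 * and its status merged with whatever ring-0/raw-mode returned.
 */
#if 0 /* illustrative sketch only */
static VBOXSTRICTRC emR3ProcessIemForceFlagExample(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
{
    if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
        rcStrict = IEMR3DoPendingAction(pVCpu, rcStrict);
    return rcStrict;
}
#endif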
11842
11843#endif /* IN_RING3 */
11844