VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@58170

Last change on this file since 58170 was 58126, checked in by vboxsync, 9 years ago

VMM: Fixed almost all the Doxygen warnings.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 442.9 KB
1/* $Id: IEMAll.cpp 58126 2015-10-08 20:59:48Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2015 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @page pg_iem IEM - Interpreted Execution Manager
20 *
21 * The interpreted execution manager (IEM) is for executing short guest code
22 * sequences that are causing too many exits / virtualization traps. It will
23 * also be used to interpret single instructions, thus replacing the selective
24 * interpreters in EM and IOM.
25 *
26 * Design goals:
27 * - Relatively small footprint, although we favour speed and correctness
28 * over size.
29 * - Reasonably fast.
30 * - Correctly handle lock prefixed instructions.
31 * - Complete instruction set - eventually.
32 * - Refactorable into a recompiler, maybe.
33 * - Replace EMInterpret*.
34 *
35 * Using the existing disassembler has been considered; however, this is thought
36 * to conflict with speed as the disassembler chews things a bit too much while
37 * leaving us with a somewhat complicated state to interpret afterwards.
38 *
39 *
40 * The current code is very much work in progress. You've been warned!
41 *
42 *
43 * @section sec_iem_fpu_instr FPU Instructions
44 *
45 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
46 * same or equivalent instructions on the host FPU. To make life easy, we also
47 * let the FPU prioritize the unmasked exceptions for us. This, however, only
48 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
49 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
50 * can trigger spurious FPU exceptions.
51 *
52 * The guest FPU state is not loaded into the host CPU and kept there till we
53 * leave IEM because the calling conventions have declared an all year open
54 * season on much of the FPU state. For instance an innocent looking call to
55 * memcpy might end up using a whole bunch of XMM or MM registers if the
56 * particular implementation finds it worthwhile.
57 *
58 *
59 * @section sec_iem_logging Logging
60 *
61 * The IEM code uses the \"IEM\" log group for the main logging. The different
62 * logging levels/flags are generally used for the following purposes:
63 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
64 * - Flow (LogFlow): Basic enter/exit IEM state info.
65 * - Level 2 (Log2): ?
66 * - Level 3 (Log3): More detailed enter/exit IEM state info.
67 * - Level 4 (Log4): Decoding mnemonics w/ EIP.
68 * - Level 5 (Log5): Decoding details.
69 * - Level 6 (Log6): Enables/disables the lockstep comparison with REM.
70 *
71 */
72
73/** @def IEM_VERIFICATION_MODE_MINIMAL
74 * Use for pitting IEM against EM or something else in ring-0 or raw-mode
75 * context. */
76#if defined(DOXYGEN_RUNNING)
77# define IEM_VERIFICATION_MODE_MINIMAL
78#endif
79//#define IEM_LOG_MEMORY_WRITES
80#define IEM_IMPLEMENTS_TASKSWITCH
81
82
83/*********************************************************************************************************************************
84* Header Files *
85*********************************************************************************************************************************/
86#define LOG_GROUP LOG_GROUP_IEM
87#include <VBox/vmm/iem.h>
88#include <VBox/vmm/cpum.h>
89#include <VBox/vmm/pdm.h>
90#include <VBox/vmm/pgm.h>
91#include <internal/pgm.h>
92#include <VBox/vmm/iom.h>
93#include <VBox/vmm/em.h>
94#include <VBox/vmm/hm.h>
95#include <VBox/vmm/tm.h>
96#include <VBox/vmm/dbgf.h>
97#include <VBox/vmm/dbgftrace.h>
98#ifdef VBOX_WITH_RAW_MODE_NOT_R0
99# include <VBox/vmm/patm.h>
100# if defined(VBOX_WITH_CALL_RECORD) || defined(REM_MONITOR_CODE_PAGES)
101# include <VBox/vmm/csam.h>
102# endif
103#endif
104#include "IEMInternal.h"
105#ifdef IEM_VERIFICATION_MODE_FULL
106# include <VBox/vmm/rem.h>
107# include <VBox/vmm/mm.h>
108#endif
109#include <VBox/vmm/vm.h>
110#include <VBox/log.h>
111#include <VBox/err.h>
112#include <VBox/param.h>
113#include <VBox/dis.h>
114#include <VBox/disopcode.h>
115#include <iprt/assert.h>
116#include <iprt/string.h>
117#include <iprt/x86.h>
118
119
120
121/*********************************************************************************************************************************
122* Structures and Typedefs *
123*********************************************************************************************************************************/
124/** @typedef PFNIEMOP
125 * Pointer to an opcode decoder function.
126 */
127
128/** @def FNIEMOP_DEF
129 * Define an opcode decoder function.
130 *
131 * We're using macros for this so that adding and removing parameters as well as
132 * tweaking compiler-specific attributes becomes easier. See FNIEMOP_CALL.
133 *
134 * @param a_Name The function name.
135 */
136
137
138#if defined(__GNUC__) && defined(RT_ARCH_X86)
139typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOP)(PIEMCPU pIemCpu);
140# define FNIEMOP_DEF(a_Name) \
141 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PIEMCPU pIemCpu)
142# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
143 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0)
144# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
145 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1)
146
147#elif defined(_MSC_VER) && defined(RT_ARCH_X86)
148typedef VBOXSTRICTRC (__fastcall * PFNIEMOP)(PIEMCPU pIemCpu);
149# define FNIEMOP_DEF(a_Name) \
150 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PIEMCPU pIemCpu) RT_NO_THROW_DEF
151# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
152 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
153# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
154 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
155
156#elif defined(__GNUC__)
157typedef VBOXSTRICTRC (* PFNIEMOP)(PIEMCPU pIemCpu);
158# define FNIEMOP_DEF(a_Name) \
159 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PIEMCPU pIemCpu)
160# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
161 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0)
162# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
163 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1)
164
165#else
166typedef VBOXSTRICTRC (* PFNIEMOP)(PIEMCPU pIemCpu);
167# define FNIEMOP_DEF(a_Name) \
168 IEM_STATIC VBOXSTRICTRC a_Name(PIEMCPU pIemCpu) RT_NO_THROW_DEF
169# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
170 IEM_STATIC VBOXSTRICTRC a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
171# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
172 IEM_STATIC VBOXSTRICTRC a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
173
174#endif
175
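/*
 * A minimal usage sketch of the FNIEMOP_* macros, assuming a hypothetical
 * one-byte opcode handler; the names iemOp_example, bImm and bOpcode below are
 * illustration only:
 *
 * @code
 *      FNIEMOP_DEF(iemOp_example)
 *      {
 *          uint8_t bImm;
 *          IEM_OPCODE_GET_NEXT_U8(&bImm);  // fetches an immediate byte, returns on failure
 *          // ... decode further and defer to the instruction implementation ...
 *          return VINF_SUCCESS;
 *      }
 *
 *      // Invoked through a function pointer table, e.g. g_apfnOneByteMap:
 *      VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[bOpcode]);
 * @endcode
 */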
176
177/**
178 * Selector descriptor table entry as fetched by iemMemFetchSelDesc.
179 */
180typedef union IEMSELDESC
181{
182 /** The legacy view. */
183 X86DESC Legacy;
184 /** The long mode view. */
185 X86DESC64 Long;
186} IEMSELDESC;
187/** Pointer to a selector descriptor table entry. */
188typedef IEMSELDESC *PIEMSELDESC;
189
190
191/*********************************************************************************************************************************
192* Defined Constants And Macros *
193*********************************************************************************************************************************/
194/** Temporary hack to disable the double execution. Will be removed in favor
195 * of a dedicated execution mode in EM. */
196//#define IEM_VERIFICATION_MODE_NO_REM
197
198/** Used to shut up GCC warnings about variables that 'may be used uninitialized'
199 * due to GCC lacking knowledge about the value range of a switch. */
200#define IEM_NOT_REACHED_DEFAULT_CASE_RET() default: AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE)
201
202/**
203 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
204 * occasion.
205 */
206#ifdef LOG_ENABLED
207# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
208 do { \
209 /*Log*/ LogAlways(("%s: returning IEM_RETURN_ASPECT_NOT_IMPLEMENTED (line %d)\n", __FUNCTION__, __LINE__)); \
210 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
211 } while (0)
212#else
213# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
214 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
215#endif
216
217/**
218 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
219 * occasion using the supplied logger statement.
220 *
221 * @param a_LoggerArgs What to log on failure.
222 */
223#ifdef LOG_ENABLED
224# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
225 do { \
226 LogAlways((LOG_FN_FMT ": ", __PRETTY_FUNCTION__)); LogAlways(a_LoggerArgs); \
227 /*LogFunc(a_LoggerArgs);*/ \
228 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
229 } while (0)
230#else
231# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
232 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
233#endif
234
235/**
236 * Call an opcode decoder function.
237 *
238 * We're using macros for this so that adding and removing parameters can be
239 * done as we please. See FNIEMOP_DEF.
240 */
241#define FNIEMOP_CALL(a_pfn) (a_pfn)(pIemCpu)
242
243/**
244 * Call a common opcode decoder function taking one extra argument.
245 *
246 * We're using macros for this so that adding and removing parameters can be
247 * done as we please. See FNIEMOP_DEF_1.
248 */
249#define FNIEMOP_CALL_1(a_pfn, a0) (a_pfn)(pIemCpu, a0)
250
251/**
252 * Call a common opcode decoder function taking two extra arguments.
253 *
254 * We're using macros for this so that adding and removing parameters can be
255 * done as we please. See FNIEMOP_DEF_1.
256 */
257#define FNIEMOP_CALL_2(a_pfn, a0, a1) (a_pfn)(pIemCpu, a0, a1)
258
259/**
260 * Check if we're currently executing in real or virtual 8086 mode.
261 *
262 * @returns @c true if it is, @c false if not.
263 * @param a_pIemCpu The IEM state of the current CPU.
264 */
265#define IEM_IS_REAL_OR_V86_MODE(a_pIemCpu) (CPUMIsGuestInRealOrV86ModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
266
267/**
268 * Check if we're currently executing in virtual 8086 mode.
269 *
270 * @returns @c true if it is, @c false if not.
271 * @param a_pIemCpu The IEM state of the current CPU.
272 */
273#define IEM_IS_V86_MODE(a_pIemCpu) (CPUMIsGuestInV86ModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
274
275/**
276 * Check if we're currently executing in long mode.
277 *
278 * @returns @c true if it is, @c false if not.
279 * @param a_pIemCpu The IEM state of the current CPU.
280 */
281#define IEM_IS_LONG_MODE(a_pIemCpu) (CPUMIsGuestInLongModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
282
283/**
284 * Check if we're currently executing in real mode.
285 *
286 * @returns @c true if it is, @c false if not.
287 * @param a_pIemCpu The IEM state of the current CPU.
288 */
289#define IEM_IS_REAL_MODE(a_pIemCpu) (CPUMIsGuestInRealModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
290
291/**
292 * Returns a (const) pointer to the CPUMFEATURES for the guest CPU.
293 * @returns PCCPUMFEATURES
294 * @param a_pIemCpu The IEM state of the current CPU.
295 */
296#define IEM_GET_GUEST_CPU_FEATURES(a_pIemCpu) (&(IEMCPU_TO_VM(a_pIemCpu)->cpum.ro.GuestFeatures))
297
298/**
299 * Returns a (const) pointer to the CPUMFEATURES for the host CPU.
300 * @returns PCCPUMFEATURES
301 * @param a_pIemCpu The IEM state of the current CPU.
302 */
303#define IEM_GET_HOST_CPU_FEATURES(a_pIemCpu) (&(IEMCPU_TO_VM(a_pIemCpu)->cpum.ro.HostFeatures))
304
305/**
306 * Evaluates to true if we're presenting an Intel CPU to the guest.
307 */
308#define IEM_IS_GUEST_CPU_INTEL(a_pIemCpu) ( (a_pIemCpu)->enmCpuVendor == CPUMCPUVENDOR_INTEL )
309
310/**
311 * Evaluates to true if we're presenting an AMD CPU to the guest.
312 */
313#define IEM_IS_GUEST_CPU_AMD(a_pIemCpu) ( (a_pIemCpu)->enmCpuVendor == CPUMCPUVENDOR_AMD )
314
315/**
316 * Check if the address is canonical.
317 */
318#define IEM_IS_CANONICAL(a_u64Addr) X86_IS_CANONICAL(a_u64Addr)
319
320
321/*********************************************************************************************************************************
322* Global Variables *
323*********************************************************************************************************************************/
324extern const PFNIEMOP g_apfnOneByteMap[256]; /* not static since we need to forward declare it. */
325
326
327/** Function table for the ADD instruction. */
328IEM_STATIC const IEMOPBINSIZES g_iemAImpl_add =
329{
330 iemAImpl_add_u8, iemAImpl_add_u8_locked,
331 iemAImpl_add_u16, iemAImpl_add_u16_locked,
332 iemAImpl_add_u32, iemAImpl_add_u32_locked,
333 iemAImpl_add_u64, iemAImpl_add_u64_locked
334};
335
336/** Function table for the ADC instruction. */
337IEM_STATIC const IEMOPBINSIZES g_iemAImpl_adc =
338{
339 iemAImpl_adc_u8, iemAImpl_adc_u8_locked,
340 iemAImpl_adc_u16, iemAImpl_adc_u16_locked,
341 iemAImpl_adc_u32, iemAImpl_adc_u32_locked,
342 iemAImpl_adc_u64, iemAImpl_adc_u64_locked
343};
344
345/** Function table for the SUB instruction. */
346IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sub =
347{
348 iemAImpl_sub_u8, iemAImpl_sub_u8_locked,
349 iemAImpl_sub_u16, iemAImpl_sub_u16_locked,
350 iemAImpl_sub_u32, iemAImpl_sub_u32_locked,
351 iemAImpl_sub_u64, iemAImpl_sub_u64_locked
352};
353
354/** Function table for the SBB instruction. */
355IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sbb =
356{
357 iemAImpl_sbb_u8, iemAImpl_sbb_u8_locked,
358 iemAImpl_sbb_u16, iemAImpl_sbb_u16_locked,
359 iemAImpl_sbb_u32, iemAImpl_sbb_u32_locked,
360 iemAImpl_sbb_u64, iemAImpl_sbb_u64_locked
361};
362
363/** Function table for the OR instruction. */
364IEM_STATIC const IEMOPBINSIZES g_iemAImpl_or =
365{
366 iemAImpl_or_u8, iemAImpl_or_u8_locked,
367 iemAImpl_or_u16, iemAImpl_or_u16_locked,
368 iemAImpl_or_u32, iemAImpl_or_u32_locked,
369 iemAImpl_or_u64, iemAImpl_or_u64_locked
370};
371
372/** Function table for the XOR instruction. */
373IEM_STATIC const IEMOPBINSIZES g_iemAImpl_xor =
374{
375 iemAImpl_xor_u8, iemAImpl_xor_u8_locked,
376 iemAImpl_xor_u16, iemAImpl_xor_u16_locked,
377 iemAImpl_xor_u32, iemAImpl_xor_u32_locked,
378 iemAImpl_xor_u64, iemAImpl_xor_u64_locked
379};
380
381/** Function table for the AND instruction. */
382IEM_STATIC const IEMOPBINSIZES g_iemAImpl_and =
383{
384 iemAImpl_and_u8, iemAImpl_and_u8_locked,
385 iemAImpl_and_u16, iemAImpl_and_u16_locked,
386 iemAImpl_and_u32, iemAImpl_and_u32_locked,
387 iemAImpl_and_u64, iemAImpl_and_u64_locked
388};
389
390/** Function table for the CMP instruction.
391 * @remarks Making operand order ASSUMPTIONS.
392 */
393IEM_STATIC const IEMOPBINSIZES g_iemAImpl_cmp =
394{
395 iemAImpl_cmp_u8, NULL,
396 iemAImpl_cmp_u16, NULL,
397 iemAImpl_cmp_u32, NULL,
398 iemAImpl_cmp_u64, NULL
399};
400
401/** Function table for the TEST instruction.
402 * @remarks Making operand order ASSUMPTIONS.
403 */
404IEM_STATIC const IEMOPBINSIZES g_iemAImpl_test =
405{
406 iemAImpl_test_u8, NULL,
407 iemAImpl_test_u16, NULL,
408 iemAImpl_test_u32, NULL,
409 iemAImpl_test_u64, NULL
410};
411
412/** Function table for the BT instruction. */
413IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bt =
414{
415 NULL, NULL,
416 iemAImpl_bt_u16, NULL,
417 iemAImpl_bt_u32, NULL,
418 iemAImpl_bt_u64, NULL
419};
420
421/** Function table for the BTC instruction. */
422IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btc =
423{
424 NULL, NULL,
425 iemAImpl_btc_u16, iemAImpl_btc_u16_locked,
426 iemAImpl_btc_u32, iemAImpl_btc_u32_locked,
427 iemAImpl_btc_u64, iemAImpl_btc_u64_locked
428};
429
430/** Function table for the BTR instruction. */
431IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btr =
432{
433 NULL, NULL,
434 iemAImpl_btr_u16, iemAImpl_btr_u16_locked,
435 iemAImpl_btr_u32, iemAImpl_btr_u32_locked,
436 iemAImpl_btr_u64, iemAImpl_btr_u64_locked
437};
438
439/** Function table for the BTS instruction. */
440IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bts =
441{
442 NULL, NULL,
443 iemAImpl_bts_u16, iemAImpl_bts_u16_locked,
444 iemAImpl_bts_u32, iemAImpl_bts_u32_locked,
445 iemAImpl_bts_u64, iemAImpl_bts_u64_locked
446};
447
448/** Function table for the BSF instruction. */
449IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsf =
450{
451 NULL, NULL,
452 iemAImpl_bsf_u16, NULL,
453 iemAImpl_bsf_u32, NULL,
454 iemAImpl_bsf_u64, NULL
455};
456
457/** Function table for the BSR instruction. */
458IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsr =
459{
460 NULL, NULL,
461 iemAImpl_bsr_u16, NULL,
462 iemAImpl_bsr_u32, NULL,
463 iemAImpl_bsr_u64, NULL
464};
465
466/** Function table for the IMUL instruction. */
467IEM_STATIC const IEMOPBINSIZES g_iemAImpl_imul_two =
468{
469 NULL, NULL,
470 iemAImpl_imul_two_u16, NULL,
471 iemAImpl_imul_two_u32, NULL,
472 iemAImpl_imul_two_u64, NULL
473};
474
475/** Group 1 /r lookup table. */
476IEM_STATIC const PCIEMOPBINSIZES g_apIemImplGrp1[8] =
477{
478 &g_iemAImpl_add,
479 &g_iemAImpl_or,
480 &g_iemAImpl_adc,
481 &g_iemAImpl_sbb,
482 &g_iemAImpl_and,
483 &g_iemAImpl_sub,
484 &g_iemAImpl_xor,
485 &g_iemAImpl_cmp
486};
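/*
 * Illustrative sketch of how a group 1 decoder picks the implementation from
 * g_apIemImplGrp1 using the reg field of the ModR/M byte; the local variable
 * names are hypothetical:
 *
 * @code
 *      uint8_t bRm;
 *      IEM_OPCODE_GET_NEXT_U8(&bRm);
 *      PCIEMOPBINSIZES pImpl = g_apIemImplGrp1[(bRm >> 3) & 7];  // ModR/M.reg selects ADD..CMP
 * @endcode
 */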
487
488/** Function table for the INC instruction. */
489IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_inc =
490{
491 iemAImpl_inc_u8, iemAImpl_inc_u8_locked,
492 iemAImpl_inc_u16, iemAImpl_inc_u16_locked,
493 iemAImpl_inc_u32, iemAImpl_inc_u32_locked,
494 iemAImpl_inc_u64, iemAImpl_inc_u64_locked
495};
496
497/** Function table for the DEC instruction. */
498IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_dec =
499{
500 iemAImpl_dec_u8, iemAImpl_dec_u8_locked,
501 iemAImpl_dec_u16, iemAImpl_dec_u16_locked,
502 iemAImpl_dec_u32, iemAImpl_dec_u32_locked,
503 iemAImpl_dec_u64, iemAImpl_dec_u64_locked
504};
505
506/** Function table for the NEG instruction. */
507IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_neg =
508{
509 iemAImpl_neg_u8, iemAImpl_neg_u8_locked,
510 iemAImpl_neg_u16, iemAImpl_neg_u16_locked,
511 iemAImpl_neg_u32, iemAImpl_neg_u32_locked,
512 iemAImpl_neg_u64, iemAImpl_neg_u64_locked
513};
514
515/** Function table for the NOT instruction. */
516IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_not =
517{
518 iemAImpl_not_u8, iemAImpl_not_u8_locked,
519 iemAImpl_not_u16, iemAImpl_not_u16_locked,
520 iemAImpl_not_u32, iemAImpl_not_u32_locked,
521 iemAImpl_not_u64, iemAImpl_not_u64_locked
522};
523
524
525/** Function table for the ROL instruction. */
526IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rol =
527{
528 iemAImpl_rol_u8,
529 iemAImpl_rol_u16,
530 iemAImpl_rol_u32,
531 iemAImpl_rol_u64
532};
533
534/** Function table for the ROR instruction. */
535IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_ror =
536{
537 iemAImpl_ror_u8,
538 iemAImpl_ror_u16,
539 iemAImpl_ror_u32,
540 iemAImpl_ror_u64
541};
542
543/** Function table for the RCL instruction. */
544IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcl =
545{
546 iemAImpl_rcl_u8,
547 iemAImpl_rcl_u16,
548 iemAImpl_rcl_u32,
549 iemAImpl_rcl_u64
550};
551
552/** Function table for the RCR instruction. */
553IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcr =
554{
555 iemAImpl_rcr_u8,
556 iemAImpl_rcr_u16,
557 iemAImpl_rcr_u32,
558 iemAImpl_rcr_u64
559};
560
561/** Function table for the SHL instruction. */
562IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shl =
563{
564 iemAImpl_shl_u8,
565 iemAImpl_shl_u16,
566 iemAImpl_shl_u32,
567 iemAImpl_shl_u64
568};
569
570/** Function table for the SHR instruction. */
571IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shr =
572{
573 iemAImpl_shr_u8,
574 iemAImpl_shr_u16,
575 iemAImpl_shr_u32,
576 iemAImpl_shr_u64
577};
578
579/** Function table for the SAR instruction. */
580IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_sar =
581{
582 iemAImpl_sar_u8,
583 iemAImpl_sar_u16,
584 iemAImpl_sar_u32,
585 iemAImpl_sar_u64
586};
587
588
589/** Function table for the MUL instruction. */
590IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_mul =
591{
592 iemAImpl_mul_u8,
593 iemAImpl_mul_u16,
594 iemAImpl_mul_u32,
595 iemAImpl_mul_u64
596};
597
598/** Function table for the IMUL instruction working implicitly on rAX. */
599IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_imul =
600{
601 iemAImpl_imul_u8,
602 iemAImpl_imul_u16,
603 iemAImpl_imul_u32,
604 iemAImpl_imul_u64
605};
606
607/** Function table for the DIV instruction. */
608IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_div =
609{
610 iemAImpl_div_u8,
611 iemAImpl_div_u16,
612 iemAImpl_div_u32,
613 iemAImpl_div_u64
614};
615
616/** Function table for the IDIV instruction. */
617IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_idiv =
618{
619 iemAImpl_idiv_u8,
620 iemAImpl_idiv_u16,
621 iemAImpl_idiv_u32,
622 iemAImpl_idiv_u64
623};
624
625/** Function table for the SHLD instruction */
626IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shld =
627{
628 iemAImpl_shld_u16,
629 iemAImpl_shld_u32,
630 iemAImpl_shld_u64,
631};
632
633/** Function table for the SHRD instruction */
634IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shrd =
635{
636 iemAImpl_shrd_u16,
637 iemAImpl_shrd_u32,
638 iemAImpl_shrd_u64,
639};
640
641
642/** Function table for the PUNPCKLBW instruction */
643IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklbw = { iemAImpl_punpcklbw_u64, iemAImpl_punpcklbw_u128 };
645/** Function table for the PUNPCKLWD instruction */
645IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklwd = { iemAImpl_punpcklwd_u64, iemAImpl_punpcklwd_u128 };
646/** Function table for the PUNPCKLDQ instruction */
647IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpckldq = { iemAImpl_punpckldq_u64, iemAImpl_punpckldq_u128 };
648/** Function table for the PUNPCKLQDQ instruction */
649IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklqdq = { NULL, iemAImpl_punpcklqdq_u128 };
650
651/** Function table for the PUNPCKHBW instruction */
652IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhbw = { iemAImpl_punpckhbw_u64, iemAImpl_punpckhbw_u128 };
654/** Function table for the PUNPCKHWD instruction */
654IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhwd = { iemAImpl_punpckhwd_u64, iemAImpl_punpckhwd_u128 };
655/** Function table for the PUNPCKHDQ instruction */
656IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhdq = { iemAImpl_punpckhdq_u64, iemAImpl_punpckhdq_u128 };
657/** Function table for the PUNPCKHQDQ instruction */
658IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhqdq = { NULL, iemAImpl_punpckhqdq_u128 };
659
660/** Function table for the PXOR instruction */
661IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pxor = { iemAImpl_pxor_u64, iemAImpl_pxor_u128 };
662/** Function table for the PCMPEQB instruction */
663IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqb = { iemAImpl_pcmpeqb_u64, iemAImpl_pcmpeqb_u128 };
664/** Function table for the PCMPEQW instruction */
665IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqw = { iemAImpl_pcmpeqw_u64, iemAImpl_pcmpeqw_u128 };
666/** Function table for the PCMPEQD instruction */
667IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqd = { iemAImpl_pcmpeqd_u64, iemAImpl_pcmpeqd_u128 };
668
669
670#if defined(IEM_VERIFICATION_MODE_MINIMAL) || defined(IEM_LOG_MEMORY_WRITES)
671/** What IEM just wrote. */
672uint8_t g_abIemWrote[256];
673/** How much IEM just wrote. */
674size_t g_cbIemWrote;
675#endif
676
677
678/*********************************************************************************************************************************
679* Internal Functions *
680*********************************************************************************************************************************/
681IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PIEMCPU pIemCpu, uint16_t uErr);
682IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PIEMCPU pIemCpu);
683IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFault0(PIEMCPU pIemCpu);
684IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PIEMCPU pIemCpu, uint16_t uSel);
685/*IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresent(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess);*/
686IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel);
687IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PIEMCPU pIemCpu, uint16_t uErr);
688IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel);
689IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PIEMCPU pIemCpu, uint16_t uErr);
690IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault(PIEMCPU pIemCpu, uint16_t uErr);
691IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PIEMCPU pIemCpu);
692IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PIEMCPU pIemCpu, RTSEL uSel);
693IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBounds(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess);
694IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PIEMCPU pIemCpu, RTSEL Sel);
695IEM_STATIC VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess);
696IEM_STATIC VBOXSTRICTRC iemRaisePageFault(PIEMCPU pIemCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
697IEM_STATIC VBOXSTRICTRC iemRaiseAlignmentCheckException(PIEMCPU pIemCpu);
698IEM_STATIC VBOXSTRICTRC iemMemMap(PIEMCPU pIemCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess);
699IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PIEMCPU pIemCpu, void *pvMem, uint32_t fAccess);
700IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
701IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
702IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
703IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
704IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
705IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
706IEM_STATIC VBOXSTRICTRC iemMemFetchSelDescWithErr(PIEMCPU pIemCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode);
707IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PIEMCPU pIemCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt);
708IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PIEMCPU pIemCpu, void *pvMem, uint64_t uNewRsp);
709IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PIEMCPU pIemCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp);
710IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PIEMCPU pIemCpu, uint32_t u32Value);
711IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PIEMCPU pIemCpu, uint16_t u16Value);
712IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PIEMCPU pIemCpu, uint16_t uSel);
713IEM_STATIC uint16_t iemSRegFetchU16(PIEMCPU pIemCpu, uint8_t iSegReg);
714
715#if defined(IEM_VERIFICATION_MODE_FULL) && !defined(IEM_VERIFICATION_MODE_MINIMAL)
716IEM_STATIC PIEMVERIFYEVTREC iemVerifyAllocRecord(PIEMCPU pIemCpu);
717#endif
718IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue);
719IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue);
720
721
722
723/**
724 * Sets the pass up status.
725 *
726 * @returns VINF_SUCCESS.
727 * @param pIemCpu The per CPU IEM state of the calling thread.
728 * @param rcPassUp The pass up status. Must be informational.
729 * VINF_SUCCESS is not allowed.
730 */
731IEM_STATIC int iemSetPassUpStatus(PIEMCPU pIemCpu, VBOXSTRICTRC rcPassUp)
732{
733 AssertRC(VBOXSTRICTRC_VAL(rcPassUp)); Assert(rcPassUp != VINF_SUCCESS);
734
735 int32_t const rcOldPassUp = pIemCpu->rcPassUp;
736 if (rcOldPassUp == VINF_SUCCESS)
737 pIemCpu->rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
738 /* If both are EM scheduling codes, use EM priority rules. */
739 else if ( rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST
740 && rcPassUp >= VINF_EM_FIRST && rcPassUp <= VINF_EM_LAST)
741 {
742 if (rcPassUp < rcOldPassUp)
743 {
744 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
745 pIemCpu->rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
746 }
747 else
748 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
749 }
750 /* Override EM scheduling with specific status code. */
751 else if (rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST)
752 {
753 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
754 pIemCpu->rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
755 }
756 /* Don't override specific status code, first come first served. */
757 else
758 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
759 return VINF_SUCCESS;
760}
761
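/*
 * Illustrative sketch of the typical caller pattern: an informational status
 * from a PGM access is remembered via iemSetPassUpStatus so execution can
 * continue with VINF_SUCCESS (pvDst and cb are hypothetical locals):
 *
 * @code
 *      VBOXSTRICTRC rcStrict = PGMPhysRead(pVM, GCPhys, pvDst, cb, PGMACCESSORIGIN_IEM);
 *      if (rcStrict != VINF_SUCCESS && PGM_PHYS_RW_IS_SUCCESS(rcStrict))
 *          rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
 * @endcode
 */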
762
763/**
764 * Initializes the execution state.
765 *
766 * @param pIemCpu The per CPU IEM state.
767 * @param fBypassHandlers Whether to bypass access handlers.
768 */
769DECLINLINE(void) iemInitExec(PIEMCPU pIemCpu, bool fBypassHandlers)
770{
771 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
772 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
773
774 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
775 Assert(pIemCpu->PendingCommit.enmFn == IEMCOMMIT_INVALID);
776
777#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
778 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
779 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
780 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
781 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
782 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
783 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
784 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
785 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
786#endif
787
788#ifdef VBOX_WITH_RAW_MODE_NOT_R0
789 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
790#endif
791 pIemCpu->uCpl = CPUMGetGuestCPL(pVCpu);
792 IEMMODE enmMode = CPUMIsGuestIn64BitCodeEx(pCtx)
793 ? IEMMODE_64BIT
794 : pCtx->cs.Attr.n.u1DefBig /** @todo check if this is correct... */
795 ? IEMMODE_32BIT
796 : IEMMODE_16BIT;
797 pIemCpu->enmCpuMode = enmMode;
798#ifdef VBOX_STRICT
799 pIemCpu->enmDefAddrMode = (IEMMODE)0xc0fe;
800 pIemCpu->enmEffAddrMode = (IEMMODE)0xc0fe;
801 pIemCpu->enmDefOpSize = (IEMMODE)0xc0fe;
802 pIemCpu->enmEffOpSize = (IEMMODE)0xc0fe;
803 pIemCpu->fPrefixes = (IEMMODE)0xfeedbeef;
804 pIemCpu->uRexReg = 127;
805 pIemCpu->uRexB = 127;
806 pIemCpu->uRexIndex = 127;
807 pIemCpu->iEffSeg = 127;
808 pIemCpu->offOpcode = 127;
809 pIemCpu->cbOpcode = 127;
810#endif
811
812 pIemCpu->cActiveMappings = 0;
813 pIemCpu->iNextMapping = 0;
814 pIemCpu->rcPassUp = VINF_SUCCESS;
815 pIemCpu->fBypassHandlers = fBypassHandlers;
816#ifdef VBOX_WITH_RAW_MODE_NOT_R0
817 pIemCpu->fInPatchCode = pIemCpu->uCpl == 0
818 && pCtx->cs.u64Base == 0
819 && pCtx->cs.u32Limit == UINT32_MAX
820 && PATMIsPatchGCAddr(IEMCPU_TO_VM(pIemCpu), pCtx->eip);
821 if (!pIemCpu->fInPatchCode)
822 CPUMRawLeave(pVCpu, VINF_SUCCESS);
823#endif
824}
825
826
827/**
828 * Initializes the decoder state.
829 *
830 * @param pIemCpu The per CPU IEM state.
831 * @param fBypassHandlers Whether to bypass access handlers.
832 */
833DECLINLINE(void) iemInitDecoder(PIEMCPU pIemCpu, bool fBypassHandlers)
834{
835 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
836 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
837
838 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
839 Assert(pIemCpu->PendingCommit.enmFn == IEMCOMMIT_INVALID);
840
841#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
842 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
843 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
844 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
845 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
846 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
847 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
848 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
849 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
850#endif
851
852#ifdef VBOX_WITH_RAW_MODE_NOT_R0
853 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
854#endif
855 pIemCpu->uCpl = CPUMGetGuestCPL(pVCpu);
856#ifdef IEM_VERIFICATION_MODE_FULL
857 if (pIemCpu->uInjectCpl != UINT8_MAX)
858 pIemCpu->uCpl = pIemCpu->uInjectCpl;
859#endif
860 IEMMODE enmMode = CPUMIsGuestIn64BitCodeEx(pCtx)
861 ? IEMMODE_64BIT
862 : pCtx->cs.Attr.n.u1DefBig /** @todo check if this is correct... */
863 ? IEMMODE_32BIT
864 : IEMMODE_16BIT;
865 pIemCpu->enmCpuMode = enmMode;
866 pIemCpu->enmDefAddrMode = enmMode; /** @todo check if this is correct... */
867 pIemCpu->enmEffAddrMode = enmMode;
868 if (enmMode != IEMMODE_64BIT)
869 {
870 pIemCpu->enmDefOpSize = enmMode; /** @todo check if this is correct... */
871 pIemCpu->enmEffOpSize = enmMode;
872 }
873 else
874 {
875 pIemCpu->enmDefOpSize = IEMMODE_32BIT;
876 pIemCpu->enmEffOpSize = IEMMODE_32BIT;
877 }
878 pIemCpu->fPrefixes = 0;
879 pIemCpu->uRexReg = 0;
880 pIemCpu->uRexB = 0;
881 pIemCpu->uRexIndex = 0;
882 pIemCpu->iEffSeg = X86_SREG_DS;
883 pIemCpu->offOpcode = 0;
884 pIemCpu->cbOpcode = 0;
885 pIemCpu->cActiveMappings = 0;
886 pIemCpu->iNextMapping = 0;
887 pIemCpu->rcPassUp = VINF_SUCCESS;
888 pIemCpu->fBypassHandlers = fBypassHandlers;
889#ifdef VBOX_WITH_RAW_MODE_NOT_R0
890 pIemCpu->fInPatchCode = pIemCpu->uCpl == 0
891 && pCtx->cs.u64Base == 0
892 && pCtx->cs.u32Limit == UINT32_MAX
893 && PATMIsPatchGCAddr(IEMCPU_TO_VM(pIemCpu), pCtx->eip);
894 if (!pIemCpu->fInPatchCode)
895 CPUMRawLeave(pVCpu, VINF_SUCCESS);
896#endif
897
898#ifdef DBGFTRACE_ENABLED
899 switch (enmMode)
900 {
901 case IEMMODE_64BIT:
902 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pIemCpu->uCpl, pCtx->rip);
903 break;
904 case IEMMODE_32BIT:
905 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pIemCpu->uCpl, pCtx->cs.Sel, pCtx->eip);
906 break;
907 case IEMMODE_16BIT:
908 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pIemCpu->uCpl, pCtx->cs.Sel, pCtx->eip);
909 break;
910 }
911#endif
912}
913
914
915/**
916 * Prefetches opcodes the first time when starting to execute.
917 *
918 * @returns Strict VBox status code.
919 * @param pIemCpu The IEM state.
920 * @param fBypassHandlers Whether to bypass access handlers.
921 */
922IEM_STATIC VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PIEMCPU pIemCpu, bool fBypassHandlers)
923{
924#ifdef IEM_VERIFICATION_MODE_FULL
925 uint8_t const cbOldOpcodes = pIemCpu->cbOpcode;
926#endif
927 iemInitDecoder(pIemCpu, fBypassHandlers);
928
929 /*
930 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
931 *
932 * First translate CS:rIP to a physical address.
933 */
934 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
935 uint32_t cbToTryRead;
936 RTGCPTR GCPtrPC;
937 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
938 {
939 cbToTryRead = PAGE_SIZE;
940 GCPtrPC = pCtx->rip;
941 if (!IEM_IS_CANONICAL(GCPtrPC))
942 return iemRaiseGeneralProtectionFault0(pIemCpu);
943 cbToTryRead = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
944 }
945 else
946 {
947 uint32_t GCPtrPC32 = pCtx->eip;
948 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || pIemCpu->enmCpuMode == IEMMODE_32BIT, ("%04x:%RX64\n", pCtx->cs.Sel, pCtx->rip));
949 if (GCPtrPC32 > pCtx->cs.u32Limit)
950 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
951 cbToTryRead = pCtx->cs.u32Limit - GCPtrPC32 + 1;
952 if (!cbToTryRead) /* overflowed */
953 {
954 Assert(GCPtrPC32 == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
955 cbToTryRead = UINT32_MAX;
956 }
957 GCPtrPC = (uint32_t)pCtx->cs.u64Base + GCPtrPC32;
958 Assert(GCPtrPC <= UINT32_MAX);
959 }
960
961#ifdef VBOX_WITH_RAW_MODE_NOT_R0
962 /* Allow interpretation of patch manager code blocks since they can for
963 instance throw #PFs for perfectly good reasons. */
964 if (pIemCpu->fInPatchCode)
965 {
966 size_t cbRead = 0;
967 int rc = PATMReadPatchCode(IEMCPU_TO_VM(pIemCpu), GCPtrPC, pIemCpu->abOpcode, sizeof(pIemCpu->abOpcode), &cbRead);
968 AssertRCReturn(rc, rc);
969 pIemCpu->cbOpcode = (uint8_t)cbRead; Assert(pIemCpu->cbOpcode == cbRead); Assert(cbRead > 0);
970 return VINF_SUCCESS;
971 }
972#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
973
974 RTGCPHYS GCPhys;
975 uint64_t fFlags;
976 int rc = PGMGstGetPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrPC, &fFlags, &GCPhys);
977 if (RT_FAILURE(rc))
978 {
979 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
980 return iemRaisePageFault(pIemCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, rc);
981 }
982 if (!(fFlags & X86_PTE_US) && pIemCpu->uCpl == 3)
983 {
984 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
985 return iemRaisePageFault(pIemCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
986 }
987 if ((fFlags & X86_PTE_PAE_NX) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
988 {
989 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
990 return iemRaisePageFault(pIemCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
991 }
992 GCPhys |= GCPtrPC & PAGE_OFFSET_MASK;
993 /** @todo Check reserved bits and such stuff. PGM is better at doing
994 * that, so do it when implementing the guest virtual address
995 * TLB... */
996
997#ifdef IEM_VERIFICATION_MODE_FULL
998 /*
999 * Optimistic optimization: Use unconsumed opcode bytes from the previous
1000 * instruction.
1001 */
1002 /** @todo optimize this differently by not using PGMPhysRead. */
1003 RTGCPHYS const offPrevOpcodes = GCPhys - pIemCpu->GCPhysOpcodes;
1004 pIemCpu->GCPhysOpcodes = GCPhys;
1005 if ( offPrevOpcodes < cbOldOpcodes
1006 && PAGE_SIZE - (GCPhys & PAGE_OFFSET_MASK) > sizeof(pIemCpu->abOpcode))
1007 {
1008 uint8_t cbNew = cbOldOpcodes - (uint8_t)offPrevOpcodes;
1009 memmove(&pIemCpu->abOpcode[0], &pIemCpu->abOpcode[offPrevOpcodes], cbNew);
1010 pIemCpu->cbOpcode = cbNew;
1011 return VINF_SUCCESS;
1012 }
1013#endif
1014
1015 /*
1016 * Read the bytes at this address.
1017 */
1018 PVM pVM = IEMCPU_TO_VM(pIemCpu);
1019#if defined(IN_RING3) && defined(VBOX_WITH_RAW_MODE_NOT_R0)
1020 size_t cbActual;
1021 if ( PATMIsEnabled(pVM)
1022 && RT_SUCCESS(PATMR3ReadOrgInstr(pVM, GCPtrPC, pIemCpu->abOpcode, sizeof(pIemCpu->abOpcode), &cbActual)))
1023 {
1024 Log4(("decode - Read %u unpatched bytes at %RGv\n", cbActual, GCPtrPC));
1025 Assert(cbActual > 0);
1026 pIemCpu->cbOpcode = (uint8_t)cbActual;
1027 }
1028 else
1029#endif
1030 {
1031 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1032 if (cbToTryRead > cbLeftOnPage)
1033 cbToTryRead = cbLeftOnPage;
1034 if (cbToTryRead > sizeof(pIemCpu->abOpcode))
1035 cbToTryRead = sizeof(pIemCpu->abOpcode);
1036
1037 if (!pIemCpu->fBypassHandlers)
1038 {
1039 VBOXSTRICTRC rcStrict = PGMPhysRead(pVM, GCPhys, pIemCpu->abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
1040 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1041 { /* likely */ }
1042 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1043 {
1044 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1045 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1046 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
1047 }
1048 else
1049 {
1050 Log((RT_SUCCESS(rcStrict)
1051 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1052 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1053 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1054 return rcStrict;
1055 }
1056 }
1057 else
1058 {
1059 rc = PGMPhysSimpleReadGCPhys(pVM, pIemCpu->abOpcode, GCPhys, cbToTryRead);
1060 if (RT_SUCCESS(rc))
1061 { /* likely */ }
1062 else
1063 {
1064 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
1065 GCPtrPC, GCPhys, cbToTryRead, rc));
1066 return rc;
1067 }
1068 }
1069 pIemCpu->cbOpcode = cbToTryRead;
1070 }
1071
1072 return VINF_SUCCESS;
1073}
1074
1075
1076/**
1077 * Try to fetch at least @a cbMin more opcode bytes, raising the appropriate
1078 * exception if it fails.
1079 *
1080 * @returns Strict VBox status code.
1081 * @param pIemCpu The IEM state.
1082 * @param cbMin The minimum number of bytes relative to offOpcode
1083 * that must be read.
1084 */
1085IEM_STATIC VBOXSTRICTRC iemOpcodeFetchMoreBytes(PIEMCPU pIemCpu, size_t cbMin)
1086{
1087 /*
1088 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1089 *
1090 * First translate CS:rIP to a physical address.
1091 */
1092 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1093 uint8_t cbLeft = pIemCpu->cbOpcode - pIemCpu->offOpcode; Assert(cbLeft < cbMin);
1094 uint32_t cbToTryRead;
1095 RTGCPTR GCPtrNext;
1096 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
1097 {
1098 cbToTryRead = PAGE_SIZE;
1099 GCPtrNext = pCtx->rip + pIemCpu->cbOpcode;
1100 if (!IEM_IS_CANONICAL(GCPtrNext))
1101 return iemRaiseGeneralProtectionFault0(pIemCpu);
1102 }
1103 else
1104 {
1105 uint32_t GCPtrNext32 = pCtx->eip;
1106 Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || pIemCpu->enmCpuMode == IEMMODE_32BIT);
1107 GCPtrNext32 += pIemCpu->cbOpcode;
1108 if (GCPtrNext32 > pCtx->cs.u32Limit)
1109 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1110 cbToTryRead = pCtx->cs.u32Limit - GCPtrNext32 + 1;
1111 if (!cbToTryRead) /* overflowed */
1112 {
1113 Assert(GCPtrNext32 == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
1114 cbToTryRead = UINT32_MAX;
1115 /** @todo check out wrapping around the code segment. */
1116 }
1117 if (cbToTryRead < cbMin - cbLeft)
1118 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1119 GCPtrNext = (uint32_t)pCtx->cs.u64Base + GCPtrNext32;
1120 }
1121
1122 /* Only read up to the end of the page, and make sure we don't read more
1123 than the opcode buffer can hold. */
1124 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrNext & PAGE_OFFSET_MASK);
1125 if (cbToTryRead > cbLeftOnPage)
1126 cbToTryRead = cbLeftOnPage;
1127 if (cbToTryRead > sizeof(pIemCpu->abOpcode) - pIemCpu->cbOpcode)
1128 cbToTryRead = sizeof(pIemCpu->abOpcode) - pIemCpu->cbOpcode;
1129/** @todo r=bird: Convert assertion into undefined opcode exception? */
1130 Assert(cbToTryRead >= cbMin - cbLeft); /* ASSUMPTION based on iemInitDecoderAndPrefetchOpcodes. */
1131
1132#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1133 /* Allow interpretation of patch manager code blocks since they can for
1134 instance throw #PFs for perfectly good reasons. */
1135 if (pIemCpu->fInPatchCode)
1136 {
1137 size_t cbRead = 0;
1138 int rc = PATMReadPatchCode(IEMCPU_TO_VM(pIemCpu), GCPtrNext, pIemCpu->abOpcode, cbToTryRead, &cbRead);
1139 AssertRCReturn(rc, rc);
1140 pIemCpu->cbOpcode = (uint8_t)cbRead; Assert(pIemCpu->cbOpcode == cbRead); Assert(cbRead > 0);
1141 return VINF_SUCCESS;
1142 }
1143#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1144
1145 RTGCPHYS GCPhys;
1146 uint64_t fFlags;
1147 int rc = PGMGstGetPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrNext, &fFlags, &GCPhys);
1148 if (RT_FAILURE(rc))
1149 {
1150 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
1151 return iemRaisePageFault(pIemCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, rc);
1152 }
1153 if (!(fFlags & X86_PTE_US) && pIemCpu->uCpl == 3)
1154 {
1155 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
1156 return iemRaisePageFault(pIemCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1157 }
1158 if ((fFlags & X86_PTE_PAE_NX) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
1159 {
1160 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
1161 return iemRaisePageFault(pIemCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1162 }
1163 GCPhys |= GCPtrNext & PAGE_OFFSET_MASK;
1164 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, pIemCpu->cbOpcode));
1165 /** @todo Check reserved bits and such stuff. PGM is better at doing
1166 * that, so do it when implementing the guest virtual address
1167 * TLB... */
1168
1169 /*
1170 * Read the bytes at this address.
1171 *
1172 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
1173 * and since PATM should only patch the start of an instruction there
1174 * should be no need to check again here.
1175 */
1176 if (!pIemCpu->fBypassHandlers)
1177 {
1178 VBOXSTRICTRC rcStrict = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhys, &pIemCpu->abOpcode[pIemCpu->cbOpcode],
1179 cbToTryRead, PGMACCESSORIGIN_IEM);
1180 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1181 { /* likely */ }
1182 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1183 {
1184 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1185 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1186 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
1187 }
1188 else
1189 {
1190 Log((RT_SUCCESS(rcStrict)
1191 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1192 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1193 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1194 return rcStrict;
1195 }
1196 }
1197 else
1198 {
1199 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), &pIemCpu->abOpcode[pIemCpu->cbOpcode], GCPhys, cbToTryRead);
1200 if (RT_SUCCESS(rc))
1201 { /* likely */ }
1202 else
1203 {
1204 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
1205 return rc;
1206 }
1207 }
1208 pIemCpu->cbOpcode += cbToTryRead;
1209 Log5(("%.*Rhxs\n", pIemCpu->cbOpcode, pIemCpu->abOpcode));
1210
1211 return VINF_SUCCESS;
1212}
1213
1214
1215/**
1216 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
1217 *
1218 * @returns Strict VBox status code.
1219 * @param pIemCpu The IEM state.
1220 * @param pb Where to return the opcode byte.
1221 */
1222DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU8Slow(PIEMCPU pIemCpu, uint8_t *pb)
1223{
1224 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 1);
1225 if (rcStrict == VINF_SUCCESS)
1226 {
1227 uint8_t offOpcode = pIemCpu->offOpcode;
1228 *pb = pIemCpu->abOpcode[offOpcode];
1229 pIemCpu->offOpcode = offOpcode + 1;
1230 }
1231 else
1232 *pb = 0;
1233 return rcStrict;
1234}
1235
1236
1237/**
1238 * Fetches the next opcode byte.
1239 *
1240 * @returns Strict VBox status code.
1241 * @param pIemCpu The IEM state.
1242 * @param pu8 Where to return the opcode byte.
1243 */
1244DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU8(PIEMCPU pIemCpu, uint8_t *pu8)
1245{
1246 uint8_t const offOpcode = pIemCpu->offOpcode;
1247 if (RT_LIKELY(offOpcode < pIemCpu->cbOpcode))
1248 {
1249 *pu8 = pIemCpu->abOpcode[offOpcode];
1250 pIemCpu->offOpcode = offOpcode + 1;
1251 return VINF_SUCCESS;
1252 }
1253 return iemOpcodeGetNextU8Slow(pIemCpu, pu8);
1254}
1255
1256
1257/**
1258 * Fetches the next opcode byte, returns automatically on failure.
1259 *
1260 * @param a_pu8 Where to return the opcode byte.
1261 * @remark Implicitly references pIemCpu.
1262 */
1263#define IEM_OPCODE_GET_NEXT_U8(a_pu8) \
1264 do \
1265 { \
1266 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU8(pIemCpu, (a_pu8)); \
1267 if (rcStrict2 != VINF_SUCCESS) \
1268 return rcStrict2; \
1269 } while (0)
1270
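/*
 * A small sketch, assuming a hypothetical two-operand decoder, of how the
 * IEM_OPCODE_GET_NEXT_* macros chain together; each macro returns from the
 * calling function on a fetch failure, so no explicit error handling is needed:
 *
 * @code
 *      uint8_t  bRm;    IEM_OPCODE_GET_NEXT_U8(&bRm);      // ModR/M byte
 *      uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm);  // 16-bit immediate
 * @endcode
 */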
1271
1272/**
1273 * Fetches the next signed byte from the opcode stream.
1274 *
1275 * @returns Strict VBox status code.
1276 * @param pIemCpu The IEM state.
1277 * @param pi8 Where to return the signed byte.
1278 */
1279DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8(PIEMCPU pIemCpu, int8_t *pi8)
1280{
1281 return iemOpcodeGetNextU8(pIemCpu, (uint8_t *)pi8);
1282}
1283
1284
1285/**
1286 * Fetches the next signed byte from the opcode stream, returning automatically
1287 * on failure.
1288 *
1289 * @param a_pi8 Where to return the signed byte.
1290 * @remark Implicitly references pIemCpu.
1291 */
1292#define IEM_OPCODE_GET_NEXT_S8(a_pi8) \
1293 do \
1294 { \
1295 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8(pIemCpu, (a_pi8)); \
1296 if (rcStrict2 != VINF_SUCCESS) \
1297 return rcStrict2; \
1298 } while (0)
1299
1300
1301/**
1302 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
1303 *
1304 * @returns Strict VBox status code.
1305 * @param pIemCpu The IEM state.
1306 * @param pu16 Where to return the opcode dword.
1307 */
1308DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU16Slow(PIEMCPU pIemCpu, uint16_t *pu16)
1309{
1310 uint8_t u8;
1311 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pIemCpu, &u8);
1312 if (rcStrict == VINF_SUCCESS)
1313 *pu16 = (int8_t)u8;
1314 return rcStrict;
1315}
1316
1317
1318/**
1319 * Fetches the next signed byte from the opcode stream, extending it to
1320 * unsigned 16-bit.
1321 *
1322 * @returns Strict VBox status code.
1323 * @param pIemCpu The IEM state.
1324 * @param pu16 Where to return the unsigned word.
1325 */
1326DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU16(PIEMCPU pIemCpu, uint16_t *pu16)
1327{
1328 uint8_t const offOpcode = pIemCpu->offOpcode;
1329 if (RT_UNLIKELY(offOpcode >= pIemCpu->cbOpcode))
1330 return iemOpcodeGetNextS8SxU16Slow(pIemCpu, pu16);
1331
1332 *pu16 = (int8_t)pIemCpu->abOpcode[offOpcode];
1333 pIemCpu->offOpcode = offOpcode + 1;
1334 return VINF_SUCCESS;
1335}
1336
1337
1338/**
1339 * Fetches the next signed byte from the opcode stream, sign-extending it to
1340 * a word, returning automatically on failure.
1341 *
1342 * @param a_pu16 Where to return the word.
1343 * @remark Implicitly references pIemCpu.
1344 */
1345#define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) \
1346 do \
1347 { \
1348 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU16(pIemCpu, (a_pu16)); \
1349 if (rcStrict2 != VINF_SUCCESS) \
1350 return rcStrict2; \
1351 } while (0)
1352
1353
1354/**
1355 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
1356 *
1357 * @returns Strict VBox status code.
1358 * @param pIemCpu The IEM state.
1359 * @param pu32 Where to return the opcode dword.
1360 */
1361DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU32Slow(PIEMCPU pIemCpu, uint32_t *pu32)
1362{
1363 uint8_t u8;
1364 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pIemCpu, &u8);
1365 if (rcStrict == VINF_SUCCESS)
1366 *pu32 = (int8_t)u8;
1367 return rcStrict;
1368}
1369
1370
1371/**
1372 * Fetches the next signed byte from the opcode stream, extending it to
1373 * unsigned 32-bit.
1374 *
1375 * @returns Strict VBox status code.
1376 * @param pIemCpu The IEM state.
1377 * @param pu32 Where to return the unsigned dword.
1378 */
1379DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU32(PIEMCPU pIemCpu, uint32_t *pu32)
1380{
1381 uint8_t const offOpcode = pIemCpu->offOpcode;
1382 if (RT_UNLIKELY(offOpcode >= pIemCpu->cbOpcode))
1383 return iemOpcodeGetNextS8SxU32Slow(pIemCpu, pu32);
1384
1385 *pu32 = (int8_t)pIemCpu->abOpcode[offOpcode];
1386 pIemCpu->offOpcode = offOpcode + 1;
1387 return VINF_SUCCESS;
1388}
1389
1390
1391/**
1392 * Fetches the next signed byte from the opcode stream, sign-extending it to
1393 * a double word, returning automatically on failure.
1394 *
1395 * @param a_pu32 Where to return the double word.
1396 * @remark Implicitly references pIemCpu.
1397 */
1398#define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) \
1399 do \
1400 { \
1401 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU32(pIemCpu, (a_pu32)); \
1402 if (rcStrict2 != VINF_SUCCESS) \
1403 return rcStrict2; \
1404 } while (0)
1405
1406
1407/**
1408 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
1409 *
1410 * @returns Strict VBox status code.
1411 * @param pIemCpu The IEM state.
1412 * @param pu64 Where to return the opcode qword.
1413 */
1414DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1415{
1416 uint8_t u8;
1417 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pIemCpu, &u8);
1418 if (rcStrict == VINF_SUCCESS)
1419 *pu64 = (int8_t)u8;
1420 return rcStrict;
1421}
1422
1423
1424/**
1425 * Fetches the next signed byte from the opcode stream, extending it to
1426 * unsigned 64-bit.
1427 *
1428 * @returns Strict VBox status code.
1429 * @param pIemCpu The IEM state.
1430 * @param pu64 Where to return the unsigned qword.
1431 */
1432DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1433{
1434 uint8_t const offOpcode = pIemCpu->offOpcode;
1435 if (RT_UNLIKELY(offOpcode >= pIemCpu->cbOpcode))
1436 return iemOpcodeGetNextS8SxU64Slow(pIemCpu, pu64);
1437
1438 *pu64 = (int8_t)pIemCpu->abOpcode[offOpcode];
1439 pIemCpu->offOpcode = offOpcode + 1;
1440 return VINF_SUCCESS;
1441}
1442
1443
1444/**
1445 * Fetches the next signed byte from the opcode stream, sign-extending it to
1446 * a quad word, returning automatically on failure.
1447 *
1448 * @param a_pu64 Where to return the quad word.
1449 * @remark Implicitly references pIemCpu.
1450 */
1451#define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) \
1452 do \
1453 { \
1454 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU64(pIemCpu, (a_pu64)); \
1455 if (rcStrict2 != VINF_SUCCESS) \
1456 return rcStrict2; \
1457 } while (0)
1458
1459
1460/**
1461 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
1462 *
1463 * @returns Strict VBox status code.
1464 * @param pIemCpu The IEM state.
1465 * @param pu16 Where to return the opcode word.
1466 */
1467DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16Slow(PIEMCPU pIemCpu, uint16_t *pu16)
1468{
1469 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 2);
1470 if (rcStrict == VINF_SUCCESS)
1471 {
1472 uint8_t offOpcode = pIemCpu->offOpcode;
1473 *pu16 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1474 pIemCpu->offOpcode = offOpcode + 2;
1475 }
1476 else
1477 *pu16 = 0;
1478 return rcStrict;
1479}
1480
1481
1482/**
1483 * Fetches the next opcode word.
1484 *
1485 * @returns Strict VBox status code.
1486 * @param pIemCpu The IEM state.
1487 * @param pu16 Where to return the opcode word.
1488 */
1489DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16(PIEMCPU pIemCpu, uint16_t *pu16)
1490{
1491 uint8_t const offOpcode = pIemCpu->offOpcode;
1492 if (RT_UNLIKELY(offOpcode + 2 > pIemCpu->cbOpcode))
1493 return iemOpcodeGetNextU16Slow(pIemCpu, pu16);
1494
1495 *pu16 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1496 pIemCpu->offOpcode = offOpcode + 2;
1497 return VINF_SUCCESS;
1498}
1499
1500
1501/**
1502 * Fetches the next opcode word, returns automatically on failure.
1503 *
1504 * @param a_pu16 Where to return the opcode word.
1505 * @remark Implicitly references pIemCpu.
1506 */
1507#define IEM_OPCODE_GET_NEXT_U16(a_pu16) \
1508 do \
1509 { \
1510 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16(pIemCpu, (a_pu16)); \
1511 if (rcStrict2 != VINF_SUCCESS) \
1512 return rcStrict2; \
1513 } while (0)
1514
1515
1516/**
1517 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
1518 *
1519 * @returns Strict VBox status code.
1520 * @param pIemCpu The IEM state.
1521 * @param pu32 Where to return the opcode double word.
1522 */
1523DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32Slow(PIEMCPU pIemCpu, uint32_t *pu32)
1524{
1525 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 2);
1526 if (rcStrict == VINF_SUCCESS)
1527 {
1528 uint8_t offOpcode = pIemCpu->offOpcode;
1529 *pu32 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1530 pIemCpu->offOpcode = offOpcode + 2;
1531 }
1532 else
1533 *pu32 = 0;
1534 return rcStrict;
1535}
1536
1537
1538/**
1539 * Fetches the next opcode word, zero extending it to a double word.
1540 *
1541 * @returns Strict VBox status code.
1542 * @param pIemCpu The IEM state.
1543 * @param pu32 Where to return the opcode double word.
1544 */
1545DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32(PIEMCPU pIemCpu, uint32_t *pu32)
1546{
1547 uint8_t const offOpcode = pIemCpu->offOpcode;
1548 if (RT_UNLIKELY(offOpcode + 2 > pIemCpu->cbOpcode))
1549 return iemOpcodeGetNextU16ZxU32Slow(pIemCpu, pu32);
1550
1551 *pu32 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1552 pIemCpu->offOpcode = offOpcode + 2;
1553 return VINF_SUCCESS;
1554}
1555
1556
1557/**
1558 * Fetches the next opcode word and zero extends it to a double word, returns
1559 * automatically on failure.
1560 *
1561 * @param a_pu32 Where to return the opcode double word.
1562 * @remark Implicitly references pIemCpu.
1563 */
1564#define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) \
1565 do \
1566 { \
1567 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU32(pIemCpu, (a_pu32)); \
1568 if (rcStrict2 != VINF_SUCCESS) \
1569 return rcStrict2; \
1570 } while (0)
1571
1572
1573/**
1574 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
1575 *
1576 * @returns Strict VBox status code.
1577 * @param pIemCpu The IEM state.
1578 * @param pu64 Where to return the opcode quad word.
1579 */
1580DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1581{
1582 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 2);
1583 if (rcStrict == VINF_SUCCESS)
1584 {
1585 uint8_t offOpcode = pIemCpu->offOpcode;
1586 *pu64 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1587 pIemCpu->offOpcode = offOpcode + 2;
1588 }
1589 else
1590 *pu64 = 0;
1591 return rcStrict;
1592}
1593
1594
1595/**
1596 * Fetches the next opcode word, zero extending it to a quad word.
1597 *
1598 * @returns Strict VBox status code.
1599 * @param pIemCpu The IEM state.
1600 * @param pu64 Where to return the opcode quad word.
1601 */
1602DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1603{
1604 uint8_t const offOpcode = pIemCpu->offOpcode;
1605 if (RT_UNLIKELY(offOpcode + 2 > pIemCpu->cbOpcode))
1606 return iemOpcodeGetNextU16ZxU64Slow(pIemCpu, pu64);
1607
1608 *pu64 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
1609 pIemCpu->offOpcode = offOpcode + 2;
1610 return VINF_SUCCESS;
1611}
1612
1613
1614/**
1615 * Fetches the next opcode word and zero extends it to a quad word, returns
1616 * automatically on failure.
1617 *
1618 * @param a_pu64 Where to return the opcode quad word.
1619 * @remark Implicitly references pIemCpu.
1620 */
1621#define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) \
1622 do \
1623 { \
1624 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU64(pIemCpu, (a_pu64)); \
1625 if (rcStrict2 != VINF_SUCCESS) \
1626 return rcStrict2; \
1627 } while (0)
1628
1629
1630/**
1631 * Fetches the next signed word from the opcode stream.
1632 *
1633 * @returns Strict VBox status code.
1634 * @param pIemCpu The IEM state.
1635 * @param pi16 Where to return the signed word.
1636 */
1637DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS16(PIEMCPU pIemCpu, int16_t *pi16)
1638{
1639 return iemOpcodeGetNextU16(pIemCpu, (uint16_t *)pi16);
1640}
1641
1642
1643/**
1644 * Fetches the next signed word from the opcode stream, returning automatically
1645 * on failure.
1646 *
1647 * @param a_pi16 Where to return the signed word.
1648 * @remark Implicitly references pIemCpu.
1649 */
1650#define IEM_OPCODE_GET_NEXT_S16(a_pi16) \
1651 do \
1652 { \
1653 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS16(pIemCpu, (a_pi16)); \
1654 if (rcStrict2 != VINF_SUCCESS) \
1655 return rcStrict2; \
1656 } while (0)
1657
1658
1659/**
1660 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
1661 *
1662 * @returns Strict VBox status code.
1663 * @param pIemCpu The IEM state.
1664 * @param pu32 Where to return the opcode dword.
1665 */
1666DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32Slow(PIEMCPU pIemCpu, uint32_t *pu32)
1667{
1668 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 4);
1669 if (rcStrict == VINF_SUCCESS)
1670 {
1671 uint8_t offOpcode = pIemCpu->offOpcode;
1672 *pu32 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1673 pIemCpu->abOpcode[offOpcode + 1],
1674 pIemCpu->abOpcode[offOpcode + 2],
1675 pIemCpu->abOpcode[offOpcode + 3]);
1676 pIemCpu->offOpcode = offOpcode + 4;
1677 }
1678 else
1679 *pu32 = 0;
1680 return rcStrict;
1681}
1682
1683
1684/**
1685 * Fetches the next opcode dword.
1686 *
1687 * @returns Strict VBox status code.
1688 * @param pIemCpu The IEM state.
1689 * @param pu32 Where to return the opcode double word.
1690 */
1691DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32(PIEMCPU pIemCpu, uint32_t *pu32)
1692{
1693 uint8_t const offOpcode = pIemCpu->offOpcode;
1694 if (RT_UNLIKELY(offOpcode + 4 > pIemCpu->cbOpcode))
1695 return iemOpcodeGetNextU32Slow(pIemCpu, pu32);
1696
1697 *pu32 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1698 pIemCpu->abOpcode[offOpcode + 1],
1699 pIemCpu->abOpcode[offOpcode + 2],
1700 pIemCpu->abOpcode[offOpcode + 3]);
1701 pIemCpu->offOpcode = offOpcode + 4;
1702 return VINF_SUCCESS;
1703}
1704
1705
1706/**
1707 * Fetches the next opcode dword, returns automatically on failure.
1708 *
1709 * @param a_pu32 Where to return the opcode dword.
1710 * @remark Implicitly references pIemCpu.
1711 */
1712#define IEM_OPCODE_GET_NEXT_U32(a_pu32) \
1713 do \
1714 { \
1715 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32(pIemCpu, (a_pu32)); \
1716 if (rcStrict2 != VINF_SUCCESS) \
1717 return rcStrict2; \
1718 } while (0)
1719
1720
1721/**
1722 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
1723 *
1724 * @returns Strict VBox status code.
1725 * @param pIemCpu The IEM state.
1726 * @param pu64 Where to return the opcode dword.
1727 */
1728DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1729{
1730 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 4);
1731 if (rcStrict == VINF_SUCCESS)
1732 {
1733 uint8_t offOpcode = pIemCpu->offOpcode;
1734 *pu64 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1735 pIemCpu->abOpcode[offOpcode + 1],
1736 pIemCpu->abOpcode[offOpcode + 2],
1737 pIemCpu->abOpcode[offOpcode + 3]);
1738 pIemCpu->offOpcode = offOpcode + 4;
1739 }
1740 else
1741 *pu64 = 0;
1742 return rcStrict;
1743}
1744
1745
1746/**
1747 * Fetches the next opcode dword, zero extending it to a quad word.
1748 *
1749 * @returns Strict VBox status code.
1750 * @param pIemCpu The IEM state.
1751 * @param pu64 Where to return the opcode quad word.
1752 */
1753DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1754{
1755 uint8_t const offOpcode = pIemCpu->offOpcode;
1756 if (RT_UNLIKELY(offOpcode + 4 > pIemCpu->cbOpcode))
1757 return iemOpcodeGetNextU32ZxU64Slow(pIemCpu, pu64);
1758
1759 *pu64 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1760 pIemCpu->abOpcode[offOpcode + 1],
1761 pIemCpu->abOpcode[offOpcode + 2],
1762 pIemCpu->abOpcode[offOpcode + 3]);
1763 pIemCpu->offOpcode = offOpcode + 4;
1764 return VINF_SUCCESS;
1765}
1766
1767
1768/**
1769 * Fetches the next opcode dword and zero extends it to a quad word, returns
1770 * automatically on failure.
1771 *
1772 * @param a_pu64 Where to return the opcode quad word.
1773 * @remark Implicitly references pIemCpu.
1774 */
1775#define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) \
1776 do \
1777 { \
1778 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32ZxU64(pIemCpu, (a_pu64)); \
1779 if (rcStrict2 != VINF_SUCCESS) \
1780 return rcStrict2; \
1781 } while (0)
1782
1783
1784/**
1785 * Fetches the next signed double word from the opcode stream.
1786 *
1787 * @returns Strict VBox status code.
1788 * @param pIemCpu The IEM state.
1789 * @param pi32 Where to return the signed double word.
1790 */
1791DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32(PIEMCPU pIemCpu, int32_t *pi32)
1792{
1793 return iemOpcodeGetNextU32(pIemCpu, (uint32_t *)pi32);
1794}
1795
1796/**
1797 * Fetches the next signed double word from the opcode stream, returning
1798 * automatically on failure.
1799 *
1800 * @param a_pi32 Where to return the signed double word.
1801 * @remark Implicitly references pIemCpu.
1802 */
1803#define IEM_OPCODE_GET_NEXT_S32(a_pi32) \
1804 do \
1805 { \
1806 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32(pIemCpu, (a_pi32)); \
1807 if (rcStrict2 != VINF_SUCCESS) \
1808 return rcStrict2; \
1809 } while (0)
1810
1811
1812/**
1813 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
1814 *
1815 * @returns Strict VBox status code.
1816 * @param pIemCpu The IEM state.
1817 * @param pu64 Where to return the opcode qword.
1818 */
1819DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS32SxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1820{
1821 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 4);
1822 if (rcStrict == VINF_SUCCESS)
1823 {
1824 uint8_t offOpcode = pIemCpu->offOpcode;
1825 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1826 pIemCpu->abOpcode[offOpcode + 1],
1827 pIemCpu->abOpcode[offOpcode + 2],
1828 pIemCpu->abOpcode[offOpcode + 3]);
1829 pIemCpu->offOpcode = offOpcode + 4;
1830 }
1831 else
1832 *pu64 = 0;
1833 return rcStrict;
1834}
1835
1836
1837/**
1838 * Fetches the next opcode dword, sign extending it into a quad word.
1839 *
1840 * @returns Strict VBox status code.
1841 * @param pIemCpu The IEM state.
1842 * @param pu64 Where to return the opcode quad word.
1843 */
1844DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32SxU64(PIEMCPU pIemCpu, uint64_t *pu64)
1845{
1846 uint8_t const offOpcode = pIemCpu->offOpcode;
1847 if (RT_UNLIKELY(offOpcode + 4 > pIemCpu->cbOpcode))
1848 return iemOpcodeGetNextS32SxU64Slow(pIemCpu, pu64);
1849
1850 int32_t i32 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
1851 pIemCpu->abOpcode[offOpcode + 1],
1852 pIemCpu->abOpcode[offOpcode + 2],
1853 pIemCpu->abOpcode[offOpcode + 3]);
1854 *pu64 = i32;
1855 pIemCpu->offOpcode = offOpcode + 4;
1856 return VINF_SUCCESS;
1857}
1858
1859
1860/**
1861 * Fetches the next opcode double word and sign extends it to a quad word,
1862 * returns automatically on failure.
1863 *
1864 * @param a_pu64 Where to return the opcode quad word.
1865 * @remark Implicitly references pIemCpu.
1866 */
1867#define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) \
1868 do \
1869 { \
1870 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32SxU64(pIemCpu, (a_pu64)); \
1871 if (rcStrict2 != VINF_SUCCESS) \
1872 return rcStrict2; \
1873 } while (0)
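
/*
 * Worked example (illustrative, not part of the original file): the displacement bytes
 * f0 ff ff ff decode as i32 = -16, so IEM_OPCODE_GET_NEXT_S32_SX_U64 yields
 * UINT64_C(0xfffffffffffffff0), whereas the zero-extending IEM_OPCODE_GET_NEXT_U32_ZX_U64
 * above would yield UINT64_C(0x00000000fffffff0) for the same bytes.
 */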
1874
1875
1876/**
1877 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
1878 *
1879 * @returns Strict VBox status code.
1880 * @param pIemCpu The IEM state.
1881 * @param pu64 Where to return the opcode qword.
1882 */
1883DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
1884{
1885 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 8);
1886 if (rcStrict == VINF_SUCCESS)
1887 {
1888 uint8_t offOpcode = pIemCpu->offOpcode;
1889 *pu64 = RT_MAKE_U64_FROM_U8(pIemCpu->abOpcode[offOpcode],
1890 pIemCpu->abOpcode[offOpcode + 1],
1891 pIemCpu->abOpcode[offOpcode + 2],
1892 pIemCpu->abOpcode[offOpcode + 3],
1893 pIemCpu->abOpcode[offOpcode + 4],
1894 pIemCpu->abOpcode[offOpcode + 5],
1895 pIemCpu->abOpcode[offOpcode + 6],
1896 pIemCpu->abOpcode[offOpcode + 7]);
1897 pIemCpu->offOpcode = offOpcode + 8;
1898 }
1899 else
1900 *pu64 = 0;
1901 return rcStrict;
1902}
1903
1904
1905/**
1906 * Fetches the next opcode qword.
1907 *
1908 * @returns Strict VBox status code.
1909 * @param pIemCpu The IEM state.
1910 * @param pu64 Where to return the opcode qword.
1911 */
1912DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU64(PIEMCPU pIemCpu, uint64_t *pu64)
1913{
1914 uint8_t const offOpcode = pIemCpu->offOpcode;
1915 if (RT_UNLIKELY(offOpcode + 8 > pIemCpu->cbOpcode))
1916 return iemOpcodeGetNextU64Slow(pIemCpu, pu64);
1917
1918 *pu64 = RT_MAKE_U64_FROM_U8(pIemCpu->abOpcode[offOpcode],
1919 pIemCpu->abOpcode[offOpcode + 1],
1920 pIemCpu->abOpcode[offOpcode + 2],
1921 pIemCpu->abOpcode[offOpcode + 3],
1922 pIemCpu->abOpcode[offOpcode + 4],
1923 pIemCpu->abOpcode[offOpcode + 5],
1924 pIemCpu->abOpcode[offOpcode + 6],
1925 pIemCpu->abOpcode[offOpcode + 7]);
1926 pIemCpu->offOpcode = offOpcode + 8;
1927 return VINF_SUCCESS;
1928}
1929
1930
1931/**
1932 * Fetches the next opcode quad word, returns automatically on failure.
1933 *
1934 * @param a_pu64 Where to return the opcode quad word.
1935 * @remark Implicitly references pIemCpu.
1936 */
1937#define IEM_OPCODE_GET_NEXT_U64(a_pu64) \
1938 do \
1939 { \
1940 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU64(pIemCpu, (a_pu64)); \
1941 if (rcStrict2 != VINF_SUCCESS) \
1942 return rcStrict2; \
1943 } while (0)
1944
1945
1946/** @name Misc Worker Functions.
1947 * @{
1948 */
1949
1950
1951/**
1952 * Validates a new SS segment.
1953 *
1954 * @returns VBox strict status code.
1955 * @param pIemCpu The IEM per CPU instance data.
1956 * @param pCtx The CPU context.
1957 * @param NewSS The new SS selector.
1958 * @param uCpl The CPL to load the stack for.
1959 * @param pDesc Where to return the descriptor.
1960 */
1961IEM_STATIC VBOXSTRICTRC iemMiscValidateNewSS(PIEMCPU pIemCpu, PCCPUMCTX pCtx, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc)
1962{
1963 NOREF(pCtx);
1964
1965 /* Null selectors are not allowed (we're not called for dispatching
1966 interrupts with SS=0 in long mode). */
1967 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
1968 {
1969 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #TS(0)\n", NewSS));
1970 return iemRaiseTaskSwitchFault0(pIemCpu);
1971 }
1972
1973 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
1974 if ((NewSS & X86_SEL_RPL) != uCpl)
1975 {
1976 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differs -> #TS\n", NewSS, uCpl));
1977 return iemRaiseTaskSwitchFaultBySelector(pIemCpu, NewSS);
1978 }
1979
1980 /*
1981 * Read the descriptor.
1982 */
1983 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, pDesc, NewSS, X86_XCPT_TS);
1984 if (rcStrict != VINF_SUCCESS)
1985 return rcStrict;
1986
1987 /*
1988 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
1989 */
1990 if (!pDesc->Legacy.Gen.u1DescType)
1991 {
1992 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
1993 return iemRaiseTaskSwitchFaultBySelector(pIemCpu, NewSS);
1994 }
1995
1996 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
1997 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
1998 {
1999 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
2000 return iemRaiseTaskSwitchFaultBySelector(pIemCpu, NewSS);
2001 }
2002 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
2003 {
2004 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differs -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
2005 return iemRaiseTaskSwitchFaultBySelector(pIemCpu, NewSS);
2006 }
2007
2008 /* Is it there? */
2009 /** @todo testcase: Is this checked before the canonical / limit check below? */
2010 if (!pDesc->Legacy.Gen.u1Present)
2011 {
2012 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
2013 return iemRaiseSelectorNotPresentBySelector(pIemCpu, NewSS);
2014 }
2015
2016 return VINF_SUCCESS;
2017}
2018
2019
2020/**
2021 * Gets the correct EFLAGS regardless of whether PATM stores parts of them or
2022 * not.
2023 *
2024 * @param a_pIemCpu The IEM per CPU data.
2025 * @param a_pCtx The CPU context.
2026 */
2027#ifdef VBOX_WITH_RAW_MODE_NOT_R0
2028# define IEMMISC_GET_EFL(a_pIemCpu, a_pCtx) \
2029 ( IEM_VERIFICATION_ENABLED(a_pIemCpu) \
2030 ? (a_pCtx)->eflags.u \
2031 : CPUMRawGetEFlags(IEMCPU_TO_VMCPU(a_pIemCpu)) )
2032#else
2033# define IEMMISC_GET_EFL(a_pIemCpu, a_pCtx) \
2034 ( (a_pCtx)->eflags.u )
2035#endif
2036
2037/**
2038 * Updates the EFLAGS in the correct manner wrt. PATM.
2039 *
2040 * @param a_pIemCpu The IEM per CPU data.
2041 * @param a_pCtx The CPU context.
2042 * @param a_fEfl The new EFLAGS.
2043 */
2044#ifdef VBOX_WITH_RAW_MODE_NOT_R0
2045# define IEMMISC_SET_EFL(a_pIemCpu, a_pCtx, a_fEfl) \
2046 do { \
2047 if (IEM_VERIFICATION_ENABLED(a_pIemCpu)) \
2048 (a_pCtx)->eflags.u = (a_fEfl); \
2049 else \
2050 CPUMRawSetEFlags(IEMCPU_TO_VMCPU(a_pIemCpu), a_fEfl); \
2051 } while (0)
2052#else
2053# define IEMMISC_SET_EFL(a_pIemCpu, a_pCtx, a_fEfl) \
2054 do { \
2055 (a_pCtx)->eflags.u = (a_fEfl); \
2056 } while (0)
2057#endif
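
/*
 * Usage sketch (illustrative, not part of the original file): code that needs to modify
 * the guest flags in a PATM-safe way goes through the accessor pair instead of touching
 * pCtx->eflags.u directly, e.g. to clear the interrupt flag:
 *
 *      uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx);
 *      fEfl &= ~X86_EFL_IF;
 *      IEMMISC_SET_EFL(pIemCpu, pCtx, fEfl);
 */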
2058
2059
2060/** @} */
2061
2062/** @name Raising Exceptions.
2063 *
2064 * @{
2065 */
2066
2067/** @name IEM_XCPT_FLAGS_XXX - flags for iemRaiseXcptOrInt.
2068 * @{ */
2069/** CPU exception. */
2070#define IEM_XCPT_FLAGS_T_CPU_XCPT RT_BIT_32(0)
2071/** External interrupt (from PIC, APIC, whatever). */
2072#define IEM_XCPT_FLAGS_T_EXT_INT RT_BIT_32(1)
2073/** Software interrupt (int or into, not bound).
2074 * Returns to the following instruction */
2075#define IEM_XCPT_FLAGS_T_SOFT_INT RT_BIT_32(2)
2076/** Takes an error code. */
2077#define IEM_XCPT_FLAGS_ERR RT_BIT_32(3)
2078/** Takes a CR2. */
2079#define IEM_XCPT_FLAGS_CR2 RT_BIT_32(4)
2080/** Generated by the breakpoint instruction. */
2081#define IEM_XCPT_FLAGS_BP_INSTR RT_BIT_32(5)
2082/** Generated by a DRx instruction breakpoint and RF should be cleared. */
2083#define IEM_XCPT_FLAGS_DRx_INSTR_BP RT_BIT_32(6)
2084/** @} */
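
/*
 * Illustrative combinations (not part of the original file): a hardware page fault would
 * typically be raised with IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2
 * (error code plus faulting address), while a software INT n passes IEM_XCPT_FLAGS_T_SOFT_INT
 * so that the pushed return address points at the following instruction.
 */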
2085
2086
2087/**
2088 * Loads the specified stack far pointer from the TSS.
2089 *
2090 * @returns VBox strict status code.
2091 * @param pIemCpu The IEM per CPU instance data.
2092 * @param pCtx The CPU context.
2093 * @param uCpl The CPL to load the stack for.
2094 * @param pSelSS Where to return the new stack segment.
2095 * @param puEsp Where to return the new stack pointer.
2096 */
2097IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PIEMCPU pIemCpu, PCCPUMCTX pCtx, uint8_t uCpl,
2098 PRTSEL pSelSS, uint32_t *puEsp)
2099{
2100 VBOXSTRICTRC rcStrict;
2101 Assert(uCpl < 4);
2102 *puEsp = 0; /* make gcc happy */
2103 *pSelSS = 0; /* make gcc happy */
2104
2105 switch (pCtx->tr.Attr.n.u4Type)
2106 {
2107 /*
2108 * 16-bit TSS (X86TSS16).
2109 */
2110 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed();
2111 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
2112 {
2113 uint32_t off = uCpl * 4 + 2;
2114 if (off + 4 > pCtx->tr.u32Limit)
2115 {
2116 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pCtx->tr.u32Limit));
2117 return iemRaiseTaskSwitchFaultCurrentTSS(pIemCpu);
2118 }
2119
2120 uint32_t u32Tmp = 0; /* gcc maybe... */
2121 rcStrict = iemMemFetchSysU32(pIemCpu, &u32Tmp, UINT8_MAX, pCtx->tr.u64Base + off);
2122 if (rcStrict == VINF_SUCCESS)
2123 {
2124 *puEsp = RT_LOWORD(u32Tmp);
2125 *pSelSS = RT_HIWORD(u32Tmp);
2126 return VINF_SUCCESS;
2127 }
2128 break;
2129 }
2130
2131 /*
2132 * 32-bit TSS (X86TSS32).
2133 */
2134 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed();
2135 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
2136 {
2137 uint32_t off = uCpl * 8 + 4;
2138 if (off + 7 > pCtx->tr.u32Limit)
2139 {
2140 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS32\n", uCpl, pCtx->tr.u32Limit));
2141 return iemRaiseTaskSwitchFaultCurrentTSS(pIemCpu);
2142 }
2143
2144 uint64_t u64Tmp;
2145 rcStrict = iemMemFetchSysU64(pIemCpu, &u64Tmp, UINT8_MAX, pCtx->tr.u64Base + off);
2146 if (rcStrict == VINF_SUCCESS)
2147 {
2148 *puEsp = u64Tmp & UINT32_MAX;
2149 *pSelSS = (RTSEL)(u64Tmp >> 32);
2150 return VINF_SUCCESS;
2151 }
2152 break;
2153 }
2154
2155 default:
2156 AssertFailedReturn(VERR_IEM_IPE_4);
2157 }
2158 return rcStrict;
2159}
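
/*
 * Worked example (illustrative, not part of the original file): for a 32-bit TSS and
 * uCpl=1 the qword at TSS offset 1*8 + 4 = 12 is fetched, i.e. the esp1/ss1 pair, with
 * ESP taken from the low dword and SS from the high dword. For a 16-bit TSS and uCpl=0
 * the dword at offset 0*4 + 2 = 2 is fetched, i.e. sp0 in the low word and ss0 in the
 * high word.
 */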
2160
2161
2162/**
2163 * Loads the specified stack pointer from the 64-bit TSS.
2164 *
2165 * @returns VBox strict status code.
2166 * @param pIemCpu The IEM per CPU instance data.
2167 * @param pCtx The CPU context.
2168 * @param uCpl The CPL to load the stack for.
2169 * @param uIst The interrupt stack table index, 0 if to use uCpl.
2170 * @param puRsp Where to return the new stack pointer.
2171 */
2172IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss64(PIEMCPU pIemCpu, PCCPUMCTX pCtx, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp)
2173{
2174 Assert(uCpl < 4);
2175 Assert(uIst < 8);
2176 *puRsp = 0; /* make gcc happy */
2177
2178 AssertReturn(pCtx->tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
2179
2180 uint32_t off;
2181 if (uIst)
2182 off = (uIst - 1) * sizeof(uint64_t) + RT_OFFSETOF(X86TSS64, ist1);
2183 else
2184 off = uCpl * sizeof(uint64_t) + RT_OFFSETOF(X86TSS64, rsp0);
2185 if (off + sizeof(uint64_t) > pCtx->tr.u32Limit)
2186 {
2187 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pCtx->tr.u32Limit));
2188 return iemRaiseTaskSwitchFaultCurrentTSS(pIemCpu);
2189 }
2190
2191 return iemMemFetchSysU64(pIemCpu, puRsp, UINT8_MAX, pCtx->tr.u64Base + off);
2192}
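
/*
 * Worked example (illustrative, not part of the original file): with uIst=0 and uCpl=2
 * the function reads RSP2 at offset 2*8 + RT_OFFSETOF(X86TSS64, rsp0); with uIst=1 it
 * instead reads IST1 at RT_OFFSETOF(X86TSS64, ist1), ignoring uCpl entirely.
 */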
2193
2194
2195/**
2196 * Adjust the CPU state according to the exception being raised.
2197 *
2198 * @param pCtx The CPU context.
2199 * @param u8Vector The exception that has been raised.
2200 */
2201DECLINLINE(void) iemRaiseXcptAdjustState(PCPUMCTX pCtx, uint8_t u8Vector)
2202{
2203 switch (u8Vector)
2204 {
2205 case X86_XCPT_DB:
2206 pCtx->dr[7] &= ~X86_DR7_GD;
2207 break;
2208 /** @todo Read the AMD and Intel exception reference... */
2209 }
2210}
2211
2212
2213/**
2214 * Implements exceptions and interrupts for real mode.
2215 *
2216 * @returns VBox strict status code.
2217 * @param pIemCpu The IEM per CPU instance data.
2218 * @param pCtx The CPU context.
2219 * @param cbInstr The number of bytes to offset rIP by in the return
2220 * address.
2221 * @param u8Vector The interrupt / exception vector number.
2222 * @param fFlags The flags.
2223 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2224 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2225 */
2226IEM_STATIC VBOXSTRICTRC
2227iemRaiseXcptOrIntInRealMode(PIEMCPU pIemCpu,
2228 PCPUMCTX pCtx,
2229 uint8_t cbInstr,
2230 uint8_t u8Vector,
2231 uint32_t fFlags,
2232 uint16_t uErr,
2233 uint64_t uCr2)
2234{
2235 AssertReturn(pIemCpu->enmCpuMode == IEMMODE_16BIT, VERR_IEM_IPE_6);
2236 NOREF(uErr); NOREF(uCr2);
2237
2238 /*
2239 * Read the IDT entry.
2240 */
2241 if (pCtx->idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
2242 {
2243 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
2244 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2245 }
2246 RTFAR16 Idte;
2247 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pIemCpu, (uint32_t *)&Idte, UINT8_MAX,
2248 pCtx->idtr.pIdt + UINT32_C(4) * u8Vector);
2249 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2250 return rcStrict;
2251
2252 /*
2253 * Push the stack frame.
2254 */
2255 uint16_t *pu16Frame;
2256 uint64_t uNewRsp;
2257 rcStrict = iemMemStackPushBeginSpecial(pIemCpu, 6, (void **)&pu16Frame, &uNewRsp);
2258 if (rcStrict != VINF_SUCCESS)
2259 return rcStrict;
2260
2261 uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx);
2262 pu16Frame[2] = (uint16_t)fEfl;
2263 pu16Frame[1] = (uint16_t)pCtx->cs.Sel;
2264 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->ip + cbInstr : pCtx->ip;
2265 rcStrict = iemMemStackPushCommitSpecial(pIemCpu, pu16Frame, uNewRsp);
2266 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2267 return rcStrict;
2268
2269 /*
2270 * Load the vector address into cs:ip and make exception specific state
2271 * adjustments.
2272 */
2273 pCtx->cs.Sel = Idte.sel;
2274 pCtx->cs.ValidSel = Idte.sel;
2275 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
2276 pCtx->cs.u64Base = (uint32_t)Idte.sel << 4;
2277 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
2278 pCtx->rip = Idte.off;
2279 fEfl &= ~X86_EFL_IF;
2280 IEMMISC_SET_EFL(pIemCpu, pCtx, fEfl);
2281
2282 /** @todo do we actually do this in real mode? */
2283 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
2284 iemRaiseXcptAdjustState(pCtx, u8Vector);
2285
2286 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
2287}
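
/*
 * Worked example (illustrative, not part of the original file): raising vector 0x0D in
 * real mode reads the 4-byte IVT entry at idtr.pIdt + 4 * 0x0D, pushes a 6-byte frame
 * (FLAGS, CS, IP -- or the IP of the following instruction for software INTs), clears
 * EFLAGS.IF and continues at the cs:ip pair from that entry, the new CS base being
 * selector << 4.
 */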
2288
2289
2290/**
2291 * Loads a NULL data selector into a segment register when coming from V8086 mode.
2292 *
2293 * @param pIemCpu The IEM per CPU instance data.
2294 * @param pSReg Pointer to the segment register.
2295 */
2296IEM_STATIC void iemHlpLoadNullDataSelectorOnV86Xcpt(PIEMCPU pIemCpu, PCPUMSELREG pSReg)
2297{
2298 pSReg->Sel = 0;
2299 pSReg->ValidSel = 0;
2300 if (IEM_IS_GUEST_CPU_INTEL(pIemCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
2301 {
2302 /* VT-x (Intel 3960x) doesn't change the base and limit; it only clears and sets the following attributes. */
2303 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
2304 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
2305 }
2306 else
2307 {
2308 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2309 /** @todo check this on AMD-V */
2310 pSReg->u64Base = 0;
2311 pSReg->u32Limit = 0;
2312 }
2313}
2314
2315
2316/**
2317 * Loads a segment selector during a task switch in V8086 mode.
2318 *
2319 * @param pIemCpu The IEM per CPU instance data.
2320 * @param pSReg Pointer to the segment register.
2321 * @param uSel The selector value to load.
2322 */
2323IEM_STATIC void iemHlpLoadSelectorInV86Mode(PIEMCPU pIemCpu, PCPUMSELREG pSReg, uint16_t uSel)
2324{
2325 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
2326 pSReg->Sel = uSel;
2327 pSReg->ValidSel = uSel;
2328 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2329 pSReg->u64Base = uSel << 4;
2330 pSReg->u32Limit = 0xffff;
2331 pSReg->Attr.u = 0xf3;
2332}
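
/*
 * Worked example (illustrative, not part of the original file): loading uSel=0x1234 here
 * yields u64Base = 0x12340 (selector << 4), u32Limit = 0xffff and attributes 0xf3
 * (present, DPL=3, accessed read/write data), which is what the cited Intel spec.
 * section requires for segment registers in virtual-8086 mode.
 */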
2333
2334
2335/**
2336 * Loads a NULL data selector into a selector register, both the hidden and
2337 * visible parts, in protected mode.
2338 *
2339 * @param pIemCpu The IEM state of the calling EMT.
2340 * @param pSReg Pointer to the segment register.
2341 * @param uRpl The RPL.
2342 */
2343IEM_STATIC void iemHlpLoadNullDataSelectorProt(PIEMCPU pIemCpu, PCPUMSELREG pSReg, RTSEL uRpl)
2344{
2345 /** @todo Testcase: write a testcase checking what happens when loading a NULL
2346 * data selector in protected mode. */
2347 pSReg->Sel = uRpl;
2348 pSReg->ValidSel = uRpl;
2349 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2350 if (IEM_IS_GUEST_CPU_INTEL(pIemCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
2351 {
2352 /* VT-x (Intel 3960x) observed doing something like this. */
2353 pSReg->Attr.u = X86DESCATTR_UNUSABLE | X86DESCATTR_G | X86DESCATTR_D | (pIemCpu->uCpl << X86DESCATTR_DPL_SHIFT);
2354 pSReg->u32Limit = UINT32_MAX;
2355 pSReg->u64Base = 0;
2356 }
2357 else
2358 {
2359 pSReg->Attr.u = X86DESCATTR_UNUSABLE;
2360 pSReg->u32Limit = 0;
2361 pSReg->u64Base = 0;
2362 }
2363}
2364
2365
2366/**
2367 * Loads a segment selector during a task switch in protected mode.
2368 *
2369 * In this task switch scenario, we would throw \#TS exceptions rather than
2370 * \#GPs.
2371 *
2372 * @returns VBox strict status code.
2373 * @param pIemCpu The IEM per CPU instance data.
2374 * @param pSReg Pointer to the segment register.
2375 * @param uSel The new selector value.
2376 *
2377 * @remarks This does _not_ handle CS or SS.
2378 * @remarks This expects pIemCpu->uCpl to be up to date.
2379 */
2380IEM_STATIC VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PIEMCPU pIemCpu, PCPUMSELREG pSReg, uint16_t uSel)
2381{
2382 Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
2383
2384 /* Null data selector. */
2385 if (!(uSel & X86_SEL_MASK_OFF_RPL))
2386 {
2387 iemHlpLoadNullDataSelectorProt(pIemCpu, pSReg, uSel);
2388 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pSReg));
2389 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_HIDDEN_SEL_REGS);
2390 return VINF_SUCCESS;
2391 }
2392
2393 /* Fetch the descriptor. */
2394 IEMSELDESC Desc;
2395 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uSel, X86_XCPT_TS);
2396 if (rcStrict != VINF_SUCCESS)
2397 {
2398 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
2399 VBOXSTRICTRC_VAL(rcStrict)));
2400 return rcStrict;
2401 }
2402
2403 /* Must be a data segment or readable code segment. */
2404 if ( !Desc.Legacy.Gen.u1DescType
2405 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
2406 {
2407 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
2408 Desc.Legacy.Gen.u4Type));
2409 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uSel & X86_SEL_MASK_OFF_RPL);
2410 }
2411
2412 /* Check privileges for data segments and non-conforming code segments. */
2413 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2414 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2415 {
2416 /* The RPL and the new CPL must be less than or equal to the DPL. */
2417 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
2418 || (pIemCpu->uCpl > Desc.Legacy.Gen.u2Dpl))
2419 {
2420 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
2421 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
2422 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uSel & X86_SEL_MASK_OFF_RPL);
2423 }
2424 }
2425
2426 /* Is it there? */
2427 if (!Desc.Legacy.Gen.u1Present)
2428 {
2429 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
2430 return iemRaiseSelectorNotPresentWithErr(pIemCpu, uSel & X86_SEL_MASK_OFF_RPL);
2431 }
2432
2433 /* The base and limit. */
2434 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
2435 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
2436
2437 /*
2438 * Ok, everything checked out fine. Now set the accessed bit before
2439 * committing the result into the registers.
2440 */
2441 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2442 {
2443 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uSel);
2444 if (rcStrict != VINF_SUCCESS)
2445 return rcStrict;
2446 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2447 }
2448
2449 /* Commit */
2450 pSReg->Sel = uSel;
2451 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
2452 pSReg->u32Limit = cbLimit;
2453 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
2454 pSReg->ValidSel = uSel;
2455 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2456 if (IEM_IS_GUEST_CPU_INTEL(pIemCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
2457 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
2458
2459 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pSReg));
2460 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_HIDDEN_SEL_REGS);
2461 return VINF_SUCCESS;
2462}
2463
2464
2465/**
2466 * Performs a task switch.
2467 *
2468 * If the task switch is the result of a JMP, CALL or IRET instruction, the
2469 * caller is responsible for performing the necessary checks (like DPL, TSS
2470 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
2471 * reference for JMP, CALL, IRET.
2472 *
2473 * If the task switch is due to a software interrupt or hardware exception,
2474 * the caller is responsible for validating the TSS selector and descriptor. See
2475 * Intel Instruction reference for INT n.
2476 *
2477 * @returns VBox strict status code.
2478 * @param pIemCpu The IEM per CPU instance data.
2479 * @param pCtx The CPU context.
2480 * @param enmTaskSwitch What caused this task switch.
2481 * @param uNextEip The EIP effective after the task switch.
2482 * @param fFlags The flags.
2483 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2484 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2485 * @param SelTSS The TSS selector of the new task.
2486 * @param pNewDescTSS Pointer to the new TSS descriptor.
2487 */
2488IEM_STATIC VBOXSTRICTRC
2489iemTaskSwitch(PIEMCPU pIemCpu,
2490 PCPUMCTX pCtx,
2491 IEMTASKSWITCH enmTaskSwitch,
2492 uint32_t uNextEip,
2493 uint32_t fFlags,
2494 uint16_t uErr,
2495 uint64_t uCr2,
2496 RTSEL SelTSS,
2497 PIEMSELDESC pNewDescTSS)
2498{
2499 Assert(!IEM_IS_REAL_MODE(pIemCpu));
2500 Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
2501
2502 uint32_t const uNewTSSType = pNewDescTSS->Legacy.Gate.u4Type;
2503 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2504 || uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2505 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2506 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2507
2508 bool const fIsNewTSS386 = ( uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2509 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2510
2511 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTSS=%#x fIsNewTSS386=%RTbool EIP=%#RGv uNextEip=%#RGv\n", enmTaskSwitch, SelTSS,
2512 fIsNewTSS386, pCtx->eip, uNextEip));
2513
2514 /* Update CR2 in case it's a page-fault. */
2515 /** @todo This should probably be done much earlier in IEM/PGM. See
2516 * @bugref{5653#c49}. */
2517 if (fFlags & IEM_XCPT_FLAGS_CR2)
2518 pCtx->cr2 = uCr2;
2519
2520 /*
2521 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
2522 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
2523 */
2524 uint32_t const uNewTSSLimit = pNewDescTSS->Legacy.Gen.u16LimitLow | (pNewDescTSS->Legacy.Gen.u4LimitHigh << 16);
2525 uint32_t const uNewTSSLimitMin = fIsNewTSS386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
2526 if (uNewTSSLimit < uNewTSSLimitMin)
2527 {
2528 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTSSLimit=%#x uNewTSSLimitMin=%#x -> #TS\n",
2529 enmTaskSwitch, uNewTSSLimit, uNewTSSLimitMin));
2530 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
2531 }
2532
2533 /*
2534 * Check the current TSS limit. The last written byte to the current TSS during the
2535 * task switch will be 2 bytes at offset 0x5C (32-bit) and 1 byte at offset 0x28 (16-bit).
2536 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2537 *
2538 * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
2539 * end up with smaller than "legal" TSS limits.
2540 */
2541 uint32_t const uCurTSSLimit = pCtx->tr.u32Limit;
2542 uint32_t const uCurTSSLimitMin = fIsNewTSS386 ? 0x5F : 0x29;
2543 if (uCurTSSLimit < uCurTSSLimitMin)
2544 {
2545 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTSSLimit=%#x uCurTSSLimitMin=%#x -> #TS\n",
2546 enmTaskSwitch, uCurTSSLimit, uCurTSSLimitMin));
2547 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
2548 }
2549
2550 /*
2551 * Verify that the new TSS can be accessed and map it. Map only the required contents
2552 * and not the entire TSS.
2553 */
2554 void *pvNewTSS;
2555 uint32_t cbNewTSS = uNewTSSLimitMin + 1;
2556 RTGCPTR GCPtrNewTSS = X86DESC_BASE(&pNewDescTSS->Legacy);
2557 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, IntRedirBitmap) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
2558 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
2559 * not perform correct translation if this happens. See Intel spec. 7.2.1
2560 * "Task-State Segment" */
2561 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, &pvNewTSS, cbNewTSS, UINT8_MAX, GCPtrNewTSS, IEM_ACCESS_SYS_RW);
2562 if (rcStrict != VINF_SUCCESS)
2563 {
2564 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTSS=%u uNewTSSLimit=%u rc=%Rrc\n", enmTaskSwitch,
2565 cbNewTSS, uNewTSSLimit, VBOXSTRICTRC_VAL(rcStrict)));
2566 return rcStrict;
2567 }
2568
2569 /*
2570 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
2571 */
2572 uint32_t u32EFlags = pCtx->eflags.u32;
2573 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
2574 || enmTaskSwitch == IEMTASKSWITCH_IRET)
2575 {
2576 PX86DESC pDescCurTSS;
2577 rcStrict = iemMemMap(pIemCpu, (void **)&pDescCurTSS, sizeof(*pDescCurTSS), UINT8_MAX,
2578 pCtx->gdtr.pGdt + (pCtx->tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
2579 if (rcStrict != VINF_SUCCESS)
2580 {
2581 Log(("iemTaskSwitch: Failed to read current TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2582 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2583 return rcStrict;
2584 }
2585
2586 pDescCurTSS->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2587 rcStrict = iemMemCommitAndUnmap(pIemCpu, pDescCurTSS, IEM_ACCESS_SYS_RW);
2588 if (rcStrict != VINF_SUCCESS)
2589 {
2590 Log(("iemTaskSwitch: Failed to commit current TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2591 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2592 return rcStrict;
2593 }
2594
2595 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
2596 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
2597 {
2598 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2599 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2600 u32EFlags &= ~X86_EFL_NT;
2601 }
2602 }
2603
2604 /*
2605 * Save the CPU state into the current TSS.
2606 */
2607 RTGCPTR GCPtrCurTSS = pCtx->tr.u64Base;
2608 if (GCPtrNewTSS == GCPtrCurTSS)
2609 {
2610 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTSS));
2611 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
2612 pCtx->cr3, pCtx->eip, pCtx->eflags.u32, pCtx->eax, pCtx->esp, pCtx->ebp, pCtx->cs.Sel, pCtx->ss.Sel, pCtx->ldtr.Sel));
2613 }
2614 if (fIsNewTSS386)
2615 {
2616 /*
2617 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
2618 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2619 */
2620 void *pvCurTSS32;
2621 uint32_t offCurTSS = RT_OFFSETOF(X86TSS32, eip);
2622 uint32_t cbCurTSS = RT_OFFSETOF(X86TSS32, selLdt) - RT_OFFSETOF(X86TSS32, eip);
2623 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
2624 rcStrict = iemMemMap(pIemCpu, &pvCurTSS32, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
2625 if (rcStrict != VINF_SUCCESS)
2626 {
2627 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
2628 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
2629 return rcStrict;
2630 }
2631
2632 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS..offCurTSS + cbCurTSS). */
2633 PX86TSS32 pCurTSS32 = (PX86TSS32)((uintptr_t)pvCurTSS32 - offCurTSS);
2634 pCurTSS32->eip = uNextEip;
2635 pCurTSS32->eflags = u32EFlags;
2636 pCurTSS32->eax = pCtx->eax;
2637 pCurTSS32->ecx = pCtx->ecx;
2638 pCurTSS32->edx = pCtx->edx;
2639 pCurTSS32->ebx = pCtx->ebx;
2640 pCurTSS32->esp = pCtx->esp;
2641 pCurTSS32->ebp = pCtx->ebp;
2642 pCurTSS32->esi = pCtx->esi;
2643 pCurTSS32->edi = pCtx->edi;
2644 pCurTSS32->es = pCtx->es.Sel;
2645 pCurTSS32->cs = pCtx->cs.Sel;
2646 pCurTSS32->ss = pCtx->ss.Sel;
2647 pCurTSS32->ds = pCtx->ds.Sel;
2648 pCurTSS32->fs = pCtx->fs.Sel;
2649 pCurTSS32->gs = pCtx->gs.Sel;
2650
2651 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvCurTSS32, IEM_ACCESS_SYS_RW);
2652 if (rcStrict != VINF_SUCCESS)
2653 {
2654 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2655 VBOXSTRICTRC_VAL(rcStrict)));
2656 return rcStrict;
2657 }
2658 }
2659 else
2660 {
2661 /*
2662 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
2663 */
2664 void *pvCurTSS16;
2665 uint32_t offCurTSS = RT_OFFSETOF(X86TSS16, ip);
2666 uint32_t cbCurTSS = RT_OFFSETOF(X86TSS16, selLdt) - RT_OFFSETOF(X86TSS16, ip);
2667 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
2668 rcStrict = iemMemMap(pIemCpu, &pvCurTSS16, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
2669 if (rcStrict != VINF_SUCCESS)
2670 {
2671 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
2672 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
2673 return rcStrict;
2674 }
2675
2676 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS..offCurTSS + cbCurTSS). */
2677 PX86TSS16 pCurTSS16 = (PX86TSS16)((uintptr_t)pvCurTSS16 - offCurTSS);
2678 pCurTSS16->ip = uNextEip;
2679 pCurTSS16->flags = u32EFlags;
2680 pCurTSS16->ax = pCtx->ax;
2681 pCurTSS16->cx = pCtx->cx;
2682 pCurTSS16->dx = pCtx->dx;
2683 pCurTSS16->bx = pCtx->bx;
2684 pCurTSS16->sp = pCtx->sp;
2685 pCurTSS16->bp = pCtx->bp;
2686 pCurTSS16->si = pCtx->si;
2687 pCurTSS16->di = pCtx->di;
2688 pCurTSS16->es = pCtx->es.Sel;
2689 pCurTSS16->cs = pCtx->cs.Sel;
2690 pCurTSS16->ss = pCtx->ss.Sel;
2691 pCurTSS16->ds = pCtx->ds.Sel;
2692
2693 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvCurTSS16, IEM_ACCESS_SYS_RW);
2694 if (rcStrict != VINF_SUCCESS)
2695 {
2696 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2697 VBOXSTRICTRC_VAL(rcStrict)));
2698 return rcStrict;
2699 }
2700 }
2701
2702 /*
2703 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
2704 */
2705 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2706 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2707 {
2708 /* 16 or 32-bit TSS doesn't matter, we only access the first, common 16-bit field (selPrev) here. */
2709 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTSS;
2710 pNewTSS->selPrev = pCtx->tr.Sel;
2711 }
2712
2713 /*
2714 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky,
2715 * it's done further below with error handling (e.g. CR3 changes will go through PGM).
2716 */
2717 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
2718 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
2719 bool fNewDebugTrap;
2720 if (fIsNewTSS386)
2721 {
2722 PX86TSS32 pNewTSS32 = (PX86TSS32)pvNewTSS;
2723 uNewCr3 = (pCtx->cr0 & X86_CR0_PG) ? pNewTSS32->cr3 : 0;
2724 uNewEip = pNewTSS32->eip;
2725 uNewEflags = pNewTSS32->eflags;
2726 uNewEax = pNewTSS32->eax;
2727 uNewEcx = pNewTSS32->ecx;
2728 uNewEdx = pNewTSS32->edx;
2729 uNewEbx = pNewTSS32->ebx;
2730 uNewEsp = pNewTSS32->esp;
2731 uNewEbp = pNewTSS32->ebp;
2732 uNewEsi = pNewTSS32->esi;
2733 uNewEdi = pNewTSS32->edi;
2734 uNewES = pNewTSS32->es;
2735 uNewCS = pNewTSS32->cs;
2736 uNewSS = pNewTSS32->ss;
2737 uNewDS = pNewTSS32->ds;
2738 uNewFS = pNewTSS32->fs;
2739 uNewGS = pNewTSS32->gs;
2740 uNewLdt = pNewTSS32->selLdt;
2741 fNewDebugTrap = RT_BOOL(pNewTSS32->fDebugTrap);
2742 }
2743 else
2744 {
2745 PX86TSS16 pNewTSS16 = (PX86TSS16)pvNewTSS;
2746 uNewCr3 = 0;
2747 uNewEip = pNewTSS16->ip;
2748 uNewEflags = pNewTSS16->flags;
2749 uNewEax = UINT32_C(0xffff0000) | pNewTSS16->ax;
2750 uNewEcx = UINT32_C(0xffff0000) | pNewTSS16->cx;
2751 uNewEdx = UINT32_C(0xffff0000) | pNewTSS16->dx;
2752 uNewEbx = UINT32_C(0xffff0000) | pNewTSS16->bx;
2753 uNewEsp = UINT32_C(0xffff0000) | pNewTSS16->sp;
2754 uNewEbp = UINT32_C(0xffff0000) | pNewTSS16->bp;
2755 uNewEsi = UINT32_C(0xffff0000) | pNewTSS16->si;
2756 uNewEdi = UINT32_C(0xffff0000) | pNewTSS16->di;
2757 uNewES = pNewTSS16->es;
2758 uNewCS = pNewTSS16->cs;
2759 uNewSS = pNewTSS16->ss;
2760 uNewDS = pNewTSS16->ds;
2761 uNewFS = 0;
2762 uNewGS = 0;
2763 uNewLdt = pNewTSS16->selLdt;
2764 fNewDebugTrap = false;
2765 }
2766
2767 if (GCPtrNewTSS == GCPtrCurTSS)
2768 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
2769 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
2770
2771 /*
2772 * We're done accessing the new TSS.
2773 */
2774 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvNewTSS, IEM_ACCESS_SYS_RW);
2775 if (rcStrict != VINF_SUCCESS)
2776 {
2777 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
2778 return rcStrict;
2779 }
2780
2781 /*
2782 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
2783 */
2784 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
2785 {
2786 rcStrict = iemMemMap(pIemCpu, (void **)&pNewDescTSS, sizeof(*pNewDescTSS), UINT8_MAX,
2787 pCtx->gdtr.pGdt + (SelTSS & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
2788 if (rcStrict != VINF_SUCCESS)
2789 {
2790 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2791 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2792 return rcStrict;
2793 }
2794
2795 /* Check that the descriptor indicates the new TSS is available (not busy). */
2796 AssertMsg( pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2797 || pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
2798 ("Invalid TSS descriptor type=%#x", pNewDescTSS->Legacy.Gate.u4Type));
2799
2800 pNewDescTSS->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2801 rcStrict = iemMemCommitAndUnmap(pIemCpu, pNewDescTSS, IEM_ACCESS_SYS_RW);
2802 if (rcStrict != VINF_SUCCESS)
2803 {
2804 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2805 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2806 return rcStrict;
2807 }
2808 }
2809
2810 /*
2811 * From this point on, we're technically in the new task. Exceptions raised from here on are
2812 * deferred until the task switch completes, but are delivered before any instruction in the new task executes.
2813 */
2814 pCtx->tr.Sel = SelTSS;
2815 pCtx->tr.ValidSel = SelTSS;
2816 pCtx->tr.fFlags = CPUMSELREG_FLAGS_VALID;
2817 pCtx->tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTSS->Legacy);
2818 pCtx->tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTSS->Legacy);
2819 pCtx->tr.u64Base = X86DESC_BASE(&pNewDescTSS->Legacy);
2820 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_TR);
2821
2822 /* Set the busy bit in TR. */
2823 pCtx->tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2824 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
2825 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2826 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2827 {
2828 uNewEflags |= X86_EFL_NT;
2829 }
2830
2831 pCtx->dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
2832 pCtx->cr0 |= X86_CR0_TS;
2833 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_CR0);
2834
2835 pCtx->eip = uNewEip;
2836 pCtx->eax = uNewEax;
2837 pCtx->ecx = uNewEcx;
2838 pCtx->edx = uNewEdx;
2839 pCtx->ebx = uNewEbx;
2840 pCtx->esp = uNewEsp;
2841 pCtx->ebp = uNewEbp;
2842 pCtx->esi = uNewEsi;
2843 pCtx->edi = uNewEdi;
2844
2845 uNewEflags &= X86_EFL_LIVE_MASK;
2846 uNewEflags |= X86_EFL_RA1_MASK;
2847 IEMMISC_SET_EFL(pIemCpu, pCtx, uNewEflags);
2848
2849 /*
2850 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
2851 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3,
2852 * because the hidden part data originates from the guest LDT/GDT, which is accessed through paging.
2853 */
2854 pCtx->es.Sel = uNewES;
2855 pCtx->es.fFlags = CPUMSELREG_FLAGS_STALE;
2856 pCtx->es.Attr.u &= ~X86DESCATTR_P;
2857
2858 pCtx->cs.Sel = uNewCS;
2859 pCtx->cs.fFlags = CPUMSELREG_FLAGS_STALE;
2860 pCtx->cs.Attr.u &= ~X86DESCATTR_P;
2861
2862 pCtx->ss.Sel = uNewSS;
2863 pCtx->ss.fFlags = CPUMSELREG_FLAGS_STALE;
2864 pCtx->ss.Attr.u &= ~X86DESCATTR_P;
2865
2866 pCtx->ds.Sel = uNewDS;
2867 pCtx->ds.fFlags = CPUMSELREG_FLAGS_STALE;
2868 pCtx->ds.Attr.u &= ~X86DESCATTR_P;
2869
2870 pCtx->fs.Sel = uNewFS;
2871 pCtx->fs.fFlags = CPUMSELREG_FLAGS_STALE;
2872 pCtx->fs.Attr.u &= ~X86DESCATTR_P;
2873
2874 pCtx->gs.Sel = uNewGS;
2875 pCtx->gs.fFlags = CPUMSELREG_FLAGS_STALE;
2876 pCtx->gs.Attr.u &= ~X86DESCATTR_P;
2877 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_HIDDEN_SEL_REGS);
2878
2879 pCtx->ldtr.Sel = uNewLdt;
2880 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
2881 pCtx->ldtr.Attr.u &= ~X86DESCATTR_P;
2882 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_LDTR);
2883
2884 if (IEM_IS_GUEST_CPU_INTEL(pIemCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
2885 {
2886 pCtx->es.Attr.u |= X86DESCATTR_UNUSABLE;
2887 pCtx->cs.Attr.u |= X86DESCATTR_UNUSABLE;
2888 pCtx->ss.Attr.u |= X86DESCATTR_UNUSABLE;
2889 pCtx->ds.Attr.u |= X86DESCATTR_UNUSABLE;
2890 pCtx->fs.Attr.u |= X86DESCATTR_UNUSABLE;
2891 pCtx->gs.Attr.u |= X86DESCATTR_UNUSABLE;
2892 pCtx->ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
2893 }
2894
2895 /*
2896 * Switch CR3 for the new task.
2897 */
2898 if ( fIsNewTSS386
2899 && (pCtx->cr0 & X86_CR0_PG))
2900 {
2901 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
2902 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
2903 {
2904 int rc = CPUMSetGuestCR3(IEMCPU_TO_VMCPU(pIemCpu), uNewCr3);
2905 AssertRCSuccessReturn(rc, rc);
2906 }
2907 else
2908 pCtx->cr3 = uNewCr3;
2909
2910 /* Inform PGM. */
2911 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
2912 {
2913 int rc = PGMFlushTLB(IEMCPU_TO_VMCPU(pIemCpu), pCtx->cr3, !(pCtx->cr4 & X86_CR4_PGE));
2914 AssertRCReturn(rc, rc);
2915 /* ignore informational status codes */
2916 }
2917 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_CR3);
2918 }
2919
2920 /*
2921 * Switch LDTR for the new task.
2922 */
2923 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
2924 iemHlpLoadNullDataSelectorProt(pIemCpu, &pCtx->ldtr, uNewLdt);
2925 else
2926 {
2927 Assert(!pCtx->ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
2928
2929 IEMSELDESC DescNewLdt;
2930 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
2931 if (rcStrict != VINF_SUCCESS)
2932 {
2933 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
2934 uNewLdt, pCtx->gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
2935 return rcStrict;
2936 }
2937 if ( !DescNewLdt.Legacy.Gen.u1Present
2938 || DescNewLdt.Legacy.Gen.u1DescType
2939 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
2940 {
2941 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
2942 uNewLdt, DescNewLdt.Legacy.u));
2943 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
2944 }
2945
2946 pCtx->ldtr.ValidSel = uNewLdt;
2947 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
2948 pCtx->ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
2949 pCtx->ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
2950 pCtx->ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
2951 if (IEM_IS_GUEST_CPU_INTEL(pIemCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
2952 pCtx->ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
2953 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), &pCtx->ldtr));
2954 }
2955
2956 IEMSELDESC DescSS;
2957 if (IEM_IS_V86_MODE(pIemCpu))
2958 {
2959 pIemCpu->uCpl = 3;
2960 iemHlpLoadSelectorInV86Mode(pIemCpu, &pCtx->es, uNewES);
2961 iemHlpLoadSelectorInV86Mode(pIemCpu, &pCtx->cs, uNewCS);
2962 iemHlpLoadSelectorInV86Mode(pIemCpu, &pCtx->ss, uNewSS);
2963 iemHlpLoadSelectorInV86Mode(pIemCpu, &pCtx->ds, uNewDS);
2964 iemHlpLoadSelectorInV86Mode(pIemCpu, &pCtx->fs, uNewFS);
2965 iemHlpLoadSelectorInV86Mode(pIemCpu, &pCtx->gs, uNewGS);
2966 }
2967 else
2968 {
2969 uint8_t uNewCpl = (uNewCS & X86_SEL_RPL);
2970
2971 /*
2972 * Load the stack segment for the new task.
2973 */
2974 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
2975 {
2976 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
2977 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2978 }
2979
2980 /* Fetch the descriptor. */
2981 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescSS, uNewSS, X86_XCPT_TS);
2982 if (rcStrict != VINF_SUCCESS)
2983 {
2984 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
2985 VBOXSTRICTRC_VAL(rcStrict)));
2986 return rcStrict;
2987 }
2988
2989 /* SS must be a data segment and writable. */
2990 if ( !DescSS.Legacy.Gen.u1DescType
2991 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
2992 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
2993 {
2994 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
2995 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
2996 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2997 }
2998
2999 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
3000 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
3001 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
3002 {
3003 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
3004 uNewCpl));
3005 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
3006 }
3007
3008 /* Is it there? */
3009 if (!DescSS.Legacy.Gen.u1Present)
3010 {
3011 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
3012 return iemRaiseSelectorNotPresentWithErr(pIemCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
3013 }
3014
3015 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
3016 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
3017
3018 /* Set the accessed bit before committing the result into SS. */
3019 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3020 {
3021 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewSS);
3022 if (rcStrict != VINF_SUCCESS)
3023 return rcStrict;
3024 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3025 }
3026
3027 /* Commit SS. */
3028 pCtx->ss.Sel = uNewSS;
3029 pCtx->ss.ValidSel = uNewSS;
3030 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
3031 pCtx->ss.u32Limit = cbLimit;
3032 pCtx->ss.u64Base = u64Base;
3033 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
3034 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), &pCtx->ss));
3035
3036 /* CPL has changed, update IEM before loading rest of segments. */
3037 pIemCpu->uCpl = uNewCpl;
3038
3039 /*
3040 * Load the data segments for the new task.
3041 */
3042 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pIemCpu, &pCtx->es, uNewES);
3043 if (rcStrict != VINF_SUCCESS)
3044 return rcStrict;
3045 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pIemCpu, &pCtx->ds, uNewDS);
3046 if (rcStrict != VINF_SUCCESS)
3047 return rcStrict;
3048 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pIemCpu, &pCtx->fs, uNewFS);
3049 if (rcStrict != VINF_SUCCESS)
3050 return rcStrict;
3051 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pIemCpu, &pCtx->gs, uNewGS);
3052 if (rcStrict != VINF_SUCCESS)
3053 return rcStrict;
3054
3055 /*
3056 * Load the code segment for the new task.
3057 */
3058 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
3059 {
3060 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
3061 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3062 }
3063
3064 /* Fetch the descriptor. */
3065 IEMSELDESC DescCS;
3066 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCS, uNewCS, X86_XCPT_TS);
3067 if (rcStrict != VINF_SUCCESS)
3068 {
3069 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
3070 return rcStrict;
3071 }
3072
3073 /* CS must be a code segment. */
3074 if ( !DescCS.Legacy.Gen.u1DescType
3075 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
3076 {
3077 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
3078 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
3079 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3080 }
3081
3082 /* For conforming CS, DPL must be less than or equal to the RPL. */
3083 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
3084 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
3085 {
3086 Log(("iemTaskSwitch: conforming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
3087 DescCS.Legacy.Gen.u2Dpl));
3088 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3089 }
3090
3091 /* For non-conforming CS, DPL must match RPL. */
3092 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
3093 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
3094 {
3095 Log(("iemTaskSwitch: non-conforming CS DPL RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
3096 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
3097 return iemRaiseTaskSwitchFaultWithErr(pIemCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3098 }
3099
3100 /* Is it there? */
3101 if (!DescCS.Legacy.Gen.u1Present)
3102 {
3103 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
3104 return iemRaiseSelectorNotPresentWithErr(pIemCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3105 }
3106
3107 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
3108 u64Base = X86DESC_BASE(&DescCS.Legacy);
3109
3110 /* Set the accessed bit before committing the result into CS. */
3111 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3112 {
3113 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewCS);
3114 if (rcStrict != VINF_SUCCESS)
3115 return rcStrict;
3116 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3117 }
3118
3119 /* Commit CS. */
3120 pCtx->cs.Sel = uNewCS;
3121 pCtx->cs.ValidSel = uNewCS;
3122 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3123 pCtx->cs.u32Limit = cbLimit;
3124 pCtx->cs.u64Base = u64Base;
3125 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3126 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), &pCtx->cs));
3127 }
3128
3129 /** @todo Debug trap. */
3130 if (fIsNewTSS386 && fNewDebugTrap)
3131 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
3132
3133 /*
3134 * Construct the error code masks based on what caused this task switch.
3135 * See Intel Instruction reference for INT.
3136 */
3137 uint16_t uExt;
3138 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
3139 && !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT))
3140 {
3141 uExt = 1;
3142 }
3143 else
3144 uExt = 0;
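    /* Note: uExt ends up as the EXT (external event) bit of the error codes
       raised further down; it is set when the cause was a hardware interrupt
       or exception rather than a software INT n. */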
3145
3146 /*
3147 * Push any error code on to the new stack.
3148 */
3149 if (fFlags & IEM_XCPT_FLAGS_ERR)
3150 {
3151 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
3152 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
3153 uint8_t const cbStackFrame = fIsNewTSS386 ? 4 : 2;
3154
3155 /* Check that there is sufficient space on the stack. */
3156 /** @todo Factor out segment limit checking for normal/expand down segments
3157 * into a separate function. */
3158 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
3159 {
3160 if ( pCtx->esp - 1 > cbLimitSS
3161 || pCtx->esp < cbStackFrame)
3162 {
3163 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
3164 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n", pCtx->ss.Sel, pCtx->esp,
3165 cbStackFrame));
3166 return iemRaiseStackSelectorNotPresentWithErr(pIemCpu, uExt);
3167 }
3168 }
3169 else
3170 {
3171 if ( pCtx->esp - 1 > (DescSS.Legacy.Gen.u4Type & X86_DESC_DB ? UINT32_MAX : UINT32_C(0xffff))
3172 || pCtx->esp - cbStackFrame < cbLimitSS + UINT32_C(1))
3173 {
3174 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n", pCtx->ss.Sel, pCtx->esp,
3175 cbStackFrame));
3176 return iemRaiseStackSelectorNotPresentWithErr(pIemCpu, uExt);
3177 }
3178 }
3179
3180
3181 if (fIsNewTSS386)
3182 rcStrict = iemMemStackPushU32(pIemCpu, uErr);
3183 else
3184 rcStrict = iemMemStackPushU16(pIemCpu, uErr);
3185 if (rcStrict != VINF_SUCCESS)
3186 {
3187 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n", fIsNewTSS386 ? "32" : "16",
3188 VBOXSTRICTRC_VAL(rcStrict)));
3189 return rcStrict;
3190 }
3191 }
3192
3193 /* Check the new EIP against the new CS limit. */
3194 if (pCtx->eip > pCtx->cs.u32Limit)
3195 {
3196         Log(("iemTaskSwitch: New EIP exceeds CS limit. uNewEIP=%#RGv CS limit=%u -> #GP(0)\n",
3197 pCtx->eip, pCtx->cs.u32Limit));
3198 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
3199 return iemRaiseGeneralProtectionFault(pIemCpu, uExt);
3200 }
3201
3202 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pCtx->cs.Sel, pCtx->eip, pCtx->ss.Sel));
3203 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3204}
3205
3206
3207/**
3208 * Implements exceptions and interrupts for protected mode.
3209 *
3210 * @returns VBox strict status code.
3211 * @param pIemCpu The IEM per CPU instance data.
3212 * @param pCtx The CPU context.
3213 * @param cbInstr The number of bytes to offset rIP by in the return
3214 * address.
3215 * @param u8Vector The interrupt / exception vector number.
3216 * @param fFlags The flags.
3217 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3218 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3219 */
3220IEM_STATIC VBOXSTRICTRC
3221iemRaiseXcptOrIntInProtMode(PIEMCPU pIemCpu,
3222 PCPUMCTX pCtx,
3223 uint8_t cbInstr,
3224 uint8_t u8Vector,
3225 uint32_t fFlags,
3226 uint16_t uErr,
3227 uint64_t uCr2)
3228{
3229 /*
3230 * Read the IDT entry.
3231 */
3232 if (pCtx->idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
3233 {
3234 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
3235 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3236 }
3237 X86DESC Idte;
3238 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pIemCpu, &Idte.u, UINT8_MAX,
3239 pCtx->idtr.pIdt + UINT32_C(8) * u8Vector);
3240 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3241 return rcStrict;
3242 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x\n",
3243 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
3244 Idte.Gate.u4ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
3245
3246 /*
3247 * Check the descriptor type, DPL and such.
3248 * ASSUMES this is done in the same order as described for call-gate calls.
3249 */
3250 if (Idte.Gate.u1DescType)
3251 {
3252 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3253 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3254 }
3255 bool fTaskGate = false;
3256 uint8_t f32BitGate = true;
3257 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
3258 switch (Idte.Gate.u4Type)
3259 {
3260 case X86_SEL_TYPE_SYS_UNDEFINED:
3261 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
3262 case X86_SEL_TYPE_SYS_LDT:
3263 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
3264 case X86_SEL_TYPE_SYS_286_CALL_GATE:
3265 case X86_SEL_TYPE_SYS_UNDEFINED2:
3266 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
3267 case X86_SEL_TYPE_SYS_UNDEFINED3:
3268 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
3269 case X86_SEL_TYPE_SYS_386_CALL_GATE:
3270 case X86_SEL_TYPE_SYS_UNDEFINED4:
3271 {
3272 /** @todo check what actually happens when the type is wrong...
3273 * esp. call gates. */
3274 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3275 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3276 }
3277
3278 case X86_SEL_TYPE_SYS_286_INT_GATE:
3279 f32BitGate = false;
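            /* fall thru */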
3280 case X86_SEL_TYPE_SYS_386_INT_GATE:
3281 fEflToClear |= X86_EFL_IF;
3282 break;
3283
3284 case X86_SEL_TYPE_SYS_TASK_GATE:
3285 fTaskGate = true;
3286#ifndef IEM_IMPLEMENTS_TASKSWITCH
3287 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
3288#endif
3289 break;
3290
3291 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
3292 f32BitGate = false;
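            /* fall thru */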
3293 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
3294 break;
3295
3296 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3297 }
3298
3299 /* Check DPL against CPL if applicable. */
3300 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3301 {
3302 if (pIemCpu->uCpl > Idte.Gate.u2Dpl)
3303 {
3304 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pIemCpu->uCpl, Idte.Gate.u2Dpl));
3305 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3306 }
3307 }
3308
3309 /* Is it there? */
3310 if (!Idte.Gate.u1Present)
3311 {
3312 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
3313 return iemRaiseSelectorNotPresentWithErr(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3314 }
3315
3316 /* Is it a task-gate? */
3317 if (fTaskGate)
3318 {
3319 /*
3320 * Construct the error code masks based on what caused this task switch.
3321 * See Intel Instruction reference for INT.
3322 */
3323 uint16_t const uExt = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? 0 : 1;
3324 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
3325 RTSEL SelTSS = Idte.Gate.u16Sel;
3326
3327 /*
3328 * Fetch the TSS descriptor in the GDT.
3329 */
3330 IEMSELDESC DescTSS;
3331 rcStrict = iemMemFetchSelDescWithErr(pIemCpu, &DescTSS, SelTSS, X86_XCPT_GP, (SelTSS & uSelMask) | uExt);
3332 if (rcStrict != VINF_SUCCESS)
3333 {
3334 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTSS,
3335 VBOXSTRICTRC_VAL(rcStrict)));
3336 return rcStrict;
3337 }
3338
3339 /* The TSS descriptor must be a system segment and be available (not busy). */
3340 if ( DescTSS.Legacy.Gen.u1DescType
3341 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
3342 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
3343 {
3344 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
3345 u8Vector, SelTSS, DescTSS.Legacy.au64));
3346 return iemRaiseGeneralProtectionFault(pIemCpu, (SelTSS & uSelMask) | uExt);
3347 }
3348
3349 /* The TSS must be present. */
3350 if (!DescTSS.Legacy.Gen.u1Present)
3351 {
3352 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTSS, DescTSS.Legacy.au64));
3353 return iemRaiseSelectorNotPresentWithErr(pIemCpu, (SelTSS & uSelMask) | uExt);
3354 }
3355
3356 /* Do the actual task switch. */
3357 return iemTaskSwitch(pIemCpu, pCtx, IEMTASKSWITCH_INT_XCPT, pCtx->eip, fFlags, uErr, uCr2, SelTSS, &DescTSS);
3358 }
3359
3360 /* A null CS is bad. */
3361 RTSEL NewCS = Idte.Gate.u16Sel;
3362 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3363 {
3364 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3365 return iemRaiseGeneralProtectionFault0(pIemCpu);
3366 }
3367
3368 /* Fetch the descriptor for the new CS. */
3369 IEMSELDESC DescCS;
3370 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
3371 if (rcStrict != VINF_SUCCESS)
3372 {
3373 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3374 return rcStrict;
3375 }
3376
3377 /* Must be a code segment. */
3378 if (!DescCS.Legacy.Gen.u1DescType)
3379 {
3380 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3381 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3382 }
3383 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
3384 {
3385 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3386 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3387 }
3388
3389 /* Don't allow lowering the privilege level. */
3390 /** @todo Does the lowering of privileges apply to software interrupts
3391 * only? This has bearings on the more-privileged or
3392 * same-privilege stack behavior further down. A testcase would
3393 * be nice. */
3394 if (DescCS.Legacy.Gen.u2Dpl > pIemCpu->uCpl)
3395 {
3396 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3397 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
3398 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3399 }
3400
3401 /* Make sure the selector is present. */
3402 if (!DescCS.Legacy.Gen.u1Present)
3403 {
3404 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3405 return iemRaiseSelectorNotPresentBySelector(pIemCpu, NewCS);
3406 }
3407
3408 /* Check the new EIP against the new CS limit. */
3409 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
3410 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
3411 ? Idte.Gate.u16OffsetLow
3412 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
3413 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
3414 if (uNewEip > cbLimitCS)
3415 {
3416 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
3417 u8Vector, uNewEip, cbLimitCS, NewCS));
3418 return iemRaiseGeneralProtectionFault(pIemCpu, 0);
3419 }
3420
3421 /* Calc the flag image to push. */
3422 uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx);
3423 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3424 fEfl &= ~X86_EFL_RF;
3425 else if (!IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
3426 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3427
3428 /* From V8086 mode only go to CPL 0. */
3429 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3430 ? pIemCpu->uCpl : DescCS.Legacy.Gen.u2Dpl;
3431 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
3432 {
3433 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
3434 return iemRaiseGeneralProtectionFault(pIemCpu, 0);
3435 }
3436
3437 /*
3438 * If the privilege level changes, we need to get a new stack from the TSS.
3439 * This in turns means validating the new SS and ESP...
3440 */
3441 if (uNewCpl != pIemCpu->uCpl)
3442 {
3443 RTSEL NewSS;
3444 uint32_t uNewEsp;
3445 rcStrict = iemRaiseLoadStackFromTss32Or16(pIemCpu, pCtx, uNewCpl, &NewSS, &uNewEsp);
3446 if (rcStrict != VINF_SUCCESS)
3447 return rcStrict;
3448
3449 IEMSELDESC DescSS;
3450 rcStrict = iemMiscValidateNewSS(pIemCpu, pCtx, NewSS, uNewCpl, &DescSS);
3451 if (rcStrict != VINF_SUCCESS)
3452 return rcStrict;
3453
3454 /* Check that there is sufficient space for the stack frame. */
3455 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
3456 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
3457 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
3458 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
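        /* Byte counts above are for a 16-bit gate (the f32BitGate shift doubles
           them for 32-bit gates): non-V86 pushes IP, CS, FLAGS, SP, SS = 10 bytes;
           V86 additionally pushes ES, DS, FS, GS for 18 bytes; an error code adds
           one more word. */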
3459
3460 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
3461 {
3462 if ( uNewEsp - 1 > cbLimitSS
3463 || uNewEsp < cbStackFrame)
3464 {
3465 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
3466 u8Vector, NewSS, uNewEsp, cbStackFrame));
3467 return iemRaiseSelectorBoundsBySelector(pIemCpu, NewSS);
3468 }
3469 }
3470 else
3471 {
3472 if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u4Type & X86_DESC_DB ? UINT32_MAX : UINT32_C(0xffff))
3473 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
3474 {
3475 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
3476 u8Vector, NewSS, uNewEsp, cbStackFrame));
3477 return iemRaiseSelectorBoundsBySelector(pIemCpu, NewSS);
3478 }
3479 }
3480
3481 /*
3482 * Start making changes.
3483 */
3484
3485 /* Create the stack frame. */
3486 RTPTRUNION uStackFrame;
3487 rcStrict = iemMemMap(pIemCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
3488 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy), IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
3489 if (rcStrict != VINF_SUCCESS)
3490 return rcStrict;
3491 void * const pvStackFrame = uStackFrame.pv;
3492 if (f32BitGate)
3493 {
3494 if (fFlags & IEM_XCPT_FLAGS_ERR)
3495 *uStackFrame.pu32++ = uErr;
3496 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->eip + cbInstr : pCtx->eip;
3497 uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl;
3498 uStackFrame.pu32[2] = fEfl;
3499 uStackFrame.pu32[3] = pCtx->esp;
3500 uStackFrame.pu32[4] = pCtx->ss.Sel;
3501 if (fEfl & X86_EFL_VM)
3502 {
3503 uStackFrame.pu32[1] = pCtx->cs.Sel;
3504 uStackFrame.pu32[5] = pCtx->es.Sel;
3505 uStackFrame.pu32[6] = pCtx->ds.Sel;
3506 uStackFrame.pu32[7] = pCtx->fs.Sel;
3507 uStackFrame.pu32[8] = pCtx->gs.Sel;
3508 }
3509 }
3510 else
3511 {
3512 if (fFlags & IEM_XCPT_FLAGS_ERR)
3513 *uStackFrame.pu16++ = uErr;
3514 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->ip + cbInstr : pCtx->ip;
3515 uStackFrame.pu16[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl;
3516 uStackFrame.pu16[2] = fEfl;
3517 uStackFrame.pu16[3] = pCtx->sp;
3518 uStackFrame.pu16[4] = pCtx->ss.Sel;
3519 if (fEfl & X86_EFL_VM)
3520 {
3521 uStackFrame.pu16[1] = pCtx->cs.Sel;
3522 uStackFrame.pu16[5] = pCtx->es.Sel;
3523 uStackFrame.pu16[6] = pCtx->ds.Sel;
3524 uStackFrame.pu16[7] = pCtx->fs.Sel;
3525 uStackFrame.pu16[8] = pCtx->gs.Sel;
3526 }
3527 }
3528 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
3529 if (rcStrict != VINF_SUCCESS)
3530 return rcStrict;
3531
3532 /* Mark the selectors 'accessed' (hope this is the correct time). */
3533     /** @todo testcase: exactly _when_ are the accessed bits set - before or
3534 * after pushing the stack frame? (Write protect the gdt + stack to
3535 * find out.) */
3536 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3537 {
3538 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewCS);
3539 if (rcStrict != VINF_SUCCESS)
3540 return rcStrict;
3541 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3542 }
3543
3544 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3545 {
3546 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewSS);
3547 if (rcStrict != VINF_SUCCESS)
3548 return rcStrict;
3549 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3550 }
3551
3552 /*
3553          * Start committing the register changes (joins with the DPL=CPL branch).
3554 */
3555 pCtx->ss.Sel = NewSS;
3556 pCtx->ss.ValidSel = NewSS;
3557 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
3558 pCtx->ss.u32Limit = cbLimitSS;
3559 pCtx->ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
3560 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
3561 pCtx->rsp = uNewEsp - cbStackFrame; /** @todo Is the high word cleared for 16-bit stacks and/or interrupt handlers? */
3562 pIemCpu->uCpl = uNewCpl;
3563
3564 if (fEfl & X86_EFL_VM)
3565 {
3566 iemHlpLoadNullDataSelectorOnV86Xcpt(pIemCpu, &pCtx->gs);
3567 iemHlpLoadNullDataSelectorOnV86Xcpt(pIemCpu, &pCtx->fs);
3568 iemHlpLoadNullDataSelectorOnV86Xcpt(pIemCpu, &pCtx->es);
3569 iemHlpLoadNullDataSelectorOnV86Xcpt(pIemCpu, &pCtx->ds);
3570 }
3571 }
3572 /*
3573 * Same privilege, no stack change and smaller stack frame.
3574 */
3575 else
3576 {
3577 uint64_t uNewRsp;
3578 RTPTRUNION uStackFrame;
3579 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
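        /* Byte count above is for a 16-bit gate (doubled for 32-bit gates):
           IP, CS, FLAGS = 6 bytes, plus one word for an error code. */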
3580 rcStrict = iemMemStackPushBeginSpecial(pIemCpu, cbStackFrame, &uStackFrame.pv, &uNewRsp);
3581 if (rcStrict != VINF_SUCCESS)
3582 return rcStrict;
3583 void * const pvStackFrame = uStackFrame.pv;
3584
3585 if (f32BitGate)
3586 {
3587 if (fFlags & IEM_XCPT_FLAGS_ERR)
3588 *uStackFrame.pu32++ = uErr;
3589 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->eip + cbInstr : pCtx->eip;
3590 uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl;
3591 uStackFrame.pu32[2] = fEfl;
3592 }
3593 else
3594 {
3595 if (fFlags & IEM_XCPT_FLAGS_ERR)
3596 *uStackFrame.pu16++ = uErr;
3597 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->eip + cbInstr : pCtx->eip;
3598 uStackFrame.pu16[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl;
3599 uStackFrame.pu16[2] = fEfl;
3600 }
3601 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */
3602 if (rcStrict != VINF_SUCCESS)
3603 return rcStrict;
3604
3605 /* Mark the CS selector as 'accessed'. */
3606 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3607 {
3608 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewCS);
3609 if (rcStrict != VINF_SUCCESS)
3610 return rcStrict;
3611 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3612 }
3613
3614 /*
3615 * Start committing the register changes (joins with the other branch).
3616 */
3617 pCtx->rsp = uNewRsp;
3618 }
3619
3620 /* ... register committing continues. */
3621 pCtx->cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3622 pCtx->cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3623 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3624 pCtx->cs.u32Limit = cbLimitCS;
3625 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3626 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3627
3628 pCtx->rip = uNewEip;
3629 fEfl &= ~fEflToClear;
3630 IEMMISC_SET_EFL(pIemCpu, pCtx, fEfl);
3631
3632 if (fFlags & IEM_XCPT_FLAGS_CR2)
3633 pCtx->cr2 = uCr2;
3634
3635 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3636 iemRaiseXcptAdjustState(pCtx, u8Vector);
3637
3638 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3639}
3640
3641
3642/**
3643 * Implements exceptions and interrupts for long mode.
3644 *
3645 * @returns VBox strict status code.
3646 * @param pIemCpu The IEM per CPU instance data.
3647 * @param pCtx The CPU context.
3648 * @param cbInstr The number of bytes to offset rIP by in the return
3649 * address.
3650 * @param u8Vector The interrupt / exception vector number.
3651 * @param fFlags The flags.
3652 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3653 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3654 */
3655IEM_STATIC VBOXSTRICTRC
3656iemRaiseXcptOrIntInLongMode(PIEMCPU pIemCpu,
3657 PCPUMCTX pCtx,
3658 uint8_t cbInstr,
3659 uint8_t u8Vector,
3660 uint32_t fFlags,
3661 uint16_t uErr,
3662 uint64_t uCr2)
3663{
3664 /*
3665 * Read the IDT entry.
3666 */
3667 uint16_t offIdt = (uint16_t)u8Vector << 4;
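    /* Long mode IDT entries are 16 bytes (two 8-byte halves), hence the shift
       by 4 above and the two 8-byte fetches below. */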
3668 if (pCtx->idtr.cbIdt < offIdt + 7)
3669 {
3670 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
3671 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3672 }
3673 X86DESC64 Idte;
3674 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pIemCpu, &Idte.au64[0], UINT8_MAX, pCtx->idtr.pIdt + offIdt);
3675 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
3676 rcStrict = iemMemFetchSysU64(pIemCpu, &Idte.au64[1], UINT8_MAX, pCtx->idtr.pIdt + offIdt + 8);
3677 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3678 return rcStrict;
3679 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
3680 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
3681 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
3682
3683 /*
3684 * Check the descriptor type, DPL and such.
3685 * ASSUMES this is done in the same order as described for call-gate calls.
3686 */
3687 if (Idte.Gate.u1DescType)
3688 {
3689 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3690 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3691 }
3692 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
3693 switch (Idte.Gate.u4Type)
3694 {
3695 case AMD64_SEL_TYPE_SYS_INT_GATE:
3696 fEflToClear |= X86_EFL_IF;
3697 break;
3698 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
3699 break;
3700
3701 default:
3702 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3703 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3704 }
3705
3706 /* Check DPL against CPL if applicable. */
3707 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3708 {
3709 if (pIemCpu->uCpl > Idte.Gate.u2Dpl)
3710 {
3711 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pIemCpu->uCpl, Idte.Gate.u2Dpl));
3712 return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3713 }
3714 }
3715
3716 /* Is it there? */
3717 if (!Idte.Gate.u1Present)
3718 {
3719 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
3720 return iemRaiseSelectorNotPresentWithErr(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3721 }
3722
3723 /* A null CS is bad. */
3724 RTSEL NewCS = Idte.Gate.u16Sel;
3725 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3726 {
3727 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3728 return iemRaiseGeneralProtectionFault0(pIemCpu);
3729 }
3730
3731 /* Fetch the descriptor for the new CS. */
3732 IEMSELDESC DescCS;
3733 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCS, NewCS, X86_XCPT_GP);
3734 if (rcStrict != VINF_SUCCESS)
3735 {
3736 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3737 return rcStrict;
3738 }
3739
3740 /* Must be a 64-bit code segment. */
3741 if (!DescCS.Long.Gen.u1DescType)
3742 {
3743 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3744 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3745 }
3746 if ( !DescCS.Long.Gen.u1Long
3747 || DescCS.Long.Gen.u1DefBig
3748 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
3749 {
3750 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
3751 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
3752 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3753 }
3754
3755 /* Don't allow lowering the privilege level. For non-conforming CS
3756 selectors, the CS.DPL sets the privilege level the trap/interrupt
3757 handler runs at. For conforming CS selectors, the CPL remains
3758 unchanged, but the CS.DPL must be <= CPL. */
3759 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
3760 * when CPU in Ring-0. Result \#GP? */
3761 if (DescCS.Legacy.Gen.u2Dpl > pIemCpu->uCpl)
3762 {
3763 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3764 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
3765 return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3766 }
3767
3768
3769 /* Make sure the selector is present. */
3770 if (!DescCS.Legacy.Gen.u1Present)
3771 {
3772 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3773 return iemRaiseSelectorNotPresentBySelector(pIemCpu, NewCS);
3774 }
3775
3776 /* Check that the new RIP is canonical. */
3777 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
3778 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
3779 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
3780 if (!IEM_IS_CANONICAL(uNewRip))
3781 {
3782 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
3783 return iemRaiseGeneralProtectionFault0(pIemCpu);
3784 }
3785
3786 /*
3787 * If the privilege level changes or if the IST isn't zero, we need to get
3788 * a new stack from the TSS.
3789 */
3790 uint64_t uNewRsp;
3791 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3792 ? pIemCpu->uCpl : DescCS.Legacy.Gen.u2Dpl;
3793 if ( uNewCpl != pIemCpu->uCpl
3794 || Idte.Gate.u3IST != 0)
3795 {
3796 rcStrict = iemRaiseLoadStackFromTss64(pIemCpu, pCtx, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
3797 if (rcStrict != VINF_SUCCESS)
3798 return rcStrict;
3799 }
3800 else
3801 uNewRsp = pCtx->rsp;
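    /* In 64-bit mode the stack pointer is aligned down to a 16 byte boundary
       before the interrupt frame is pushed. */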
3802 uNewRsp &= ~(uint64_t)0xf;
3803
3804 /*
3805 * Calc the flag image to push.
3806 */
3807 uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx);
3808 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3809 fEfl &= ~X86_EFL_RF;
3810 else if (!IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
3811 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3812
3813 /*
3814 * Start making changes.
3815 */
3816
3817 /* Create the stack frame. */
3818 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
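    /* The 64-bit frame written below is always five quadwords - return RIP,
       CS, RFLAGS, old RSP and SS - with the error code, when supplied, ending
       up just below them at the new RSP. */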
3819 RTPTRUNION uStackFrame;
3820 rcStrict = iemMemMap(pIemCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
3821 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
3822 if (rcStrict != VINF_SUCCESS)
3823 return rcStrict;
3824 void * const pvStackFrame = uStackFrame.pv;
3825
3826 if (fFlags & IEM_XCPT_FLAGS_ERR)
3827 *uStackFrame.pu64++ = uErr;
3828 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->rip + cbInstr : pCtx->rip;
3829 uStackFrame.pu64[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl; /* CPL paranoia */
3830 uStackFrame.pu64[2] = fEfl;
3831 uStackFrame.pu64[3] = pCtx->rsp;
3832 uStackFrame.pu64[4] = pCtx->ss.Sel;
3833 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
3834 if (rcStrict != VINF_SUCCESS)
3835 return rcStrict;
3836
3837     /* Mark the CS selector as 'accessed' (hope this is the correct time). */
3838     /** @todo testcase: exactly _when_ are the accessed bits set - before or
3839 * after pushing the stack frame? (Write protect the gdt + stack to
3840 * find out.) */
3841 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3842 {
3843 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewCS);
3844 if (rcStrict != VINF_SUCCESS)
3845 return rcStrict;
3846 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3847 }
3848
3849 /*
3850      * Start committing the register changes.
3851 */
3852 /** @todo research/testcase: Figure out what VT-x and AMD-V loads into the
3853 * hidden registers when interrupting 32-bit or 16-bit code! */
3854 if (uNewCpl != pIemCpu->uCpl)
3855 {
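        /* On a stack/CPL change in long mode, SS is loaded with a NULL selector
           carrying the new CPL as RPL; the hidden parts are marked unusable. */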
3856 pCtx->ss.Sel = 0 | uNewCpl;
3857 pCtx->ss.ValidSel = 0 | uNewCpl;
3858 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
3859 pCtx->ss.u32Limit = UINT32_MAX;
3860 pCtx->ss.u64Base = 0;
3861 pCtx->ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
3862 }
3863 pCtx->rsp = uNewRsp - cbStackFrame;
3864 pCtx->cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3865 pCtx->cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3866 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3867 pCtx->cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
3868 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3869 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3870 pCtx->rip = uNewRip;
3871 pIemCpu->uCpl = uNewCpl;
3872
3873 fEfl &= ~fEflToClear;
3874 IEMMISC_SET_EFL(pIemCpu, pCtx, fEfl);
3875
3876 if (fFlags & IEM_XCPT_FLAGS_CR2)
3877 pCtx->cr2 = uCr2;
3878
3879 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3880 iemRaiseXcptAdjustState(pCtx, u8Vector);
3881
3882 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3883}
3884
3885
3886/**
3887 * Implements exceptions and interrupts.
3888 *
3889  * All exceptions and interrupts go through this function!
3890 *
3891 * @returns VBox strict status code.
3892 * @param pIemCpu The IEM per CPU instance data.
3893 * @param cbInstr The number of bytes to offset rIP by in the return
3894 * address.
3895 * @param u8Vector The interrupt / exception vector number.
3896 * @param fFlags The flags.
3897 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3898 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3899 */
3900DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC)
3901iemRaiseXcptOrInt(PIEMCPU pIemCpu,
3902 uint8_t cbInstr,
3903 uint8_t u8Vector,
3904 uint32_t fFlags,
3905 uint16_t uErr,
3906 uint64_t uCr2)
3907{
3908 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3909#ifdef IN_RING0
3910 int rc = HMR0EnsureCompleteBasicContext(IEMCPU_TO_VMCPU(pIemCpu), pCtx);
3911 AssertRCReturn(rc, rc);
3912#endif
3913
3914 /*
3915 * Perform the V8086 IOPL check and upgrade the fault without nesting.
3916 */
3917 if ( pCtx->eflags.Bits.u1VM
3918 && pCtx->eflags.Bits.u2IOPL != 3
3919 && (fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
3920 && (pCtx->cr0 & X86_CR0_PE) )
3921 {
3922 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
3923 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
3924 u8Vector = X86_XCPT_GP;
3925 uErr = 0;
3926 }
3927#ifdef DBGFTRACE_ENABLED
3928 RTTraceBufAddMsgF(IEMCPU_TO_VM(pIemCpu)->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
3929 pIemCpu->cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
3930 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp);
3931#endif
3932
3933 /*
3934 * Do recursion accounting.
3935 */
3936 uint8_t const uPrevXcpt = pIemCpu->uCurXcpt;
3937 uint32_t const fPrevXcpt = pIemCpu->fCurXcpt;
3938 if (pIemCpu->cXcptRecursions == 0)
3939 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
3940 u8Vector, pCtx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2));
3941 else
3942 {
3943 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
3944 u8Vector, pCtx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2, pIemCpu->uCurXcpt, pIemCpu->cXcptRecursions + 1, fPrevXcpt));
3945
3946         /** @todo double and triple faults. */
3947 if (pIemCpu->cXcptRecursions >= 3)
3948 {
3949#ifdef DEBUG_bird
3950 AssertFailed();
3951#endif
3952 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
3953 }
3954
3955 /** @todo set X86_TRAP_ERR_EXTERNAL when appropriate.
3956 if (fPrevXcpt & IEM_XCPT_FLAGS_T_EXT_INT)
3957 {
3958 ....
3959 } */
3960 }
3961 pIemCpu->cXcptRecursions++;
3962 pIemCpu->uCurXcpt = u8Vector;
3963 pIemCpu->fCurXcpt = fFlags;
3964
3965 /*
3966 * Extensive logging.
3967 */
3968#if defined(LOG_ENABLED) && defined(IN_RING3)
3969 if (LogIs3Enabled())
3970 {
3971 PVM pVM = IEMCPU_TO_VM(pIemCpu);
3972 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
3973 char szRegs[4096];
3974 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
3975 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
3976 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
3977 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
3978 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
3979 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
3980 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
3981 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
3982 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
3983 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
3984 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
3985 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
3986 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
3987 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
3988 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
3989 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
3990 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
3991 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
3992 " efer=%016VR{efer}\n"
3993 " pat=%016VR{pat}\n"
3994 " sf_mask=%016VR{sf_mask}\n"
3995 "krnl_gs_base=%016VR{krnl_gs_base}\n"
3996 " lstar=%016VR{lstar}\n"
3997 " star=%016VR{star} cstar=%016VR{cstar}\n"
3998 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
3999 );
4000
4001 char szInstr[256];
4002 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
4003 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
4004 szInstr, sizeof(szInstr), NULL);
4005 Log3(("%s%s\n", szRegs, szInstr));
4006 }
4007#endif /* LOG_ENABLED */
4008
4009 /*
4010 * Call the mode specific worker function.
4011 */
4012 VBOXSTRICTRC rcStrict;
4013 if (!(pCtx->cr0 & X86_CR0_PE))
4014 rcStrict = iemRaiseXcptOrIntInRealMode( pIemCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
4015 else if (pCtx->msrEFER & MSR_K6_EFER_LMA)
4016 rcStrict = iemRaiseXcptOrIntInLongMode( pIemCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
4017 else
4018 rcStrict = iemRaiseXcptOrIntInProtMode( pIemCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
4019
4020 /*
4021 * Unwind.
4022 */
4023 pIemCpu->cXcptRecursions--;
4024 pIemCpu->uCurXcpt = uPrevXcpt;
4025 pIemCpu->fCurXcpt = fPrevXcpt;
4026 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u\n",
4027 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->esp, pIemCpu->uCpl));
4028 return rcStrict;
4029}
4030
4031
4032/** \#DE - 00. */
4033DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDivideError(PIEMCPU pIemCpu)
4034{
4035 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4036}
4037
4038
4039/** \#DB - 01.
4040  * @note This automatically clears DR7.GD. */
4041DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDebugException(PIEMCPU pIemCpu)
4042{
4043 /** @todo set/clear RF. */
4044 pIemCpu->CTX_SUFF(pCtx)->dr[7] &= ~X86_DR7_GD;
4045 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4046}
4047
4048
4049/** \#UD - 06. */
4050DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseUndefinedOpcode(PIEMCPU pIemCpu)
4051{
4052 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4053}
4054
4055
4056/** \#NM - 07. */
4057DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDeviceNotAvailable(PIEMCPU pIemCpu)
4058{
4059 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4060}
4061
4062
4063/** \#TS(err) - 0a. */
4064DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultWithErr(PIEMCPU pIemCpu, uint16_t uErr)
4065{
4066 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4067}
4068
4069
4070/** \#TS(tr) - 0a. */
4071DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultCurrentTSS(PIEMCPU pIemCpu)
4072{
4073 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4074 pIemCpu->CTX_SUFF(pCtx)->tr.Sel, 0);
4075}
4076
4077
4078/** \#TS(0) - 0a. */
4079DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFault0(PIEMCPU pIemCpu)
4080{
4081 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4082 0, 0);
4083}
4084
4085
4086/** \#TS(err) - 0a. */
4087DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultBySelector(PIEMCPU pIemCpu, uint16_t uSel)
4088{
4089 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4090 uSel & X86_SEL_MASK_OFF_RPL, 0);
4091}
4092
4093
4094/** \#NP(err) - 0b. */
4095DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentWithErr(PIEMCPU pIemCpu, uint16_t uErr)
4096{
4097 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4098}
4099
4100
4101/** \#NP(seg) - 0b. */
4102DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySegReg(PIEMCPU pIemCpu, uint32_t iSegReg)
4103{
4104 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4105 iemSRegFetchU16(pIemCpu, iSegReg) & ~X86_SEL_RPL, 0);
4106}
4107
4108
4109/** \#NP(sel) - 0b. */
4110DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel)
4111{
4112 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4113 uSel & ~X86_SEL_RPL, 0);
4114}
4115
4116
4117/** \#SS(seg) - 0c. */
4118DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel)
4119{
4120 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4121 uSel & ~X86_SEL_RPL, 0);
4122}
4123
4124
4125/** \#SS(err) - 0c. */
4126DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentWithErr(PIEMCPU pIemCpu, uint16_t uErr)
4127{
4128 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4129}
4130
4131
4132/** \#GP(n) - 0d. */
4133DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault(PIEMCPU pIemCpu, uint16_t uErr)
4134{
4135 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4136}
4137
4138
4139/** \#GP(0) - 0d. */
4140DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault0(PIEMCPU pIemCpu)
4141{
4142 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4143}
4144
4145
4146/** \#GP(sel) - 0d. */
4147DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFaultBySelector(PIEMCPU pIemCpu, RTSEL Sel)
4148{
4149 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4150 Sel & ~X86_SEL_RPL, 0);
4151}
4152
4153
4154/** \#GP(0) - 0d. */
4155DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseNotCanonical(PIEMCPU pIemCpu)
4156{
4157 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4158}
4159
4160
4161/** \#GP(sel) - 0d. */
4162DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBounds(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess)
4163{
4164 NOREF(iSegReg); NOREF(fAccess);
4165 return iemRaiseXcptOrInt(pIemCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
4166 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4167}
4168
4169
4170/** \#GP(sel) - 0d. */
4171DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBoundsBySelector(PIEMCPU pIemCpu, RTSEL Sel)
4172{
4173 NOREF(Sel);
4174 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4175}
4176
4177
4178/** \#GP(sel) - 0d. */
4179DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorInvalidAccess(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess)
4180{
4181 NOREF(iSegReg); NOREF(fAccess);
4182 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4183}
4184
4185
4186/** \#PF(n) - 0e. */
4187DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaisePageFault(PIEMCPU pIemCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
4188{
4189 uint16_t uErr;
4190 switch (rc)
4191 {
4192 case VERR_PAGE_NOT_PRESENT:
4193 case VERR_PAGE_TABLE_NOT_PRESENT:
4194 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
4195 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
4196 uErr = 0;
4197 break;
4198
4199 default:
4200 AssertMsgFailed(("%Rrc\n", rc));
4201 case VERR_ACCESS_DENIED:
4202 uErr = X86_TRAP_PF_P;
4203 break;
4204
4205 /** @todo reserved */
4206 }
4207
4208 if (pIemCpu->uCpl == 3)
4209 uErr |= X86_TRAP_PF_US;
4210
4211 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
4212 && ( (pIemCpu->CTX_SUFF(pCtx)->cr4 & X86_CR4_PAE)
4213 && (pIemCpu->CTX_SUFF(pCtx)->msrEFER & MSR_K6_EFER_NXE) ) )
4214 uErr |= X86_TRAP_PF_ID;
4215
4216#if 0 /* This is so much non-sense, really. Why was it done like that? */
4217 /* Note! RW access callers reporting a WRITE protection fault, will clear
4218 the READ flag before calling. So, read-modify-write accesses (RW)
4219 can safely be reported as READ faults. */
4220 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
4221 uErr |= X86_TRAP_PF_RW;
4222#else
4223 if (fAccess & IEM_ACCESS_TYPE_WRITE)
4224 {
4225 if (!IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu) || !(fAccess & IEM_ACCESS_TYPE_READ))
4226 uErr |= X86_TRAP_PF_RW;
4227 }
4228#endif
4229
4230 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
4231 uErr, GCPtrWhere);
4232}
4233
4234
4235/** \#MF(0) - 10. */
4236DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseMathFault(PIEMCPU pIemCpu)
4237{
4238 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4239}
4240
4241
4242/** \#AC(0) - 11. */
4243DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseAlignmentCheckException(PIEMCPU pIemCpu)
4244{
4245 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4246}
4247
4248
4249/**
4250 * Macro for calling iemCImplRaiseDivideError().
4251 *
4252 * This enables us to add/remove arguments and force different levels of
4253 * inlining as we wish.
4254 *
4255 * @return Strict VBox status code.
4256 */
4257#define IEMOP_RAISE_DIVIDE_ERROR() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseDivideError)
4258IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
4259{
4260 NOREF(cbInstr);
4261 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4262}
4263
4264
4265/**
4266 * Macro for calling iemCImplRaiseInvalidLockPrefix().
4267 *
4268 * This enables us to add/remove arguments and force different levels of
4269 * inlining as we wish.
4270 *
4271 * @return Strict VBox status code.
4272 */
4273#define IEMOP_RAISE_INVALID_LOCK_PREFIX() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidLockPrefix)
4274IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
4275{
4276 NOREF(cbInstr);
4277 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4278}
4279
4280
4281/**
4282 * Macro for calling iemCImplRaiseInvalidOpcode().
4283 *
4284 * This enables us to add/remove arguments and force different levels of
4285 * inlining as we wish.
4286 *
4287 * @return Strict VBox status code.
4288 */
4289#define IEMOP_RAISE_INVALID_OPCODE() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidOpcode)
4290IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
4291{
4292 NOREF(cbInstr);
4293 return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4294}
4295
4296
4297/** @} */
4298
4299
4300/*
4301 *
4302  * Helper routines.
4303  * Helper routines.
4304  * Helper routines.
4305 *
4306 */
4307
4308/**
4309 * Recalculates the effective operand size.
4310 *
4311 * @param pIemCpu The IEM state.
4312 */
4313IEM_STATIC void iemRecalEffOpSize(PIEMCPU pIemCpu)
4314{
4315 switch (pIemCpu->enmCpuMode)
4316 {
4317 case IEMMODE_16BIT:
4318 pIemCpu->enmEffOpSize = pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_32BIT : IEMMODE_16BIT;
4319 break;
4320 case IEMMODE_32BIT:
4321 pIemCpu->enmEffOpSize = pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_16BIT : IEMMODE_32BIT;
4322 break;
4323 case IEMMODE_64BIT:
4324 switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP))
4325 {
4326 case 0:
4327 pIemCpu->enmEffOpSize = pIemCpu->enmDefOpSize;
4328 break;
4329 case IEM_OP_PRF_SIZE_OP:
4330 pIemCpu->enmEffOpSize = IEMMODE_16BIT;
4331 break;
4332 case IEM_OP_PRF_SIZE_REX_W:
4333 case IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP:
4334 pIemCpu->enmEffOpSize = IEMMODE_64BIT;
4335 break;
4336 }
4337 break;
4338 default:
4339 AssertFailed();
4340 }
4341}
4342
4343
4344/**
4345 * Sets the default operand size to 64-bit and recalculates the effective
4346 * operand size.
4347 *
4348 * @param pIemCpu The IEM state.
4349 */
4350IEM_STATIC void iemRecalEffOpSize64Default(PIEMCPU pIemCpu)
4351{
4352 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
4353 pIemCpu->enmDefOpSize = IEMMODE_64BIT;
4354 if ((pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP)) != IEM_OP_PRF_SIZE_OP)
4355 pIemCpu->enmEffOpSize = IEMMODE_64BIT;
4356 else
4357 pIemCpu->enmEffOpSize = IEMMODE_16BIT;
4358}
4359
4360
4361/*
4362 *
4363 * Common opcode decoders.
4364 * Common opcode decoders.
4365 * Common opcode decoders.
4366 *
4367 */
4368//#include <iprt/mem.h>
4369
4370/**
4371 * Used to add extra details about a stub case.
4372 * @param pIemCpu The IEM per CPU state.
4373 */
4374IEM_STATIC void iemOpStubMsg2(PIEMCPU pIemCpu)
4375{
4376#if defined(LOG_ENABLED) && defined(IN_RING3)
4377 PVM pVM = IEMCPU_TO_VM(pIemCpu);
4378 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
4379 char szRegs[4096];
4380 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
4381 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
4382 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
4383 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
4384 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
4385 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
4386 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
4387 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
4388 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
4389 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
4390 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
4391 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
4392 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
4393 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
4394 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
4395 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
4396 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
4397 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
4398 " efer=%016VR{efer}\n"
4399 " pat=%016VR{pat}\n"
4400 " sf_mask=%016VR{sf_mask}\n"
4401 "krnl_gs_base=%016VR{krnl_gs_base}\n"
4402 " lstar=%016VR{lstar}\n"
4403 " star=%016VR{star} cstar=%016VR{cstar}\n"
4404 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
4405 );
4406
4407 char szInstr[256];
4408 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
4409 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
4410 szInstr, sizeof(szInstr), NULL);
4411
4412 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
4413#else
4414     RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", pIemCpu->CTX_SUFF(pCtx)->cs.Sel, pIemCpu->CTX_SUFF(pCtx)->rip);
4415#endif
4416}
4417
4418/**
4419 * Complains about a stub.
4420 *
4421 * Providing two versions of this macro, one for daily use and one for use when
4422 * working on IEM.
4423 */
4424#if 0
4425# define IEMOP_BITCH_ABOUT_STUB() \
4426 do { \
4427 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__); \
4428 iemOpStubMsg2(pIemCpu); \
4429 RTAssertPanic(); \
4430 } while (0)
4431#else
4432# define IEMOP_BITCH_ABOUT_STUB() Log(("Stub: %s (line %d)\n", __FUNCTION__, __LINE__));
4433#endif
4434
4435/** Stubs an opcode. */
4436#define FNIEMOP_STUB(a_Name) \
4437 FNIEMOP_DEF(a_Name) \
4438 { \
4439 IEMOP_BITCH_ABOUT_STUB(); \
4440 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
4441 } \
4442 typedef int ignore_semicolon
4443
4444/** Stubs an opcode. */
4445#define FNIEMOP_STUB_1(a_Name, a_Type0, a_Name0) \
4446 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
4447 { \
4448 IEMOP_BITCH_ABOUT_STUB(); \
4449 NOREF(a_Name0); \
4450 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
4451 } \
4452 typedef int ignore_semicolon
4453
4454/** Stubs an opcode which currently should raise \#UD. */
4455#define FNIEMOP_UD_STUB(a_Name) \
4456 FNIEMOP_DEF(a_Name) \
4457 { \
4458 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
4459 return IEMOP_RAISE_INVALID_OPCODE(); \
4460 } \
4461 typedef int ignore_semicolon
4462
4463/** Stubs an opcode which currently should raise \#UD. */
4464#define FNIEMOP_UD_STUB_1(a_Name, a_Type0, a_Name0) \
4465 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
4466 { \
4467 NOREF(a_Name0); \
4468 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
4469 return IEMOP_RAISE_INVALID_OPCODE(); \
4470 } \
4471 typedef int ignore_semicolon
4472
4473
4474
4475/** @name Register Access.
4476 * @{
4477 */
4478
4479/**
4480 * Gets a reference (pointer) to the specified hidden segment register.
4481 *
4482 * @returns Hidden register reference.
4483 * @param pIemCpu The per CPU data.
4484 * @param iSegReg The segment register.
4485 */
4486IEM_STATIC PCPUMSELREG iemSRegGetHid(PIEMCPU pIemCpu, uint8_t iSegReg)
4487{
4488 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4489 PCPUMSELREG pSReg;
4490 switch (iSegReg)
4491 {
4492 case X86_SREG_ES: pSReg = &pCtx->es; break;
4493 case X86_SREG_CS: pSReg = &pCtx->cs; break;
4494 case X86_SREG_SS: pSReg = &pCtx->ss; break;
4495 case X86_SREG_DS: pSReg = &pCtx->ds; break;
4496 case X86_SREG_FS: pSReg = &pCtx->fs; break;
4497 case X86_SREG_GS: pSReg = &pCtx->gs; break;
4498 default:
4499 AssertFailedReturn(NULL);
4500 }
4501#ifdef VBOX_WITH_RAW_MODE_NOT_R0
4502 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pSReg))
4503 CPUMGuestLazyLoadHiddenSelectorReg(IEMCPU_TO_VMCPU(pIemCpu), pSReg);
4504#else
4505 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pSReg));
4506#endif
4507 return pSReg;
4508}
4509
4510
4511/**
4512 * Gets a reference (pointer) to the specified segment register (the selector
4513 * value).
4514 *
4515 * @returns Pointer to the selector variable.
4516 * @param pIemCpu The per CPU data.
4517 * @param iSegReg The segment register.
4518 */
4519IEM_STATIC uint16_t *iemSRegRef(PIEMCPU pIemCpu, uint8_t iSegReg)
4520{
4521 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4522 switch (iSegReg)
4523 {
4524 case X86_SREG_ES: return &pCtx->es.Sel;
4525 case X86_SREG_CS: return &pCtx->cs.Sel;
4526 case X86_SREG_SS: return &pCtx->ss.Sel;
4527 case X86_SREG_DS: return &pCtx->ds.Sel;
4528 case X86_SREG_FS: return &pCtx->fs.Sel;
4529 case X86_SREG_GS: return &pCtx->gs.Sel;
4530 }
4531 AssertFailedReturn(NULL);
4532}
4533
4534
4535/**
4536 * Fetches the selector value of a segment register.
4537 *
4538 * @returns The selector value.
4539 * @param pIemCpu The per CPU data.
4540 * @param iSegReg The segment register.
4541 */
4542IEM_STATIC uint16_t iemSRegFetchU16(PIEMCPU pIemCpu, uint8_t iSegReg)
4543{
4544 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4545 switch (iSegReg)
4546 {
4547 case X86_SREG_ES: return pCtx->es.Sel;
4548 case X86_SREG_CS: return pCtx->cs.Sel;
4549 case X86_SREG_SS: return pCtx->ss.Sel;
4550 case X86_SREG_DS: return pCtx->ds.Sel;
4551 case X86_SREG_FS: return pCtx->fs.Sel;
4552 case X86_SREG_GS: return pCtx->gs.Sel;
4553 }
4554 AssertFailedReturn(0xffff);
4555}
4556
4557
4558/**
4559 * Gets a reference (pointer) to the specified general register.
4560 *
4561 * @returns Register reference.
4562 * @param pIemCpu The per CPU data.
4563 * @param iReg The general register.
4564 */
4565IEM_STATIC void *iemGRegRef(PIEMCPU pIemCpu, uint8_t iReg)
4566{
4567 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4568 switch (iReg)
4569 {
4570 case X86_GREG_xAX: return &pCtx->rax;
4571 case X86_GREG_xCX: return &pCtx->rcx;
4572 case X86_GREG_xDX: return &pCtx->rdx;
4573 case X86_GREG_xBX: return &pCtx->rbx;
4574 case X86_GREG_xSP: return &pCtx->rsp;
4575 case X86_GREG_xBP: return &pCtx->rbp;
4576 case X86_GREG_xSI: return &pCtx->rsi;
4577 case X86_GREG_xDI: return &pCtx->rdi;
4578 case X86_GREG_x8: return &pCtx->r8;
4579 case X86_GREG_x9: return &pCtx->r9;
4580 case X86_GREG_x10: return &pCtx->r10;
4581 case X86_GREG_x11: return &pCtx->r11;
4582 case X86_GREG_x12: return &pCtx->r12;
4583 case X86_GREG_x13: return &pCtx->r13;
4584 case X86_GREG_x14: return &pCtx->r14;
4585 case X86_GREG_x15: return &pCtx->r15;
4586 }
4587 AssertFailedReturn(NULL);
4588}
4589
4590
4591/**
4592 * Gets a reference (pointer) to the specified 8-bit general register.
4593 *
4594 * Because of AH, CH, DH and BH we cannot use iemGRegRef directly here.
4595 *
4596 * @returns Register reference.
4597 * @param pIemCpu The per CPU data.
4598 * @param iReg The register.
4599 */
4600IEM_STATIC uint8_t *iemGRegRefU8(PIEMCPU pIemCpu, uint8_t iReg)
4601{
4602 if (pIemCpu->fPrefixes & IEM_OP_PRF_REX)
4603 return (uint8_t *)iemGRegRef(pIemCpu, iReg);
4604
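    /* Without a REX prefix, encodings 4-7 select the legacy high byte registers
       AH, CH, DH and BH, i.e. byte 1 of rAX/rCX/rDX/rBX - hence the 'iReg & 3'
       and the pointer increment below. */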
4605 uint8_t *pu8Reg = (uint8_t *)iemGRegRef(pIemCpu, iReg & 3);
4606 if (iReg >= 4)
4607 pu8Reg++;
4608 return pu8Reg;
4609}
4610
4611
4612/**
4613  * Fetches the value of an 8-bit general register.
4614 *
4615 * @returns The register value.
4616 * @param pIemCpu The per CPU data.
4617 * @param iReg The register.
4618 */
4619IEM_STATIC uint8_t iemGRegFetchU8(PIEMCPU pIemCpu, uint8_t iReg)
4620{
4621 uint8_t const *pbSrc = iemGRegRefU8(pIemCpu, iReg);
4622 return *pbSrc;
4623}
4624
4625
4626/**
4627 * Fetches the value of a 16-bit general register.
4628 *
4629 * @returns The register value.
4630 * @param pIemCpu The per CPU data.
4631 * @param iReg The register.
4632 */
4633IEM_STATIC uint16_t iemGRegFetchU16(PIEMCPU pIemCpu, uint8_t iReg)
4634{
4635 return *(uint16_t *)iemGRegRef(pIemCpu, iReg);
4636}
4637
4638
4639/**
4640 * Fetches the value of a 32-bit general register.
4641 *
4642 * @returns The register value.
4643 * @param pIemCpu The per CPU data.
4644 * @param iReg The register.
4645 */
4646IEM_STATIC uint32_t iemGRegFetchU32(PIEMCPU pIemCpu, uint8_t iReg)
4647{
4648 return *(uint32_t *)iemGRegRef(pIemCpu, iReg);
4649}
4650
4651
4652/**
4653 * Fetches the value of a 64-bit general register.
4654 *
4655 * @returns The register value.
4656 * @param pIemCpu The per CPU data.
4657 * @param iReg The register.
4658 */
4659IEM_STATIC uint64_t iemGRegFetchU64(PIEMCPU pIemCpu, uint8_t iReg)
4660{
4661 return *(uint64_t *)iemGRegRef(pIemCpu, iReg);
4662}
4663
4664
4665/**
4666 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
4667 *
4668 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4669 * segment limit.
4670 *
4671 * @param pIemCpu The per CPU data.
4672 * @param offNextInstr The offset of the next instruction.
4673 */
4674IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS8(PIEMCPU pIemCpu, int8_t offNextInstr)
4675{
4676 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4677 switch (pIemCpu->enmEffOpSize)
4678 {
4679 case IEMMODE_16BIT:
4680 {
4681 uint16_t uNewIp = pCtx->ip + offNextInstr + pIemCpu->offOpcode;
4682 if ( uNewIp > pCtx->cs.u32Limit
4683 && pIemCpu->enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
4684 return iemRaiseGeneralProtectionFault0(pIemCpu);
4685 pCtx->rip = uNewIp;
4686 break;
4687 }
4688
4689 case IEMMODE_32BIT:
4690 {
4691 Assert(pCtx->rip <= UINT32_MAX);
4692 Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
4693
4694 uint32_t uNewEip = pCtx->eip + offNextInstr + pIemCpu->offOpcode;
4695 if (uNewEip > pCtx->cs.u32Limit)
4696 return iemRaiseGeneralProtectionFault0(pIemCpu);
4697 pCtx->rip = uNewEip;
4698 break;
4699 }
4700
4701 case IEMMODE_64BIT:
4702 {
4703 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
4704
4705 uint64_t uNewRip = pCtx->rip + offNextInstr + pIemCpu->offOpcode;
4706 if (!IEM_IS_CANONICAL(uNewRip))
4707 return iemRaiseGeneralProtectionFault0(pIemCpu);
4708 pCtx->rip = uNewRip;
4709 break;
4710 }
4711
4712 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4713 }
4714
4715 pCtx->eflags.Bits.u1RF = 0;
4716 return VINF_SUCCESS;
4717}
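
/*
 * Sketch of the 16-bit case above with made-up values (not from the sources):
 * the new IP is computed in 16-bit arithmetic, so it wraps before the code
 * segment limit check rather than overflowing into the upper RIP bits.
 */
#if 0
static uint16_t ExampleRel8Target16(uint16_t uIp, uint8_t cbInstr, int8_t offRel)
{
    /* E.g. uIp=0xFFFE, cbInstr=2, offRel=-4 -> 0xFFFC; uint16_t arithmetic wraps. */
    return (uint16_t)(uIp + cbInstr + offRel);
}
#endif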
4718
4719
4720/**
4721 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
4722 *
4723 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4724 * segment limit.
4725 *
4726 * @returns Strict VBox status code.
4727 * @param pIemCpu The per CPU data.
4728 * @param offNextInstr The offset of the next instruction.
4729 */
4730IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS16(PIEMCPU pIemCpu, int16_t offNextInstr)
4731{
4732 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4733 Assert(pIemCpu->enmEffOpSize == IEMMODE_16BIT);
4734
4735 uint16_t uNewIp = pCtx->ip + offNextInstr + pIemCpu->offOpcode;
4736 if ( uNewIp > pCtx->cs.u32Limit
4737 && pIemCpu->enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
4738 return iemRaiseGeneralProtectionFault0(pIemCpu);
4739 /** @todo Test 16-bit jump in 64-bit mode. possible? */
4740 pCtx->rip = uNewIp;
4741 pCtx->eflags.Bits.u1RF = 0;
4742
4743 return VINF_SUCCESS;
4744}
4745
4746
4747/**
4748 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
4749 *
4750 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4751 * segment limit.
4752 *
4753 * @returns Strict VBox status code.
4754 * @param pIemCpu The per CPU data.
4755 * @param offNextInstr The offset of the next instruction.
4756 */
4757IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS32(PIEMCPU pIemCpu, int32_t offNextInstr)
4758{
4759 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4760 Assert(pIemCpu->enmEffOpSize != IEMMODE_16BIT);
4761
4762 if (pIemCpu->enmEffOpSize == IEMMODE_32BIT)
4763 {
4764 Assert(pCtx->rip <= UINT32_MAX); Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
4765
4766 uint32_t uNewEip = pCtx->eip + offNextInstr + pIemCpu->offOpcode;
4767 if (uNewEip > pCtx->cs.u32Limit)
4768 return iemRaiseGeneralProtectionFault0(pIemCpu);
4769 pCtx->rip = uNewEip;
4770 }
4771 else
4772 {
4773 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
4774
4775 uint64_t uNewRip = pCtx->rip + offNextInstr + pIemCpu->offOpcode;
4776 if (!IEM_IS_CANONICAL(uNewRip))
4777 return iemRaiseGeneralProtectionFault0(pIemCpu);
4778 pCtx->rip = uNewRip;
4779 }
4780 pCtx->eflags.Bits.u1RF = 0;
4781 return VINF_SUCCESS;
4782}
4783
4784
4785/**
4786 * Performs a near jump to the specified address.
4787 *
4788 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4789 * segment limit.
4790 *
4791 * @param pIemCpu The per CPU data.
4792 * @param uNewRip The new RIP value.
4793 */
4794IEM_STATIC VBOXSTRICTRC iemRegRipJump(PIEMCPU pIemCpu, uint64_t uNewRip)
4795{
4796 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4797 switch (pIemCpu->enmEffOpSize)
4798 {
4799 case IEMMODE_16BIT:
4800 {
4801 Assert(uNewRip <= UINT16_MAX);
4802 if ( uNewRip > pCtx->cs.u32Limit
4803 && pIemCpu->enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
4804 return iemRaiseGeneralProtectionFault0(pIemCpu);
4805 /** @todo Test 16-bit jump in 64-bit mode. */
4806 pCtx->rip = uNewRip;
4807 break;
4808 }
4809
4810 case IEMMODE_32BIT:
4811 {
4812 Assert(uNewRip <= UINT32_MAX);
4813 Assert(pCtx->rip <= UINT32_MAX);
4814 Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);
4815
4816 if (uNewRip > pCtx->cs.u32Limit)
4817 return iemRaiseGeneralProtectionFault0(pIemCpu);
4818 pCtx->rip = uNewRip;
4819 break;
4820 }
4821
4822 case IEMMODE_64BIT:
4823 {
4824 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);
4825
4826 if (!IEM_IS_CANONICAL(uNewRip))
4827 return iemRaiseGeneralProtectionFault0(pIemCpu);
4828 pCtx->rip = uNewRip;
4829 break;
4830 }
4831
4832 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4833 }
4834
4835 pCtx->eflags.Bits.u1RF = 0;
4836 return VINF_SUCCESS;
4837}
4838
4839
4840/**
4841 * Get the address of the top of the stack.
4842 *
4843 * @param pIemCpu The per CPU data.
4844 * @param pCtx The CPU context from which SP/ESP/RSP should be
4845 * read.
4846 */
4847DECLINLINE(RTGCPTR) iemRegGetEffRsp(PCIEMCPU pIemCpu, PCCPUMCTX pCtx)
4848{
4849 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
4850 return pCtx->rsp;
4851 if (pCtx->ss.Attr.n.u1DefBig)
4852 return pCtx->esp;
4853 return pCtx->sp;
4854}
4855
4856
4857/**
4858 * Updates the RIP/EIP/IP to point to the next instruction.
4859 *
4860 * This function leaves the EFLAGS.RF flag alone.
4861 *
4862 * @param pIemCpu The per CPU data.
4863 * @param cbInstr The number of bytes to add.
4864 */
4865IEM_STATIC void iemRegAddToRipKeepRF(PIEMCPU pIemCpu, uint8_t cbInstr)
4866{
4867 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4868 switch (pIemCpu->enmCpuMode)
4869 {
4870 case IEMMODE_16BIT:
4871 Assert(pCtx->rip <= UINT16_MAX);
4872 pCtx->eip += cbInstr;
4873 pCtx->eip &= UINT32_C(0xffff);
4874 break;
4875
4876 case IEMMODE_32BIT:
4877 pCtx->eip += cbInstr;
4878 Assert(pCtx->rip <= UINT32_MAX);
4879 break;
4880
4881 case IEMMODE_64BIT:
4882 pCtx->rip += cbInstr;
4883 break;
4884 default: AssertFailed();
4885 }
4886}
4887
4888
4889#if 0
4890/**
4891 * Updates the RIP/EIP/IP to point to the next instruction.
4892 *
4893 * @param pIemCpu The per CPU data.
4894 */
4895IEM_STATIC void iemRegUpdateRipKeepRF(PIEMCPU pIemCpu)
4896{
4897 return iemRegAddToRipKeepRF(pIemCpu, pIemCpu->offOpcode);
4898}
4899#endif
4900
4901
4902
4903/**
4904 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
4905 *
4906 * @param pIemCpu The per CPU data.
4907 * @param cbInstr The number of bytes to add.
4908 */
4909IEM_STATIC void iemRegAddToRipAndClearRF(PIEMCPU pIemCpu, uint8_t cbInstr)
4910{
4911 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4912
4913 pCtx->eflags.Bits.u1RF = 0;
4914
4915 /* NB: Must be kept in sync with HM (xxxAdvanceGuestRip). */
4916 switch (pIemCpu->enmCpuMode)
4917 {
4918 /** @todo investigate if EIP or RIP is really incremented. */
4919 case IEMMODE_16BIT:
4920 case IEMMODE_32BIT:
4921 pCtx->eip += cbInstr;
4922 Assert(pCtx->rip <= UINT32_MAX);
4923 break;
4924
4925 case IEMMODE_64BIT:
4926 pCtx->rip += cbInstr;
4927 break;
4928 default: AssertFailed();
4929 }
4930}
4931
4932
4933/**
4934 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
4935 *
4936 * @param pIemCpu The per CPU data.
4937 */
4938IEM_STATIC void iemRegUpdateRipAndClearRF(PIEMCPU pIemCpu)
4939{
4940 return iemRegAddToRipAndClearRF(pIemCpu, pIemCpu->offOpcode);
4941}
4942
4943
4944/**
4945 * Adds to the stack pointer.
4946 *
4947 * @param pIemCpu The per CPU data.
4948 * @param pCtx The CPU context in which SP/ESP/RSP should be
4949 * updated.
4950 * @param cbToAdd The number of bytes to add.
4951 */
4952DECLINLINE(void) iemRegAddToRsp(PCIEMCPU pIemCpu, PCPUMCTX pCtx, uint8_t cbToAdd)
4953{
4954 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
4955 pCtx->rsp += cbToAdd;
4956 else if (pCtx->ss.Attr.n.u1DefBig)
4957 pCtx->esp += cbToAdd;
4958 else
4959 pCtx->sp += cbToAdd;
4960}
4961
4962
4963/**
4964 * Subtracts from the stack pointer.
4965 *
4966 * @param pIemCpu The per CPU data.
4967 * @param pCtx The CPU context in which SP/ESP/RSP should be
4968 * updated.
4969 * @param cbToSub The number of bytes to subtract.
4970 */
4971DECLINLINE(void) iemRegSubFromRsp(PCIEMCPU pIemCpu, PCPUMCTX pCtx, uint8_t cbToSub)
4972{
4973 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
4974 pCtx->rsp -= cbToSub;
4975 else if (pCtx->ss.Attr.n.u1DefBig)
4976 pCtx->esp -= cbToSub;
4977 else
4978 pCtx->sp -= cbToSub;
4979}
4980
4981
4982/**
4983 * Adds to the temporary stack pointer.
4984 *
4985 * @param pIemCpu The per CPU data.
4986 * @param pCtx Where to get the current stack mode.
4987 * @param pTmpRsp The temporary SP/ESP/RSP to update.
4988 * @param cbToAdd The number of bytes to add.
4989 */
4990DECLINLINE(void) iemRegAddToRspEx(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint16_t cbToAdd)
4991{
4992 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
4993 pTmpRsp->u += cbToAdd;
4994 else if (pCtx->ss.Attr.n.u1DefBig)
4995 pTmpRsp->DWords.dw0 += cbToAdd;
4996 else
4997 pTmpRsp->Words.w0 += cbToAdd;
4998}
4999
5000
5001/**
5002 * Subtracts from the temporary stack pointer.
5003 *
5004 * @param pIemCpu The per CPU data.
5005 * @param pCtx Where to get the current stack mode.
5006 * @param pTmpRsp The temporary SP/ESP/RSP to update.
5007 * @param cbToSub The number of bytes to subtract.
5008 * @remarks The @a cbToSub argument *MUST* be 16-bit, iemCImpl_enter is
5009 * expecting that.
5010 */
5011DECLINLINE(void) iemRegSubFromRspEx(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint16_t cbToSub)
5012{
5013 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5014 pTmpRsp->u -= cbToSub;
5015 else if (pCtx->ss.Attr.n.u1DefBig)
5016 pTmpRsp->DWords.dw0 -= cbToSub;
5017 else
5018 pTmpRsp->Words.w0 -= cbToSub;
5019}
5020
5021
5022/**
5023 * Calculates the effective stack address for a push of the specified size as
5024 * well as the new RSP value (upper bits may be masked).
5025 *
5026 * @returns Effective stack address for the push.
5027 * @param pIemCpu The IEM per CPU data.
5028 * @param pCtx Where to get the current stack mode.
5029 * @param cbItem The size of the stack item to push.
5030 * @param puNewRsp Where to return the new RSP value.
5031 */
5032DECLINLINE(RTGCPTR) iemRegGetRspForPush(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
5033{
5034 RTUINT64U uTmpRsp;
5035 RTGCPTR GCPtrTop;
5036 uTmpRsp.u = pCtx->rsp;
5037
5038 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5039 GCPtrTop = uTmpRsp.u -= cbItem;
5040 else if (pCtx->ss.Attr.n.u1DefBig)
5041 GCPtrTop = uTmpRsp.DWords.dw0 -= cbItem;
5042 else
5043 GCPtrTop = uTmpRsp.Words.w0 -= cbItem;
5044 *puNewRsp = uTmpRsp.u;
5045 return GCPtrTop;
5046}
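
/*
 * Sketch of the 16-bit stack case above (illustrative values, not from the
 * sources): only the low word of RSP moves on a push, the upper bits are left
 * untouched.
 */
#if 0
static uint64_t ExamplePush16BitStack(uint64_t uRsp, uint8_t cbItem)
{
    /* E.g. uRsp=0x00010002, cbItem=4 -> SP wraps to 0xFFFE, giving 0x0001FFFE. */
    RTUINT64U uTmp;
    uTmp.u = uRsp;
    uTmp.Words.w0 -= cbItem;
    return uTmp.u;
}
#endif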
5047
5048
5049/**
5050 * Gets the current stack pointer and calculates the value after a pop of the
5051 * specified size.
5052 *
5053 * @returns Current stack pointer.
5054 * @param pIemCpu The per CPU data.
5055 * @param pCtx Where to get the current stack mode.
5056 * @param cbItem The size of the stack item to pop.
5057 * @param puNewRsp Where to return the new RSP value.
5058 */
5059DECLINLINE(RTGCPTR) iemRegGetRspForPop(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
5060{
5061 RTUINT64U uTmpRsp;
5062 RTGCPTR GCPtrTop;
5063 uTmpRsp.u = pCtx->rsp;
5064
5065 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5066 {
5067 GCPtrTop = uTmpRsp.u;
5068 uTmpRsp.u += cbItem;
5069 }
5070 else if (pCtx->ss.Attr.n.u1DefBig)
5071 {
5072 GCPtrTop = uTmpRsp.DWords.dw0;
5073 uTmpRsp.DWords.dw0 += cbItem;
5074 }
5075 else
5076 {
5077 GCPtrTop = uTmpRsp.Words.w0;
5078 uTmpRsp.Words.w0 += cbItem;
5079 }
5080 *puNewRsp = uTmpRsp.u;
5081 return GCPtrTop;
5082}
5083
5084
5085/**
5086 * Calculates the effective stack address for a push of the specified size as
5087 * well as the new temporary RSP value (upper bits may be masked).
5088 *
5089 * @returns Effective stack address for the push.
5090 * @param pIemCpu The per CPU data.
5091 * @param pCtx Where to get the current stack mode.
5092 * @param pTmpRsp The temporary stack pointer. This is updated.
5093 * @param cbItem The size of the stack item to push.
5094 */
5095DECLINLINE(RTGCPTR) iemRegGetRspForPushEx(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint8_t cbItem)
5096{
5097 RTGCPTR GCPtrTop;
5098
5099 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5100 GCPtrTop = pTmpRsp->u -= cbItem;
5101 else if (pCtx->ss.Attr.n.u1DefBig)
5102 GCPtrTop = pTmpRsp->DWords.dw0 -= cbItem;
5103 else
5104 GCPtrTop = pTmpRsp->Words.w0 -= cbItem;
5105 return GCPtrTop;
5106}
5107
5108
5109/**
5110 * Gets the effective stack address for a pop of the specified size and
5111 * calculates and updates the temporary RSP.
5112 *
5113 * @returns Current stack pointer.
5114 * @param pIemCpu The per CPU data.
5115 * @param pCtx Where to get the current stack mode.
5116 * @param pTmpRsp The temporary stack pointer. This is updated.
5117 * @param cbItem The size of the stack item to pop.
5118 */
5119DECLINLINE(RTGCPTR) iemRegGetRspForPopEx(PCIEMCPU pIemCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint8_t cbItem)
5120{
5121 RTGCPTR GCPtrTop;
5122 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5123 {
5124 GCPtrTop = pTmpRsp->u;
5125 pTmpRsp->u += cbItem;
5126 }
5127 else if (pCtx->ss.Attr.n.u1DefBig)
5128 {
5129 GCPtrTop = pTmpRsp->DWords.dw0;
5130 pTmpRsp->DWords.dw0 += cbItem;
5131 }
5132 else
5133 {
5134 GCPtrTop = pTmpRsp->Words.w0;
5135 pTmpRsp->Words.w0 += cbItem;
5136 }
5137 return GCPtrTop;
5138}
5139
5140/** @} */
5141
5142
5143/** @name FPU access and helpers.
5144 *
5145 * @{
5146 */
5147
5148
5149/**
5150 * Hook for preparing to use the host FPU.
5151 *
5152 * This is necessary in ring-0 and raw-mode context.
5153 *
5154 * @param pIemCpu The IEM per CPU data.
5155 */
5156DECLINLINE(void) iemFpuPrepareUsage(PIEMCPU pIemCpu)
5157{
5158#ifdef IN_RING3
5159 NOREF(pIemCpu);
5160#else
5161/** @todo RZ: FIXME */
5162//# error "Implement me"
5163#endif
5164}
5165
5166
5167/**
5168 * Hook for preparing to use the host FPU for SSE instructions.
5169 *
5170 * This is necessary in ring-0 and raw-mode context.
5171 *
5172 * @param pIemCpu The IEM per CPU data.
5173 */
5174DECLINLINE(void) iemFpuPrepareUsageSse(PIEMCPU pIemCpu)
5175{
5176 iemFpuPrepareUsage(pIemCpu);
5177}
5178
5179
5180/**
5181 * Stores a QNaN value into a FPU register.
5182 *
5183 * @param pReg Pointer to the register.
5184 */
5185DECLINLINE(void) iemFpuStoreQNan(PRTFLOAT80U pReg)
5186{
5187 pReg->au32[0] = UINT32_C(0x00000000);
5188 pReg->au32[1] = UINT32_C(0xc0000000);
5189 pReg->au16[4] = UINT16_C(0xffff);
5190}
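
/*
 * Sketch of what the bit pattern above amounts to (deduced from the bit layout,
 * not stated elsewhere in this file): sign+exponent word 0xffff and mantissa
 * 0xc000000000000000 form the 80-bit "real indefinite" QNaN.
 */
#if 0
static bool ExampleIsRealIndefinite(PCRTFLOAT80U pReg)
{
    return pReg->au32[0] == UINT32_C(0x00000000)
        && pReg->au32[1] == UINT32_C(0xc0000000)
        && pReg->au16[4] == UINT16_C(0xffff);
}
#endif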
5191
5192
5193/**
5194 * Updates the FOP, FPU.CS and FPUIP registers.
5195 *
5196 * @param pIemCpu The IEM per CPU data.
5197 * @param pCtx The CPU context.
5198 * @param pFpuCtx The FPU context.
5199 */
5200DECLINLINE(void) iemFpuUpdateOpcodeAndIpWorker(PIEMCPU pIemCpu, PCPUMCTX pCtx, PX86FXSTATE pFpuCtx)
5201{
5202 pFpuCtx->FOP = pIemCpu->abOpcode[pIemCpu->offFpuOpcode]
5203 | ((uint16_t)(pIemCpu->abOpcode[pIemCpu->offFpuOpcode - 1] & 0x7) << 8);
5204 /** @todo x87.CS and FPUIP need to be kept separately. */
5205 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
5206 {
5207 /** @todo Testcase: we are making assumptions here about how FPUIP and FPUDP
5208 * are handled in real mode, based on the fnsave and fnstenv images. */
5209 pFpuCtx->CS = 0;
5210 pFpuCtx->FPUIP = pCtx->eip | ((uint32_t)pCtx->cs.Sel << 4);
5211 }
5212 else
5213 {
5214 pFpuCtx->CS = pCtx->cs.Sel;
5215 pFpuCtx->FPUIP = pCtx->rip;
5216 }
5217}
5218
5219
5220/**
5221 * Updates the x87.DS and FPUDP registers.
5222 *
5223 * @param pIemCpu The IEM per CPU data.
5224 * @param pCtx The CPU context.
5225 * @param pFpuCtx The FPU context.
5226 * @param iEffSeg The effective segment register.
5227 * @param GCPtrEff The effective address relative to @a iEffSeg.
5228 */
5229DECLINLINE(void) iemFpuUpdateDP(PIEMCPU pIemCpu, PCPUMCTX pCtx, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5230{
5231 RTSEL sel;
5232 switch (iEffSeg)
5233 {
5234 case X86_SREG_DS: sel = pCtx->ds.Sel; break;
5235 case X86_SREG_SS: sel = pCtx->ss.Sel; break;
5236 case X86_SREG_CS: sel = pCtx->cs.Sel; break;
5237 case X86_SREG_ES: sel = pCtx->es.Sel; break;
5238 case X86_SREG_FS: sel = pCtx->fs.Sel; break;
5239 case X86_SREG_GS: sel = pCtx->gs.Sel; break;
5240 default:
5241 AssertMsgFailed(("%d\n", iEffSeg));
5242 sel = pCtx->ds.Sel;
5243 }
5244 /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
5245 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
5246 {
5247 pFpuCtx->DS = 0;
5248 pFpuCtx->FPUDP = (uint32_t)GCPtrEff | ((uint32_t)sel << 4);
5249 }
5250 else
5251 {
5252 pFpuCtx->DS = sel;
5253 pFpuCtx->FPUDP = GCPtrEff;
5254 }
5255}
5256
5257
5258/**
5259 * Rotates the stack registers in the push direction.
5260 *
5261 * @param pFpuCtx The FPU context.
5262 * @remarks This is a complete waste of time, but fxsave stores the registers in
5263 * stack order.
5264 */
5265DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
5266{
5267 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
5268 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
5269 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
5270 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
5271 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
5272 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
5273 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
5274 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
5275 pFpuCtx->aRegs[0].r80 = r80Tmp;
5276}
5277
5278
5279/**
5280 * Rotates the stack registers in the pop direction.
5281 *
5282 * @param pFpuCtx The FPU context.
5283 * @remarks This is a complete waste of time, but fxsave stores the registers in
5284 * stack order.
5285 */
5286DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
5287{
5288 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
5289 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
5290 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
5291 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
5292 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
5293 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
5294 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
5295 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
5296 pFpuCtx->aRegs[7].r80 = r80Tmp;
5297}
5298
5299
5300/**
5301 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
5302 * exception prevents it.
5303 *
5304 * @param pIemCpu The IEM per CPU data.
5305 * @param pResult The FPU operation result to push.
5306 * @param pFpuCtx The FPU context.
5307 */
5308IEM_STATIC void iemFpuMaybePushResult(PIEMCPU pIemCpu, PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx)
5309{
5310 /* Update FSW and bail if there are pending exceptions afterwards. */
5311 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
5312 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
5313 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
5314 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
5315 {
5316 pFpuCtx->FSW = fFsw;
5317 return;
5318 }
5319
5320 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
5321 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
5322 {
5323 /* All is fine, push the actual value. */
5324 pFpuCtx->FTW |= RT_BIT(iNewTop);
5325 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
5326 }
5327 else if (pFpuCtx->FCW & X86_FCW_IM)
5328 {
5329 /* Masked stack overflow, push QNaN. */
5330 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
5331 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5332 }
5333 else
5334 {
5335 /* Raise stack overflow, don't push anything. */
5336 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
5337 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
5338 return;
5339 }
5340
5341 fFsw &= ~X86_FSW_TOP_MASK;
5342 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
5343 pFpuCtx->FSW = fFsw;
5344
5345 iemFpuRotateStackPush(pFpuCtx);
5346}
5347
5348
5349/**
5350 * Stores a result in a FPU register and updates the FSW and FTW.
5351 *
5352 * @param pFpuCtx The FPU context.
5353 * @param pResult The result to store.
5354 * @param iStReg Which FPU register to store it in.
5355 */
5356IEM_STATIC void iemFpuStoreResultOnly(PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg)
5357{
5358 Assert(iStReg < 8);
5359 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
5360 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5361 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_TOP_MASK;
5362 pFpuCtx->FTW |= RT_BIT(iReg);
5363 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
5364}
5365
5366
5367/**
5368 * Only updates the FPU status word (FSW) with the result of the current
5369 * instruction.
5370 *
5371 * @param pFpuCtx The FPU context.
5372 * @param u16FSW The FSW output of the current instruction.
5373 */
5374IEM_STATIC void iemFpuUpdateFSWOnly(PX86FXSTATE pFpuCtx, uint16_t u16FSW)
5375{
5376 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5377 pFpuCtx->FSW |= u16FSW & ~X86_FSW_TOP_MASK;
5378}
5379
5380
5381/**
5382 * Pops one item off the FPU stack if no pending exception prevents it.
5383 *
5384 * @param pFpuCtx The FPU context.
5385 */
5386IEM_STATIC void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx)
5387{
5388 /* Check pending exceptions. */
5389 uint16_t uFSW = pFpuCtx->FSW;
5390 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
5391 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
5392 return;
5393
5394 /* TOP--. */
5395 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
5396 uFSW &= ~X86_FSW_TOP_MASK;
5397 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
5398 pFpuCtx->FSW = uFSW;
5399
5400 /* Mark the previous ST0 as empty. */
5401 iOldTop >>= X86_FSW_TOP_SHIFT;
5402 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
5403
5404 /* Rotate the registers. */
5405 iemFpuRotateStackPop(pFpuCtx);
5406}
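
/*
 * Sketch of the TOP arithmetic above (illustrative): 9 is congruent to 1 modulo
 * 8, so the masked addition is simply an increment of the 3-bit TOP field,
 * e.g. TOP=7 becomes TOP=0.
 */
#if 0
static uint16_t ExamplePopTop(uint16_t uFsw)
{
    uint16_t iTop = X86_FSW_TOP_GET(uFsw);  /* e.g. 7 */
    iTop = (iTop + 1) & X86_FSW_TOP_SMASK;  /* -> 0, same result as the +9 variant above */
    return (uint16_t)((uFsw & ~X86_FSW_TOP_MASK) | (iTop << X86_FSW_TOP_SHIFT));
}
#endif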
5407
5408
5409/**
5410 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
5411 *
5412 * @param pIemCpu The IEM per CPU data.
5413 * @param pResult The FPU operation result to push.
5414 */
5415IEM_STATIC void iemFpuPushResult(PIEMCPU pIemCpu, PIEMFPURESULT pResult)
5416{
5417 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5418 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5419 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5420 iemFpuMaybePushResult(pIemCpu, pResult, pFpuCtx);
5421}
5422
5423
5424/**
5425 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
5426 * and sets FPUDP and FPUDS.
5427 *
5428 * @param pIemCpu The IEM per CPU data.
5429 * @param pResult The FPU operation result to push.
5430 * @param iEffSeg The effective segment register.
5431 * @param GCPtrEff The effective address relative to @a iEffSeg.
5432 */
5433IEM_STATIC void iemFpuPushResultWithMemOp(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5434{
5435 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5436 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5437 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5438 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5439 iemFpuMaybePushResult(pIemCpu, pResult, pFpuCtx);
5440}
5441
5442
5443/**
5444 * Replaces ST0 with the first value and pushes the second onto the FPU stack,
5445 * unless a pending exception prevents it.
5446 *
5447 * @param pIemCpu The IEM per CPU data.
5448 * @param pResult The FPU operation result to store and push.
5449 */
5450IEM_STATIC void iemFpuPushResultTwo(PIEMCPU pIemCpu, PIEMFPURESULTTWO pResult)
5451{
5452 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5453 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5454 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5455
5456 /* Update FSW and bail if there are pending exceptions afterwards. */
5457 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
5458 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
5459 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
5460 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
5461 {
5462 pFpuCtx->FSW = fFsw;
5463 return;
5464 }
5465
5466 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
5467 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
5468 {
5469 /* All is fine, push the actual value. */
5470 pFpuCtx->FTW |= RT_BIT(iNewTop);
5471 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
5472 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
5473 }
5474 else if (pFpuCtx->FCW & X86_FCW_IM)
5475 {
5476 /* Masked stack overflow, push QNaN. */
5477 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
5478 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
5479 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5480 }
5481 else
5482 {
5483 /* Raise stack overflow, don't push anything. */
5484 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
5485 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
5486 return;
5487 }
5488
5489 fFsw &= ~X86_FSW_TOP_MASK;
5490 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
5491 pFpuCtx->FSW = fFsw;
5492
5493 iemFpuRotateStackPush(pFpuCtx);
5494}
5495
5496
5497/**
5498 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
5499 * FOP.
5500 *
5501 * @param pIemCpu The IEM per CPU data.
5502 * @param pResult The result to store.
5503 * @param iStReg Which FPU register to store it in.
5504 */
5505IEM_STATIC void iemFpuStoreResult(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iStReg)
5506{
5507 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5508 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5509 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5510 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
5511}
5512
5513
5514/**
5515 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
5516 * FOP, and then pops the stack.
5517 *
5518 * @param pIemCpu The IEM per CPU data.
5519 * @param pResult The result to store.
5520 * @param iStReg Which FPU register to store it in.
5521 */
5522IEM_STATIC void iemFpuStoreResultThenPop(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iStReg)
5523{
5524 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5525 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5526 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5527 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
5528 iemFpuMaybePopOne(pFpuCtx);
5529}
5530
5531
5532/**
5533 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
5534 * FPUDP, and FPUDS.
5535 *
5536 * @param pIemCpu The IEM per CPU data.
5537 * @param pResult The result to store.
5538 * @param iStReg Which FPU register to store it in.
5539 * @param iEffSeg The effective memory operand selector register.
5540 * @param GCPtrEff The effective memory operand offset.
5541 */
5542IEM_STATIC void iemFpuStoreResultWithMemOp(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iStReg,
5543 uint8_t iEffSeg, RTGCPTR GCPtrEff)
5544{
5545 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5546 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5547 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5548 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5549 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
5550}
5551
5552
5553/**
5554 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
5555 * FPUDP, and FPUDS, and then pops the stack.
5556 *
5557 * @param pIemCpu The IEM per CPU data.
5558 * @param pResult The result to store.
5559 * @param iStReg Which FPU register to store it in.
5560 * @param iEffSeg The effective memory operand selector register.
5561 * @param GCPtrEff The effective memory operand offset.
5562 */
5563IEM_STATIC void iemFpuStoreResultWithMemOpThenPop(PIEMCPU pIemCpu, PIEMFPURESULT pResult,
5564 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5565{
5566 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5567 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5568 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5569 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5570 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
5571 iemFpuMaybePopOne(pFpuCtx);
5572}
5573
5574
5575/**
5576 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
5577 *
5578 * @param pIemCpu The IEM per CPU data.
5579 */
5580IEM_STATIC void iemFpuUpdateOpcodeAndIp(PIEMCPU pIemCpu)
5581{
5582 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5583 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5584 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5585}
5586
5587
5588/**
5589 * Marks the specified stack register as free (for FFREE).
5590 *
5591 * @param pIemCpu The IEM per CPU data.
5592 * @param iStReg The register to free.
5593 */
5594IEM_STATIC void iemFpuStackFree(PIEMCPU pIemCpu, uint8_t iStReg)
5595{
5596 Assert(iStReg < 8);
5597 PX86FXSTATE pFpuCtx = &pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87;
5598 uint8_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
5599 pFpuCtx->FTW &= ~RT_BIT(iReg);
5600}
5601
5602
5603/**
5604 * Increments FSW.TOP, i.e. pops an item off the stack without freeing it.
5605 *
5606 * @param pIemCpu The IEM per CPU data.
5607 */
5608IEM_STATIC void iemFpuStackIncTop(PIEMCPU pIemCpu)
5609{
5610 PX86FXSTATE pFpuCtx = &pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87;
5611 uint16_t uFsw = pFpuCtx->FSW;
5612 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
5613 uTop = (uTop + (1 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
5614 uFsw &= ~X86_FSW_TOP_MASK;
5615 uFsw |= uTop;
5616 pFpuCtx->FSW = uFsw;
5617}
5618
5619
5620/**
5621 * Decrements FSW.TOP, i.e. pushes an item onto the stack without storing anything.
5622 *
5623 * @param pIemCpu The IEM per CPU data.
5624 */
5625IEM_STATIC void iemFpuStackDecTop(PIEMCPU pIemCpu)
5626{
5627 PX86FXSTATE pFpuCtx = &pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87;
5628 uint16_t uFsw = pFpuCtx->FSW;
5629 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
5630 uTop = (uTop + (7 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
5631 uFsw &= ~X86_FSW_TOP_MASK;
5632 uFsw |= uTop;
5633 pFpuCtx->FSW = uFsw;
5634}
5635
5636
5637/**
5638 * Updates the FSW, FOP, FPUIP, and FPUCS.
5639 *
5640 * @param pIemCpu The IEM per CPU data.
5641 * @param u16FSW The FSW from the current instruction.
5642 */
5643IEM_STATIC void iemFpuUpdateFSW(PIEMCPU pIemCpu, uint16_t u16FSW)
5644{
5645 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5646 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5647 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5648 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
5649}
5650
5651
5652/**
5653 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
5654 *
5655 * @param pIemCpu The IEM per CPU data.
5656 * @param u16FSW The FSW from the current instruction.
5657 */
5658IEM_STATIC void iemFpuUpdateFSWThenPop(PIEMCPU pIemCpu, uint16_t u16FSW)
5659{
5660 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5661 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5662 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5663 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
5664 iemFpuMaybePopOne(pFpuCtx);
5665}
5666
5667
5668/**
5669 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
5670 *
5671 * @param pIemCpu The IEM per CPU data.
5672 * @param u16FSW The FSW from the current instruction.
5673 * @param iEffSeg The effective memory operand selector register.
5674 * @param GCPtrEff The effective memory operand offset.
5675 */
5676IEM_STATIC void iemFpuUpdateFSWWithMemOp(PIEMCPU pIemCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5677{
5678 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5679 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5680 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5681 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5682 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
5683}
5684
5685
5686/**
5687 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
5688 *
5689 * @param pIemCpu The IEM per CPU data.
5690 * @param u16FSW The FSW from the current instruction.
5691 */
5692IEM_STATIC void iemFpuUpdateFSWThenPopPop(PIEMCPU pIemCpu, uint16_t u16FSW)
5693{
5694 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5695 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5696 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5697 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
5698 iemFpuMaybePopOne(pFpuCtx);
5699 iemFpuMaybePopOne(pFpuCtx);
5700}
5701
5702
5703/**
5704 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
5705 *
5706 * @param pIemCpu The IEM per CPU data.
5707 * @param u16FSW The FSW from the current instruction.
5708 * @param iEffSeg The effective memory operand selector register.
5709 * @param GCPtrEff The effective memory operand offset.
5710 */
5711IEM_STATIC void iemFpuUpdateFSWWithMemOpThenPop(PIEMCPU pIemCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5712{
5713 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5714 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5715 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5716 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5717 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
5718 iemFpuMaybePopOne(pFpuCtx);
5719}
5720
5721
5722/**
5723 * Worker routine for raising an FPU stack underflow exception.
5724 *
5725 * @param pIemCpu The IEM per CPU data.
5726 * @param pFpuCtx The FPU context.
5727 * @param iStReg The stack register being accessed.
5728 */
5729IEM_STATIC void iemFpuStackUnderflowOnly(PIEMCPU pIemCpu, PX86FXSTATE pFpuCtx, uint8_t iStReg)
5730{
5731 Assert(iStReg < 8 || iStReg == UINT8_MAX);
5732 if (pFpuCtx->FCW & X86_FCW_IM)
5733 {
5734 /* Masked underflow. */
5735 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5736 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5737 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
5738 if (iStReg != UINT8_MAX)
5739 {
5740 pFpuCtx->FTW |= RT_BIT(iReg);
5741 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
5742 }
5743 }
5744 else
5745 {
5746 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5747 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5748 }
5749}
5750
5751
5752/**
5753 * Raises a FPU stack underflow exception.
5754 *
5755 * @param pIemCpu The IEM per CPU data.
5756 * @param iStReg The destination register that should be loaded
5757 * with QNaN if \#IS is not masked. Specify
5758 * UINT8_MAX if none (like for fcom).
5759 */
5760DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflow(PIEMCPU pIemCpu, uint8_t iStReg)
5761{
5762 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5763 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5764 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5765 iemFpuStackUnderflowOnly(pIemCpu, pFpuCtx, iStReg);
5766}
5767
5768
5769DECL_NO_INLINE(IEM_STATIC, void)
5770iemFpuStackUnderflowWithMemOp(PIEMCPU pIemCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5771{
5772 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5773 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5774 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5775 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5776 iemFpuStackUnderflowOnly(pIemCpu, pFpuCtx, iStReg);
5777}
5778
5779
5780DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPop(PIEMCPU pIemCpu, uint8_t iStReg)
5781{
5782 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5783 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5784 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5785 iemFpuStackUnderflowOnly(pIemCpu, pFpuCtx, iStReg);
5786 iemFpuMaybePopOne(pFpuCtx);
5787}
5788
5789
5790DECL_NO_INLINE(IEM_STATIC, void)
5791iemFpuStackUnderflowWithMemOpThenPop(PIEMCPU pIemCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5792{
5793 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5794 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5795 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5796 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5797 iemFpuStackUnderflowOnly(pIemCpu, pFpuCtx, iStReg);
5798 iemFpuMaybePopOne(pFpuCtx);
5799}
5800
5801
5802DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPopPop(PIEMCPU pIemCpu)
5803{
5804 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5805 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5806 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5807 iemFpuStackUnderflowOnly(pIemCpu, pFpuCtx, UINT8_MAX);
5808 iemFpuMaybePopOne(pFpuCtx);
5809 iemFpuMaybePopOne(pFpuCtx);
5810}
5811
5812
5813DECL_NO_INLINE(IEM_STATIC, void)
5814iemFpuStackPushUnderflow(PIEMCPU pIemCpu)
5815{
5816 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5817 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5818 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5819
5820 if (pFpuCtx->FCW & X86_FCW_IM)
5821 {
5822 /* Masked stack underflow - Push QNaN. */
5823 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5824 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5825 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5826 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5827 pFpuCtx->FTW |= RT_BIT(iNewTop);
5828 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5829 iemFpuRotateStackPush(pFpuCtx);
5830 }
5831 else
5832 {
5833 /* Exception pending - don't change TOP or the register stack. */
5834 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5835 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5836 }
5837}
5838
5839
5840DECL_NO_INLINE(IEM_STATIC, void)
5841iemFpuStackPushUnderflowTwo(PIEMCPU pIemCpu)
5842{
5843 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5844 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5845 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5846
5847 if (pFpuCtx->FCW & X86_FCW_IM)
5848 {
5849 /* Masked stack underflow - Push QNaN. */
5850 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5851 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5852 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5853 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5854 pFpuCtx->FTW |= RT_BIT(iNewTop);
5855 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
5856 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5857 iemFpuRotateStackPush(pFpuCtx);
5858 }
5859 else
5860 {
5861 /* Exception pending - don't change TOP or the register stack. */
5862 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5863 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5864 }
5865}
5866
5867
5868/**
5869 * Worker routine for raising an FPU stack overflow exception on a push.
5870 *
5871 * @param pFpuCtx The FPU context.
5872 */
5873IEM_STATIC void iemFpuStackPushOverflowOnly(PX86FXSTATE pFpuCtx)
5874{
5875 if (pFpuCtx->FCW & X86_FCW_IM)
5876 {
5877 /* Masked overflow. */
5878 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5879 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5880 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
5881 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5882 pFpuCtx->FTW |= RT_BIT(iNewTop);
5883 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5884 iemFpuRotateStackPush(pFpuCtx);
5885 }
5886 else
5887 {
5888 /* Exception pending - don't change TOP or the register stack. */
5889 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5890 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5891 }
5892}
5893
5894
5895/**
5896 * Raises a FPU stack overflow exception on a push.
5897 *
5898 * @param pIemCpu The IEM per CPU data.
5899 */
5900DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackPushOverflow(PIEMCPU pIemCpu)
5901{
5902 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5903 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5904 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5905 iemFpuStackPushOverflowOnly(pFpuCtx);
5906}
5907
5908
5909/**
5910 * Raises a FPU stack overflow exception on a push with a memory operand.
5911 *
5912 * @param pIemCpu The IEM per CPU data.
5913 * @param iEffSeg The effective memory operand selector register.
5914 * @param GCPtrEff The effective memory operand offset.
5915 */
5916DECL_NO_INLINE(IEM_STATIC, void)
5917iemFpuStackPushOverflowWithMemOp(PIEMCPU pIemCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5918{
5919 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5920 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
5921 iemFpuUpdateDP(pIemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
5922 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
5923 iemFpuStackPushOverflowOnly(pFpuCtx);
5924}
5925
5926
5927IEM_STATIC int iemFpuStRegNotEmpty(PIEMCPU pIemCpu, uint8_t iStReg)
5928{
5929 PX86FXSTATE pFpuCtx = &pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87;
5930 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
5931 if (pFpuCtx->FTW & RT_BIT(iReg))
5932 return VINF_SUCCESS;
5933 return VERR_NOT_FOUND;
5934}
5935
5936
5937IEM_STATIC int iemFpuStRegNotEmptyRef(PIEMCPU pIemCpu, uint8_t iStReg, PCRTFLOAT80U *ppRef)
5938{
5939 PX86FXSTATE pFpuCtx = &pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87;
5940 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
5941 if (pFpuCtx->FTW & RT_BIT(iReg))
5942 {
5943 *ppRef = &pFpuCtx->aRegs[iStReg].r80;
5944 return VINF_SUCCESS;
5945 }
5946 return VERR_NOT_FOUND;
5947}
5948
5949
5950IEM_STATIC int iemFpu2StRegsNotEmptyRef(PIEMCPU pIemCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0,
5951 uint8_t iStReg1, PCRTFLOAT80U *ppRef1)
5952{
5953 PX86FXSTATE pFpuCtx = &pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87;
5954 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
5955 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
5956 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
5957 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
5958 {
5959 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
5960 *ppRef1 = &pFpuCtx->aRegs[iStReg1].r80;
5961 return VINF_SUCCESS;
5962 }
5963 return VERR_NOT_FOUND;
5964}
5965
5966
5967IEM_STATIC int iemFpu2StRegsNotEmptyRefFirst(PIEMCPU pIemCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0, uint8_t iStReg1)
5968{
5969 PX86FXSTATE pFpuCtx = &pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87;
5970 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
5971 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
5972 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
5973 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
5974 {
5975 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
5976 return VINF_SUCCESS;
5977 }
5978 return VERR_NOT_FOUND;
5979}
5980
5981
5982/**
5983 * Updates the FPU exception status after FCW is changed.
5984 *
5985 * @param pFpuCtx The FPU context.
5986 */
5987IEM_STATIC void iemFpuRecalcExceptionStatus(PX86FXSTATE pFpuCtx)
5988{
5989 uint16_t u16Fsw = pFpuCtx->FSW;
5990 if ((u16Fsw & X86_FSW_XCPT_MASK) & ~(pFpuCtx->FCW & X86_FCW_XCPT_MASK))
5991 u16Fsw |= X86_FSW_ES | X86_FSW_B;
5992 else
5993 u16Fsw &= ~(X86_FSW_ES | X86_FSW_B);
5994 pFpuCtx->FSW = u16Fsw;
5995}
5996
5997
5998/**
5999 * Calculates the full FTW (FPU tag word) for use in FNSTENV and FNSAVE.
6000 *
6001 * @returns The full FTW.
6002 * @param pFpuCtx The FPU context.
6003 */
6004IEM_STATIC uint16_t iemFpuCalcFullFtw(PCX86FXSTATE pFpuCtx)
6005{
6006 uint8_t const u8Ftw = (uint8_t)pFpuCtx->FTW;
6007 uint16_t u16Ftw = 0;
6008 unsigned const iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
6009 for (unsigned iSt = 0; iSt < 8; iSt++)
6010 {
6011 unsigned const iReg = (iSt + iTop) & 7;
6012 if (!(u8Ftw & RT_BIT(iReg)))
6013 u16Ftw |= 3 << (iReg * 2); /* empty */
6014 else
6015 {
6016 uint16_t uTag;
6017 PCRTFLOAT80U const pr80Reg = &pFpuCtx->aRegs[iSt].r80;
6018 if (pr80Reg->s.uExponent == 0x7fff)
6019 uTag = 2; /* Exponent is all 1's => Special. */
6020 else if (pr80Reg->s.uExponent == 0x0000)
6021 {
6022 if (pr80Reg->s.u64Mantissa == 0x0000)
6023 uTag = 1; /* All bits are zero => Zero. */
6024 else
6025 uTag = 2; /* Must be special. */
6026 }
6027 else if (pr80Reg->s.u64Mantissa & RT_BIT_64(63)) /* The J bit. */
6028 uTag = 0; /* Valid. */
6029 else
6030 uTag = 2; /* Must be special. */
6031
6032            u16Ftw |= uTag << (iReg * 2);
6033 }
6034 }
6035
6036 return u16Ftw;
6037}
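
/*
 * Worked example for the expansion above (made-up state): with TOP=6 and only
 * ST(0) and ST(1) occupied, the abridged FTW is 0xc0 (physical registers 6 and
 * 7).  If ST(0) holds a normal value (tag 0) and ST(1) holds +0.0 (tag 1), the
 * six empty slots get tag 3 each, giving a full FTW of
 * 0x0fff | (0 << 12) | (1 << 14) = 0x4fff.
 */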
6038
6039
6040/**
6041 * Converts a full FTW to a compressed one (for use in FLDENV and FRSTOR).
6042 *
6043 * @returns The compressed FTW.
6044 * @param u16FullFtw The full FTW to convert.
6045 */
6046IEM_STATIC uint16_t iemFpuCompressFtw(uint16_t u16FullFtw)
6047{
6048 uint8_t u8Ftw = 0;
6049 for (unsigned i = 0; i < 8; i++)
6050 {
6051 if ((u16FullFtw & 3) != 3 /*empty*/)
6052 u8Ftw |= RT_BIT(i);
6053 u16FullFtw >>= 2;
6054 }
6055
6056 return u8Ftw;
6057}
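
/*
 * Continuing the sketch above: compressing the full FTW 0x4fff marks every slot
 * whose tag is 3 as empty and everything else as used, which yields the
 * abridged value 0xc0 (bits 6 and 7 set) again.
 */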
6058
6059/** @} */
6060
6061
6062/** @name Memory access.
6063 *
6064 * @{
6065 */
6066
6067
6068/**
6069 * Updates the IEMCPU::cbWritten counter if applicable.
6070 *
6071 * @param pIemCpu The IEM per CPU data.
6072 * @param fAccess The access being accounted for.
6073 * @param cbMem The access size.
6074 */
6075DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PIEMCPU pIemCpu, uint32_t fAccess, size_t cbMem)
6076{
6077 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
6078 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
6079 pIemCpu->cbWritten += (uint32_t)cbMem;
6080}
6081
6082
6083/**
6084 * Checks if the given segment can be written to, raising the appropriate
6085 * exception if not.
6086 *
6087 * @returns VBox strict status code.
6088 *
6089 * @param pIemCpu The IEM per CPU data.
6090 * @param pHid Pointer to the hidden register.
6091 * @param iSegReg The register number.
6092 * @param pu64BaseAddr Where to return the base address to use for the
6093 * segment. (In 64-bit code it may differ from the
6094 * base in the hidden segment.)
6095 */
6096IEM_STATIC VBOXSTRICTRC
6097iemMemSegCheckWriteAccessEx(PIEMCPU pIemCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
6098{
6099 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
6100 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
6101 else
6102 {
6103 if (!pHid->Attr.n.u1Present)
6104 return iemRaiseSelectorNotPresentBySegReg(pIemCpu, iSegReg);
6105
6106 if ( ( (pHid->Attr.n.u4Type & X86_SEL_TYPE_CODE)
6107 || !(pHid->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
6108 && pIemCpu->enmCpuMode != IEMMODE_64BIT )
6109 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, IEM_ACCESS_DATA_W);
6110 *pu64BaseAddr = pHid->u64Base;
6111 }
6112 return VINF_SUCCESS;
6113}
6114
6115
6116/**
6117 * Checks if the given segment can be read from, raising the appropriate
6118 * exception if not.
6119 *
6120 * @returns VBox strict status code.
6121 *
6122 * @param pIemCpu The IEM per CPU data.
6123 * @param pHid Pointer to the hidden register.
6124 * @param iSegReg The register number.
6125 * @param pu64BaseAddr Where to return the base address to use for the
6126 * segment. (In 64-bit code it may differ from the
6127 * base in the hidden segment.)
6128 */
6129IEM_STATIC VBOXSTRICTRC
6130iemMemSegCheckReadAccessEx(PIEMCPU pIemCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
6131{
6132 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
6133 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
6134 else
6135 {
6136 if (!pHid->Attr.n.u1Present)
6137 return iemRaiseSelectorNotPresentBySegReg(pIemCpu, iSegReg);
6138
6139 if ((pHid->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
6140 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, IEM_ACCESS_DATA_R);
6141 *pu64BaseAddr = pHid->u64Base;
6142 }
6143 return VINF_SUCCESS;
6144}
6145
6146
6147/**
6148 * Applies the segment limit, base and attributes.
6149 *
6150 * This may raise a \#GP or \#SS.
6151 *
6152 * @returns VBox strict status code.
6153 *
6154 * @param pIemCpu The IEM per CPU data.
6155 * @param fAccess The kind of access which is being performed.
6156 * @param iSegReg The index of the segment register to apply.
6157 * This is UINT8_MAX if none (for IDT, GDT, LDT,
6158 * TSS, ++).
6159 * @param cbMem The access size.
6160 * @param pGCPtrMem Pointer to the guest memory address to apply
6161 * segmentation to. Input and output parameter.
6162 */
6163IEM_STATIC VBOXSTRICTRC
6164iemMemApplySegment(PIEMCPU pIemCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem)
6165{
6166 if (iSegReg == UINT8_MAX)
6167 return VINF_SUCCESS;
6168
6169 PCPUMSELREGHID pSel = iemSRegGetHid(pIemCpu, iSegReg);
6170 switch (pIemCpu->enmCpuMode)
6171 {
6172 case IEMMODE_16BIT:
6173 case IEMMODE_32BIT:
6174 {
6175 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
6176 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
6177
6178 Assert(pSel->Attr.n.u1Present);
6179 Assert(pSel->Attr.n.u1DescType);
6180 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
6181 {
6182 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
6183 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
6184 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, fAccess);
6185
6186 if (!IEM_IS_REAL_OR_V86_MODE(pIemCpu))
6187 {
6188 /** @todo CPL check. */
6189 }
6190
6191 /*
6192 * There are two kinds of data selectors, normal and expand down.
6193 */
6194 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
6195 {
6196 if ( GCPtrFirst32 > pSel->u32Limit
6197 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
6198 return iemRaiseSelectorBounds(pIemCpu, iSegReg, fAccess);
6199 }
6200 else
6201 {
6202 /*
6203 * The upper boundary is defined by the B bit, not the G bit!
6204 */
6205 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
6206 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
6207 return iemRaiseSelectorBounds(pIemCpu, iSegReg, fAccess);
6208 }
6209 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
6210 }
6211 else
6212 {
6213
6214 /*
6215 * A code selector can usually be used to read through it; writing is
6216 * only permitted in real and V8086 mode.
6217 */
6218 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
6219 || ( (fAccess & IEM_ACCESS_TYPE_READ)
6220 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
6221 && !IEM_IS_REAL_OR_V86_MODE(pIemCpu) )
6222 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, fAccess);
6223
6224 if ( GCPtrFirst32 > pSel->u32Limit
6225 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
6226 return iemRaiseSelectorBounds(pIemCpu, iSegReg, fAccess);
6227
6228 if (!IEM_IS_REAL_OR_V86_MODE(pIemCpu))
6229 {
6230 /** @todo CPL check. */
6231 }
6232
6233 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
6234 }
6235 return VINF_SUCCESS;
6236 }
6237
6238 case IEMMODE_64BIT:
6239 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
6240 *pGCPtrMem += pSel->u64Base;
6241 return VINF_SUCCESS;
6242
6243 default:
6244 AssertFailedReturn(VERR_IEM_IPE_7);
6245 }
6246}
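
/*
 * Sketch of the expand-down data selector check above (made-up values, not from
 * the sources): valid offsets run from limit+1 up to 0xffff, or up to
 * 0xffffffff when the B bit is set.
 */
#if 0
static bool ExampleExpandDownOk(uint32_t uLimit, bool fBig, uint32_t GCPtrFirst, uint32_t cbMem)
{
    /* E.g. limit=0x0fff, B=1: a dword access at 0x1000 is fine, one at 0x0fff faults. */
    uint32_t const GCPtrLast = GCPtrFirst + cbMem - 1;
    return GCPtrFirst >= uLimit + UINT32_C(1)
        && GCPtrLast  <= (fBig ? UINT32_MAX : UINT32_C(0xffff));
}
#endif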
6247
6248
6249/**
6250 * Translates a virtual address to a physical address and checks if we
6251 * can access the page as specified.
6252 *
6253 * @param pIemCpu The IEM per CPU data.
6254 * @param GCPtrMem The virtual address.
6255 * @param fAccess The intended access.
6256 * @param pGCPhysMem Where to return the physical address.
6257 */
6258IEM_STATIC VBOXSTRICTRC
6259iemMemPageTranslateAndCheckAccess(PIEMCPU pIemCpu, RTGCPTR GCPtrMem, uint32_t fAccess, PRTGCPHYS pGCPhysMem)
6260{
6261 /** @todo Need a different PGM interface here. We're currently using
6262 * generic / REM interfaces. This won't cut it for R0 & RC. */
6263 RTGCPHYS GCPhys;
6264 uint64_t fFlags;
6265 int rc = PGMGstGetPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrMem, &fFlags, &GCPhys);
6266 if (RT_FAILURE(rc))
6267 {
6268 /** @todo Check unassigned memory in unpaged mode. */
6269 /** @todo Reserved bits in page tables. Requires new PGM interface. */
6270 *pGCPhysMem = NIL_RTGCPHYS;
6271 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess, rc);
6272 }
6273
6274 /* If the page is writable and does not have the no-exec bit set, all
6275 access is allowed. Otherwise we'll have to check more carefully... */
6276 if ((fFlags & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
6277 {
6278 /* Write to read only memory? */
6279 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
6280 && !(fFlags & X86_PTE_RW)
6281 && ( pIemCpu->uCpl != 0
6282 || (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_WP)))
6283 {
6284 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
6285 *pGCPhysMem = NIL_RTGCPHYS;
6286 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
6287 }
6288
6289 /* Kernel memory accessed by userland? */
6290 if ( !(fFlags & X86_PTE_US)
6291 && pIemCpu->uCpl == 3
6292 && !(fAccess & IEM_ACCESS_WHAT_SYS))
6293 {
6294 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
6295 *pGCPhysMem = NIL_RTGCPHYS;
6296 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
6297 }
6298
6299 /* Executing non-executable memory? */
6300 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
6301 && (fFlags & X86_PTE_PAE_NX)
6302 && (pIemCpu->CTX_SUFF(pCtx)->msrEFER & MSR_K6_EFER_NXE) )
6303 {
6304 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
6305 *pGCPhysMem = NIL_RTGCPHYS;
6306 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
6307 VERR_ACCESS_DENIED);
6308 }
6309 }
6310
6311 /*
6312 * Set the dirty / access flags.
6313 * ASSUMES this is set when the address is translated rather than on commit...
6314 */
6315 /** @todo testcase: check when A and D bits are actually set by the CPU. */
6316 uint32_t fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
6317 if ((fFlags & fAccessedDirty) != fAccessedDirty)
6318 {
6319 int rc2 = PGMGstModifyPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
6320 AssertRC(rc2);
6321 }
6322
6323 GCPhys |= GCPtrMem & PAGE_OFFSET_MASK;
6324 *pGCPhysMem = GCPhys;
6325 return VINF_SUCCESS;
6326}
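
/*
 * Sketch of the write check above (illustrative): a CPL 3 write to a page
 * without X86_PTE_RW always raises #PF, while a CPL 0 write to the same page
 * only does so when CR0.WP is set; reads are not affected by the RW bit here.
 */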
6327
6328
6329
6330/**
6331 * Maps a physical page.
6332 *
6333 * @returns VBox status code (see PGMR3PhysTlbGCPhys2Ptr).
6334 * @param pIemCpu The IEM per CPU data.
6335 * @param GCPhysMem The physical address.
6336 * @param fAccess The intended access.
6337 * @param ppvMem Where to return the mapping address.
6338 * @param pLock The PGM lock.
6339 */
6340IEM_STATIC int iemMemPageMap(PIEMCPU pIemCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, void **ppvMem, PPGMPAGEMAPLOCK pLock)
6341{
6342#ifdef IEM_VERIFICATION_MODE_FULL
6343 /* Force the alternative path so we can ignore writes. */
6344 if ((fAccess & IEM_ACCESS_TYPE_WRITE) && !pIemCpu->fNoRem)
6345 {
6346 if (IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
6347 {
6348 int rc2 = PGMPhysIemQueryAccess(IEMCPU_TO_VM(pIemCpu), GCPhysMem,
6349 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pIemCpu->fBypassHandlers);
6350 if (RT_FAILURE(rc2))
6351 pIemCpu->fProblematicMemory = true;
6352 }
6353 return VERR_PGM_PHYS_TLB_CATCH_ALL;
6354 }
6355#endif
6356#ifdef IEM_LOG_MEMORY_WRITES
6357 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6358 return VERR_PGM_PHYS_TLB_CATCH_ALL;
6359#endif
6360#ifdef IEM_VERIFICATION_MODE_MINIMAL
6361 return VERR_PGM_PHYS_TLB_CATCH_ALL;
6362#endif
6363
6364 /** @todo This API may require some improving later. A private deal with PGM
6365 * regarding locking and unlocking needs to be struck. A couple of TLBs
6366 * living in PGM, but with publicly accessible inlined access methods
6367 * could perhaps be an even better solution. */
6368 int rc = PGMPhysIemGCPhys2Ptr(IEMCPU_TO_VM(pIemCpu), IEMCPU_TO_VMCPU(pIemCpu),
6369 GCPhysMem,
6370 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE),
6371 pIemCpu->fBypassHandlers,
6372 ppvMem,
6373 pLock);
6374 /*Log(("PGMPhysIemGCPhys2Ptr %Rrc pLock=%.*Rhxs\n", rc, sizeof(*pLock), pLock));*/
6375 AssertMsg(rc == VINF_SUCCESS || RT_FAILURE_NP(rc), ("%Rrc\n", rc));
6376
6377#ifdef IEM_VERIFICATION_MODE_FULL
6378 if (RT_FAILURE(rc) && IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
6379 pIemCpu->fProblematicMemory = true;
6380#endif
6381 return rc;
6382}
6383
6384
6385/**
6386 * Unmaps a page previously mapped by iemMemPageMap.
6387 *
6388 * @param pIemCpu The IEM per CPU data.
6389 * @param GCPhysMem The physical address.
6390 * @param fAccess The intended access.
6391 * @param pvMem What iemMemPageMap returned.
6392 * @param pLock The PGM lock.
6393 */
6394DECLINLINE(void) iemMemPageUnmap(PIEMCPU pIemCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, const void *pvMem, PPGMPAGEMAPLOCK pLock)
6395{
6396 NOREF(pIemCpu);
6397 NOREF(GCPhysMem);
6398 NOREF(fAccess);
6399 NOREF(pvMem);
6400 PGMPhysReleasePageMappingLock(IEMCPU_TO_VM(pIemCpu), pLock);
6401}
6402
6403
6404/**
6405 * Looks up a memory mapping entry.
6406 *
6407 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
6408 * @param pIemCpu The IEM per CPU data.
6409 * @param pvMem The memory address.
6410 * @param fAccess The access flags to match (type and what).
6411 */
6412DECLINLINE(int) iemMapLookup(PIEMCPU pIemCpu, void *pvMem, uint32_t fAccess)
6413{
6414 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
6415 if ( pIemCpu->aMemMappings[0].pv == pvMem
6416 && (pIemCpu->aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
6417 return 0;
6418 if ( pIemCpu->aMemMappings[1].pv == pvMem
6419 && (pIemCpu->aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
6420 return 1;
6421 if ( pIemCpu->aMemMappings[2].pv == pvMem
6422 && (pIemCpu->aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
6423 return 2;
6424 return VERR_NOT_FOUND;
6425}
6426
6427
6428/**
6429 * Finds a free memmap entry when using iNextMapping doesn't work.
6430 *
6431 * @returns Memory mapping index, 1024 on failure.
6432 * @param pIemCpu The IEM per CPU data.
6433 */
6434IEM_STATIC unsigned iemMemMapFindFree(PIEMCPU pIemCpu)
6435{
6436 /*
6437 * The easy case.
6438 */
6439 if (pIemCpu->cActiveMappings == 0)
6440 {
6441 pIemCpu->iNextMapping = 1;
6442 return 0;
6443 }
6444
6445 /* There should be enough mappings for all instructions. */
6446 AssertReturn(pIemCpu->cActiveMappings < RT_ELEMENTS(pIemCpu->aMemMappings), 1024);
6447
6448 for (unsigned i = 0; i < RT_ELEMENTS(pIemCpu->aMemMappings); i++)
6449 if (pIemCpu->aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
6450 return i;
6451
6452 AssertFailedReturn(1024);
6453}
6454
6455
6456/**
6457 * Commits a bounce buffer that needs writing back and unmaps it.
6458 *
6459 * @returns Strict VBox status code.
6460 * @param pIemCpu The IEM per CPU data.
6461 * @param iMemMap The index of the buffer to commit.
6462 */
6463IEM_STATIC VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PIEMCPU pIemCpu, unsigned iMemMap)
6464{
6465 Assert(pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
6466 Assert(pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
6467
6468 /*
6469 * Do the writing.
6470 */
6471#ifndef IEM_VERIFICATION_MODE_MINIMAL
6472 PVM pVM = IEMCPU_TO_VM(pIemCpu);
6473 if ( !pIemCpu->aMemBbMappings[iMemMap].fUnassigned
6474 && !IEM_VERIFICATION_ENABLED(pIemCpu))
6475 {
6476 uint16_t const cbFirst = pIemCpu->aMemBbMappings[iMemMap].cbFirst;
6477 uint16_t const cbSecond = pIemCpu->aMemBbMappings[iMemMap].cbSecond;
6478 uint8_t const *pbBuf = &pIemCpu->aBounceBuffers[iMemMap].ab[0];
6479 if (!pIemCpu->fBypassHandlers)
6480 {
6481 /*
6482 * Carefully and efficiently dealing with access handler return
6483 * codes makes this a little bloated.
6484 */
6485 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
6486 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst,
6487 pbBuf,
6488 cbFirst,
6489 PGMACCESSORIGIN_IEM);
6490 if (rcStrict == VINF_SUCCESS)
6491 {
6492 if (cbSecond)
6493 {
6494 rcStrict = PGMPhysWrite(pVM,
6495 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond,
6496 pbBuf + cbFirst,
6497 cbSecond,
6498 PGMACCESSORIGIN_IEM);
6499 if (rcStrict == VINF_SUCCESS)
6500 { /* nothing */ }
6501 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6502 {
6503 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
6504 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
6505 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
6506 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
6507 }
6508 else
6509 {
6510 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
6511 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
6512 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
6513 return rcStrict;
6514 }
6515 }
6516 }
6517 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6518 {
6519 if (!cbSecond)
6520 {
6521 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
6522 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
6523 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
6524 }
6525 else
6526 {
6527 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
6528 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond,
6529 pbBuf + cbFirst,
6530 cbSecond,
6531 PGMACCESSORIGIN_IEM);
6532 if (rcStrict2 == VINF_SUCCESS)
6533 {
6534 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
6535 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
6536 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
6537 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
6538 }
6539 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
6540 {
6541 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
6542 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
6543 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
6544 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
6545 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
6546 }
6547 else
6548 {
6549 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
6550 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
6551 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
6552 return rcStrict2;
6553 }
6554 }
6555 }
6556 else
6557 {
6558 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
6559 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
6560 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
6561 return rcStrict;
6562 }
6563 }
6564 else
6565 {
6566 /*
6567 * No access handlers, much simpler.
6568 */
6569 int rc = PGMPhysSimpleWriteGCPhys(pVM, pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
6570 if (RT_SUCCESS(rc))
6571 {
6572 if (cbSecond)
6573 {
6574 rc = PGMPhysSimpleWriteGCPhys(pVM, pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
6575 if (RT_SUCCESS(rc))
6576 { /* likely */ }
6577 else
6578 {
6579 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
6580 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
6581 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
6582 return rc;
6583 }
6584 }
6585 }
6586 else
6587 {
6588 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
6589 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
6590 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
6591 return rc;
6592 }
6593 }
6594 }
6595#endif
6596
6597#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
6598 /*
6599 * Record the write(s).
6600 */
6601 if (!pIemCpu->fNoRem)
6602 {
6603 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
6604 if (pEvtRec)
6605 {
6606 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
6607 pEvtRec->u.RamWrite.GCPhys = pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst;
6608 pEvtRec->u.RamWrite.cb = pIemCpu->aMemBbMappings[iMemMap].cbFirst;
6609 memcpy(pEvtRec->u.RamWrite.ab, &pIemCpu->aBounceBuffers[iMemMap].ab[0], pIemCpu->aMemBbMappings[iMemMap].cbFirst);
6610 AssertCompile(sizeof(pEvtRec->u.RamWrite.ab) == sizeof(pIemCpu->aBounceBuffers[0].ab));
6611 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
6612 *pIemCpu->ppIemEvtRecNext = pEvtRec;
6613 }
6614 if (pIemCpu->aMemBbMappings[iMemMap].cbSecond)
6615 {
6616 pEvtRec = iemVerifyAllocRecord(pIemCpu);
6617 if (pEvtRec)
6618 {
6619 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
6620 pEvtRec->u.RamWrite.GCPhys = pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond;
6621 pEvtRec->u.RamWrite.cb = pIemCpu->aMemBbMappings[iMemMap].cbSecond;
6622 memcpy(pEvtRec->u.RamWrite.ab,
6623 &pIemCpu->aBounceBuffers[iMemMap].ab[pIemCpu->aMemBbMappings[iMemMap].cbFirst],
6624 pIemCpu->aMemBbMappings[iMemMap].cbSecond);
6625 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
6626 *pIemCpu->ppIemEvtRecNext = pEvtRec;
6627 }
6628 }
6629 }
6630#endif
6631#if defined(IEM_VERIFICATION_MODE_MINIMAL) || defined(IEM_LOG_MEMORY_WRITES)
6632 Log(("IEM Wrote %RGp: %.*Rhxs\n", pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst,
6633 RT_MAX(RT_MIN(pIemCpu->aMemBbMappings[iMemMap].cbFirst, 64), 1), &pIemCpu->aBounceBuffers[iMemMap].ab[0]));
6634 if (pIemCpu->aMemBbMappings[iMemMap].cbSecond)
6635 Log(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond,
6636 RT_MIN(pIemCpu->aMemBbMappings[iMemMap].cbSecond, 64),
6637 &pIemCpu->aBounceBuffers[iMemMap].ab[pIemCpu->aMemBbMappings[iMemMap].cbFirst]));
6638
6639 size_t cbWrote = pIemCpu->aMemBbMappings[iMemMap].cbFirst + pIemCpu->aMemBbMappings[iMemMap].cbSecond;
6640 g_cbIemWrote = cbWrote;
6641 memcpy(g_abIemWrote, &pIemCpu->aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
6642#endif
6643
6644 /*
6645 * Free the mapping entry.
6646 */
6647 pIemCpu->aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6648 Assert(pIemCpu->cActiveMappings != 0);
6649 pIemCpu->cActiveMappings--;
6650 return VINF_SUCCESS;
6651}
6652
6653
6654/**
6655 * iemMemMap worker that deals with a request crossing pages.
6656 */
6657IEM_STATIC VBOXSTRICTRC
6658iemMemBounceBufferMapCrossPage(PIEMCPU pIemCpu, int iMemMap, void **ppvMem, size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
6659{
6660 /*
6661 * Do the address translations.
6662 */
6663 RTGCPHYS GCPhysFirst;
6664 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, GCPtrFirst, fAccess, &GCPhysFirst);
6665 if (rcStrict != VINF_SUCCESS)
6666 return rcStrict;
6667
6668/** @todo Testcase & AMD-V/VT-x verification: Check if CR2 should really be the
6669 * last byte. */
6670 RTGCPHYS GCPhysSecond;
6671 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, GCPtrFirst + (cbMem - 1), fAccess, &GCPhysSecond);
6672 if (rcStrict != VINF_SUCCESS)
6673 return rcStrict;
6674 GCPhysSecond &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
6675
6676 PVM pVM = IEMCPU_TO_VM(pIemCpu);
6677#ifdef IEM_VERIFICATION_MODE_FULL
6678 /*
6679 * Detect problematic memory when verifying so we can select
6680 * the right execution engine. (TLB: Redo this.)
6681 */
6682 if (IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
6683 {
6684 int rc2 = PGMPhysIemQueryAccess(pVM, GCPhysFirst, RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pIemCpu->fBypassHandlers);
6685 if (RT_SUCCESS(rc2))
6686 rc2 = PGMPhysIemQueryAccess(pVM, GCPhysSecond, RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pIemCpu->fBypassHandlers);
6687 if (RT_FAILURE(rc2))
6688 pIemCpu->fProblematicMemory = true;
6689 }
6690#endif
6691
6692
6693 /*
6694 * Read in the current memory content if it's a read, execute or partial
6695 * write access.
6696 */
6697 uint8_t *pbBuf = &pIemCpu->aBounceBuffers[iMemMap].ab[0];
6698 uint32_t const cbFirstPage = PAGE_SIZE - (GCPhysFirst & PAGE_OFFSET_MASK);
6699 uint32_t const cbSecondPage = (uint32_t)(cbMem - cbFirstPage);
6700
6701 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
6702 {
6703 if (!pIemCpu->fBypassHandlers)
6704 {
6705 /*
6706 * Must carefully deal with access handler status codes here,
6707 * which makes the code a bit bloated.
6708 */
6709 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
6710 if (rcStrict == VINF_SUCCESS)
6711 {
6712 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
6713 if (rcStrict == VINF_SUCCESS)
6714 { /*likely */ }
6715 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6716 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
6717 else
6718 {
6719 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (!!)\n",
6720 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
6721 return rcStrict;
6722 }
6723 }
6724 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6725 {
6726 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
6727 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
6728 {
6729 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
6730 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
6731 }
6732 else
6733 {
6734 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
6735                          GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict) ));
6736 return rcStrict2;
6737 }
6738 }
6739 else
6740 {
6741 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6742 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
6743 return rcStrict;
6744 }
6745 }
6746 else
6747 {
6748 /*
6749 * No informational status codes here, much more straightforward.
6750 */
6751 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
6752 if (RT_SUCCESS(rc))
6753 {
6754 Assert(rc == VINF_SUCCESS);
6755 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
6756 if (RT_SUCCESS(rc))
6757 Assert(rc == VINF_SUCCESS);
6758 else
6759 {
6760 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
6761 return rc;
6762 }
6763 }
6764 else
6765 {
6766 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
6767 return rc;
6768 }
6769 }
6770
6771#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
6772 if ( !pIemCpu->fNoRem
6773 && (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC)) )
6774 {
6775 /*
6776 * Record the reads.
6777 */
6778 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
6779 if (pEvtRec)
6780 {
6781 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
6782 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
6783 pEvtRec->u.RamRead.cb = cbFirstPage;
6784 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
6785 *pIemCpu->ppIemEvtRecNext = pEvtRec;
6786 }
6787 pEvtRec = iemVerifyAllocRecord(pIemCpu);
6788 if (pEvtRec)
6789 {
6790 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
6791 pEvtRec->u.RamRead.GCPhys = GCPhysSecond;
6792 pEvtRec->u.RamRead.cb = cbSecondPage;
6793 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
6794 *pIemCpu->ppIemEvtRecNext = pEvtRec;
6795 }
6796 }
6797#endif
6798 }
6799#ifdef VBOX_STRICT
6800 else
6801 memset(pbBuf, 0xcc, cbMem);
6802 if (cbMem < sizeof(pIemCpu->aBounceBuffers[iMemMap].ab))
6803 memset(pbBuf + cbMem, 0xaa, sizeof(pIemCpu->aBounceBuffers[iMemMap].ab) - cbMem);
6804#endif
6805
6806 /*
6807 * Commit the bounce buffer entry.
6808 */
6809 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
6810 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
6811 pIemCpu->aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
6812 pIemCpu->aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
6813 pIemCpu->aMemBbMappings[iMemMap].fUnassigned = false;
6814 pIemCpu->aMemMappings[iMemMap].pv = pbBuf;
6815 pIemCpu->aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
6816 pIemCpu->iNextMapping = iMemMap + 1;
6817 pIemCpu->cActiveMappings++;
6818
6819 iemMemUpdateWrittenCounter(pIemCpu, fAccess, cbMem);
6820 *ppvMem = pbBuf;
6821 return VINF_SUCCESS;
6822}
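
/*
 * Worked example (illustrative only, not taken from a testcase): with 4 KiB
 * pages an 8 byte read at GCPtrFirst = 0x1000ffc is split as
 *     cbFirstPage  = PAGE_SIZE - (GCPhysFirst & PAGE_OFFSET_MASK) = 4096 - 0xffc = 4
 *     cbSecondPage = cbMem - cbFirstPage                          = 8 - 4        = 4
 * so the first four bytes come from the tail of the first page and the last
 * four from offset 0 of GCPhysSecond, assembled back to back in the bounce
 * buffer that the caller gets back via *ppvMem.
 */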
6823
6824
6825/**
6826 * iemMemMap worker that deals with iemMemPageMap failures.
6827 */
6828IEM_STATIC VBOXSTRICTRC iemMemBounceBufferMapPhys(PIEMCPU pIemCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
6829 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
6830{
6831 /*
6832 * Filter out conditions we can't handle and the ones which shouldn't happen.
6833 */
6834 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
6835 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
6836 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
6837 {
6838 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
6839 return rcMap;
6840 }
6841 pIemCpu->cPotentialExits++;
6842
6843 /*
6844 * Read in the current memory content if it's a read, execute or partial
6845 * write access.
6846 */
6847 uint8_t *pbBuf = &pIemCpu->aBounceBuffers[iMemMap].ab[0];
6848 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
6849 {
6850 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
6851 memset(pbBuf, 0xff, cbMem);
6852 else
6853 {
6854 int rc;
6855 if (!pIemCpu->fBypassHandlers)
6856 {
6857 VBOXSTRICTRC rcStrict = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
6858 if (rcStrict == VINF_SUCCESS)
6859 { /* nothing */ }
6860 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6861 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
6862 else
6863 {
6864 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6865 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
6866 return rcStrict;
6867 }
6868 }
6869 else
6870 {
6871 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), pbBuf, GCPhysFirst, cbMem);
6872 if (RT_SUCCESS(rc))
6873 { /* likely */ }
6874 else
6875 {
6876 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6877 GCPhysFirst, rc));
6878 return rc;
6879 }
6880 }
6881 }
6882
6883#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
6884 if ( !pIemCpu->fNoRem
6885 && (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC)) )
6886 {
6887 /*
6888 * Record the read.
6889 */
6890 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
6891 if (pEvtRec)
6892 {
6893 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
6894 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
6895 pEvtRec->u.RamRead.cb = (uint32_t)cbMem;
6896 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
6897 *pIemCpu->ppIemEvtRecNext = pEvtRec;
6898 }
6899 }
6900#endif
6901 }
6902#ifdef VBOX_STRICT
6903 else
6904 memset(pbBuf, 0xcc, cbMem);
6905#endif
6906#ifdef VBOX_STRICT
6907 if (cbMem < sizeof(pIemCpu->aBounceBuffers[iMemMap].ab))
6908 memset(pbBuf + cbMem, 0xaa, sizeof(pIemCpu->aBounceBuffers[iMemMap].ab) - cbMem);
6909#endif
6910
6911 /*
6912 * Commit the bounce buffer entry.
6913 */
6914 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
6915 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
6916 pIemCpu->aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
6917 pIemCpu->aMemBbMappings[iMemMap].cbSecond = 0;
6918 pIemCpu->aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
6919 pIemCpu->aMemMappings[iMemMap].pv = pbBuf;
6920 pIemCpu->aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
6921 pIemCpu->iNextMapping = iMemMap + 1;
6922 pIemCpu->cActiveMappings++;
6923
6924 iemMemUpdateWrittenCounter(pIemCpu, fAccess, cbMem);
6925 *ppvMem = pbBuf;
6926 return VINF_SUCCESS;
6927}
6928
6929
6930
6931/**
6932 * Maps the specified guest memory for the given kind of access.
6933 *
6934 * This may be using bounce buffering of the memory if it's crossing a page
6935 * boundary or if there is an access handler installed for any of it. Because
6936 * of lock prefix guarantees, we're in for some extra clutter when this
6937 * happens.
6938 *
6939 * This may raise a \#GP, \#SS, \#PF or \#AC.
6940 *
6941 * @returns VBox strict status code.
6942 *
6943 * @param pIemCpu The IEM per CPU data.
6944 * @param ppvMem Where to return the pointer to the mapped
6945 * memory.
6946 * @param cbMem The number of bytes to map. This is usually 1,
6947 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
6948 * string operations it can be up to a page.
6949 * @param iSegReg The index of the segment register to use for
6950 * this access. The base and limits are checked.
6951 * Use UINT8_MAX to indicate that no segmentation
6952 * is required (for IDT, GDT and LDT accesses).
6953 * @param GCPtrMem The address of the guest memory.
6954 * @param fAccess How the memory is being accessed. The
6955 * IEM_ACCESS_TYPE_XXX bit is used to figure out
6956 * how to map the memory, while the
6957 * IEM_ACCESS_WHAT_XXX bit is used when raising
6958 * exceptions.
6959 */
6960IEM_STATIC VBOXSTRICTRC
6961iemMemMap(PIEMCPU pIemCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
6962{
6963 /*
6964 * Check the input and figure out which mapping entry to use.
6965 */
6966 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
6967    Assert(!(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
6968
6969 unsigned iMemMap = pIemCpu->iNextMapping;
6970 if ( iMemMap >= RT_ELEMENTS(pIemCpu->aMemMappings)
6971 || pIemCpu->aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
6972 {
6973 iMemMap = iemMemMapFindFree(pIemCpu);
6974 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pIemCpu->aMemMappings),
6975 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pIemCpu->cActiveMappings,
6976 pIemCpu->aMemMappings[0].fAccess, pIemCpu->aMemMappings[1].fAccess,
6977 pIemCpu->aMemMappings[2].fAccess),
6978 VERR_IEM_IPE_9);
6979 }
6980
6981 /*
6982 * Map the memory, checking that we can actually access it. If something
6983 * slightly complicated happens, fall back on bounce buffering.
6984 */
6985 VBOXSTRICTRC rcStrict = iemMemApplySegment(pIemCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
6986 if (rcStrict != VINF_SUCCESS)
6987 return rcStrict;
6988
6989 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem > PAGE_SIZE) /* Crossing a page boundary? */
6990 return iemMemBounceBufferMapCrossPage(pIemCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
6991
6992 RTGCPHYS GCPhysFirst;
6993 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, GCPtrMem, fAccess, &GCPhysFirst);
6994 if (rcStrict != VINF_SUCCESS)
6995 return rcStrict;
6996
6997 void *pvMem;
6998 rcStrict = iemMemPageMap(pIemCpu, GCPhysFirst, fAccess, &pvMem, &pIemCpu->aMemMappingLocks[iMemMap].Lock);
6999 if (rcStrict != VINF_SUCCESS)
7000 return iemMemBounceBufferMapPhys(pIemCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
7001
7002 /*
7003 * Fill in the mapping table entry.
7004 */
7005 pIemCpu->aMemMappings[iMemMap].pv = pvMem;
7006 pIemCpu->aMemMappings[iMemMap].fAccess = fAccess;
7007 pIemCpu->iNextMapping = iMemMap + 1;
7008 pIemCpu->cActiveMappings++;
7009
7010 iemMemUpdateWrittenCounter(pIemCpu, fAccess, cbMem);
7011 *ppvMem = pvMem;
7012 return VINF_SUCCESS;
7013}
7014
7015
7016/**
7017 * Commits the guest memory if bounce buffered and unmaps it.
7018 *
7019 * @returns Strict VBox status code.
7020 * @param pIemCpu The IEM per CPU data.
7021 * @param pvMem The mapping.
7022 * @param fAccess The kind of access.
7023 */
7024IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PIEMCPU pIemCpu, void *pvMem, uint32_t fAccess)
7025{
7026 int iMemMap = iemMapLookup(pIemCpu, pvMem, fAccess);
7027 AssertReturn(iMemMap >= 0, iMemMap);
7028
7029 /* If it's bounce buffered, we may need to write back the buffer. */
7030 if (pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
7031 {
7032 if (pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
7033 return iemMemBounceBufferCommitAndUnmap(pIemCpu, iMemMap);
7034 }
7035 /* Otherwise unlock it. */
7036 else
7037 PGMPhysReleasePageMappingLock(IEMCPU_TO_VM(pIemCpu), &pIemCpu->aMemMappingLocks[iMemMap].Lock);
7038
7039 /* Free the entry. */
7040 pIemCpu->aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
7041 Assert(pIemCpu->cActiveMappings != 0);
7042 pIemCpu->cActiveMappings--;
7043 return VINF_SUCCESS;
7044}
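
/*
 * Minimal usage sketch for the map/commit pair (illustrative only; assumes
 * the IEM_ACCESS_DATA_RW combination defined alongside the other access
 * flags): a read-modify-write of a word.  Whether the mapping is direct or
 * bounce buffered is transparent to the caller; the write only becomes
 * visible to the guest once the commit call succeeds.
 *
 *     uint16_t *pu16;
 *     VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, (void **)&pu16, sizeof(*pu16),
 *                                       X86_SREG_DS, GCPtrMem, IEM_ACCESS_DATA_RW);
 *     if (rcStrict == VINF_SUCCESS)
 *     {
 *         *pu16 |= RT_BIT_32(0);
 *         rcStrict = iemMemCommitAndUnmap(pIemCpu, pu16, IEM_ACCESS_DATA_RW);
 *     }
 */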
7045
7046
7047/**
7048 * Rolls back mappings, releasing page locks and such.
7049 *
7050 * The caller shall only call this after checking cActiveMappings.
7051 *
7053 * @param pIemCpu The IEM per CPU data.
7054 */
7055IEM_STATIC void iemMemRollback(PIEMCPU pIemCpu)
7056{
7057 Assert(pIemCpu->cActiveMappings > 0);
7058
7059 uint32_t iMemMap = RT_ELEMENTS(pIemCpu->aMemMappings);
7060 while (iMemMap-- > 0)
7061 {
7062 uint32_t fAccess = pIemCpu->aMemMappings[iMemMap].fAccess;
7063 if (fAccess != IEM_ACCESS_INVALID)
7064 {
7065 pIemCpu->aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
7066 if (!(fAccess & IEM_ACCESS_BOUNCE_BUFFERED))
7067 PGMPhysReleasePageMappingLock(IEMCPU_TO_VM(pIemCpu), &pIemCpu->aMemMappingLocks[iMemMap].Lock);
7068 Assert(pIemCpu->cActiveMappings > 0);
7069 pIemCpu->cActiveMappings--;
7070 }
7071 }
7072}
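
/*
 * Sketch of the expected caller-side unwinding (illustrative only; the real
 * dispatch code lives elsewhere): when instruction execution fails while
 * mappings are still active, they must be rolled back so no PGM page locks
 * leak.
 *
 *     VBOXSTRICTRC rcStrict = ...; // execute one instruction
 *     if (rcStrict != VINF_SUCCESS && pIemCpu->cActiveMappings > 0)
 *         iemMemRollback(pIemCpu);
 */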
7073
7074
7075/**
7076 * Fetches a data byte.
7077 *
7078 * @returns Strict VBox status code.
7079 * @param pIemCpu The IEM per CPU data.
7080 * @param pu8Dst Where to return the byte.
7081 * @param iSegReg The index of the segment register to use for
7082 * this access. The base and limits are checked.
7083 * @param GCPtrMem The address of the guest memory.
7084 */
7085IEM_STATIC VBOXSTRICTRC iemMemFetchDataU8(PIEMCPU pIemCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7086{
7087 /* The lazy approach for now... */
7088 uint8_t const *pu8Src;
7089 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7090 if (rc == VINF_SUCCESS)
7091 {
7092 *pu8Dst = *pu8Src;
7093 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
7094 }
7095 return rc;
7096}
7097
7098
7099/**
7100 * Fetches a data word.
7101 *
7102 * @returns Strict VBox status code.
7103 * @param pIemCpu The IEM per CPU data.
7104 * @param pu16Dst Where to return the word.
7105 * @param iSegReg The index of the segment register to use for
7106 * this access. The base and limits are checked.
7107 * @param GCPtrMem The address of the guest memory.
7108 */
7109IEM_STATIC VBOXSTRICTRC iemMemFetchDataU16(PIEMCPU pIemCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7110{
7111 /* The lazy approach for now... */
7112 uint16_t const *pu16Src;
7113 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7114 if (rc == VINF_SUCCESS)
7115 {
7116 *pu16Dst = *pu16Src;
7117 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
7118 }
7119 return rc;
7120}
7121
7122
7123/**
7124 * Fetches a data dword.
7125 *
7126 * @returns Strict VBox status code.
7127 * @param pIemCpu The IEM per CPU data.
7128 * @param pu32Dst Where to return the dword.
7129 * @param iSegReg The index of the segment register to use for
7130 * this access. The base and limits are checked.
7131 * @param GCPtrMem The address of the guest memory.
7132 */
7133IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7134{
7135 /* The lazy approach for now... */
7136 uint32_t const *pu32Src;
7137 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7138 if (rc == VINF_SUCCESS)
7139 {
7140 *pu32Dst = *pu32Src;
7141 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
7142 }
7143 return rc;
7144}
7145
7146
7147#ifdef SOME_UNUSED_FUNCTION
7148/**
7149 * Fetches a data dword and sign extends it to a qword.
7150 *
7151 * @returns Strict VBox status code.
7152 * @param pIemCpu The IEM per CPU data.
7153 * @param pu64Dst Where to return the sign extended value.
7154 * @param iSegReg The index of the segment register to use for
7155 * this access. The base and limits are checked.
7156 * @param GCPtrMem The address of the guest memory.
7157 */
7158IEM_STATIC VBOXSTRICTRC iemMemFetchDataS32SxU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7159{
7160 /* The lazy approach for now... */
7161 int32_t const *pi32Src;
7162 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7163 if (rc == VINF_SUCCESS)
7164 {
7165 *pu64Dst = *pi32Src;
7166 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
7167 }
7168#ifdef __GNUC__ /* warning: GCC may be a royal pain */
7169 else
7170 *pu64Dst = 0;
7171#endif
7172 return rc;
7173}
7174#endif
7175
7176
7177/**
7178 * Fetches a data qword.
7179 *
7180 * @returns Strict VBox status code.
7181 * @param pIemCpu The IEM per CPU data.
7182 * @param pu64Dst Where to return the qword.
7183 * @param iSegReg The index of the segment register to use for
7184 * this access. The base and limits are checked.
7185 * @param GCPtrMem The address of the guest memory.
7186 */
7187IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7188{
7189 /* The lazy approach for now... */
7190 uint64_t const *pu64Src;
7191 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7192 if (rc == VINF_SUCCESS)
7193 {
7194 *pu64Dst = *pu64Src;
7195 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
7196 }
7197 return rc;
7198}
7199
7200
7201/**
7202 * Fetches a data qword, aligned at a 16 byte boundary (for SSE).
7203 *
7204 * @returns Strict VBox status code.
7205 * @param pIemCpu The IEM per CPU data.
7206 * @param pu64Dst Where to return the qword.
7207 * @param iSegReg The index of the segment register to use for
7208 * this access. The base and limits are checked.
7209 * @param GCPtrMem The address of the guest memory.
7210 */
7211IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64AlignedU128(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7212{
7213 /* The lazy approach for now... */
7214 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
7215 if (RT_UNLIKELY(GCPtrMem & 15))
7216 return iemRaiseGeneralProtectionFault0(pIemCpu);
7217
7218 uint64_t const *pu64Src;
7219 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7220 if (rc == VINF_SUCCESS)
7221 {
7222 *pu64Dst = *pu64Src;
7223 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
7224 }
7225 return rc;
7226}
7227
7228
7229/**
7230 * Fetches a data tword.
7231 *
7232 * @returns Strict VBox status code.
7233 * @param pIemCpu The IEM per CPU data.
7234 * @param pr80Dst Where to return the tword.
7235 * @param iSegReg The index of the segment register to use for
7236 * this access. The base and limits are checked.
7237 * @param GCPtrMem The address of the guest memory.
7238 */
7239IEM_STATIC VBOXSTRICTRC iemMemFetchDataR80(PIEMCPU pIemCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7240{
7241 /* The lazy approach for now... */
7242 PCRTFLOAT80U pr80Src;
7243 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pr80Src, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7244 if (rc == VINF_SUCCESS)
7245 {
7246 *pr80Dst = *pr80Src;
7247 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
7248 }
7249 return rc;
7250}
7251
7252
7253/**
7254 * Fetches a data dqword (double qword), generally SSE related.
7255 *
7256 * @returns Strict VBox status code.
7257 * @param pIemCpu The IEM per CPU data.
7258 * @param pu128Dst Where to return the dqword.
7259 * @param iSegReg The index of the segment register to use for
7260 * this access. The base and limits are checked.
7261 * @param GCPtrMem The address of the guest memory.
7262 */
7263IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128(PIEMCPU pIemCpu, uint128_t *pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7264{
7265 /* The lazy approach for now... */
7266 uint128_t const *pu128Src;
7267 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7268 if (rc == VINF_SUCCESS)
7269 {
7270 *pu128Dst = *pu128Src;
7271 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7272 }
7273 return rc;
7274}
7275
7276
7277/**
7278 * Fetches a data dqword (double qword) at an aligned address, generally SSE
7279 * related.
7280 *
7281 * Raises \#GP(0) if not aligned.
7282 *
7283 * @returns Strict VBox status code.
7284 * @param pIemCpu The IEM per CPU data.
7285 * @param pu128Dst Where to return the dqword.
7286 * @param iSegReg The index of the segment register to use for
7287 * this access. The base and limits are checked.
7288 * @param GCPtrMem The address of the guest memory.
7289 */
7290IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PIEMCPU pIemCpu, uint128_t *pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
7291{
7292 /* The lazy approach for now... */
7293 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
7294 if ( (GCPtrMem & 15)
7295 && !(pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXSCR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
7296 return iemRaiseGeneralProtectionFault0(pIemCpu);
7297
7298 uint128_t const *pu128Src;
7299 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7300 if (rc == VINF_SUCCESS)
7301 {
7302 *pu128Dst = *pu128Src;
7303 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7304 }
7305 return rc;
7306}
7307
7308
7309
7310
7311/**
7312 * Fetches a descriptor register (lgdt, lidt).
7313 *
7314 * @returns Strict VBox status code.
7315 * @param pIemCpu The IEM per CPU data.
7316 * @param pcbLimit Where to return the limit.
7317 * @param pGCPtrBase Where to return the base.
7318 * @param iSegReg The index of the segment register to use for
7319 * this access. The base and limits are checked.
7320 * @param GCPtrMem The address of the guest memory.
7321 * @param enmOpSize The effective operand size.
7322 */
7323IEM_STATIC VBOXSTRICTRC iemMemFetchDataXdtr(PIEMCPU pIemCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
7324 RTGCPTR GCPtrMem, IEMMODE enmOpSize)
7325{
7326 uint8_t const *pu8Src;
7327 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu,
7328 (void **)&pu8Src,
7329 enmOpSize == IEMMODE_64BIT
7330 ? 2 + 8
7331 : enmOpSize == IEMMODE_32BIT
7332 ? 2 + 4
7333 : 2 + 3,
7334 iSegReg,
7335 GCPtrMem,
7336 IEM_ACCESS_DATA_R);
7337 if (rcStrict == VINF_SUCCESS)
7338 {
7339 *pcbLimit = RT_MAKE_U16(pu8Src[0], pu8Src[1]);
7340 switch (enmOpSize)
7341 {
7342 case IEMMODE_16BIT:
7343 *pGCPtrBase = RT_MAKE_U32_FROM_U8(pu8Src[2], pu8Src[3], pu8Src[4], 0);
7344 break;
7345 case IEMMODE_32BIT:
7346 *pGCPtrBase = RT_MAKE_U32_FROM_U8(pu8Src[2], pu8Src[3], pu8Src[4], pu8Src[5]);
7347 break;
7348 case IEMMODE_64BIT:
7349 *pGCPtrBase = RT_MAKE_U64_FROM_U8(pu8Src[2], pu8Src[3], pu8Src[4], pu8Src[5],
7350 pu8Src[6], pu8Src[7], pu8Src[8], pu8Src[9]);
7351 break;
7352
7353 IEM_NOT_REACHED_DEFAULT_CASE_RET();
7354 }
7355 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
7356 }
7357 return rcStrict;
7358}
7359
7360
7361
7362/**
7363 * Stores a data byte.
7364 *
7365 * @returns Strict VBox status code.
7366 * @param pIemCpu The IEM per CPU data.
7367 * @param iSegReg The index of the segment register to use for
7368 * this access. The base and limits are checked.
7369 * @param GCPtrMem The address of the guest memory.
7370 * @param u8Value The value to store.
7371 */
7372IEM_STATIC VBOXSTRICTRC iemMemStoreDataU8(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
7373{
7374 /* The lazy approach for now... */
7375 uint8_t *pu8Dst;
7376 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7377 if (rc == VINF_SUCCESS)
7378 {
7379 *pu8Dst = u8Value;
7380 rc = iemMemCommitAndUnmap(pIemCpu, pu8Dst, IEM_ACCESS_DATA_W);
7381 }
7382 return rc;
7383}
7384
7385
7386/**
7387 * Stores a data word.
7388 *
7389 * @returns Strict VBox status code.
7390 * @param pIemCpu The IEM per CPU data.
7391 * @param iSegReg The index of the segment register to use for
7392 * this access. The base and limits are checked.
7393 * @param GCPtrMem The address of the guest memory.
7394 * @param u16Value The value to store.
7395 */
7396IEM_STATIC VBOXSTRICTRC iemMemStoreDataU16(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
7397{
7398 /* The lazy approach for now... */
7399 uint16_t *pu16Dst;
7400 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7401 if (rc == VINF_SUCCESS)
7402 {
7403 *pu16Dst = u16Value;
7404 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_DATA_W);
7405 }
7406 return rc;
7407}
7408
7409
7410/**
7411 * Stores a data dword.
7412 *
7413 * @returns Strict VBox status code.
7414 * @param pIemCpu The IEM per CPU data.
7415 * @param iSegReg The index of the segment register to use for
7416 * this access. The base and limits are checked.
7417 * @param GCPtrMem The address of the guest memory.
7418 * @param u32Value The value to store.
7419 */
7420IEM_STATIC VBOXSTRICTRC iemMemStoreDataU32(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
7421{
7422 /* The lazy approach for now... */
7423 uint32_t *pu32Dst;
7424 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7425 if (rc == VINF_SUCCESS)
7426 {
7427 *pu32Dst = u32Value;
7428 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_DATA_W);
7429 }
7430 return rc;
7431}
7432
7433
7434/**
7435 * Stores a data qword.
7436 *
7437 * @returns Strict VBox status code.
7438 * @param pIemCpu The IEM per CPU data.
7439 * @param iSegReg The index of the segment register to use for
7440 * this access. The base and limits are checked.
7441 * @param GCPtrMem The address of the guest memory.
7442 * @param u64Value The value to store.
7443 */
7444IEM_STATIC VBOXSTRICTRC iemMemStoreDataU64(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
7445{
7446 /* The lazy approach for now... */
7447 uint64_t *pu64Dst;
7448 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7449 if (rc == VINF_SUCCESS)
7450 {
7451 *pu64Dst = u64Value;
7452 rc = iemMemCommitAndUnmap(pIemCpu, pu64Dst, IEM_ACCESS_DATA_W);
7453 }
7454 return rc;
7455}
7456
7457
7458/**
7459 * Stores a data dqword.
7460 *
7461 * @returns Strict VBox status code.
7462 * @param pIemCpu The IEM per CPU data.
7463 * @param iSegReg The index of the segment register to use for
7464 * this access. The base and limits are checked.
7465 * @param GCPtrMem The address of the guest memory.
7466 * @param u128Value The value to store.
7467 */
7468IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint128_t u128Value)
7469{
7470 /* The lazy approach for now... */
7471 uint128_t *pu128Dst;
7472 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7473 if (rc == VINF_SUCCESS)
7474 {
7475 *pu128Dst = u128Value;
7476 rc = iemMemCommitAndUnmap(pIemCpu, pu128Dst, IEM_ACCESS_DATA_W);
7477 }
7478 return rc;
7479}
7480
7481
7482/**
7483 * Stores a data dqword, SSE aligned.
7484 *
7485 * @returns Strict VBox status code.
7486 * @param pIemCpu The IEM per CPU data.
7487 * @param iSegReg The index of the segment register to use for
7488 * this access. The base and limits are checked.
7489 * @param GCPtrMem The address of the guest memory.
7490 * @param u128Value The value to store.
7491 */
7492IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint128_t u128Value)
7493{
7494 /* The lazy approach for now... */
7495 if ( (GCPtrMem & 15)
7496 && !(pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXSCR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
7497 return iemRaiseGeneralProtectionFault0(pIemCpu);
7498
7499 uint128_t *pu128Dst;
7500 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7501 if (rc == VINF_SUCCESS)
7502 {
7503 *pu128Dst = u128Value;
7504 rc = iemMemCommitAndUnmap(pIemCpu, pu128Dst, IEM_ACCESS_DATA_W);
7505 }
7506 return rc;
7507}
7508
7509
7510/**
7511 * Stores a descriptor register (sgdt, sidt).
7512 *
7513 * @returns Strict VBox status code.
7514 * @param pIemCpu The IEM per CPU data.
7515 * @param cbLimit The limit.
7516 * @param GCPtrBase The base address.
7517 * @param iSegReg The index of the segment register to use for
7518 * this access. The base and limits are checked.
7519 * @param GCPtrMem The address of the guest memory.
7520 * @param enmOpSize The effective operand size.
7521 */
7522IEM_STATIC VBOXSTRICTRC
7523iemMemStoreDataXdtr(PIEMCPU pIemCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem, IEMMODE enmOpSize)
7524{
7525 uint8_t *pu8Src;
7526 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu,
7527 (void **)&pu8Src,
7528 enmOpSize == IEMMODE_64BIT
7529 ? 2 + 8
7530 : enmOpSize == IEMMODE_32BIT
7531 ? 2 + 4
7532 : 2 + 3,
7533 iSegReg,
7534 GCPtrMem,
7535 IEM_ACCESS_DATA_W);
7536 if (rcStrict == VINF_SUCCESS)
7537 {
7538 pu8Src[0] = RT_BYTE1(cbLimit);
7539 pu8Src[1] = RT_BYTE2(cbLimit);
7540 pu8Src[2] = RT_BYTE1(GCPtrBase);
7541 pu8Src[3] = RT_BYTE2(GCPtrBase);
7542 pu8Src[4] = RT_BYTE3(GCPtrBase);
7543 if (enmOpSize == IEMMODE_16BIT)
7544 pu8Src[5] = 0; /* Note! the 286 stored 0xff here. */
7545 else
7546 {
7547 pu8Src[5] = RT_BYTE4(GCPtrBase);
7548 if (enmOpSize == IEMMODE_64BIT)
7549 {
7550 pu8Src[6] = RT_BYTE5(GCPtrBase);
7551 pu8Src[7] = RT_BYTE6(GCPtrBase);
7552 pu8Src[8] = RT_BYTE7(GCPtrBase);
7553 pu8Src[9] = RT_BYTE8(GCPtrBase);
7554 }
7555 }
7556 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pu8Src, IEM_ACCESS_DATA_W);
7557 }
7558 return rcStrict;
7559}
7560
7561
7562/**
7563 * Pushes a word onto the stack.
7564 *
7565 * @returns Strict VBox status code.
7566 * @param pIemCpu The IEM per CPU data.
7567 * @param u16Value The value to push.
7568 */
7569IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PIEMCPU pIemCpu, uint16_t u16Value)
7570{
7571 /* Increment the stack pointer. */
7572 uint64_t uNewRsp;
7573 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7574 RTGCPTR GCPtrTop = iemRegGetRspForPush(pIemCpu, pCtx, 2, &uNewRsp);
7575
7576 /* Write the word the lazy way. */
7577 uint16_t *pu16Dst;
7578 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7579 if (rc == VINF_SUCCESS)
7580 {
7581 *pu16Dst = u16Value;
7582 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_STACK_W);
7583 }
7584
7585    /* Commit the new RSP value unless an access handler made trouble. */
7586 if (rc == VINF_SUCCESS)
7587 pCtx->rsp = uNewRsp;
7588
7589 return rc;
7590}
7591
7592
7593/**
7594 * Pushes a dword onto the stack.
7595 *
7596 * @returns Strict VBox status code.
7597 * @param pIemCpu The IEM per CPU data.
7598 * @param u32Value The value to push.
7599 */
7600IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PIEMCPU pIemCpu, uint32_t u32Value)
7601{
7602 /* Increment the stack pointer. */
7603 uint64_t uNewRsp;
7604 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7605 RTGCPTR GCPtrTop = iemRegGetRspForPush(pIemCpu, pCtx, 4, &uNewRsp);
7606
7607 /* Write the dword the lazy way. */
7608 uint32_t *pu32Dst;
7609 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7610 if (rc == VINF_SUCCESS)
7611 {
7612 *pu32Dst = u32Value;
7613 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_STACK_W);
7614 }
7615
7616    /* Commit the new RSP value unless an access handler made trouble. */
7617 if (rc == VINF_SUCCESS)
7618 pCtx->rsp = uNewRsp;
7619
7620 return rc;
7621}
7622
7623
7624/**
7625 * Pushes a dword segment register value onto the stack.
7626 *
7627 * @returns Strict VBox status code.
7628 * @param pIemCpu The IEM per CPU data.
7629 * @param u32Value The value to push.
7630 */
7631IEM_STATIC VBOXSTRICTRC iemMemStackPushU32SReg(PIEMCPU pIemCpu, uint32_t u32Value)
7632{
7633 /* Increment the stack pointer. */
7634 uint64_t uNewRsp;
7635 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7636 RTGCPTR GCPtrTop = iemRegGetRspForPush(pIemCpu, pCtx, 4, &uNewRsp);
7637
7638 VBOXSTRICTRC rc;
7639 if (IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
7640 {
7641 /* The recompiler writes a full dword. */
7642 uint32_t *pu32Dst;
7643 rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7644 if (rc == VINF_SUCCESS)
7645 {
7646 *pu32Dst = u32Value;
7647 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_STACK_W);
7648 }
7649 }
7650 else
7651 {
7652        /* The Intel docs talk about zero extending the selector register
7653           value. My actual Intel CPU here might be zero extending the value,
7654           but it still only writes the lower word... */
7655 /** @todo Test this on new HW and on AMD and in 64-bit mode. Also test what
7656         * happens when crossing an electric page boundary, is the high word
7657 * checked for write accessibility or not? Probably it is. What about
7658 * segment limits? */
7659 uint16_t *pu16Dst;
7660 rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(uint32_t), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_RW);
7661 if (rc == VINF_SUCCESS)
7662 {
7663 *pu16Dst = (uint16_t)u32Value;
7664 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_STACK_RW);
7665 }
7666 }
7667
7668    /* Commit the new RSP value unless an access handler made trouble. */
7669 if (rc == VINF_SUCCESS)
7670 pCtx->rsp = uNewRsp;
7671
7672 return rc;
7673}
7674
7675
7676/**
7677 * Pushes a qword onto the stack.
7678 *
7679 * @returns Strict VBox status code.
7680 * @param pIemCpu The IEM per CPU data.
7681 * @param u64Value The value to push.
7682 */
7683IEM_STATIC VBOXSTRICTRC iemMemStackPushU64(PIEMCPU pIemCpu, uint64_t u64Value)
7684{
7685 /* Increment the stack pointer. */
7686 uint64_t uNewRsp;
7687 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7688 RTGCPTR GCPtrTop = iemRegGetRspForPush(pIemCpu, pCtx, 8, &uNewRsp);
7689
7690    /* Write the qword the lazy way. */
7691 uint64_t *pu64Dst;
7692 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7693 if (rc == VINF_SUCCESS)
7694 {
7695 *pu64Dst = u64Value;
7696 rc = iemMemCommitAndUnmap(pIemCpu, pu64Dst, IEM_ACCESS_STACK_W);
7697 }
7698
7699    /* Commit the new RSP value unless an access handler made trouble. */
7700 if (rc == VINF_SUCCESS)
7701 pCtx->rsp = uNewRsp;
7702
7703 return rc;
7704}
7705
7706
7707/**
7708 * Pops a word from the stack.
7709 *
7710 * @returns Strict VBox status code.
7711 * @param pIemCpu The IEM per CPU data.
7712 * @param pu16Value Where to store the popped value.
7713 */
7714IEM_STATIC VBOXSTRICTRC iemMemStackPopU16(PIEMCPU pIemCpu, uint16_t *pu16Value)
7715{
7716 /* Increment the stack pointer. */
7717 uint64_t uNewRsp;
7718 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7719 RTGCPTR GCPtrTop = iemRegGetRspForPop(pIemCpu, pCtx, 2, &uNewRsp);
7720
7721    /* Fetch the word the lazy way. */
7722 uint16_t const *pu16Src;
7723 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
7724 if (rc == VINF_SUCCESS)
7725 {
7726 *pu16Value = *pu16Src;
7727 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
7728
7729 /* Commit the new RSP value. */
7730 if (rc == VINF_SUCCESS)
7731 pCtx->rsp = uNewRsp;
7732 }
7733
7734 return rc;
7735}
7736
7737
7738/**
7739 * Pops a dword from the stack.
7740 *
7741 * @returns Strict VBox status code.
7742 * @param pIemCpu The IEM per CPU data.
7743 * @param pu32Value Where to store the popped value.
7744 */
7745IEM_STATIC VBOXSTRICTRC iemMemStackPopU32(PIEMCPU pIemCpu, uint32_t *pu32Value)
7746{
7747 /* Increment the stack pointer. */
7748 uint64_t uNewRsp;
7749 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7750 RTGCPTR GCPtrTop = iemRegGetRspForPop(pIemCpu, pCtx, 4, &uNewRsp);
7751
7752    /* Fetch the dword the lazy way. */
7753 uint32_t const *pu32Src;
7754 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
7755 if (rc == VINF_SUCCESS)
7756 {
7757 *pu32Value = *pu32Src;
7758 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
7759
7760 /* Commit the new RSP value. */
7761 if (rc == VINF_SUCCESS)
7762 pCtx->rsp = uNewRsp;
7763 }
7764
7765 return rc;
7766}
7767
7768
7769/**
7770 * Pops a qword from the stack.
7771 *
7772 * @returns Strict VBox status code.
7773 * @param pIemCpu The IEM per CPU data.
7774 * @param pu64Value Where to store the popped value.
7775 */
7776IEM_STATIC VBOXSTRICTRC iemMemStackPopU64(PIEMCPU pIemCpu, uint64_t *pu64Value)
7777{
7778 /* Increment the stack pointer. */
7779 uint64_t uNewRsp;
7780 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7781 RTGCPTR GCPtrTop = iemRegGetRspForPop(pIemCpu, pCtx, 8, &uNewRsp);
7782
7783    /* Fetch the qword the lazy way. */
7784 uint64_t const *pu64Src;
7785 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
7786 if (rc == VINF_SUCCESS)
7787 {
7788 *pu64Value = *pu64Src;
7789 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
7790
7791 /* Commit the new RSP value. */
7792 if (rc == VINF_SUCCESS)
7793 pCtx->rsp = uNewRsp;
7794 }
7795
7796 return rc;
7797}
7798
7799
7800/**
7801 * Pushes a word onto the stack, using a temporary stack pointer.
7802 *
7803 * @returns Strict VBox status code.
7804 * @param pIemCpu The IEM per CPU data.
7805 * @param u16Value The value to push.
7806 * @param pTmpRsp Pointer to the temporary stack pointer.
7807 */
7808IEM_STATIC VBOXSTRICTRC iemMemStackPushU16Ex(PIEMCPU pIemCpu, uint16_t u16Value, PRTUINT64U pTmpRsp)
7809{
7810 /* Increment the stack pointer. */
7811 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7812 RTUINT64U NewRsp = *pTmpRsp;
7813 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pIemCpu, pCtx, &NewRsp, 2);
7814
7815 /* Write the word the lazy way. */
7816 uint16_t *pu16Dst;
7817 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7818 if (rc == VINF_SUCCESS)
7819 {
7820 *pu16Dst = u16Value;
7821 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_STACK_W);
7822 }
7823
7824    /* Commit the new RSP value unless an access handler made trouble. */
7825 if (rc == VINF_SUCCESS)
7826 *pTmpRsp = NewRsp;
7827
7828 return rc;
7829}
7830
7831
7832/**
7833 * Pushes a dword onto the stack, using a temporary stack pointer.
7834 *
7835 * @returns Strict VBox status code.
7836 * @param pIemCpu The IEM per CPU data.
7837 * @param u32Value The value to push.
7838 * @param pTmpRsp Pointer to the temporary stack pointer.
7839 */
7840IEM_STATIC VBOXSTRICTRC iemMemStackPushU32Ex(PIEMCPU pIemCpu, uint32_t u32Value, PRTUINT64U pTmpRsp)
7841{
7842 /* Increment the stack pointer. */
7843 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7844 RTUINT64U NewRsp = *pTmpRsp;
7845 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pIemCpu, pCtx, &NewRsp, 4);
7846
7847    /* Write the dword the lazy way. */
7848 uint32_t *pu32Dst;
7849 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7850 if (rc == VINF_SUCCESS)
7851 {
7852 *pu32Dst = u32Value;
7853 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_STACK_W);
7854 }
7855
7856    /* Commit the new RSP value unless an access handler made trouble. */
7857 if (rc == VINF_SUCCESS)
7858 *pTmpRsp = NewRsp;
7859
7860 return rc;
7861}
7862
7863
7864/**
7865 * Pushes a qword onto the stack, using a temporary stack pointer.
7866 *
7867 * @returns Strict VBox status code.
7868 * @param pIemCpu The IEM per CPU data.
7869 * @param u64Value The value to push.
7870 * @param pTmpRsp Pointer to the temporary stack pointer.
7871 */
7872IEM_STATIC VBOXSTRICTRC iemMemStackPushU64Ex(PIEMCPU pIemCpu, uint64_t u64Value, PRTUINT64U pTmpRsp)
7873{
7874 /* Increment the stack pointer. */
7875 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7876 RTUINT64U NewRsp = *pTmpRsp;
7877 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pIemCpu, pCtx, &NewRsp, 8);
7878
7879    /* Write the qword the lazy way. */
7880 uint64_t *pu64Dst;
7881 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7882 if (rc == VINF_SUCCESS)
7883 {
7884 *pu64Dst = u64Value;
7885 rc = iemMemCommitAndUnmap(pIemCpu, pu64Dst, IEM_ACCESS_STACK_W);
7886 }
7887
7888    /* Commit the new RSP value unless an access handler made trouble. */
7889 if (rc == VINF_SUCCESS)
7890 *pTmpRsp = NewRsp;
7891
7892 return rc;
7893}
7894
7895
7896/**
7897 * Pops a word from the stack, using a temporary stack pointer.
7898 *
7899 * @returns Strict VBox status code.
7900 * @param pIemCpu The IEM per CPU data.
7901 * @param pu16Value Where to store the popped value.
7902 * @param pTmpRsp Pointer to the temporary stack pointer.
7903 */
7904IEM_STATIC VBOXSTRICTRC iemMemStackPopU16Ex(PIEMCPU pIemCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp)
7905{
7906 /* Increment the stack pointer. */
7907 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7908 RTUINT64U NewRsp = *pTmpRsp;
7909 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pIemCpu, pCtx, &NewRsp, 2);
7910
7911    /* Fetch the word the lazy way. */
7912 uint16_t const *pu16Src;
7913 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
7914 if (rc == VINF_SUCCESS)
7915 {
7916 *pu16Value = *pu16Src;
7917 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
7918
7919 /* Commit the new RSP value. */
7920 if (rc == VINF_SUCCESS)
7921 *pTmpRsp = NewRsp;
7922 }
7923
7924 return rc;
7925}
7926
7927
7928/**
7929 * Pops a dword from the stack, using a temporary stack pointer.
7930 *
7931 * @returns Strict VBox status code.
7932 * @param pIemCpu The IEM per CPU data.
7933 * @param pu32Value Where to store the popped value.
7934 * @param pTmpRsp Pointer to the temporary stack pointer.
7935 */
7936IEM_STATIC VBOXSTRICTRC iemMemStackPopU32Ex(PIEMCPU pIemCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp)
7937{
7938 /* Increment the stack pointer. */
7939 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7940 RTUINT64U NewRsp = *pTmpRsp;
7941 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pIemCpu, pCtx, &NewRsp, 4);
7942
7943    /* Fetch the dword the lazy way. */
7944 uint32_t const *pu32Src;
7945 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
7946 if (rc == VINF_SUCCESS)
7947 {
7948 *pu32Value = *pu32Src;
7949 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
7950
7951 /* Commit the new RSP value. */
7952 if (rc == VINF_SUCCESS)
7953 *pTmpRsp = NewRsp;
7954 }
7955
7956 return rc;
7957}
7958
7959
7960/**
7961 * Pops a qword from the stack, using a temporary stack pointer.
7962 *
7963 * @returns Strict VBox status code.
7964 * @param pIemCpu The IEM per CPU data.
7965 * @param pu64Value Where to store the popped value.
7966 * @param pTmpRsp Pointer to the temporary stack pointer.
7967 */
7968IEM_STATIC VBOXSTRICTRC iemMemStackPopU64Ex(PIEMCPU pIemCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp)
7969{
7970 /* Increment the stack pointer. */
7971 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7972 RTUINT64U NewRsp = *pTmpRsp;
7973 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pIemCpu, pCtx, &NewRsp, 8);
7974
7975    /* Fetch the qword the lazy way. */
7976 uint64_t const *pu64Src;
7977 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
7978 if (rcStrict == VINF_SUCCESS)
7979 {
7980 *pu64Value = *pu64Src;
7981 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
7982
7983 /* Commit the new RSP value. */
7984 if (rcStrict == VINF_SUCCESS)
7985 *pTmpRsp = NewRsp;
7986 }
7987
7988 return rcStrict;
7989}
7990
7991
7992/**
7993 * Begin a special stack push (used by interrupts, exceptions and such).
7994 *
7995 * This will raise \#SS or \#PF if appropriate.
7996 *
7997 * @returns Strict VBox status code.
7998 * @param pIemCpu The IEM per CPU data.
7999 * @param cbMem The number of bytes to push onto the stack.
8000 * @param ppvMem Where to return the pointer to the stack memory.
8001 * As with the other memory functions this could be
8002 * direct access or bounce buffered access, so
8003 * don't commit register until the commit call
8004 * don't commit the register until the commit call
8005 * @param puNewRsp Where to return the new RSP value. This must be
8006 * passed unchanged to
8007 * iemMemStackPushCommitSpecial().
8008 */
8009IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PIEMCPU pIemCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp)
8010{
8011 Assert(cbMem < UINT8_MAX);
8012 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
8013 RTGCPTR GCPtrTop = iemRegGetRspForPush(pIemCpu, pCtx, (uint8_t)cbMem, puNewRsp);
8014 return iemMemMap(pIemCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
8015}
8016
8017
8018/**
8019 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
8020 *
8021 * This will update the rSP.
8022 *
8023 * @returns Strict VBox status code.
8024 * @param pIemCpu The IEM per CPU data.
8025 * @param pvMem The pointer returned by
8026 * iemMemStackPushBeginSpecial().
8027 * @param uNewRsp The new RSP value returned by
8028 * iemMemStackPushBeginSpecial().
8029 */
8030IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PIEMCPU pIemCpu, void *pvMem, uint64_t uNewRsp)
8031{
8032 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pIemCpu, pvMem, IEM_ACCESS_STACK_W);
8033 if (rcStrict == VINF_SUCCESS)
8034 pIemCpu->CTX_SUFF(pCtx)->rsp = uNewRsp;
8035 return rcStrict;
8036}
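
/*
 * Usage sketch (illustrative only; the frame layout and local names are made
 * up for the example): the special push API is meant to be used in a
 * begin/fill/commit sequence when building an exception or interrupt stack
 * frame, so that nothing is committed if any part of the frame faults.
 *
 *     uint64_t     uNewRsp;
 *     uint32_t    *pau32Frame;
 *     VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pIemCpu, 3 * sizeof(uint32_t),
 *                                                         (void **)&pau32Frame, &uNewRsp);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;
 *     pau32Frame[0] = uOldEip;      // lowest address, last value "pushed"
 *     pau32Frame[1] = uOldCs;
 *     pau32Frame[2] = uOldEFlags;   // highest address, first value "pushed"
 *     rcStrict = iemMemStackPushCommitSpecial(pIemCpu, pau32Frame, uNewRsp);
 */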
8037
8038
8039/**
8040 * Begin a special stack pop (used by iret, retf and such).
8041 *
8042 * This will raise \#SS or \#PF if appropriate.
8043 *
8044 * @returns Strict VBox status code.
8045 * @param pIemCpu The IEM per CPU data.
8046 * @param   cbMem               The number of bytes to pop off the stack.
8047 * @param ppvMem Where to return the pointer to the stack memory.
8048 * @param puNewRsp Where to return the new RSP value. This must be
8049 * passed unchanged to
8050 * iemMemStackPopCommitSpecial() or applied
8051 * manually if iemMemStackPopDoneSpecial() is used.
8052 */
8053IEM_STATIC VBOXSTRICTRC iemMemStackPopBeginSpecial(PIEMCPU pIemCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
8054{
8055 Assert(cbMem < UINT8_MAX);
8056 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
8057 RTGCPTR GCPtrTop = iemRegGetRspForPop(pIemCpu, pCtx, (uint8_t)cbMem, puNewRsp);
8058 return iemMemMap(pIemCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
8059}
8060
8061
8062/**
8063 * Continue a special stack pop (used by iret and retf).
8064 *
8065 * This will raise \#SS or \#PF if appropriate.
8066 *
8067 * @returns Strict VBox status code.
8068 * @param pIemCpu The IEM per CPU data.
8069 * @param   cbMem               The number of bytes to pop off the stack.
8070 * @param ppvMem Where to return the pointer to the stack memory.
8071 * @param puNewRsp Where to return the new RSP value. This must be
8072 * passed unchanged to
8073 * iemMemStackPopCommitSpecial() or applied
8074 * manually if iemMemStackPopDoneSpecial() is used.
8075 */
8076IEM_STATIC VBOXSTRICTRC iemMemStackPopContinueSpecial(PIEMCPU pIemCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
8077{
8078 Assert(cbMem < UINT8_MAX);
8079 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
8080 RTUINT64U NewRsp;
8081 NewRsp.u = *puNewRsp;
8082 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pIemCpu, pCtx, &NewRsp, 8);
8083 *puNewRsp = NewRsp.u;
8084 return iemMemMap(pIemCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
8085}
8086
8087
8088/**
8089 * Commits a special stack pop (started by iemMemStackPopBeginSpecial).
8090 *
8091 * This will update the rSP.
8092 *
8093 * @returns Strict VBox status code.
8094 * @param pIemCpu The IEM per CPU data.
8095 * @param pvMem The pointer returned by
8096 * iemMemStackPopBeginSpecial().
8097 * @param uNewRsp The new RSP value returned by
8098 * iemMemStackPopBeginSpecial().
8099 */
8100IEM_STATIC VBOXSTRICTRC iemMemStackPopCommitSpecial(PIEMCPU pIemCpu, void const *pvMem, uint64_t uNewRsp)
8101{
8102 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
8103 if (rcStrict == VINF_SUCCESS)
8104 pIemCpu->CTX_SUFF(pCtx)->rsp = uNewRsp;
8105 return rcStrict;
8106}
8107
8108
8109/**
8110 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
8111 * iemMemStackPopContinueSpecial).
8112 *
8113 * The caller will manually commit the rSP.
8114 *
8115 * @returns Strict VBox status code.
8116 * @param pIemCpu The IEM per CPU data.
8117 * @param pvMem The pointer returned by
8118 * iemMemStackPopBeginSpecial() or
8119 * iemMemStackPopContinueSpecial().
8120 */
8121IEM_STATIC VBOXSTRICTRC iemMemStackPopDoneSpecial(PIEMCPU pIemCpu, void const *pvMem)
8122{
8123 return iemMemCommitAndUnmap(pIemCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
8124}
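
/*
 * Usage sketch (illustrative only; local names are made up): the special pop
 * API mirrors the push variant and is used by the iret/retf implementations.
 * The frame is mapped read-only, the values are read out, and rSP is either
 * committed via iemMemStackPopCommitSpecial() or applied manually after
 * iemMemStackPopDoneSpecial() once the new context has been validated.
 *
 *     uint64_t        uNewRsp;
 *     uint32_t const *pau32Frame;
 *     VBOXSTRICTRC rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 3 * sizeof(uint32_t),
 *                                                        (void const **)&pau32Frame, &uNewRsp);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;
 *     uint32_t const uNewEip    = pau32Frame[0];
 *     uint32_t const uNewCs     = pau32Frame[1];
 *     uint32_t const uNewEFlags = pau32Frame[2];
 *     // ... validate the new context ...
 *     rcStrict = iemMemStackPopCommitSpecial(pIemCpu, pau32Frame, uNewRsp);
 */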
8125
8126
8127/**
8128 * Fetches a system table byte.
8129 *
8130 * @returns Strict VBox status code.
8131 * @param pIemCpu The IEM per CPU data.
8132 * @param pbDst Where to return the byte.
8133 * @param iSegReg The index of the segment register to use for
8134 * this access. The base and limits are checked.
8135 * @param GCPtrMem The address of the guest memory.
8136 */
8137IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PIEMCPU pIemCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8138{
8139 /* The lazy approach for now... */
8140 uint8_t const *pbSrc;
8141 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pbSrc, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
8142 if (rc == VINF_SUCCESS)
8143 {
8144 *pbDst = *pbSrc;
8145 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pbSrc, IEM_ACCESS_SYS_R);
8146 }
8147 return rc;
8148}
8149
8150
8151/**
8152 * Fetches a system table word.
8153 *
8154 * @returns Strict VBox status code.
8155 * @param pIemCpu The IEM per CPU data.
8156 * @param pu16Dst Where to return the word.
8157 * @param iSegReg The index of the segment register to use for
8158 * this access. The base and limits are checked.
8159 * @param GCPtrMem The address of the guest memory.
8160 */
8161IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PIEMCPU pIemCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8162{
8163 /* The lazy approach for now... */
8164 uint16_t const *pu16Src;
8165 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
8166 if (rc == VINF_SUCCESS)
8167 {
8168 *pu16Dst = *pu16Src;
8169 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_SYS_R);
8170 }
8171 return rc;
8172}
8173
8174
8175/**
8176 * Fetches a system table dword.
8177 *
8178 * @returns Strict VBox status code.
8179 * @param pIemCpu The IEM per CPU data.
8180 * @param pu32Dst Where to return the dword.
8181 * @param iSegReg The index of the segment register to use for
8182 * this access. The base and limits are checked.
8183 * @param GCPtrMem The address of the guest memory.
8184 */
8185IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8186{
8187 /* The lazy approach for now... */
8188 uint32_t const *pu32Src;
8189 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
8190 if (rc == VINF_SUCCESS)
8191 {
8192 *pu32Dst = *pu32Src;
8193 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);
8194 }
8195 return rc;
8196}
8197
8198
8199/**
8200 * Fetches a system table qword.
8201 *
8202 * @returns Strict VBox status code.
8203 * @param pIemCpu The IEM per CPU data.
8204 * @param pu64Dst Where to return the qword.
8205 * @param iSegReg The index of the segment register to use for
8206 * this access. The base and limits are checked.
8207 * @param GCPtrMem The address of the guest memory.
8208 */
8209IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8210{
8211 /* The lazy approach for now... */
8212 uint64_t const *pu64Src;
8213 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
8214 if (rc == VINF_SUCCESS)
8215 {
8216 *pu64Dst = *pu64Src;
8217 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);
8218 }
8219 return rc;
8220}
8221
8222
8223/**
8224 * Fetches a descriptor table entry with caller specified error code.
8225 *
8226 * @returns Strict VBox status code.
8227 * @param pIemCpu The IEM per CPU.
8228 * @param pDesc Where to return the descriptor table entry.
8229 * @param uSel The selector which table entry to fetch.
8230 * @param uXcpt The exception to raise on table lookup error.
8231 * @param uErrorCode The error code associated with the exception.
8232 */
8233IEM_STATIC VBOXSTRICTRC
8234iemMemFetchSelDescWithErr(PIEMCPU pIemCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode)
8235{
8236 AssertPtr(pDesc);
8237 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
8238
8239 /** @todo did the 286 require all 8 bytes to be accessible? */
8240 /*
8241 * Get the selector table base and check bounds.
8242 */
8243 RTGCPTR GCPtrBase;
8244 if (uSel & X86_SEL_LDT)
8245 {
8246 if ( !pCtx->ldtr.Attr.n.u1Present
8247 || (uSel | X86_SEL_RPL_LDT) > pCtx->ldtr.u32Limit )
8248 {
8249 Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
8250 uSel, pCtx->ldtr.u32Limit, pCtx->ldtr.Sel));
8251 return iemRaiseXcptOrInt(pIemCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
8252 uErrorCode, 0);
8253 }
8254
8255 Assert(pCtx->ldtr.Attr.n.u1Present);
8256 GCPtrBase = pCtx->ldtr.u64Base;
8257 }
8258 else
8259 {
8260 if ((uSel | X86_SEL_RPL_LDT) > pCtx->gdtr.cbGdt)
8261 {
8262 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pCtx->gdtr.cbGdt));
8263 return iemRaiseXcptOrInt(pIemCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
8264 uErrorCode, 0);
8265 }
8266 GCPtrBase = pCtx->gdtr.pGdt;
8267 }
8268
8269 /*
8270 * Read the legacy descriptor and maybe the long mode extensions if
8271 * required.
8272 */
8273 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pIemCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
8274 if (rcStrict == VINF_SUCCESS)
8275 {
8276 if ( !IEM_IS_LONG_MODE(pIemCpu)
8277 || pDesc->Legacy.Gen.u1DescType)
8278 pDesc->Long.au64[1] = 0;
8279 else if ((uint32_t)(uSel | X86_SEL_RPL_LDT) + 8 <= (uSel & X86_SEL_LDT ? pCtx->ldtr.u32Limit : pCtx->gdtr.cbGdt))
8280 rcStrict = iemMemFetchSysU64(pIemCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
8281 else
8282 {
8283 Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
8284 /** @todo is this the right exception? */
8285 return iemRaiseXcptOrInt(pIemCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
8286 }
8287 }
8288 return rcStrict;
8289}
8290
8291
8292/**
8293 * Fetches a descriptor table entry.
8294 *
8295 * @returns Strict VBox status code.
8296 * @param pIemCpu The IEM per CPU.
8297 * @param pDesc Where to return the descriptor table entry.
8298 * @param uSel The selector which table entry to fetch.
8299 * @param uXcpt The exception to raise on table lookup error.
8300 */
8301IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PIEMCPU pIemCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt)
8302{
8303 return iemMemFetchSelDescWithErr(pIemCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
8304}
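
/*
 * Usage sketch (illustrative only, not a real caller): segment register load
 * paths typically fetch the descriptor, validate it, and only then set the
 * accessed bit before updating the hidden register state.  Roughly:
 *
 *     IEMSELDESC   Desc;
 *     VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uSel, X86_XCPT_GP);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;
 *     // ... check Desc.Legacy.Gen.u1Present, type, DPL/RPL, limits ...
 *     if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
 *     {
 *         rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uSel);
 *         if (rcStrict != VINF_SUCCESS)
 *             return rcStrict;
 *     }
 */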
8305
8306
8307/**
8308 * Fakes a long mode stack selector for SS = 0.
8309 *
8310 * @param pDescSs Where to return the fake stack descriptor.
8311 * @param uDpl The DPL we want.
8312 */
8313IEM_STATIC void iemMemFakeStackSelDesc(PIEMSELDESC pDescSs, uint32_t uDpl)
8314{
8315 pDescSs->Long.au64[0] = 0;
8316 pDescSs->Long.au64[1] = 0;
8317 pDescSs->Long.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
8318 pDescSs->Long.Gen.u1DescType = 1; /* 1 = code / data, 0 = system. */
8319 pDescSs->Long.Gen.u2Dpl = uDpl;
8320 pDescSs->Long.Gen.u1Present = 1;
8321 pDescSs->Long.Gen.u1Long = 1;
8322}
8323
8324
8325/**
8326 * Marks the selector descriptor as accessed (only non-system descriptors).
8327 *
8328 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
8329 * will therefore skip the limit checks.
8330 *
8331 * @returns Strict VBox status code.
8332 * @param pIemCpu The IEM per CPU.
8333 * @param uSel The selector.
8334 */
8335IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PIEMCPU pIemCpu, uint16_t uSel)
8336{
8337 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
8338
8339 /*
8340 * Get the selector table base and calculate the entry address.
8341 */
8342 RTGCPTR GCPtr = uSel & X86_SEL_LDT
8343 ? pCtx->ldtr.u64Base
8344 : pCtx->gdtr.pGdt;
8345 GCPtr += uSel & X86_SEL_MASK;
8346
8347 /*
8348     * ASMAtomicBitSet will assert if the address is misaligned, so do some
8349     * ugly stuff to avoid this.  This also makes sure the access is atomic and
8350     * more or less removes any question about 8-bit vs 32-bit accesses.
8351 */
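    /*
     * For reference: the accessed flag is bit 0 of the type field in descriptor
     * byte 5, i.e. bit 40 of the 8-byte descriptor.  In the aligned case below we
     * map bytes 4 thru 7, so the flag becomes bit 8 of that dword (byte 5 being
     * the second byte of the mapping).  In the misaligned case the whole
     * descriptor is mapped and the byte pointer is advanced to a dword aligned
     * address, with the bit index recalculated relative to it (40 minus 8 bits
     * per byte skipped).
     */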
8352 VBOXSTRICTRC rcStrict;
8353 uint32_t volatile *pu32;
8354 if ((GCPtr & 3) == 0)
8355 {
8356        /* The normal case: map the dword (bytes 4 thru 7) containing the accessed bit (bit 40). */
8357 GCPtr += 2 + 2;
8358 rcStrict = iemMemMap(pIemCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
8359 if (rcStrict != VINF_SUCCESS)
8360 return rcStrict;
8361        ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
8362 }
8363 else
8364 {
8365 /* The misaligned GDT/LDT case, map the whole thing. */
8366 rcStrict = iemMemMap(pIemCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
8367 if (rcStrict != VINF_SUCCESS)
8368 return rcStrict;
8369 switch ((uintptr_t)pu32 & 3)
8370 {
8371 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
8372 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
8373 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
8374 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
8375 }
8376 }
8377
8378 return iemMemCommitAndUnmap(pIemCpu, (void *)pu32, IEM_ACCESS_SYS_RW);
8379}
8380
8381/** @} */
8382
8383
8384/*
8385 * Include the C/C++ implementations of the instructions.
8386 */
8387#include "IEMAllCImpl.cpp.h"
8388
8389
8390
8391/** @name "Microcode" macros.
8392 *
8393 * The idea is that we should be able to use the same code both to interpret
8394 * instructions and, eventually, to recompile them.  Thus this obfuscation.
8395 *
8396 * @{
8397 */
8398#define IEM_MC_BEGIN(a_cArgs, a_cLocals) {
8399#define IEM_MC_END() }
8400#define IEM_MC_PAUSE() do {} while (0)
8401#define IEM_MC_CONTINUE() do {} while (0)
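
/*
 * Illustrative sketch (not used here): the trivial NOP-style instruction
 * bodies in the decoder files compose these statements like so,
 *
 *     IEM_MC_BEGIN(0, 0);
 *     IEM_MC_ADVANCE_RIP();
 *     IEM_MC_END();
 *
 * which the interpreter expands to plain C advancing RIP, while a recompiler
 * could translate the very same statement sequence into generated code.
 */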
8402
8403/** Internal macro. */
8404#define IEM_MC_RETURN_ON_FAILURE(a_Expr) \
8405 do \
8406 { \
8407 VBOXSTRICTRC rcStrict2 = a_Expr; \
8408 if (rcStrict2 != VINF_SUCCESS) \
8409 return rcStrict2; \
8410 } while (0)
8411
8412#define IEM_MC_ADVANCE_RIP() iemRegUpdateRipAndClearRF(pIemCpu)
8413#define IEM_MC_REL_JMP_S8(a_i8) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS8(pIemCpu, a_i8))
8414#define IEM_MC_REL_JMP_S16(a_i16) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS16(pIemCpu, a_i16))
8415#define IEM_MC_REL_JMP_S32(a_i32) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS32(pIemCpu, a_i32))
8416#define IEM_MC_SET_RIP_U16(a_u16NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pIemCpu), (a_u16NewIP)))
8417#define IEM_MC_SET_RIP_U32(a_u32NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pIemCpu), (a_u32NewIP)))
8418#define IEM_MC_SET_RIP_U64(a_u64NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pIemCpu), (a_u64NewIP)))
8419
8420#define IEM_MC_RAISE_DIVIDE_ERROR() return iemRaiseDivideError(pIemCpu)
8421#define IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() \
8422 do { \
8423 if ((pIemCpu)->CTX_SUFF(pCtx)->cr0 & (X86_CR0_EM | X86_CR0_TS)) \
8424 return iemRaiseDeviceNotAvailable(pIemCpu); \
8425 } while (0)
8426#define IEM_MC_MAYBE_RAISE_FPU_XCPT() \
8427 do { \
8428 if ((pIemCpu)->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FSW & X86_FSW_ES) \
8429 return iemRaiseMathFault(pIemCpu); \
8430 } while (0)
8431#define IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT() \
8432 do { \
8433 if ( (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
8434 || !(pIemCpu->CTX_SUFF(pCtx)->cr4 & X86_CR4_OSFXSR) \
8435 || !IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fSse2) \
8436 return iemRaiseUndefinedOpcode(pIemCpu); \
8437 if (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_TS) \
8438 return iemRaiseDeviceNotAvailable(pIemCpu); \
8439 } while (0)
8440#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT() \
8441 do { \
8442 if ( ((pIemCpu)->CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
8443 || !IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fMmx) \
8444 return iemRaiseUndefinedOpcode(pIemCpu); \
8445 if (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_TS) \
8446 return iemRaiseDeviceNotAvailable(pIemCpu); \
8447 } while (0)
8448#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT_CHECK_SSE_OR_MMXEXT() \
8449 do { \
8450 if ( ((pIemCpu)->CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
8451 || ( !IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fSse \
8452 && !IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fAmdMmxExts) ) \
8453 return iemRaiseUndefinedOpcode(pIemCpu); \
8454 if (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_TS) \
8455 return iemRaiseDeviceNotAvailable(pIemCpu); \
8456 } while (0)
8457#define IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO() \
8458 do { \
8459 if (pIemCpu->uCpl != 0) \
8460 return iemRaiseGeneralProtectionFault0(pIemCpu); \
8461 } while (0)
8462
8463
8464#define IEM_MC_LOCAL(a_Type, a_Name) a_Type a_Name
8465#define IEM_MC_LOCAL_CONST(a_Type, a_Name, a_Value) a_Type const a_Name = (a_Value)
8466#define IEM_MC_REF_LOCAL(a_pRefArg, a_Local) (a_pRefArg) = &(a_Local)
8467#define IEM_MC_ARG(a_Type, a_Name, a_iArg) a_Type a_Name
8468#define IEM_MC_ARG_CONST(a_Type, a_Name, a_Value, a_iArg) a_Type const a_Name = (a_Value)
8469#define IEM_MC_ARG_LOCAL_REF(a_Type, a_Name, a_Local, a_iArg) a_Type const a_Name = &(a_Local)
8470#define IEM_MC_ARG_LOCAL_EFLAGS(a_pName, a_Name, a_iArg) \
8471 uint32_t a_Name; \
8472 uint32_t *a_pName = &a_Name
8473#define IEM_MC_COMMIT_EFLAGS(a_EFlags) \
8474 do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u = (a_EFlags); Assert((pIemCpu)->CTX_SUFF(pCtx)->eflags.u & X86_EFL_1); } while (0)
8475
8476#define IEM_MC_ASSIGN(a_VarOrArg, a_CVariableOrConst) (a_VarOrArg) = (a_CVariableOrConst)
8477#define IEM_MC_ASSIGN_TO_SMALLER IEM_MC_ASSIGN
8478
8479#define IEM_MC_FETCH_GREG_U8(a_u8Dst, a_iGReg) (a_u8Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
8480#define IEM_MC_FETCH_GREG_U8_ZX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
8481#define IEM_MC_FETCH_GREG_U8_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
8482#define IEM_MC_FETCH_GREG_U8_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
8483#define IEM_MC_FETCH_GREG_U8_SX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = (int8_t)iemGRegFetchU8(pIemCpu, (a_iGReg))
8484#define IEM_MC_FETCH_GREG_U8_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int8_t)iemGRegFetchU8(pIemCpu, (a_iGReg))
8485#define IEM_MC_FETCH_GREG_U8_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int8_t)iemGRegFetchU8(pIemCpu, (a_iGReg))
8486#define IEM_MC_FETCH_GREG_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU16(pIemCpu, (a_iGReg))
8487#define IEM_MC_FETCH_GREG_U16_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU16(pIemCpu, (a_iGReg))
8488#define IEM_MC_FETCH_GREG_U16_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU16(pIemCpu, (a_iGReg))
8489#define IEM_MC_FETCH_GREG_U16_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int16_t)iemGRegFetchU16(pIemCpu, (a_iGReg))
8490#define IEM_MC_FETCH_GREG_U16_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int16_t)iemGRegFetchU16(pIemCpu, (a_iGReg))
8491#define IEM_MC_FETCH_GREG_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU32(pIemCpu, (a_iGReg))
8492#define IEM_MC_FETCH_GREG_U32_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU32(pIemCpu, (a_iGReg))
8493#define IEM_MC_FETCH_GREG_U32_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int32_t)iemGRegFetchU32(pIemCpu, (a_iGReg))
8494#define IEM_MC_FETCH_GREG_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU64(pIemCpu, (a_iGReg))
8495#define IEM_MC_FETCH_GREG_U64_ZX_U64 IEM_MC_FETCH_GREG_U64
8496#define IEM_MC_FETCH_SREG_U16(a_u16Dst, a_iSReg) (a_u16Dst) = iemSRegFetchU16(pIemCpu, (a_iSReg))
8497#define IEM_MC_FETCH_SREG_ZX_U32(a_u32Dst, a_iSReg) (a_u32Dst) = iemSRegFetchU16(pIemCpu, (a_iSReg))
8498#define IEM_MC_FETCH_SREG_ZX_U64(a_u64Dst, a_iSReg) (a_u64Dst) = iemSRegFetchU16(pIemCpu, (a_iSReg))
8499#define IEM_MC_FETCH_CR0_U16(a_u16Dst) (a_u16Dst) = (uint16_t)(pIemCpu)->CTX_SUFF(pCtx)->cr0
8500#define IEM_MC_FETCH_CR0_U32(a_u32Dst) (a_u32Dst) = (uint32_t)(pIemCpu)->CTX_SUFF(pCtx)->cr0
8501#define IEM_MC_FETCH_CR0_U64(a_u64Dst) (a_u64Dst) = (pIemCpu)->CTX_SUFF(pCtx)->cr0
8502#define IEM_MC_FETCH_LDTR_U16(a_u16Dst) (a_u16Dst) = (pIemCpu)->CTX_SUFF(pCtx)->ldtr.Sel
8503#define IEM_MC_FETCH_LDTR_U32(a_u32Dst) (a_u32Dst) = (pIemCpu)->CTX_SUFF(pCtx)->ldtr.Sel
8504#define IEM_MC_FETCH_LDTR_U64(a_u64Dst) (a_u64Dst) = (pIemCpu)->CTX_SUFF(pCtx)->ldtr.Sel
8505#define IEM_MC_FETCH_TR_U16(a_u16Dst) (a_u16Dst) = (pIemCpu)->CTX_SUFF(pCtx)->tr.Sel
8506#define IEM_MC_FETCH_TR_U32(a_u32Dst) (a_u32Dst) = (pIemCpu)->CTX_SUFF(pCtx)->tr.Sel
8507#define IEM_MC_FETCH_TR_U64(a_u64Dst) (a_u64Dst) = (pIemCpu)->CTX_SUFF(pCtx)->tr.Sel
8508/** @note Not for IOPL or IF testing or modification. */
8509#define IEM_MC_FETCH_EFLAGS(a_EFlags) (a_EFlags) = (pIemCpu)->CTX_SUFF(pCtx)->eflags.u
8510#define IEM_MC_FETCH_EFLAGS_U8(a_EFlags) (a_EFlags) = (uint8_t)(pIemCpu)->CTX_SUFF(pCtx)->eflags.u
8511#define IEM_MC_FETCH_FSW(a_u16Fsw) (a_u16Fsw) = pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FSW
8512#define IEM_MC_FETCH_FCW(a_u16Fcw) (a_u16Fcw) = pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FCW
8513
8514#define IEM_MC_STORE_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8(pIemCpu, (a_iGReg)) = (a_u8Value)
8515#define IEM_MC_STORE_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) = (a_u16Value)
8516#define IEM_MC_STORE_GREG_U32(a_iGReg, a_u32Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) = (uint32_t)(a_u32Value) /* clear high bits. */
8517#define IEM_MC_STORE_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) = (a_u64Value)
8518#define IEM_MC_STORE_GREG_U8_CONST IEM_MC_STORE_GREG_U8
8519#define IEM_MC_STORE_GREG_U16_CONST IEM_MC_STORE_GREG_U16
8520#define IEM_MC_STORE_GREG_U32_CONST IEM_MC_STORE_GREG_U32
8521#define IEM_MC_STORE_GREG_U64_CONST IEM_MC_STORE_GREG_U64
8522#define IEM_MC_CLEAR_HIGH_GREG_U64(a_iGReg) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= UINT32_MAX
8523#define IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(a_pu32Dst) do { (a_pu32Dst)[1] = 0; } while (0)
8524#define IEM_MC_STORE_FPUREG_R80_SRC_REF(a_iSt, a_pr80Src) \
8525 do { pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[a_iSt].r80 = *(a_pr80Src); } while (0)
8526
8527#define IEM_MC_REF_GREG_U8(a_pu8Dst, a_iGReg) (a_pu8Dst) = iemGRegRefU8(pIemCpu, (a_iGReg))
8528#define IEM_MC_REF_GREG_U16(a_pu16Dst, a_iGReg) (a_pu16Dst) = (uint16_t *)iemGRegRef(pIemCpu, (a_iGReg))
8529/** @todo User of IEM_MC_REF_GREG_U32 needs to clear the high bits on commit.
8530 * Use IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF! */
8531#define IEM_MC_REF_GREG_U32(a_pu32Dst, a_iGReg) (a_pu32Dst) = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg))
8532#define IEM_MC_REF_GREG_U64(a_pu64Dst, a_iGReg) (a_pu64Dst) = (uint64_t *)iemGRegRef(pIemCpu, (a_iGReg))
8533/** @note Not for IOPL or IF testing or modification. */
8534#define IEM_MC_REF_EFLAGS(a_pEFlags) (a_pEFlags) = &(pIemCpu)->CTX_SUFF(pCtx)->eflags.u
8535
8536#define IEM_MC_ADD_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) += (a_u8Value)
8537#define IEM_MC_ADD_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) += (a_u16Value)
8538#define IEM_MC_ADD_GREG_U32(a_iGReg, a_u32Value) \
8539 do { \
8540 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
8541 *pu32Reg += (a_u32Value); \
8542        pu32Reg[1] = 0; /* implicitly clear the high 32 bits. */ \
8543 } while (0)
8544#define IEM_MC_ADD_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) += (a_u64Value)
8545
8546#define IEM_MC_SUB_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) -= (a_u8Value)
8547#define IEM_MC_SUB_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) -= (a_u16Value)
8548#define IEM_MC_SUB_GREG_U32(a_iGReg, a_u32Value) \
8549 do { \
8550 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
8551 *pu32Reg -= (a_u32Value); \
8552        pu32Reg[1] = 0; /* implicitly clear the high 32 bits. */ \
8553 } while (0)
8554#define IEM_MC_SUB_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) -= (a_u64Value)
8555
8556#define IEM_MC_ADD_GREG_U8_TO_LOCAL(a_u8Value, a_iGReg) do { (a_u8Value) += iemGRegFetchU8( pIemCpu, (a_iGReg)); } while (0)
8557#define IEM_MC_ADD_GREG_U16_TO_LOCAL(a_u16Value, a_iGReg) do { (a_u16Value) += iemGRegFetchU16(pIemCpu, (a_iGReg)); } while (0)
8558#define IEM_MC_ADD_GREG_U32_TO_LOCAL(a_u32Value, a_iGReg) do { (a_u32Value) += iemGRegFetchU32(pIemCpu, (a_iGReg)); } while (0)
8559#define IEM_MC_ADD_GREG_U64_TO_LOCAL(a_u64Value, a_iGReg) do { (a_u64Value) += iemGRegFetchU64(pIemCpu, (a_iGReg)); } while (0)
8560#define IEM_MC_ADD_LOCAL_S16_TO_EFF_ADDR(a_EffAddr, a_i16) do { (a_EffAddr) += (a_i16); } while (0)
8561#define IEM_MC_ADD_LOCAL_S32_TO_EFF_ADDR(a_EffAddr, a_i32) do { (a_EffAddr) += (a_i32); } while (0)
8562#define IEM_MC_ADD_LOCAL_S64_TO_EFF_ADDR(a_EffAddr, a_i64) do { (a_EffAddr) += (a_i64); } while (0)
8563
8564#define IEM_MC_AND_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) &= (a_u8Mask); } while (0)
8565#define IEM_MC_AND_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) &= (a_u16Mask); } while (0)
8566#define IEM_MC_AND_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
8567#define IEM_MC_AND_LOCAL_U64(a_u64Local, a_u64Mask) do { (a_u64Local) &= (a_u64Mask); } while (0)
8568
8569#define IEM_MC_AND_ARG_U16(a_u16Arg, a_u16Mask) do { (a_u16Arg) &= (a_u16Mask); } while (0)
8570#define IEM_MC_AND_ARG_U32(a_u32Arg, a_u32Mask) do { (a_u32Arg) &= (a_u32Mask); } while (0)
8571#define IEM_MC_AND_ARG_U64(a_u64Arg, a_u64Mask) do { (a_u64Arg) &= (a_u64Mask); } while (0)
8572
8573#define IEM_MC_OR_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) |= (a_u8Mask); } while (0)
8574#define IEM_MC_OR_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
8575
8576#define IEM_MC_SAR_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) >>= (a_cShift); } while (0)
8577#define IEM_MC_SAR_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) >>= (a_cShift); } while (0)
8578#define IEM_MC_SAR_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) >>= (a_cShift); } while (0)
8579
8580#define IEM_MC_SHL_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) <<= (a_cShift); } while (0)
8581#define IEM_MC_SHL_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) <<= (a_cShift); } while (0)
8582#define IEM_MC_SHL_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) <<= (a_cShift); } while (0)
8583
8584#define IEM_MC_AND_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
8585
8586#define IEM_MC_OR_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
8587
8588#define IEM_MC_AND_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= (a_u8Value)
8589#define IEM_MC_AND_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= (a_u16Value)
8590#define IEM_MC_AND_GREG_U32(a_iGReg, a_u32Value) \
8591 do { \
8592 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
8593 *pu32Reg &= (a_u32Value); \
8594        pu32Reg[1] = 0; /* implicitly clear the high 32 bits. */ \
8595 } while (0)
8596#define IEM_MC_AND_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= (a_u64Value)
8597
8598#define IEM_MC_OR_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) |= (a_u8Value)
8599#define IEM_MC_OR_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) |= (a_u16Value)
8600#define IEM_MC_OR_GREG_U32(a_iGReg, a_u32Value) \
8601 do { \
8602 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
8603 *pu32Reg |= (a_u32Value); \
8604        pu32Reg[1] = 0; /* implicitly clear the high 32 bits. */ \
8605 } while (0)
8606#define IEM_MC_OR_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) |= (a_u64Value)
8607
8608
8609/** @note Not for IOPL or IF modification. */
8610#define IEM_MC_SET_EFL_BIT(a_fBit) do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u |= (a_fBit); } while (0)
8611/** @note Not for IOPL or IF modification. */
8612#define IEM_MC_CLEAR_EFL_BIT(a_fBit) do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u &= ~(a_fBit); } while (0)
8613/** @note Not for IOPL or IF modification. */
8614#define IEM_MC_FLIP_EFL_BIT(a_fBit) do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u ^= (a_fBit); } while (0)
8615
8616#define IEM_MC_CLEAR_FSW_EX() do { (pIemCpu)->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FSW &= X86_FSW_C_MASK | X86_FSW_TOP_MASK; } while (0)
8617
8618
8619#define IEM_MC_FETCH_MREG_U64(a_u64Value, a_iMReg) \
8620 do { (a_u64Value) = pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx; } while (0)
8621#define IEM_MC_FETCH_MREG_U32(a_u32Value, a_iMReg) \
8622 do { (a_u32Value) = pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[0]; } while (0)
8623#define IEM_MC_STORE_MREG_U64(a_iMReg, a_u64Value) \
8624 do { pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (a_u64Value); } while (0)
8625#define IEM_MC_STORE_MREG_U32_ZX_U64(a_iMReg, a_u32Value) \
8626 do { pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (uint32_t)(a_u32Value); } while (0)
8627#define IEM_MC_REF_MREG_U64(a_pu64Dst, a_iMReg) \
8628 (a_pu64Dst) = (&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
8629#define IEM_MC_REF_MREG_U64_CONST(a_pu64Dst, a_iMReg) \
8630 (a_pu64Dst) = ((uint64_t const *)&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
8631#define IEM_MC_REF_MREG_U32_CONST(a_pu32Dst, a_iMReg) \
8632 (a_pu32Dst) = ((uint32_t const *)&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
8633
8634#define IEM_MC_FETCH_XREG_U128(a_u128Value, a_iXReg) \
8635 do { (a_u128Value) = pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm; } while (0)
8636#define IEM_MC_FETCH_XREG_U64(a_u64Value, a_iXReg) \
8637 do { (a_u64Value) = pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0]; } while (0)
8638#define IEM_MC_FETCH_XREG_U32(a_u32Value, a_iXReg) \
8639 do { (a_u32Value) = pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au32[0]; } while (0)
8640#define IEM_MC_STORE_XREG_U128(a_iXReg, a_u128Value) \
8641 do { pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm = (a_u128Value); } while (0)
8642#define IEM_MC_STORE_XREG_U64_ZX_U128(a_iXReg, a_u64Value) \
8643 do { pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); \
8644 pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
8645 } while (0)
8646#define IEM_MC_STORE_XREG_U32_ZX_U128(a_iXReg, a_u32Value) \
8647 do { pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (uint32_t)(a_u32Value); \
8648 pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
8649 } while (0)
8650#define IEM_MC_REF_XREG_U128(a_pu128Dst, a_iXReg) \
8651 (a_pu128Dst) = (&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm)
8652#define IEM_MC_REF_XREG_U128_CONST(a_pu128Dst, a_iXReg) \
8653 (a_pu128Dst) = ((uint128_t const *)&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm)
8654#define IEM_MC_REF_XREG_U64_CONST(a_pu64Dst, a_iXReg) \
8655 (a_pu64Dst) = ((uint64_t const *)&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0])
8656
8657#define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
8658 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem)))
8659#define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
8660 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem16)))
8661#define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
8662 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem32)))
8663
8664#define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
8665 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem)))
8666#define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
8667 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
8668#define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
8669 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, (uint16_t *)&(a_i16Dst), (a_iSeg), (a_GCPtrMem)))
8670
8671#define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
8672 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem)))
8673#define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
8674 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
8675#define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
8676 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, (uint32_t *)&(a_i32Dst), (a_iSeg), (a_GCPtrMem)))
8677
8678#define IEM_MC_FETCH_MEM_S32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8679 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataS32SxU64(pIemCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
8680
8681#define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8682 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
8683#define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
8684 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
8685#define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
8686 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64AlignedU128(pIemCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
8687#define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
8688 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, (uint64_t *)&(a_i64Dst), (a_iSeg), (a_GCPtrMem)))
8689
8690#define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
8691 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &(a_r32Dst).u32, (a_iSeg), (a_GCPtrMem)))
8692#define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
8693 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, &(a_r64Dst).au64[0], (a_iSeg), (a_GCPtrMem)))
8694#define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
8695 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataR80(pIemCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem)))
8696
8697#define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
8698 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128(pIemCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
8699#define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
8700 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128AlignedSse(pIemCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
8701
8702
8703
8704#define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
8705 do { \
8706 uint8_t u8Tmp; \
8707 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
8708 (a_u16Dst) = u8Tmp; \
8709 } while (0)
8710#define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
8711 do { \
8712 uint8_t u8Tmp; \
8713 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
8714 (a_u32Dst) = u8Tmp; \
8715 } while (0)
8716#define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8717 do { \
8718 uint8_t u8Tmp; \
8719 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
8720 (a_u64Dst) = u8Tmp; \
8721 } while (0)
8722#define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
8723 do { \
8724 uint16_t u16Tmp; \
8725 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
8726 (a_u32Dst) = u16Tmp; \
8727 } while (0)
8728#define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8729 do { \
8730 uint16_t u16Tmp; \
8731 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
8732 (a_u64Dst) = u16Tmp; \
8733 } while (0)
8734#define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8735 do { \
8736 uint32_t u32Tmp; \
8737 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
8738 (a_u64Dst) = u32Tmp; \
8739 } while (0)
8740
8741#define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
8742 do { \
8743 uint8_t u8Tmp; \
8744 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
8745 (a_u16Dst) = (int8_t)u8Tmp; \
8746 } while (0)
8747#define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
8748 do { \
8749 uint8_t u8Tmp; \
8750 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
8751 (a_u32Dst) = (int8_t)u8Tmp; \
8752 } while (0)
8753#define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8754 do { \
8755 uint8_t u8Tmp; \
8756 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
8757 (a_u64Dst) = (int8_t)u8Tmp; \
8758 } while (0)
8759#define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
8760 do { \
8761 uint16_t u16Tmp; \
8762 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
8763 (a_u32Dst) = (int16_t)u16Tmp; \
8764 } while (0)
8765#define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8766 do { \
8767 uint16_t u16Tmp; \
8768 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
8769 (a_u64Dst) = (int16_t)u16Tmp; \
8770 } while (0)
8771#define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
8772 do { \
8773 uint32_t u32Tmp; \
8774 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
8775 (a_u64Dst) = (int32_t)u32Tmp; \
8776 } while (0)
8777
8778#define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
8779 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value)))
8780#define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
8781 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value)))
8782#define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
8783 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value)))
8784#define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
8785 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value)))
8786
8787#define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
8788 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u8C)))
8789#define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
8790 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u16C)))
8791#define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
8792 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u32C)))
8793#define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
8794 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u64C)))
8795
8796#define IEM_MC_STORE_MEM_I8_CONST_BY_REF( a_pi8Dst, a_i8C) *(a_pi8Dst) = (a_i8C)
8797#define IEM_MC_STORE_MEM_I16_CONST_BY_REF(a_pi16Dst, a_i16C) *(a_pi16Dst) = (a_i16C)
8798#define IEM_MC_STORE_MEM_I32_CONST_BY_REF(a_pi32Dst, a_i32C) *(a_pi32Dst) = (a_i32C)
8799#define IEM_MC_STORE_MEM_I64_CONST_BY_REF(a_pi64Dst, a_i64C) *(a_pi64Dst) = (a_i64C)
8800#define IEM_MC_STORE_MEM_NEG_QNAN_R32_BY_REF(a_pr32Dst) (a_pr32Dst)->u32 = UINT32_C(0xffc00000)
8801#define IEM_MC_STORE_MEM_NEG_QNAN_R64_BY_REF(a_pr64Dst) (a_pr64Dst)->au64[0] = UINT64_C(0xfff8000000000000)
8802#define IEM_MC_STORE_MEM_NEG_QNAN_R80_BY_REF(a_pr80Dst) \
8803 do { \
8804 (a_pr80Dst)->au64[0] = UINT64_C(0xc000000000000000); \
8805 (a_pr80Dst)->au16[4] = UINT16_C(0xffff); \
8806 } while (0)
8807
8808#define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
8809 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
8810#define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
8811 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128AlignedSse(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
8812
8813
8814#define IEM_MC_PUSH_U16(a_u16Value) \
8815 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU16(pIemCpu, (a_u16Value)))
8816#define IEM_MC_PUSH_U32(a_u32Value) \
8817 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32(pIemCpu, (a_u32Value)))
8818#define IEM_MC_PUSH_U32_SREG(a_u32Value) \
8819 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32SReg(pIemCpu, (a_u32Value)))
8820#define IEM_MC_PUSH_U64(a_u64Value) \
8821 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU64(pIemCpu, (a_u64Value)))
8822
8823#define IEM_MC_POP_U16(a_pu16Value) \
8824 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU16(pIemCpu, (a_pu16Value)))
8825#define IEM_MC_POP_U32(a_pu32Value) \
8826 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU32(pIemCpu, (a_pu32Value)))
8827#define IEM_MC_POP_U64(a_pu64Value) \
8828 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU64(pIemCpu, (a_pu64Value)))
8829
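/*
 * Illustrative sketch (the register index iGReg from the ModR/M or opcode
 * decode is assumed): a simple 16-bit register push decodes to a body along
 * these lines,
 *
 *     IEM_MC_BEGIN(0, 1);
 *     IEM_MC_LOCAL(uint16_t, u16Value);
 *     IEM_MC_FETCH_GREG_U16(u16Value, iGReg);
 *     IEM_MC_PUSH_U16(u16Value);
 *     IEM_MC_ADVANCE_RIP();
 *     IEM_MC_END();
 *
 * with the pop macros used the same way in the reverse direction.
 */
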
8830/** Maps guest memory for direct or bounce buffered access.
8831 * The purpose is to pass it to an operand implementation, thus the a_iArg.
8832 * @remarks May return.
8833 */
8834#define IEM_MC_MEM_MAP(a_pMem, a_fAccess, a_iSeg, a_GCPtrMem, a_iArg) \
8835 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pIemCpu, (void **)&(a_pMem), sizeof(*(a_pMem)), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
8836
8837/** Maps guest memory for direct or bounce buffered access.
8838 * The purpose is to pass it to an operand implementation, thus the a_iArg.
8839 * @remarks May return.
8840 */
8841#define IEM_MC_MEM_MAP_EX(a_pvMem, a_fAccess, a_cbMem, a_iSeg, a_GCPtrMem, a_iArg) \
8842 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pIemCpu, (void **)&(a_pvMem), (a_cbMem), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
8843
8844/** Commits the memory and unmaps the guest memory.
8845 * @remarks May return.
8846 */
8847#define IEM_MC_MEM_COMMIT_AND_UNMAP(a_pvMem, a_fAccess) \
8848 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pIemCpu, (a_pvMem), (a_fAccess)))
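
/*
 * Usage sketch (illustrative; the worker name iemAImpl_add_u32 and the local
 * variables are taken as given from the surrounding decode): a read-modify-write
 * memory operand is mapped, operated on in place, and then committed,
 *
 *     IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
 *     IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_add_u32, pu32Dst, u32Src, pEFlags);
 *     IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, IEM_ACCESS_DATA_RW);
 *
 * Since the mapping may be bounce buffered, no register state should be
 * committed before IEM_MC_MEM_COMMIT_AND_UNMAP succeeds.
 */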
8849
8850/** Commits the memory and unmaps the guest memory unless the FPU status word
8851 * (@a a_u16FSW) and the FPU control word indicate a pending unmasked exception
8852 * that would prevent the FPU store instruction from actually storing.
8853 *
8854 * The current understanding is that \#O, \#U, \#IA and \#IS will prevent a
8855 * store, while \#P will not.
8856 *
8857 * @remarks May in theory return - for now.
8858 */
8859#define IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(a_pvMem, a_fAccess, a_u16FSW) \
8860 do { \
8861 if ( !(a_u16FSW & X86_FSW_ES) \
8862 || !( (a_u16FSW & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE)) \
8863 & ~(pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FCW & X86_FCW_MASK_ALL) ) ) \
8864 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pIemCpu, (a_pvMem), (a_fAccess))); \
8865 } while (0)
8866
8867/** Calculate efficient address from R/M. */
8868#define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
8869 IEM_MC_RETURN_ON_FAILURE(iemOpHlpCalcRmEffAddr(pIemCpu, (bRm), (cbImm), &(a_GCPtrEff)))
8870
8871#define IEM_MC_CALL_VOID_AIMPL_0(a_pfn) (a_pfn)()
8872#define IEM_MC_CALL_VOID_AIMPL_1(a_pfn, a0) (a_pfn)((a0))
8873#define IEM_MC_CALL_VOID_AIMPL_2(a_pfn, a0, a1) (a_pfn)((a0), (a1))
8874#define IEM_MC_CALL_VOID_AIMPL_3(a_pfn, a0, a1, a2) (a_pfn)((a0), (a1), (a2))
8875#define IEM_MC_CALL_VOID_AIMPL_4(a_pfn, a0, a1, a2, a3) (a_pfn)((a0), (a1), (a2), (a3))
8876#define IEM_MC_CALL_AIMPL_3(a_rc, a_pfn, a0, a1, a2) (a_rc) = (a_pfn)((a0), (a1), (a2))
8877#define IEM_MC_CALL_AIMPL_4(a_rc, a_pfn, a0, a1, a2, a3) (a_rc) = (a_pfn)((a0), (a1), (a2), (a3))
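
/*
 * Illustrative sketch of a register-form 32-bit ALU operation built from the
 * macros above (iemAImpl_add_u32 follows the assembly worker naming
 * convention; iGRegDst/iGRegSrc come from the ModR/M decode and are assumed):
 *
 *     IEM_MC_BEGIN(3, 0);
 *     IEM_MC_ARG(uint32_t *, pu32Dst, 0);
 *     IEM_MC_ARG(uint32_t,   u32Src,  1);
 *     IEM_MC_ARG(uint32_t *, pEFlags, 2);
 *     IEM_MC_FETCH_GREG_U32(u32Src, iGRegSrc);
 *     IEM_MC_REF_GREG_U32(pu32Dst, iGRegDst);
 *     IEM_MC_REF_EFLAGS(pEFlags);
 *     IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_add_u32, pu32Dst, u32Src, pEFlags);
 *     IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Dst);  // see the @todo on IEM_MC_REF_GREG_U32
 *     IEM_MC_ADVANCE_RIP();
 *     IEM_MC_END();
 */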
8878
8879/**
8880 * Defers the rest of the instruction emulation to a C implementation routine
8881 * and returns, only taking the standard parameters.
8882 *
8883 * @param a_pfnCImpl The pointer to the C routine.
8884 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
8885 */
8886#define IEM_MC_CALL_CIMPL_0(a_pfnCImpl) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode)
8887
8888/**
8889 * Defers the rest of the instruction emulation to a C implementation routine and
8890 * returns, taking one argument in addition to the standard ones.
8891 *
8892 * @param a_pfnCImpl The pointer to the C routine.
8893 * @param a0 The argument.
8894 */
8895#define IEM_MC_CALL_CIMPL_1(a_pfnCImpl, a0) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0)
8896
8897/**
8898 * Defers the rest of the instruction emulation to a C implementation routine
8899 * and returns, taking two arguments in addition to the standard ones.
8900 *
8901 * @param a_pfnCImpl The pointer to the C routine.
8902 * @param a0 The first extra argument.
8903 * @param a1 The second extra argument.
8904 */
8905#define IEM_MC_CALL_CIMPL_2(a_pfnCImpl, a0, a1) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1)
8906
8907/**
8908 * Defers the rest of the instruction emulation to a C implementation routine
8909 * and returns, taking three arguments in addition to the standard ones.
8910 *
8911 * @param a_pfnCImpl The pointer to the C routine.
8912 * @param a0 The first extra argument.
8913 * @param a1 The second extra argument.
8914 * @param a2 The third extra argument.
8915 */
8916#define IEM_MC_CALL_CIMPL_3(a_pfnCImpl, a0, a1, a2) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2)
8917
8918/**
8919 * Defers the rest of the instruction emulation to a C implementation routine
8920 * and returns, taking four arguments in addition to the standard ones.
8921 *
8922 * @param a_pfnCImpl The pointer to the C routine.
8923 * @param a0 The first extra argument.
8924 * @param a1 The second extra argument.
8925 * @param a2 The third extra argument.
8926 * @param a3 The fourth extra argument.
8927 */
8928#define IEM_MC_CALL_CIMPL_4(a_pfnCImpl, a0, a1, a2, a3) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2, a3)
8929
8930/**
8931 * Defers the rest of the instruction emulation to a C implementation routine
8932 * and returns, taking five arguments in addition to the standard ones.
8933 *
8934 * @param a_pfnCImpl The pointer to the C routine.
8935 * @param a0 The first extra argument.
8936 * @param a1 The second extra argument.
8937 * @param a2 The third extra argument.
8938 * @param a3 The fourth extra argument.
8939 * @param a4 The fifth extra argument.
8940 */
8941#define IEM_MC_CALL_CIMPL_5(a_pfnCImpl, a0, a1, a2, a3, a4) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2, a3, a4)
8942
8943/**
8944 * Defers the entire instruction emulation to a C implementation routine and
8945 * returns, only taking the standard parameters.
8946 *
8947 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
8948 *
8949 * @param a_pfnCImpl The pointer to the C routine.
8950 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
8951 */
8952#define IEM_MC_DEFER_TO_CIMPL_0(a_pfnCImpl) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode)
8953
8954/**
8955 * Defers the entire instruction emulation to a C implementation routine and
8956 * returns, taking one argument in addition to the standard ones.
8957 *
8958 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
8959 *
8960 * @param a_pfnCImpl The pointer to the C routine.
8961 * @param a0 The argument.
8962 */
8963#define IEM_MC_DEFER_TO_CIMPL_1(a_pfnCImpl, a0) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0)
8964
8965/**
8966 * Defers the entire instruction emulation to a C implementation routine and
8967 * returns, taking two arguments in addition to the standard ones.
8968 *
8969 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
8970 *
8971 * @param a_pfnCImpl The pointer to the C routine.
8972 * @param a0 The first extra argument.
8973 * @param a1 The second extra argument.
8974 */
8975#define IEM_MC_DEFER_TO_CIMPL_2(a_pfnCImpl, a0, a1) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1)
8976
8977/**
8978 * Defers the entire instruction emulation to a C implementation routine and
8979 * returns, taking three arguments in addition to the standard ones.
8980 *
8981 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
8982 *
8983 * @param a_pfnCImpl The pointer to the C routine.
8984 * @param a0 The first extra argument.
8985 * @param a1 The second extra argument.
8986 * @param a2 The third extra argument.
8987 */
8988#define IEM_MC_DEFER_TO_CIMPL_3(a_pfnCImpl, a0, a1, a2) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2)
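
/*
 * Usage sketch (illustrative; iemCImpl_something is a made-up name): an
 * instruction implemented entirely in C is decoded and then handed off in a
 * single statement, without any IEM_MC_BEGIN/IEM_MC_END block around it,
 *
 *     return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_something, pIemCpu->enmEffOpSize);
 *
 * whereas the IEM_MC_CALL_CIMPL_* variants above are used from inside an
 * IEM_MC_BEGIN/IEM_MC_END block when only the tail end of the emulation is
 * done in C.
 */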
8989
8990/**
8991 * Calls a FPU assembly implementation taking one visible argument.
8992 *
8993 * @param a_pfnAImpl Pointer to the assembly FPU routine.
8994 * @param a0 The first extra argument.
8995 */
8996#define IEM_MC_CALL_FPU_AIMPL_1(a_pfnAImpl, a0) \
8997 do { \
8998 iemFpuPrepareUsage(pIemCpu); \
8999 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0)); \
9000 } while (0)
9001
9002/**
9003 * Calls a FPU assembly implementation taking two visible arguments.
9004 *
9005 * @param a_pfnAImpl Pointer to the assembly FPU routine.
9006 * @param a0 The first extra argument.
9007 * @param a1 The second extra argument.
9008 */
9009#define IEM_MC_CALL_FPU_AIMPL_2(a_pfnAImpl, a0, a1) \
9010 do { \
9011 iemFpuPrepareUsage(pIemCpu); \
9012 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
9013 } while (0)
9014
9015/**
9016 * Calls a FPU assembly implementation taking three visible arguments.
9017 *
9018 * @param a_pfnAImpl Pointer to the assembly FPU routine.
9019 * @param a0 The first extra argument.
9020 * @param a1 The second extra argument.
9021 * @param a2 The third extra argument.
9022 */
9023#define IEM_MC_CALL_FPU_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
9024 do { \
9025 iemFpuPrepareUsage(pIemCpu); \
9026 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
9027 } while (0)
9028
9029#define IEM_MC_SET_FPU_RESULT(a_FpuData, a_FSW, a_pr80Value) \
9030 do { \
9031 (a_FpuData).FSW = (a_FSW); \
9032 (a_FpuData).r80Result = *(a_pr80Value); \
9033 } while (0)
9034
9035/** Pushes FPU result onto the stack. */
9036#define IEM_MC_PUSH_FPU_RESULT(a_FpuData) \
9037 iemFpuPushResult(pIemCpu, &a_FpuData)
9038/** Pushes FPU result onto the stack and sets the FPUDP. */
9039#define IEM_MC_PUSH_FPU_RESULT_MEM_OP(a_FpuData, a_iEffSeg, a_GCPtrEff) \
9040 iemFpuPushResultWithMemOp(pIemCpu, &a_FpuData, a_iEffSeg, a_GCPtrEff)
9041
9042/** Replaces ST0 with the first result value and pushes the second result value onto the FPU stack. */
9043#define IEM_MC_PUSH_FPU_RESULT_TWO(a_FpuDataTwo) \
9044 iemFpuPushResultTwo(pIemCpu, &a_FpuDataTwo)
9045
9046/** Stores FPU result in a stack register. */
9047#define IEM_MC_STORE_FPU_RESULT(a_FpuData, a_iStReg) \
9048 iemFpuStoreResult(pIemCpu, &a_FpuData, a_iStReg)
9049/** Stores FPU result in a stack register and pops the stack. */
9050#define IEM_MC_STORE_FPU_RESULT_THEN_POP(a_FpuData, a_iStReg) \
9051 iemFpuStoreResultThenPop(pIemCpu, &a_FpuData, a_iStReg)
9052/** Stores FPU result in a stack register and sets the FPUDP. */
9053#define IEM_MC_STORE_FPU_RESULT_MEM_OP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
9054 iemFpuStoreResultWithMemOp(pIemCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
9055/** Stores FPU result in a stack register, sets the FPUDP, and pops the
9056 * stack. */
9057#define IEM_MC_STORE_FPU_RESULT_WITH_MEM_OP_THEN_POP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
9058 iemFpuStoreResultWithMemOpThenPop(pIemCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
9059
9060/** Only update the FOP, FPUIP, and FPUCS. (For FNOP.) */
9061#define IEM_MC_UPDATE_FPU_OPCODE_IP() \
9062 iemFpuUpdateOpcodeAndIp(pIemCpu)
9063/** Free a stack register (for FFREE and FFREEP). */
9064#define IEM_MC_FPU_STACK_FREE(a_iStReg) \
9065 iemFpuStackFree(pIemCpu, a_iStReg)
9066/** Increment the FPU stack pointer. */
9067#define IEM_MC_FPU_STACK_INC_TOP() \
9068 iemFpuStackIncTop(pIemCpu)
9069/** Decrement the FPU stack pointer. */
9070#define IEM_MC_FPU_STACK_DEC_TOP() \
9071 iemFpuStackDecTop(pIemCpu)
9072
9073/** Updates the FSW, FOP, FPUIP, and FPUCS. */
9074#define IEM_MC_UPDATE_FSW(a_u16FSW) \
9075 iemFpuUpdateFSW(pIemCpu, a_u16FSW)
9076/** Updates the FSW with a constant value as well as FOP, FPUIP, and FPUCS. */
9077#define IEM_MC_UPDATE_FSW_CONST(a_u16FSW) \
9078 iemFpuUpdateFSW(pIemCpu, a_u16FSW)
9079/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS. */
9080#define IEM_MC_UPDATE_FSW_WITH_MEM_OP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
9081 iemFpuUpdateFSWWithMemOp(pIemCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
9082/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack. */
9083#define IEM_MC_UPDATE_FSW_THEN_POP(a_u16FSW) \
9084 iemFpuUpdateFSWThenPop(pIemCpu, a_u16FSW)
9085/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP and FPUDS, and then pops the
9086 * stack. */
9087#define IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
9088 iemFpuUpdateFSWWithMemOpThenPop(pIemCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
9089/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack twice. */
9090#define IEM_MC_UPDATE_FSW_THEN_POP_POP(a_u16FSW) \
9091    iemFpuUpdateFSWThenPopPop(pIemCpu, a_u16FSW)
9092
9093/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. */
9094#define IEM_MC_FPU_STACK_UNDERFLOW(a_iStDst) \
9095 iemFpuStackUnderflow(pIemCpu, a_iStDst)
9096/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
9097 * stack. */
9098#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP(a_iStDst) \
9099 iemFpuStackUnderflowThenPop(pIemCpu, a_iStDst)
9100/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
9101 * FPUDS. */
9102#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
9103 iemFpuStackUnderflowWithMemOp(pIemCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
9104/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
9105 * FPUDS. Pops stack. */
9106#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
9107 iemFpuStackUnderflowWithMemOpThenPop(pIemCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
9108/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
9109 * stack twice. */
9110#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP_POP() \
9111 iemFpuStackUnderflowThenPopPop(pIemCpu)
9112/** Raises a FPU stack underflow exception for an instruction pushing a result
9113 * value onto the stack. Sets FPUIP, FPUCS and FOP. */
9114#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW() \
9115 iemFpuStackPushUnderflow(pIemCpu)
9116/** Raises a FPU stack underflow exception for an instruction pushing a result
9117 * value onto the stack and replacing ST0. Sets FPUIP, FPUCS and FOP. */
9118#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW_TWO() \
9119 iemFpuStackPushUnderflowTwo(pIemCpu)
9120
9121/** Raises a FPU stack overflow exception as part of a push attempt. Sets
9122 * FPUIP, FPUCS and FOP. */
9123#define IEM_MC_FPU_STACK_PUSH_OVERFLOW() \
9124 iemFpuStackPushOverflow(pIemCpu)
9125/** Raises a FPU stack overflow exception as part of a push attempt. Sets
9126 * FPUIP, FPUCS, FOP, FPUDP and FPUDS. */
9127#define IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(a_iEffSeg, a_GCPtrEff) \
9128 iemFpuStackPushOverflowWithMemOp(pIemCpu, a_iEffSeg, a_GCPtrEff)
9129/** Indicates that we (might) have modified the FPU state. */
9130#define IEM_MC_USED_FPU() \
9131 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_FPU_REM)
9132
9133/**
9134 * Calls a MMX assembly implementation taking two visible arguments.
9135 *
9136 * @param a_pfnAImpl Pointer to the assembly MMX routine.
9137 * @param a0 The first extra argument.
9138 * @param a1 The second extra argument.
9139 */
9140#define IEM_MC_CALL_MMX_AIMPL_2(a_pfnAImpl, a0, a1) \
9141 do { \
9142 iemFpuPrepareUsage(pIemCpu); \
9143 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
9144 } while (0)
9145
9146/**
9147 * Calls a MMX assembly implementation taking three visible arguments.
9148 *
9149 * @param a_pfnAImpl Pointer to the assembly MMX routine.
9150 * @param a0 The first extra argument.
9151 * @param a1 The second extra argument.
9152 * @param a2 The third extra argument.
9153 */
9154#define IEM_MC_CALL_MMX_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
9155 do { \
9156 iemFpuPrepareUsage(pIemCpu); \
9157 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
9158 } while (0)
9159
9160
9161/**
9162 * Calls a SSE assembly implementation taking two visible arguments.
9163 *
9164 * @param   a_pfnAImpl      Pointer to the assembly SSE routine.
9165 * @param a0 The first extra argument.
9166 * @param a1 The second extra argument.
9167 */
9168#define IEM_MC_CALL_SSE_AIMPL_2(a_pfnAImpl, a0, a1) \
9169 do { \
9170 iemFpuPrepareUsageSse(pIemCpu); \
9171 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
9172 } while (0)
9173
9174/**
9175 * Calls a SSE assembly implementation taking three visible arguments.
9176 *
9177 * @param   a_pfnAImpl      Pointer to the assembly SSE routine.
9178 * @param a0 The first extra argument.
9179 * @param a1 The second extra argument.
9180 * @param a2 The third extra argument.
9181 */
9182#define IEM_MC_CALL_SSE_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
9183 do { \
9184 iemFpuPrepareUsageSse(pIemCpu); \
9185 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
9186 } while (0)
9187
9188
9189/** @note Not for IOPL or IF testing. */
9190#define IEM_MC_IF_EFL_BIT_SET(a_fBit) if (pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit)) {
9191/** @note Not for IOPL or IF testing. */
9192#define IEM_MC_IF_EFL_BIT_NOT_SET(a_fBit) if (!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit))) {
9193/** @note Not for IOPL or IF testing. */
9194#define IEM_MC_IF_EFL_ANY_BITS_SET(a_fBits) if (pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBits)) {
9195/** @note Not for IOPL or IF testing. */
9196#define IEM_MC_IF_EFL_NO_BITS_SET(a_fBits) if (!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBits))) {
9197/** @note Not for IOPL or IF testing. */
9198#define IEM_MC_IF_EFL_BITS_NE(a_fBit1, a_fBit2) \
9199 if ( !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
9200 != !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
9201/** @note Not for IOPL or IF testing. */
9202#define IEM_MC_IF_EFL_BITS_EQ(a_fBit1, a_fBit2) \
9203 if ( !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
9204 == !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
9205/** @note Not for IOPL or IF testing. */
9206#define IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(a_fBit, a_fBit1, a_fBit2) \
9207 if ( (pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit)) \
9208 || !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
9209 != !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
9210/** @note Not for IOPL or IF testing. */
9211#define IEM_MC_IF_EFL_BIT_NOT_SET_AND_BITS_EQ(a_fBit, a_fBit1, a_fBit2) \
9212 if ( !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit)) \
9213 && !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
9214 == !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
9215#define IEM_MC_IF_CX_IS_NZ() if (pIemCpu->CTX_SUFF(pCtx)->cx != 0) {
9216#define IEM_MC_IF_ECX_IS_NZ() if (pIemCpu->CTX_SUFF(pCtx)->ecx != 0) {
9217#define IEM_MC_IF_RCX_IS_NZ() if (pIemCpu->CTX_SUFF(pCtx)->rcx != 0) {
9218/** @note Not for IOPL or IF testing. */
9219#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
9220 if ( pIemCpu->CTX_SUFF(pCtx)->cx != 0 \
9221 && (pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
9222/** @note Not for IOPL or IF testing. */
9223#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
9224 if ( pIemCpu->CTX_SUFF(pCtx)->ecx != 0 \
9225 && (pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
9226/** @note Not for IOPL or IF testing. */
9227#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
9228 if ( pIemCpu->CTX_SUFF(pCtx)->rcx != 0 \
9229 && (pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
9230/** @note Not for IOPL or IF testing. */
9231#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
9232 if ( pIemCpu->CTX_SUFF(pCtx)->cx != 0 \
9233 && !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
9234/** @note Not for IOPL or IF testing. */
9235#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
9236 if ( pIemCpu->CTX_SUFF(pCtx)->ecx != 0 \
9237 && !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
9238/** @note Not for IOPL or IF testing. */
9239#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
9240 if ( pIemCpu->CTX_SUFF(pCtx)->rcx != 0 \
9241 && !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
9242#define IEM_MC_IF_LOCAL_IS_Z(a_Local) if ((a_Local) == 0) {
9243#define IEM_MC_IF_GREG_BIT_SET(a_iGReg, a_iBitNo) if (*(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) & RT_BIT_64(a_iBitNo)) {
9244#define IEM_MC_IF_FPUREG_NOT_EMPTY(a_iSt) \
9245 if (iemFpuStRegNotEmpty(pIemCpu, (a_iSt)) == VINF_SUCCESS) {
9246#define IEM_MC_IF_FPUREG_IS_EMPTY(a_iSt) \
9247 if (iemFpuStRegNotEmpty(pIemCpu, (a_iSt)) != VINF_SUCCESS) {
9248#define IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(a_pr80Dst, a_iSt) \
9249 if (iemFpuStRegNotEmptyRef(pIemCpu, (a_iSt), &(a_pr80Dst)) == VINF_SUCCESS) {
9250#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(a_pr80Dst0, a_iSt0, a_pr80Dst1, a_iSt1) \
9251 if (iemFpu2StRegsNotEmptyRef(pIemCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1), &(a_pr80Dst1)) == VINF_SUCCESS) {
9252#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(a_pr80Dst0, a_iSt0, a_iSt1) \
9253 if (iemFpu2StRegsNotEmptyRefFirst(pIemCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1)) == VINF_SUCCESS) {
9254#define IEM_MC_IF_FCW_IM() \
9255 if (pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FCW & X86_FCW_IM) {
9256
9257#define IEM_MC_ELSE() } else {
9258#define IEM_MC_ENDIF() } do {} while (0)
9259
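/* Illustrative sketch only (assumed usage, not from this file): the IF/ELSE/
 * ENDIF macros supply the braces themselves, so a conditional body reads
 *     IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF)
 *         IEM_MC_REL_JMP_S8(i8Imm);
 *     IEM_MC_ELSE()
 *         IEM_MC_ADVANCE_RIP();
 *     IEM_MC_ENDIF();
 * IEM_MC_REL_JMP_S8 and IEM_MC_ADVANCE_RIP are assumed from the wider IEM
 * microcode vocabulary and are shown only to illustrate the shape. */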
9260/** @} */
9261
9262
9263/** @name Opcode Debug Helpers.
9264 * @{
9265 */
9266#ifdef DEBUG
9267# define IEMOP_MNEMONIC(a_szMnemonic) \
9268 Log4(("decode - %04x:%RGv %s%s [#%u]\n", pIemCpu->CTX_SUFF(pCtx)->cs.Sel, pIemCpu->CTX_SUFF(pCtx)->rip, \
9269 pIemCpu->fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, pIemCpu->cInstructions))
9270# define IEMOP_MNEMONIC2(a_szMnemonic, a_szOps) \
9271 Log4(("decode - %04x:%RGv %s%s %s [#%u]\n", pIemCpu->CTX_SUFF(pCtx)->cs.Sel, pIemCpu->CTX_SUFF(pCtx)->rip, \
9272 pIemCpu->fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, a_szOps, pIemCpu->cInstructions))
9273#else
9274# define IEMOP_MNEMONIC(a_szMnemonic) do { } while (0)
9275# define IEMOP_MNEMONIC2(a_szMnemonic, a_szOps) do { } while (0)
9276#endif
9277
9278/** @} */
9279
9280
9281/** @name Opcode Helpers.
9282 * @{
9283 */
9284
9285/** The instruction raises an \#UD in real and V8086 mode. */
9286#define IEMOP_HLP_NO_REAL_OR_V86_MODE() \
9287 do \
9288 { \
9289 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu)) \
9290 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
9291 } while (0)
9292
9293/** The instruction allows no lock prefixing (in this encoding), throw \#UD if
9294 * lock prefixed.
9295 * @deprecated IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX */
9296#define IEMOP_HLP_NO_LOCK_PREFIX() \
9297 do \
9298 { \
9299 if (pIemCpu->fPrefixes & IEM_OP_PRF_LOCK) \
9300 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
9301 } while (0)
9302
9303/** The instruction is not available in 64-bit mode, throw \#UD if we're in
9304 * 64-bit mode. */
9305#define IEMOP_HLP_NO_64BIT() \
9306 do \
9307 { \
9308 if (pIemCpu->enmCpuMode == IEMMODE_64BIT) \
9309 return IEMOP_RAISE_INVALID_OPCODE(); \
9310 } while (0)
9311
9312/** The instruction is only available in 64-bit mode, throw \#UD if we're not in
9313 * 64-bit mode. */
9314#define IEMOP_HLP_ONLY_64BIT() \
9315 do \
9316 { \
9317 if (pIemCpu->enmCpuMode != IEMMODE_64BIT) \
9318 return IEMOP_RAISE_INVALID_OPCODE(); \
9319 } while (0)
9320
9321/** The instruction defaults to 64-bit operand size if 64-bit mode. */
9322#define IEMOP_HLP_DEFAULT_64BIT_OP_SIZE() \
9323 do \
9324 { \
9325 if (pIemCpu->enmCpuMode == IEMMODE_64BIT) \
9326 iemRecalEffOpSize64Default(pIemCpu); \
9327 } while (0)
9328
9329/** The instruction has 64-bit operand size if 64-bit mode. */
9330#define IEMOP_HLP_64BIT_OP_SIZE() \
9331 do \
9332 { \
9333 if (pIemCpu->enmCpuMode == IEMMODE_64BIT) \
9334 pIemCpu->enmEffOpSize = pIemCpu->enmDefOpSize = IEMMODE_64BIT; \
9335 } while (0)
9336
9337/** Only a REX prefix immediately preceding the first opcode byte takes
9338 * effect. This macro helps ensure that, as well as logging bad guest code. */
9339#define IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE(a_szPrf) \
9340 do \
9341 { \
9342 if (RT_UNLIKELY(pIemCpu->fPrefixes & IEM_OP_PRF_REX)) \
9343 { \
9344 Log5((a_szPrf ": Overriding REX prefix at %RX64! fPrefixes=%#x\n", \
9345 pIemCpu->CTX_SUFF(pCtx)->rip, pIemCpu->fPrefixes)); \
9346 pIemCpu->fPrefixes &= ~IEM_OP_PRF_REX_MASK; \
9347 pIemCpu->uRexB = 0; \
9348 pIemCpu->uRexIndex = 0; \
9349 pIemCpu->uRexReg = 0; \
9350 iemRecalEffOpSize(pIemCpu); \
9351 } \
9352 } while (0)
9353
9354/**
9355 * Done decoding.
9356 */
9357#define IEMOP_HLP_DONE_DECODING() \
9358 do \
9359 { \
9360 /*nothing for now, maybe later... */ \
9361 } while (0)
9362
9363/**
9364 * Done decoding, raise \#UD exception if lock prefix present.
9365 */
9366#define IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX() \
9367 do \
9368 { \
9369 if (RT_LIKELY(!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))) \
9370 { /* likely */ } \
9371 else \
9372 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
9373 } while (0)
9374#define IEMOP_HLP_DECODED_NL_1(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_fDisOpType) \
9375 do \
9376 { \
9377 if (RT_LIKELY(!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))) \
9378 { /* likely */ } \
9379 else \
9380 { \
9381 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_fDisOpType); \
9382 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
9383 } \
9384 } while (0)
9385#define IEMOP_HLP_DECODED_NL_2(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_uDisParam1, a_fDisOpType) \
9386 do \
9387 { \
9388 if (RT_LIKELY(!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))) \
9389 { /* likely */ } \
9390 else \
9391 { \
9392 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_uDisParam1); NOREF(a_fDisOpType); \
9393 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
9394 } \
9395 } while (0)
9396/**
9397 * Done decoding, raise \#UD exception if any lock, repz or repnz prefixes
9398 * are present.
9399 */
9400#define IEMOP_HLP_DONE_DECODING_NO_LOCK_REPZ_OR_REPNZ_PREFIXES() \
9401 do \
9402 { \
9403 if (RT_LIKELY(!(pIemCpu->fPrefixes & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)))) \
9404 { /* likely */ } \
9405 else \
9406 return IEMOP_RAISE_INVALID_OPCODE(); \
9407 } while (0)
9408
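/* Illustrative decoder fragment (assumed shape, not from this file) combining
 * the debug and helper macros above at the top of an opcode handler:
 *     FNIEMOP_DEF(iemOp_example)
 *     {
 *         IEMOP_MNEMONIC("example");
 *         IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
 *         ...
 *     }
 * iemOp_example is a hypothetical name; the real handlers live in
 * IEMAllInstructions.cpp.h, included further down. */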
9409
9410/**
9411 * Calculates the effective address of a ModR/M memory operand.
9412 *
9413 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
9414 *
9415 * @return Strict VBox status code.
9416 * @param pIemCpu The IEM per CPU data.
9417 * @param bRm The ModRM byte.
9418 * @param cbImm The size of any immediate following the
9419 * effective address opcode bytes. Important for
9420 * RIP relative addressing.
9421 * @param pGCPtrEff Where to return the effective address.
9422 */
9423IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PIEMCPU pIemCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff)
9424{
9425 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
9426 PCCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
9427#define SET_SS_DEF() \
9428 do \
9429 { \
9430 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_SEG_MASK)) \
9431 pIemCpu->iEffSeg = X86_SREG_SS; \
9432 } while (0)
9433
9434 if (pIemCpu->enmCpuMode != IEMMODE_64BIT)
9435 {
9436/** @todo Check the effective address size crap! */
9437 if (pIemCpu->enmEffAddrMode == IEMMODE_16BIT)
9438 {
9439 uint16_t u16EffAddr;
9440
9441 /* Handle the disp16 form with no registers first. */
9442 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
9443 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
9444 else
9445 {
9446 /* Get the displacement. */
9447 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9448 {
9449 case 0: u16EffAddr = 0; break;
9450 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
9451 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
9452 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
9453 }
9454
9455 /* Add the base and index registers to the disp. */
9456 switch (bRm & X86_MODRM_RM_MASK)
9457 {
9458 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
9459 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
9460 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
9461 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
9462 case 4: u16EffAddr += pCtx->si; break;
9463 case 5: u16EffAddr += pCtx->di; break;
9464 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
9465 case 7: u16EffAddr += pCtx->bx; break;
9466 }
9467 }
9468
9469 *pGCPtrEff = u16EffAddr;
9470 }
9471 else
9472 {
9473 Assert(pIemCpu->enmEffAddrMode == IEMMODE_32BIT);
9474 uint32_t u32EffAddr;
9475
9476 /* Handle the disp32 form with no registers first. */
9477 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9478 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
9479 else
9480 {
9481 /* Get the register (or SIB) value. */
9482 switch ((bRm & X86_MODRM_RM_MASK))
9483 {
9484 case 0: u32EffAddr = pCtx->eax; break;
9485 case 1: u32EffAddr = pCtx->ecx; break;
9486 case 2: u32EffAddr = pCtx->edx; break;
9487 case 3: u32EffAddr = pCtx->ebx; break;
9488 case 4: /* SIB */
9489 {
9490 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9491
9492 /* Get the index and scale it. */
9493 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
9494 {
9495 case 0: u32EffAddr = pCtx->eax; break;
9496 case 1: u32EffAddr = pCtx->ecx; break;
9497 case 2: u32EffAddr = pCtx->edx; break;
9498 case 3: u32EffAddr = pCtx->ebx; break;
9499 case 4: u32EffAddr = 0; /*none */ break;
9500 case 5: u32EffAddr = pCtx->ebp; break;
9501 case 6: u32EffAddr = pCtx->esi; break;
9502 case 7: u32EffAddr = pCtx->edi; break;
9503 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9504 }
9505 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9506
9507 /* add base */
9508 switch (bSib & X86_SIB_BASE_MASK)
9509 {
9510 case 0: u32EffAddr += pCtx->eax; break;
9511 case 1: u32EffAddr += pCtx->ecx; break;
9512 case 2: u32EffAddr += pCtx->edx; break;
9513 case 3: u32EffAddr += pCtx->ebx; break;
9514 case 4: u32EffAddr += pCtx->esp; SET_SS_DEF(); break;
9515 case 5:
9516 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9517 {
9518 u32EffAddr += pCtx->ebp;
9519 SET_SS_DEF();
9520 }
9521 else
9522 {
9523 uint32_t u32Disp;
9524 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9525 u32EffAddr += u32Disp;
9526 }
9527 break;
9528 case 6: u32EffAddr += pCtx->esi; break;
9529 case 7: u32EffAddr += pCtx->edi; break;
9530 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9531 }
9532 break;
9533 }
9534 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
9535 case 6: u32EffAddr = pCtx->esi; break;
9536 case 7: u32EffAddr = pCtx->edi; break;
9537 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9538 }
9539
9540 /* Get and add the displacement. */
9541 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9542 {
9543 case 0:
9544 break;
9545 case 1:
9546 {
9547 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9548 u32EffAddr += i8Disp;
9549 break;
9550 }
9551 case 2:
9552 {
9553 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9554 u32EffAddr += u32Disp;
9555 break;
9556 }
9557 default:
9558 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
9559 }
9560
9561 }
9562 if (pIemCpu->enmEffAddrMode == IEMMODE_32BIT)
9563 *pGCPtrEff = u32EffAddr;
9564 else
9565 {
9566 Assert(pIemCpu->enmEffAddrMode == IEMMODE_16BIT);
9567 *pGCPtrEff = u32EffAddr & UINT16_MAX;
9568 }
9569 }
9570 }
9571 else
9572 {
9573 uint64_t u64EffAddr;
9574
9575 /* Handle the rip+disp32 form with no registers first. */
9576 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9577 {
9578 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
9579 u64EffAddr += pCtx->rip + pIemCpu->offOpcode + cbImm;
9580 }
9581 else
9582 {
9583 /* Get the register (or SIB) value. */
9584 switch ((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB)
9585 {
9586 case 0: u64EffAddr = pCtx->rax; break;
9587 case 1: u64EffAddr = pCtx->rcx; break;
9588 case 2: u64EffAddr = pCtx->rdx; break;
9589 case 3: u64EffAddr = pCtx->rbx; break;
9590 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
9591 case 6: u64EffAddr = pCtx->rsi; break;
9592 case 7: u64EffAddr = pCtx->rdi; break;
9593 case 8: u64EffAddr = pCtx->r8; break;
9594 case 9: u64EffAddr = pCtx->r9; break;
9595 case 10: u64EffAddr = pCtx->r10; break;
9596 case 11: u64EffAddr = pCtx->r11; break;
9597 case 13: u64EffAddr = pCtx->r13; break;
9598 case 14: u64EffAddr = pCtx->r14; break;
9599 case 15: u64EffAddr = pCtx->r15; break;
9600 /* SIB */
9601 case 4:
9602 case 12:
9603 {
9604 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9605
9606 /* Get the index and scale it. */
9607 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pIemCpu->uRexIndex)
9608 {
9609 case 0: u64EffAddr = pCtx->rax; break;
9610 case 1: u64EffAddr = pCtx->rcx; break;
9611 case 2: u64EffAddr = pCtx->rdx; break;
9612 case 3: u64EffAddr = pCtx->rbx; break;
9613 case 4: u64EffAddr = 0; /*none */ break;
9614 case 5: u64EffAddr = pCtx->rbp; break;
9615 case 6: u64EffAddr = pCtx->rsi; break;
9616 case 7: u64EffAddr = pCtx->rdi; break;
9617 case 8: u64EffAddr = pCtx->r8; break;
9618 case 9: u64EffAddr = pCtx->r9; break;
9619 case 10: u64EffAddr = pCtx->r10; break;
9620 case 11: u64EffAddr = pCtx->r11; break;
9621 case 12: u64EffAddr = pCtx->r12; break;
9622 case 13: u64EffAddr = pCtx->r13; break;
9623 case 14: u64EffAddr = pCtx->r14; break;
9624 case 15: u64EffAddr = pCtx->r15; break;
9625 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9626 }
9627 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9628
9629 /* add base */
9630 switch ((bSib & X86_SIB_BASE_MASK) | pIemCpu->uRexB)
9631 {
9632 case 0: u64EffAddr += pCtx->rax; break;
9633 case 1: u64EffAddr += pCtx->rcx; break;
9634 case 2: u64EffAddr += pCtx->rdx; break;
9635 case 3: u64EffAddr += pCtx->rbx; break;
9636 case 4: u64EffAddr += pCtx->rsp; SET_SS_DEF(); break;
9637 case 6: u64EffAddr += pCtx->rsi; break;
9638 case 7: u64EffAddr += pCtx->rdi; break;
9639 case 8: u64EffAddr += pCtx->r8; break;
9640 case 9: u64EffAddr += pCtx->r9; break;
9641 case 10: u64EffAddr += pCtx->r10; break;
9642 case 11: u64EffAddr += pCtx->r11; break;
9643 case 12: u64EffAddr += pCtx->r12; break;
9644 case 14: u64EffAddr += pCtx->r14; break;
9645 case 15: u64EffAddr += pCtx->r15; break;
9646 /* complicated encodings */
9647 case 5:
9648 case 13:
9649 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9650 {
9651 if (!pIemCpu->uRexB)
9652 {
9653 u64EffAddr += pCtx->rbp;
9654 SET_SS_DEF();
9655 }
9656 else
9657 u64EffAddr += pCtx->r13;
9658 }
9659 else
9660 {
9661 uint32_t u32Disp;
9662 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9663 u64EffAddr += (int32_t)u32Disp;
9664 }
9665 break;
9666 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9667 }
9668 break;
9669 }
9670 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9671 }
9672
9673 /* Get and add the displacement. */
9674 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9675 {
9676 case 0:
9677 break;
9678 case 1:
9679 {
9680 int8_t i8Disp;
9681 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9682 u64EffAddr += i8Disp;
9683 break;
9684 }
9685 case 2:
9686 {
9687 uint32_t u32Disp;
9688 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9689 u64EffAddr += (int32_t)u32Disp;
9690 break;
9691 }
9692 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
9693 }
9694
9695 }
9696
9697 if (pIemCpu->enmEffAddrMode == IEMMODE_64BIT)
9698 *pGCPtrEff = u64EffAddr;
9699 else
9700 {
9701 Assert(pIemCpu->enmEffAddrMode == IEMMODE_32BIT);
9702 *pGCPtrEff = u64EffAddr & UINT32_MAX;
9703 }
9704 }
9705
9706 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
9707 return VINF_SUCCESS;
9708}
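/* Worked example (illustrative values, a reading of the code above): with a
 * 16-bit effective address, ModRM byte 0x46 (mod=1, rm=6) and a disp8 of 0x10,
 * the result is BP + 0x10, and because rm=6 selects BP the default segment is
 * switched to SS via SET_SS_DEF() unless a segment prefix overrides it. In
 * 64-bit mode, mod=0 with rm=5 instead takes a sign-extended disp32 relative
 * to the next instruction, i.e. rip + offOpcode + cbImm as seen above. */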
9709
9710/** @} */
9711
9712
9713
9714/*
9715 * Include the instructions
9716 */
9717#include "IEMAllInstructions.cpp.h"
9718
9719
9720
9721
9722#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
9723
9724/**
9725 * Sets up execution verification mode.
9726 */
9727IEM_STATIC void iemExecVerificationModeSetup(PIEMCPU pIemCpu)
9728{
9729 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
9730 PCPUMCTX pOrgCtx = pIemCpu->CTX_SUFF(pCtx);
9731
9732 /*
9733 * Always note down the address of the current instruction.
9734 */
9735 pIemCpu->uOldCs = pOrgCtx->cs.Sel;
9736 pIemCpu->uOldRip = pOrgCtx->rip;
9737
9738 /*
9739 * Enable verification and/or logging.
9740 */
9741 bool fNewNoRem = !LogIs6Enabled(); /* logging triggers the no-rem/rem verification stuff */
9742 if ( fNewNoRem
9743 && ( 0
9744#if 0 /* auto enable on first paged protected mode interrupt */
9745 || ( pOrgCtx->eflags.Bits.u1IF
9746 && (pOrgCtx->cr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG)
9747 && TRPMHasTrap(pVCpu)
9748 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip) )
9749#endif
9750#if 0
9751 || ( pOrgCtx->cs.Sel == 0x10
9752 && ( pOrgCtx->rip == 0x90119e3e
9753 || pOrgCtx->rip == 0x901d9810) )
9754#endif
9755#if 0 /* Auto enable DSL - FPU stuff. */
9756 || ( pOrgCtx->cs.Sel == 0x10
9757 && (// pOrgCtx->rip == 0xc02ec07f
9758 //|| pOrgCtx->rip == 0xc02ec082
9759 //|| pOrgCtx->rip == 0xc02ec0c9
9760 0
9761 || pOrgCtx->rip == 0x0c010e7c4 /* fxsave */ ) )
9762#endif
9763#if 0 /* Auto enable DSL - fstp st0 stuff. */
9764 || (pOrgCtx->cs.Sel == 0x23 && pOrgCtx->rip == 0x804aff7)
9765#endif
9766#if 0
9767 || pOrgCtx->rip == 0x9022bb3a
9768#endif
9769#if 0
9770 || (pOrgCtx->cs.Sel == 0x58 && pOrgCtx->rip == 0x3be) /* NT4SP1 sidt/sgdt in early loader code */
9771#endif
9772#if 0
9773 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013ec28) /* NT4SP1 first str (early boot) */
9774 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x80119e3f) /* NT4SP1 second str (early boot) */
9775#endif
9776#if 0 /* NT4SP1 - later on the blue screen, things goes wrong... */
9777 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8010a5df)
9778 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013a7c4)
9779 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013a7d2)
9780#endif
9781#if 0 /* NT4SP1 - xadd early boot. */
9782 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8019cf0f)
9783#endif
9784#if 0 /* NT4SP1 - wrmsr (intel MSR). */
9785 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8011a6d4)
9786#endif
9787#if 0 /* NT4SP1 - cmpxchg (AMD). */
9788 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x801684c1)
9789#endif
9790#if 0 /* NT4SP1 - fnstsw + 2 (AMD). */
9791 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x801c6b88+2)
9792#endif
9793#if 0 /* NT4SP1 - iret to v8086 -- too generic a place? (N/A with GAs installed) */
9794 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013bd5d)
9795
9796#endif
9797#if 0 /* NT4SP1 - iret to v8086 (executing edlin) */
9798 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013b609)
9799
9800#endif
9801#if 0 /* NT4SP1 - frstor [ecx] */
9802 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013d11f)
9803#endif
9804#if 0 /* xxxxxx - All long mode code. */
9805 || (pOrgCtx->msrEFER & MSR_K6_EFER_LMA)
9806#endif
9807#if 0 /* rep movsq linux 3.7 64-bit boot. */
9808 || (pOrgCtx->rip == 0x0000000000100241)
9809#endif
9810#if 0 /* linux 3.7 64-bit boot - '000000000215e240'. */
9811 || (pOrgCtx->rip == 0x000000000215e240)
9812#endif
9813#if 0 /* DOS's size-overridden iret to v8086. */
9814 || (pOrgCtx->rip == 0x427 && pOrgCtx->cs.Sel == 0xb8)
9815#endif
9816 )
9817 )
9818 {
9819 RTLogGroupSettings(NULL, "iem.eo.l6.l2");
9820 RTLogFlags(NULL, "enabled");
9821 fNewNoRem = false;
9822 }
9823 if (fNewNoRem != pIemCpu->fNoRem)
9824 {
9825 pIemCpu->fNoRem = fNewNoRem;
9826 if (!fNewNoRem)
9827 {
9828 LogAlways(("Enabling verification mode!\n"));
9829 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
9830 }
9831 else
9832 LogAlways(("Disabling verification mode!\n"));
9833 }
9834
9835 /*
9836 * Switch state.
9837 */
9838 if (IEM_VERIFICATION_ENABLED(pIemCpu))
9839 {
9840 static CPUMCTX s_DebugCtx; /* Ugly! */
9841
9842 s_DebugCtx = *pOrgCtx;
9843 pIemCpu->CTX_SUFF(pCtx) = &s_DebugCtx;
9844 }
9845
9846 /*
9847 * See if there is an interrupt pending in TRPM and inject it if we can.
9848 */
9849 pIemCpu->uInjectCpl = UINT8_MAX;
9850 if ( pOrgCtx->eflags.Bits.u1IF
9851 && TRPMHasTrap(pVCpu)
9852 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip)
9853 {
9854 uint8_t u8TrapNo;
9855 TRPMEVENT enmType;
9856 RTGCUINT uErrCode;
9857 RTGCPTR uCr2;
9858 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
9859 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
9860 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
9861 TRPMResetTrap(pVCpu);
9862 pIemCpu->uInjectCpl = pIemCpu->uCpl;
9863 }
9864
9865 /*
9866 * Reset the counters.
9867 */
9868 pIemCpu->cIOReads = 0;
9869 pIemCpu->cIOWrites = 0;
9870 pIemCpu->fIgnoreRaxRdx = false;
9871 pIemCpu->fOverlappingMovs = false;
9872 pIemCpu->fProblematicMemory = false;
9873 pIemCpu->fUndefinedEFlags = 0;
9874
9875 if (IEM_VERIFICATION_ENABLED(pIemCpu))
9876 {
9877 /*
9878 * Free all verification records.
9879 */
9880 PIEMVERIFYEVTREC pEvtRec = pIemCpu->pIemEvtRecHead;
9881 pIemCpu->pIemEvtRecHead = NULL;
9882 pIemCpu->ppIemEvtRecNext = &pIemCpu->pIemEvtRecHead;
9883 do
9884 {
9885 while (pEvtRec)
9886 {
9887 PIEMVERIFYEVTREC pNext = pEvtRec->pNext;
9888 pEvtRec->pNext = pIemCpu->pFreeEvtRec;
9889 pIemCpu->pFreeEvtRec = pEvtRec;
9890 pEvtRec = pNext;
9891 }
9892 pEvtRec = pIemCpu->pOtherEvtRecHead;
9893 pIemCpu->pOtherEvtRecHead = NULL;
9894 pIemCpu->ppOtherEvtRecNext = &pIemCpu->pOtherEvtRecHead;
9895 } while (pEvtRec);
9896 }
9897}
9898
9899
9900/**
9901 * Allocate an event record.
9902 * @returns Pointer to a record.
9903 */
9904IEM_STATIC PIEMVERIFYEVTREC iemVerifyAllocRecord(PIEMCPU pIemCpu)
9905{
9906 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
9907 return NULL;
9908
9909 PIEMVERIFYEVTREC pEvtRec = pIemCpu->pFreeEvtRec;
9910 if (pEvtRec)
9911 pIemCpu->pFreeEvtRec = pEvtRec->pNext;
9912 else
9913 {
9914 if (!pIemCpu->ppIemEvtRecNext)
9915 return NULL; /* Too early (fake PCIBIOS), ignore notification. */
9916
9917 pEvtRec = (PIEMVERIFYEVTREC)MMR3HeapAlloc(IEMCPU_TO_VM(pIemCpu), MM_TAG_EM /* lazy bird*/, sizeof(*pEvtRec));
9918 if (!pEvtRec)
9919 return NULL;
9920 }
9921 pEvtRec->enmEvent = IEMVERIFYEVENT_INVALID;
9922 pEvtRec->pNext = NULL;
9923 return pEvtRec;
9924}
9925
9926
9927/**
9928 * IOMMMIORead notification.
9929 */
9930VMM_INT_DECL(void) IEMNotifyMMIORead(PVM pVM, RTGCPHYS GCPhys, size_t cbValue)
9931{
9932 PVMCPU pVCpu = VMMGetCpu(pVM);
9933 if (!pVCpu)
9934 return;
9935 PIEMCPU pIemCpu = &pVCpu->iem.s;
9936 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
9937 if (!pEvtRec)
9938 return;
9939 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
9940 pEvtRec->u.RamRead.GCPhys = GCPhys;
9941 pEvtRec->u.RamRead.cb = (uint32_t)cbValue;
9942 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
9943 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
9944}
9945
9946
9947/**
9948 * IOMMMIOWrite notification.
9949 */
9950VMM_INT_DECL(void) IEMNotifyMMIOWrite(PVM pVM, RTGCPHYS GCPhys, uint32_t u32Value, size_t cbValue)
9951{
9952 PVMCPU pVCpu = VMMGetCpu(pVM);
9953 if (!pVCpu)
9954 return;
9955 PIEMCPU pIemCpu = &pVCpu->iem.s;
9956 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
9957 if (!pEvtRec)
9958 return;
9959 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
9960 pEvtRec->u.RamWrite.GCPhys = GCPhys;
9961 pEvtRec->u.RamWrite.cb = (uint32_t)cbValue;
9962 pEvtRec->u.RamWrite.ab[0] = RT_BYTE1(u32Value);
9963 pEvtRec->u.RamWrite.ab[1] = RT_BYTE2(u32Value);
9964 pEvtRec->u.RamWrite.ab[2] = RT_BYTE3(u32Value);
9965 pEvtRec->u.RamWrite.ab[3] = RT_BYTE4(u32Value);
9966 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
9967 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
9968}
9969
9970
9971/**
9972 * IOMIOPortRead notification.
9973 */
9974VMM_INT_DECL(void) IEMNotifyIOPortRead(PVM pVM, RTIOPORT Port, size_t cbValue)
9975{
9976 PVMCPU pVCpu = VMMGetCpu(pVM);
9977 if (!pVCpu)
9978 return;
9979 PIEMCPU pIemCpu = &pVCpu->iem.s;
9980 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
9981 if (!pEvtRec)
9982 return;
9983 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
9984 pEvtRec->u.IOPortRead.Port = Port;
9985 pEvtRec->u.IOPortRead.cbValue = (uint32_t)cbValue;
9986 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
9987 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
9988}
9989
9990/**
9991 * IOMIOPortWrite notification.
9992 */
9993VMM_INT_DECL(void) IEMNotifyIOPortWrite(PVM pVM, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
9994{
9995 PVMCPU pVCpu = VMMGetCpu(pVM);
9996 if (!pVCpu)
9997 return;
9998 PIEMCPU pIemCpu = &pVCpu->iem.s;
9999 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
10000 if (!pEvtRec)
10001 return;
10002 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
10003 pEvtRec->u.IOPortWrite.Port = Port;
10004 pEvtRec->u.IOPortWrite.cbValue = (uint32_t)cbValue;
10005 pEvtRec->u.IOPortWrite.u32Value = u32Value;
10006 pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
10007 *pIemCpu->ppOtherEvtRecNext = pEvtRec;
10008}
10009
10010
10011VMM_INT_DECL(void) IEMNotifyIOPortReadString(PVM pVM, RTIOPORT Port, void *pvDst, RTGCUINTREG cTransfers, size_t cbValue)
10012{
10013 AssertFailed();
10014}
10015
10016
10017VMM_INT_DECL(void) IEMNotifyIOPortWriteString(PVM pVM, RTIOPORT Port, void const *pvSrc, RTGCUINTREG cTransfers, size_t cbValue)
10018{
10019 AssertFailed();
10020}
10021
10022
10023/**
10024 * Fakes and records an I/O port read.
10025 *
10026 * @returns VINF_SUCCESS.
10027 * @param pIemCpu The IEM per CPU data.
10028 * @param Port The I/O port.
10029 * @param pu32Value Where to store the fake value.
10030 * @param cbValue The size of the access.
10031 */
10032IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
10033{
10034 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
10035 if (pEvtRec)
10036 {
10037 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
10038 pEvtRec->u.IOPortRead.Port = Port;
10039 pEvtRec->u.IOPortRead.cbValue = (uint32_t)cbValue;
10040 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
10041 *pIemCpu->ppIemEvtRecNext = pEvtRec;
10042 }
10043 pIemCpu->cIOReads++;
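    /* Hand back an easily recognizable filler value; the verification pass only
       compares the port and access size for reads, not the data (a reading of
       the code in iemExecVerificationModeCheck). */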
10044 *pu32Value = 0xcccccccc;
10045 return VINF_SUCCESS;
10046}
10047
10048
10049/**
10050 * Fakes and records an I/O port write.
10051 *
10052 * @returns VINF_SUCCESS.
10053 * @param pIemCpu The IEM per CPU data.
10054 * @param Port The I/O port.
10055 * @param u32Value The value being written.
10056 * @param cbValue The size of the access.
10057 */
10058IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
10059{
10060 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
10061 if (pEvtRec)
10062 {
10063 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
10064 pEvtRec->u.IOPortWrite.Port = Port;
10065 pEvtRec->u.IOPortWrite.cbValue = (uint32_t)cbValue;
10066 pEvtRec->u.IOPortWrite.u32Value = u32Value;
10067 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
10068 *pIemCpu->ppIemEvtRecNext = pEvtRec;
10069 }
10070 pIemCpu->cIOWrites++;
10071 return VINF_SUCCESS;
10072}
10073
10074
10075/**
10076 * Used to add extra details about a stub case.
10077 * @param pIemCpu The IEM per CPU state.
10078 */
10079IEM_STATIC void iemVerifyAssertMsg2(PIEMCPU pIemCpu)
10080{
10081 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
10082 PVM pVM = IEMCPU_TO_VM(pIemCpu);
10083 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
10084 char szRegs[4096];
10085 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
10086 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
10087 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
10088 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
10089 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
10090 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
10091 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
10092 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
10093 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
10094 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
10095 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
10096 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
10097 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
10098 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
10099 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
10100 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
10101 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
10102 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
10103 " efer=%016VR{efer}\n"
10104 " pat=%016VR{pat}\n"
10105 " sf_mask=%016VR{sf_mask}\n"
10106 "krnl_gs_base=%016VR{krnl_gs_base}\n"
10107 " lstar=%016VR{lstar}\n"
10108 " star=%016VR{star} cstar=%016VR{cstar}\n"
10109 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
10110 );
10111
10112 char szInstr1[256];
10113 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pIemCpu->uOldCs, pIemCpu->uOldRip,
10114 DBGF_DISAS_FLAGS_DEFAULT_MODE,
10115 szInstr1, sizeof(szInstr1), NULL);
10116 char szInstr2[256];
10117 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
10118 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
10119 szInstr2, sizeof(szInstr2), NULL);
10120
10121 RTAssertMsg2Weak("%s%s\n%s\n", szRegs, szInstr1, szInstr2);
10122}
10123
10124
10125/**
10126 * Used by iemVerifyAssertRecord and iemVerifyAssertRecords to add a record
10127 * dump to the assertion info.
10128 *
10129 * @param pEvtRec The record to dump.
10130 */
10131IEM_STATIC void iemVerifyAssertAddRecordDump(PIEMVERIFYEVTREC pEvtRec)
10132{
10133 switch (pEvtRec->enmEvent)
10134 {
10135 case IEMVERIFYEVENT_IOPORT_READ:
10136 RTAssertMsg2Add("I/O PORT READ from %#6x, %d bytes\n",
10137 pEvtRec->u.IOPortRead.Port,
10138 pEvtRec->u.IOPortRead.cbValue);
10139 break;
10140 case IEMVERIFYEVENT_IOPORT_WRITE:
10141 RTAssertMsg2Add("I/O PORT WRITE to %#6x, %d bytes, value %#x\n",
10142 pEvtRec->u.IOPortWrite.Port,
10143 pEvtRec->u.IOPortWrite.cbValue,
10144 pEvtRec->u.IOPortWrite.u32Value);
10145 break;
10146 case IEMVERIFYEVENT_RAM_READ:
10147 RTAssertMsg2Add("RAM READ at %RGp, %#4zx bytes\n",
10148 pEvtRec->u.RamRead.GCPhys,
10149 pEvtRec->u.RamRead.cb);
10150 break;
10151 case IEMVERIFYEVENT_RAM_WRITE:
10152 RTAssertMsg2Add("RAM WRITE at %RGp, %#4zx bytes: %.*Rhxs\n",
10153 pEvtRec->u.RamWrite.GCPhys,
10154 pEvtRec->u.RamWrite.cb,
10155 (int)pEvtRec->u.RamWrite.cb,
10156 pEvtRec->u.RamWrite.ab);
10157 break;
10158 default:
10159 AssertMsgFailed(("Invalid event type %d\n", pEvtRec->enmEvent));
10160 break;
10161 }
10162}
10163
10164
10165/**
10166 * Raises an assertion on the specified records, showing the given message with
10167 * the record dumps attached.
10168 *
10169 * @param pIemCpu The IEM per CPU data.
10170 * @param pEvtRec1 The first record.
10171 * @param pEvtRec2 The second record.
10172 * @param pszMsg The message explaining why we're asserting.
10173 */
10174IEM_STATIC void iemVerifyAssertRecords(PIEMCPU pIemCpu, PIEMVERIFYEVTREC pEvtRec1, PIEMVERIFYEVTREC pEvtRec2, const char *pszMsg)
10175{
10176 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
10177 iemVerifyAssertAddRecordDump(pEvtRec1);
10178 iemVerifyAssertAddRecordDump(pEvtRec2);
10179 iemVerifyAssertMsg2(pIemCpu);
10180 RTAssertPanic();
10181}
10182
10183
10184/**
10185 * Raises an assertion on the specified record, showing the given message with
10186 * a record dump attached.
10187 *
10188 * @param pIemCpu The IEM per CPU data.
10189 * @param pEvtRec1 The first record.
10190 * @param pEvtRec The record.
10191 */
10192IEM_STATIC void iemVerifyAssertRecord(PIEMCPU pIemCpu, PIEMVERIFYEVTREC pEvtRec, const char *pszMsg)
10193{
10194 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
10195 iemVerifyAssertAddRecordDump(pEvtRec);
10196 iemVerifyAssertMsg2(pIemCpu);
10197 RTAssertPanic();
10198}
10199
10200
10201/**
10202 * Verifies a write record.
10203 *
10204 * @param pIemCpu The IEM per CPU data.
10205 * @param pEvtRec The write record.
10206 * @param fRem Set if REM was doing the other execution. If clear
10207 * it was HM.
10208 */
10209IEM_STATIC void iemVerifyWriteRecord(PIEMCPU pIemCpu, PIEMVERIFYEVTREC pEvtRec, bool fRem)
10210{
10211 uint8_t abBuf[sizeof(pEvtRec->u.RamWrite.ab)]; RT_ZERO(abBuf);
10212 Assert(sizeof(abBuf) >= pEvtRec->u.RamWrite.cb);
10213 int rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), abBuf, pEvtRec->u.RamWrite.GCPhys, pEvtRec->u.RamWrite.cb);
10214 if ( RT_FAILURE(rc)
10215 || memcmp(abBuf, pEvtRec->u.RamWrite.ab, pEvtRec->u.RamWrite.cb) )
10216 {
10217 /* fend off ins */
10218 if ( !pIemCpu->cIOReads
10219 || pEvtRec->u.RamWrite.ab[0] != 0xcc
10220 || ( pEvtRec->u.RamWrite.cb != 1
10221 && pEvtRec->u.RamWrite.cb != 2
10222 && pEvtRec->u.RamWrite.cb != 4) )
10223 {
10224 /* fend off ROMs and MMIO */
10225 if ( pEvtRec->u.RamWrite.GCPhys - UINT32_C(0x000a0000) > UINT32_C(0x60000)
10226 && pEvtRec->u.RamWrite.GCPhys - UINT32_C(0xfffc0000) > UINT32_C(0x40000) )
10227 {
10228 /* fend off fxsave */
10229 if (pEvtRec->u.RamWrite.cb != 512)
10230 {
10231 const char *pszWho = fRem ? "rem" : HMR3IsVmxEnabled(IEMCPU_TO_VM(pIemCpu)->pUVM) ? "vmx" : "svm";
10232 RTAssertMsg1(NULL, __LINE__, __FILE__, __PRETTY_FUNCTION__);
10233 RTAssertMsg2Weak("Memory at %RGp differs\n", pEvtRec->u.RamWrite.GCPhys);
10234 RTAssertMsg2Add("%s: %.*Rhxs\n"
10235 "iem: %.*Rhxs\n",
10236 pszWho, pEvtRec->u.RamWrite.cb, abBuf,
10237 pEvtRec->u.RamWrite.cb, pEvtRec->u.RamWrite.ab);
10238 iemVerifyAssertAddRecordDump(pEvtRec);
10239 iemVerifyAssertMsg2(pIemCpu);
10240 RTAssertPanic();
10241 }
10242 }
10243 }
10244 }
10245
10246}
10247
10248/**
10249 * Performs the post-execution verification checks.
10250 */
10251IEM_STATIC void iemExecVerificationModeCheck(PIEMCPU pIemCpu)
10252{
10253 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
10254 return;
10255
10256 /*
10257 * Switch back the state.
10258 */
10259 PCPUMCTX pOrgCtx = CPUMQueryGuestCtxPtr(IEMCPU_TO_VMCPU(pIemCpu));
10260 PCPUMCTX pDebugCtx = pIemCpu->CTX_SUFF(pCtx);
10261 Assert(pOrgCtx != pDebugCtx);
10262 pIemCpu->CTX_SUFF(pCtx) = pOrgCtx;
10263
10264 /*
10265 * Execute the instruction in REM.
10266 */
10267 bool fRem = false;
10268 PVM pVM = IEMCPU_TO_VM(pIemCpu);
10269 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
10270 VBOXSTRICTRC rc = VERR_EM_CANNOT_EXEC_GUEST;
10271#ifdef IEM_VERIFICATION_MODE_FULL_HM
10272 if ( HMIsEnabled(pVM)
10273 && pIemCpu->cIOReads == 0
10274 && pIemCpu->cIOWrites == 0
10275 && !pIemCpu->fProblematicMemory)
10276 {
10277 uint64_t uStartRip = pOrgCtx->rip;
10278 unsigned iLoops = 0;
10279 do
10280 {
10281 rc = EMR3HmSingleInstruction(pVM, pVCpu, EM_ONE_INS_FLAGS_RIP_CHANGE);
10282 iLoops++;
10283 } while ( rc == VINF_SUCCESS
10284 || ( rc == VINF_EM_DBG_STEPPED
10285 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
10286 && EMGetInhibitInterruptsPC(pVCpu) == pOrgCtx->rip)
10287 || ( pOrgCtx->rip != pDebugCtx->rip
10288 && pIemCpu->uInjectCpl != UINT8_MAX
10289 && iLoops < 8) );
10290 if (rc == VINF_EM_RESCHEDULE && pOrgCtx->rip != uStartRip)
10291 rc = VINF_SUCCESS;
10292 }
10293#endif
10294 if ( rc == VERR_EM_CANNOT_EXEC_GUEST
10295 || rc == VINF_IOM_R3_IOPORT_READ
10296 || rc == VINF_IOM_R3_IOPORT_WRITE
10297 || rc == VINF_IOM_R3_MMIO_READ
10298 || rc == VINF_IOM_R3_MMIO_READ_WRITE
10299 || rc == VINF_IOM_R3_MMIO_WRITE
10300 || rc == VINF_CPUM_R3_MSR_READ
10301 || rc == VINF_CPUM_R3_MSR_WRITE
10302 || rc == VINF_EM_RESCHEDULE
10303 )
10304 {
10305 EMRemLock(pVM);
10306 rc = REMR3EmulateInstruction(pVM, pVCpu);
10307 AssertRC(rc);
10308 EMRemUnlock(pVM);
10309 fRem = true;
10310 }
10311
10312 /*
10313 * Compare the register states.
10314 */
10315 unsigned cDiffs = 0;
10316 if (memcmp(pOrgCtx, pDebugCtx, sizeof(*pDebugCtx)))
10317 {
10318 //Log(("REM and IEM ends up with different registers!\n"));
10319 const char *pszWho = fRem ? "rem" : HMR3IsVmxEnabled(pVM->pUVM) ? "vmx" : "svm";
10320
10321# define CHECK_FIELD(a_Field) \
10322 do \
10323 { \
10324 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
10325 { \
10326 switch (sizeof(pOrgCtx->a_Field)) \
10327 { \
10328 case 1: RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
10329 case 2: RTAssertMsg2Weak(" %8s differs - iem=%04x - %s=%04x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
10330 case 4: RTAssertMsg2Weak(" %8s differs - iem=%08x - %s=%08x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
10331 case 8: RTAssertMsg2Weak(" %8s differs - iem=%016llx - %s=%016llx\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
10332 default: RTAssertMsg2Weak(" %8s differs\n", #a_Field); break; \
10333 } \
10334 cDiffs++; \
10335 } \
10336 } while (0)
10337# define CHECK_XSTATE_FIELD(a_Field) \
10338 do \
10339 { \
10340 if (pOrgXState->a_Field != pDebugXState->a_Field) \
10341 { \
10342 switch (sizeof(pOrgCtx->a_Field)) \
10343 { \
10344 case 1: RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
10345 case 2: RTAssertMsg2Weak(" %8s differs - iem=%04x - %s=%04x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
10346 case 4: RTAssertMsg2Weak(" %8s differs - iem=%08x - %s=%08x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
10347 case 8: RTAssertMsg2Weak(" %8s differs - iem=%016llx - %s=%016llx\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
10348 default: RTAssertMsg2Weak(" %8s differs\n", #a_Field); break; \
10349 } \
10350 cDiffs++; \
10351 } \
10352 } while (0)
10353
10354# define CHECK_BIT_FIELD(a_Field) \
10355 do \
10356 { \
10357 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
10358 { \
10359 RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); \
10360 cDiffs++; \
10361 } \
10362 } while (0)
10363
10364# define CHECK_SEL(a_Sel) \
10365 do \
10366 { \
10367 CHECK_FIELD(a_Sel.Sel); \
10368 CHECK_FIELD(a_Sel.Attr.u); \
10369 CHECK_FIELD(a_Sel.u64Base); \
10370 CHECK_FIELD(a_Sel.u32Limit); \
10371 CHECK_FIELD(a_Sel.fFlags); \
10372 } while (0)
10373
10374 PX86XSAVEAREA pOrgXState = pOrgCtx->CTX_SUFF(pXState);
10375 PX86XSAVEAREA pDebugXState = pDebugCtx->CTX_SUFF(pXState);
10376
10377#if 1 /* The recompiler doesn't update these the intel way. */
10378 if (fRem)
10379 {
10380 pOrgXState->x87.FOP = pDebugXState->x87.FOP;
10381 pOrgXState->x87.FPUIP = pDebugXState->x87.FPUIP;
10382 pOrgXState->x87.CS = pDebugXState->x87.CS;
10383 pOrgXState->x87.Rsrvd1 = pDebugXState->x87.Rsrvd1;
10384 pOrgXState->x87.FPUDP = pDebugXState->x87.FPUDP;
10385 pOrgXState->x87.DS = pDebugXState->x87.DS;
10386 pOrgXState->x87.Rsrvd2 = pDebugXState->x87.Rsrvd2;
10387 //pOrgXState->x87.MXCSR_MASK = pDebugXState->x87.MXCSR_MASK;
10388 if ((pOrgXState->x87.FSW & X86_FSW_TOP_MASK) == (pDebugXState->x87.FSW & X86_FSW_TOP_MASK))
10389 pOrgXState->x87.FSW = pDebugXState->x87.FSW;
10390 }
10391#endif
10392 if (memcmp(&pOrgXState->x87, &pDebugXState->x87, sizeof(pDebugXState->x87)))
10393 {
10394 RTAssertMsg2Weak(" the FPU state differs\n");
10395 cDiffs++;
10396 CHECK_XSTATE_FIELD(x87.FCW);
10397 CHECK_XSTATE_FIELD(x87.FSW);
10398 CHECK_XSTATE_FIELD(x87.FTW);
10399 CHECK_XSTATE_FIELD(x87.FOP);
10400 CHECK_XSTATE_FIELD(x87.FPUIP);
10401 CHECK_XSTATE_FIELD(x87.CS);
10402 CHECK_XSTATE_FIELD(x87.Rsrvd1);
10403 CHECK_XSTATE_FIELD(x87.FPUDP);
10404 CHECK_XSTATE_FIELD(x87.DS);
10405 CHECK_XSTATE_FIELD(x87.Rsrvd2);
10406 CHECK_XSTATE_FIELD(x87.MXCSR);
10407 CHECK_XSTATE_FIELD(x87.MXCSR_MASK);
10408 CHECK_XSTATE_FIELD(x87.aRegs[0].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[0].au64[1]);
10409 CHECK_XSTATE_FIELD(x87.aRegs[1].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[1].au64[1]);
10410 CHECK_XSTATE_FIELD(x87.aRegs[2].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[2].au64[1]);
10411 CHECK_XSTATE_FIELD(x87.aRegs[3].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[3].au64[1]);
10412 CHECK_XSTATE_FIELD(x87.aRegs[4].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[4].au64[1]);
10413 CHECK_XSTATE_FIELD(x87.aRegs[5].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[5].au64[1]);
10414 CHECK_XSTATE_FIELD(x87.aRegs[6].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[6].au64[1]);
10415 CHECK_XSTATE_FIELD(x87.aRegs[7].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[7].au64[1]);
10416 CHECK_XSTATE_FIELD(x87.aXMM[ 0].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 0].au64[1]);
10417 CHECK_XSTATE_FIELD(x87.aXMM[ 1].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 1].au64[1]);
10418 CHECK_XSTATE_FIELD(x87.aXMM[ 2].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 2].au64[1]);
10419 CHECK_XSTATE_FIELD(x87.aXMM[ 3].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 3].au64[1]);
10420 CHECK_XSTATE_FIELD(x87.aXMM[ 4].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 4].au64[1]);
10421 CHECK_XSTATE_FIELD(x87.aXMM[ 5].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 5].au64[1]);
10422 CHECK_XSTATE_FIELD(x87.aXMM[ 6].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 6].au64[1]);
10423 CHECK_XSTATE_FIELD(x87.aXMM[ 7].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 7].au64[1]);
10424 CHECK_XSTATE_FIELD(x87.aXMM[ 8].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 8].au64[1]);
10425 CHECK_XSTATE_FIELD(x87.aXMM[ 9].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 9].au64[1]);
10426 CHECK_XSTATE_FIELD(x87.aXMM[10].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[10].au64[1]);
10427 CHECK_XSTATE_FIELD(x87.aXMM[11].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[11].au64[1]);
10428 CHECK_XSTATE_FIELD(x87.aXMM[12].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[12].au64[1]);
10429 CHECK_XSTATE_FIELD(x87.aXMM[13].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[13].au64[1]);
10430 CHECK_XSTATE_FIELD(x87.aXMM[14].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[14].au64[1]);
10431 CHECK_XSTATE_FIELD(x87.aXMM[15].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[15].au64[1]);
10432 for (unsigned i = 0; i < RT_ELEMENTS(pOrgXState->x87.au32RsrvdRest); i++)
10433 CHECK_XSTATE_FIELD(x87.au32RsrvdRest[i]);
10434 }
10435 CHECK_FIELD(rip);
10436 uint32_t fFlagsMask = UINT32_MAX & ~pIemCpu->fUndefinedEFlags;
10437 if ((pOrgCtx->rflags.u & fFlagsMask) != (pDebugCtx->rflags.u & fFlagsMask))
10438 {
10439 RTAssertMsg2Weak(" rflags differs - iem=%08llx %s=%08llx\n", pDebugCtx->rflags.u, pszWho, pOrgCtx->rflags.u);
10440 CHECK_BIT_FIELD(rflags.Bits.u1CF);
10441 CHECK_BIT_FIELD(rflags.Bits.u1Reserved0);
10442 CHECK_BIT_FIELD(rflags.Bits.u1PF);
10443 CHECK_BIT_FIELD(rflags.Bits.u1Reserved1);
10444 CHECK_BIT_FIELD(rflags.Bits.u1AF);
10445 CHECK_BIT_FIELD(rflags.Bits.u1Reserved2);
10446 CHECK_BIT_FIELD(rflags.Bits.u1ZF);
10447 CHECK_BIT_FIELD(rflags.Bits.u1SF);
10448 CHECK_BIT_FIELD(rflags.Bits.u1TF);
10449 CHECK_BIT_FIELD(rflags.Bits.u1IF);
10450 CHECK_BIT_FIELD(rflags.Bits.u1DF);
10451 CHECK_BIT_FIELD(rflags.Bits.u1OF);
10452 CHECK_BIT_FIELD(rflags.Bits.u2IOPL);
10453 CHECK_BIT_FIELD(rflags.Bits.u1NT);
10454 CHECK_BIT_FIELD(rflags.Bits.u1Reserved3);
10455 if (0 && !fRem) /** @todo debug the occasionally cleared RF flag when running against VT-x. */
10456 CHECK_BIT_FIELD(rflags.Bits.u1RF);
10457 CHECK_BIT_FIELD(rflags.Bits.u1VM);
10458 CHECK_BIT_FIELD(rflags.Bits.u1AC);
10459 CHECK_BIT_FIELD(rflags.Bits.u1VIF);
10460 CHECK_BIT_FIELD(rflags.Bits.u1VIP);
10461 CHECK_BIT_FIELD(rflags.Bits.u1ID);
10462 }
10463
10464 if (pIemCpu->cIOReads != 1 && !pIemCpu->fIgnoreRaxRdx)
10465 CHECK_FIELD(rax);
10466 CHECK_FIELD(rcx);
10467 if (!pIemCpu->fIgnoreRaxRdx)
10468 CHECK_FIELD(rdx);
10469 CHECK_FIELD(rbx);
10470 CHECK_FIELD(rsp);
10471 CHECK_FIELD(rbp);
10472 CHECK_FIELD(rsi);
10473 CHECK_FIELD(rdi);
10474 CHECK_FIELD(r8);
10475 CHECK_FIELD(r9);
10476 CHECK_FIELD(r10);
10477 CHECK_FIELD(r11);
10478 CHECK_FIELD(r12);
10479 CHECK_FIELD(r13);
10480 CHECK_SEL(cs);
10481 CHECK_SEL(ss);
10482 CHECK_SEL(ds);
10483 CHECK_SEL(es);
10484 CHECK_SEL(fs);
10485 CHECK_SEL(gs);
10486 CHECK_FIELD(cr0);
10487
10488 /* Kludge #1: REM fetches code across the page boundary and faults on the next page, while we execute
10489 the faulting instruction first: 001b:77f61ff3 66 8b 42 02 mov ax, word [edx+002h] (NT4SP1) */
10490 /* Kludge #2: CR2 differs slightly on cross page boundary faults; we report the last address of the access
10491 while REM reports the address of the first byte on the page. Pending investigation as to which is correct. */
10492 if (pOrgCtx->cr2 != pDebugCtx->cr2)
10493 {
10494 if (pIemCpu->uOldCs == 0x1b && pIemCpu->uOldRip == 0x77f61ff3 && fRem)
10495 { /* ignore */ }
10496 else if ( (pOrgCtx->cr2 & ~(uint64_t)3) == (pDebugCtx->cr2 & ~(uint64_t)3)
10497 && (pOrgCtx->cr2 & PAGE_OFFSET_MASK) == 0
10498 && fRem)
10499 { /* ignore */ }
10500 else
10501 CHECK_FIELD(cr2);
10502 }
10503 CHECK_FIELD(cr3);
10504 CHECK_FIELD(cr4);
10505 CHECK_FIELD(dr[0]);
10506 CHECK_FIELD(dr[1]);
10507 CHECK_FIELD(dr[2]);
10508 CHECK_FIELD(dr[3]);
10509 CHECK_FIELD(dr[6]);
10510 if (!fRem || (pOrgCtx->dr[7] & ~X86_DR7_RA1_MASK) != (pDebugCtx->dr[7] & ~X86_DR7_RA1_MASK)) /* REM 'mov drX,greg' bug.*/
10511 CHECK_FIELD(dr[7]);
10512 CHECK_FIELD(gdtr.cbGdt);
10513 CHECK_FIELD(gdtr.pGdt);
10514 CHECK_FIELD(idtr.cbIdt);
10515 CHECK_FIELD(idtr.pIdt);
10516 CHECK_SEL(ldtr);
10517 CHECK_SEL(tr);
10518 CHECK_FIELD(SysEnter.cs);
10519 CHECK_FIELD(SysEnter.eip);
10520 CHECK_FIELD(SysEnter.esp);
10521 CHECK_FIELD(msrEFER);
10522 CHECK_FIELD(msrSTAR);
10523 CHECK_FIELD(msrPAT);
10524 CHECK_FIELD(msrLSTAR);
10525 CHECK_FIELD(msrCSTAR);
10526 CHECK_FIELD(msrSFMASK);
10527 CHECK_FIELD(msrKERNELGSBASE);
10528
10529 if (cDiffs != 0)
10530 {
10531 DBGFR3Info(pVM->pUVM, "cpumguest", "verbose", NULL);
10532 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__);
10533 iemVerifyAssertMsg2(pIemCpu);
10534 RTAssertPanic();
10535 }
10536# undef CHECK_FIELD
10537# undef CHECK_BIT_FIELD
10538 }
10539
10540 /*
10541 * If the register state compared fine, check the verification event
10542 * records.
10543 */
10544 if (cDiffs == 0 && !pIemCpu->fOverlappingMovs)
10545 {
10546 /*
10547 * Compare verification event records.
10548 * - I/O port accesses should be a 1:1 match.
10549 */
10550 PIEMVERIFYEVTREC pIemRec = pIemCpu->pIemEvtRecHead;
10551 PIEMVERIFYEVTREC pOtherRec = pIemCpu->pOtherEvtRecHead;
10552 while (pIemRec && pOtherRec)
10553 {
10554 /* Since the other side might miss RAM writes and reads, skip over extra
10555 IEM-only RAM records here, verifying the writes against guest memory. */
10556 while ( IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent)
10557 && !IEMVERIFYEVENT_IS_RAM(pOtherRec->enmEvent)
10558 && pIemRec->pNext)
10559 {
10560 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
10561 iemVerifyWriteRecord(pIemCpu, pIemRec, fRem);
10562 pIemRec = pIemRec->pNext;
10563 }
10564
10565 /* Do the compare. */
10566 if (pIemRec->enmEvent != pOtherRec->enmEvent)
10567 {
10568 iemVerifyAssertRecords(pIemCpu, pIemRec, pOtherRec, "Type mismatches");
10569 break;
10570 }
10571 bool fEquals;
10572 switch (pIemRec->enmEvent)
10573 {
10574 case IEMVERIFYEVENT_IOPORT_READ:
10575 fEquals = pIemRec->u.IOPortRead.Port == pOtherRec->u.IOPortRead.Port
10576 && pIemRec->u.IOPortRead.cbValue == pOtherRec->u.IOPortRead.cbValue;
10577 break;
10578 case IEMVERIFYEVENT_IOPORT_WRITE:
10579 fEquals = pIemRec->u.IOPortWrite.Port == pOtherRec->u.IOPortWrite.Port
10580 && pIemRec->u.IOPortWrite.cbValue == pOtherRec->u.IOPortWrite.cbValue
10581 && pIemRec->u.IOPortWrite.u32Value == pOtherRec->u.IOPortWrite.u32Value;
10582 break;
10583 case IEMVERIFYEVENT_RAM_READ:
10584 fEquals = pIemRec->u.RamRead.GCPhys == pOtherRec->u.RamRead.GCPhys
10585 && pIemRec->u.RamRead.cb == pOtherRec->u.RamRead.cb;
10586 break;
10587 case IEMVERIFYEVENT_RAM_WRITE:
10588 fEquals = pIemRec->u.RamWrite.GCPhys == pOtherRec->u.RamWrite.GCPhys
10589 && pIemRec->u.RamWrite.cb == pOtherRec->u.RamWrite.cb
10590 && !memcmp(pIemRec->u.RamWrite.ab, pOtherRec->u.RamWrite.ab, pIemRec->u.RamWrite.cb);
10591 break;
10592 default:
10593 fEquals = false;
10594 break;
10595 }
10596 if (!fEquals)
10597 {
10598 iemVerifyAssertRecords(pIemCpu, pIemRec, pOtherRec, "Mismatch");
10599 break;
10600 }
10601
10602 /* advance */
10603 pIemRec = pIemRec->pNext;
10604 pOtherRec = pOtherRec->pNext;
10605 }
10606
10607 /* Ignore extra writes and reads. */
10608 while (pIemRec && IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent))
10609 {
10610 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
10611 iemVerifyWriteRecord(pIemCpu, pIemRec, fRem);
10612 pIemRec = pIemRec->pNext;
10613 }
10614 if (pIemRec != NULL)
10615 iemVerifyAssertRecord(pIemCpu, pIemRec, "Extra IEM record!");
10616 else if (pOtherRec != NULL)
10617 iemVerifyAssertRecord(pIemCpu, pOtherRec, "Extra Other record!");
10618 }
10619 pIemCpu->CTX_SUFF(pCtx) = pOrgCtx;
10620}
10621
10622#else /* !IEM_VERIFICATION_MODE_FULL || !IN_RING3 */
10623
10624/* stubs */
10625IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
10626{
10627 NOREF(pIemCpu); NOREF(Port); NOREF(pu32Value); NOREF(cbValue);
10628 return VERR_INTERNAL_ERROR;
10629}
10630
10631IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
10632{
10633 NOREF(pIemCpu); NOREF(Port); NOREF(u32Value); NOREF(cbValue);
10634 return VERR_INTERNAL_ERROR;
10635}
10636
10637#endif /* !IEM_VERIFICATION_MODE_FULL || !IN_RING3 */
10638
10639
10640#ifdef LOG_ENABLED
10641/**
10642 * Logs the current instruction.
10643 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10644 * @param pCtx The current CPU context.
10645 * @param fSameCtx Set if we have the same context information as the VMM,
10646 * clear if we may have already executed an instruction in
10647 * our debug context. When clear, we assume IEMCPU holds
10648 * valid CPU mode info.
10649 */
10650IEM_STATIC void iemLogCurInstr(PVMCPU pVCpu, PCPUMCTX pCtx, bool fSameCtx)
10651{
10652# ifdef IN_RING3
10653 if (LogIs2Enabled())
10654 {
10655 char szInstr[256];
10656 uint32_t cbInstr = 0;
10657 if (fSameCtx)
10658 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
10659 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
10660 szInstr, sizeof(szInstr), &cbInstr);
10661 else
10662 {
10663 uint32_t fFlags = 0;
10664 switch (pVCpu->iem.s.enmCpuMode)
10665 {
10666 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
10667 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
10668 case IEMMODE_16BIT:
10669 if (!(pCtx->cr0 & X86_CR0_PE) || pCtx->eflags.Bits.u1VM)
10670 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
10671 else
10672 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
10673 break;
10674 }
10675 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pCtx->cs.Sel, pCtx->rip, fFlags,
10676 szInstr, sizeof(szInstr), &cbInstr);
10677 }
10678
10679 PCX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
10680 Log2(("****\n"
10681 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
10682 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
10683 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
10684 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
10685 " %s\n"
10686 ,
10687 pCtx->eax, pCtx->ebx, pCtx->ecx, pCtx->edx, pCtx->esi, pCtx->edi,
10688 pCtx->eip, pCtx->esp, pCtx->ebp, pCtx->eflags.Bits.u2IOPL, pCtx->tr.Sel,
10689 pCtx->cs.Sel, pCtx->ss.Sel, pCtx->ds.Sel, pCtx->es.Sel,
10690 pCtx->fs.Sel, pCtx->gs.Sel, pCtx->eflags.u,
10691 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
10692 szInstr));
10693
10694 if (LogIs3Enabled())
10695 DBGFR3Info(pVCpu->pVMR3->pUVM, "cpumguest", "verbose", NULL);
10696 }
10697 else
10698# endif
10699 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n",
10700 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u));
10701}
10702#endif
10703
10704
10705/**
10706 * Makes status code adjustments (pass up from I/O and access handlers)
10707 * as well as maintaining statistics.
10708 *
10709 * @returns Strict VBox status code to pass up.
10710 * @param pIemCpu The IEM per CPU data.
10711 * @param rcStrict The status from executing an instruction.
10712 */
10713DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecStatusCodeFiddling(PIEMCPU pIemCpu, VBOXSTRICTRC rcStrict)
10714{
10715 if (rcStrict != VINF_SUCCESS)
10716 {
10717 if (RT_SUCCESS(rcStrict))
10718 {
10719 AssertMsg( (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
10720 || rcStrict == VINF_IOM_R3_IOPORT_READ
10721 || rcStrict == VINF_IOM_R3_IOPORT_WRITE
10722 || rcStrict == VINF_IOM_R3_MMIO_READ
10723 || rcStrict == VINF_IOM_R3_MMIO_READ_WRITE
10724 || rcStrict == VINF_IOM_R3_MMIO_WRITE
10725 || rcStrict == VINF_CPUM_R3_MSR_READ
10726 || rcStrict == VINF_CPUM_R3_MSR_WRITE
10727 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
10728 || rcStrict == VINF_EM_RAW_TO_R3
10729 || rcStrict == VINF_EM_RAW_EMULATE_IO_BLOCK
10730 /* raw-mode / virt handlers only: */
10731 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT
10732 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT
10733 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT
10734 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT
10735 || rcStrict == VINF_SELM_SYNC_GDT
10736 || rcStrict == VINF_CSAM_PENDING_ACTION
10737 || rcStrict == VINF_PATM_CHECK_PATCH_PAGE
10738 , ("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
10739/** @todo adjust for VINF_EM_RAW_EMULATE_INSTR */
10740 int32_t const rcPassUp = pIemCpu->rcPassUp;
10741 if (rcPassUp == VINF_SUCCESS)
10742 pIemCpu->cRetInfStatuses++;
10743 else if ( rcPassUp < VINF_EM_FIRST
10744 || rcPassUp > VINF_EM_LAST
10745 || rcPassUp < VBOXSTRICTRC_VAL(rcStrict))
10746 {
10747 Log(("IEM: rcPassUp=%Rrc! rcStrict=%Rrc\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
10748 pIemCpu->cRetPassUpStatus++;
10749 rcStrict = rcPassUp;
10750 }
10751 else
10752 {
10753 Log(("IEM: rcPassUp=%Rrc rcStrict=%Rrc!\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
10754 pIemCpu->cRetInfStatuses++;
10755 }
10756 }
10757 else if (rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED)
10758 pIemCpu->cRetAspectNotImplemented++;
10759 else if (rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
10760 pIemCpu->cRetInstrNotImplemented++;
10761#ifdef IEM_VERIFICATION_MODE_FULL
10762 else if (rcStrict == VERR_IEM_RESTART_INSTRUCTION)
10763 rcStrict = VINF_SUCCESS;
10764#endif
10765 else
10766 pIemCpu->cRetErrStatuses++;
10767 }
10768 else if (pIemCpu->rcPassUp != VINF_SUCCESS)
10769 {
10770 pIemCpu->cRetPassUpStatus++;
10771 rcStrict = pIemCpu->rcPassUp;
10772 }
10773
10774 return rcStrict;
10775}
10776
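/* Sketch of the pass-up policy above (a reading of the code, not a normative
 * statement): a pending rcPassUp replaces an informational rcStrict when it
 * lies outside the VINF_EM_FIRST..VINF_EM_LAST range or is numerically smaller
 * than rcStrict, lower EM status values carrying higher priority. For example,
 * rcStrict == VINF_EM_RAW_TO_R3 with rcPassUp == VINF_IOM_R3_MMIO_WRITE would
 * yield VINF_IOM_R3_MMIO_WRITE. */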
10777
10778/**
10779 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
10780 * IEMExecOneWithPrefetchedByPC.
10781 *
10782 * @return Strict VBox status code.
10783 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10784 * @param pIemCpu The IEM per CPU data.
10785 * @param fExecuteInhibit If set, execute the instruction following CLI,
10786 * POP SS and MOV SS,GR.
10787 */
10788DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPU pVCpu, PIEMCPU pIemCpu, bool fExecuteInhibit)
10789{
10790 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
10791 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
10792 if (rcStrict == VINF_SUCCESS)
10793 pIemCpu->cInstructions++;
10794 if (pIemCpu->cActiveMappings > 0)
10795 iemMemRollback(pIemCpu);
10796//#ifdef DEBUG
10797// AssertMsg(pIemCpu->offOpcode == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", pIemCpu->offOpcode, cbInstr));
10798//#endif
10799
10800 /* Execute the next instruction as well if a cli, pop ss or
10801 mov ss, Gr has just completed successfully. */
10802 if ( fExecuteInhibit
10803 && rcStrict == VINF_SUCCESS
10804 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
10805 && EMGetInhibitInterruptsPC(pVCpu) == pIemCpu->CTX_SUFF(pCtx)->rip )
10806 {
10807 rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, pIemCpu->fBypassHandlers);
10808 if (rcStrict == VINF_SUCCESS)
10809 {
10810# ifdef LOG_ENABLED
10811 iemLogCurInstr(IEMCPU_TO_VMCPU(pIemCpu), pIemCpu->CTX_SUFF(pCtx), false);
10812# endif
10813 IEM_OPCODE_GET_NEXT_U8(&b);
10814 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
10815 if (rcStrict == VINF_SUCCESS)
10816 pIemCpu->cInstructions++;
10817 if (pIemCpu->cActiveMappings > 0)
10818 iemMemRollback(pIemCpu);
10819 }
10820 EMSetInhibitInterruptsPC(pVCpu, UINT64_C(0x7777555533331111));
10821 }
10822
10823 /*
10824 * Return value fiddling, statistics and sanity assertions.
10825 */
10826 rcStrict = iemExecStatusCodeFiddling(pIemCpu, rcStrict);
10827
10828 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->cs));
10829 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->ss));
10830#if defined(IEM_VERIFICATION_MODE_FULL)
10831 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->es));
10832 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->ds));
10833 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->fs));
10834 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pIemCpu->CTX_SUFF(pCtx)->gs));
10835#endif
10836 return rcStrict;
10837}
10838
10839
10840#ifdef IN_RC
10841/**
10842 * Re-enters raw-mode or ensures we return to ring-3.
10843 *
10844 * @returns rcStrict, maybe modified.
10845 * @param pIemCpu The IEM CPU structure.
10846 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10847 * @param pCtx The current CPU context.
10848 * @param rcStrict The status code returned by the interpreter.
10849 */
10850DECLINLINE(VBOXSTRICTRC) iemRCRawMaybeReenter(PIEMCPU pIemCpu, PVMCPU pVCpu, PCPUMCTX pCtx, VBOXSTRICTRC rcStrict)
10851{
10852 if (!pIemCpu->fInPatchCode)
10853 CPUMRawEnter(pVCpu);
10854 return rcStrict;
10855}
10856#endif
10857
10858
10859/**
10860 * Execute one instruction.
10861 *
10862 * @return Strict VBox status code.
10863 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10864 */
10865VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPU pVCpu)
10866{
10867 PIEMCPU pIemCpu = &pVCpu->iem.s;
10868
10869#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
10870 iemExecVerificationModeSetup(pIemCpu);
10871#endif
10872#ifdef LOG_ENABLED
10873 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
10874 iemLogCurInstr(pVCpu, pCtx, true);
10875#endif
10876
10877 /*
10878 * Do the decoding and emulation.
10879 */
10880 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, false);
10881 if (rcStrict == VINF_SUCCESS)
10882 rcStrict = iemExecOneInner(pVCpu, pIemCpu, true);
10883
10884#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
10885 /*
10886 * Assert some sanity.
10887 */
10888 iemExecVerificationModeCheck(pIemCpu);
10889#endif
10890#ifdef IN_RC
10891 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pIemCpu->CTX_SUFF(pCtx), rcStrict);
10892#endif
10893 if (rcStrict != VINF_SUCCESS)
10894 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
10895 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
10896 return rcStrict;
10897}
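
/* Illustration only -- a minimal, hypothetical caller sketch (not actual VirtualBox code; the
 * function name is made up). It shows the typical pattern on the EMT: execute a single guest
 * instruction and hand any status other than VINF_SUCCESS back to the outer execution loop. */
#if 0
static VBOXSTRICTRC sampleSingleStep(PVMCPU pVCpu)
{
    VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
    if (rcStrict == VINF_SUCCESS)
        return VINF_SUCCESS;    /* Instruction fully handled here. */
    return rcStrict;            /* Informational or error status: let the caller decide. */
}
#endif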
10898
10899
10900VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
10901{
10902 PIEMCPU pIemCpu = &pVCpu->iem.s;
10903 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
10904 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
10905
10906 uint32_t const cbOldWritten = pIemCpu->cbWritten;
10907 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, false);
10908 if (rcStrict == VINF_SUCCESS)
10909 {
10910 rcStrict = iemExecOneInner(pVCpu, pIemCpu, true);
10911 if (pcbWritten)
10912 *pcbWritten = pIemCpu->cbWritten - cbOldWritten;
10913 }
10914
10915#ifdef IN_RC
10916 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pCtx, rcStrict);
10917#endif
10918 return rcStrict;
10919}
10920
10921
10922VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
10923 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
10924{
10925 PIEMCPU pIemCpu = &pVCpu->iem.s;
10926 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
10927 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
10928
10929 VBOXSTRICTRC rcStrict;
10930 if ( cbOpcodeBytes
10931 && pCtx->rip == OpcodeBytesPC)
10932 {
10933 iemInitDecoder(pIemCpu, false);
10934 pIemCpu->cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pIemCpu->abOpcode));
10935 memcpy(pIemCpu->abOpcode, pvOpcodeBytes, pIemCpu->cbOpcode);
10936 rcStrict = VINF_SUCCESS;
10937 }
10938 else
10939 rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, false);
10940 if (rcStrict == VINF_SUCCESS)
10941 {
10942 rcStrict = iemExecOneInner(pVCpu, pIemCpu, true);
10943 }
10944
10945#ifdef IN_RC
10946 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pCtx, rcStrict);
10947#endif
10948 return rcStrict;
10949}
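
/* Illustration only -- hypothetical use of IEMExecOneWithPrefetchedByPC (all names made up).
 * A caller that already has the opcode bytes (e.g. captured during an earlier exit) can pass
 * them in to avoid refetching; if the given PC no longer matches the current RIP, the function
 * falls back to the normal prefetch path as seen above. */
#if 0
static VBOXSTRICTRC sampleExecWithBytes(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t GCPtrInstr,
                                        const uint8_t *pabInstr, size_t cbInstrBytes)
{
    return IEMExecOneWithPrefetchedByPC(pVCpu, pCtxCore, GCPtrInstr, pabInstr, cbInstrBytes);
}
#endif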
10950
10951
10952VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
10953{
10954 PIEMCPU pIemCpu = &pVCpu->iem.s;
10955 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
10956 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
10957
10958 uint32_t const cbOldWritten = pIemCpu->cbWritten;
10959 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, true);
10960 if (rcStrict == VINF_SUCCESS)
10961 {
10962 rcStrict = iemExecOneInner(pVCpu, pIemCpu, false);
10963 if (pcbWritten)
10964 *pcbWritten = pIemCpu->cbWritten - cbOldWritten;
10965 }
10966
10967#ifdef IN_RC
10968 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pCtx, rcStrict);
10969#endif
10970 return rcStrict;
10971}
10972
10973
10974VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
10975 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
10976{
10977 PIEMCPU pIemCpu = &pVCpu->iem.s;
10978 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
10979 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
10980
10981 VBOXSTRICTRC rcStrict;
10982 if ( cbOpcodeBytes
10983 && pCtx->rip == OpcodeBytesPC)
10984 {
10985 iemInitDecoder(pIemCpu, true);
10986 pIemCpu->cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pIemCpu->abOpcode));
10987 memcpy(pIemCpu->abOpcode, pvOpcodeBytes, pIemCpu->cbOpcode);
10988 rcStrict = VINF_SUCCESS;
10989 }
10990 else
10991 rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, true);
10992 if (rcStrict == VINF_SUCCESS)
10993 rcStrict = iemExecOneInner(pVCpu, pIemCpu, false);
10994
10995#ifdef IN_RC
10996 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pCtx, rcStrict);
10997#endif
10998 return rcStrict;
10999}
11000
11001
11002VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPU pVCpu)
11003{
11004 PIEMCPU pIemCpu = &pVCpu->iem.s;
11005
11006 /*
11007 * See if there is an interrupt pending in TRPM and inject it if we can.
11008 */
11009#if !defined(IEM_VERIFICATION_MODE_FULL) || !defined(IN_RING3)
11010 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
11011# ifdef IEM_VERIFICATION_MODE_FULL
11012 pIemCpu->uInjectCpl = UINT8_MAX;
11013# endif
11014 if ( pCtx->eflags.Bits.u1IF
11015 && TRPMHasTrap(pVCpu)
11016 && EMGetInhibitInterruptsPC(pVCpu) != pCtx->rip)
11017 {
11018 uint8_t u8TrapNo;
11019 TRPMEVENT enmType;
11020 RTGCUINT uErrCode;
11021 RTGCPTR uCr2;
11022 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
11023 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
11024 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
11025 TRPMResetTrap(pVCpu);
11026 }
11027#else
11028 iemExecVerificationModeSetup(pIemCpu);
11029 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
11030#endif
11031
11032 /*
11033 * Log the state.
11034 */
11035#ifdef LOG_ENABLED
11036 iemLogCurInstr(pVCpu, pCtx, true);
11037#endif
11038
11039 /*
11040 * Do the decoding and emulation.
11041 */
11042 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu, false);
11043 if (rcStrict == VINF_SUCCESS)
11044 rcStrict = iemExecOneInner(pVCpu, pIemCpu, true);
11045
11046#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
11047 /*
11048 * Assert some sanity.
11049 */
11050 iemExecVerificationModeCheck(pIemCpu);
11051#endif
11052
11053 /*
11054 * Maybe re-enter raw-mode and log.
11055 */
11056#ifdef IN_RC
11057 rcStrict = iemRCRawMaybeReenter(pIemCpu, pVCpu, pIemCpu->CTX_SUFF(pCtx), rcStrict);
11058#endif
11059 if (rcStrict != VINF_SUCCESS)
11060 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
11061 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
11062 return rcStrict;
11063}
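
/* Illustration only -- a hypothetical outer loop on top of IEMExecLots (names made up). The
 * real EM loops are more involved; the point is merely that the call is repeated until a
 * status other than VINF_SUCCESS tells the caller to do something else. */
#if 0
static VBOXSTRICTRC sampleRunGuest(PVMCPU pVCpu, uint32_t cMaxInstructions)
{
    VBOXSTRICTRC rcStrict = VINF_SUCCESS;
    for (uint32_t i = 0; i < cMaxInstructions && rcStrict == VINF_SUCCESS; i++)
        rcStrict = IEMExecLots(pVCpu);
    return rcStrict;
}
#endif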
11064
11065
11066
11067/**
11068 * Injects a trap, fault, abort, software interrupt or external interrupt.
11069 *
11070 * The parameter list matches TRPMQueryTrapAll pretty closely.
11071 *
11072 * @returns Strict VBox status code.
11073 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11074 * @param u8TrapNo The trap number.
11075 * @param enmType What type is it (trap/fault/abort), software
11076 * interrupt or hardware interrupt.
11077 * @param uErrCode The error code if applicable.
11078 * @param uCr2 The CR2 value if applicable.
11079 * @param cbInstr The instruction length (only relevant for
11080 * software interrupts).
11081 */
11082VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPU pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
11083 uint8_t cbInstr)
11084{
11085 iemInitDecoder(&pVCpu->iem.s, false);
11086#ifdef DBGFTRACE_ENABLED
11087 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
11088 u8TrapNo, enmType, uErrCode, uCr2);
11089#endif
11090
11091 uint32_t fFlags;
11092 switch (enmType)
11093 {
11094 case TRPM_HARDWARE_INT:
11095 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
11096 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
11097 uErrCode = uCr2 = 0;
11098 break;
11099
11100 case TRPM_SOFTWARE_INT:
11101 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
11102 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
11103 uErrCode = uCr2 = 0;
11104 break;
11105
11106 case TRPM_TRAP:
11107 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
11108 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
11109 if (u8TrapNo == X86_XCPT_PF)
11110 fFlags |= IEM_XCPT_FLAGS_CR2;
11111 switch (u8TrapNo)
11112 {
11113 case X86_XCPT_DF:
11114 case X86_XCPT_TS:
11115 case X86_XCPT_NP:
11116 case X86_XCPT_SS:
11117 case X86_XCPT_PF:
11118 case X86_XCPT_AC:
11119 fFlags |= IEM_XCPT_FLAGS_ERR;
11120 break;
11121
11122 case X86_XCPT_NMI:
11123 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
11124 break;
11125 }
11126 break;
11127
11128 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11129 }
11130
11131 return iemRaiseXcptOrInt(&pVCpu->iem.s, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
11132}
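
/* Illustration only -- hypothetical injection of an external (hardware) interrupt via the API
 * above (function name made up). For TRPM_HARDWARE_INT the error code and CR2 arguments are
 * ignored (zeroed in the switch above), and cbInstr only matters for software interrupts. */
#if 0
static VBOXSTRICTRC sampleInjectExtInt(PVMCPU pVCpu, uint8_t u8Vector)
{
    return IEMInjectTrap(pVCpu, u8Vector, TRPM_HARDWARE_INT, 0 /*uErrCode*/, 0 /*uCr2*/, 0 /*cbInstr*/);
}
#endif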
11133
11134
11135/**
11136 * Injects the active TRPM event.
11137 *
11138 * @returns Strict VBox status code.
11139 * @param pVCpu The cross context virtual CPU structure.
11140 */
11141VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPU pVCpu)
11142{
11143#ifndef IEM_IMPLEMENTS_TASKSWITCH
11144 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
11145#else
11146 uint8_t u8TrapNo;
11147 TRPMEVENT enmType;
11148 RTGCUINT uErrCode;
11149 RTGCUINTPTR uCr2;
11150 uint8_t cbInstr;
11151 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr);
11152 if (RT_FAILURE(rc))
11153 return rc;
11154
11155 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
11156
11157 /** @todo Are there any other codes that imply the event was successfully
11158 * delivered to the guest? See @bugref{6607}. */
11159 if ( rcStrict == VINF_SUCCESS
11160 || rcStrict == VINF_IEM_RAISED_XCPT)
11161 {
11162 TRPMResetTrap(pVCpu);
11163 }
11164 return rcStrict;
11165#endif
11166}
11167
11168
11169VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
11170{
11171 return VERR_NOT_IMPLEMENTED;
11172}
11173
11174
11175VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
11176{
11177 return VERR_NOT_IMPLEMENTED;
11178}
11179
11180
11181#if 0 /* The IRET-to-v8086 mode in PATM is very optimistic, so I don't dare do this yet. */
11182/**
11183 * Executes an IRET instruction with default operand size.
11184 *
11185 * This is for PATM.
11186 *
11187 * @returns VBox status code.
11188 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11189 * @param pCtxCore The register frame.
11190 */
11191VMM_INT_DECL(int) IEMExecInstr_iret(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
11192{
11193 PIEMCPU pIemCpu = &pVCpu->iem.s;
11194 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
11195
11196 iemCtxCoreToCtx(pCtx, pCtxCore);
11197 iemInitDecoder(pIemCpu);
11198 VBOXSTRICTRC rcStrict = iemCImpl_iret(pIemCpu, 1, pIemCpu->enmDefOpSize);
11199 if (rcStrict == VINF_SUCCESS)
11200 iemCtxToCtxCore(pCtxCore, pCtx);
11201 else
11202 LogFlow(("IEMExecInstr_iret: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
11203 pCtx->cs, pCtx->rip, pCtx->ss, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
11204 return rcStrict;
11205}
11206#endif
11207
11208
11209/**
11210 * Macro used by the IEMExec* methods to check the given instruction length.
11211 *
11212 * Will return on failure!
11213 *
11214 * @param a_cbInstr The given instruction length.
11215 * @param a_cbMin The minimum length.
11216 */
11217#define IEMEXEC_ASSERT_INSTR_LEN_RETURN(a_cbInstr, a_cbMin) \
11218 AssertMsgReturn((unsigned)(a_cbInstr) - (unsigned)(a_cbMin) <= (unsigned)15 - (unsigned)(a_cbMin), \
11219 ("cbInstr=%u cbMin=%u\n", (a_cbInstr), (a_cbMin)), VERR_IEM_INVALID_INSTR_LENGTH)
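
/* Added note (illustration, not from the original sources): the unsigned subtraction lets a
 * single comparison reject both too-short and too-long lengths, since a_cbInstr < a_cbMin
 * wraps to a huge value. E.g. with a_cbMin = 2 the right-hand side is 13; cbInstr = 1 yields
 * 0xffffffff (rejected), cbInstr = 16 yields 14 (rejected), and cbInstr = 2..15 yields 0..13
 * (accepted, i.e. within the 15 byte x86 instruction length limit). */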
11220
11221
11222/**
11223 * Interface for HM and EM for executing string I/O OUT (write) instructions.
11224 *
11225 * This API ASSUMES that the caller has already verified that the guest code is
11226 * allowed to access the I/O port. (The I/O port is in the DX register in the
11227 * guest state.)
11228 *
11229 * @returns Strict VBox status code.
11230 * @param pVCpu The cross context virtual CPU structure.
11231 * @param cbValue The size of the I/O port access (1, 2, or 4).
11232 * @param enmAddrMode The addressing mode.
11233 * @param fRepPrefix Indicates whether a repeat prefix is used
11234 * (doesn't matter which for this instruction).
11235 * @param cbInstr The instruction length in bytes.
11236 * @param iEffSeg The effective segment register number.
11237 */
11238VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
11239 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg)
11240{
11241 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
11242 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
11243
11244 /*
11245 * State init.
11246 */
11247 PIEMCPU pIemCpu = &pVCpu->iem.s;
11248 iemInitExec(pIemCpu, false /*fBypassHandlers*/);
11249
11250 /*
11251 * Switch orgy for getting to the right handler.
11252 */
11253 VBOXSTRICTRC rcStrict;
11254 if (fRepPrefix)
11255 {
11256 switch (enmAddrMode)
11257 {
11258 case IEMMODE_16BIT:
11259 switch (cbValue)
11260 {
11261 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11262 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11263 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11264 default:
11265 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11266 }
11267 break;
11268
11269 case IEMMODE_32BIT:
11270 switch (cbValue)
11271 {
11272 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11273 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11274 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11275 default:
11276 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11277 }
11278 break;
11279
11280 case IEMMODE_64BIT:
11281 switch (cbValue)
11282 {
11283 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11284 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11285 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11286 default:
11287 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11288 }
11289 break;
11290
11291 default:
11292 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
11293 }
11294 }
11295 else
11296 {
11297 switch (enmAddrMode)
11298 {
11299 case IEMMODE_16BIT:
11300 switch (cbValue)
11301 {
11302 case 1: rcStrict = iemCImpl_outs_op8_addr16(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11303 case 2: rcStrict = iemCImpl_outs_op16_addr16(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11304 case 4: rcStrict = iemCImpl_outs_op32_addr16(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11305 default:
11306 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11307 }
11308 break;
11309
11310 case IEMMODE_32BIT:
11311 switch (cbValue)
11312 {
11313 case 1: rcStrict = iemCImpl_outs_op8_addr32(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11314 case 2: rcStrict = iemCImpl_outs_op16_addr32(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11315 case 4: rcStrict = iemCImpl_outs_op32_addr32(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11316 default:
11317 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11318 }
11319 break;
11320
11321 case IEMMODE_64BIT:
11322 switch (cbValue)
11323 {
11324 case 1: rcStrict = iemCImpl_outs_op8_addr64(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11325 case 2: rcStrict = iemCImpl_outs_op16_addr64(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11326 case 4: rcStrict = iemCImpl_outs_op32_addr64(pIemCpu, cbInstr, iEffSeg, true /*fIoChecked*/); break;
11327 default:
11328 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11329 }
11330 break;
11331
11332 default:
11333 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
11334 }
11335 }
11336
11337 return iemExecStatusCodeFiddling(pIemCpu, rcStrict);
11338}
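
/* Illustration only -- hypothetical call for a guest 'rep outsb' (DS:[esi] to port DX) that
 * trapped in 32-bit code; the instruction length would come from the VM-exit information and
 * the function name is made up. The caller must have done the I/O permission checks already,
 * as stated above. */
#if 0
static VBOXSTRICTRC sampleEmulateRepOutsb(PVMCPU pVCpu, uint8_t cbInstr)
{
    return IEMExecStringIoWrite(pVCpu, 1 /*cbValue*/, IEMMODE_32BIT, true /*fRepPrefix*/,
                                cbInstr, X86_SREG_DS /*iEffSeg*/);
}
#endif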
11339
11340
11341/**
11342 * Interface for HM and EM for executing string I/O IN (read) instructions.
11343 *
11344 * This API ASSUMES that the caller has already verified that the guest code is
11345 * allowed to access the I/O port. (The I/O port is in the DX register in the
11346 * guest state.)
11347 *
11348 * @returns Strict VBox status code.
11349 * @param pVCpu The cross context virtual CPU structure.
11350 * @param cbValue The size of the I/O port access (1, 2, or 4).
11351 * @param enmAddrMode The addressing mode.
11352 * @param fRepPrefix Indicates whether a repeat prefix is used
11353 * (doesn't matter which for this instruction).
11354 * @param cbInstr The instruction length in bytes.
11355 */
11356VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
11357 bool fRepPrefix, uint8_t cbInstr)
11358{
11359 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
11360
11361 /*
11362 * State init.
11363 */
11364 PIEMCPU pIemCpu = &pVCpu->iem.s;
11365 iemInitExec(pIemCpu, false /*fBypassHandlers*/);
11366
11367 /*
11368 * Switch orgy for getting to the right handler.
11369 */
11370 VBOXSTRICTRC rcStrict;
11371 if (fRepPrefix)
11372 {
11373 switch (enmAddrMode)
11374 {
11375 case IEMMODE_16BIT:
11376 switch (cbValue)
11377 {
11378 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11379 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11380 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11381 default:
11382 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11383 }
11384 break;
11385
11386 case IEMMODE_32BIT:
11387 switch (cbValue)
11388 {
11389 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11390 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11391 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11392 default:
11393 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11394 }
11395 break;
11396
11397 case IEMMODE_64BIT:
11398 switch (cbValue)
11399 {
11400 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11401 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11402 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11403 default:
11404 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11405 }
11406 break;
11407
11408 default:
11409 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
11410 }
11411 }
11412 else
11413 {
11414 switch (enmAddrMode)
11415 {
11416 case IEMMODE_16BIT:
11417 switch (cbValue)
11418 {
11419 case 1: rcStrict = iemCImpl_ins_op8_addr16(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11420 case 2: rcStrict = iemCImpl_ins_op16_addr16(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11421 case 4: rcStrict = iemCImpl_ins_op32_addr16(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11422 default:
11423 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11424 }
11425 break;
11426
11427 case IEMMODE_32BIT:
11428 switch (cbValue)
11429 {
11430 case 1: rcStrict = iemCImpl_ins_op8_addr32(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11431 case 2: rcStrict = iemCImpl_ins_op16_addr32(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11432 case 4: rcStrict = iemCImpl_ins_op32_addr32(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11433 default:
11434 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11435 }
11436 break;
11437
11438 case IEMMODE_64BIT:
11439 switch (cbValue)
11440 {
11441 case 1: rcStrict = iemCImpl_ins_op8_addr64(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11442 case 2: rcStrict = iemCImpl_ins_op16_addr64(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11443 case 4: rcStrict = iemCImpl_ins_op32_addr64(pIemCpu, cbInstr, true /*fIoChecked*/); break;
11444 default:
11445 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11446 }
11447 break;
11448
11449 default:
11450 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
11451 }
11452 }
11453
11454 return iemExecStatusCodeFiddling(pIemCpu, rcStrict);
11455}
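
/* Illustration only -- counterpart sketch for a plain (non-REP) 'insw' in 16-bit code (name
 * made up). INS always stores to ES:[e]di, which is why there is no iEffSeg parameter here. */
#if 0
static VBOXSTRICTRC sampleEmulateInsw(PVMCPU pVCpu, uint8_t cbInstr)
{
    return IEMExecStringIoRead(pVCpu, 2 /*cbValue*/, IEMMODE_16BIT, false /*fRepPrefix*/, cbInstr);
}
#endif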
11456
11457
11458
11459/**
11460 * Interface for HM and EM to write to a CRx register.
11461 *
11462 * @returns Strict VBox status code.
11463 * @param pVCpu The cross context virtual CPU structure.
11464 * @param cbInstr The instruction length in bytes.
11465 * @param iCrReg The control register number (destination).
11466 * @param iGReg The general purpose register number (source).
11467 *
11468 * @remarks In ring-0 not all of the state needs to be synced in.
11469 */
11470VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
11471{
11472 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11473 Assert(iCrReg < 16);
11474 Assert(iGReg < 16);
11475
11476 PIEMCPU pIemCpu = &pVCpu->iem.s;
11477 iemInitExec(pIemCpu, false /*fBypassHandlers*/);
11478 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
11479 return iemExecStatusCodeFiddling(pIemCpu, rcStrict);
11480}
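
/* Illustration only -- hypothetical use from an exit handler that has already decoded a
 * 'mov cr3, rax' intercept (register indexes shown literally; in practice they come from the
 * exit qualification, and the function name is made up). */
#if 0
static VBOXSTRICTRC sampleMovRaxToCr3(PVMCPU pVCpu, uint8_t cbInstr)
{
    return IEMExecDecodedMovCRxWrite(pVCpu, cbInstr, 3 /*iCrReg = CR3*/, X86_GREG_xAX /*iGReg*/);
}
#endif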
11481
11482
11483/**
11484 * Interface for HM and EM to read from a CRx register.
11485 *
11486 * @returns Strict VBox status code.
11487 * @param pVCpu The cross context virtual CPU structure.
11488 * @param cbInstr The instruction length in bytes.
11489 * @param iGReg The general purpose register number (destination).
11490 * @param iCrReg The control register number (source).
11491 *
11492 * @remarks In ring-0 not all of the state needs to be synced in.
11493 */
11494VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
11495{
11496 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11497 Assert(iCrReg < 16);
11498 Assert(iGReg < 16);
11499
11500 PIEMCPU pIemCpu = &pVCpu->iem.s;
11501 iemInitExec(pIemCpu, false /*fBypassHandlers*/);
11502 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
11503 return iemExecStatusCodeFiddling(pIemCpu, rcStrict);
11504}
11505
11506
11507/**
11508 * Interface for HM and EM to clear the CR0[TS] bit.
11509 *
11510 * @returns Strict VBox status code.
11511 * @param pVCpu The cross context virtual CPU structure.
11512 * @param cbInstr The instruction length in bytes.
11513 *
11514 * @remarks In ring-0 not all of the state needs to be synced in.
11515 */
11516VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPU pVCpu, uint8_t cbInstr)
11517{
11518 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11519
11520 PIEMCPU pIemCpu = &pVCpu->iem.s;
11521 iemInitExec(pIemCpu, false /*fBypassHandlers*/);
11522 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
11523 return iemExecStatusCodeFiddling(pIemCpu, rcStrict);
11524}
11525
11526
11527/**
11528 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
11529 *
11530 * @returns Strict VBox status code.
11531 * @param pVCpu The cross context virtual CPU structure.
11532 * @param cbInstr The instruction length in bytes.
11533 * @param uValue The value to load into CR0.
11534 *
11535 * @remarks In ring-0 not all of the state needs to be synced in.
11536 */
11537VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPU pVCpu, uint8_t cbInstr, uint16_t uValue)
11538{
11539 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11540
11541 PIEMCPU pIemCpu = &pVCpu->iem.s;
11542 iemInitExec(pIemCpu, false /*fBypassHandlers*/);
11543 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_lmsw, uValue);
11544 return iemExecStatusCodeFiddling(pIemCpu, rcStrict);
11545}
11546
11547
11548/**
11549 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
11550 *
11551 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
11552 *
11553 * @returns Strict VBox status code.
11554 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11555 * @param cbInstr The instruction length in bytes.
11556 * @remarks In ring-0 not all of the state needs to be synced in.
11557 * @thread EMT(pVCpu)
11558 */
11559VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPU pVCpu, uint8_t cbInstr)
11560{
11561 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11562
11563 PIEMCPU pIemCpu = &pVCpu->iem.s;
11564 iemInitExec(pIemCpu, false /*fBypassHandlers*/);
11565 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
11566 return iemExecStatusCodeFiddling(pIemCpu, rcStrict);
11567}
11568
11569#ifdef IN_RING3
11570
11571/**
11572 * Called by force-flag handling code when VMCPU_FF_IEM is set.
11573 *
11574 * @returns Merge between @a rcStrict and what the commit operation returned.
11575 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11576 * @param rcStrict The status code returned by ring-0 or raw-mode.
11577 */
11578VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3DoPendingAction(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
11579{
11580 PIEMCPU pIemCpu = &pVCpu->iem.s;
11581
11582 /*
11583 * Retrieve and reset the pending commit.
11584 */
11585 IEMCOMMIT const enmFn = pIemCpu->PendingCommit.enmFn;
11586 pIemCpu->PendingCommit.enmFn = IEMCOMMIT_INVALID;
11587 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);
11588
11589 /*
11590 * Must reset pass-up status code.
11591 */
11592 pIemCpu->rcPassUp = VINF_SUCCESS;
11593
11594 /*
11595 * Call the function. Currently using a switch here instead of a function
11596 * pointer table, as a switch won't get skewed (out of sync with the enum).
11597 */
11598 VBOXSTRICTRC rcStrictCommit;
11599 switch (enmFn)
11600 {
11601 case IEMCOMMIT_INS_OP8_ADDR16: rcStrictCommit = iemR3CImpl_commit_ins_op8_addr16(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11602 case IEMCOMMIT_INS_OP8_ADDR32: rcStrictCommit = iemR3CImpl_commit_ins_op8_addr32(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11603 case IEMCOMMIT_INS_OP8_ADDR64: rcStrictCommit = iemR3CImpl_commit_ins_op8_addr64(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11604 case IEMCOMMIT_INS_OP16_ADDR16: rcStrictCommit = iemR3CImpl_commit_ins_op16_addr16(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11605 case IEMCOMMIT_INS_OP16_ADDR32: rcStrictCommit = iemR3CImpl_commit_ins_op16_addr32(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11606 case IEMCOMMIT_INS_OP16_ADDR64: rcStrictCommit = iemR3CImpl_commit_ins_op16_addr64(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11607 case IEMCOMMIT_INS_OP32_ADDR16: rcStrictCommit = iemR3CImpl_commit_ins_op32_addr16(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11608 case IEMCOMMIT_INS_OP32_ADDR32: rcStrictCommit = iemR3CImpl_commit_ins_op32_addr32(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11609 case IEMCOMMIT_INS_OP32_ADDR64: rcStrictCommit = iemR3CImpl_commit_ins_op32_addr64(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11610 case IEMCOMMIT_REP_INS_OP8_ADDR16: rcStrictCommit = iemR3CImpl_commit_rep_ins_op8_addr16(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11611 case IEMCOMMIT_REP_INS_OP8_ADDR32: rcStrictCommit = iemR3CImpl_commit_rep_ins_op8_addr32(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11612 case IEMCOMMIT_REP_INS_OP8_ADDR64: rcStrictCommit = iemR3CImpl_commit_rep_ins_op8_addr64(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11613 case IEMCOMMIT_REP_INS_OP16_ADDR16: rcStrictCommit = iemR3CImpl_commit_rep_ins_op16_addr16(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11614 case IEMCOMMIT_REP_INS_OP16_ADDR32: rcStrictCommit = iemR3CImpl_commit_rep_ins_op16_addr32(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11615 case IEMCOMMIT_REP_INS_OP16_ADDR64: rcStrictCommit = iemR3CImpl_commit_rep_ins_op16_addr64(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11616 case IEMCOMMIT_REP_INS_OP32_ADDR16: rcStrictCommit = iemR3CImpl_commit_rep_ins_op32_addr16(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11617 case IEMCOMMIT_REP_INS_OP32_ADDR32: rcStrictCommit = iemR3CImpl_commit_rep_ins_op32_addr32(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11618 case IEMCOMMIT_REP_INS_OP32_ADDR64: rcStrictCommit = iemR3CImpl_commit_rep_ins_op32_addr64(pIemCpu, pIemCpu->PendingCommit.cbInstr); break;
11619 default:
11620 AssertLogRelMsgFailedReturn(("enmFn=%#x (%d)\n", pIemCpu->PendingCommit.enmFn, pIemCpu->PendingCommit.enmFn), VERR_IEM_IPE_2);
11621 }
11622
11623 /*
11624 * Merge the status code (if any) with the incoming one.
11625 */
11626 rcStrictCommit = iemExecStatusCodeFiddling(pIemCpu, rcStrictCommit);
11627 if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
11628 return rcStrict;
11629 if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
11630 return rcStrictCommit;
11631
11632 /* Complicated. */
11633 if (RT_FAILURE(rcStrict))
11634 return rcStrict;
11635 if (RT_FAILURE(rcStrictCommit))
11636 return rcStrictCommit;
11637 if ( rcStrict >= VINF_EM_FIRST
11638 && rcStrict <= VINF_EM_LAST)
11639 {
11640 if ( rcStrictCommit >= VINF_EM_FIRST
11641 && rcStrictCommit <= VINF_EM_LAST)
11642 return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
11643
11644 /* This really shouldn't happen. Check PGM + handler code! */
11645 AssertLogRelMsgFailedReturn(("rcStrictCommit=%Rrc rcStrict=%Rrc enmFn=%d\n", VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), enmFn), VERR_IEM_IPE_1);
11646 }
11647 /* This shouldn't really happen either, see IOM_SUCCESS. */
11648 AssertLogRelMsgFailedReturn(("rcStrictCommit=%Rrc rcStrict=%Rrc enmFn=%d\n", VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), enmFn), VERR_IEM_IPE_2);
11649}
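
/* Illustration only -- hypothetical force-flag handling in a ring-3 loop (not actual EM code).
 * The point is that VMCPU_FF_IEM is checked and the pending commit is folded into whatever
 * status the loop already holds. */
#if 0
    if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
        rcStrict = IEMR3DoPendingAction(pVCpu, rcStrict);
#endif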
11650
11651#endif /* IN_RING3 */
11652