VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllInstructionsInterpretOnly.cpp@ 96014

Last change on this file since 96014 was 95517, checked in by vboxsync, 3 years ago

VMM/IEM: Simplified IEMOPMEDIAF3 and IEMOPMEDIAOPTF3 function table creation, moving most of them into the functions where they are used. Exceptions are 4 tables used by multiple decoder functions. bugref:9898

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 49.9 KB
Line 
1/* $Id: IEMAllInstructionsInterpretOnly.cpp 95517 2022-07-05 15:01:42Z vboxsync $ */
2/** @file
3 * IEM - Instruction Decoding and Emulation.
4 */
5
6/*
7 * Copyright (C) 2011-2022 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#ifndef LOG_GROUP /* defined when included by tstIEMCheckMc.cpp */
23# define LOG_GROUP LOG_GROUP_IEM
24#endif
25#define VMCPU_INCL_CPUM_GST_CTX
26#include <VBox/vmm/iem.h>
27#include <VBox/vmm/cpum.h>
28#include <VBox/vmm/apic.h>
29#include <VBox/vmm/pdm.h>
30#include <VBox/vmm/pgm.h>
31#include <VBox/vmm/iom.h>
32#include <VBox/vmm/em.h>
33#include <VBox/vmm/hm.h>
34#include <VBox/vmm/nem.h>
35#include <VBox/vmm/gim.h>
36#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
37# include <VBox/vmm/em.h>
38# include <VBox/vmm/hm_svm.h>
39#endif
40#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
41# include <VBox/vmm/hmvmxinline.h>
42#endif
43#include <VBox/vmm/tm.h>
44#include <VBox/vmm/dbgf.h>
45#include <VBox/vmm/dbgftrace.h>
46#ifndef TST_IEM_CHECK_MC
47# include "IEMInternal.h"
48#endif
49#include <VBox/vmm/vmcc.h>
50#include <VBox/log.h>
51#include <VBox/err.h>
52#include <VBox/param.h>
53#include <VBox/dis.h>
54#include <VBox/disopcode.h>
55#include <iprt/asm-math.h>
56#include <iprt/assert.h>
57#include <iprt/string.h>
58#include <iprt/x86.h>
59
60#ifndef TST_IEM_CHECK_MC
61# include "IEMInline.h"
62# include "IEMOpHlp.h"
63# include "IEMMc.h"
64#endif
65
66
67#ifdef _MSC_VER
68# pragma warning(push)
69# pragma warning(disable: 4702) /* Unreachable code like return in iemOp_Grp6_lldt. */
70#endif
71
72
73/*********************************************************************************************************************************
74* Global Variables *
75*********************************************************************************************************************************/
76#ifndef TST_IEM_CHECK_MC
/** Function table for the ADD instruction.
 * One entry pair per operand size: normal and LOCK-prefixed variant. */
IEM_STATIC const IEMOPBINSIZES g_iemAImpl_add =
{
    iemAImpl_add_u8,  iemAImpl_add_u8_locked,
    iemAImpl_add_u16, iemAImpl_add_u16_locked,
    iemAImpl_add_u32, iemAImpl_add_u32_locked,
    iemAImpl_add_u64, iemAImpl_add_u64_locked
};

/** Function table for the ADC instruction. */
IEM_STATIC const IEMOPBINSIZES g_iemAImpl_adc =
{
    iemAImpl_adc_u8,  iemAImpl_adc_u8_locked,
    iemAImpl_adc_u16, iemAImpl_adc_u16_locked,
    iemAImpl_adc_u32, iemAImpl_adc_u32_locked,
    iemAImpl_adc_u64, iemAImpl_adc_u64_locked
};

/** Function table for the SUB instruction. */
IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sub =
{
    iemAImpl_sub_u8,  iemAImpl_sub_u8_locked,
    iemAImpl_sub_u16, iemAImpl_sub_u16_locked,
    iemAImpl_sub_u32, iemAImpl_sub_u32_locked,
    iemAImpl_sub_u64, iemAImpl_sub_u64_locked
};

/** Function table for the SBB instruction. */
IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sbb =
{
    iemAImpl_sbb_u8,  iemAImpl_sbb_u8_locked,
    iemAImpl_sbb_u16, iemAImpl_sbb_u16_locked,
    iemAImpl_sbb_u32, iemAImpl_sbb_u32_locked,
    iemAImpl_sbb_u64, iemAImpl_sbb_u64_locked
};

/** Function table for the OR instruction. */
IEM_STATIC const IEMOPBINSIZES g_iemAImpl_or =
{
    iemAImpl_or_u8,  iemAImpl_or_u8_locked,
    iemAImpl_or_u16, iemAImpl_or_u16_locked,
    iemAImpl_or_u32, iemAImpl_or_u32_locked,
    iemAImpl_or_u64, iemAImpl_or_u64_locked
};

/** Function table for the XOR instruction. */
IEM_STATIC const IEMOPBINSIZES g_iemAImpl_xor =
{
    iemAImpl_xor_u8,  iemAImpl_xor_u8_locked,
    iemAImpl_xor_u16, iemAImpl_xor_u16_locked,
    iemAImpl_xor_u32, iemAImpl_xor_u32_locked,
    iemAImpl_xor_u64, iemAImpl_xor_u64_locked
};

/** Function table for the AND instruction. */
IEM_STATIC const IEMOPBINSIZES g_iemAImpl_and =
{
    iemAImpl_and_u8,  iemAImpl_and_u8_locked,
    iemAImpl_and_u16, iemAImpl_and_u16_locked,
    iemAImpl_and_u32, iemAImpl_and_u32_locked,
    iemAImpl_and_u64, iemAImpl_and_u64_locked
};

/** Function table for the CMP instruction.
 * @remarks Making operand order ASSUMPTIONS.
 * @note    No locked variants (NULL) - the helper code uses this to detect
 *          read-only operations and reject the LOCK prefix.
 */
IEM_STATIC const IEMOPBINSIZES g_iemAImpl_cmp =
{
    iemAImpl_cmp_u8,  NULL,
    iemAImpl_cmp_u16, NULL,
    iemAImpl_cmp_u32, NULL,
    iemAImpl_cmp_u64, NULL
};

/** Function table for the TEST instruction.
 * @remarks Making operand order ASSUMPTIONS.
 * @note    No locked variants (NULL), same as CMP.
 */
IEM_STATIC const IEMOPBINSIZES g_iemAImpl_test =
{
    iemAImpl_test_u8,  NULL,
    iemAImpl_test_u16, NULL,
    iemAImpl_test_u32, NULL,
    iemAImpl_test_u64, NULL
};
161

/** Function table for the BT instruction.
 * No 8-bit forms (NULL), and BT only reads, so no locked variants either. */
IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bt =
{
    NULL,            NULL,
    iemAImpl_bt_u16, NULL,
    iemAImpl_bt_u32, NULL,
    iemAImpl_bt_u64, NULL
};

/** Function table for the BTC instruction.  No 8-bit forms (NULL). */
IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btc =
{
    NULL,             NULL,
    iemAImpl_btc_u16, iemAImpl_btc_u16_locked,
    iemAImpl_btc_u32, iemAImpl_btc_u32_locked,
    iemAImpl_btc_u64, iemAImpl_btc_u64_locked
};

/** Function table for the BTR instruction.  No 8-bit forms (NULL). */
IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btr =
{
    NULL,             NULL,
    iemAImpl_btr_u16, iemAImpl_btr_u16_locked,
    iemAImpl_btr_u32, iemAImpl_btr_u32_locked,
    iemAImpl_btr_u64, iemAImpl_btr_u64_locked
};

/** Function table for the BTS instruction.  No 8-bit forms (NULL). */
IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bts =
{
    NULL,             NULL,
    iemAImpl_bts_u16, iemAImpl_bts_u16_locked,
    iemAImpl_bts_u32, iemAImpl_bts_u32_locked,
    iemAImpl_bts_u64, iemAImpl_bts_u64_locked
};
198
/** Function table for the BSF instruction. */
IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsf =
{
    NULL,             NULL,
    iemAImpl_bsf_u16, NULL,
    iemAImpl_bsf_u32, NULL,
    iemAImpl_bsf_u64, NULL
};

/** Function table for the BSF instruction, AMD EFLAGS variant. */
IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsf_amd =
{
    NULL,                 NULL,
    iemAImpl_bsf_u16_amd, NULL,
    iemAImpl_bsf_u32_amd, NULL,
    iemAImpl_bsf_u64_amd, NULL
};

/** Function table for the BSF instruction, Intel EFLAGS variant. */
IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsf_intel =
{
    NULL,                   NULL,
    iemAImpl_bsf_u16_intel, NULL,
    iemAImpl_bsf_u32_intel, NULL,
    iemAImpl_bsf_u64_intel, NULL
};

/** EFLAGS variation selection table for the BSF instruction.
 * Index order: default, Intel, AMD, default (same layout as the other
 * *_eflags selector tables in this file). */
IEM_STATIC const IEMOPBINSIZES * const g_iemAImpl_bsf_eflags[] =
{
    &g_iemAImpl_bsf,
    &g_iemAImpl_bsf_intel,
    &g_iemAImpl_bsf_amd,
    &g_iemAImpl_bsf,
};

/** Function table for the BSR instruction. */
IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsr =
{
    NULL,             NULL,
    iemAImpl_bsr_u16, NULL,
    iemAImpl_bsr_u32, NULL,
    iemAImpl_bsr_u64, NULL
};

/** Function table for the BSR instruction, AMD EFLAGS variant. */
IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsr_amd =
{
    NULL,                 NULL,
    iemAImpl_bsr_u16_amd, NULL,
    iemAImpl_bsr_u32_amd, NULL,
    iemAImpl_bsr_u64_amd, NULL
};

/** Function table for the BSR instruction, Intel EFLAGS variant. */
IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsr_intel =
{
    NULL,                   NULL,
    iemAImpl_bsr_u16_intel, NULL,
    iemAImpl_bsr_u32_intel, NULL,
    iemAImpl_bsr_u64_intel, NULL
};

/** EFLAGS variation selection table for the BSR instruction.
 * Index order: default, Intel, AMD, default. */
IEM_STATIC const IEMOPBINSIZES * const g_iemAImpl_bsr_eflags[] =
{
    &g_iemAImpl_bsr,
    &g_iemAImpl_bsr_intel,
    &g_iemAImpl_bsr_amd,
    &g_iemAImpl_bsr,
};
270
/** Function table for the two-operand IMUL instruction.
 * No 8-bit form (the two-operand encoding starts at 16-bit). */
IEM_STATIC const IEMOPBINSIZES g_iemAImpl_imul_two =
{
    NULL,                  NULL,
    iemAImpl_imul_two_u16, NULL,
    iemAImpl_imul_two_u32, NULL,
    iemAImpl_imul_two_u64, NULL
};

/** Function table for the IMUL instruction, AMD EFLAGS variant. */
IEM_STATIC const IEMOPBINSIZES g_iemAImpl_imul_two_amd =
{
    NULL,                      NULL,
    iemAImpl_imul_two_u16_amd, NULL,
    iemAImpl_imul_two_u32_amd, NULL,
    iemAImpl_imul_two_u64_amd, NULL
};

/** Function table for the IMUL instruction, Intel EFLAGS variant. */
IEM_STATIC const IEMOPBINSIZES g_iemAImpl_imul_two_intel =
{
    NULL,                        NULL,
    iemAImpl_imul_two_u16_intel, NULL,
    iemAImpl_imul_two_u32_intel, NULL,
    iemAImpl_imul_two_u64_intel, NULL
};

/** EFLAGS variation selection table for the IMUL instruction.
 * Index order: default, Intel, AMD, default. */
IEM_STATIC const IEMOPBINSIZES * const g_iemAImpl_imul_two_eflags[] =
{
    &g_iemAImpl_imul_two,
    &g_iemAImpl_imul_two_intel,
    &g_iemAImpl_imul_two_amd,
    &g_iemAImpl_imul_two,
};

/** EFLAGS variation selection table for the 16-bit IMUL instruction. */
IEM_STATIC PFNIEMAIMPLBINU16 const g_iemAImpl_imul_two_u16_eflags[] =
{
    iemAImpl_imul_two_u16,
    iemAImpl_imul_two_u16_intel,
    iemAImpl_imul_two_u16_amd,
    iemAImpl_imul_two_u16,
};

/** EFLAGS variation selection table for the 32-bit IMUL instruction. */
IEM_STATIC PFNIEMAIMPLBINU32 const g_iemAImpl_imul_two_u32_eflags[] =
{
    iemAImpl_imul_two_u32,
    iemAImpl_imul_two_u32_intel,
    iemAImpl_imul_two_u32_amd,
    iemAImpl_imul_two_u32,
};

/** EFLAGS variation selection table for the 64-bit IMUL instruction. */
IEM_STATIC PFNIEMAIMPLBINU64 const g_iemAImpl_imul_two_u64_eflags[] =
{
    iemAImpl_imul_two_u64,
    iemAImpl_imul_two_u64_intel,
    iemAImpl_imul_two_u64_amd,
    iemAImpl_imul_two_u64,
};
333
/** Group 1 /r lookup table.
 * Indexed by the ModR/M reg field; the order (ADD, OR, ADC, SBB, AND, SUB,
 * XOR, CMP) is fixed by the x86 group-1 encoding - do not reorder. */
IEM_STATIC const PCIEMOPBINSIZES g_apIemImplGrp1[8] =
{
    &g_iemAImpl_add,
    &g_iemAImpl_or,
    &g_iemAImpl_adc,
    &g_iemAImpl_sbb,
    &g_iemAImpl_and,
    &g_iemAImpl_sub,
    &g_iemAImpl_xor,
    &g_iemAImpl_cmp
};
346
/** Function table for the INC instruction.
 * One entry pair per operand size: normal and LOCK-prefixed variant. */
IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_inc =
{
    iemAImpl_inc_u8,  iemAImpl_inc_u8_locked,
    iemAImpl_inc_u16, iemAImpl_inc_u16_locked,
    iemAImpl_inc_u32, iemAImpl_inc_u32_locked,
    iemAImpl_inc_u64, iemAImpl_inc_u64_locked
};

/** Function table for the DEC instruction. */
IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_dec =
{
    iemAImpl_dec_u8,  iemAImpl_dec_u8_locked,
    iemAImpl_dec_u16, iemAImpl_dec_u16_locked,
    iemAImpl_dec_u32, iemAImpl_dec_u32_locked,
    iemAImpl_dec_u64, iemAImpl_dec_u64_locked
};

/** Function table for the NEG instruction. */
IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_neg =
{
    iemAImpl_neg_u8,  iemAImpl_neg_u8_locked,
    iemAImpl_neg_u16, iemAImpl_neg_u16_locked,
    iemAImpl_neg_u32, iemAImpl_neg_u32_locked,
    iemAImpl_neg_u64, iemAImpl_neg_u64_locked
};

/** Function table for the NOT instruction. */
IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_not =
{
    iemAImpl_not_u8,  iemAImpl_not_u8_locked,
    iemAImpl_not_u16, iemAImpl_not_u16_locked,
    iemAImpl_not_u32, iemAImpl_not_u32_locked,
    iemAImpl_not_u64, iemAImpl_not_u64_locked
};
382
383
384/** Function table for the ROL instruction. */
385IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rol =
386{
387 iemAImpl_rol_u8,
388 iemAImpl_rol_u16,
389 iemAImpl_rol_u32,
390 iemAImpl_rol_u64
391};
392
393/** Function table for the ROL instruction, AMD EFLAGS variant. */
394IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rol_amd =
395{
396 iemAImpl_rol_u8_amd,
397 iemAImpl_rol_u16_amd,
398 iemAImpl_rol_u32_amd,
399 iemAImpl_rol_u64_amd
400};
401
402/** Function table for the ROL instruction, Intel EFLAGS variant. */
403IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rol_intel =
404{
405 iemAImpl_rol_u8_intel,
406 iemAImpl_rol_u16_intel,
407 iemAImpl_rol_u32_intel,
408 iemAImpl_rol_u64_intel
409};
410
411/** EFLAGS variation selection table for the ROL instruction. */
412IEM_STATIC const IEMOPSHIFTSIZES * const g_iemAImpl_rol_eflags[] =
413{
414 &g_iemAImpl_rol,
415 &g_iemAImpl_rol_intel,
416 &g_iemAImpl_rol_amd,
417 &g_iemAImpl_rol,
418};
419
420
421/** Function table for the ROR instruction. */
422IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_ror =
423{
424 iemAImpl_ror_u8,
425 iemAImpl_ror_u16,
426 iemAImpl_ror_u32,
427 iemAImpl_ror_u64
428};
429
430/** Function table for the ROR instruction, AMD EFLAGS variant. */
431IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_ror_amd =
432{
433 iemAImpl_ror_u8_amd,
434 iemAImpl_ror_u16_amd,
435 iemAImpl_ror_u32_amd,
436 iemAImpl_ror_u64_amd
437};
438
439/** Function table for the ROR instruction, Intel EFLAGS variant. */
440IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_ror_intel =
441{
442 iemAImpl_ror_u8_intel,
443 iemAImpl_ror_u16_intel,
444 iemAImpl_ror_u32_intel,
445 iemAImpl_ror_u64_intel
446};
447
448/** EFLAGS variation selection table for the ROR instruction. */
449IEM_STATIC const IEMOPSHIFTSIZES * const g_iemAImpl_ror_eflags[] =
450{
451 &g_iemAImpl_ror,
452 &g_iemAImpl_ror_intel,
453 &g_iemAImpl_ror_amd,
454 &g_iemAImpl_ror,
455};
456
457
458/** Function table for the RCL instruction. */
459IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcl =
460{
461 iemAImpl_rcl_u8,
462 iemAImpl_rcl_u16,
463 iemAImpl_rcl_u32,
464 iemAImpl_rcl_u64
465};
466
467/** Function table for the RCL instruction, AMD EFLAGS variant. */
468IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcl_amd =
469{
470 iemAImpl_rcl_u8_amd,
471 iemAImpl_rcl_u16_amd,
472 iemAImpl_rcl_u32_amd,
473 iemAImpl_rcl_u64_amd
474};
475
476/** Function table for the RCL instruction, Intel EFLAGS variant. */
477IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcl_intel =
478{
479 iemAImpl_rcl_u8_intel,
480 iemAImpl_rcl_u16_intel,
481 iemAImpl_rcl_u32_intel,
482 iemAImpl_rcl_u64_intel
483};
484
485/** EFLAGS variation selection table for the RCL instruction. */
486IEM_STATIC const IEMOPSHIFTSIZES * const g_iemAImpl_rcl_eflags[] =
487{
488 &g_iemAImpl_rcl,
489 &g_iemAImpl_rcl_intel,
490 &g_iemAImpl_rcl_amd,
491 &g_iemAImpl_rcl,
492};
493
494
495/** Function table for the RCR instruction. */
496IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcr =
497{
498 iemAImpl_rcr_u8,
499 iemAImpl_rcr_u16,
500 iemAImpl_rcr_u32,
501 iemAImpl_rcr_u64
502};
503
504/** Function table for the RCR instruction, AMD EFLAGS variant. */
505IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcr_amd =
506{
507 iemAImpl_rcr_u8_amd,
508 iemAImpl_rcr_u16_amd,
509 iemAImpl_rcr_u32_amd,
510 iemAImpl_rcr_u64_amd
511};
512
513/** Function table for the RCR instruction, Intel EFLAGS variant. */
514IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcr_intel =
515{
516 iemAImpl_rcr_u8_intel,
517 iemAImpl_rcr_u16_intel,
518 iemAImpl_rcr_u32_intel,
519 iemAImpl_rcr_u64_intel
520};
521
522/** EFLAGS variation selection table for the RCR instruction. */
523IEM_STATIC const IEMOPSHIFTSIZES * const g_iemAImpl_rcr_eflags[] =
524{
525 &g_iemAImpl_rcr,
526 &g_iemAImpl_rcr_intel,
527 &g_iemAImpl_rcr_amd,
528 &g_iemAImpl_rcr,
529};
530
531
532/** Function table for the SHL instruction. */
533IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shl =
534{
535 iemAImpl_shl_u8,
536 iemAImpl_shl_u16,
537 iemAImpl_shl_u32,
538 iemAImpl_shl_u64
539};
540
541/** Function table for the SHL instruction, AMD EFLAGS variant. */
542IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shl_amd =
543{
544 iemAImpl_shl_u8_amd,
545 iemAImpl_shl_u16_amd,
546 iemAImpl_shl_u32_amd,
547 iemAImpl_shl_u64_amd
548};
549
550/** Function table for the SHL instruction, Intel EFLAGS variant. */
551IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shl_intel =
552{
553 iemAImpl_shl_u8_intel,
554 iemAImpl_shl_u16_intel,
555 iemAImpl_shl_u32_intel,
556 iemAImpl_shl_u64_intel
557};
558
559/** EFLAGS variation selection table for the SHL instruction. */
560IEM_STATIC const IEMOPSHIFTSIZES * const g_iemAImpl_shl_eflags[] =
561{
562 &g_iemAImpl_shl,
563 &g_iemAImpl_shl_intel,
564 &g_iemAImpl_shl_amd,
565 &g_iemAImpl_shl,
566};
567
568
569/** Function table for the SHR instruction. */
570IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shr =
571{
572 iemAImpl_shr_u8,
573 iemAImpl_shr_u16,
574 iemAImpl_shr_u32,
575 iemAImpl_shr_u64
576};
577
578/** Function table for the SHR instruction, AMD EFLAGS variant. */
579IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shr_amd =
580{
581 iemAImpl_shr_u8_amd,
582 iemAImpl_shr_u16_amd,
583 iemAImpl_shr_u32_amd,
584 iemAImpl_shr_u64_amd
585};
586
587/** Function table for the SHR instruction, Intel EFLAGS variant. */
588IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shr_intel =
589{
590 iemAImpl_shr_u8_intel,
591 iemAImpl_shr_u16_intel,
592 iemAImpl_shr_u32_intel,
593 iemAImpl_shr_u64_intel
594};
595
596/** EFLAGS variation selection table for the SHR instruction. */
597IEM_STATIC const IEMOPSHIFTSIZES * const g_iemAImpl_shr_eflags[] =
598{
599 &g_iemAImpl_shr,
600 &g_iemAImpl_shr_intel,
601 &g_iemAImpl_shr_amd,
602 &g_iemAImpl_shr,
603};
604
605
606/** Function table for the SAR instruction. */
607IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_sar =
608{
609 iemAImpl_sar_u8,
610 iemAImpl_sar_u16,
611 iemAImpl_sar_u32,
612 iemAImpl_sar_u64
613};
614
615/** Function table for the SAR instruction, AMD EFLAGS variant. */
616IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_sar_amd =
617{
618 iemAImpl_sar_u8_amd,
619 iemAImpl_sar_u16_amd,
620 iemAImpl_sar_u32_amd,
621 iemAImpl_sar_u64_amd
622};
623
624/** Function table for the SAR instruction, Intel EFLAGS variant. */
625IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_sar_intel =
626{
627 iemAImpl_sar_u8_intel,
628 iemAImpl_sar_u16_intel,
629 iemAImpl_sar_u32_intel,
630 iemAImpl_sar_u64_intel
631};
632
633/** EFLAGS variation selection table for the SAR instruction. */
634IEM_STATIC const IEMOPSHIFTSIZES * const g_iemAImpl_sar_eflags[] =
635{
636 &g_iemAImpl_sar,
637 &g_iemAImpl_sar_intel,
638 &g_iemAImpl_sar_amd,
639 &g_iemAImpl_sar,
640};
641
642
643/** Function table for the MUL instruction. */
644IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_mul =
645{
646 iemAImpl_mul_u8,
647 iemAImpl_mul_u16,
648 iemAImpl_mul_u32,
649 iemAImpl_mul_u64
650};
651
652/** Function table for the MUL instruction, AMD EFLAGS variation. */
653IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_mul_amd =
654{
655 iemAImpl_mul_u8_amd,
656 iemAImpl_mul_u16_amd,
657 iemAImpl_mul_u32_amd,
658 iemAImpl_mul_u64_amd
659};
660
661/** Function table for the MUL instruction, Intel EFLAGS variation. */
662IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_mul_intel =
663{
664 iemAImpl_mul_u8_intel,
665 iemAImpl_mul_u16_intel,
666 iemAImpl_mul_u32_intel,
667 iemAImpl_mul_u64_intel
668};
669
670/** EFLAGS variation selection table for the MUL instruction. */
671IEM_STATIC const IEMOPMULDIVSIZES * const g_iemAImpl_mul_eflags[] =
672{
673 &g_iemAImpl_mul,
674 &g_iemAImpl_mul_intel,
675 &g_iemAImpl_mul_amd,
676 &g_iemAImpl_mul,
677};
678
679/** EFLAGS variation selection table for the 8-bit MUL instruction. */
680IEM_STATIC PFNIEMAIMPLMULDIVU8 const g_iemAImpl_mul_u8_eflags[] =
681{
682 iemAImpl_mul_u8,
683 iemAImpl_mul_u8_intel,
684 iemAImpl_mul_u8_amd,
685 iemAImpl_mul_u8
686};
687
688
689/** Function table for the IMUL instruction working implicitly on rAX. */
690IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_imul =
691{
692 iemAImpl_imul_u8,
693 iemAImpl_imul_u16,
694 iemAImpl_imul_u32,
695 iemAImpl_imul_u64
696};
697
698/** Function table for the IMUL instruction working implicitly on rAX, AMD EFLAGS variation. */
699IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_imul_amd =
700{
701 iemAImpl_imul_u8_amd,
702 iemAImpl_imul_u16_amd,
703 iemAImpl_imul_u32_amd,
704 iemAImpl_imul_u64_amd
705};
706
707/** Function table for the IMUL instruction working implicitly on rAX, Intel EFLAGS variation. */
708IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_imul_intel =
709{
710 iemAImpl_imul_u8_intel,
711 iemAImpl_imul_u16_intel,
712 iemAImpl_imul_u32_intel,
713 iemAImpl_imul_u64_intel
714};
715
716/** EFLAGS variation selection table for the IMUL instruction. */
717IEM_STATIC const IEMOPMULDIVSIZES * const g_iemAImpl_imul_eflags[] =
718{
719 &g_iemAImpl_imul,
720 &g_iemAImpl_imul_intel,
721 &g_iemAImpl_imul_amd,
722 &g_iemAImpl_imul,
723};
724
725/** EFLAGS variation selection table for the 8-bit IMUL instruction. */
726IEM_STATIC PFNIEMAIMPLMULDIVU8 const g_iemAImpl_imul_u8_eflags[] =
727{
728 iemAImpl_imul_u8,
729 iemAImpl_imul_u8_intel,
730 iemAImpl_imul_u8_amd,
731 iemAImpl_imul_u8
732};
733
734
735/** Function table for the DIV instruction. */
736IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_div =
737{
738 iemAImpl_div_u8,
739 iemAImpl_div_u16,
740 iemAImpl_div_u32,
741 iemAImpl_div_u64
742};
743
744/** Function table for the DIV instruction, AMD EFLAGS variation. */
745IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_div_amd =
746{
747 iemAImpl_div_u8_amd,
748 iemAImpl_div_u16_amd,
749 iemAImpl_div_u32_amd,
750 iemAImpl_div_u64_amd
751};
752
753/** Function table for the DIV instruction, Intel EFLAGS variation. */
754IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_div_intel =
755{
756 iemAImpl_div_u8_intel,
757 iemAImpl_div_u16_intel,
758 iemAImpl_div_u32_intel,
759 iemAImpl_div_u64_intel
760};
761
762/** EFLAGS variation selection table for the DIV instruction. */
763IEM_STATIC const IEMOPMULDIVSIZES * const g_iemAImpl_div_eflags[] =
764{
765 &g_iemAImpl_div,
766 &g_iemAImpl_div_intel,
767 &g_iemAImpl_div_amd,
768 &g_iemAImpl_div,
769};
770
771/** EFLAGS variation selection table for the 8-bit DIV instruction. */
772IEM_STATIC PFNIEMAIMPLMULDIVU8 const g_iemAImpl_div_u8_eflags[] =
773{
774 iemAImpl_div_u8,
775 iemAImpl_div_u8_intel,
776 iemAImpl_div_u8_amd,
777 iemAImpl_div_u8
778};
779
780
781/** Function table for the IDIV instruction. */
782IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_idiv =
783{
784 iemAImpl_idiv_u8,
785 iemAImpl_idiv_u16,
786 iemAImpl_idiv_u32,
787 iemAImpl_idiv_u64
788};
789
790/** Function table for the IDIV instruction, AMD EFLAGS variation. */
791IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_idiv_amd =
792{
793 iemAImpl_idiv_u8_amd,
794 iemAImpl_idiv_u16_amd,
795 iemAImpl_idiv_u32_amd,
796 iemAImpl_idiv_u64_amd
797};
798
799/** Function table for the IDIV instruction, Intel EFLAGS variation. */
800IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_idiv_intel =
801{
802 iemAImpl_idiv_u8_intel,
803 iemAImpl_idiv_u16_intel,
804 iemAImpl_idiv_u32_intel,
805 iemAImpl_idiv_u64_intel
806};
807
808/** EFLAGS variation selection table for the IDIV instruction. */
809IEM_STATIC const IEMOPMULDIVSIZES * const g_iemAImpl_idiv_eflags[] =
810{
811 &g_iemAImpl_idiv,
812 &g_iemAImpl_idiv_intel,
813 &g_iemAImpl_idiv_amd,
814 &g_iemAImpl_idiv,
815};
816
817/** EFLAGS variation selection table for the 8-bit IDIV instruction. */
818IEM_STATIC PFNIEMAIMPLMULDIVU8 const g_iemAImpl_idiv_u8_eflags[] =
819{
820 iemAImpl_idiv_u8,
821 iemAImpl_idiv_u8_intel,
822 iemAImpl_idiv_u8_amd,
823 iemAImpl_idiv_u8
824};
825
826
827/** Function table for the SHLD instruction. */
828IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shld =
829{
830 iemAImpl_shld_u16,
831 iemAImpl_shld_u32,
832 iemAImpl_shld_u64,
833};
834
835/** Function table for the SHLD instruction, AMD EFLAGS variation. */
836IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shld_amd =
837{
838 iemAImpl_shld_u16_amd,
839 iemAImpl_shld_u32_amd,
840 iemAImpl_shld_u64_amd
841};
842
843/** Function table for the SHLD instruction, Intel EFLAGS variation. */
844IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shld_intel =
845{
846 iemAImpl_shld_u16_intel,
847 iemAImpl_shld_u32_intel,
848 iemAImpl_shld_u64_intel
849};
850
851/** EFLAGS variation selection table for the SHLD instruction. */
852IEM_STATIC const IEMOPSHIFTDBLSIZES * const g_iemAImpl_shld_eflags[] =
853{
854 &g_iemAImpl_shld,
855 &g_iemAImpl_shld_intel,
856 &g_iemAImpl_shld_amd,
857 &g_iemAImpl_shld
858};
859
860/** Function table for the SHRD instruction. */
861IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shrd =
862{
863 iemAImpl_shrd_u16,
864 iemAImpl_shrd_u32,
865 iemAImpl_shrd_u64
866};
867
868/** Function table for the SHRD instruction, AMD EFLAGS variation. */
869IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shrd_amd =
870{
871 iemAImpl_shrd_u16_amd,
872 iemAImpl_shrd_u32_amd,
873 iemAImpl_shrd_u64_amd
874};
875
876/** Function table for the SHRD instruction, Intel EFLAGS variation. */
877IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shrd_intel =
878{
879 iemAImpl_shrd_u16_intel,
880 iemAImpl_shrd_u32_intel,
881 iemAImpl_shrd_u64_intel
882};
883
884/** EFLAGS variation selection table for the SHRD instruction. */
885IEM_STATIC const IEMOPSHIFTDBLSIZES * const g_iemAImpl_shrd_eflags[] =
886{
887 &g_iemAImpl_shrd,
888 &g_iemAImpl_shrd_intel,
889 &g_iemAImpl_shrd_amd,
890 &g_iemAImpl_shrd
891};
892
893
894# ifndef IEM_WITHOUT_ASSEMBLY
895/** Function table for the VPXOR instruction */
896IEM_STATIC const IEMOPMEDIAF3 g_iemAImpl_vpand = { iemAImpl_vpand_u128, iemAImpl_vpand_u256 };
897/** Function table for the VPXORN instruction */
898IEM_STATIC const IEMOPMEDIAF3 g_iemAImpl_vpandn = { iemAImpl_vpandn_u128, iemAImpl_vpandn_u256 };
899/** Function table for the VPOR instruction */
900IEM_STATIC const IEMOPMEDIAF3 g_iemAImpl_vpor = { iemAImpl_vpor_u128, iemAImpl_vpor_u256 };
901/** Function table for the VPXOR instruction */
902IEM_STATIC const IEMOPMEDIAF3 g_iemAImpl_vpxor = { iemAImpl_vpxor_u128, iemAImpl_vpxor_u256 };
903# endif
904
905/** Function table for the VPAND instruction, software fallback. */
906IEM_STATIC const IEMOPMEDIAF3 g_iemAImpl_vpand_fallback = { iemAImpl_vpand_u128_fallback, iemAImpl_vpand_u256_fallback };
907/** Function table for the VPANDN instruction, software fallback. */
908IEM_STATIC const IEMOPMEDIAF3 g_iemAImpl_vpandn_fallback= { iemAImpl_vpandn_u128_fallback, iemAImpl_vpandn_u256_fallback };
909/** Function table for the VPOR instruction, software fallback. */
910IEM_STATIC const IEMOPMEDIAF3 g_iemAImpl_vpor_fallback = { iemAImpl_vpor_u128_fallback, iemAImpl_vpor_u256_fallback };
911/** Function table for the VPXOR instruction, software fallback. */
912IEM_STATIC const IEMOPMEDIAF3 g_iemAImpl_vpxor_fallback = { iemAImpl_vpxor_u128_fallback, iemAImpl_vpxor_u256_fallback };
913
914#endif /* !TST_IEM_CHECK_MC */
915
916
/**
 * Common worker for instructions like ADD, AND, OR, ++ with a byte
 * memory/register as the destination.
 *
 * Decodes the ModR/M byte and dispatches to the register-register or the
 * register-memory micro-code sequence, honouring the LOCK prefix where the
 * implementation table provides a locked worker.
 *
 * @param   pImpl       Pointer to the instruction implementation (assembly).
 */
FNIEMOP_DEF_1(iemOpHlpBinaryOperator_rm_r8, PCIEMOPBINSIZES, pImpl)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);

    /*
     * If rm is denoting a register, no more instruction bytes.
     */
    if (IEM_IS_MODRM_REG_MODE(bRm))
    {
        /* Register destination: LOCK prefix is never valid here. */
        IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(3, 0);
        IEM_MC_ARG(uint8_t *,  pu8Dst,  0);
        IEM_MC_ARG(uint8_t,    u8Src,   1);
        IEM_MC_ARG(uint32_t *, pEFlags, 2);

        IEM_MC_FETCH_GREG_U8(u8Src, IEM_GET_MODRM_REG(pVCpu, bRm));
        IEM_MC_REF_GREG_U8(pu8Dst, IEM_GET_MODRM_RM(pVCpu, bRm));
        IEM_MC_REF_EFLAGS(pEFlags);
        IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, u8Src, pEFlags);

        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /*
         * We're accessing memory.
         * Note! We're putting the eflags on the stack here so we can commit them
         *       after the memory.
         */
        /* No locked worker means the op is read-only (CMP/TEST), so map R instead of RW. */
        uint32_t const fAccess = pImpl->pfnLockedU8 ? IEM_ACCESS_DATA_RW : IEM_ACCESS_DATA_R; /* CMP,TEST */
        IEM_MC_BEGIN(3, 2);
        IEM_MC_ARG(uint8_t *,  pu8Dst,           0);
        IEM_MC_ARG(uint8_t,    u8Src,            1);
        IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
        if (!pImpl->pfnLockedU8)
            /* Read-only op: reject a LOCK prefix up front. */
            IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
        IEM_MC_MEM_MAP(pu8Dst, fAccess, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0 /*arg*/);
        IEM_MC_FETCH_GREG_U8(u8Src, IEM_GET_MODRM_REG(pVCpu, bRm));
        IEM_MC_FETCH_EFLAGS(EFlags);
        if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))
            IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, u8Src, pEFlags);
        else
            IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU8, pu8Dst, u8Src, pEFlags);

        IEM_MC_MEM_COMMIT_AND_UNMAP(pu8Dst, fAccess);
        IEM_MC_COMMIT_EFLAGS(EFlags);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
979
980
981/**
982 * Common worker for word/dword/qword instructions like ADD, AND, OR, ++ with
983 * memory/register as the destination.
984 *
985 * @param pImpl Pointer to the instruction implementation (assembly).
986 */
987FNIEMOP_DEF_1(iemOpHlpBinaryOperator_rm_rv, PCIEMOPBINSIZES, pImpl)
988{
989 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
990
991 /*
992 * If rm is denoting a register, no more instruction bytes.
993 */
994 if (IEM_IS_MODRM_REG_MODE(bRm))
995 {
996 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
997
998 switch (pVCpu->iem.s.enmEffOpSize)
999 {
1000 case IEMMODE_16BIT:
1001 IEM_MC_BEGIN(3, 0);
1002 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
1003 IEM_MC_ARG(uint16_t, u16Src, 1);
1004 IEM_MC_ARG(uint32_t *, pEFlags, 2);
1005
1006 IEM_MC_FETCH_GREG_U16(u16Src, IEM_GET_MODRM_REG(pVCpu, bRm));
1007 IEM_MC_REF_GREG_U16(pu16Dst, IEM_GET_MODRM_RM(pVCpu, bRm));
1008 IEM_MC_REF_EFLAGS(pEFlags);
1009 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);
1010
1011 IEM_MC_ADVANCE_RIP();
1012 IEM_MC_END();
1013 break;
1014
1015 case IEMMODE_32BIT:
1016 IEM_MC_BEGIN(3, 0);
1017 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
1018 IEM_MC_ARG(uint32_t, u32Src, 1);
1019 IEM_MC_ARG(uint32_t *, pEFlags, 2);
1020
1021 IEM_MC_FETCH_GREG_U32(u32Src, IEM_GET_MODRM_REG(pVCpu, bRm));
1022 IEM_MC_REF_GREG_U32(pu32Dst, IEM_GET_MODRM_RM(pVCpu, bRm));
1023 IEM_MC_REF_EFLAGS(pEFlags);
1024 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);
1025
1026 if (pImpl != &g_iemAImpl_test)
1027 IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Dst);
1028 IEM_MC_ADVANCE_RIP();
1029 IEM_MC_END();
1030 break;
1031
1032 case IEMMODE_64BIT:
1033 IEM_MC_BEGIN(3, 0);
1034 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
1035 IEM_MC_ARG(uint64_t, u64Src, 1);
1036 IEM_MC_ARG(uint32_t *, pEFlags, 2);
1037
1038 IEM_MC_FETCH_GREG_U64(u64Src, IEM_GET_MODRM_REG(pVCpu, bRm));
1039 IEM_MC_REF_GREG_U64(pu64Dst, IEM_GET_MODRM_RM(pVCpu, bRm));
1040 IEM_MC_REF_EFLAGS(pEFlags);
1041 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);
1042
1043 IEM_MC_ADVANCE_RIP();
1044 IEM_MC_END();
1045 break;
1046 }
1047 }
1048 else
1049 {
1050 /*
1051 * We're accessing memory.
1052 * Note! We're putting the eflags on the stack here so we can commit them
1053 * after the memory.
1054 */
1055 uint32_t const fAccess = pImpl->pfnLockedU8 ? IEM_ACCESS_DATA_RW : IEM_ACCESS_DATA_R /* CMP,TEST */;
1056 switch (pVCpu->iem.s.enmEffOpSize)
1057 {
1058 case IEMMODE_16BIT:
1059 IEM_MC_BEGIN(3, 2);
1060 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
1061 IEM_MC_ARG(uint16_t, u16Src, 1);
1062 IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
1063 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
1064
1065 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
1066 if (!pImpl->pfnLockedU16)
1067 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
1068 IEM_MC_MEM_MAP(pu16Dst, fAccess, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0 /*arg*/);
1069 IEM_MC_FETCH_GREG_U16(u16Src, IEM_GET_MODRM_REG(pVCpu, bRm));
1070 IEM_MC_FETCH_EFLAGS(EFlags);
1071 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))
1072 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);
1073 else
1074 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU16, pu16Dst, u16Src, pEFlags);
1075
1076 IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, fAccess);
1077 IEM_MC_COMMIT_EFLAGS(EFlags);
1078 IEM_MC_ADVANCE_RIP();
1079 IEM_MC_END();
1080 break;
1081
1082 case IEMMODE_32BIT:
1083 IEM_MC_BEGIN(3, 2);
1084 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
1085 IEM_MC_ARG(uint32_t, u32Src, 1);
1086 IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
1087 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
1088
1089 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
1090 if (!pImpl->pfnLockedU32)
1091 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
1092 IEM_MC_MEM_MAP(pu32Dst, fAccess, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0 /*arg*/);
1093 IEM_MC_FETCH_GREG_U32(u32Src, IEM_GET_MODRM_REG(pVCpu, bRm));
1094 IEM_MC_FETCH_EFLAGS(EFlags);
1095 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))
1096 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);
1097 else
1098 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU32, pu32Dst, u32Src, pEFlags);
1099
1100 IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, fAccess);
1101 IEM_MC_COMMIT_EFLAGS(EFlags);
1102 IEM_MC_ADVANCE_RIP();
1103 IEM_MC_END();
1104 break;
1105
1106 case IEMMODE_64BIT:
1107 IEM_MC_BEGIN(3, 2);
1108 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
1109 IEM_MC_ARG(uint64_t, u64Src, 1);
1110 IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
1111 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
1112
1113 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
1114 if (!pImpl->pfnLockedU64)
1115 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
1116 IEM_MC_MEM_MAP(pu64Dst, fAccess, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0 /*arg*/);
1117 IEM_MC_FETCH_GREG_U64(u64Src, IEM_GET_MODRM_REG(pVCpu, bRm));
1118 IEM_MC_FETCH_EFLAGS(EFlags);
1119 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))
1120 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);
1121 else
1122 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU64, pu64Dst, u64Src, pEFlags);
1123
1124 IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, fAccess);
1125 IEM_MC_COMMIT_EFLAGS(EFlags);
1126 IEM_MC_ADVANCE_RIP();
1127 IEM_MC_END();
1128 break;
1129 }
1130 }
1131 return VINF_SUCCESS;
1132}
1133
1134
/**
 * Common worker for byte instructions like ADD, AND, OR, ++ with a register as
 * the destination.
 *
 * Handles the Gb,Eb operand order: the ModR/M reg field selects the
 * destination register while the r/m field (register or memory) supplies the
 * source.  Since the destination is always a register, the memory operand is
 * only read and the LOCK prefix is rejected in both paths.
 *
 * @param   pImpl       Pointer to the instruction implementation (assembly).
 */
FNIEMOP_DEF_1(iemOpHlpBinaryOperator_r8_rm, PCIEMOPBINSIZES, pImpl)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);

    /*
     * If rm is denoting a register, no more instruction bytes.
     */
    if (IEM_IS_MODRM_REG_MODE(bRm))
    {
        IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
        IEM_MC_BEGIN(3, 0);
        IEM_MC_ARG(uint8_t *,  pu8Dst,  0);
        IEM_MC_ARG(uint8_t,    u8Src,   1);
        IEM_MC_ARG(uint32_t *, pEFlags, 2);

        /* Source comes from the r/m field, destination from the reg field. */
        IEM_MC_FETCH_GREG_U8(u8Src, IEM_GET_MODRM_RM(pVCpu, bRm));
        IEM_MC_REF_GREG_U8(pu8Dst, IEM_GET_MODRM_REG(pVCpu, bRm));
        IEM_MC_REF_EFLAGS(pEFlags);
        IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, u8Src, pEFlags);

        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /*
         * We're accessing memory.  The memory operand is the source here, so
         * a plain fetch suffices - no mapping/commit and no locked variant.
         */
        IEM_MC_BEGIN(3, 1);
        IEM_MC_ARG(uint8_t *,  pu8Dst,  0);
        IEM_MC_ARG(uint8_t,    u8Src,   1);
        IEM_MC_ARG(uint32_t *, pEFlags, 2);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
        IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
        IEM_MC_FETCH_MEM_U8(u8Src, pVCpu->iem.s.iEffSeg, GCPtrEffDst);
        IEM_MC_REF_GREG_U8(pu8Dst, IEM_GET_MODRM_REG(pVCpu, bRm));
        IEM_MC_REF_EFLAGS(pEFlags);
        IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, u8Src, pEFlags);

        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
1187
1188
/**
 * Common worker for word/dword/qword instructions like ADD, AND, OR, ++ with a
 * register as the destination.
 *
 * Handles the Gv,Ev operand order: the ModR/M reg field selects the
 * destination register, the r/m field (register or memory) supplies the
 * source.  The memory operand is only read, so the LOCK prefix is rejected
 * and no mapped/locked memory access is needed.
 *
 * @param   pImpl       Pointer to the instruction implementation (assembly).
 */
FNIEMOP_DEF_1(iemOpHlpBinaryOperator_rv_rm, PCIEMOPBINSIZES, pImpl)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);

    /*
     * If rm is denoting a register, no more instruction bytes.
     */
    if (IEM_IS_MODRM_REG_MODE(bRm))
    {
        IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
        switch (pVCpu->iem.s.enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint16_t *, pu16Dst, 0);
                IEM_MC_ARG(uint16_t,   u16Src,  1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);

                IEM_MC_FETCH_GREG_U16(u16Src, IEM_GET_MODRM_RM(pVCpu, bRm));
                IEM_MC_REF_GREG_U16(pu16Dst, IEM_GET_MODRM_REG(pVCpu, bRm));
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint32_t *, pu32Dst, 0);
                IEM_MC_ARG(uint32_t,   u32Src,  1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);

                IEM_MC_FETCH_GREG_U32(u32Src, IEM_GET_MODRM_RM(pVCpu, bRm));
                IEM_MC_REF_GREG_U32(pu32Dst, IEM_GET_MODRM_REG(pVCpu, bRm));
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);

                /* 32-bit GPR writes clear the high half of the 64-bit register.
                   NOTE(review): no pImpl != &g_iemAImpl_test guard here, unlike
                   the r/m-destination worker above - presumably TEST never
                   decodes through this Gv,Ev path; worth confirming. */
                IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Dst);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint64_t *, pu64Dst, 0);
                IEM_MC_ARG(uint64_t,   u64Src,  1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);

                IEM_MC_FETCH_GREG_U64(u64Src, IEM_GET_MODRM_RM(pVCpu, bRm));
                IEM_MC_REF_GREG_U64(pu64Dst, IEM_GET_MODRM_REG(pVCpu, bRm));
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;
        }
    }
    else
    {
        /*
         * We're accessing memory.  The memory operand is the source, so it is
         * fetched directly - no map/commit cycle and no locked variants.
         */
        switch (pVCpu->iem.s.enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(3, 1);
                IEM_MC_ARG(uint16_t *, pu16Dst, 0);
                IEM_MC_ARG(uint16_t,   u16Src,  1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                IEM_MC_FETCH_MEM_U16(u16Src, pVCpu->iem.s.iEffSeg, GCPtrEffDst);
                IEM_MC_REF_GREG_U16(pu16Dst, IEM_GET_MODRM_REG(pVCpu, bRm));
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(3, 1);
                IEM_MC_ARG(uint32_t *, pu32Dst, 0);
                IEM_MC_ARG(uint32_t,   u32Src,  1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                IEM_MC_FETCH_MEM_U32(u32Src, pVCpu->iem.s.iEffSeg, GCPtrEffDst);
                IEM_MC_REF_GREG_U32(pu32Dst, IEM_GET_MODRM_REG(pVCpu, bRm));
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);

                /* 32-bit GPR writes clear the high half of the 64-bit register. */
                IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Dst);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(3, 1);
                IEM_MC_ARG(uint64_t *, pu64Dst, 0);
                IEM_MC_ARG(uint64_t,   u64Src,  1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                IEM_MC_FETCH_MEM_U64(u64Src, pVCpu->iem.s.iEffSeg, GCPtrEffDst);
                IEM_MC_REF_GREG_U64(pu64Dst, IEM_GET_MODRM_REG(pVCpu, bRm));
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;
        }
    }
    return VINF_SUCCESS;
}
1319
1320
/**
 * Common worker for instructions like ADD, AND, OR, ++ with working on AL with
 * a byte immediate.
 *
 * No ModR/M byte: AL is the implicit destination and the imm8 following the
 * opcode is the source.
 *
 * @param   pImpl       Pointer to the instruction implementation (assembly).
 */
FNIEMOP_DEF_1(iemOpHlpBinaryOperator_AL_Ib, PCIEMOPBINSIZES, pImpl)
{
    uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

    IEM_MC_BEGIN(3, 0);
    IEM_MC_ARG(uint8_t *,     pu8Dst,            0);
    IEM_MC_ARG_CONST(uint8_t, u8Src,/*=*/ u8Imm, 1);
    IEM_MC_ARG(uint32_t *,    pEFlags,           2);

    IEM_MC_REF_GREG_U8(pu8Dst, X86_GREG_xAX);
    IEM_MC_REF_EFLAGS(pEFlags);
    IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, u8Src, pEFlags);

    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}
1345
1346
/**
 * Common worker for instructions like ADD, AND, OR, ++ with working on
 * AX/EAX/RAX with a word/dword immediate.
 *
 * No ModR/M byte: the accumulator is the implicit destination and the Iz
 * immediate is the source.  In 64-bit mode the encoding still carries only a
 * 32-bit immediate, which is sign-extended to 64 bits.
 *
 * @param   pImpl       Pointer to the instruction implementation (assembly).
 */
FNIEMOP_DEF_1(iemOpHlpBinaryOperator_rAX_Iz, PCIEMOPBINSIZES, pImpl)
{
    switch (pVCpu->iem.s.enmEffOpSize)
    {
        case IEMMODE_16BIT:
        {
            uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm);
            IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

            IEM_MC_BEGIN(3, 0);
            IEM_MC_ARG(uint16_t *,     pu16Dst,             0);
            IEM_MC_ARG_CONST(uint16_t, u16Src,/*=*/ u16Imm, 1);
            IEM_MC_ARG(uint32_t *,     pEFlags,             2);

            IEM_MC_REF_GREG_U16(pu16Dst, X86_GREG_xAX);
            IEM_MC_REF_EFLAGS(pEFlags);
            IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);

            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;
        }

        case IEMMODE_32BIT:
        {
            uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(&u32Imm);
            IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

            IEM_MC_BEGIN(3, 0);
            IEM_MC_ARG(uint32_t *,     pu32Dst,             0);
            IEM_MC_ARG_CONST(uint32_t, u32Src,/*=*/ u32Imm, 1);
            IEM_MC_ARG(uint32_t *,     pEFlags,             2);

            IEM_MC_REF_GREG_U32(pu32Dst, X86_GREG_xAX);
            IEM_MC_REF_EFLAGS(pEFlags);
            IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);

            /* 32-bit writes clear the high half of RAX, except for TEST which
               does not modify its destination operand. */
            if (pImpl != &g_iemAImpl_test)
                IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Dst);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;
        }

        case IEMMODE_64BIT:
        {
            /* Only a 32-bit immediate is encoded; sign extend it to 64 bits. */
            uint64_t u64Imm; IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64Imm);
            IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

            IEM_MC_BEGIN(3, 0);
            IEM_MC_ARG(uint64_t *,     pu64Dst,             0);
            IEM_MC_ARG_CONST(uint64_t, u64Src,/*=*/ u64Imm, 1);
            IEM_MC_ARG(uint32_t *,     pEFlags,             2);

            IEM_MC_REF_GREG_U64(pu64Dst, X86_GREG_xAX);
            IEM_MC_REF_EFLAGS(pEFlags);
            IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);

            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;
        }

        /* Expands to a default: label asserting and returning on impossible
           enmEffOpSize values. */
        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
}
1419
1420
/** Opcodes 0xf1, 0xd6.
 *  Plain invalid opcode: no further bytes are consumed before raising \#UD. */
FNIEMOP_DEF(iemOp_Invalid)
{
    IEMOP_MNEMONIC(Invalid, "Invalid");
    return IEMOP_RAISE_INVALID_OPCODE();
}
1427
1428
/** Invalid with RM byte.
 *  The ModR/M byte has already been fetched by the caller (FNIEMOPRM
 *  signature) and is simply discarded before raising \#UD. */
FNIEMOPRM_DEF(iemOp_InvalidWithRM)
{
    RT_NOREF_PV(bRm);
    IEMOP_MNEMONIC(InvalidWithRm, "InvalidWithRM");
    return IEMOP_RAISE_INVALID_OPCODE();
}
1436
1437
/** Invalid with RM byte where intel decodes any additional address encoding
 * bytes. */
FNIEMOPRM_DEF(iemOp_InvalidWithRMNeedDecode)
{
    IEMOP_MNEMONIC(InvalidWithRMNeedDecode, "InvalidWithRMNeedDecode");
    /* On Intel the SIB/displacement bytes are decoded (and the resulting
       address discarded) before \#UD is raised - presumably so the consumed
       instruction length matches real hardware.  Non-Intel skips this. */
    if (pVCpu->iem.s.enmCpuVendor == CPUMCPUVENDOR_INTEL)
    {
#ifndef TST_IEM_CHECK_MC /* the MC-check testcase build cannot run the address decoder */
        if (IEM_IS_MODRM_MEM_MODE(bRm))
        {
            RTGCPTR      GCPtrEff;
            VBOXSTRICTRC rcStrict = iemOpHlpCalcRmEffAddr(pVCpu, bRm, 0, &GCPtrEff);
            if (rcStrict != VINF_SUCCESS)
                return rcStrict;
        }
#endif
    }
    IEMOP_HLP_DONE_DECODING();
    return IEMOP_RAISE_INVALID_OPCODE();
}
1458
1459
/** Invalid with RM byte where both AMD and Intel decodes any additional
 * address encoding bytes.
 *  Same as iemOp_InvalidWithRMNeedDecode but unconditional - no CPU vendor
 *  check, the SIB/displacement bytes are always consumed. */
FNIEMOPRM_DEF(iemOp_InvalidWithRMAllNeeded)
{
    IEMOP_MNEMONIC(InvalidWithRMAllNeeded, "InvalidWithRMAllNeeded");
#ifndef TST_IEM_CHECK_MC /* the MC-check testcase build cannot run the address decoder */
    if (IEM_IS_MODRM_MEM_MODE(bRm))
    {
        /* Decode and discard the effective address to consume the bytes. */
        RTGCPTR      GCPtrEff;
        VBOXSTRICTRC rcStrict = iemOpHlpCalcRmEffAddr(pVCpu, bRm, 0, &GCPtrEff);
        if (rcStrict != VINF_SUCCESS)
            return rcStrict;
    }
#endif
    IEMOP_HLP_DONE_DECODING();
    return IEMOP_RAISE_INVALID_OPCODE();
}
1477
1478
/** Invalid with RM byte where intel requires an 8-bit (one byte) immediate.
 * Intel will also need SIB and displacement if bRm indicates memory. */
FNIEMOPRM_DEF(iemOp_InvalidWithRMNeedImm8)
{
    IEMOP_MNEMONIC(InvalidWithRMNeedImm8, "InvalidWithRMNeedImm8");
    /* Intel consumes the full addressing encoding plus the imm8 before
       raising \#UD; other vendors stop right after the ModR/M byte. */
    if (pVCpu->iem.s.enmCpuVendor == CPUMCPUVENDOR_INTEL)
    {
#ifndef TST_IEM_CHECK_MC /* the MC-check testcase build cannot run the address decoder */
        if (IEM_IS_MODRM_MEM_MODE(bRm))
        {
            RTGCPTR      GCPtrEff;
            /* NOTE(review): cbImm is 0 here even though an imm8 follows,
               while iemOp_InvalidNeed3ByteEscRMImm8 passes 1 - confirm which
               value is intended for RIP-relative addressing. */
            VBOXSTRICTRC rcStrict = iemOpHlpCalcRmEffAddr(pVCpu, bRm, 0, &GCPtrEff);
            if (rcStrict != VINF_SUCCESS)
                return rcStrict;
        }
#endif
        uint8_t bImm8; IEM_OPCODE_GET_NEXT_U8(&bImm8); RT_NOREF(bRm);
    }
    IEMOP_HLP_DONE_DECODING();
    return IEMOP_RAISE_INVALID_OPCODE();
}
1500
1501
/** Invalid with RM byte where an 8-bit (one byte) immediate is required.
 * Both AMD and Intel also needs SIB and displacement according to bRm.
 *  Unconditional variant of iemOp_InvalidWithRMNeedImm8 - no vendor check. */
FNIEMOPRM_DEF(iemOp_InvalidWithRMAllNeedImm8)
{
    IEMOP_MNEMONIC(InvalidWithRMAllNeedImm8, "InvalidWithRMAllNeedImm8");
#ifndef TST_IEM_CHECK_MC /* the MC-check testcase build cannot run the address decoder */
    if (IEM_IS_MODRM_MEM_MODE(bRm))
    {
        RTGCPTR      GCPtrEff;
        /* NOTE(review): cbImm is 0 here even though an imm8 follows, while
           iemOp_InvalidNeed3ByteEscRMImm8 passes 1 - confirm intent. */
        VBOXSTRICTRC rcStrict = iemOpHlpCalcRmEffAddr(pVCpu, bRm, 0, &GCPtrEff);
        if (rcStrict != VINF_SUCCESS)
            return rcStrict;
    }
#endif
    uint8_t bImm8; IEM_OPCODE_GET_NEXT_U8(&bImm8); RT_NOREF(bRm);
    IEMOP_HLP_DONE_DECODING();
    return IEMOP_RAISE_INVALID_OPCODE();
}
1520
1521
/** Invalid opcode where intel requires Mod R/M sequence.
 *  Unlike the FNIEMOPRM variants, the ModR/M byte has not been fetched yet;
 *  Intel fetches it (plus SIB/displacement) before raising \#UD. */
FNIEMOP_DEF(iemOp_InvalidNeedRM)
{
    IEMOP_MNEMONIC(InvalidNeedRM, "InvalidNeedRM");
    if (pVCpu->iem.s.enmCpuVendor == CPUMCPUVENDOR_INTEL)
    {
        uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm); RT_NOREF(bRm);
#ifndef TST_IEM_CHECK_MC /* the MC-check testcase build cannot run the address decoder */
        if (IEM_IS_MODRM_MEM_MODE(bRm))
        {
            /* Decode and discard the effective address to consume the bytes. */
            RTGCPTR      GCPtrEff;
            VBOXSTRICTRC rcStrict = iemOpHlpCalcRmEffAddr(pVCpu, bRm, 0, &GCPtrEff);
            if (rcStrict != VINF_SUCCESS)
                return rcStrict;
        }
#endif
    }
    IEMOP_HLP_DONE_DECODING();
    return IEMOP_RAISE_INVALID_OPCODE();
}
1542
1543
/** Invalid opcode where both AMD and Intel requires Mod R/M sequence.
 *  Unconditional variant of iemOp_InvalidNeedRM - no vendor check. */
FNIEMOP_DEF(iemOp_InvalidAllNeedRM)
{
    IEMOP_MNEMONIC(InvalidAllNeedRM, "InvalidAllNeedRM");
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm); RT_NOREF(bRm);
#ifndef TST_IEM_CHECK_MC /* the MC-check testcase build cannot run the address decoder */
    if (IEM_IS_MODRM_MEM_MODE(bRm))
    {
        /* Decode and discard the effective address to consume the bytes. */
        RTGCPTR      GCPtrEff;
        VBOXSTRICTRC rcStrict = iemOpHlpCalcRmEffAddr(pVCpu, bRm, 0, &GCPtrEff);
        if (rcStrict != VINF_SUCCESS)
            return rcStrict;
    }
#endif
    IEMOP_HLP_DONE_DECODING();
    return IEMOP_RAISE_INVALID_OPCODE();
}
1561
1562
/** Invalid opcode where intel requires Mod R/M sequence and an 8-bit (one
 * byte) immediate.
 *  Intel fetches ModR/M, any SIB/displacement, and the imm8 before raising
 *  \#UD; other vendors consume nothing further. */
FNIEMOP_DEF(iemOp_InvalidNeedRMImm8)
{
    IEMOP_MNEMONIC(InvalidNeedRMImm8, "InvalidNeedRMImm8");
    if (pVCpu->iem.s.enmCpuVendor == CPUMCPUVENDOR_INTEL)
    {
        uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm); RT_NOREF(bRm);
#ifndef TST_IEM_CHECK_MC /* the MC-check testcase build cannot run the address decoder */
        if (IEM_IS_MODRM_MEM_MODE(bRm))
        {
            RTGCPTR      GCPtrEff;
            VBOXSTRICTRC rcStrict = iemOpHlpCalcRmEffAddr(pVCpu, bRm, 0, &GCPtrEff);
            if (rcStrict != VINF_SUCCESS)
                return rcStrict;
        }
#endif
        uint8_t bImm; IEM_OPCODE_GET_NEXT_U8(&bImm); RT_NOREF(bImm);
    }
    IEMOP_HLP_DONE_DECODING();
    return IEMOP_RAISE_INVALID_OPCODE();
}
1585
1586
/** Invalid opcode where intel requires a 3rd escape byte and a Mod R/M
 * sequence.
 *  Intel fetches the third opcode byte, the ModR/M byte and any
 *  SIB/displacement before raising \#UD; other vendors consume nothing. */
FNIEMOP_DEF(iemOp_InvalidNeed3ByteEscRM)
{
    IEMOP_MNEMONIC(InvalidNeed3ByteEscRM, "InvalidNeed3ByteEscRM");
    if (pVCpu->iem.s.enmCpuVendor == CPUMCPUVENDOR_INTEL)
    {
        uint8_t b3rd; IEM_OPCODE_GET_NEXT_U8(&b3rd); RT_NOREF(b3rd);
        uint8_t bRm;  IEM_OPCODE_GET_NEXT_U8(&bRm);  RT_NOREF(bRm);
#ifndef TST_IEM_CHECK_MC /* the MC-check testcase build cannot run the address decoder */
        if (IEM_IS_MODRM_MEM_MODE(bRm))
        {
            RTGCPTR      GCPtrEff;
            VBOXSTRICTRC rcStrict = iemOpHlpCalcRmEffAddr(pVCpu, bRm, 0, &GCPtrEff);
            if (rcStrict != VINF_SUCCESS)
                return rcStrict;
        }
#endif
    }
    IEMOP_HLP_DONE_DECODING();
    return IEMOP_RAISE_INVALID_OPCODE();
}
1609
1610
1611/** Invalid opcode where intel requires a 3rd escape byte, Mod R/M sequence, and
1612 * a 8-byte immediate. */
1613FNIEMOP_DEF(iemOp_InvalidNeed3ByteEscRMImm8)
1614{
1615 IEMOP_MNEMONIC(InvalidNeed3ByteEscRMImm8, "InvalidNeed3ByteEscRMImm8");
1616 if (pVCpu->iem.s.enmCpuVendor == CPUMCPUVENDOR_INTEL)
1617 {
1618 uint8_t b3rd; IEM_OPCODE_GET_NEXT_U8(&b3rd); RT_NOREF(b3rd);
1619 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm); RT_NOREF(bRm);
1620#ifndef TST_IEM_CHECK_MC
1621 if (IEM_IS_MODRM_MEM_MODE(bRm))
1622 {
1623 RTGCPTR GCPtrEff;
1624 VBOXSTRICTRC rcStrict = iemOpHlpCalcRmEffAddr(pVCpu, bRm, 1, &GCPtrEff);
1625 if (rcStrict != VINF_SUCCESS)
1626 return rcStrict;
1627 }
1628#endif
1629 uint8_t bImm; IEM_OPCODE_GET_NEXT_U8(&bImm); RT_NOREF(bImm);
1630 IEMOP_HLP_DONE_DECODING();
1631 }
1632 return IEMOP_RAISE_INVALID_OPCODE();
1633}
1634
1635
/** Repeats a_fn four times.  For decoding tables, where four consecutive
 *  table slots share the same handler. */
#define IEMOP_X4(a_fn) a_fn, a_fn, a_fn, a_fn
1638
1639/*
1640 * Include the tables.
1641 */
1642#ifdef IEM_WITH_3DNOW
1643# include "IEMAllInstructions3DNow.cpp.h"
1644#endif
1645#ifdef IEM_WITH_THREE_0F_38
1646# include "IEMAllInstructionsThree0f38.cpp.h"
1647#endif
1648#ifdef IEM_WITH_THREE_0F_3A
1649# include "IEMAllInstructionsThree0f3a.cpp.h"
1650#endif
1651#include "IEMAllInstructionsTwoByte0f.cpp.h"
1652#ifdef IEM_WITH_VEX
1653# include "IEMAllInstructionsVexMap1.cpp.h"
1654# include "IEMAllInstructionsVexMap2.cpp.h"
1655# include "IEMAllInstructionsVexMap3.cpp.h"
1656#endif
1657#include "IEMAllInstructionsOneByte.cpp.h"
1658
1659
1660#ifdef _MSC_VER
1661# pragma warning(pop)
1662#endif
1663
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette