VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllInstructionsInterpretOnly.cpp@ 95307

Last change on this file since 95307 was 94768, checked in by vboxsync, 3 years ago

VMM/IEM: Split up IEMAll.cpp into a few more compilation units. bugref:9898

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 51.6 KB
Line 
1/* $Id: IEMAllInstructionsInterpretOnly.cpp 94768 2022-05-01 22:02:17Z vboxsync $ */
2/** @file
3 * IEM - Instruction Decoding and Emulation.
4 */
5
6/*
7 * Copyright (C) 2011-2022 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#ifndef LOG_GROUP /* defined when included by tstIEMCheckMc.cpp */
23# define LOG_GROUP LOG_GROUP_IEM
24#endif
25#define VMCPU_INCL_CPUM_GST_CTX
26#include <VBox/vmm/iem.h>
27#include <VBox/vmm/cpum.h>
28#include <VBox/vmm/apic.h>
29#include <VBox/vmm/pdm.h>
30#include <VBox/vmm/pgm.h>
31#include <VBox/vmm/iom.h>
32#include <VBox/vmm/em.h>
33#include <VBox/vmm/hm.h>
34#include <VBox/vmm/nem.h>
35#include <VBox/vmm/gim.h>
36#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
37# include <VBox/vmm/em.h>
38# include <VBox/vmm/hm_svm.h>
39#endif
40#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
41# include <VBox/vmm/hmvmxinline.h>
42#endif
43#include <VBox/vmm/tm.h>
44#include <VBox/vmm/dbgf.h>
45#include <VBox/vmm/dbgftrace.h>
46#ifndef TST_IEM_CHECK_MC
47# include "IEMInternal.h"
48#endif
49#include <VBox/vmm/vmcc.h>
50#include <VBox/log.h>
51#include <VBox/err.h>
52#include <VBox/param.h>
53#include <VBox/dis.h>
54#include <VBox/disopcode.h>
55#include <iprt/asm-math.h>
56#include <iprt/assert.h>
57#include <iprt/string.h>
58#include <iprt/x86.h>
59
60#ifndef TST_IEM_CHECK_MC
61# include "IEMInline.h"
62# include "IEMOpHlp.h"
63# include "IEMMc.h"
64#endif
65
66
67#ifdef _MSC_VER
68# pragma warning(push)
69# pragma warning(disable: 4702) /* Unreachable code like return in iemOp_Grp6_lldt. */
70#endif
71
72
73/*********************************************************************************************************************************
74* Global Variables *
75*********************************************************************************************************************************/
76#ifndef TST_IEM_CHECK_MC
/** Function table for the ADD instruction.
 *  Entries are { normal, locked } worker pairs for 8, 16, 32 and 64-bit
 *  operand sizes; the locked column is used for LOCK-prefixed forms. */
IEM_STATIC const IEMOPBINSIZES g_iemAImpl_add =
{
    iemAImpl_add_u8,  iemAImpl_add_u8_locked,
    iemAImpl_add_u16, iemAImpl_add_u16_locked,
    iemAImpl_add_u32, iemAImpl_add_u32_locked,
    iemAImpl_add_u64, iemAImpl_add_u64_locked
};

/** Function table for the ADC instruction. */
IEM_STATIC const IEMOPBINSIZES g_iemAImpl_adc =
{
    iemAImpl_adc_u8,  iemAImpl_adc_u8_locked,
    iemAImpl_adc_u16, iemAImpl_adc_u16_locked,
    iemAImpl_adc_u32, iemAImpl_adc_u32_locked,
    iemAImpl_adc_u64, iemAImpl_adc_u64_locked
};

/** Function table for the SUB instruction. */
IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sub =
{
    iemAImpl_sub_u8,  iemAImpl_sub_u8_locked,
    iemAImpl_sub_u16, iemAImpl_sub_u16_locked,
    iemAImpl_sub_u32, iemAImpl_sub_u32_locked,
    iemAImpl_sub_u64, iemAImpl_sub_u64_locked
};

/** Function table for the SBB instruction. */
IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sbb =
{
    iemAImpl_sbb_u8,  iemAImpl_sbb_u8_locked,
    iemAImpl_sbb_u16, iemAImpl_sbb_u16_locked,
    iemAImpl_sbb_u32, iemAImpl_sbb_u32_locked,
    iemAImpl_sbb_u64, iemAImpl_sbb_u64_locked
};

/** Function table for the OR instruction. */
IEM_STATIC const IEMOPBINSIZES g_iemAImpl_or =
{
    iemAImpl_or_u8,  iemAImpl_or_u8_locked,
    iemAImpl_or_u16, iemAImpl_or_u16_locked,
    iemAImpl_or_u32, iemAImpl_or_u32_locked,
    iemAImpl_or_u64, iemAImpl_or_u64_locked
};

/** Function table for the XOR instruction. */
IEM_STATIC const IEMOPBINSIZES g_iemAImpl_xor =
{
    iemAImpl_xor_u8,  iemAImpl_xor_u8_locked,
    iemAImpl_xor_u16, iemAImpl_xor_u16_locked,
    iemAImpl_xor_u32, iemAImpl_xor_u32_locked,
    iemAImpl_xor_u64, iemAImpl_xor_u64_locked
};

/** Function table for the AND instruction. */
IEM_STATIC const IEMOPBINSIZES g_iemAImpl_and =
{
    iemAImpl_and_u8,  iemAImpl_and_u8_locked,
    iemAImpl_and_u16, iemAImpl_and_u16_locked,
    iemAImpl_and_u32, iemAImpl_and_u32_locked,
    iemAImpl_and_u64, iemAImpl_and_u64_locked
};

/** Function table for the CMP instruction.
 * @remarks Making operand order ASSUMPTIONS.
 * @note    The NULL locked entries mark CMP as a read-only, non-lockable
 *          operation; the decode helpers key off this (see the
 *          pfnLockedU8 checks in iemOpHlpBinaryOperator_rm_r8).
 */
IEM_STATIC const IEMOPBINSIZES g_iemAImpl_cmp =
{
    iemAImpl_cmp_u8,  NULL,
    iemAImpl_cmp_u16, NULL,
    iemAImpl_cmp_u32, NULL,
    iemAImpl_cmp_u64, NULL
};

/** Function table for the TEST instruction.
 * @remarks Making operand order ASSUMPTIONS.
 * @note    Like CMP, TEST never writes its destination, hence no locked
 *          workers.
 */
IEM_STATIC const IEMOPBINSIZES g_iemAImpl_test =
{
    iemAImpl_test_u8,  NULL,
    iemAImpl_test_u16, NULL,
    iemAImpl_test_u32, NULL,
    iemAImpl_test_u64, NULL
};
161
162
/** Function table for the BT instruction.
 *  The leading NULL pair reflects that the bit-test instructions have no
 *  8-bit operand form; BT itself only reads, so it has no locked workers. */
IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bt =
{
    NULL,  NULL,
    iemAImpl_bt_u16, NULL,
    iemAImpl_bt_u32, NULL,
    iemAImpl_bt_u64, NULL
};

/** Function table for the BTC instruction. */
IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btc =
{
    NULL,  NULL,
    iemAImpl_btc_u16, iemAImpl_btc_u16_locked,
    iemAImpl_btc_u32, iemAImpl_btc_u32_locked,
    iemAImpl_btc_u64, iemAImpl_btc_u64_locked
};

/** Function table for the BTR instruction. */
IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btr =
{
    NULL,  NULL,
    iemAImpl_btr_u16, iemAImpl_btr_u16_locked,
    iemAImpl_btr_u32, iemAImpl_btr_u32_locked,
    iemAImpl_btr_u64, iemAImpl_btr_u64_locked
};

/** Function table for the BTS instruction. */
IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bts =
{
    NULL,  NULL,
    iemAImpl_bts_u16, iemAImpl_bts_u16_locked,
    iemAImpl_bts_u32, iemAImpl_bts_u32_locked,
    iemAImpl_bts_u64, iemAImpl_bts_u64_locked
};
198
/** Function table for the BSF instruction.
 *  No 8-bit form and no locked workers (BSF only writes a register). */
IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsf =
{
    NULL,  NULL,
    iemAImpl_bsf_u16, NULL,
    iemAImpl_bsf_u32, NULL,
    iemAImpl_bsf_u64, NULL
};

/** Function table for the BSF instruction, AMD EFLAGS variant. */
IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsf_amd =
{
    NULL,  NULL,
    iemAImpl_bsf_u16_amd, NULL,
    iemAImpl_bsf_u32_amd, NULL,
    iemAImpl_bsf_u64_amd, NULL
};

/** Function table for the BSF instruction, Intel EFLAGS variant. */
IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsf_intel =
{
    NULL,  NULL,
    iemAImpl_bsf_u16_intel, NULL,
    iemAImpl_bsf_u32_intel, NULL,
    iemAImpl_bsf_u64_intel, NULL
};

/** EFLAGS variation selection table for the BSF instruction.
 *  Entry order: [0] generic, [1] Intel, [2] AMD, [3] generic again —
 *  presumably indexed by the guest-CPU EFLAGS-variation setting;
 *  NOTE(review): confirm index semantics against the decoder's selector. */
IEM_STATIC const IEMOPBINSIZES * const g_iemAImpl_bsf_eflags[] =
{
    &g_iemAImpl_bsf,
    &g_iemAImpl_bsf_intel,
    &g_iemAImpl_bsf_amd,
    &g_iemAImpl_bsf,
};

/** Function table for the BSR instruction. */
IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsr =
{
    NULL,  NULL,
    iemAImpl_bsr_u16, NULL,
    iemAImpl_bsr_u32, NULL,
    iemAImpl_bsr_u64, NULL
};

/** Function table for the BSR instruction, AMD EFLAGS variant. */
IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsr_amd =
{
    NULL,  NULL,
    iemAImpl_bsr_u16_amd, NULL,
    iemAImpl_bsr_u32_amd, NULL,
    iemAImpl_bsr_u64_amd, NULL
};

/** Function table for the BSR instruction, Intel EFLAGS variant. */
IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsr_intel =
{
    NULL,  NULL,
    iemAImpl_bsr_u16_intel, NULL,
    iemAImpl_bsr_u32_intel, NULL,
    iemAImpl_bsr_u64_intel, NULL
};

/** EFLAGS variation selection table for the BSR instruction.
 *  Same [generic, Intel, AMD, generic] layout as g_iemAImpl_bsf_eflags. */
IEM_STATIC const IEMOPBINSIZES * const g_iemAImpl_bsr_eflags[] =
{
    &g_iemAImpl_bsr,
    &g_iemAImpl_bsr_intel,
    &g_iemAImpl_bsr_amd,
    &g_iemAImpl_bsr,
};
270
/** Function table for the two-operand IMUL instruction (reg, r/m).
 *  No 8-bit form exists for this encoding and it never takes LOCK. */
IEM_STATIC const IEMOPBINSIZES g_iemAImpl_imul_two =
{
    NULL,  NULL,
    iemAImpl_imul_two_u16, NULL,
    iemAImpl_imul_two_u32, NULL,
    iemAImpl_imul_two_u64, NULL
};

/** Function table for the IMUL instruction, AMD EFLAGS variant. */
IEM_STATIC const IEMOPBINSIZES g_iemAImpl_imul_two_amd =
{
    NULL,  NULL,
    iemAImpl_imul_two_u16_amd, NULL,
    iemAImpl_imul_two_u32_amd, NULL,
    iemAImpl_imul_two_u64_amd, NULL
};

/** Function table for the IMUL instruction, Intel EFLAGS variant. */
IEM_STATIC const IEMOPBINSIZES g_iemAImpl_imul_two_intel =
{
    NULL,  NULL,
    iemAImpl_imul_two_u16_intel, NULL,
    iemAImpl_imul_two_u32_intel, NULL,
    iemAImpl_imul_two_u64_intel, NULL
};

/** EFLAGS variation selection table for the IMUL instruction.
 *  Layout: [0] generic, [1] Intel, [2] AMD, [3] generic. */
IEM_STATIC const IEMOPBINSIZES * const g_iemAImpl_imul_two_eflags[] =
{
    &g_iemAImpl_imul_two,
    &g_iemAImpl_imul_two_intel,
    &g_iemAImpl_imul_two_amd,
    &g_iemAImpl_imul_two,
};

/** EFLAGS variation selection table for the 16-bit IMUL instruction. */
IEM_STATIC PFNIEMAIMPLBINU16 const g_iemAImpl_imul_two_u16_eflags[] =
{
    iemAImpl_imul_two_u16,
    iemAImpl_imul_two_u16_intel,
    iemAImpl_imul_two_u16_amd,
    iemAImpl_imul_two_u16,
};

/** EFLAGS variation selection table for the 32-bit IMUL instruction. */
IEM_STATIC PFNIEMAIMPLBINU32 const g_iemAImpl_imul_two_u32_eflags[] =
{
    iemAImpl_imul_two_u32,
    iemAImpl_imul_two_u32_intel,
    iemAImpl_imul_two_u32_amd,
    iemAImpl_imul_two_u32,
};

/** EFLAGS variation selection table for the 64-bit IMUL instruction. */
IEM_STATIC PFNIEMAIMPLBINU64 const g_iemAImpl_imul_two_u64_eflags[] =
{
    iemAImpl_imul_two_u64,
    iemAImpl_imul_two_u64_intel,
    iemAImpl_imul_two_u64_amd,
    iemAImpl_imul_two_u64,
};
333
/** Group 1 /r lookup table.
 *  Indexed by the ModR/M reg field (/0../7) of the group 1 opcodes:
 *  /0=ADD /1=OR /2=ADC /3=SBB /4=AND /5=SUB /6=XOR /7=CMP. */
IEM_STATIC const PCIEMOPBINSIZES g_apIemImplGrp1[8] =
{
    &g_iemAImpl_add,
    &g_iemAImpl_or,
    &g_iemAImpl_adc,
    &g_iemAImpl_sbb,
    &g_iemAImpl_and,
    &g_iemAImpl_sub,
    &g_iemAImpl_xor,
    &g_iemAImpl_cmp
};
346
/** Function table for the INC instruction.
 *  { normal, locked } pairs for 8/16/32/64-bit operands. */
IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_inc =
{
    iemAImpl_inc_u8,  iemAImpl_inc_u8_locked,
    iemAImpl_inc_u16, iemAImpl_inc_u16_locked,
    iemAImpl_inc_u32, iemAImpl_inc_u32_locked,
    iemAImpl_inc_u64, iemAImpl_inc_u64_locked
};

/** Function table for the DEC instruction. */
IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_dec =
{
    iemAImpl_dec_u8,  iemAImpl_dec_u8_locked,
    iemAImpl_dec_u16, iemAImpl_dec_u16_locked,
    iemAImpl_dec_u32, iemAImpl_dec_u32_locked,
    iemAImpl_dec_u64, iemAImpl_dec_u64_locked
};

/** Function table for the NEG instruction. */
IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_neg =
{
    iemAImpl_neg_u8,  iemAImpl_neg_u8_locked,
    iemAImpl_neg_u16, iemAImpl_neg_u16_locked,
    iemAImpl_neg_u32, iemAImpl_neg_u32_locked,
    iemAImpl_neg_u64, iemAImpl_neg_u64_locked
};

/** Function table for the NOT instruction. */
IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_not =
{
    iemAImpl_not_u8,  iemAImpl_not_u8_locked,
    iemAImpl_not_u16, iemAImpl_not_u16_locked,
    iemAImpl_not_u32, iemAImpl_not_u32_locked,
    iemAImpl_not_u64, iemAImpl_not_u64_locked
};
382
383
/** Function table for the ROL instruction.
 *  One worker per operand size (8/16/32/64); shifts/rotates have no
 *  locked forms, hence the simpler IEMOPSHIFTSIZES layout. */
IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rol =
{
    iemAImpl_rol_u8,
    iemAImpl_rol_u16,
    iemAImpl_rol_u32,
    iemAImpl_rol_u64
};

/** Function table for the ROL instruction, AMD EFLAGS variant. */
IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rol_amd =
{
    iemAImpl_rol_u8_amd,
    iemAImpl_rol_u16_amd,
    iemAImpl_rol_u32_amd,
    iemAImpl_rol_u64_amd
};

/** Function table for the ROL instruction, Intel EFLAGS variant. */
IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rol_intel =
{
    iemAImpl_rol_u8_intel,
    iemAImpl_rol_u16_intel,
    iemAImpl_rol_u32_intel,
    iemAImpl_rol_u64_intel
};

/** EFLAGS variation selection table for the ROL instruction.
 *  Layout: [0] generic, [1] Intel, [2] AMD, [3] generic. */
IEM_STATIC const IEMOPSHIFTSIZES * const g_iemAImpl_rol_eflags[] =
{
    &g_iemAImpl_rol,
    &g_iemAImpl_rol_intel,
    &g_iemAImpl_rol_amd,
    &g_iemAImpl_rol,
};


/** Function table for the ROR instruction. */
IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_ror =
{
    iemAImpl_ror_u8,
    iemAImpl_ror_u16,
    iemAImpl_ror_u32,
    iemAImpl_ror_u64
};

/** Function table for the ROR instruction, AMD EFLAGS variant. */
IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_ror_amd =
{
    iemAImpl_ror_u8_amd,
    iemAImpl_ror_u16_amd,
    iemAImpl_ror_u32_amd,
    iemAImpl_ror_u64_amd
};

/** Function table for the ROR instruction, Intel EFLAGS variant. */
IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_ror_intel =
{
    iemAImpl_ror_u8_intel,
    iemAImpl_ror_u16_intel,
    iemAImpl_ror_u32_intel,
    iemAImpl_ror_u64_intel
};

/** EFLAGS variation selection table for the ROR instruction. */
IEM_STATIC const IEMOPSHIFTSIZES * const g_iemAImpl_ror_eflags[] =
{
    &g_iemAImpl_ror,
    &g_iemAImpl_ror_intel,
    &g_iemAImpl_ror_amd,
    &g_iemAImpl_ror,
};
456
457
/** Function table for the RCL instruction (rotate through carry left). */
IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcl =
{
    iemAImpl_rcl_u8,
    iemAImpl_rcl_u16,
    iemAImpl_rcl_u32,
    iemAImpl_rcl_u64
};

/** Function table for the RCL instruction, AMD EFLAGS variant. */
IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcl_amd =
{
    iemAImpl_rcl_u8_amd,
    iemAImpl_rcl_u16_amd,
    iemAImpl_rcl_u32_amd,
    iemAImpl_rcl_u64_amd
};

/** Function table for the RCL instruction, Intel EFLAGS variant. */
IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcl_intel =
{
    iemAImpl_rcl_u8_intel,
    iemAImpl_rcl_u16_intel,
    iemAImpl_rcl_u32_intel,
    iemAImpl_rcl_u64_intel
};

/** EFLAGS variation selection table for the RCL instruction.
 *  Layout: [0] generic, [1] Intel, [2] AMD, [3] generic. */
IEM_STATIC const IEMOPSHIFTSIZES * const g_iemAImpl_rcl_eflags[] =
{
    &g_iemAImpl_rcl,
    &g_iemAImpl_rcl_intel,
    &g_iemAImpl_rcl_amd,
    &g_iemAImpl_rcl,
};


/** Function table for the RCR instruction (rotate through carry right). */
IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcr =
{
    iemAImpl_rcr_u8,
    iemAImpl_rcr_u16,
    iemAImpl_rcr_u32,
    iemAImpl_rcr_u64
};

/** Function table for the RCR instruction, AMD EFLAGS variant. */
IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcr_amd =
{
    iemAImpl_rcr_u8_amd,
    iemAImpl_rcr_u16_amd,
    iemAImpl_rcr_u32_amd,
    iemAImpl_rcr_u64_amd
};

/** Function table for the RCR instruction, Intel EFLAGS variant. */
IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcr_intel =
{
    iemAImpl_rcr_u8_intel,
    iemAImpl_rcr_u16_intel,
    iemAImpl_rcr_u32_intel,
    iemAImpl_rcr_u64_intel
};

/** EFLAGS variation selection table for the RCR instruction. */
IEM_STATIC const IEMOPSHIFTSIZES * const g_iemAImpl_rcr_eflags[] =
{
    &g_iemAImpl_rcr,
    &g_iemAImpl_rcr_intel,
    &g_iemAImpl_rcr_amd,
    &g_iemAImpl_rcr,
};
530
531
/** Function table for the SHL instruction. */
IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shl =
{
    iemAImpl_shl_u8,
    iemAImpl_shl_u16,
    iemAImpl_shl_u32,
    iemAImpl_shl_u64
};

/** Function table for the SHL instruction, AMD EFLAGS variant. */
IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shl_amd =
{
    iemAImpl_shl_u8_amd,
    iemAImpl_shl_u16_amd,
    iemAImpl_shl_u32_amd,
    iemAImpl_shl_u64_amd
};

/** Function table for the SHL instruction, Intel EFLAGS variant. */
IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shl_intel =
{
    iemAImpl_shl_u8_intel,
    iemAImpl_shl_u16_intel,
    iemAImpl_shl_u32_intel,
    iemAImpl_shl_u64_intel
};

/** EFLAGS variation selection table for the SHL instruction.
 *  Layout: [0] generic, [1] Intel, [2] AMD, [3] generic. */
IEM_STATIC const IEMOPSHIFTSIZES * const g_iemAImpl_shl_eflags[] =
{
    &g_iemAImpl_shl,
    &g_iemAImpl_shl_intel,
    &g_iemAImpl_shl_amd,
    &g_iemAImpl_shl,
};


/** Function table for the SHR instruction. */
IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shr =
{
    iemAImpl_shr_u8,
    iemAImpl_shr_u16,
    iemAImpl_shr_u32,
    iemAImpl_shr_u64
};

/** Function table for the SHR instruction, AMD EFLAGS variant. */
IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shr_amd =
{
    iemAImpl_shr_u8_amd,
    iemAImpl_shr_u16_amd,
    iemAImpl_shr_u32_amd,
    iemAImpl_shr_u64_amd
};

/** Function table for the SHR instruction, Intel EFLAGS variant. */
IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shr_intel =
{
    iemAImpl_shr_u8_intel,
    iemAImpl_shr_u16_intel,
    iemAImpl_shr_u32_intel,
    iemAImpl_shr_u64_intel
};

/** EFLAGS variation selection table for the SHR instruction. */
IEM_STATIC const IEMOPSHIFTSIZES * const g_iemAImpl_shr_eflags[] =
{
    &g_iemAImpl_shr,
    &g_iemAImpl_shr_intel,
    &g_iemAImpl_shr_amd,
    &g_iemAImpl_shr,
};


/** Function table for the SAR instruction (arithmetic right shift). */
IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_sar =
{
    iemAImpl_sar_u8,
    iemAImpl_sar_u16,
    iemAImpl_sar_u32,
    iemAImpl_sar_u64
};

/** Function table for the SAR instruction, AMD EFLAGS variant. */
IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_sar_amd =
{
    iemAImpl_sar_u8_amd,
    iemAImpl_sar_u16_amd,
    iemAImpl_sar_u32_amd,
    iemAImpl_sar_u64_amd
};

/** Function table for the SAR instruction, Intel EFLAGS variant. */
IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_sar_intel =
{
    iemAImpl_sar_u8_intel,
    iemAImpl_sar_u16_intel,
    iemAImpl_sar_u32_intel,
    iemAImpl_sar_u64_intel
};

/** EFLAGS variation selection table for the SAR instruction. */
IEM_STATIC const IEMOPSHIFTSIZES * const g_iemAImpl_sar_eflags[] =
{
    &g_iemAImpl_sar,
    &g_iemAImpl_sar_intel,
    &g_iemAImpl_sar_amd,
    &g_iemAImpl_sar,
};
641
642
/** Function table for the MUL instruction (implicit rAX operand).
 *  One worker per operand size (8/16/32/64). */
IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_mul =
{
    iemAImpl_mul_u8,
    iemAImpl_mul_u16,
    iemAImpl_mul_u32,
    iemAImpl_mul_u64
};

/** Function table for the MUL instruction, AMD EFLAGS variation. */
IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_mul_amd =
{
    iemAImpl_mul_u8_amd,
    iemAImpl_mul_u16_amd,
    iemAImpl_mul_u32_amd,
    iemAImpl_mul_u64_amd
};

/** Function table for the MUL instruction, Intel EFLAGS variation. */
IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_mul_intel =
{
    iemAImpl_mul_u8_intel,
    iemAImpl_mul_u16_intel,
    iemAImpl_mul_u32_intel,
    iemAImpl_mul_u64_intel
};

/** EFLAGS variation selection table for the MUL instruction.
 *  Layout: [0] generic, [1] Intel, [2] AMD, [3] generic. */
IEM_STATIC const IEMOPMULDIVSIZES * const g_iemAImpl_mul_eflags[] =
{
    &g_iemAImpl_mul,
    &g_iemAImpl_mul_intel,
    &g_iemAImpl_mul_amd,
    &g_iemAImpl_mul,
};

/** EFLAGS variation selection table for the 8-bit MUL instruction. */
IEM_STATIC PFNIEMAIMPLMULDIVU8 const g_iemAImpl_mul_u8_eflags[] =
{
    iemAImpl_mul_u8,
    iemAImpl_mul_u8_intel,
    iemAImpl_mul_u8_amd,
    iemAImpl_mul_u8
};


/** Function table for the IMUL instruction working implicitly on rAX. */
IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_imul =
{
    iemAImpl_imul_u8,
    iemAImpl_imul_u16,
    iemAImpl_imul_u32,
    iemAImpl_imul_u64
};

/** Function table for the IMUL instruction working implicitly on rAX, AMD EFLAGS variation. */
IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_imul_amd =
{
    iemAImpl_imul_u8_amd,
    iemAImpl_imul_u16_amd,
    iemAImpl_imul_u32_amd,
    iemAImpl_imul_u64_amd
};

/** Function table for the IMUL instruction working implicitly on rAX, Intel EFLAGS variation. */
IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_imul_intel =
{
    iemAImpl_imul_u8_intel,
    iemAImpl_imul_u16_intel,
    iemAImpl_imul_u32_intel,
    iemAImpl_imul_u64_intel
};

/** EFLAGS variation selection table for the IMUL instruction. */
IEM_STATIC const IEMOPMULDIVSIZES * const g_iemAImpl_imul_eflags[] =
{
    &g_iemAImpl_imul,
    &g_iemAImpl_imul_intel,
    &g_iemAImpl_imul_amd,
    &g_iemAImpl_imul,
};

/** EFLAGS variation selection table for the 8-bit IMUL instruction. */
IEM_STATIC PFNIEMAIMPLMULDIVU8 const g_iemAImpl_imul_u8_eflags[] =
{
    iemAImpl_imul_u8,
    iemAImpl_imul_u8_intel,
    iemAImpl_imul_u8_amd,
    iemAImpl_imul_u8
};
733
734
/** Function table for the DIV instruction (implicit rDX:rAX operands). */
IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_div =
{
    iemAImpl_div_u8,
    iemAImpl_div_u16,
    iemAImpl_div_u32,
    iemAImpl_div_u64
};

/** Function table for the DIV instruction, AMD EFLAGS variation. */
IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_div_amd =
{
    iemAImpl_div_u8_amd,
    iemAImpl_div_u16_amd,
    iemAImpl_div_u32_amd,
    iemAImpl_div_u64_amd
};

/** Function table for the DIV instruction, Intel EFLAGS variation. */
IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_div_intel =
{
    iemAImpl_div_u8_intel,
    iemAImpl_div_u16_intel,
    iemAImpl_div_u32_intel,
    iemAImpl_div_u64_intel
};

/** EFLAGS variation selection table for the DIV instruction.
 *  Layout: [0] generic, [1] Intel, [2] AMD, [3] generic. */
IEM_STATIC const IEMOPMULDIVSIZES * const g_iemAImpl_div_eflags[] =
{
    &g_iemAImpl_div,
    &g_iemAImpl_div_intel,
    &g_iemAImpl_div_amd,
    &g_iemAImpl_div,
};

/** EFLAGS variation selection table for the 8-bit DIV instruction. */
IEM_STATIC PFNIEMAIMPLMULDIVU8 const g_iemAImpl_div_u8_eflags[] =
{
    iemAImpl_div_u8,
    iemAImpl_div_u8_intel,
    iemAImpl_div_u8_amd,
    iemAImpl_div_u8
};


/** Function table for the IDIV instruction. */
IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_idiv =
{
    iemAImpl_idiv_u8,
    iemAImpl_idiv_u16,
    iemAImpl_idiv_u32,
    iemAImpl_idiv_u64
};

/** Function table for the IDIV instruction, AMD EFLAGS variation. */
IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_idiv_amd =
{
    iemAImpl_idiv_u8_amd,
    iemAImpl_idiv_u16_amd,
    iemAImpl_idiv_u32_amd,
    iemAImpl_idiv_u64_amd
};

/** Function table for the IDIV instruction, Intel EFLAGS variation. */
IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_idiv_intel =
{
    iemAImpl_idiv_u8_intel,
    iemAImpl_idiv_u16_intel,
    iemAImpl_idiv_u32_intel,
    iemAImpl_idiv_u64_intel
};

/** EFLAGS variation selection table for the IDIV instruction. */
IEM_STATIC const IEMOPMULDIVSIZES * const g_iemAImpl_idiv_eflags[] =
{
    &g_iemAImpl_idiv,
    &g_iemAImpl_idiv_intel,
    &g_iemAImpl_idiv_amd,
    &g_iemAImpl_idiv,
};

/** EFLAGS variation selection table for the 8-bit IDIV instruction. */
IEM_STATIC PFNIEMAIMPLMULDIVU8 const g_iemAImpl_idiv_u8_eflags[] =
{
    iemAImpl_idiv_u8,
    iemAImpl_idiv_u8_intel,
    iemAImpl_idiv_u8_amd,
    iemAImpl_idiv_u8
};
825
826
/** Function table for the SHLD instruction (double-precision shift left).
 *  One worker per operand size; SHLD/SHRD have no 8-bit form. */
IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shld =
{
    iemAImpl_shld_u16,
    iemAImpl_shld_u32,
    iemAImpl_shld_u64,
};

/** Function table for the SHLD instruction, AMD EFLAGS variation. */
IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shld_amd =
{
    iemAImpl_shld_u16_amd,
    iemAImpl_shld_u32_amd,
    iemAImpl_shld_u64_amd
};

/** Function table for the SHLD instruction, Intel EFLAGS variation. */
IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shld_intel =
{
    iemAImpl_shld_u16_intel,
    iemAImpl_shld_u32_intel,
    iemAImpl_shld_u64_intel
};

/** EFLAGS variation selection table for the SHLD instruction.
 *  Layout: [0] generic, [1] Intel, [2] AMD, [3] generic. */
IEM_STATIC const IEMOPSHIFTDBLSIZES * const g_iemAImpl_shld_eflags[] =
{
    &g_iemAImpl_shld,
    &g_iemAImpl_shld_intel,
    &g_iemAImpl_shld_amd,
    &g_iemAImpl_shld
};

/** Function table for the SHRD instruction (double-precision shift right). */
IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shrd =
{
    iemAImpl_shrd_u16,
    iemAImpl_shrd_u32,
    iemAImpl_shrd_u64
};

/** Function table for the SHRD instruction, AMD EFLAGS variation. */
IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shrd_amd =
{
    iemAImpl_shrd_u16_amd,
    iemAImpl_shrd_u32_amd,
    iemAImpl_shrd_u64_amd
};

/** Function table for the SHRD instruction, Intel EFLAGS variation. */
IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shrd_intel =
{
    iemAImpl_shrd_u16_intel,
    iemAImpl_shrd_u32_intel,
    iemAImpl_shrd_u64_intel
};

/** EFLAGS variation selection table for the SHRD instruction. */
IEM_STATIC const IEMOPSHIFTDBLSIZES * const g_iemAImpl_shrd_eflags[] =
{
    &g_iemAImpl_shrd,
    &g_iemAImpl_shrd_intel,
    &g_iemAImpl_shrd_amd,
    &g_iemAImpl_shrd
};
892
893
/* MMX/SSE tables: each entry pair is { 64-bit (MMX) worker, 128-bit (SSE) worker };
   a NULL 64-bit entry means the instruction exists only in its SSE form. */

/** Function table for the PUNPCKLBW instruction */
IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklbw  = { iemAImpl_punpcklbw_u64,  iemAImpl_punpcklbw_u128 };
/** Function table for the PUNPCKLWD instruction */
IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklwd  = { iemAImpl_punpcklwd_u64,  iemAImpl_punpcklwd_u128 };
/** Function table for the PUNPCKLDQ instruction */
IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpckldq  = { iemAImpl_punpckldq_u64,  iemAImpl_punpckldq_u128 };
/** Function table for the PUNPCKLQDQ instruction */
IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklqdq = { NULL, iemAImpl_punpcklqdq_u128 };

/** Function table for the PUNPCKHBW instruction */
IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhbw  = { iemAImpl_punpckhbw_u64,  iemAImpl_punpckhbw_u128 };
/** Function table for the PUNPCKHWD instruction */
IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhwd  = { iemAImpl_punpckhwd_u64,  iemAImpl_punpckhwd_u128 };
/** Function table for the PUNPCKHDQ instruction */
IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhdq  = { iemAImpl_punpckhdq_u64,  iemAImpl_punpckhdq_u128 };
/** Function table for the PUNPCKHQDQ instruction */
IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhqdq = { NULL, iemAImpl_punpckhqdq_u128 };

/** Function table for the PXOR instruction */
IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pxor    = { iemAImpl_pxor_u64,    iemAImpl_pxor_u128 };
/** Function table for the PCMPEQB instruction */
IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqb = { iemAImpl_pcmpeqb_u64, iemAImpl_pcmpeqb_u128 };
/** Function table for the PCMPEQW instruction */
IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqw = { iemAImpl_pcmpeqw_u64, iemAImpl_pcmpeqw_u128 };
/** Function table for the PCMPEQD instruction */
IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqd = { iemAImpl_pcmpeqd_u64, iemAImpl_pcmpeqd_u128 };
920
921#endif /* !TST_IEM_CHECK_MC */
922
923
/**
 * Common worker for instructions like ADD, AND, OR, ++ with a byte
 * memory/register as the destination.
 *
 * Fetches the ModR/M byte and dispatches to the 8-bit worker in @a pImpl,
 * handling both the register-destination and memory-destination encodings,
 * including the LOCK-prefixed (atomic) variant for the memory form.
 *
 * @param   pImpl       Pointer to the instruction implementation (assembly).
 *                      A NULL pfnLockedU8 marks read-only ops (CMP, TEST).
 */
FNIEMOP_DEF_1(iemOpHlpBinaryOperator_rm_r8, PCIEMOPBINSIZES, pImpl)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);

    /*
     * If rm is denoting a register, no more instruction bytes.
     */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* Register destination: the LOCK prefix is always invalid here. */
        IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(3, 0);
        IEM_MC_ARG(uint8_t *,  pu8Dst,  0);
        IEM_MC_ARG(uint8_t,    u8Src,   1);
        IEM_MC_ARG(uint32_t *, pEFlags, 2);

        /* Source comes from the reg field (with REX.R), destination is the
           r/m register (with REX.B), taken by reference so the worker can
           write it in place. */
        IEM_MC_FETCH_GREG_U8(u8Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg);
        IEM_MC_REF_GREG_U8(pu8Dst, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB);
        IEM_MC_REF_EFLAGS(pEFlags);
        IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, u8Src, pEFlags);

        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /*
         * We're accessing memory.
         * Note! We're putting the eflags on the stack here so we can commit them
         *       after the memory.
         */
        /* A NULL locked worker identifies CMP/TEST: those only read the
           destination, so it is mapped read-only and LOCK is rejected. */
        uint32_t const fAccess = pImpl->pfnLockedU8 ? IEM_ACCESS_DATA_RW : IEM_ACCESS_DATA_R; /* CMP,TEST */
        IEM_MC_BEGIN(3, 2);
        IEM_MC_ARG(uint8_t *,  pu8Dst,           0);
        IEM_MC_ARG(uint8_t,    u8Src,            1);
        IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
        if (!pImpl->pfnLockedU8)
            IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
        IEM_MC_MEM_MAP(pu8Dst, fAccess, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0 /*arg*/);
        IEM_MC_FETCH_GREG_U8(u8Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg);
        IEM_MC_FETCH_EFLAGS(EFlags);
        /* Choose the atomic worker when a LOCK prefix was decoded. */
        if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))
            IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, u8Src, pEFlags);
        else
            IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU8, pu8Dst, u8Src, pEFlags);

        /* Commit memory first, then the flags computed by the worker. */
        IEM_MC_MEM_COMMIT_AND_UNMAP(pu8Dst, fAccess);
        IEM_MC_COMMIT_EFLAGS(EFlags);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
986
987
988/**
989 * Common worker for word/dword/qword instructions like ADD, AND, OR, ++ with
990 * memory/register as the destination.
991 *
992 * @param pImpl Pointer to the instruction implementation (assembly).
993 */
994FNIEMOP_DEF_1(iemOpHlpBinaryOperator_rm_rv, PCIEMOPBINSIZES, pImpl)
995{
996 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
997
998 /*
999 * If rm is denoting a register, no more instruction bytes.
1000 */
1001 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
1002 {
1003 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
1004
1005 switch (pVCpu->iem.s.enmEffOpSize)
1006 {
1007 case IEMMODE_16BIT:
1008 IEM_MC_BEGIN(3, 0);
1009 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
1010 IEM_MC_ARG(uint16_t, u16Src, 1);
1011 IEM_MC_ARG(uint32_t *, pEFlags, 2);
1012
1013 IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg);
1014 IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB);
1015 IEM_MC_REF_EFLAGS(pEFlags);
1016 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);
1017
1018 IEM_MC_ADVANCE_RIP();
1019 IEM_MC_END();
1020 break;
1021
1022 case IEMMODE_32BIT:
1023 IEM_MC_BEGIN(3, 0);
1024 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
1025 IEM_MC_ARG(uint32_t, u32Src, 1);
1026 IEM_MC_ARG(uint32_t *, pEFlags, 2);
1027
1028 IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg);
1029 IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB);
1030 IEM_MC_REF_EFLAGS(pEFlags);
1031 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);
1032
1033 if (pImpl != &g_iemAImpl_test)
1034 IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Dst);
1035 IEM_MC_ADVANCE_RIP();
1036 IEM_MC_END();
1037 break;
1038
1039 case IEMMODE_64BIT:
1040 IEM_MC_BEGIN(3, 0);
1041 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
1042 IEM_MC_ARG(uint64_t, u64Src, 1);
1043 IEM_MC_ARG(uint32_t *, pEFlags, 2);
1044
1045 IEM_MC_FETCH_GREG_U64(u64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg);
1046 IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB);
1047 IEM_MC_REF_EFLAGS(pEFlags);
1048 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);
1049
1050 IEM_MC_ADVANCE_RIP();
1051 IEM_MC_END();
1052 break;
1053 }
1054 }
1055 else
1056 {
1057 /*
1058 * We're accessing memory.
1059 * Note! We're putting the eflags on the stack here so we can commit them
1060 * after the memory.
1061 */
1062 uint32_t const fAccess = pImpl->pfnLockedU8 ? IEM_ACCESS_DATA_RW : IEM_ACCESS_DATA_R /* CMP,TEST */;
1063 switch (pVCpu->iem.s.enmEffOpSize)
1064 {
1065 case IEMMODE_16BIT:
1066 IEM_MC_BEGIN(3, 2);
1067 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
1068 IEM_MC_ARG(uint16_t, u16Src, 1);
1069 IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
1070 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
1071
1072 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
1073 if (!pImpl->pfnLockedU16)
1074 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
1075 IEM_MC_MEM_MAP(pu16Dst, fAccess, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0 /*arg*/);
1076 IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg);
1077 IEM_MC_FETCH_EFLAGS(EFlags);
1078 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))
1079 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);
1080 else
1081 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU16, pu16Dst, u16Src, pEFlags);
1082
1083 IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, fAccess);
1084 IEM_MC_COMMIT_EFLAGS(EFlags);
1085 IEM_MC_ADVANCE_RIP();
1086 IEM_MC_END();
1087 break;
1088
1089 case IEMMODE_32BIT:
1090 IEM_MC_BEGIN(3, 2);
1091 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
1092 IEM_MC_ARG(uint32_t, u32Src, 1);
1093 IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
1094 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
1095
1096 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
1097 if (!pImpl->pfnLockedU32)
1098 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
1099 IEM_MC_MEM_MAP(pu32Dst, fAccess, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0 /*arg*/);
1100 IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg);
1101 IEM_MC_FETCH_EFLAGS(EFlags);
1102 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))
1103 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);
1104 else
1105 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU32, pu32Dst, u32Src, pEFlags);
1106
1107 IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, fAccess);
1108 IEM_MC_COMMIT_EFLAGS(EFlags);
1109 IEM_MC_ADVANCE_RIP();
1110 IEM_MC_END();
1111 break;
1112
1113 case IEMMODE_64BIT:
1114 IEM_MC_BEGIN(3, 2);
1115 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
1116 IEM_MC_ARG(uint64_t, u64Src, 1);
1117 IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
1118 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
1119
1120 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
1121 if (!pImpl->pfnLockedU64)
1122 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
1123 IEM_MC_MEM_MAP(pu64Dst, fAccess, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0 /*arg*/);
1124 IEM_MC_FETCH_GREG_U64(u64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg);
1125 IEM_MC_FETCH_EFLAGS(EFlags);
1126 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))
1127 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);
1128 else
1129 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU64, pu64Dst, u64Src, pEFlags);
1130
1131 IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, fAccess);
1132 IEM_MC_COMMIT_EFLAGS(EFlags);
1133 IEM_MC_ADVANCE_RIP();
1134 IEM_MC_END();
1135 break;
1136 }
1137 }
1138 return VINF_SUCCESS;
1139}
1140
1141
1142/**
1143 * Common worker for byte instructions like ADD, AND, OR, ++ with a register as
1144 * the destination.
1145 *
1146 * @param pImpl Pointer to the instruction implementation (assembly).
1147 */
1148FNIEMOP_DEF_1(iemOpHlpBinaryOperator_r8_rm, PCIEMOPBINSIZES, pImpl)
1149{
1150 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
1151
1152 /*
1153 * If rm is denoting a register, no more instruction bytes.
1154 */
1155 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
1156 {
1157 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
1158 IEM_MC_BEGIN(3, 0);
1159 IEM_MC_ARG(uint8_t *, pu8Dst, 0);
1160 IEM_MC_ARG(uint8_t, u8Src, 1);
1161 IEM_MC_ARG(uint32_t *, pEFlags, 2);
1162
1163 IEM_MC_FETCH_GREG_U8(u8Src, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB);
1164 IEM_MC_REF_GREG_U8(pu8Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg);
1165 IEM_MC_REF_EFLAGS(pEFlags);
1166 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, u8Src, pEFlags);
1167
1168 IEM_MC_ADVANCE_RIP();
1169 IEM_MC_END();
1170 }
1171 else
1172 {
1173 /*
1174 * We're accessing memory.
1175 */
1176 IEM_MC_BEGIN(3, 1);
1177 IEM_MC_ARG(uint8_t *, pu8Dst, 0);
1178 IEM_MC_ARG(uint8_t, u8Src, 1);
1179 IEM_MC_ARG(uint32_t *, pEFlags, 2);
1180 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
1181
1182 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
1183 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
1184 IEM_MC_FETCH_MEM_U8(u8Src, pVCpu->iem.s.iEffSeg, GCPtrEffDst);
1185 IEM_MC_REF_GREG_U8(pu8Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg);
1186 IEM_MC_REF_EFLAGS(pEFlags);
1187 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, u8Src, pEFlags);
1188
1189 IEM_MC_ADVANCE_RIP();
1190 IEM_MC_END();
1191 }
1192 return VINF_SUCCESS;
1193}
1194
1195
1196/**
1197 * Common worker for word/dword/qword instructions like ADD, AND, OR, ++ with a
1198 * register as the destination.
1199 *
1200 * @param pImpl Pointer to the instruction implementation (assembly).
1201 */
1202FNIEMOP_DEF_1(iemOpHlpBinaryOperator_rv_rm, PCIEMOPBINSIZES, pImpl)
1203{
1204 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
1205
1206 /*
1207 * If rm is denoting a register, no more instruction bytes.
1208 */
1209 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
1210 {
1211 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
1212 switch (pVCpu->iem.s.enmEffOpSize)
1213 {
1214 case IEMMODE_16BIT:
1215 IEM_MC_BEGIN(3, 0);
1216 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
1217 IEM_MC_ARG(uint16_t, u16Src, 1);
1218 IEM_MC_ARG(uint32_t *, pEFlags, 2);
1219
1220 IEM_MC_FETCH_GREG_U16(u16Src, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB);
1221 IEM_MC_REF_GREG_U16(pu16Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg);
1222 IEM_MC_REF_EFLAGS(pEFlags);
1223 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);
1224
1225 IEM_MC_ADVANCE_RIP();
1226 IEM_MC_END();
1227 break;
1228
1229 case IEMMODE_32BIT:
1230 IEM_MC_BEGIN(3, 0);
1231 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
1232 IEM_MC_ARG(uint32_t, u32Src, 1);
1233 IEM_MC_ARG(uint32_t *, pEFlags, 2);
1234
1235 IEM_MC_FETCH_GREG_U32(u32Src, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB);
1236 IEM_MC_REF_GREG_U32(pu32Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg);
1237 IEM_MC_REF_EFLAGS(pEFlags);
1238 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);
1239
1240 IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Dst);
1241 IEM_MC_ADVANCE_RIP();
1242 IEM_MC_END();
1243 break;
1244
1245 case IEMMODE_64BIT:
1246 IEM_MC_BEGIN(3, 0);
1247 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
1248 IEM_MC_ARG(uint64_t, u64Src, 1);
1249 IEM_MC_ARG(uint32_t *, pEFlags, 2);
1250
1251 IEM_MC_FETCH_GREG_U64(u64Src, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB);
1252 IEM_MC_REF_GREG_U64(pu64Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg);
1253 IEM_MC_REF_EFLAGS(pEFlags);
1254 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);
1255
1256 IEM_MC_ADVANCE_RIP();
1257 IEM_MC_END();
1258 break;
1259 }
1260 }
1261 else
1262 {
1263 /*
1264 * We're accessing memory.
1265 */
1266 switch (pVCpu->iem.s.enmEffOpSize)
1267 {
1268 case IEMMODE_16BIT:
1269 IEM_MC_BEGIN(3, 1);
1270 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
1271 IEM_MC_ARG(uint16_t, u16Src, 1);
1272 IEM_MC_ARG(uint32_t *, pEFlags, 2);
1273 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
1274
1275 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
1276 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
1277 IEM_MC_FETCH_MEM_U16(u16Src, pVCpu->iem.s.iEffSeg, GCPtrEffDst);
1278 IEM_MC_REF_GREG_U16(pu16Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg);
1279 IEM_MC_REF_EFLAGS(pEFlags);
1280 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);
1281
1282 IEM_MC_ADVANCE_RIP();
1283 IEM_MC_END();
1284 break;
1285
1286 case IEMMODE_32BIT:
1287 IEM_MC_BEGIN(3, 1);
1288 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
1289 IEM_MC_ARG(uint32_t, u32Src, 1);
1290 IEM_MC_ARG(uint32_t *, pEFlags, 2);
1291 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
1292
1293 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
1294 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
1295 IEM_MC_FETCH_MEM_U32(u32Src, pVCpu->iem.s.iEffSeg, GCPtrEffDst);
1296 IEM_MC_REF_GREG_U32(pu32Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg);
1297 IEM_MC_REF_EFLAGS(pEFlags);
1298 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);
1299
1300 IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Dst);
1301 IEM_MC_ADVANCE_RIP();
1302 IEM_MC_END();
1303 break;
1304
1305 case IEMMODE_64BIT:
1306 IEM_MC_BEGIN(3, 1);
1307 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
1308 IEM_MC_ARG(uint64_t, u64Src, 1);
1309 IEM_MC_ARG(uint32_t *, pEFlags, 2);
1310 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
1311
1312 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
1313 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
1314 IEM_MC_FETCH_MEM_U64(u64Src, pVCpu->iem.s.iEffSeg, GCPtrEffDst);
1315 IEM_MC_REF_GREG_U64(pu64Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg);
1316 IEM_MC_REF_EFLAGS(pEFlags);
1317 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);
1318
1319 IEM_MC_ADVANCE_RIP();
1320 IEM_MC_END();
1321 break;
1322 }
1323 }
1324 return VINF_SUCCESS;
1325}
1326
1327
1328/**
1329 * Common worker for instructions like ADD, AND, OR, ++ with working on AL with
1330 * a byte immediate.
1331 *
1332 * @param pImpl Pointer to the instruction implementation (assembly).
1333 */
1334FNIEMOP_DEF_1(iemOpHlpBinaryOperator_AL_Ib, PCIEMOPBINSIZES, pImpl)
1335{
1336 uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
1337 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
1338
1339 IEM_MC_BEGIN(3, 0);
1340 IEM_MC_ARG(uint8_t *, pu8Dst, 0);
1341 IEM_MC_ARG_CONST(uint8_t, u8Src,/*=*/ u8Imm, 1);
1342 IEM_MC_ARG(uint32_t *, pEFlags, 2);
1343
1344 IEM_MC_REF_GREG_U8(pu8Dst, X86_GREG_xAX);
1345 IEM_MC_REF_EFLAGS(pEFlags);
1346 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, u8Src, pEFlags);
1347
1348 IEM_MC_ADVANCE_RIP();
1349 IEM_MC_END();
1350 return VINF_SUCCESS;
1351}
1352
1353
1354/**
1355 * Common worker for instructions like ADD, AND, OR, ++ with working on
1356 * AX/EAX/RAX with a word/dword immediate.
1357 *
1358 * @param pImpl Pointer to the instruction implementation (assembly).
1359 */
1360FNIEMOP_DEF_1(iemOpHlpBinaryOperator_rAX_Iz, PCIEMOPBINSIZES, pImpl)
1361{
1362 switch (pVCpu->iem.s.enmEffOpSize)
1363 {
1364 case IEMMODE_16BIT:
1365 {
1366 uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm);
1367 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
1368
1369 IEM_MC_BEGIN(3, 0);
1370 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
1371 IEM_MC_ARG_CONST(uint16_t, u16Src,/*=*/ u16Imm, 1);
1372 IEM_MC_ARG(uint32_t *, pEFlags, 2);
1373
1374 IEM_MC_REF_GREG_U16(pu16Dst, X86_GREG_xAX);
1375 IEM_MC_REF_EFLAGS(pEFlags);
1376 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);
1377
1378 IEM_MC_ADVANCE_RIP();
1379 IEM_MC_END();
1380 return VINF_SUCCESS;
1381 }
1382
1383 case IEMMODE_32BIT:
1384 {
1385 uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(&u32Imm);
1386 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
1387
1388 IEM_MC_BEGIN(3, 0);
1389 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
1390 IEM_MC_ARG_CONST(uint32_t, u32Src,/*=*/ u32Imm, 1);
1391 IEM_MC_ARG(uint32_t *, pEFlags, 2);
1392
1393 IEM_MC_REF_GREG_U32(pu32Dst, X86_GREG_xAX);
1394 IEM_MC_REF_EFLAGS(pEFlags);
1395 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);
1396
1397 if (pImpl != &g_iemAImpl_test)
1398 IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Dst);
1399 IEM_MC_ADVANCE_RIP();
1400 IEM_MC_END();
1401 return VINF_SUCCESS;
1402 }
1403
1404 case IEMMODE_64BIT:
1405 {
1406 uint64_t u64Imm; IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64Imm);
1407 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
1408
1409 IEM_MC_BEGIN(3, 0);
1410 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
1411 IEM_MC_ARG_CONST(uint64_t, u64Src,/*=*/ u64Imm, 1);
1412 IEM_MC_ARG(uint32_t *, pEFlags, 2);
1413
1414 IEM_MC_REF_GREG_U64(pu64Dst, X86_GREG_xAX);
1415 IEM_MC_REF_EFLAGS(pEFlags);
1416 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);
1417
1418 IEM_MC_ADVANCE_RIP();
1419 IEM_MC_END();
1420 return VINF_SUCCESS;
1421 }
1422
1423 IEM_NOT_REACHED_DEFAULT_CASE_RET();
1424 }
1425}
1426
1427
/** Opcodes 0xf1, 0xd6.
 * Plain invalid-opcode handler: names the instruction for stats/logging and
 * raises \#UD without consuming any further bytes. */
FNIEMOP_DEF(iemOp_Invalid)
{
    IEMOP_MNEMONIC(Invalid, "Invalid");
    return IEMOP_RAISE_INVALID_OPCODE();
}
1434
1435
/** Invalid with RM byte.
 * The ModR/M byte has already been fetched by the caller (FNIEMOPRM) but is
 * otherwise ignored; \#UD is raised without decoding any addressing bytes. */
FNIEMOPRM_DEF(iemOp_InvalidWithRM)
{
    RT_NOREF_PV(bRm);
    IEMOP_MNEMONIC(InvalidWithRm, "InvalidWithRM");
    return IEMOP_RAISE_INVALID_OPCODE();
}
1443
1444
/** Invalid with RM byte where intel decodes any additional address encoding
 * bytes.
 *
 * On Intel CPUs the SIB byte and displacement indicated by bRm are consumed
 * before \#UD is raised (so the reported instruction length matches hardware);
 * on other vendors nothing further is decoded. */
FNIEMOPRM_DEF(iemOp_InvalidWithRMNeedDecode)
{
    IEMOP_MNEMONIC(InvalidWithRMNeedDecode, "InvalidWithRMNeedDecode");
    if (pVCpu->iem.s.enmCpuVendor == CPUMCPUVENDOR_INTEL)
    {
#ifndef TST_IEM_CHECK_MC
        /* Memory form: walk the effective-address bytes (SIB/displacement). */
        if ((bRm & X86_MODRM_MOD_MASK) != (3 << X86_MODRM_MOD_SHIFT))
        {
            RTGCPTR GCPtrEff;
            VBOXSTRICTRC rcStrict = iemOpHlpCalcRmEffAddr(pVCpu, bRm, 0, &GCPtrEff);
            if (rcStrict != VINF_SUCCESS)
                return rcStrict;
        }
#endif
    }
    IEMOP_HLP_DONE_DECODING();
    return IEMOP_RAISE_INVALID_OPCODE();
}
1465
1466
1467/** Invalid with RM byte where both AMD and Intel decodes any additional
1468 * address encoding bytes. */
1469FNIEMOPRM_DEF(iemOp_InvalidWithRMAllNeeded)
1470{
1471 IEMOP_MNEMONIC(InvalidWithRMAllNeeded, "InvalidWithRMAllNeeded");
1472#ifndef TST_IEM_CHECK_MC
1473 if ((bRm & X86_MODRM_MOD_MASK) != (3 << X86_MODRM_MOD_SHIFT))
1474 {
1475 RTGCPTR GCPtrEff;
1476 VBOXSTRICTRC rcStrict = iemOpHlpCalcRmEffAddr(pVCpu, bRm, 0, &GCPtrEff);
1477 if (rcStrict != VINF_SUCCESS)
1478 return rcStrict;
1479 }
1480#endif
1481 IEMOP_HLP_DONE_DECODING();
1482 return IEMOP_RAISE_INVALID_OPCODE();
1483}
1484
1485
/** Invalid with RM byte where intel requires an 8-bit immediate.
 * (The code fetches a single byte via IEM_OPCODE_GET_NEXT_U8; the previous
 * "8-byte" wording was a typo.)
 * Intel will also need SIB and displacement if bRm indicates memory. */
FNIEMOPRM_DEF(iemOp_InvalidWithRMNeedImm8)
{
    IEMOP_MNEMONIC(InvalidWithRMNeedImm8, "InvalidWithRMNeedImm8");
    if (pVCpu->iem.s.enmCpuVendor == CPUMCPUVENDOR_INTEL)
    {
#ifndef TST_IEM_CHECK_MC
        /* Memory form: consume SIB/displacement first. */
        if ((bRm & X86_MODRM_MOD_MASK) != (3 << X86_MODRM_MOD_SHIFT))
        {
            RTGCPTR GCPtrEff;
            VBOXSTRICTRC rcStrict = iemOpHlpCalcRmEffAddr(pVCpu, bRm, 0, &GCPtrEff);
            if (rcStrict != VINF_SUCCESS)
                return rcStrict;
        }
#endif
        /* Consume the imm8 so RIP advances past it (Intel only). */
        uint8_t bImm8; IEM_OPCODE_GET_NEXT_U8(&bImm8); RT_NOREF(bRm);
    }
    IEMOP_HLP_DONE_DECODING();
    return IEMOP_RAISE_INVALID_OPCODE();
}
1507
1508
/** Invalid with RM byte where an 8-bit immediate is required.
 * (Fetches one byte via IEM_OPCODE_GET_NEXT_U8; the previous "8-byte"
 * wording was a typo.)
 * Both AMD and Intel also needs SIB and displacement according to bRm. */
FNIEMOPRM_DEF(iemOp_InvalidWithRMAllNeedImm8)
{
    IEMOP_MNEMONIC(InvalidWithRMAllNeedImm8, "InvalidWithRMAllNeedImm8");
#ifndef TST_IEM_CHECK_MC
    /* Memory form: consume SIB/displacement first. */
    if ((bRm & X86_MODRM_MOD_MASK) != (3 << X86_MODRM_MOD_SHIFT))
    {
        RTGCPTR GCPtrEff;
        VBOXSTRICTRC rcStrict = iemOpHlpCalcRmEffAddr(pVCpu, bRm, 0, &GCPtrEff);
        if (rcStrict != VINF_SUCCESS)
            return rcStrict;
    }
#endif
    /* Consume the imm8 so the instruction length is correct. */
    uint8_t bImm8; IEM_OPCODE_GET_NEXT_U8(&bImm8); RT_NOREF(bRm);
    IEMOP_HLP_DONE_DECODING();
    return IEMOP_RAISE_INVALID_OPCODE();
}
1527
1528
/** Invalid opcode where intel requires Mod R/M sequence.
 * On Intel the ModR/M byte plus any SIB/displacement are fetched before
 * raising \#UD; other vendors consume nothing beyond the opcode. */
FNIEMOP_DEF(iemOp_InvalidNeedRM)
{
    IEMOP_MNEMONIC(InvalidNeedRM, "InvalidNeedRM");
    if (pVCpu->iem.s.enmCpuVendor == CPUMCPUVENDOR_INTEL)
    {
        uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm); RT_NOREF(bRm);
#ifndef TST_IEM_CHECK_MC
        /* Memory form: consume SIB/displacement too. */
        if ((bRm & X86_MODRM_MOD_MASK) != (3 << X86_MODRM_MOD_SHIFT))
        {
            RTGCPTR GCPtrEff;
            VBOXSTRICTRC rcStrict = iemOpHlpCalcRmEffAddr(pVCpu, bRm, 0, &GCPtrEff);
            if (rcStrict != VINF_SUCCESS)
                return rcStrict;
        }
#endif
    }
    IEMOP_HLP_DONE_DECODING();
    return IEMOP_RAISE_INVALID_OPCODE();
}
1549
1550
1551/** Invalid opcode where both AMD and Intel requires Mod R/M sequence. */
1552FNIEMOP_DEF(iemOp_InvalidAllNeedRM)
1553{
1554 IEMOP_MNEMONIC(InvalidAllNeedRM, "InvalidAllNeedRM");
1555 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm); RT_NOREF(bRm);
1556#ifndef TST_IEM_CHECK_MC
1557 if ((bRm & X86_MODRM_MOD_MASK) != (3 << X86_MODRM_MOD_SHIFT))
1558 {
1559 RTGCPTR GCPtrEff;
1560 VBOXSTRICTRC rcStrict = iemOpHlpCalcRmEffAddr(pVCpu, bRm, 0, &GCPtrEff);
1561 if (rcStrict != VINF_SUCCESS)
1562 return rcStrict;
1563 }
1564#endif
1565 IEMOP_HLP_DONE_DECODING();
1566 return IEMOP_RAISE_INVALID_OPCODE();
1567}
1568
1569
/** Invalid opcode where intel requires Mod R/M sequence and an 8-bit
 * immediate.  (Fetches one byte via IEM_OPCODE_GET_NEXT_U8; the previous
 * "8-byte" wording was a typo.) */
FNIEMOP_DEF(iemOp_InvalidNeedRMImm8)
{
    IEMOP_MNEMONIC(InvalidNeedRMImm8, "InvalidNeedRMImm8");
    if (pVCpu->iem.s.enmCpuVendor == CPUMCPUVENDOR_INTEL)
    {
        uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm); RT_NOREF(bRm);
#ifndef TST_IEM_CHECK_MC
        /* Memory form: consume SIB/displacement too. */
        if ((bRm & X86_MODRM_MOD_MASK) != (3 << X86_MODRM_MOD_SHIFT))
        {
            RTGCPTR GCPtrEff;
            VBOXSTRICTRC rcStrict = iemOpHlpCalcRmEffAddr(pVCpu, bRm, 0, &GCPtrEff);
            if (rcStrict != VINF_SUCCESS)
                return rcStrict;
        }
#endif
        /* Finally the imm8. */
        uint8_t bImm; IEM_OPCODE_GET_NEXT_U8(&bImm); RT_NOREF(bImm);
    }
    IEMOP_HLP_DONE_DECODING();
    return IEMOP_RAISE_INVALID_OPCODE();
}
1592
1593
/** Invalid opcode where intel requires a 3rd escape byte and a Mod R/M
 * sequence.
 * On Intel the third opcode byte, the ModR/M byte and any SIB/displacement
 * are all consumed before raising \#UD; other vendors consume nothing. */
FNIEMOP_DEF(iemOp_InvalidNeed3ByteEscRM)
{
    IEMOP_MNEMONIC(InvalidNeed3ByteEscRM, "InvalidNeed3ByteEscRM");
    if (pVCpu->iem.s.enmCpuVendor == CPUMCPUVENDOR_INTEL)
    {
        uint8_t b3rd; IEM_OPCODE_GET_NEXT_U8(&b3rd); RT_NOREF(b3rd);
        uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm); RT_NOREF(bRm);
#ifndef TST_IEM_CHECK_MC
        /* Memory form: consume SIB/displacement too. */
        if ((bRm & X86_MODRM_MOD_MASK) != (3 << X86_MODRM_MOD_SHIFT))
        {
            RTGCPTR GCPtrEff;
            VBOXSTRICTRC rcStrict = iemOpHlpCalcRmEffAddr(pVCpu, bRm, 0, &GCPtrEff);
            if (rcStrict != VINF_SUCCESS)
                return rcStrict;
        }
#endif
    }
    IEMOP_HLP_DONE_DECODING();
    return IEMOP_RAISE_INVALID_OPCODE();
}
1616
1617
/** Invalid opcode where intel requires a 3rd escape byte, Mod R/M sequence,
 * and an 8-bit immediate.  (Fetches one byte via IEM_OPCODE_GET_NEXT_U8;
 * the previous "8-byte" wording was a typo.)
 *
 * NOTE(review): unlike the sibling handlers this one passes cbImm=1 to
 * iemOpHlpCalcRmEffAddr (presumably to account for the trailing imm8 in
 * RIP-relative addressing) and calls IEMOP_HLP_DONE_DECODING() only on the
 * Intel path — verify the non-Intel path is intentionally left undone. */
FNIEMOP_DEF(iemOp_InvalidNeed3ByteEscRMImm8)
{
    IEMOP_MNEMONIC(InvalidNeed3ByteEscRMImm8, "InvalidNeed3ByteEscRMImm8");
    if (pVCpu->iem.s.enmCpuVendor == CPUMCPUVENDOR_INTEL)
    {
        uint8_t b3rd; IEM_OPCODE_GET_NEXT_U8(&b3rd); RT_NOREF(b3rd);
        uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm); RT_NOREF(bRm);
#ifndef TST_IEM_CHECK_MC
        /* Memory form: consume SIB/displacement; cbImm=1 for the imm8 below. */
        if ((bRm & X86_MODRM_MOD_MASK) != (3 << X86_MODRM_MOD_SHIFT))
        {
            RTGCPTR GCPtrEff;
            VBOXSTRICTRC rcStrict = iemOpHlpCalcRmEffAddr(pVCpu, bRm, 1, &GCPtrEff);
            if (rcStrict != VINF_SUCCESS)
                return rcStrict;
        }
#endif
        /* Finally the imm8. */
        uint8_t bImm; IEM_OPCODE_GET_NEXT_U8(&bImm); RT_NOREF(bImm);
        IEMOP_HLP_DONE_DECODING();
    }
    return IEMOP_RAISE_INVALID_OPCODE();
}
1641
1642
1643/** Repeats a_fn four times. For decoding tables. */
1644#define IEMOP_X4(a_fn) a_fn, a_fn, a_fn, a_fn
1645
1646/*
1647 * Include the tables.
1648 */
1649#ifdef IEM_WITH_3DNOW
1650# include "IEMAllInstructions3DNow.cpp.h"
1651#endif
1652#ifdef IEM_WITH_THREE_0F_38
1653# include "IEMAllInstructionsThree0f38.cpp.h"
1654#endif
1655#ifdef IEM_WITH_THREE_0F_3A
1656# include "IEMAllInstructionsThree0f3a.cpp.h"
1657#endif
1658#include "IEMAllInstructionsTwoByte0f.cpp.h"
1659#ifdef IEM_WITH_VEX
1660# include "IEMAllInstructionsVexMap1.cpp.h"
1661# include "IEMAllInstructionsVexMap2.cpp.h"
1662# include "IEMAllInstructionsVexMap3.cpp.h"
1663#endif
1664#include "IEMAllInstructionsOneByte.cpp.h"
1665
1666
1667#ifdef _MSC_VER
1668# pragma warning(pop)
1669#endif
1670
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette