VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllInstCommon.cpp.h@ 107044

Last change on this file since 107044 was 106061, checked in by vboxsync, 2 months ago

Copyright year updates by scm.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 35.0 KB
Line 
1/* $Id: IEMAllInstCommon.cpp.h 106061 2024-09-16 14:03:52Z vboxsync $ */
2/** @file
3 * IEM - Instruction Decoding and Emulation, Common Bits.
4 */
5
6/*
7 * Copyright (C) 2011-2024 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28#ifndef VMM_INCLUDED_SRC_VMMAll_IEMAllInstCommon_cpp_h
29#define VMM_INCLUDED_SRC_VMMAll_IEMAllInstCommon_cpp_h
30#ifndef RT_WITHOUT_PRAGMA_ONCE
31# pragma once
32#endif
33
34
35/*********************************************************************************************************************************
36* Defined Constants And Macros *
37*********************************************************************************************************************************/
/** Repeats a_fn four times, comma separated.  For filling four consecutive
 *  slots in the opcode decoding tables with the same handler. */
#define IEMOP_X4(a_fn) a_fn, a_fn, a_fn, a_fn
40
41
42/*********************************************************************************************************************************
43* Global Variables *
44*********************************************************************************************************************************/
45#ifndef TST_IEM_CHECK_MC
46
/** Function table for the BSF instruction.
 * Entries are ordered u8, u16, u32, u64; BSF has no 8-bit form, hence the
 * leading NULLs.  NOTE(review): each size has a second slot that is NULL
 * here — presumably the locked-variant member of IEMOPBINSIZES; confirm
 * against the struct declaration. */
IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsf =
{
    NULL, NULL,
    iemAImpl_bsf_u16, NULL,
    iemAImpl_bsf_u32, NULL,
    iemAImpl_bsf_u64, NULL
};

/** Function table for the BSF instruction, AMD EFLAGS variant. */
IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsf_amd =
{
    NULL, NULL,
    iemAImpl_bsf_u16_amd, NULL,
    iemAImpl_bsf_u32_amd, NULL,
    iemAImpl_bsf_u64_amd, NULL
};

/** Function table for the BSF instruction, Intel EFLAGS variant. */
IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsf_intel =
{
    NULL, NULL,
    iemAImpl_bsf_u16_intel, NULL,
    iemAImpl_bsf_u32_intel, NULL,
    iemAImpl_bsf_u64_intel, NULL
};

/** EFLAGS variation selection table for the BSF instruction.
 * Entries: plain variant, Intel variant, AMD variant, plain variant again.
 * NOTE(review): verify the index semantics at the lookup site. */
IEM_STATIC const IEMOPBINSIZES * const g_iemAImpl_bsf_eflags[] =
{
    &g_iemAImpl_bsf,
    &g_iemAImpl_bsf_intel,
    &g_iemAImpl_bsf_amd,
    &g_iemAImpl_bsf,
};
82
/** Function table for the BSR instruction.
 * Ordered u8, u16, u32, u64; BSR has no 8-bit form, hence the leading NULLs. */
IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsr =
{
    NULL, NULL,
    iemAImpl_bsr_u16, NULL,
    iemAImpl_bsr_u32, NULL,
    iemAImpl_bsr_u64, NULL
};

/** Function table for the BSR instruction, AMD EFLAGS variant. */
IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsr_amd =
{
    NULL, NULL,
    iemAImpl_bsr_u16_amd, NULL,
    iemAImpl_bsr_u32_amd, NULL,
    iemAImpl_bsr_u64_amd, NULL
};

/** Function table for the BSR instruction, Intel EFLAGS variant. */
IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsr_intel =
{
    NULL, NULL,
    iemAImpl_bsr_u16_intel, NULL,
    iemAImpl_bsr_u32_intel, NULL,
    iemAImpl_bsr_u64_intel, NULL
};

/** EFLAGS variation selection table for the BSR instruction
 *  (plain, Intel, AMD, plain). */
IEM_STATIC const IEMOPBINSIZES * const g_iemAImpl_bsr_eflags[] =
{
    &g_iemAImpl_bsr,
    &g_iemAImpl_bsr_intel,
    &g_iemAImpl_bsr_amd,
    &g_iemAImpl_bsr,
};
118
/** Function table for the two-operand IMUL instruction.
 * Ordered u8, u16, u32, u64; the two-operand form has no 8-bit encoding,
 * hence the leading NULLs. */
IEM_STATIC const IEMOPBINSIZES g_iemAImpl_imul_two =
{
    NULL, NULL,
    iemAImpl_imul_two_u16, NULL,
    iemAImpl_imul_two_u32, NULL,
    iemAImpl_imul_two_u64, NULL
};

/** Function table for the IMUL instruction, AMD EFLAGS variant. */
IEM_STATIC const IEMOPBINSIZES g_iemAImpl_imul_two_amd =
{
    NULL, NULL,
    iemAImpl_imul_two_u16_amd, NULL,
    iemAImpl_imul_two_u32_amd, NULL,
    iemAImpl_imul_two_u64_amd, NULL
};

/** Function table for the IMUL instruction, Intel EFLAGS variant. */
IEM_STATIC const IEMOPBINSIZES g_iemAImpl_imul_two_intel =
{
    NULL, NULL,
    iemAImpl_imul_two_u16_intel, NULL,
    iemAImpl_imul_two_u32_intel, NULL,
    iemAImpl_imul_two_u64_intel, NULL
};

/** EFLAGS variation selection table for the IMUL instruction
 *  (plain, Intel, AMD, plain). */
IEM_STATIC const IEMOPBINSIZES * const g_iemAImpl_imul_two_eflags[] =
{
    &g_iemAImpl_imul_two,
    &g_iemAImpl_imul_two_intel,
    &g_iemAImpl_imul_two_amd,
    &g_iemAImpl_imul_two,
};

/** EFLAGS variation selection table for the 16-bit IMUL instruction. */
IEM_STATIC PFNIEMAIMPLBINU16 const g_iemAImpl_imul_two_u16_eflags[] =
{
    iemAImpl_imul_two_u16,
    iemAImpl_imul_two_u16_intel,
    iemAImpl_imul_two_u16_amd,
    iemAImpl_imul_two_u16,
};

/** EFLAGS variation selection table for the 32-bit IMUL instruction. */
IEM_STATIC PFNIEMAIMPLBINU32 const g_iemAImpl_imul_two_u32_eflags[] =
{
    iemAImpl_imul_two_u32,
    iemAImpl_imul_two_u32_intel,
    iemAImpl_imul_two_u32_amd,
    iemAImpl_imul_two_u32,
};

/** EFLAGS variation selection table for the 64-bit IMUL instruction. */
IEM_STATIC PFNIEMAIMPLBINU64 const g_iemAImpl_imul_two_u64_eflags[] =
{
    iemAImpl_imul_two_u64,
    iemAImpl_imul_two_u64_intel,
    iemAImpl_imul_two_u64_amd,
    iemAImpl_imul_two_u64,
};
181
/** Function table for the ROL instruction (u8, u16, u32, u64). */
IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rol =
{
    iemAImpl_rol_u8,
    iemAImpl_rol_u16,
    iemAImpl_rol_u32,
    iemAImpl_rol_u64
};

/** Function table for the ROL instruction, AMD EFLAGS variant. */
IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rol_amd =
{
    iemAImpl_rol_u8_amd,
    iemAImpl_rol_u16_amd,
    iemAImpl_rol_u32_amd,
    iemAImpl_rol_u64_amd
};

/** Function table for the ROL instruction, Intel EFLAGS variant. */
IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rol_intel =
{
    iemAImpl_rol_u8_intel,
    iemAImpl_rol_u16_intel,
    iemAImpl_rol_u32_intel,
    iemAImpl_rol_u64_intel
};

/** EFLAGS variation selection table for the ROL instruction
 *  (plain, Intel, AMD, plain). */
IEM_STATIC const IEMOPSHIFTSIZES * const g_iemAImpl_rol_eflags[] =
{
    &g_iemAImpl_rol,
    &g_iemAImpl_rol_intel,
    &g_iemAImpl_rol_amd,
    &g_iemAImpl_rol,
};
217
218
/** Function table for the ROR instruction (u8, u16, u32, u64). */
IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_ror =
{
    iemAImpl_ror_u8,
    iemAImpl_ror_u16,
    iemAImpl_ror_u32,
    iemAImpl_ror_u64
};

/** Function table for the ROR instruction, AMD EFLAGS variant. */
IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_ror_amd =
{
    iemAImpl_ror_u8_amd,
    iemAImpl_ror_u16_amd,
    iemAImpl_ror_u32_amd,
    iemAImpl_ror_u64_amd
};

/** Function table for the ROR instruction, Intel EFLAGS variant. */
IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_ror_intel =
{
    iemAImpl_ror_u8_intel,
    iemAImpl_ror_u16_intel,
    iemAImpl_ror_u32_intel,
    iemAImpl_ror_u64_intel
};

/** EFLAGS variation selection table for the ROR instruction
 *  (plain, Intel, AMD, plain). */
IEM_STATIC const IEMOPSHIFTSIZES * const g_iemAImpl_ror_eflags[] =
{
    &g_iemAImpl_ror,
    &g_iemAImpl_ror_intel,
    &g_iemAImpl_ror_amd,
    &g_iemAImpl_ror,
};
254
255
/** Function table for the RCL instruction (u8, u16, u32, u64). */
IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcl =
{
    iemAImpl_rcl_u8,
    iemAImpl_rcl_u16,
    iemAImpl_rcl_u32,
    iemAImpl_rcl_u64
};

/** Function table for the RCL instruction, AMD EFLAGS variant. */
IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcl_amd =
{
    iemAImpl_rcl_u8_amd,
    iemAImpl_rcl_u16_amd,
    iemAImpl_rcl_u32_amd,
    iemAImpl_rcl_u64_amd
};

/** Function table for the RCL instruction, Intel EFLAGS variant. */
IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcl_intel =
{
    iemAImpl_rcl_u8_intel,
    iemAImpl_rcl_u16_intel,
    iemAImpl_rcl_u32_intel,
    iemAImpl_rcl_u64_intel
};

/** EFLAGS variation selection table for the RCL instruction
 *  (plain, Intel, AMD, plain). */
IEM_STATIC const IEMOPSHIFTSIZES * const g_iemAImpl_rcl_eflags[] =
{
    &g_iemAImpl_rcl,
    &g_iemAImpl_rcl_intel,
    &g_iemAImpl_rcl_amd,
    &g_iemAImpl_rcl,
};
291
292
/** Function table for the RCR instruction (u8, u16, u32, u64). */
IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcr =
{
    iemAImpl_rcr_u8,
    iemAImpl_rcr_u16,
    iemAImpl_rcr_u32,
    iemAImpl_rcr_u64
};

/** Function table for the RCR instruction, AMD EFLAGS variant. */
IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcr_amd =
{
    iemAImpl_rcr_u8_amd,
    iemAImpl_rcr_u16_amd,
    iemAImpl_rcr_u32_amd,
    iemAImpl_rcr_u64_amd
};

/** Function table for the RCR instruction, Intel EFLAGS variant. */
IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcr_intel =
{
    iemAImpl_rcr_u8_intel,
    iemAImpl_rcr_u16_intel,
    iemAImpl_rcr_u32_intel,
    iemAImpl_rcr_u64_intel
};

/** EFLAGS variation selection table for the RCR instruction
 *  (plain, Intel, AMD, plain). */
IEM_STATIC const IEMOPSHIFTSIZES * const g_iemAImpl_rcr_eflags[] =
{
    &g_iemAImpl_rcr,
    &g_iemAImpl_rcr_intel,
    &g_iemAImpl_rcr_amd,
    &g_iemAImpl_rcr,
};
328
329
/** Function table for the SHL instruction (u8, u16, u32, u64). */
IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shl =
{
    iemAImpl_shl_u8,
    iemAImpl_shl_u16,
    iemAImpl_shl_u32,
    iemAImpl_shl_u64
};

/** Function table for the SHL instruction, AMD EFLAGS variant. */
IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shl_amd =
{
    iemAImpl_shl_u8_amd,
    iemAImpl_shl_u16_amd,
    iemAImpl_shl_u32_amd,
    iemAImpl_shl_u64_amd
};

/** Function table for the SHL instruction, Intel EFLAGS variant. */
IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shl_intel =
{
    iemAImpl_shl_u8_intel,
    iemAImpl_shl_u16_intel,
    iemAImpl_shl_u32_intel,
    iemAImpl_shl_u64_intel
};

/** EFLAGS variation selection table for the SHL instruction
 *  (plain, Intel, AMD, plain). */
IEM_STATIC const IEMOPSHIFTSIZES * const g_iemAImpl_shl_eflags[] =
{
    &g_iemAImpl_shl,
    &g_iemAImpl_shl_intel,
    &g_iemAImpl_shl_amd,
    &g_iemAImpl_shl,
};
365
366
/** Function table for the SHR instruction (u8, u16, u32, u64). */
IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shr =
{
    iemAImpl_shr_u8,
    iemAImpl_shr_u16,
    iemAImpl_shr_u32,
    iemAImpl_shr_u64
};

/** Function table for the SHR instruction, AMD EFLAGS variant. */
IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shr_amd =
{
    iemAImpl_shr_u8_amd,
    iemAImpl_shr_u16_amd,
    iemAImpl_shr_u32_amd,
    iemAImpl_shr_u64_amd
};

/** Function table for the SHR instruction, Intel EFLAGS variant. */
IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shr_intel =
{
    iemAImpl_shr_u8_intel,
    iemAImpl_shr_u16_intel,
    iemAImpl_shr_u32_intel,
    iemAImpl_shr_u64_intel
};

/** EFLAGS variation selection table for the SHR instruction
 *  (plain, Intel, AMD, plain). */
IEM_STATIC const IEMOPSHIFTSIZES * const g_iemAImpl_shr_eflags[] =
{
    &g_iemAImpl_shr,
    &g_iemAImpl_shr_intel,
    &g_iemAImpl_shr_amd,
    &g_iemAImpl_shr,
};
402
403
/** Function table for the SAR instruction (u8, u16, u32, u64). */
IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_sar =
{
    iemAImpl_sar_u8,
    iemAImpl_sar_u16,
    iemAImpl_sar_u32,
    iemAImpl_sar_u64
};

/** Function table for the SAR instruction, AMD EFLAGS variant. */
IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_sar_amd =
{
    iemAImpl_sar_u8_amd,
    iemAImpl_sar_u16_amd,
    iemAImpl_sar_u32_amd,
    iemAImpl_sar_u64_amd
};

/** Function table for the SAR instruction, Intel EFLAGS variant. */
IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_sar_intel =
{
    iemAImpl_sar_u8_intel,
    iemAImpl_sar_u16_intel,
    iemAImpl_sar_u32_intel,
    iemAImpl_sar_u64_intel
};

/** EFLAGS variation selection table for the SAR instruction
 *  (plain, Intel, AMD, plain). */
IEM_STATIC const IEMOPSHIFTSIZES * const g_iemAImpl_sar_eflags[] =
{
    &g_iemAImpl_sar,
    &g_iemAImpl_sar_intel,
    &g_iemAImpl_sar_amd,
    &g_iemAImpl_sar,
};
439
440
/** Function table for the MUL instruction (u8, u16, u32, u64). */
IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_mul =
{
    iemAImpl_mul_u8,
    iemAImpl_mul_u16,
    iemAImpl_mul_u32,
    iemAImpl_mul_u64
};

/** Function table for the MUL instruction, AMD EFLAGS variation. */
IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_mul_amd =
{
    iemAImpl_mul_u8_amd,
    iemAImpl_mul_u16_amd,
    iemAImpl_mul_u32_amd,
    iemAImpl_mul_u64_amd
};

/** Function table for the MUL instruction, Intel EFLAGS variation. */
IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_mul_intel =
{
    iemAImpl_mul_u8_intel,
    iemAImpl_mul_u16_intel,
    iemAImpl_mul_u32_intel,
    iemAImpl_mul_u64_intel
};

/** EFLAGS variation selection table for the MUL instruction
 *  (plain, Intel, AMD, plain). */
IEM_STATIC const IEMOPMULDIVSIZES * const g_iemAImpl_mul_eflags[] =
{
    &g_iemAImpl_mul,
    &g_iemAImpl_mul_intel,
    &g_iemAImpl_mul_amd,
    &g_iemAImpl_mul,
};

/** EFLAGS variation selection table for the 8-bit MUL instruction. */
IEM_STATIC PFNIEMAIMPLMULDIVU8 const g_iemAImpl_mul_u8_eflags[] =
{
    iemAImpl_mul_u8,
    iemAImpl_mul_u8_intel,
    iemAImpl_mul_u8_amd,
    iemAImpl_mul_u8
};
485
486
/** Function table for the IMUL instruction working implicitly on rAX
 *  (u8, u16, u32, u64). */
IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_imul =
{
    iemAImpl_imul_u8,
    iemAImpl_imul_u16,
    iemAImpl_imul_u32,
    iemAImpl_imul_u64
};

/** Function table for the IMUL instruction working implicitly on rAX, AMD EFLAGS variation. */
IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_imul_amd =
{
    iemAImpl_imul_u8_amd,
    iemAImpl_imul_u16_amd,
    iemAImpl_imul_u32_amd,
    iemAImpl_imul_u64_amd
};

/** Function table for the IMUL instruction working implicitly on rAX, Intel EFLAGS variation. */
IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_imul_intel =
{
    iemAImpl_imul_u8_intel,
    iemAImpl_imul_u16_intel,
    iemAImpl_imul_u32_intel,
    iemAImpl_imul_u64_intel
};

/** EFLAGS variation selection table for the IMUL instruction
 *  (plain, Intel, AMD, plain). */
IEM_STATIC const IEMOPMULDIVSIZES * const g_iemAImpl_imul_eflags[] =
{
    &g_iemAImpl_imul,
    &g_iemAImpl_imul_intel,
    &g_iemAImpl_imul_amd,
    &g_iemAImpl_imul,
};

/** EFLAGS variation selection table for the 8-bit IMUL instruction. */
IEM_STATIC PFNIEMAIMPLMULDIVU8 const g_iemAImpl_imul_u8_eflags[] =
{
    iemAImpl_imul_u8,
    iemAImpl_imul_u8_intel,
    iemAImpl_imul_u8_amd,
    iemAImpl_imul_u8
};
531
532
/** Function table for the DIV instruction (u8, u16, u32, u64). */
IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_div =
{
    iemAImpl_div_u8,
    iemAImpl_div_u16,
    iemAImpl_div_u32,
    iemAImpl_div_u64
};

/** Function table for the DIV instruction, AMD EFLAGS variation. */
IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_div_amd =
{
    iemAImpl_div_u8_amd,
    iemAImpl_div_u16_amd,
    iemAImpl_div_u32_amd,
    iemAImpl_div_u64_amd
};

/** Function table for the DIV instruction, Intel EFLAGS variation. */
IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_div_intel =
{
    iemAImpl_div_u8_intel,
    iemAImpl_div_u16_intel,
    iemAImpl_div_u32_intel,
    iemAImpl_div_u64_intel
};

/** EFLAGS variation selection table for the DIV instruction
 *  (plain, Intel, AMD, plain). */
IEM_STATIC const IEMOPMULDIVSIZES * const g_iemAImpl_div_eflags[] =
{
    &g_iemAImpl_div,
    &g_iemAImpl_div_intel,
    &g_iemAImpl_div_amd,
    &g_iemAImpl_div,
};

/** EFLAGS variation selection table for the 8-bit DIV instruction. */
IEM_STATIC PFNIEMAIMPLMULDIVU8 const g_iemAImpl_div_u8_eflags[] =
{
    iemAImpl_div_u8,
    iemAImpl_div_u8_intel,
    iemAImpl_div_u8_amd,
    iemAImpl_div_u8
};
577
578
/** Function table for the IDIV instruction (u8, u16, u32, u64). */
IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_idiv =
{
    iemAImpl_idiv_u8,
    iemAImpl_idiv_u16,
    iemAImpl_idiv_u32,
    iemAImpl_idiv_u64
};

/** Function table for the IDIV instruction, AMD EFLAGS variation. */
IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_idiv_amd =
{
    iemAImpl_idiv_u8_amd,
    iemAImpl_idiv_u16_amd,
    iemAImpl_idiv_u32_amd,
    iemAImpl_idiv_u64_amd
};

/** Function table for the IDIV instruction, Intel EFLAGS variation. */
IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_idiv_intel =
{
    iemAImpl_idiv_u8_intel,
    iemAImpl_idiv_u16_intel,
    iemAImpl_idiv_u32_intel,
    iemAImpl_idiv_u64_intel
};

/** EFLAGS variation selection table for the IDIV instruction
 *  (plain, Intel, AMD, plain). */
IEM_STATIC const IEMOPMULDIVSIZES * const g_iemAImpl_idiv_eflags[] =
{
    &g_iemAImpl_idiv,
    &g_iemAImpl_idiv_intel,
    &g_iemAImpl_idiv_amd,
    &g_iemAImpl_idiv,
};

/** EFLAGS variation selection table for the 8-bit IDIV instruction. */
IEM_STATIC PFNIEMAIMPLMULDIVU8 const g_iemAImpl_idiv_u8_eflags[] =
{
    iemAImpl_idiv_u8,
    iemAImpl_idiv_u8_intel,
    iemAImpl_idiv_u8_amd,
    iemAImpl_idiv_u8
};
623
624
/** Function table for the SHLD instruction (u16, u32, u64 — SHLD has no
 *  8-bit form, so unlike the single-shift tables there is no u8 entry). */
IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shld =
{
    iemAImpl_shld_u16,
    iemAImpl_shld_u32,
    iemAImpl_shld_u64,
};

/** Function table for the SHLD instruction, AMD EFLAGS variation. */
IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shld_amd =
{
    iemAImpl_shld_u16_amd,
    iemAImpl_shld_u32_amd,
    iemAImpl_shld_u64_amd
};

/** Function table for the SHLD instruction, Intel EFLAGS variation. */
IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shld_intel =
{
    iemAImpl_shld_u16_intel,
    iemAImpl_shld_u32_intel,
    iemAImpl_shld_u64_intel
};

/** EFLAGS variation selection table for the SHLD instruction
 *  (plain, Intel, AMD, plain). */
IEM_STATIC const IEMOPSHIFTDBLSIZES * const g_iemAImpl_shld_eflags[] =
{
    &g_iemAImpl_shld,
    &g_iemAImpl_shld_intel,
    &g_iemAImpl_shld_amd,
    &g_iemAImpl_shld
};
657
/** Function table for the SHRD instruction (u16, u32, u64 — no 8-bit form). */
IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shrd =
{
    iemAImpl_shrd_u16,
    iemAImpl_shrd_u32,
    iemAImpl_shrd_u64
};

/** Function table for the SHRD instruction, AMD EFLAGS variation. */
IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shrd_amd =
{
    iemAImpl_shrd_u16_amd,
    iemAImpl_shrd_u32_amd,
    iemAImpl_shrd_u64_amd
};

/** Function table for the SHRD instruction, Intel EFLAGS variation. */
IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shrd_intel =
{
    iemAImpl_shrd_u16_intel,
    iemAImpl_shrd_u32_intel,
    iemAImpl_shrd_u64_intel
};

/** EFLAGS variation selection table for the SHRD instruction
 *  (plain, Intel, AMD, plain). */
IEM_STATIC const IEMOPSHIFTDBLSIZES * const g_iemAImpl_shrd_eflags[] =
{
    &g_iemAImpl_shrd,
    &g_iemAImpl_shrd_intel,
    &g_iemAImpl_shrd_amd,
    &g_iemAImpl_shrd
};
690
691
# if (defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)) && !defined(IEM_WITHOUT_ASSEMBLY)
/** Function table for the VPAND instruction (u128/u256 assembly workers). */
IEM_STATIC const IEMOPMEDIAOPTF3 g_iemAImpl_vpand  = { iemAImpl_vpand_u128,  iemAImpl_vpand_u256 };
/** Function table for the VPANDN instruction (u128/u256 assembly workers). */
IEM_STATIC const IEMOPMEDIAOPTF3 g_iemAImpl_vpandn = { iemAImpl_vpandn_u128, iemAImpl_vpandn_u256 };
/** Function table for the VPOR instruction (u128/u256 assembly workers). */
IEM_STATIC const IEMOPMEDIAOPTF3 g_iemAImpl_vpor   = { iemAImpl_vpor_u128,   iemAImpl_vpor_u256 };
/** Function table for the VPXOR instruction (u128/u256 assembly workers). */
IEM_STATIC const IEMOPMEDIAOPTF3 g_iemAImpl_vpxor  = { iemAImpl_vpxor_u128,  iemAImpl_vpxor_u256 };
# endif
702
703/** Function table for the VPAND instruction, software fallback. */
704IEM_STATIC const IEMOPMEDIAOPTF3 g_iemAImpl_vpand_fallback = { iemAImpl_vpand_u128_fallback, iemAImpl_vpand_u256_fallback };
705/** Function table for the VPANDN instruction, software fallback. */
706IEM_STATIC const IEMOPMEDIAOPTF3 g_iemAImpl_vpandn_fallback= { iemAImpl_vpandn_u128_fallback, iemAImpl_vpandn_u256_fallback };
707/** Function table for the VPOR instruction, software fallback. */
708IEM_STATIC const IEMOPMEDIAOPTF3 g_iemAImpl_vpor_fallback = { iemAImpl_vpor_u128_fallback, iemAImpl_vpor_u256_fallback };
709/** Function table for the VPXOR instruction, software fallback. */
710IEM_STATIC const IEMOPMEDIAOPTF3 g_iemAImpl_vpxor_fallback = { iemAImpl_vpxor_u128_fallback, iemAImpl_vpxor_u256_fallback };
711
712#endif /* !TST_IEM_CHECK_MC */
713
714
715
716#if defined(TST_IEM_CHECK_MC) || defined(IEM_WITH_ONE_BYTE_TABLE) || defined(IEM_WITH_TWO_BYTE_TABLE)
/** Opcodes 0xf1, 0xd6 — plain invalid opcode: log the mnemonic and raise
 *  \#UD without consuming any further bytes. */
FNIEMOP_DEF(iemOp_Invalid)
{
    IEMOP_MNEMONIC(Invalid, "Invalid");
    IEMOP_RAISE_INVALID_OPCODE_RET();
}
723#endif
724
725
726#if defined(TST_IEM_CHECK_MC) || defined(IEM_WITH_TWO_BYTE_TABLE) || defined(IEM_WITH_VEX_TABLES)
/** Invalid opcode with a ModR/M byte.  The caller has already fetched bRm;
 *  it is intentionally unused — no further bytes are decoded before \#UD. */
FNIEMOPRM_DEF(iemOp_InvalidWithRM)
{
    RT_NOREF_PV(bRm);
    IEMOP_MNEMONIC(InvalidWithRm, "InvalidWithRM");
    IEMOP_RAISE_INVALID_OPCODE_RET();
}
734#endif
735
736
737#if defined(TST_IEM_CHECK_MC) || defined(IEM_WITH_TWO_BYTE_TABLE)
/** Invalid with RM byte where intel decodes any additional address encoding
 * bytes. */
FNIEMOPRM_DEF(iemOp_InvalidWithRMNeedDecode)
{
    IEMOP_MNEMONIC(InvalidWithRMNeedDecode, "InvalidWithRMNeedDecode");
    /* Intel CPUs consume the SIB/displacement bytes implied by bRm before \#UD. */
    if (pVCpu->iem.s.enmCpuVendor == CPUMCPUVENDOR_INTEL && IEM_IS_MODRM_MEM_MODE(bRm))
        IEM_OPCODE_SKIP_RM_EFF_ADDR_BYTES(bRm);
    IEMOP_HLP_DONE_DECODING();
    IEMOP_RAISE_INVALID_OPCODE_RET();
}
748#endif
749
750
751#if defined(TST_IEM_CHECK_MC) || defined(IEM_WITH_TWO_BYTE_TABLE)
/** Invalid with RM byte where both AMD and Intel decodes any additional
 * address encoding bytes. */
FNIEMOPRM_DEF(iemOp_InvalidWithRMAllNeeded)
{
    IEMOP_MNEMONIC(InvalidWithRMAllNeeded, "InvalidWithRMAllNeeded");
    /* Unconditionally skip the SIB/displacement bytes implied by bRm before \#UD. */
    if (IEM_IS_MODRM_MEM_MODE(bRm))
        IEM_OPCODE_SKIP_RM_EFF_ADDR_BYTES(bRm);
    IEMOP_HLP_DONE_DECODING();
    IEMOP_RAISE_INVALID_OPCODE_RET();
}
762#endif
763
764
765#if defined(TST_IEM_CHECK_MC) || defined(IEM_WITH_TWO_BYTE_TABLE) || defined(IEM_WITH_VEX_TABLES)
/** Invalid with RM byte where intel requires an 8-bit immediate.
 * Intel will also need SIB and displacement if bRm indicates memory. */
FNIEMOPRM_DEF(iemOp_InvalidWithRMNeedImm8)
{
    IEMOP_MNEMONIC(InvalidWithRMNeedImm8, "InvalidWithRMNeedImm8");
    /* Intel consumes the rest of the would-be encoding (addressing bytes + imm8)
       before raising \#UD; AMD raises it right away. */
    if (pVCpu->iem.s.enmCpuVendor == CPUMCPUVENDOR_INTEL)
    {
        if (IEM_IS_MODRM_MEM_MODE(bRm))
            IEM_OPCODE_SKIP_RM_EFF_ADDR_BYTES(bRm);
        uint8_t bImm8; IEM_OPCODE_GET_NEXT_U8(&bImm8); RT_NOREF(bImm8);
    }
    IEMOP_HLP_DONE_DECODING();
    IEMOP_RAISE_INVALID_OPCODE_RET();
}
780#endif
781
782
783#if defined(TST_IEM_CHECK_MC) || defined(IEM_WITH_TWO_BYTE_TABLE)
/** Invalid with RM byte where an 8-bit immediate is required.
 * Both AMD and Intel also needs SIB and displacement according to bRm. */
FNIEMOPRM_DEF(iemOp_InvalidWithRMAllNeedImm8)
{
    IEMOP_MNEMONIC(InvalidWithRMAllNeedImm8, "InvalidWithRMAllNeedImm8");
    /* Unconditionally consume addressing bytes and the imm8 before \#UD. */
    if (IEM_IS_MODRM_MEM_MODE(bRm))
        IEM_OPCODE_SKIP_RM_EFF_ADDR_BYTES(bRm);
    uint8_t bImm8; IEM_OPCODE_GET_NEXT_U8(&bImm8); RT_NOREF(bImm8);
    IEMOP_HLP_DONE_DECODING();
    IEMOP_RAISE_INVALID_OPCODE_RET();
}
795#endif
796
797
798#if defined(TST_IEM_CHECK_MC) || defined(IEM_WITH_TWO_BYTE_TABLE) || defined(IEM_WITH_THREE_BYTE_TABLES) || defined(IEM_WITH_VEX_TABLES)
/** Invalid opcode where intel requires Mod R/M sequence. */
FNIEMOP_DEF(iemOp_InvalidNeedRM)
{
    IEMOP_MNEMONIC(InvalidNeedRM, "InvalidNeedRM");
    /* Intel fetches the ModR/M byte (and any addressing bytes it implies)
       before raising \#UD; AMD raises it without consuming more bytes. */
    if (pVCpu->iem.s.enmCpuVendor == CPUMCPUVENDOR_INTEL)
    {
        uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm); RT_NOREF(bRm);
        if (IEM_IS_MODRM_MEM_MODE(bRm))
            IEM_OPCODE_SKIP_RM_EFF_ADDR_BYTES(bRm);
    }
    IEMOP_HLP_DONE_DECODING();
    IEMOP_RAISE_INVALID_OPCODE_RET();
}
812#endif
813
814
815#if defined(TST_IEM_CHECK_MC) || defined(IEM_WITH_ONE_BYTE_TABLE)
/** Invalid opcode where both AMD and Intel requires Mod R/M sequence. */
FNIEMOP_DEF(iemOp_InvalidAllNeedRM)
{
    IEMOP_MNEMONIC(InvalidAllNeedRM, "InvalidAllNeedRM");
    /* Unconditionally fetch the ModR/M byte and skip implied addressing bytes. */
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm); RT_NOREF(bRm);
    if (IEM_IS_MODRM_MEM_MODE(bRm))
        IEM_OPCODE_SKIP_RM_EFF_ADDR_BYTES(bRm);
    IEMOP_HLP_DONE_DECODING();
    IEMOP_RAISE_INVALID_OPCODE_RET();
}
826#endif
827
828
829#if defined(TST_IEM_CHECK_MC) || defined(IEM_WITH_TWO_BYTE_TABLE) || defined(IEM_WITH_THREE_BYTE_TABLES) || defined(IEM_WITH_VEX_TABLES)
/** Invalid opcode where intel requires Mod R/M sequence and an 8-bit
 * immediate. */
FNIEMOP_DEF(iemOp_InvalidNeedRMImm8)
{
    IEMOP_MNEMONIC(InvalidNeedRMImm8, "InvalidNeedRMImm8");
    /* Intel consumes ModR/M + addressing bytes + imm8 before \#UD. */
    if (pVCpu->iem.s.enmCpuVendor == CPUMCPUVENDOR_INTEL)
    {
        uint8_t bRm;  IEM_OPCODE_GET_NEXT_U8(&bRm);  RT_NOREF(bRm);
        if (IEM_IS_MODRM_MEM_MODE(bRm))
            IEM_OPCODE_SKIP_RM_EFF_ADDR_BYTES(bRm);
        uint8_t bImm; IEM_OPCODE_GET_NEXT_U8(&bImm); RT_NOREF(bImm);
    }
    IEMOP_HLP_DONE_DECODING();
    IEMOP_RAISE_INVALID_OPCODE_RET();
}
845#endif
846
847
848#if defined(TST_IEM_CHECK_MC) || defined(IEM_WITH_TWO_BYTE_TABLE)
/** Invalid opcode where intel requires a 3rd escape byte and a Mod R/M
 * sequence. */
FNIEMOP_DEF(iemOp_InvalidNeed3ByteEscRM)
{
    IEMOP_MNEMONIC(InvalidNeed3ByteEscRM, "InvalidNeed3ByteEscRM");
    /* Intel consumes the 3rd escape byte, the ModR/M byte and any implied
       addressing bytes before raising \#UD. */
    if (pVCpu->iem.s.enmCpuVendor == CPUMCPUVENDOR_INTEL)
    {
        uint8_t b3rd; IEM_OPCODE_GET_NEXT_U8(&b3rd); RT_NOREF(b3rd);
        uint8_t bRm;  IEM_OPCODE_GET_NEXT_U8(&bRm);  RT_NOREF(bRm);
        if (IEM_IS_MODRM_MEM_MODE(bRm))
            IEM_OPCODE_SKIP_RM_EFF_ADDR_BYTES(bRm);
    }
    IEMOP_HLP_DONE_DECODING();
    IEMOP_RAISE_INVALID_OPCODE_RET();
}
864#endif
865
866
867#if defined(TST_IEM_CHECK_MC) || defined(IEM_WITH_TWO_BYTE_TABLE)
/** Invalid opcode where intel requires a 3rd escape byte, Mod R/M sequence,
 * and an 8-bit immediate. */
FNIEMOP_DEF(iemOp_InvalidNeed3ByteEscRMImm8)
{
    IEMOP_MNEMONIC(InvalidNeed3ByteEscRMImm8, "InvalidNeed3ByteEscRMImm8");
    /* Intel consumes the 3rd escape byte, ModR/M + addressing bytes, and the
       imm8 before raising \#UD. */
    if (pVCpu->iem.s.enmCpuVendor == CPUMCPUVENDOR_INTEL)
    {
        uint8_t b3rd; IEM_OPCODE_GET_NEXT_U8(&b3rd); RT_NOREF(b3rd);
        uint8_t bRm;  IEM_OPCODE_GET_NEXT_U8(&bRm);  RT_NOREF(bRm);
        if (IEM_IS_MODRM_MEM_MODE(bRm))
            IEM_OPCODE_SKIP_RM_EFF_ADDR_BYTES(bRm);
        uint8_t bImm; IEM_OPCODE_GET_NEXT_U8(&bImm); RT_NOREF(bImm);
    }
    IEMOP_HLP_DONE_DECODING();
    IEMOP_RAISE_INVALID_OPCODE_RET();
}
884#endif
885
886#if defined(IEM_WITH_ONE_BYTE_TABLE) || defined(IEM_WITH_TWO_BYTE_TABLE)
887
/**
 * Common 'push segment-register' helper.
 *
 * Pushes segment register @a iReg using the current effective operand size.
 *
 * @param   iReg    The segment register index (X86_SREG_XXX).  In 64-bit code
 *                  only FS/GS are allowed here (asserted below).
 */
FNIEMOP_DEF_1(iemOpCommonPushSReg, uint8_t, iReg)
{
    Assert(iReg >= X86_SREG_FS || !IEM_IS_64BIT_CODE(pVCpu));
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();

    switch (pVCpu->iem.s.enmEffOpSize)
    {
        case IEMMODE_16BIT:
            IEM_MC_BEGIN(0, 0);
            IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
            IEM_MC_LOCAL(uint16_t, u16Value);
            IEM_MC_FETCH_SREG_U16(u16Value, iReg);
            IEM_MC_PUSH_U16(u16Value);
            IEM_MC_ADVANCE_RIP_AND_FINISH();
            IEM_MC_END();
            break;

        case IEMMODE_32BIT:
            IEM_MC_BEGIN(IEM_MC_F_MIN_386, 0);
            IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
            IEM_MC_LOCAL(uint32_t, u32Value);
            IEM_MC_FETCH_SREG_ZX_U32(u32Value, iReg);
            IEM_MC_PUSH_U32_SREG(u32Value); /* Intel CPUs do funny things with this instruction. */
            IEM_MC_ADVANCE_RIP_AND_FINISH();
            IEM_MC_END();
            break;

        case IEMMODE_64BIT:
            IEM_MC_BEGIN(IEM_MC_F_64BIT, 0);
            IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
            IEM_MC_LOCAL(uint64_t, u64Value);
            IEM_MC_FETCH_SREG_ZX_U64(u64Value, iReg);
            IEM_MC_PUSH_U64(u64Value);
            IEM_MC_ADVANCE_RIP_AND_FINISH();
            IEM_MC_END();
            break;

        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
}
931
932
/**
 * Common worker for the LDS/LES/LFS/LGS/LSS style 'load segment register and
 * general register from a far pointer in memory' instructions.
 *
 * Fetches offset + selector from memory at the effective address and defers
 * the actual register loading to iemCImpl_load_SReg_Greg.
 *
 * @param   iSegReg     The destination segment register (X86_SREG_XXX).
 * @param   bRm         The ModR/M byte; must indicate a memory operand
 *                      (asserted below).
 *
 * NOTE(review): the 16-bit and 32-bit cases end with IEM_MC_END() and no
 * 'break' — presumably the IEM_MC_CALL_CIMPL_5/IEM_MC_END expansion returns,
 * so control never falls into the next case; confirm the macro expansions.
 */
FNIEMOP_DEF_2(iemOpCommonLoadSRegAndGreg, uint8_t, iSegReg, uint8_t, bRm)
{
    Assert(IEM_IS_MODRM_MEM_MODE(bRm)); /* Caller checks this */
    uint8_t const iGReg = IEM_GET_MODRM_REG(pVCpu, bRm);

    switch (pVCpu->iem.s.enmEffOpSize)
    {
        case IEMMODE_16BIT:
            if (iSegReg >= X86_SREG_FS || !IEM_IS_32BIT_CODE(pVCpu)) /* IEM_CIMPL_F_XXX flag are combined for whole MC block, */
            {                                                        /* thus the duplication. */
                IEM_MC_BEGIN(0, 0);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEff);
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEff, bRm, 0);
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                IEM_MC_ARG(uint16_t, offSeg, 1);
                IEM_MC_FETCH_MEM_U16(offSeg, pVCpu->iem.s.iEffSeg, GCPtrEff); /** @todo check memory access pattern */
                IEM_MC_ARG(uint16_t, uSel, 0);
                /* Selector word follows the 16-bit offset at +2. */
                IEM_MC_FETCH_MEM_U16_DISP(uSel, pVCpu->iem.s.iEffSeg, GCPtrEff, 2);
                IEM_MC_ARG_CONST(uint8_t, iSegRegArg,/*=*/iSegReg, 2);
                IEM_MC_ARG_CONST(uint8_t, iGRegArg, /*=*/iGReg, 3);
                IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize,/*=*/pVCpu->iem.s.enmEffOpSize, 4);
                IEM_MC_CALL_CIMPL_5( 0,
                                    RT_BIT_64(kIemNativeGstReg_GprFirst + iGReg)
                                    | RT_BIT_64(kIemNativeGstReg_SegSelFirst + iSegReg)
                                    | RT_BIT_64(kIemNativeGstReg_SegBaseFirst + iSegReg)
                                    | RT_BIT_64(kIemNativeGstReg_SegLimitFirst + iSegReg)
                                    | RT_BIT_64(kIemNativeGstReg_SegAttribFirst + iSegReg),
                                    iemCImpl_load_SReg_Greg, uSel, offSeg, iSegRegArg, iGRegArg, enmEffOpSize);
                IEM_MC_END();
            }
            else
            {
                /* Loading SS (or DS/ES in 32-bit code) can change the mode,
                   hence the IEM_CIMPL_F_MODE flag on this otherwise identical copy. */
                IEM_MC_BEGIN(0, 0);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEff);
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEff, bRm, 0);
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                IEM_MC_ARG(uint16_t, offSeg, 1);
                IEM_MC_FETCH_MEM_U16(offSeg, pVCpu->iem.s.iEffSeg, GCPtrEff); /** @todo check memory access pattern */
                IEM_MC_ARG(uint16_t, uSel, 0);
                IEM_MC_FETCH_MEM_U16_DISP(uSel, pVCpu->iem.s.iEffSeg, GCPtrEff, 2);
                IEM_MC_ARG_CONST(uint8_t, iSegRegArg,/*=*/iSegReg, 2);
                IEM_MC_ARG_CONST(uint8_t, iGRegArg, /*=*/iGReg, 3);
                IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize,/*=*/pVCpu->iem.s.enmEffOpSize, 4);
                IEM_MC_CALL_CIMPL_5(IEM_CIMPL_F_MODE,
                                    RT_BIT_64(kIemNativeGstReg_GprFirst + iGReg)
                                    | RT_BIT_64(kIemNativeGstReg_SegSelFirst + iSegReg)
                                    | RT_BIT_64(kIemNativeGstReg_SegBaseFirst + iSegReg)
                                    | RT_BIT_64(kIemNativeGstReg_SegLimitFirst + iSegReg)
                                    | RT_BIT_64(kIemNativeGstReg_SegAttribFirst + iSegReg),
                                    iemCImpl_load_SReg_Greg, uSel, offSeg, iSegRegArg, iGRegArg, enmEffOpSize);
                IEM_MC_END();
            }

        case IEMMODE_32BIT:
            if (iSegReg >= X86_SREG_FS || !IEM_IS_32BIT_CODE(pVCpu))
            {
                IEM_MC_BEGIN(IEM_MC_F_MIN_386, 0);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEff);
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEff, bRm, 0);
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                IEM_MC_ARG(uint32_t, offSeg, 1); /** @todo check memory access pattern */
                IEM_MC_FETCH_MEM_U32(offSeg, pVCpu->iem.s.iEffSeg, GCPtrEff);
                IEM_MC_ARG(uint16_t, uSel, 0);
                /* Selector word follows the 32-bit offset at +4. */
                IEM_MC_FETCH_MEM_U16_DISP(uSel, pVCpu->iem.s.iEffSeg, GCPtrEff, 4);
                IEM_MC_ARG_CONST(uint8_t, iSegRegArg,/*=*/iSegReg, 2);
                IEM_MC_ARG_CONST(uint8_t, iGRegArg, /*=*/iGReg, 3);
                IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize,/*=*/pVCpu->iem.s.enmEffOpSize, 4);
                IEM_MC_CALL_CIMPL_5( 0,
                                    RT_BIT_64(kIemNativeGstReg_GprFirst + iGReg)
                                    | RT_BIT_64(kIemNativeGstReg_SegSelFirst + iSegReg)
                                    | RT_BIT_64(kIemNativeGstReg_SegBaseFirst + iSegReg)
                                    | RT_BIT_64(kIemNativeGstReg_SegLimitFirst + iSegReg)
                                    | RT_BIT_64(kIemNativeGstReg_SegAttribFirst + iSegReg),
                                    iemCImpl_load_SReg_Greg, uSel, offSeg, iSegRegArg, iGRegArg, enmEffOpSize);
                IEM_MC_END();
            }
            else
            {
                /* Same as above but with IEM_CIMPL_F_MODE (possible mode change). */
                IEM_MC_BEGIN(IEM_MC_F_MIN_386, 0);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEff);
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEff, bRm, 0);
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                IEM_MC_ARG(uint32_t, offSeg, 1); /** @todo check memory access pattern */
                IEM_MC_FETCH_MEM_U32(offSeg, pVCpu->iem.s.iEffSeg, GCPtrEff);
                IEM_MC_ARG(uint16_t, uSel, 0);
                IEM_MC_FETCH_MEM_U16_DISP(uSel, pVCpu->iem.s.iEffSeg, GCPtrEff, 4);
                IEM_MC_ARG_CONST(uint8_t, iSegRegArg,/*=*/iSegReg, 2);
                IEM_MC_ARG_CONST(uint8_t, iGRegArg, /*=*/iGReg, 3);
                IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize,/*=*/pVCpu->iem.s.enmEffOpSize, 4);
                IEM_MC_CALL_CIMPL_5(IEM_CIMPL_F_MODE,
                                    RT_BIT_64(kIemNativeGstReg_GprFirst + iGReg)
                                    | RT_BIT_64(kIemNativeGstReg_SegSelFirst + iSegReg)
                                    | RT_BIT_64(kIemNativeGstReg_SegBaseFirst + iSegReg)
                                    | RT_BIT_64(kIemNativeGstReg_SegLimitFirst + iSegReg)
                                    | RT_BIT_64(kIemNativeGstReg_SegAttribFirst + iSegReg),
                                    iemCImpl_load_SReg_Greg, uSel, offSeg, iSegRegArg, iGRegArg, enmEffOpSize);
                IEM_MC_END();
            }

        case IEMMODE_64BIT:
            IEM_MC_BEGIN(IEM_MC_F_64BIT, 0);
            IEM_MC_ARG(uint16_t, uSel, 0);
            IEM_MC_ARG(uint64_t, offSeg, 1);
            IEM_MC_LOCAL(RTGCPTR, GCPtrEff);
            IEM_MC_CALC_RM_EFF_ADDR(GCPtrEff, bRm, 0);
            IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
IEM_MC_NO_NATIVE_RECOMPILE(); /** @todo sort out the IEM_IS_GUEST_CPU_AMD stuff. */
            if (IEM_IS_GUEST_CPU_AMD(pVCpu)) /** @todo testcase: rev 3.15 of the amd manuals claims it only loads a 32-bit greg. */
                IEM_MC_FETCH_MEM_U32_SX_U64(offSeg, pVCpu->iem.s.iEffSeg, GCPtrEff);
            else
                IEM_MC_FETCH_MEM_U64(offSeg, pVCpu->iem.s.iEffSeg, GCPtrEff);
            /* Selector word follows the 64-bit offset at +8. */
            IEM_MC_FETCH_MEM_U16_DISP(uSel, pVCpu->iem.s.iEffSeg, GCPtrEff, 8);
            IEM_MC_ARG_CONST(uint8_t, iSegRegArg,/*=*/iSegReg, 2);
            IEM_MC_ARG_CONST(uint8_t, iGRegArg, /*=*/iGReg, 3);
            IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize,/*=*/pVCpu->iem.s.enmEffOpSize, 4);
            IEM_MC_CALL_CIMPL_5(0,
                                RT_BIT_64(kIemNativeGstReg_GprFirst + iGReg)
                                | RT_BIT_64(kIemNativeGstReg_SegSelFirst + iSegReg)
                                | RT_BIT_64(kIemNativeGstReg_SegBaseFirst + iSegReg)
                                | RT_BIT_64(kIemNativeGstReg_SegLimitFirst + iSegReg)
                                | RT_BIT_64(kIemNativeGstReg_SegAttribFirst + iSegReg),
                                iemCImpl_load_SReg_Greg, uSel, offSeg, iSegRegArg, iGRegArg, enmEffOpSize);
            IEM_MC_END();

        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
}
1060
1061#endif
1062
1063#endif /* !VMM_INCLUDED_SRC_VMMAll_IEMAllInstCommon_cpp_h */
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette