VirtualBox

source: vbox/trunk/src/recompiler/target-i386/translate.c@54266

Last change on this file since 54266 was 46121, checked in by vboxsync, 11 years ago

REM: Undid error introduced by following Intel documentation.

  • Property svn:eol-style set to native
File size: 269.7 KB
1/*
2 * i386 translation
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19
20/*
21 * Oracle LGPL Disclaimer: For the avoidance of doubt, except that if any license choice
22 * other than GPL or LGPL is available it will apply instead, Oracle elects to use only
23 * the Lesser General Public License version 2.1 (LGPLv2) at this time for any software where
24 * a choice of LGPL license versions is made available with the language indicating
25 * that LGPLv2 or any later version may be used, or where a choice of which version
26 * of the LGPL is applied is otherwise unspecified.
27 */
28
29#include <stdarg.h>
30#include <stdlib.h>
31#include <stdio.h>
32#include <string.h>
33#ifndef VBOX
34#include <inttypes.h>
35#include <signal.h>
36#endif /* !VBOX */
37
38#include "cpu.h"
39#include "exec-all.h"
40#include "disas.h"
41#include "tcg-op.h"
42
43#include "helper.h"
44#define GEN_HELPER 1
45#include "helper.h"
46
47#define PREFIX_REPZ 0x01
48#define PREFIX_REPNZ 0x02
49#define PREFIX_LOCK 0x04
50#define PREFIX_DATA 0x08
51#define PREFIX_ADR 0x10
52
53#ifdef TARGET_X86_64
54#define X86_64_ONLY(x) x
55#define X86_64_DEF(...) __VA_ARGS__
56#define CODE64(s) ((s)->code64)
57#define REX_X(s) ((s)->rex_x)
58#define REX_B(s) ((s)->rex_b)
59# ifdef VBOX
60# define IS_LONG_MODE(s) ((s)->lma)
61# endif
62/* XXX: gcc generates push/pop in some opcodes, so we cannot use them */
63#if 1
64#define BUGGY_64(x) NULL
65#endif
66#else
67#define X86_64_ONLY(x) NULL
68#define X86_64_DEF(...)
69#define CODE64(s) 0
70#define REX_X(s) 0
71#define REX_B(s) 0
72# ifdef VBOX
73# define IS_LONG_MODE(s) 0
74# endif
75#endif
76
77//#define MACRO_TEST 1
78
79/* global register indexes */
80static TCGv_ptr cpu_env;
81static TCGv cpu_A0, cpu_cc_src, cpu_cc_dst, cpu_cc_tmp;
82static TCGv_i32 cpu_cc_op;
83static TCGv cpu_regs[CPU_NB_REGS];
84/* local temps */
85static TCGv cpu_T[2], cpu_T3;
86/* local register indexes (only used inside old micro ops) */
87static TCGv cpu_tmp0, cpu_tmp4;
88static TCGv_ptr cpu_ptr0, cpu_ptr1;
89static TCGv_i32 cpu_tmp2_i32, cpu_tmp3_i32;
90static TCGv_i64 cpu_tmp1_i64;
91static TCGv cpu_tmp5;
92
93static uint8_t gen_opc_cc_op[OPC_BUF_SIZE];
94
95#include "gen-icount.h"
96
97#ifdef TARGET_X86_64
98static int x86_64_hregs;
99#endif
100
101#ifdef VBOX
102
103/* Special/override code readers to hide patched code. */
104
105uint8_t ldub_code_raw(target_ulong pc)
106{
107 uint8_t b;
108
109# ifdef VBOX_WITH_RAW_MODE
110 if (!remR3GetOpcode(cpu_single_env, pc, &b))
111# endif
112 b = ldub_code(pc);
113 return b;
114}
115# define ldub_code(a) ldub_code_raw(a)
116
117uint16_t lduw_code_raw(target_ulong pc)
118{
119 uint16_t u16;
120 u16 = (uint16_t)ldub_code_raw(pc);
121 u16 |= (uint16_t)ldub_code_raw(pc + 1) << 8;
122 return u16;
123}
124# define lduw_code(a) lduw_code_raw(a)
125
126
127uint32_t ldl_code_raw(target_ulong pc)
128{
129 uint32_t u32;
130 u32 = (uint32_t)ldub_code_raw(pc);
131 u32 |= (uint32_t)ldub_code_raw(pc + 1) << 8;
132 u32 |= (uint32_t)ldub_code_raw(pc + 2) << 16;
133 u32 |= (uint32_t)ldub_code_raw(pc + 3) << 24;
134 return u32;
135}
136# define ldl_code(a) ldl_code_raw(a)
137
138#endif /* VBOX */
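/* [Illustrative aside, not part of the original file.]
 * The multi-byte readers above assemble values from single-byte fetches in
 * little-endian order, so every byte passes through the patch-aware
 * ldub_code_raw(). A minimal standalone sketch of the same idea, using a
 * hypothetical fetch_byte() callback in place of ldub_code_raw():
 */
#if 0
#include <stdint.h>

static uint32_t read_le32(uint8_t (*fetch_byte)(uint32_t), uint32_t pc)
{
    uint32_t v;
    v  = (uint32_t)fetch_byte(pc);           /* bits  7..0  */
    v |= (uint32_t)fetch_byte(pc + 1) << 8;  /* bits 15..8  */
    v |= (uint32_t)fetch_byte(pc + 2) << 16; /* bits 23..16 */
    v |= (uint32_t)fetch_byte(pc + 3) << 24; /* bits 31..24 */
    return v; /* e.g. bytes 0x78 0x56 0x34 0x12 yield 0x12345678 */
}
#endif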
139
140typedef struct DisasContext {
141 /* current insn context */
142 int override; /* -1 if no override */
143 int prefix;
144 int aflag, dflag;
145 target_ulong pc; /* pc = eip + cs_base */
146 int is_jmp; /* 1 means jump (stop translation), 2 means CPU
147 static state change (stop translation) */
148 /* current block context */
149 target_ulong cs_base; /* base of CS segment */
150 int pe; /* protected mode */
151 int code32; /* 32 bit code segment */
152#ifdef TARGET_X86_64
153 int lma; /* long mode active */
154 int code64; /* 64 bit code segment */
155 int rex_x, rex_b;
156#endif
157 int ss32; /* 32 bit stack segment */
158 int cc_op; /* current CC operation */
159 int addseg; /* non-zero if any of DS/ES/SS has a non-zero base */
160 int f_st; /* currently unused */
161 int vm86; /* vm86 mode */
162#ifdef VBOX
163 int vme; /* CR4.VME */
164 int pvi; /* CR4.PVI */
165 int record_call; /* record calls for CSAM or not? */
166#endif
167 int cpl;
168 int iopl;
169 int tf; /* TF cpu flag */
170 int singlestep_enabled; /* "hardware" single step enabled */
171 int jmp_opt; /* use direct block chaining for direct jumps */
172 int mem_index; /* select memory access functions */
173 uint64_t flags; /* all execution flags */
174 struct TranslationBlock *tb;
175 int popl_esp_hack; /* for correct popl with esp base handling */
176 int rip_offset; /* only used in x86_64, but left for simplicity */
177 int cpuid_features;
178 int cpuid_ext_features;
179 int cpuid_ext2_features;
180 int cpuid_ext3_features;
181} DisasContext;
182
183static void gen_eob(DisasContext *s);
184static void gen_jmp(DisasContext *s, target_ulong eip);
185static void gen_jmp_tb(DisasContext *s, target_ulong eip, int tb_num);
186
187#ifdef VBOX
188static void gen_check_external_event(void);
189#endif
190
191/* i386 arith/logic operations */
192enum {
193 OP_ADDL,
194 OP_ORL,
195 OP_ADCL,
196 OP_SBBL,
197 OP_ANDL,
198 OP_SUBL,
199 OP_XORL,
200 OP_CMPL,
201};
202
203/* i386 shift ops */
204enum {
205 OP_ROL,
206 OP_ROR,
207 OP_RCL,
208 OP_RCR,
209 OP_SHL,
210 OP_SHR,
211 OP_SHL1, /* undocumented */
212 OP_SAR = 7,
213};
214
215enum {
216 JCC_O,
217 JCC_B,
218 JCC_Z,
219 JCC_BE,
220 JCC_S,
221 JCC_P,
222 JCC_L,
223 JCC_LE,
224};
225
226/* operand size */
227enum {
228 OT_BYTE = 0,
229 OT_WORD,
230 OT_LONG,
231 OT_QUAD,
232};
233
234enum {
235 /* I386 int registers */
236 OR_EAX, /* MUST be even numbered */
237 OR_ECX,
238 OR_EDX,
239 OR_EBX,
240 OR_ESP,
241 OR_EBP,
242 OR_ESI,
243 OR_EDI,
244
245 OR_TMP0 = 16, /* temporary operand register */
246 OR_TMP1,
247 OR_A0, /* temporary register used when doing address evaluation */
248};
249
250static inline void gen_op_movl_T0_0(void)
251{
252 tcg_gen_movi_tl(cpu_T[0], 0);
253}
254
255static inline void gen_op_movl_T0_im(int32_t val)
256{
257 tcg_gen_movi_tl(cpu_T[0], val);
258}
259
260static inline void gen_op_movl_T0_imu(uint32_t val)
261{
262 tcg_gen_movi_tl(cpu_T[0], val);
263}
264
265static inline void gen_op_movl_T1_im(int32_t val)
266{
267 tcg_gen_movi_tl(cpu_T[1], val);
268}
269
270static inline void gen_op_movl_T1_imu(uint32_t val)
271{
272 tcg_gen_movi_tl(cpu_T[1], val);
273}
274
275static inline void gen_op_movl_A0_im(uint32_t val)
276{
277 tcg_gen_movi_tl(cpu_A0, val);
278}
279
280#ifdef TARGET_X86_64
281static inline void gen_op_movq_A0_im(int64_t val)
282{
283 tcg_gen_movi_tl(cpu_A0, val);
284}
285#endif
286
287static inline void gen_movtl_T0_im(target_ulong val)
288{
289 tcg_gen_movi_tl(cpu_T[0], val);
290}
291
292static inline void gen_movtl_T1_im(target_ulong val)
293{
294 tcg_gen_movi_tl(cpu_T[1], val);
295}
296
297static inline void gen_op_andl_T0_ffff(void)
298{
299 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xffff);
300}
301
302static inline void gen_op_andl_T0_im(uint32_t val)
303{
304 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], val);
305}
306
307static inline void gen_op_movl_T0_T1(void)
308{
309 tcg_gen_mov_tl(cpu_T[0], cpu_T[1]);
310}
311
312static inline void gen_op_andl_A0_ffff(void)
313{
314 tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffff);
315}
316
317#ifdef TARGET_X86_64
318
319#define NB_OP_SIZES 4
320
321#else /* !TARGET_X86_64 */
322
323#define NB_OP_SIZES 3
324
325#endif /* !TARGET_X86_64 */
326
327#if defined(HOST_WORDS_BIGENDIAN)
328#define REG_B_OFFSET (sizeof(target_ulong) - 1)
329#define REG_H_OFFSET (sizeof(target_ulong) - 2)
330#define REG_W_OFFSET (sizeof(target_ulong) - 2)
331#define REG_L_OFFSET (sizeof(target_ulong) - 4)
332#define REG_LH_OFFSET (sizeof(target_ulong) - 8)
333#else
334#define REG_B_OFFSET 0
335#define REG_H_OFFSET 1
336#define REG_W_OFFSET 0
337#define REG_L_OFFSET 0
338#define REG_LH_OFFSET 4
339#endif
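/* [Illustrative aside, not part of the original file.]
 * The REG_*_OFFSET values locate a sub-register (AL, AH, AX, EAX) inside the
 * host target_ulong that backs each guest register. On a little-endian host
 * the low byte is at offset 0; on a big-endian host it sits at the highest
 * address, hence sizeof(target_ulong) - 1. A standalone sketch (assumes an
 * 8-byte target_ulong, as with TARGET_X86_64):
 */
#if 0
#include <assert.h>
#include <stdint.h>
#include <string.h>

static void check_low_byte_offset(void)
{
    uint64_t reg = 0x1122334455667788ULL; /* low byte (AL) is 0x88 */
    uint8_t bytes[8];
    memcpy(bytes, &reg, 8);
#if defined(HOST_WORDS_BIGENDIAN)
    assert(bytes[sizeof(reg) - 1] == 0x88); /* REG_B_OFFSET = size - 1 */
#else
    assert(bytes[0] == 0x88);               /* REG_B_OFFSET = 0 */
#endif
}
#endif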
340
341static inline void gen_op_mov_reg_v(int ot, int reg, TCGv t0)
342{
343 TCGv tmp;
344
345 switch(ot) {
346 case OT_BYTE:
347 tmp = tcg_temp_new();
348 tcg_gen_ext8u_tl(tmp, t0);
349 if (reg < 4 X86_64_DEF( || reg >= 8 || x86_64_hregs)) {
350 tcg_gen_andi_tl(cpu_regs[reg], cpu_regs[reg], ~0xff);
351 tcg_gen_or_tl(cpu_regs[reg], cpu_regs[reg], tmp);
352 } else {
353 tcg_gen_shli_tl(tmp, tmp, 8);
354 tcg_gen_andi_tl(cpu_regs[reg - 4], cpu_regs[reg - 4], ~0xff00);
355 tcg_gen_or_tl(cpu_regs[reg - 4], cpu_regs[reg - 4], tmp);
356 }
357 tcg_temp_free(tmp);
358 break;
359 case OT_WORD:
360 tmp = tcg_temp_new();
361 tcg_gen_ext16u_tl(tmp, t0);
362 tcg_gen_andi_tl(cpu_regs[reg], cpu_regs[reg], ~0xffff);
363 tcg_gen_or_tl(cpu_regs[reg], cpu_regs[reg], tmp);
364 tcg_temp_free(tmp);
365 break;
366 default: /* XXX this shouldn't be reached; abort? */
367 case OT_LONG:
368 /* For x86_64, this sets the higher half of the register to zero.
369 For i386, this is equivalent to a mov. */
370 tcg_gen_ext32u_tl(cpu_regs[reg], t0);
371 break;
372#ifdef TARGET_X86_64
373 case OT_QUAD:
374 tcg_gen_mov_tl(cpu_regs[reg], t0);
375 break;
376#endif
377 }
378}
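/* [Illustrative aside, not part of the original file.]
 * For OT_BYTE the generated ops implement the classic mask-and-merge: writing
 * AL keeps bits 63..8 of RAX, while writing AH (register numbers 4-7 without
 * REX) lands in bits 15..8 of the register four places earlier. The same
 * logic in plain C, on a hypothetical regs[] array:
 */
#if 0
#include <stdint.h>

static void write_byte_reg(uint64_t *regs, int reg, uint8_t val, int use_high)
{
    if (!use_high) {                         /* AL/CL/DL/BL path */
        regs[reg] = (regs[reg] & ~0xffULL) | val;
    } else {                                 /* AH/CH/DH/BH path */
        regs[reg - 4] = (regs[reg - 4] & ~0xff00ULL)
                      | ((uint64_t)val << 8);
    }
}
#endif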
379
380static inline void gen_op_mov_reg_T0(int ot, int reg)
381{
382 gen_op_mov_reg_v(ot, reg, cpu_T[0]);
383}
384
385static inline void gen_op_mov_reg_T1(int ot, int reg)
386{
387 gen_op_mov_reg_v(ot, reg, cpu_T[1]);
388}
389
390static inline void gen_op_mov_reg_A0(int size, int reg)
391{
392 TCGv tmp;
393
394 switch(size) {
395 case 0:
396 tmp = tcg_temp_new();
397 tcg_gen_ext16u_tl(tmp, cpu_A0);
398 tcg_gen_andi_tl(cpu_regs[reg], cpu_regs[reg], ~0xffff);
399 tcg_gen_or_tl(cpu_regs[reg], cpu_regs[reg], tmp);
400 tcg_temp_free(tmp);
401 break;
402 default: /* XXX this shouldn't be reached; abort? */
403 case 1:
404 /* For x86_64, this sets the higher half of the register to zero.
405 For i386, this is equivalent to a mov. */
406 tcg_gen_ext32u_tl(cpu_regs[reg], cpu_A0);
407 break;
408#ifdef TARGET_X86_64
409 case 2:
410 tcg_gen_mov_tl(cpu_regs[reg], cpu_A0);
411 break;
412#endif
413 }
414}
415
416static inline void gen_op_mov_v_reg(int ot, TCGv t0, int reg)
417{
418 switch(ot) {
419 case OT_BYTE:
420 if (reg < 4 X86_64_DEF( || reg >= 8 || x86_64_hregs)) {
421 goto std_case;
422 } else {
423 tcg_gen_shri_tl(t0, cpu_regs[reg - 4], 8);
424 tcg_gen_ext8u_tl(t0, t0);
425 }
426 break;
427 default:
428 std_case:
429 tcg_gen_mov_tl(t0, cpu_regs[reg]);
430 break;
431 }
432}
433
434static inline void gen_op_mov_TN_reg(int ot, int t_index, int reg)
435{
436 gen_op_mov_v_reg(ot, cpu_T[t_index], reg);
437}
438
439static inline void gen_op_movl_A0_reg(int reg)
440{
441 tcg_gen_mov_tl(cpu_A0, cpu_regs[reg]);
442}
443
444static inline void gen_op_addl_A0_im(int32_t val)
445{
446 tcg_gen_addi_tl(cpu_A0, cpu_A0, val);
447#ifdef TARGET_X86_64
448 tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
449#endif
450}
451
452#ifdef TARGET_X86_64
453static inline void gen_op_addq_A0_im(int64_t val)
454{
455 tcg_gen_addi_tl(cpu_A0, cpu_A0, val);
456}
457#endif
458
459static void gen_add_A0_im(DisasContext *s, int val)
460{
461#ifdef TARGET_X86_64
462 if (CODE64(s))
463 gen_op_addq_A0_im(val);
464 else
465#endif
466 gen_op_addl_A0_im(val);
467}
468
469static inline void gen_op_addl_T0_T1(void)
470{
471 tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
472}
473
474static inline void gen_op_jmp_T0(void)
475{
476 tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUState, eip));
477}
478
479static inline void gen_op_add_reg_im(int size, int reg, int32_t val)
480{
481 switch(size) {
482 case 0:
483 tcg_gen_addi_tl(cpu_tmp0, cpu_regs[reg], val);
484 tcg_gen_ext16u_tl(cpu_tmp0, cpu_tmp0);
485 tcg_gen_andi_tl(cpu_regs[reg], cpu_regs[reg], ~0xffff);
486 tcg_gen_or_tl(cpu_regs[reg], cpu_regs[reg], cpu_tmp0);
487 break;
488 case 1:
489 tcg_gen_addi_tl(cpu_tmp0, cpu_regs[reg], val);
490 /* For x86_64, this sets the higher half of the register to zero.
491 For i386, this is equivalent to a nop. */
492 tcg_gen_ext32u_tl(cpu_tmp0, cpu_tmp0);
493 tcg_gen_mov_tl(cpu_regs[reg], cpu_tmp0);
494 break;
495#ifdef TARGET_X86_64
496 case 2:
497 tcg_gen_addi_tl(cpu_regs[reg], cpu_regs[reg], val);
498 break;
499#endif
500 }
501}
502
503static inline void gen_op_add_reg_T0(int size, int reg)
504{
505 switch(size) {
506 case 0:
507 tcg_gen_add_tl(cpu_tmp0, cpu_regs[reg], cpu_T[0]);
508 tcg_gen_ext16u_tl(cpu_tmp0, cpu_tmp0);
509 tcg_gen_andi_tl(cpu_regs[reg], cpu_regs[reg], ~0xffff);
510 tcg_gen_or_tl(cpu_regs[reg], cpu_regs[reg], cpu_tmp0);
511 break;
512 case 1:
513 tcg_gen_add_tl(cpu_tmp0, cpu_regs[reg], cpu_T[0]);
514 /* For x86_64, this sets the higher half of the register to zero.
515 For i386, this is equivalent to a nop. */
516 tcg_gen_ext32u_tl(cpu_tmp0, cpu_tmp0);
517 tcg_gen_mov_tl(cpu_regs[reg], cpu_tmp0);
518 break;
519#ifdef TARGET_X86_64
520 case 2:
521 tcg_gen_add_tl(cpu_regs[reg], cpu_regs[reg], cpu_T[0]);
522 break;
523#endif
524 }
525}
526
527static inline void gen_op_set_cc_op(int32_t val)
528{
529 tcg_gen_movi_i32(cpu_cc_op, val);
530}
531
532static inline void gen_op_addl_A0_reg_sN(int shift, int reg)
533{
534 tcg_gen_mov_tl(cpu_tmp0, cpu_regs[reg]);
535 if (shift != 0)
536 tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, shift);
537 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
538 /* For x86_64, this sets the higher half of the register to zero.
539 For i386, this is equivalent to a nop. */
540 tcg_gen_ext32u_tl(cpu_A0, cpu_A0);
541}
542
543#ifdef VBOX
544DECLINLINE(void) gen_op_seg_check(int reg, bool keepA0)
545{
546 /* It seems segments don't get out of sync - if in fact they do, enable the code below. */
547# ifdef FORCE_SEGMENT_SYNC
548# if 1
549 TCGv t0;
550
551 /* Given the poor quality of the TCG optimizer, it is better to call the helper directly */
552 t0 = tcg_temp_local_new(TCG_TYPE_TL);
553 tcg_gen_movi_tl(t0, reg);
554 tcg_gen_helper_0_1(helper_sync_seg, t0);
555 tcg_temp_free(t0);
556# else
557 /* Our segments could be outdated, so check the newselector field to see whether an update is really needed */
558 int skip_label;
559 TCGv t0, a0;
560
561 /* For other segments this check is a waste of time; also, TCG is unable to cope with this code
562 for data/stack segments, as it expects cpu_T[0] to be live */
563 if (reg != R_GS)
564 return;
565
566 if (keepA0)
567 {
568 /* we need to store old cpu_A0 */
569 a0 = tcg_temp_local_new(TCG_TYPE_TL);
570 tcg_gen_mov_tl(a0, cpu_A0);
571 }
572
573 skip_label = gen_new_label();
574 t0 = tcg_temp_local_new(TCG_TYPE_TL);
575
576 tcg_gen_ld32u_tl(t0, cpu_env, offsetof(CPUState, segs[reg].newselector) + REG_L_OFFSET);
577 tcg_gen_brcondi_i32(TCG_COND_EQ, t0, 0, skip_label);
578 tcg_gen_ld32u_tl(t0, cpu_env, offsetof(CPUState, eflags) + REG_L_OFFSET);
579 tcg_gen_andi_tl(t0, t0, VM_MASK);
580 tcg_gen_brcondi_i32(TCG_COND_NE, t0, 0, skip_label);
581 tcg_gen_movi_tl(t0, reg);
582
583 tcg_gen_helper_0_1(helper_sync_seg, t0);
584
585 tcg_temp_free(t0);
586
587 gen_set_label(skip_label);
588 if (keepA0)
589 {
590 tcg_gen_mov_tl(cpu_A0, a0);
591 tcg_temp_free(a0);
592 }
593# endif /* 0 */
594# endif /* FORCE_SEGMENT_SYNC */
595}
596#endif /* VBOX */
597
598static inline void gen_op_movl_A0_seg(int reg)
599{
600#ifdef VBOX
601 gen_op_seg_check(reg, false);
602#endif
603 tcg_gen_ld32u_tl(cpu_A0, cpu_env, offsetof(CPUState, segs[reg].base) + REG_L_OFFSET);
604}
605
606static inline void gen_op_addl_A0_seg(int reg)
607{
608#ifdef VBOX
609 gen_op_seg_check(reg, true);
610#endif
611 tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, segs[reg].base));
612 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
613#ifdef TARGET_X86_64
614 tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
615#endif
616}
617
618#ifdef TARGET_X86_64
619static inline void gen_op_movq_A0_seg(int reg)
620{
621#ifdef VBOX
622 gen_op_seg_check(reg, false);
623#endif
624 tcg_gen_ld_tl(cpu_A0, cpu_env, offsetof(CPUState, segs[reg].base));
625}
626
627static inline void gen_op_addq_A0_seg(int reg)
628{
629#ifdef VBOX
630 gen_op_seg_check(reg, true);
631#endif
632 tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, segs[reg].base));
633 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
634}
635
636static inline void gen_op_movq_A0_reg(int reg)
637{
638 tcg_gen_mov_tl(cpu_A0, cpu_regs[reg]);
639}
640
641static inline void gen_op_addq_A0_reg_sN(int shift, int reg)
642{
643 tcg_gen_mov_tl(cpu_tmp0, cpu_regs[reg]);
644 if (shift != 0)
645 tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, shift);
646 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
647}
648#endif
649
650static inline void gen_op_lds_T0_A0(int idx)
651{
652 int mem_index = (idx >> 2) - 1;
653 switch(idx & 3) {
654 case 0:
655 tcg_gen_qemu_ld8s(cpu_T[0], cpu_A0, mem_index);
656 break;
657 case 1:
658 tcg_gen_qemu_ld16s(cpu_T[0], cpu_A0, mem_index);
659 break;
660 default:
661 case 2:
662 tcg_gen_qemu_ld32s(cpu_T[0], cpu_A0, mem_index);
663 break;
664 }
665}
666
667static inline void gen_op_ld_v(int idx, TCGv t0, TCGv a0)
668{
669 int mem_index = (idx >> 2) - 1;
670 switch(idx & 3) {
671 case 0:
672 tcg_gen_qemu_ld8u(t0, a0, mem_index);
673 break;
674 case 1:
675 tcg_gen_qemu_ld16u(t0, a0, mem_index);
676 break;
677 case 2:
678 tcg_gen_qemu_ld32u(t0, a0, mem_index);
679 break;
680 default:
681 case 3:
682 /* Should never happen on 32-bit targets. */
683#ifdef TARGET_X86_64
684 tcg_gen_qemu_ld64(t0, a0, mem_index);
685#endif
686 break;
687 }
688}
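/* [Illustrative aside, not part of the original file.]
 * gen_op_ld_v()/gen_op_st_v() take a fused index: the low two bits carry the
 * operand size (OT_BYTE..OT_QUAD) and the remaining bits carry the MMU index
 * biased by one, which is why callers write "ot + s->mem_index". A sketch of
 * the encode/decode pair:
 */
#if 0
static int fuse_idx(int ot, int mmu_index)   /* what s->mem_index + ot does */
{
    return ((mmu_index + 1) << 2) + ot;
}

static void split_idx(int idx, int *ot, int *mmu_index)
{
    *ot = idx & 3;                 /* operand size */
    *mmu_index = (idx >> 2) - 1;   /* matches (idx >> 2) - 1 above */
}
#endif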
689
690/* XXX: always use ldu or lds */
691static inline void gen_op_ld_T0_A0(int idx)
692{
693 gen_op_ld_v(idx, cpu_T[0], cpu_A0);
694}
695
696static inline void gen_op_ldu_T0_A0(int idx)
697{
698 gen_op_ld_v(idx, cpu_T[0], cpu_A0);
699}
700
701static inline void gen_op_ld_T1_A0(int idx)
702{
703 gen_op_ld_v(idx, cpu_T[1], cpu_A0);
704}
705
706static inline void gen_op_st_v(int idx, TCGv t0, TCGv a0)
707{
708 int mem_index = (idx >> 2) - 1;
709 switch(idx & 3) {
710 case 0:
711 tcg_gen_qemu_st8(t0, a0, mem_index);
712 break;
713 case 1:
714 tcg_gen_qemu_st16(t0, a0, mem_index);
715 break;
716 case 2:
717 tcg_gen_qemu_st32(t0, a0, mem_index);
718 break;
719 default:
720 case 3:
721 /* Should never happen on 32-bit targets. */
722#ifdef TARGET_X86_64
723 tcg_gen_qemu_st64(t0, a0, mem_index);
724#endif
725 break;
726 }
727}
728
729static inline void gen_op_st_T0_A0(int idx)
730{
731 gen_op_st_v(idx, cpu_T[0], cpu_A0);
732}
733
734static inline void gen_op_st_T1_A0(int idx)
735{
736 gen_op_st_v(idx, cpu_T[1], cpu_A0);
737}
738
739#ifdef VBOX
740
741static void gen_check_external_event(void)
742{
743# if 1
744 /** @todo: once TCG codegen improves, we may want to use the version
745 from the #else branch below */
746 gen_helper_check_external_event();
747# else
748 int skip_label;
749 TCGv t0;
750
751 skip_label = gen_new_label();
752 t0 = tcg_temp_local_new(TCG_TYPE_TL);
753 /* t0 = cpu_tmp0; */
754
755 tcg_gen_ld32u_tl(t0, cpu_env, offsetof(CPUState, interrupt_request));
756 /* Keep in sync with helper_check_external_event() */
757 tcg_gen_andi_tl(t0, t0,
758 CPU_INTERRUPT_EXTERNAL_EXIT
759 | CPU_INTERRUPT_EXTERNAL_TIMER
760 | CPU_INTERRUPT_EXTERNAL_DMA
761 | CPU_INTERRUPT_EXTERNAL_HARD);
762 /** @todo: predict branch as taken */
763 tcg_gen_brcondi_i32(TCG_COND_EQ, t0, 0, skip_label);
764 tcg_temp_free(t0);
765
766 gen_helper_check_external_event();
767
768 gen_set_label(skip_label);
769# endif
770}
771
772#endif /* VBOX */
773
774static inline void gen_jmp_im(target_ulong pc)
775{
776 tcg_gen_movi_tl(cpu_tmp0, pc);
777 tcg_gen_st_tl(cpu_tmp0, cpu_env, offsetof(CPUState, eip));
778}
779
780#ifdef VBOX
781DECLINLINE(void) gen_update_eip(target_ulong pc)
782{
783 gen_jmp_im(pc);
784# ifdef VBOX_DUMP_STATE
785 gen_helper_dump_state();
786# endif
787}
788#endif /* VBOX */
789
790static inline void gen_string_movl_A0_ESI(DisasContext *s)
791{
792 int override;
793
794 override = s->override;
795#ifdef TARGET_X86_64
796 if (s->aflag == 2) {
797 if (override >= 0) {
798 gen_op_movq_A0_seg(override);
799 gen_op_addq_A0_reg_sN(0, R_ESI);
800 } else {
801 gen_op_movq_A0_reg(R_ESI);
802 }
803 } else
804#endif
805 if (s->aflag) {
806 /* 32 bit address */
807 if (s->addseg && override < 0)
808 override = R_DS;
809 if (override >= 0) {
810 gen_op_movl_A0_seg(override);
811 gen_op_addl_A0_reg_sN(0, R_ESI);
812 } else {
813 gen_op_movl_A0_reg(R_ESI);
814 }
815 } else {
816 /* 16 bit address, always override */
817 if (override < 0)
818 override = R_DS;
819 gen_op_movl_A0_reg(R_ESI);
820 gen_op_andl_A0_ffff();
821 gen_op_addl_A0_seg(override);
822 }
823}
824
825static inline void gen_string_movl_A0_EDI(DisasContext *s)
826{
827#ifdef TARGET_X86_64
828 if (s->aflag == 2) {
829 gen_op_movq_A0_reg(R_EDI);
830 } else
831#endif
832 if (s->aflag) {
833 if (s->addseg) {
834 gen_op_movl_A0_seg(R_ES);
835 gen_op_addl_A0_reg_sN(0, R_EDI);
836 } else {
837 gen_op_movl_A0_reg(R_EDI);
838 }
839 } else {
840 gen_op_movl_A0_reg(R_EDI);
841 gen_op_andl_A0_ffff();
842 gen_op_addl_A0_seg(R_ES);
843 }
844}
845
846static inline void gen_op_movl_T0_Dshift(int ot)
847{
848 tcg_gen_ld32s_tl(cpu_T[0], cpu_env, offsetof(CPUState, df));
849 tcg_gen_shli_tl(cpu_T[0], cpu_T[0], ot);
850};
851
852static void gen_extu(int ot, TCGv reg)
853{
854 switch(ot) {
855 case OT_BYTE:
856 tcg_gen_ext8u_tl(reg, reg);
857 break;
858 case OT_WORD:
859 tcg_gen_ext16u_tl(reg, reg);
860 break;
861 case OT_LONG:
862 tcg_gen_ext32u_tl(reg, reg);
863 break;
864 default:
865 break;
866 }
867}
868
869static void gen_exts(int ot, TCGv reg)
870{
871 switch(ot) {
872 case OT_BYTE:
873 tcg_gen_ext8s_tl(reg, reg);
874 break;
875 case OT_WORD:
876 tcg_gen_ext16s_tl(reg, reg);
877 break;
878 case OT_LONG:
879 tcg_gen_ext32s_tl(reg, reg);
880 break;
881 default:
882 break;
883 }
884}
885
886static inline void gen_op_jnz_ecx(int size, int label1)
887{
888 tcg_gen_mov_tl(cpu_tmp0, cpu_regs[R_ECX]);
889 gen_extu(size + 1, cpu_tmp0);
890 tcg_gen_brcondi_tl(TCG_COND_NE, cpu_tmp0, 0, label1);
891}
892
893static inline void gen_op_jz_ecx(int size, int label1)
894{
895 tcg_gen_mov_tl(cpu_tmp0, cpu_regs[R_ECX]);
896 gen_extu(size + 1, cpu_tmp0);
897 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_tmp0, 0, label1);
898}
899
900static void gen_helper_in_func(int ot, TCGv v, TCGv_i32 n)
901{
902 switch (ot) {
903 case 0: gen_helper_inb(v, n); break;
904 case 1: gen_helper_inw(v, n); break;
905 case 2: gen_helper_inl(v, n); break;
906 }
907
908}
909
910static void gen_helper_out_func(int ot, TCGv_i32 v, TCGv_i32 n)
911{
912 switch (ot) {
913 case 0: gen_helper_outb(v, n); break;
914 case 1: gen_helper_outw(v, n); break;
915 case 2: gen_helper_outl(v, n); break;
916 }
917
918}
919
920static void gen_check_io(DisasContext *s, int ot, target_ulong cur_eip,
921 uint32_t svm_flags)
922{
923 int state_saved;
924 target_ulong next_eip;
925
926 state_saved = 0;
927 if (s->pe && (s->cpl > s->iopl || s->vm86)) {
928 if (s->cc_op != CC_OP_DYNAMIC)
929 gen_op_set_cc_op(s->cc_op);
930 gen_jmp_im(cur_eip);
931 state_saved = 1;
932 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
933 switch (ot) {
934 case 0: gen_helper_check_iob(cpu_tmp2_i32); break;
935 case 1: gen_helper_check_iow(cpu_tmp2_i32); break;
936 case 2: gen_helper_check_iol(cpu_tmp2_i32); break;
937 }
938 }
939 if(s->flags & HF_SVMI_MASK) {
940 if (!state_saved) {
941 if (s->cc_op != CC_OP_DYNAMIC)
942 gen_op_set_cc_op(s->cc_op);
943 gen_jmp_im(cur_eip);
944 }
945 svm_flags |= (1 << (4 + ot));
946 next_eip = s->pc - s->cs_base;
947 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
948 gen_helper_svm_check_io(cpu_tmp2_i32, tcg_const_i32(svm_flags),
949 tcg_const_i32(next_eip - cur_eip));
950 }
951}
952
953static inline void gen_movs(DisasContext *s, int ot)
954{
955 gen_string_movl_A0_ESI(s);
956 gen_op_ld_T0_A0(ot + s->mem_index);
957 gen_string_movl_A0_EDI(s);
958 gen_op_st_T0_A0(ot + s->mem_index);
959 gen_op_movl_T0_Dshift(ot);
960 gen_op_add_reg_T0(s->aflag, R_ESI);
961 gen_op_add_reg_T0(s->aflag, R_EDI);
962}
963
964static inline void gen_update_cc_op(DisasContext *s)
965{
966 if (s->cc_op != CC_OP_DYNAMIC) {
967 gen_op_set_cc_op(s->cc_op);
968 s->cc_op = CC_OP_DYNAMIC;
969 }
970}
971
972static void gen_op_update1_cc(void)
973{
974 tcg_gen_discard_tl(cpu_cc_src);
975 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
976}
977
978static void gen_op_update2_cc(void)
979{
980 tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);
981 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
982}
983
984static inline void gen_op_cmpl_T0_T1_cc(void)
985{
986 tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);
987 tcg_gen_sub_tl(cpu_cc_dst, cpu_T[0], cpu_T[1]);
988}
989
990static inline void gen_op_testl_T0_T1_cc(void)
991{
992 tcg_gen_discard_tl(cpu_cc_src);
993 tcg_gen_and_tl(cpu_cc_dst, cpu_T[0], cpu_T[1]);
994}
995
996static void gen_op_update_neg_cc(void)
997{
998 tcg_gen_neg_tl(cpu_cc_src, cpu_T[0]);
999 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
1000}
1001
1002/* compute eflags.C to reg */
1003static void gen_compute_eflags_c(TCGv reg)
1004{
1005 gen_helper_cc_compute_c(cpu_tmp2_i32, cpu_cc_op);
1006 tcg_gen_extu_i32_tl(reg, cpu_tmp2_i32);
1007}
1008
1009/* compute all eflags into reg */
1010static void gen_compute_eflags(TCGv reg)
1011{
1012 gen_helper_cc_compute_all(cpu_tmp2_i32, cpu_cc_op);
1013 tcg_gen_extu_i32_tl(reg, cpu_tmp2_i32);
1014}
1015
1016static inline void gen_setcc_slow_T0(DisasContext *s, int jcc_op)
1017{
1018 if (s->cc_op != CC_OP_DYNAMIC)
1019 gen_op_set_cc_op(s->cc_op);
1020 switch(jcc_op) {
1021 case JCC_O:
1022 gen_compute_eflags(cpu_T[0]);
1023 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 11);
1024 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 1);
1025 break;
1026 case JCC_B:
1027 gen_compute_eflags_c(cpu_T[0]);
1028 break;
1029 case JCC_Z:
1030 gen_compute_eflags(cpu_T[0]);
1031 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 6);
1032 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 1);
1033 break;
1034 case JCC_BE:
1035 gen_compute_eflags(cpu_tmp0);
1036 tcg_gen_shri_tl(cpu_T[0], cpu_tmp0, 6);
1037 tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
1038 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 1);
1039 break;
1040 case JCC_S:
1041 gen_compute_eflags(cpu_T[0]);
1042 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 7);
1043 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 1);
1044 break;
1045 case JCC_P:
1046 gen_compute_eflags(cpu_T[0]);
1047 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 2);
1048 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 1);
1049 break;
1050 case JCC_L:
1051 gen_compute_eflags(cpu_tmp0);
1052 tcg_gen_shri_tl(cpu_T[0], cpu_tmp0, 11); /* CC_O */
1053 tcg_gen_shri_tl(cpu_tmp0, cpu_tmp0, 7); /* CC_S */
1054 tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
1055 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 1);
1056 break;
1057 default:
1058 case JCC_LE:
1059 gen_compute_eflags(cpu_tmp0);
1060 tcg_gen_shri_tl(cpu_T[0], cpu_tmp0, 11); /* CC_O */
1061 tcg_gen_shri_tl(cpu_tmp4, cpu_tmp0, 7); /* CC_S */
1062 tcg_gen_shri_tl(cpu_tmp0, cpu_tmp0, 6); /* CC_Z */
1063 tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_tmp4);
1064 tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
1065 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 1);
1066 break;
1067 }
1068}
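/* [Illustrative aside, not part of the original file.]
 * The shift counts above come straight from the EFLAGS bit layout:
 * CF = bit 0, PF = bit 2, ZF = bit 6, SF = bit 7, OF = bit 11. For example,
 * JCC_L tests SF != OF, hence the two shifts by 7 and 11 followed by XOR.
 * The same predicates in plain C:
 */
#if 0
#include <stdint.h>

static int jcc_l_taken(uint32_t eflags)   /* "less": SF != OF */
{
    uint32_t sf = (eflags >> 7) & 1;
    uint32_t of = (eflags >> 11) & 1;
    return sf ^ of;
}

static int jcc_le_taken(uint32_t eflags)  /* "less or equal": ZF || SF != OF */
{
    uint32_t zf = (eflags >> 6) & 1;
    return zf | jcc_l_taken(eflags);
}
#endif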
1069
1070/* return true if setcc_slow is not needed (WARNING: must be kept in
1071 sync with gen_jcc1) */
1072static int is_fast_jcc_case(DisasContext *s, int b)
1073{
1074 int jcc_op;
1075 jcc_op = (b >> 1) & 7;
1076 switch(s->cc_op) {
1077 /* we optimize the cmp/jcc case */
1078 case CC_OP_SUBB:
1079 case CC_OP_SUBW:
1080 case CC_OP_SUBL:
1081 case CC_OP_SUBQ:
1082 if (jcc_op == JCC_O || jcc_op == JCC_P)
1083 goto slow_jcc;
1084 break;
1085
1086 /* some jumps are easy to compute */
1087 case CC_OP_ADDB:
1088 case CC_OP_ADDW:
1089 case CC_OP_ADDL:
1090 case CC_OP_ADDQ:
1091
1092 case CC_OP_LOGICB:
1093 case CC_OP_LOGICW:
1094 case CC_OP_LOGICL:
1095 case CC_OP_LOGICQ:
1096
1097 case CC_OP_INCB:
1098 case CC_OP_INCW:
1099 case CC_OP_INCL:
1100 case CC_OP_INCQ:
1101
1102 case CC_OP_DECB:
1103 case CC_OP_DECW:
1104 case CC_OP_DECL:
1105 case CC_OP_DECQ:
1106
1107 case CC_OP_SHLB:
1108 case CC_OP_SHLW:
1109 case CC_OP_SHLL:
1110 case CC_OP_SHLQ:
1111 if (jcc_op != JCC_Z && jcc_op != JCC_S)
1112 goto slow_jcc;
1113 break;
1114 default:
1115 slow_jcc:
1116 return 0;
1117 }
1118 return 1;
1119}
1120
1121/* generate a conditional jump to label 'l1' according to jump opcode
1122 value 'b'. In the fast case, T0 is guaranteed not to be used. */
1123static inline void gen_jcc1(DisasContext *s, int cc_op, int b, int l1)
1124{
1125 int inv, jcc_op, size, cond;
1126 TCGv t0;
1127
1128 inv = b & 1;
1129 jcc_op = (b >> 1) & 7;
1130
1131 switch(cc_op) {
1132 /* we optimize the cmp/jcc case */
1133 case CC_OP_SUBB:
1134 case CC_OP_SUBW:
1135 case CC_OP_SUBL:
1136 case CC_OP_SUBQ:
1137
1138 size = cc_op - CC_OP_SUBB;
1139 switch(jcc_op) {
1140 case JCC_Z:
1141 fast_jcc_z:
1142 switch(size) {
1143 case 0:
1144 tcg_gen_andi_tl(cpu_tmp0, cpu_cc_dst, 0xff);
1145 t0 = cpu_tmp0;
1146 break;
1147 case 1:
1148 tcg_gen_andi_tl(cpu_tmp0, cpu_cc_dst, 0xffff);
1149 t0 = cpu_tmp0;
1150 break;
1151#ifdef TARGET_X86_64
1152 case 2:
1153 tcg_gen_andi_tl(cpu_tmp0, cpu_cc_dst, 0xffffffff);
1154 t0 = cpu_tmp0;
1155 break;
1156#endif
1157 default:
1158 t0 = cpu_cc_dst;
1159 break;
1160 }
1161 tcg_gen_brcondi_tl(inv ? TCG_COND_NE : TCG_COND_EQ, t0, 0, l1);
1162 break;
1163 case JCC_S:
1164 fast_jcc_s:
1165 switch(size) {
1166 case 0:
1167 tcg_gen_andi_tl(cpu_tmp0, cpu_cc_dst, 0x80);
1168 tcg_gen_brcondi_tl(inv ? TCG_COND_EQ : TCG_COND_NE, cpu_tmp0,
1169 0, l1);
1170 break;
1171 case 1:
1172 tcg_gen_andi_tl(cpu_tmp0, cpu_cc_dst, 0x8000);
1173 tcg_gen_brcondi_tl(inv ? TCG_COND_EQ : TCG_COND_NE, cpu_tmp0,
1174 0, l1);
1175 break;
1176#ifdef TARGET_X86_64
1177 case 2:
1178 tcg_gen_andi_tl(cpu_tmp0, cpu_cc_dst, 0x80000000);
1179 tcg_gen_brcondi_tl(inv ? TCG_COND_EQ : TCG_COND_NE, cpu_tmp0,
1180 0, l1);
1181 break;
1182#endif
1183 default:
1184 tcg_gen_brcondi_tl(inv ? TCG_COND_GE : TCG_COND_LT, cpu_cc_dst,
1185 0, l1);
1186 break;
1187 }
1188 break;
1189
1190 case JCC_B:
1191 cond = inv ? TCG_COND_GEU : TCG_COND_LTU;
1192 goto fast_jcc_b;
1193 case JCC_BE:
1194 cond = inv ? TCG_COND_GTU : TCG_COND_LEU;
1195 fast_jcc_b:
1196 tcg_gen_add_tl(cpu_tmp4, cpu_cc_dst, cpu_cc_src);
1197 switch(size) {
1198 case 0:
1199 t0 = cpu_tmp0;
1200 tcg_gen_andi_tl(cpu_tmp4, cpu_tmp4, 0xff);
1201 tcg_gen_andi_tl(t0, cpu_cc_src, 0xff);
1202 break;
1203 case 1:
1204 t0 = cpu_tmp0;
1205 tcg_gen_andi_tl(cpu_tmp4, cpu_tmp4, 0xffff);
1206 tcg_gen_andi_tl(t0, cpu_cc_src, 0xffff);
1207 break;
1208#ifdef TARGET_X86_64
1209 case 2:
1210 t0 = cpu_tmp0;
1211 tcg_gen_andi_tl(cpu_tmp4, cpu_tmp4, 0xffffffff);
1212 tcg_gen_andi_tl(t0, cpu_cc_src, 0xffffffff);
1213 break;
1214#endif
1215 default:
1216 t0 = cpu_cc_src;
1217 break;
1218 }
1219 tcg_gen_brcond_tl(cond, cpu_tmp4, t0, l1);
1220 break;
1221
1222 case JCC_L:
1223 cond = inv ? TCG_COND_GE : TCG_COND_LT;
1224 goto fast_jcc_l;
1225 case JCC_LE:
1226 cond = inv ? TCG_COND_GT : TCG_COND_LE;
1227 fast_jcc_l:
1228 tcg_gen_add_tl(cpu_tmp4, cpu_cc_dst, cpu_cc_src);
1229 switch(size) {
1230 case 0:
1231 t0 = cpu_tmp0;
1232 tcg_gen_ext8s_tl(cpu_tmp4, cpu_tmp4);
1233 tcg_gen_ext8s_tl(t0, cpu_cc_src);
1234 break;
1235 case 1:
1236 t0 = cpu_tmp0;
1237 tcg_gen_ext16s_tl(cpu_tmp4, cpu_tmp4);
1238 tcg_gen_ext16s_tl(t0, cpu_cc_src);
1239 break;
1240#ifdef TARGET_X86_64
1241 case 2:
1242 t0 = cpu_tmp0;
1243 tcg_gen_ext32s_tl(cpu_tmp4, cpu_tmp4);
1244 tcg_gen_ext32s_tl(t0, cpu_cc_src);
1245 break;
1246#endif
1247 default:
1248 t0 = cpu_cc_src;
1249 break;
1250 }
1251 tcg_gen_brcond_tl(cond, cpu_tmp4, t0, l1);
1252 break;
1253
1254 default:
1255 goto slow_jcc;
1256 }
1257 break;
1258
1259 /* some jumps are easy to compute */
1260 case CC_OP_ADDB:
1261 case CC_OP_ADDW:
1262 case CC_OP_ADDL:
1263 case CC_OP_ADDQ:
1264
1265 case CC_OP_ADCB:
1266 case CC_OP_ADCW:
1267 case CC_OP_ADCL:
1268 case CC_OP_ADCQ:
1269
1270 case CC_OP_SBBB:
1271 case CC_OP_SBBW:
1272 case CC_OP_SBBL:
1273 case CC_OP_SBBQ:
1274
1275 case CC_OP_LOGICB:
1276 case CC_OP_LOGICW:
1277 case CC_OP_LOGICL:
1278 case CC_OP_LOGICQ:
1279
1280 case CC_OP_INCB:
1281 case CC_OP_INCW:
1282 case CC_OP_INCL:
1283 case CC_OP_INCQ:
1284
1285 case CC_OP_DECB:
1286 case CC_OP_DECW:
1287 case CC_OP_DECL:
1288 case CC_OP_DECQ:
1289
1290 case CC_OP_SHLB:
1291 case CC_OP_SHLW:
1292 case CC_OP_SHLL:
1293 case CC_OP_SHLQ:
1294
1295 case CC_OP_SARB:
1296 case CC_OP_SARW:
1297 case CC_OP_SARL:
1298 case CC_OP_SARQ:
1299 switch(jcc_op) {
1300 case JCC_Z:
1301 size = (cc_op - CC_OP_ADDB) & 3;
1302 goto fast_jcc_z;
1303 case JCC_S:
1304 size = (cc_op - CC_OP_ADDB) & 3;
1305 goto fast_jcc_s;
1306 default:
1307 goto slow_jcc;
1308 }
1309 break;
1310 default:
1311 slow_jcc:
1312 gen_setcc_slow_T0(s, jcc_op);
1313 tcg_gen_brcondi_tl(inv ? TCG_COND_EQ : TCG_COND_NE,
1314 cpu_T[0], 0, l1);
1315 break;
1316 }
1317}
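/* [Illustrative aside, not part of the original file.]
 * The fast paths above exploit the lazy-flags convention for CC_OP_SUB*:
 * cc_src holds the second operand and cc_dst holds the result, so the first
 * operand is recovered as cc_dst + cc_src. "Below" (JCC_B, i.e. CF after CMP)
 * is then just an unsigned compare of the reconstructed operands, which is
 * exactly what fast_jcc_b emits. In plain C, for the 32-bit case:
 */
#if 0
#include <stdint.h>

static int jcc_b_after_sub32(uint32_t cc_dst, uint32_t cc_src)
{
    uint32_t src1 = cc_dst + cc_src;  /* reconstruct first operand */
    return src1 < cc_src;             /* CF of src1 - cc_src */
}
#endif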
1318
1319/* XXX: does not work with gdbstub "ice" single step - not a
1320 serious problem */
1321static int gen_jz_ecx_string(DisasContext *s, target_ulong next_eip)
1322{
1323 int l1, l2;
1324
1325 l1 = gen_new_label();
1326 l2 = gen_new_label();
1327 gen_op_jnz_ecx(s->aflag, l1);
1328 gen_set_label(l2);
1329 gen_jmp_tb(s, next_eip, 1);
1330 gen_set_label(l1);
1331 return l2;
1332}
1333
1334static inline void gen_stos(DisasContext *s, int ot)
1335{
1336 gen_op_mov_TN_reg(OT_LONG, 0, R_EAX);
1337 gen_string_movl_A0_EDI(s);
1338 gen_op_st_T0_A0(ot + s->mem_index);
1339 gen_op_movl_T0_Dshift(ot);
1340 gen_op_add_reg_T0(s->aflag, R_EDI);
1341}
1342
1343static inline void gen_lods(DisasContext *s, int ot)
1344{
1345 gen_string_movl_A0_ESI(s);
1346 gen_op_ld_T0_A0(ot + s->mem_index);
1347 gen_op_mov_reg_T0(ot, R_EAX);
1348 gen_op_movl_T0_Dshift(ot);
1349 gen_op_add_reg_T0(s->aflag, R_ESI);
1350}
1351
1352static inline void gen_scas(DisasContext *s, int ot)
1353{
1354 gen_op_mov_TN_reg(OT_LONG, 0, R_EAX);
1355 gen_string_movl_A0_EDI(s);
1356 gen_op_ld_T1_A0(ot + s->mem_index);
1357 gen_op_cmpl_T0_T1_cc();
1358 gen_op_movl_T0_Dshift(ot);
1359 gen_op_add_reg_T0(s->aflag, R_EDI);
1360}
1361
1362static inline void gen_cmps(DisasContext *s, int ot)
1363{
1364 gen_string_movl_A0_ESI(s);
1365 gen_op_ld_T0_A0(ot + s->mem_index);
1366 gen_string_movl_A0_EDI(s);
1367 gen_op_ld_T1_A0(ot + s->mem_index);
1368 gen_op_cmpl_T0_T1_cc();
1369 gen_op_movl_T0_Dshift(ot);
1370 gen_op_add_reg_T0(s->aflag, R_ESI);
1371 gen_op_add_reg_T0(s->aflag, R_EDI);
1372}
1373
1374static inline void gen_ins(DisasContext *s, int ot)
1375{
1376 if (use_icount)
1377 gen_io_start();
1378 gen_string_movl_A0_EDI(s);
1379 /* Note: we must do this dummy write first to be restartable in
1380 case of a page fault. */
1381 gen_op_movl_T0_0();
1382 gen_op_st_T0_A0(ot + s->mem_index);
1383 gen_op_mov_TN_reg(OT_WORD, 1, R_EDX);
1384 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[1]);
1385 tcg_gen_andi_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0xffff);
1386 gen_helper_in_func(ot, cpu_T[0], cpu_tmp2_i32);
1387 gen_op_st_T0_A0(ot + s->mem_index);
1388 gen_op_movl_T0_Dshift(ot);
1389 gen_op_add_reg_T0(s->aflag, R_EDI);
1390 if (use_icount)
1391 gen_io_end();
1392}
1393
1394static inline void gen_outs(DisasContext *s, int ot)
1395{
1396 if (use_icount)
1397 gen_io_start();
1398 gen_string_movl_A0_ESI(s);
1399 gen_op_ld_T0_A0(ot + s->mem_index);
1400
1401 gen_op_mov_TN_reg(OT_WORD, 1, R_EDX);
1402 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[1]);
1403 tcg_gen_andi_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0xffff);
1404 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[0]);
1405 gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);
1406
1407 gen_op_movl_T0_Dshift(ot);
1408 gen_op_add_reg_T0(s->aflag, R_ESI);
1409 if (use_icount)
1410 gen_io_end();
1411}
1412
1413/* same method as Valgrind: we generate jumps to the current or next
1414 instruction */
1415#define GEN_REPZ(op) \
1416static inline void gen_repz_ ## op(DisasContext *s, int ot, \
1417 target_ulong cur_eip, target_ulong next_eip) \
1418{ \
1419 int l2;\
1420 gen_update_cc_op(s); \
1421 l2 = gen_jz_ecx_string(s, next_eip); \
1422 gen_ ## op(s, ot); \
1423 gen_op_add_reg_im(s->aflag, R_ECX, -1); \
1424 /* a loop would cause two single step exceptions if ECX = 1 \
1425 before rep string_insn */ \
1426 if (!s->jmp_opt) \
1427 gen_op_jz_ecx(s->aflag, l2); \
1428 gen_jmp(s, cur_eip); \
1429}
1430
1431#define GEN_REPZ2(op) \
1432static inline void gen_repz_ ## op(DisasContext *s, int ot, \
1433 target_ulong cur_eip, \
1434 target_ulong next_eip, \
1435 int nz) \
1436{ \
1437 int l2;\
1438 gen_update_cc_op(s); \
1439 l2 = gen_jz_ecx_string(s, next_eip); \
1440 gen_ ## op(s, ot); \
1441 gen_op_add_reg_im(s->aflag, R_ECX, -1); \
1442 gen_op_set_cc_op(CC_OP_SUBB + ot); \
1443 gen_jcc1(s, CC_OP_SUBB + ot, (JCC_Z << 1) | (nz ^ 1), l2); \
1444 if (!s->jmp_opt) \
1445 gen_op_jz_ecx(s->aflag, l2); \
1446 gen_jmp(s, cur_eip); \
1447}
1448
1449GEN_REPZ(movs)
1450GEN_REPZ(stos)
1451GEN_REPZ(lods)
1452GEN_REPZ(ins)
1453GEN_REPZ(outs)
1454GEN_REPZ2(scas)
1455GEN_REPZ2(cmps)
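/* [Illustrative aside, not part of the original file.]
 * Each GEN_REPZ(op) invocation stamps out a gen_repz_<op>() that translates a
 * REP-prefixed string instruction as "if ECX == 0 jump past; do one
 * iteration; ECX--; jump back to the instruction itself". GEN_REPZ(movs)
 * therefore expands to roughly:
 */
#if 0
static inline void gen_repz_movs(DisasContext *s, int ot,
                                 target_ulong cur_eip, target_ulong next_eip)
{
    int l2;
    gen_update_cc_op(s);
    l2 = gen_jz_ecx_string(s, next_eip);   /* ECX == 0: skip to next insn */
    gen_movs(s, ot);                       /* one iteration */
    gen_op_add_reg_im(s->aflag, R_ECX, -1);
    if (!s->jmp_opt)
        gen_op_jz_ecx(s->aflag, l2);       /* single-step friendly exit */
    gen_jmp(s, cur_eip);                   /* re-execute the REP insn */
}
#endif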
1456
1457static void gen_helper_fp_arith_ST0_FT0(int op)
1458{
1459 switch (op) {
1460 case 0: gen_helper_fadd_ST0_FT0(); break;
1461 case 1: gen_helper_fmul_ST0_FT0(); break;
1462 case 2: gen_helper_fcom_ST0_FT0(); break;
1463 case 3: gen_helper_fcom_ST0_FT0(); break;
1464 case 4: gen_helper_fsub_ST0_FT0(); break;
1465 case 5: gen_helper_fsubr_ST0_FT0(); break;
1466 case 6: gen_helper_fdiv_ST0_FT0(); break;
1467 case 7: gen_helper_fdivr_ST0_FT0(); break;
1468 }
1469}
1470
1471/* NOTE the exception in the "r" op ordering (fsub/fsubr and fdiv/fdivr are swapped) */
1472static void gen_helper_fp_arith_STN_ST0(int op, int opreg)
1473{
1474 TCGv_i32 tmp = tcg_const_i32(opreg);
1475 switch (op) {
1476 case 0: gen_helper_fadd_STN_ST0(tmp); break;
1477 case 1: gen_helper_fmul_STN_ST0(tmp); break;
1478 case 4: gen_helper_fsubr_STN_ST0(tmp); break;
1479 case 5: gen_helper_fsub_STN_ST0(tmp); break;
1480 case 6: gen_helper_fdivr_STN_ST0(tmp); break;
1481 case 7: gen_helper_fdiv_STN_ST0(tmp); break;
1482 }
1483}
1484
1485/* if d == OR_TMP0, it means memory operand (address in A0) */
1486static void gen_op(DisasContext *s1, int op, int ot, int d)
1487{
1488 if (d != OR_TMP0) {
1489 gen_op_mov_TN_reg(ot, 0, d);
1490 } else {
1491 gen_op_ld_T0_A0(ot + s1->mem_index);
1492 }
1493 switch(op) {
1494 case OP_ADCL:
1495 if (s1->cc_op != CC_OP_DYNAMIC)
1496 gen_op_set_cc_op(s1->cc_op);
1497 gen_compute_eflags_c(cpu_tmp4);
1498 tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
1499 tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_tmp4);
1500 if (d != OR_TMP0)
1501 gen_op_mov_reg_T0(ot, d);
1502 else
1503 gen_op_st_T0_A0(ot + s1->mem_index);
1504 tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);
1505 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
1506 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_tmp4);
1507 tcg_gen_shli_i32(cpu_tmp2_i32, cpu_tmp2_i32, 2);
1508 tcg_gen_addi_i32(cpu_cc_op, cpu_tmp2_i32, CC_OP_ADDB + ot);
1509 s1->cc_op = CC_OP_DYNAMIC;
1510 break;
1511 case OP_SBBL:
1512 if (s1->cc_op != CC_OP_DYNAMIC)
1513 gen_op_set_cc_op(s1->cc_op);
1514 gen_compute_eflags_c(cpu_tmp4);
1515 tcg_gen_sub_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
1516 tcg_gen_sub_tl(cpu_T[0], cpu_T[0], cpu_tmp4);
1517 if (d != OR_TMP0)
1518 gen_op_mov_reg_T0(ot, d);
1519 else
1520 gen_op_st_T0_A0(ot + s1->mem_index);
1521 tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);
1522 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
1523 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_tmp4);
1524 tcg_gen_shli_i32(cpu_tmp2_i32, cpu_tmp2_i32, 2);
1525 tcg_gen_addi_i32(cpu_cc_op, cpu_tmp2_i32, CC_OP_SUBB + ot);
1526 s1->cc_op = CC_OP_DYNAMIC;
1527 break;
1528 case OP_ADDL:
1529 gen_op_addl_T0_T1();
1530 if (d != OR_TMP0)
1531 gen_op_mov_reg_T0(ot, d);
1532 else
1533 gen_op_st_T0_A0(ot + s1->mem_index);
1534 gen_op_update2_cc();
1535 s1->cc_op = CC_OP_ADDB + ot;
1536 break;
1537 case OP_SUBL:
1538 tcg_gen_sub_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
1539 if (d != OR_TMP0)
1540 gen_op_mov_reg_T0(ot, d);
1541 else
1542 gen_op_st_T0_A0(ot + s1->mem_index);
1543 gen_op_update2_cc();
1544 s1->cc_op = CC_OP_SUBB + ot;
1545 break;
1546 default:
1547 case OP_ANDL:
1548 tcg_gen_and_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
1549 if (d != OR_TMP0)
1550 gen_op_mov_reg_T0(ot, d);
1551 else
1552 gen_op_st_T0_A0(ot + s1->mem_index);
1553 gen_op_update1_cc();
1554 s1->cc_op = CC_OP_LOGICB + ot;
1555 break;
1556 case OP_ORL:
1557 tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
1558 if (d != OR_TMP0)
1559 gen_op_mov_reg_T0(ot, d);
1560 else
1561 gen_op_st_T0_A0(ot + s1->mem_index);
1562 gen_op_update1_cc();
1563 s1->cc_op = CC_OP_LOGICB + ot;
1564 break;
1565 case OP_XORL:
1566 tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
1567 if (d != OR_TMP0)
1568 gen_op_mov_reg_T0(ot, d);
1569 else
1570 gen_op_st_T0_A0(ot + s1->mem_index);
1571 gen_op_update1_cc();
1572 s1->cc_op = CC_OP_LOGICB + ot;
1573 break;
1574 case OP_CMPL:
1575 gen_op_cmpl_T0_T1_cc();
1576 s1->cc_op = CC_OP_SUBB + ot;
1577 break;
1578 }
1579}
1580
1581/* if d == OR_TMP0, it means memory operand (address in A0) */
1582static void gen_inc(DisasContext *s1, int ot, int d, int c)
1583{
1584 if (d != OR_TMP0)
1585 gen_op_mov_TN_reg(ot, 0, d);
1586 else
1587 gen_op_ld_T0_A0(ot + s1->mem_index);
1588 if (s1->cc_op != CC_OP_DYNAMIC)
1589 gen_op_set_cc_op(s1->cc_op);
1590 if (c > 0) {
1591 tcg_gen_addi_tl(cpu_T[0], cpu_T[0], 1);
1592 s1->cc_op = CC_OP_INCB + ot;
1593 } else {
1594 tcg_gen_addi_tl(cpu_T[0], cpu_T[0], -1);
1595 s1->cc_op = CC_OP_DECB + ot;
1596 }
1597 if (d != OR_TMP0)
1598 gen_op_mov_reg_T0(ot, d);
1599 else
1600 gen_op_st_T0_A0(ot + s1->mem_index);
1601 gen_compute_eflags_c(cpu_cc_src);
1602 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
1603}
1604
1605static void gen_shift_rm_T1(DisasContext *s, int ot, int op1,
1606 int is_right, int is_arith)
1607{
1608 target_ulong mask;
1609 int shift_label;
1610 TCGv t0, t1;
1611
1612 if (ot == OT_QUAD)
1613 mask = 0x3f;
1614 else
1615 mask = 0x1f;
1616
1617 /* load */
1618 if (op1 == OR_TMP0)
1619 gen_op_ld_T0_A0(ot + s->mem_index);
1620 else
1621 gen_op_mov_TN_reg(ot, 0, op1);
1622
1623 tcg_gen_andi_tl(cpu_T[1], cpu_T[1], mask);
1624
1625 tcg_gen_addi_tl(cpu_tmp5, cpu_T[1], -1);
1626
1627 if (is_right) {
1628 if (is_arith) {
1629 gen_exts(ot, cpu_T[0]);
1630 tcg_gen_sar_tl(cpu_T3, cpu_T[0], cpu_tmp5);
1631 tcg_gen_sar_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
1632 } else {
1633 gen_extu(ot, cpu_T[0]);
1634 tcg_gen_shr_tl(cpu_T3, cpu_T[0], cpu_tmp5);
1635 tcg_gen_shr_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
1636 }
1637 } else {
1638 tcg_gen_shl_tl(cpu_T3, cpu_T[0], cpu_tmp5);
1639 tcg_gen_shl_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
1640 }
1641
1642 /* store */
1643 if (op1 == OR_TMP0)
1644 gen_op_st_T0_A0(ot + s->mem_index);
1645 else
1646 gen_op_mov_reg_T0(ot, op1);
1647
1648 /* update eflags if non zero shift */
1649 if (s->cc_op != CC_OP_DYNAMIC)
1650 gen_op_set_cc_op(s->cc_op);
1651
1652 /* XXX: inefficient */
1653 t0 = tcg_temp_local_new();
1654 t1 = tcg_temp_local_new();
1655
1656 tcg_gen_mov_tl(t0, cpu_T[0]);
1657 tcg_gen_mov_tl(t1, cpu_T3);
1658
1659 shift_label = gen_new_label();
1660 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_T[1], 0, shift_label);
1661
1662 tcg_gen_mov_tl(cpu_cc_src, t1);
1663 tcg_gen_mov_tl(cpu_cc_dst, t0);
1664 if (is_right)
1665 tcg_gen_movi_i32(cpu_cc_op, CC_OP_SARB + ot);
1666 else
1667 tcg_gen_movi_i32(cpu_cc_op, CC_OP_SHLB + ot);
1668
1669 gen_set_label(shift_label);
1670 s->cc_op = CC_OP_DYNAMIC; /* cannot predict flags after */
1671
1672 tcg_temp_free(t0);
1673 tcg_temp_free(t1);
1674}
1675
1676static void gen_shift_rm_im(DisasContext *s, int ot, int op1, int op2,
1677 int is_right, int is_arith)
1678{
1679 int mask;
1680
1681 if (ot == OT_QUAD)
1682 mask = 0x3f;
1683 else
1684 mask = 0x1f;
1685
1686 /* load */
1687 if (op1 == OR_TMP0)
1688 gen_op_ld_T0_A0(ot + s->mem_index);
1689 else
1690 gen_op_mov_TN_reg(ot, 0, op1);
1691
1692 op2 &= mask;
1693 if (op2 != 0) {
1694 if (is_right) {
1695 if (is_arith) {
1696 gen_exts(ot, cpu_T[0]);
1697 tcg_gen_sari_tl(cpu_tmp4, cpu_T[0], op2 - 1);
1698 tcg_gen_sari_tl(cpu_T[0], cpu_T[0], op2);
1699 } else {
1700 gen_extu(ot, cpu_T[0]);
1701 tcg_gen_shri_tl(cpu_tmp4, cpu_T[0], op2 - 1);
1702 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], op2);
1703 }
1704 } else {
1705 tcg_gen_shli_tl(cpu_tmp4, cpu_T[0], op2 - 1);
1706 tcg_gen_shli_tl(cpu_T[0], cpu_T[0], op2);
1707 }
1708 }
1709
1710 /* store */
1711 if (op1 == OR_TMP0)
1712 gen_op_st_T0_A0(ot + s->mem_index);
1713 else
1714 gen_op_mov_reg_T0(ot, op1);
1715
1716 /* update eflags if non zero shift */
1717 if (op2 != 0) {
1718 tcg_gen_mov_tl(cpu_cc_src, cpu_tmp4);
1719 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
1720 if (is_right)
1721 s->cc_op = CC_OP_SARB + ot;
1722 else
1723 s->cc_op = CC_OP_SHLB + ot;
1724 }
1725}
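/* [Illustrative aside, not part of the original file.]
 * Both shift flavours recover CF the same way: shift by (count - 1) first,
 * keep that intermediate value, then shift once more. The last bit shifted
 * out - the x86 carry - is the low bit of the intermediate for right shifts
 * and the high bit for left shifts. For a 32-bit SHR in plain C:
 */
#if 0
#include <stdint.h>

static uint32_t shr32_with_cf(uint32_t x, unsigned count, int *cf)
{
    uint32_t almost = x >> (count - 1);  /* count must be 1..31 here */
    *cf = almost & 1;                    /* last bit shifted out */
    return almost >> 1;
}
#endif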
1726
1727static inline void tcg_gen_lshift(TCGv ret, TCGv arg1, target_long arg2)
1728{
1729 if (arg2 >= 0)
1730 tcg_gen_shli_tl(ret, arg1, arg2);
1731 else
1732 tcg_gen_shri_tl(ret, arg1, -arg2);
1733}
1734
1735static void gen_rot_rm_T1(DisasContext *s, int ot, int op1,
1736 int is_right)
1737{
1738 target_ulong mask;
1739 int label1, label2, data_bits;
1740 TCGv t0, t1, t2, a0;
1741
1742 /* XXX: inefficient, but we must use local temps */
1743 t0 = tcg_temp_local_new();
1744 t1 = tcg_temp_local_new();
1745 t2 = tcg_temp_local_new();
1746 a0 = tcg_temp_local_new();
1747
1748 if (ot == OT_QUAD)
1749 mask = 0x3f;
1750 else
1751 mask = 0x1f;
1752
1753 /* load */
1754 if (op1 == OR_TMP0) {
1755 tcg_gen_mov_tl(a0, cpu_A0);
1756 gen_op_ld_v(ot + s->mem_index, t0, a0);
1757 } else {
1758 gen_op_mov_v_reg(ot, t0, op1);
1759 }
1760
1761 tcg_gen_mov_tl(t1, cpu_T[1]);
1762
1763 tcg_gen_andi_tl(t1, t1, mask);
1764
1765 /* Must test zero case to avoid using undefined behaviour in TCG
1766 shifts. */
1767 label1 = gen_new_label();
1768 tcg_gen_brcondi_tl(TCG_COND_EQ, t1, 0, label1);
1769
1770 if (ot <= OT_WORD)
1771 tcg_gen_andi_tl(cpu_tmp0, t1, (1 << (3 + ot)) - 1);
1772 else
1773 tcg_gen_mov_tl(cpu_tmp0, t1);
1774
1775 gen_extu(ot, t0);
1776 tcg_gen_mov_tl(t2, t0);
1777
1778 data_bits = 8 << ot;
1779 /* XXX: rely on behaviour of shifts when operand 2 overflows (XXX:
1780 fix TCG definition) */
1781 if (is_right) {
1782 tcg_gen_shr_tl(cpu_tmp4, t0, cpu_tmp0);
1783 tcg_gen_subfi_tl(cpu_tmp0, data_bits, cpu_tmp0);
1784 tcg_gen_shl_tl(t0, t0, cpu_tmp0);
1785 } else {
1786 tcg_gen_shl_tl(cpu_tmp4, t0, cpu_tmp0);
1787 tcg_gen_subfi_tl(cpu_tmp0, data_bits, cpu_tmp0);
1788 tcg_gen_shr_tl(t0, t0, cpu_tmp0);
1789 }
1790 tcg_gen_or_tl(t0, t0, cpu_tmp4);
1791
1792 gen_set_label(label1);
1793 /* store */
1794 if (op1 == OR_TMP0) {
1795 gen_op_st_v(ot + s->mem_index, t0, a0);
1796 } else {
1797 gen_op_mov_reg_v(ot, op1, t0);
1798 }
1799
1800 /* update eflags */
1801 if (s->cc_op != CC_OP_DYNAMIC)
1802 gen_op_set_cc_op(s->cc_op);
1803
1804 label2 = gen_new_label();
1805 tcg_gen_brcondi_tl(TCG_COND_EQ, t1, 0, label2);
1806
1807 gen_compute_eflags(cpu_cc_src);
1808 tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~(CC_O | CC_C));
1809 tcg_gen_xor_tl(cpu_tmp0, t2, t0);
1810 tcg_gen_lshift(cpu_tmp0, cpu_tmp0, 11 - (data_bits - 1));
1811 tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, CC_O);
1812 tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, cpu_tmp0);
1813 if (is_right) {
1814 tcg_gen_shri_tl(t0, t0, data_bits - 1);
1815 }
1816 tcg_gen_andi_tl(t0, t0, CC_C);
1817 tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t0);
1818
1819 tcg_gen_discard_tl(cpu_cc_dst);
1820 tcg_gen_movi_i32(cpu_cc_op, CC_OP_EFLAGS);
1821
1822 gen_set_label(label2);
1823 s->cc_op = CC_OP_DYNAMIC; /* cannot predict flags after */
1824
1825 tcg_temp_free(t0);
1826 tcg_temp_free(t1);
1827 tcg_temp_free(t2);
1828 tcg_temp_free(a0);
1829}
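/* [Illustrative aside, not part of the original file.]
 * The generated code is the textbook two-shift rotate,
 * rol(x, n) = (x << n) | (x >> (bits - n)), with the explicit n == 0 branch
 * because a shift by the full operand width is undefined in TCG (as in C).
 * The 8-bit case in plain C:
 */
#if 0
#include <stdint.h>

static uint8_t rotl8(uint8_t x, unsigned n)
{
    n &= 7;                 /* like the (1 << (3 + ot)) - 1 mask above */
    if (n == 0)
        return x;           /* avoid the undefined x >> 8 */
    return (uint8_t)((x << n) | (x >> (8 - n)));
}
#endif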
1830
1831static void gen_rot_rm_im(DisasContext *s, int ot, int op1, int op2,
1832 int is_right)
1833{
1834 int mask;
1835 int data_bits;
1836 TCGv t0, t1, a0;
1837
1838 /* XXX: inefficient, but we must use local temps */
1839 t0 = tcg_temp_local_new();
1840 t1 = tcg_temp_local_new();
1841 a0 = tcg_temp_local_new();
1842
1843 if (ot == OT_QUAD)
1844 mask = 0x3f;
1845 else
1846 mask = 0x1f;
1847
1848 /* load */
1849 if (op1 == OR_TMP0) {
1850 tcg_gen_mov_tl(a0, cpu_A0);
1851 gen_op_ld_v(ot + s->mem_index, t0, a0);
1852 } else {
1853 gen_op_mov_v_reg(ot, t0, op1);
1854 }
1855
1856 gen_extu(ot, t0);
1857 tcg_gen_mov_tl(t1, t0);
1858
1859 op2 &= mask;
1860 data_bits = 8 << ot;
1861 if (op2 != 0) {
1862 int shift = op2 & ((1 << (3 + ot)) - 1);
1863 if (is_right) {
1864 tcg_gen_shri_tl(cpu_tmp4, t0, shift);
1865 tcg_gen_shli_tl(t0, t0, data_bits - shift);
1866 }
1867 else {
1868 tcg_gen_shli_tl(cpu_tmp4, t0, shift);
1869 tcg_gen_shri_tl(t0, t0, data_bits - shift);
1870 }
1871 tcg_gen_or_tl(t0, t0, cpu_tmp4);
1872 }
1873
1874 /* store */
1875 if (op1 == OR_TMP0) {
1876 gen_op_st_v(ot + s->mem_index, t0, a0);
1877 } else {
1878 gen_op_mov_reg_v(ot, op1, t0);
1879 }
1880
1881 if (op2 != 0) {
1882 /* update eflags */
1883 if (s->cc_op != CC_OP_DYNAMIC)
1884 gen_op_set_cc_op(s->cc_op);
1885
1886 gen_compute_eflags(cpu_cc_src);
1887 tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~(CC_O | CC_C));
1888 tcg_gen_xor_tl(cpu_tmp0, t1, t0);
1889 tcg_gen_lshift(cpu_tmp0, cpu_tmp0, 11 - (data_bits - 1));
1890 tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, CC_O);
1891 tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, cpu_tmp0);
1892 if (is_right) {
1893 tcg_gen_shri_tl(t0, t0, data_bits - 1);
1894 }
1895 tcg_gen_andi_tl(t0, t0, CC_C);
1896 tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t0);
1897
1898 tcg_gen_discard_tl(cpu_cc_dst);
1899 tcg_gen_movi_i32(cpu_cc_op, CC_OP_EFLAGS);
1900 s->cc_op = CC_OP_EFLAGS;
1901 }
1902
1903 tcg_temp_free(t0);
1904 tcg_temp_free(t1);
1905 tcg_temp_free(a0);
1906}
1907
1908/* XXX: add faster immediate = 1 case */
1909static void gen_rotc_rm_T1(DisasContext *s, int ot, int op1,
1910 int is_right)
1911{
1912 int label1;
1913
1914 if (s->cc_op != CC_OP_DYNAMIC)
1915 gen_op_set_cc_op(s->cc_op);
1916
1917 /* load */
1918 if (op1 == OR_TMP0)
1919 gen_op_ld_T0_A0(ot + s->mem_index);
1920 else
1921 gen_op_mov_TN_reg(ot, 0, op1);
1922
1923 if (is_right) {
1924 switch (ot) {
1925 case 0: gen_helper_rcrb(cpu_T[0], cpu_T[0], cpu_T[1]); break;
1926 case 1: gen_helper_rcrw(cpu_T[0], cpu_T[0], cpu_T[1]); break;
1927 case 2: gen_helper_rcrl(cpu_T[0], cpu_T[0], cpu_T[1]); break;
1928#ifdef TARGET_X86_64
1929 case 3: gen_helper_rcrq(cpu_T[0], cpu_T[0], cpu_T[1]); break;
1930#endif
1931 }
1932 } else {
1933 switch (ot) {
1934 case 0: gen_helper_rclb(cpu_T[0], cpu_T[0], cpu_T[1]); break;
1935 case 1: gen_helper_rclw(cpu_T[0], cpu_T[0], cpu_T[1]); break;
1936 case 2: gen_helper_rcll(cpu_T[0], cpu_T[0], cpu_T[1]); break;
1937#ifdef TARGET_X86_64
1938 case 3: gen_helper_rclq(cpu_T[0], cpu_T[0], cpu_T[1]); break;
1939#endif
1940 }
1941 }
1942 /* store */
1943 if (op1 == OR_TMP0)
1944 gen_op_st_T0_A0(ot + s->mem_index);
1945 else
1946 gen_op_mov_reg_T0(ot, op1);
1947
1948 /* update eflags */
1949 label1 = gen_new_label();
1950 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_cc_tmp, -1, label1);
1951
1952 tcg_gen_mov_tl(cpu_cc_src, cpu_cc_tmp);
1953 tcg_gen_discard_tl(cpu_cc_dst);
1954 tcg_gen_movi_i32(cpu_cc_op, CC_OP_EFLAGS);
1955
1956 gen_set_label(label1);
1957 s->cc_op = CC_OP_DYNAMIC; /* cannot predict flags after */
1958}
1959
1960/* XXX: add faster immediate case */
1961static void gen_shiftd_rm_T1_T3(DisasContext *s, int ot, int op1,
1962 int is_right)
1963{
1964 int label1, label2, data_bits;
1965 target_ulong mask;
1966 TCGv t0, t1, t2, a0;
1967
1968 t0 = tcg_temp_local_new();
1969 t1 = tcg_temp_local_new();
1970 t2 = tcg_temp_local_new();
1971 a0 = tcg_temp_local_new();
1972
1973 if (ot == OT_QUAD)
1974 mask = 0x3f;
1975 else
1976 mask = 0x1f;
1977
1978 /* load */
1979 if (op1 == OR_TMP0) {
1980 tcg_gen_mov_tl(a0, cpu_A0);
1981 gen_op_ld_v(ot + s->mem_index, t0, a0);
1982 } else {
1983 gen_op_mov_v_reg(ot, t0, op1);
1984 }
1985
1986 tcg_gen_andi_tl(cpu_T3, cpu_T3, mask);
1987
1988 tcg_gen_mov_tl(t1, cpu_T[1]);
1989 tcg_gen_mov_tl(t2, cpu_T3);
1990
1991 /* Must test zero case to avoid using undefined behaviour in TCG
1992 shifts. */
1993 label1 = gen_new_label();
1994 tcg_gen_brcondi_tl(TCG_COND_EQ, t2, 0, label1);
1995
1996 tcg_gen_addi_tl(cpu_tmp5, t2, -1);
1997 if (ot == OT_WORD) {
1998 /* Note: we implement the Intel behaviour for shift count > 16 */
1999 if (is_right) {
2000 tcg_gen_andi_tl(t0, t0, 0xffff);
2001 tcg_gen_shli_tl(cpu_tmp0, t1, 16);
2002 tcg_gen_or_tl(t0, t0, cpu_tmp0);
2003 tcg_gen_ext32u_tl(t0, t0);
2004
2005 tcg_gen_shr_tl(cpu_tmp4, t0, cpu_tmp5);
2006
2007 /* only needed if count > 16, but a test would complicate the code */
2008 tcg_gen_subfi_tl(cpu_tmp5, 32, t2);
2009 tcg_gen_shl_tl(cpu_tmp0, t0, cpu_tmp5);
2010
2011 tcg_gen_shr_tl(t0, t0, t2);
2012
2013 tcg_gen_or_tl(t0, t0, cpu_tmp0);
2014 } else {
2015 /* XXX: not optimal */
2016 tcg_gen_andi_tl(t0, t0, 0xffff);
2017 tcg_gen_shli_tl(t1, t1, 16);
2018 tcg_gen_or_tl(t1, t1, t0);
2019 tcg_gen_ext32u_tl(t1, t1);
2020
2021 tcg_gen_shl_tl(cpu_tmp4, t0, cpu_tmp5);
2022 tcg_gen_subfi_tl(cpu_tmp0, 32, cpu_tmp5);
2023 tcg_gen_shr_tl(cpu_tmp5, t1, cpu_tmp0);
2024 tcg_gen_or_tl(cpu_tmp4, cpu_tmp4, cpu_tmp5);
2025
2026 tcg_gen_shl_tl(t0, t0, t2);
2027 tcg_gen_subfi_tl(cpu_tmp5, 32, t2);
2028 tcg_gen_shr_tl(t1, t1, cpu_tmp5);
2029 tcg_gen_or_tl(t0, t0, t1);
2030 }
2031 } else {
2032 data_bits = 8 << ot;
2033 if (is_right) {
2034 if (ot == OT_LONG)
2035 tcg_gen_ext32u_tl(t0, t0);
2036
2037 tcg_gen_shr_tl(cpu_tmp4, t0, cpu_tmp5);
2038
2039 tcg_gen_shr_tl(t0, t0, t2);
2040 tcg_gen_subfi_tl(cpu_tmp5, data_bits, t2);
2041 tcg_gen_shl_tl(t1, t1, cpu_tmp5);
2042 tcg_gen_or_tl(t0, t0, t1);
2043
2044 } else {
2045 if (ot == OT_LONG)
2046 tcg_gen_ext32u_tl(t1, t1);
2047
2048 tcg_gen_shl_tl(cpu_tmp4, t0, cpu_tmp5);
2049
2050 tcg_gen_shl_tl(t0, t0, t2);
2051 tcg_gen_subfi_tl(cpu_tmp5, data_bits, t2);
2052 tcg_gen_shr_tl(t1, t1, cpu_tmp5);
2053 tcg_gen_or_tl(t0, t0, t1);
2054 }
2055 }
2056 tcg_gen_mov_tl(t1, cpu_tmp4);
2057
2058 gen_set_label(label1);
2059 /* store */
2060 if (op1 == OR_TMP0) {
2061 gen_op_st_v(ot + s->mem_index, t0, a0);
2062 } else {
2063 gen_op_mov_reg_v(ot, op1, t0);
2064 }
2065
2066 /* update eflags */
2067 if (s->cc_op != CC_OP_DYNAMIC)
2068 gen_op_set_cc_op(s->cc_op);
2069
2070 label2 = gen_new_label();
2071 tcg_gen_brcondi_tl(TCG_COND_EQ, t2, 0, label2);
2072
2073 tcg_gen_mov_tl(cpu_cc_src, t1);
2074 tcg_gen_mov_tl(cpu_cc_dst, t0);
2075 if (is_right) {
2076 tcg_gen_movi_i32(cpu_cc_op, CC_OP_SARB + ot);
2077 } else {
2078 tcg_gen_movi_i32(cpu_cc_op, CC_OP_SHLB + ot);
2079 }
2080 gen_set_label(label2);
2081 s->cc_op = CC_OP_DYNAMIC; /* cannot predict flags after */
2082
2083 tcg_temp_free(t0);
2084 tcg_temp_free(t1);
2085 tcg_temp_free(t2);
2086 tcg_temp_free(a0);
2087}
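/* [Illustrative aside, not part of the original file.]
 * This is SHLD/SHRD: the destination is shifted while the vacated bits are
 * filled from the second operand, as if the two registers formed one wide
 * value. A 32-bit SHRD sketch in plain C (count assumed 1..31; the masked
 * non-zero case is handled above):
 */
#if 0
#include <stdint.h>

static uint32_t shrd32(uint32_t dst, uint32_t src, unsigned count)
{
    return (dst >> count) | (src << (32 - count));
}
#endif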
2088
2089static void gen_shift(DisasContext *s1, int op, int ot, int d, int s)
2090{
2091 if (s != OR_TMP1)
2092 gen_op_mov_TN_reg(ot, 1, s);
2093 switch(op) {
2094 case OP_ROL:
2095 gen_rot_rm_T1(s1, ot, d, 0);
2096 break;
2097 case OP_ROR:
2098 gen_rot_rm_T1(s1, ot, d, 1);
2099 break;
2100 case OP_SHL:
2101 case OP_SHL1:
2102 gen_shift_rm_T1(s1, ot, d, 0, 0);
2103 break;
2104 case OP_SHR:
2105 gen_shift_rm_T1(s1, ot, d, 1, 0);
2106 break;
2107 case OP_SAR:
2108 gen_shift_rm_T1(s1, ot, d, 1, 1);
2109 break;
2110 case OP_RCL:
2111 gen_rotc_rm_T1(s1, ot, d, 0);
2112 break;
2113 case OP_RCR:
2114 gen_rotc_rm_T1(s1, ot, d, 1);
2115 break;
2116 }
2117}
2118
2119static void gen_shifti(DisasContext *s1, int op, int ot, int d, int c)
2120{
2121 switch(op) {
2122 case OP_ROL:
2123 gen_rot_rm_im(s1, ot, d, c, 0);
2124 break;
2125 case OP_ROR:
2126 gen_rot_rm_im(s1, ot, d, c, 1);
2127 break;
2128 case OP_SHL:
2129 case OP_SHL1:
2130 gen_shift_rm_im(s1, ot, d, c, 0, 0);
2131 break;
2132 case OP_SHR:
2133 gen_shift_rm_im(s1, ot, d, c, 1, 0);
2134 break;
2135 case OP_SAR:
2136 gen_shift_rm_im(s1, ot, d, c, 1, 1);
2137 break;
2138 default:
2139 /* currently not optimized */
2140 gen_op_movl_T1_im(c);
2141 gen_shift(s1, op, ot, d, OR_TMP1);
2142 break;
2143 }
2144}
2145
2146static void gen_lea_modrm(DisasContext *s, int modrm, int *reg_ptr, int *offset_ptr)
2147{
2148 target_long disp;
2149 int havesib;
2150 int base;
2151 int index;
2152 int scale;
2153 int opreg;
2154 int mod, rm, code, override, must_add_seg;
2155
2156 override = s->override;
2157 must_add_seg = s->addseg;
2158 if (override >= 0)
2159 must_add_seg = 1;
2160 mod = (modrm >> 6) & 3;
2161 rm = modrm & 7;
2162
2163 if (s->aflag) {
2164
2165 havesib = 0;
2166 base = rm;
2167 index = 0;
2168 scale = 0;
2169
2170 if (base == 4) {
2171 havesib = 1;
2172 code = ldub_code(s->pc++);
2173 scale = (code >> 6) & 3;
2174 index = ((code >> 3) & 7) | REX_X(s);
2175 base = (code & 7);
2176 }
2177 base |= REX_B(s);
2178
2179 switch (mod) {
2180 case 0:
2181 if ((base & 7) == 5) {
2182 base = -1;
2183 disp = (int32_t)ldl_code(s->pc);
2184 s->pc += 4;
2185 if (CODE64(s) && !havesib) {
2186 disp += s->pc + s->rip_offset;
2187 }
2188 } else {
2189 disp = 0;
2190 }
2191 break;
2192 case 1:
2193 disp = (int8_t)ldub_code(s->pc++);
2194 break;
2195 default:
2196 case 2:
2197#ifdef VBOX
2198 disp = (int32_t)ldl_code(s->pc);
2199#else
2200 disp = ldl_code(s->pc);
2201#endif
2202 s->pc += 4;
2203 break;
2204 }
2205
2206 if (base >= 0) {
2207 /* for correct popl handling with esp */
2208 if (base == 4 && s->popl_esp_hack)
2209 disp += s->popl_esp_hack;
2210#ifdef TARGET_X86_64
2211 if (s->aflag == 2) {
2212 gen_op_movq_A0_reg(base);
2213 if (disp != 0) {
2214 gen_op_addq_A0_im(disp);
2215 }
2216 } else
2217#endif
2218 {
2219 gen_op_movl_A0_reg(base);
2220 if (disp != 0)
2221 gen_op_addl_A0_im(disp);
2222 }
2223 } else {
2224#ifdef TARGET_X86_64
2225 if (s->aflag == 2) {
2226 gen_op_movq_A0_im(disp);
2227 } else
2228#endif
2229 {
2230 gen_op_movl_A0_im(disp);
2231 }
2232 }
2233 /* index == 4 means no index */
2234 if (havesib && (index != 4)) {
2235#ifdef TARGET_X86_64
2236 if (s->aflag == 2) {
2237 gen_op_addq_A0_reg_sN(scale, index);
2238 } else
2239#endif
2240 {
2241 gen_op_addl_A0_reg_sN(scale, index);
2242 }
2243 }
2244 if (must_add_seg) {
2245 if (override < 0) {
2246 if (base == R_EBP || base == R_ESP)
2247 override = R_SS;
2248 else
2249 override = R_DS;
2250 }
2251#ifdef TARGET_X86_64
2252 if (s->aflag == 2) {
2253 gen_op_addq_A0_seg(override);
2254 } else
2255#endif
2256 {
2257 gen_op_addl_A0_seg(override);
2258 }
2259 }
2260 } else {
2261 switch (mod) {
2262 case 0:
2263 if (rm == 6) {
2264 disp = lduw_code(s->pc);
2265 s->pc += 2;
2266 gen_op_movl_A0_im(disp);
2267 rm = 0; /* avoid SS override */
2268 goto no_rm;
2269 } else {
2270 disp = 0;
2271 }
2272 break;
2273 case 1:
2274 disp = (int8_t)ldub_code(s->pc++);
2275 break;
2276 default:
2277 case 2:
2278 disp = lduw_code(s->pc);
2279 s->pc += 2;
2280 break;
2281 }
2282 switch(rm) {
2283 case 0:
2284 gen_op_movl_A0_reg(R_EBX);
2285 gen_op_addl_A0_reg_sN(0, R_ESI);
2286 break;
2287 case 1:
2288 gen_op_movl_A0_reg(R_EBX);
2289 gen_op_addl_A0_reg_sN(0, R_EDI);
2290 break;
2291 case 2:
2292 gen_op_movl_A0_reg(R_EBP);
2293 gen_op_addl_A0_reg_sN(0, R_ESI);
2294 break;
2295 case 3:
2296 gen_op_movl_A0_reg(R_EBP);
2297 gen_op_addl_A0_reg_sN(0, R_EDI);
2298 break;
2299 case 4:
2300 gen_op_movl_A0_reg(R_ESI);
2301 break;
2302 case 5:
2303 gen_op_movl_A0_reg(R_EDI);
2304 break;
2305 case 6:
2306 gen_op_movl_A0_reg(R_EBP);
2307 break;
2308 default:
2309 case 7:
2310 gen_op_movl_A0_reg(R_EBX);
2311 break;
2312 }
2313 if (disp != 0)
2314 gen_op_addl_A0_im(disp);
2315 gen_op_andl_A0_ffff();
2316 no_rm:
2317 if (must_add_seg) {
2318 if (override < 0) {
2319 if (rm == 2 || rm == 3 || rm == 6)
2320 override = R_SS;
2321 else
2322 override = R_DS;
2323 }
2324 gen_op_addl_A0_seg(override);
2325 }
2326 }
2327
2328 opreg = OR_A0;
2329 disp = 0;
2330 *reg_ptr = opreg;
2331 *offset_ptr = disp;
2332}
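
/* A sketch of the ModRM/SIB bit layout decoded above for 32/64-bit
   addressing; the struct and helper are illustrative only. */
struct modrm_fields {
    int mod;    /* bits 7:6: addressing mode, 3 = register direct */
    int reg;    /* bits 5:3: register or opcode extension */
    int rm;     /* bits 2:0: base register, or 4 = SIB byte follows */
};

static inline struct modrm_fields decode_modrm(uint8_t modrm)
{
    struct modrm_fields f;
    f.mod = (modrm >> 6) & 3;
    f.reg = (modrm >> 3) & 7;
    f.rm  = modrm & 7;
    return f;
}

/* When rm == 4 a SIB byte follows (scale = bits 7:6, index = bits 5:3,
   base = bits 2:0) and the effective address becomes
   base + (index << scale) + disp, as implemented above. */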
2333
2334static void gen_nop_modrm(DisasContext *s, int modrm)
2335{
2336 int mod, rm, base, code;
2337
2338 mod = (modrm >> 6) & 3;
2339 if (mod == 3)
2340 return;
2341 rm = modrm & 7;
2342
2343 if (s->aflag) {
2344
2345 base = rm;
2346
2347 if (base == 4) {
2348 code = ldub_code(s->pc++);
2349 base = (code & 7);
2350 }
2351
2352 switch (mod) {
2353 case 0:
2354 if (base == 5) {
2355 s->pc += 4;
2356 }
2357 break;
2358 case 1:
2359 s->pc++;
2360 break;
2361 default:
2362 case 2:
2363 s->pc += 4;
2364 break;
2365 }
2366 } else {
2367 switch (mod) {
2368 case 0:
2369 if (rm == 6) {
2370 s->pc += 2;
2371 }
2372 break;
2373 case 1:
2374 s->pc++;
2375 break;
2376 default:
2377 case 2:
2378 s->pc += 2;
2379 break;
2380 }
2381 }
2382}
2383
2384/* used for LEA and MOV AX, mem */
2385static void gen_add_A0_ds_seg(DisasContext *s)
2386{
2387 int override, must_add_seg;
2388 must_add_seg = s->addseg;
2389 override = R_DS;
2390 if (s->override >= 0) {
2391 override = s->override;
2392 must_add_seg = 1;
2393 }
2394 if (must_add_seg) {
2395#ifdef TARGET_X86_64
2396 if (CODE64(s)) {
2397 gen_op_addq_A0_seg(override);
2398 } else
2399#endif
2400 {
2401 gen_op_addl_A0_seg(override);
2402 }
2403 }
2404}
2405
2406/* generate modrm memory load or store of 'reg'. TMP0 is used if reg ==
2407 OR_TMP0 */
2408static void gen_ldst_modrm(DisasContext *s, int modrm, int ot, int reg, int is_store)
2409{
2410 int mod, rm, opreg, disp;
2411
2412 mod = (modrm >> 6) & 3;
2413 rm = (modrm & 7) | REX_B(s);
2414 if (mod == 3) {
2415 if (is_store) {
2416 if (reg != OR_TMP0)
2417 gen_op_mov_TN_reg(ot, 0, reg);
2418 gen_op_mov_reg_T0(ot, rm);
2419 } else {
2420 gen_op_mov_TN_reg(ot, 0, rm);
2421 if (reg != OR_TMP0)
2422 gen_op_mov_reg_T0(ot, reg);
2423 }
2424 } else {
2425 gen_lea_modrm(s, modrm, &opreg, &disp);
2426 if (is_store) {
2427 if (reg != OR_TMP0)
2428 gen_op_mov_TN_reg(ot, 0, reg);
2429 gen_op_st_T0_A0(ot + s->mem_index);
2430 } else {
2431 gen_op_ld_T0_A0(ot + s->mem_index);
2432 if (reg != OR_TMP0)
2433 gen_op_mov_reg_T0(ot, reg);
2434 }
2435 }
2436}
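
/* Reading aid for the dispatch above (hypothetical helper, not used by
   the translator): mod == 3 selects the register-direct form, anything
   else goes through gen_lea_modrm() and a memory access. */
static inline int modrm_is_reg_direct(int modrm)
{
    return ((modrm >> 6) & 3) == 3;
}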
2437
2438static inline uint32_t insn_get(DisasContext *s, int ot)
2439{
2440 uint32_t ret;
2441
2442 switch(ot) {
2443 case OT_BYTE:
2444 ret = ldub_code(s->pc);
2445 s->pc++;
2446 break;
2447 case OT_WORD:
2448 ret = lduw_code(s->pc);
2449 s->pc += 2;
2450 break;
2451 default:
2452 case OT_LONG:
2453 ret = ldl_code(s->pc);
2454 s->pc += 4;
2455 break;
2456 }
2457 return ret;
2458}
2459
2460static inline int insn_const_size(unsigned int ot)
2461{
2462 if (ot <= OT_LONG)
2463 return 1 << ot;
2464 else
2465 return 4;
2466}
2467
2468static inline void gen_goto_tb(DisasContext *s, int tb_num, target_ulong eip)
2469{
2470 TranslationBlock *tb;
2471 target_ulong pc;
2472
2473 pc = s->cs_base + eip;
2474 tb = s->tb;
2475 /* NOTE: we handle the case where the TB spans two pages here */
2476 if ((pc & TARGET_PAGE_MASK) == (tb->pc & TARGET_PAGE_MASK) ||
2477 (pc & TARGET_PAGE_MASK) == ((s->pc - 1) & TARGET_PAGE_MASK)) {
2478#ifdef VBOX
2479 gen_check_external_event();
2480#endif /* VBOX */
2481 /* jump to same page: we can use a direct jump */
2482 tcg_gen_goto_tb(tb_num);
2483 gen_jmp_im(eip);
2484 tcg_gen_exit_tb((intptr_t)tb + tb_num);
2485 } else {
2486 /* jump to another page: currently not optimized */
2487 gen_jmp_im(eip);
2488 gen_eob(s);
2489 }
2490}
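
/* A sketch of the same-page test used above, assuming a hypothetical
   4 KiB page mask: direct TB chaining is only done when the jump target
   lies on a page the current TB already covers. */
static inline int same_page_ref(uint64_t pc_a, uint64_t pc_b)
{
    const uint64_t page_mask = ~(uint64_t)0xfff;  /* assumes 4 KiB pages */
    return (pc_a & page_mask) == (pc_b & page_mask);
}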
2491
2492static inline void gen_jcc(DisasContext *s, int b,
2493 target_ulong val, target_ulong next_eip)
2494{
2495 int l1, l2, cc_op;
2496
2497 cc_op = s->cc_op;
2498 gen_update_cc_op(s);
2499 if (s->jmp_opt) {
2500 l1 = gen_new_label();
2501 gen_jcc1(s, cc_op, b, l1);
2502
2503 gen_goto_tb(s, 0, next_eip);
2504
2505 gen_set_label(l1);
2506 gen_goto_tb(s, 1, val);
2507 s->is_jmp = DISAS_TB_JUMP;
2508 } else {
2509
2510 l1 = gen_new_label();
2511 l2 = gen_new_label();
2512 gen_jcc1(s, cc_op, b, l1);
2513
2514 gen_jmp_im(next_eip);
2515 tcg_gen_br(l2);
2516
2517 gen_set_label(l1);
2518 gen_jmp_im(val);
2519 gen_set_label(l2);
2520 gen_eob(s);
2521 }
2522}
2523
2524static void gen_setcc(DisasContext *s, int b)
2525{
2526 int inv, jcc_op, l1;
2527 TCGv t0;
2528
2529 if (is_fast_jcc_case(s, b)) {
2530 /* nominal case: we use a jump */
2531 /* XXX: make it faster by adding new instructions in TCG */
2532 t0 = tcg_temp_local_new();
2533 tcg_gen_movi_tl(t0, 0);
2534 l1 = gen_new_label();
2535 gen_jcc1(s, s->cc_op, b ^ 1, l1);
2536 tcg_gen_movi_tl(t0, 1);
2537 gen_set_label(l1);
2538 tcg_gen_mov_tl(cpu_T[0], t0);
2539 tcg_temp_free(t0);
2540 } else {
2541 /* slow case: it is more efficient not to generate a jump,
2542 although it is questionable whether this optimization is
2543 worthwhile */
2544 inv = b & 1;
2545 jcc_op = (b >> 1) & 7;
2546 gen_setcc_slow_T0(s, jcc_op);
2547 if (inv) {
2548 tcg_gen_xori_tl(cpu_T[0], cpu_T[0], 1);
2549 }
2550 }
2551}
2552
2553static inline void gen_op_movl_T0_seg(int seg_reg)
2554{
2555 tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
2556 offsetof(CPUX86State,segs[seg_reg].selector));
2557}
2558
2559static inline void gen_op_movl_seg_T0_vm(int seg_reg)
2560{
2561 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xffff);
2562 tcg_gen_st32_tl(cpu_T[0], cpu_env,
2563 offsetof(CPUX86State,segs[seg_reg].selector));
2564 tcg_gen_shli_tl(cpu_T[0], cpu_T[0], 4);
2565 tcg_gen_st_tl(cpu_T[0], cpu_env,
2566 offsetof(CPUX86State,segs[seg_reg].base));
2567}
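
/* Worked example of the real/VM86-mode rule the two stores above
   implement: base = selector << 4, so selector 0x1234 yields base
   0x12340. The helper is a sketch, not translator code. */
static inline uint32_t vm86_linear_ref(uint16_t selector, uint16_t offset)
{
    return ((uint32_t)selector << 4) + offset;
}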
2568
2569 /* move T0 to seg_reg and compute whether the CPU state may change. Never
2570 call this function with seg_reg == R_CS */
2571static void gen_movl_seg_T0(DisasContext *s, int seg_reg, target_ulong cur_eip)
2572{
2573 if (s->pe && !s->vm86) {
2574 /* XXX: optimize by finding processor state dynamically */
2575 if (s->cc_op != CC_OP_DYNAMIC)
2576 gen_op_set_cc_op(s->cc_op);
2577 gen_jmp_im(cur_eip);
2578 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
2579 gen_helper_load_seg(tcg_const_i32(seg_reg), cpu_tmp2_i32);
2580 /* abort translation because the addseg value may change or
2581 because ss32 may change. For R_SS, translation must always
2582 stop as a special handling must be done to disable hardware
2583 interrupts for the next instruction */
2584 if (seg_reg == R_SS || (s->code32 && seg_reg < R_FS))
2585 s->is_jmp = DISAS_TB_JUMP;
2586 } else {
2587 gen_op_movl_seg_T0_vm(seg_reg);
2588 if (seg_reg == R_SS)
2589 s->is_jmp = DISAS_TB_JUMP;
2590 }
2591}
2592
2593static inline int svm_is_rep(int prefixes)
2594{
2595 return ((prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) ? 8 : 0);
2596}
2597
2598static inline void
2599gen_svm_check_intercept_param(DisasContext *s, target_ulong pc_start,
2600 uint32_t type, uint64_t param)
2601{
2602 /* no SVM activated; fast case */
2603 if (likely(!(s->flags & HF_SVMI_MASK)))
2604 return;
2605 if (s->cc_op != CC_OP_DYNAMIC)
2606 gen_op_set_cc_op(s->cc_op);
2607 gen_jmp_im(pc_start - s->cs_base);
2608 gen_helper_svm_check_intercept_param(tcg_const_i32(type),
2609 tcg_const_i64(param));
2610}
2611
2612static inline void
2613gen_svm_check_intercept(DisasContext *s, target_ulong pc_start, uint64_t type)
2614{
2615 gen_svm_check_intercept_param(s, pc_start, type, 0);
2616}
2617
2618static inline void gen_stack_update(DisasContext *s, int addend)
2619{
2620#ifdef TARGET_X86_64
2621 if (CODE64(s)) {
2622 gen_op_add_reg_im(2, R_ESP, addend);
2623 } else
2624#endif
2625 if (s->ss32) {
2626 gen_op_add_reg_im(1, R_ESP, addend);
2627 } else {
2628 gen_op_add_reg_im(0, R_ESP, addend);
2629 }
2630}
2631
2632/* generate a push. It depends on ss32, addseg and dflag */
2633static void gen_push_T0(DisasContext *s)
2634{
2635#ifdef TARGET_X86_64
2636 if (CODE64(s)) {
2637 gen_op_movq_A0_reg(R_ESP);
2638 if (s->dflag) {
2639 gen_op_addq_A0_im(-8);
2640 gen_op_st_T0_A0(OT_QUAD + s->mem_index);
2641 } else {
2642 gen_op_addq_A0_im(-2);
2643 gen_op_st_T0_A0(OT_WORD + s->mem_index);
2644 }
2645 gen_op_mov_reg_A0(2, R_ESP);
2646 } else
2647#endif
2648 {
2649 gen_op_movl_A0_reg(R_ESP);
2650 if (!s->dflag)
2651 gen_op_addl_A0_im(-2);
2652 else
2653 gen_op_addl_A0_im(-4);
2654 if (s->ss32) {
2655 if (s->addseg) {
2656 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2657 gen_op_addl_A0_seg(R_SS);
2658 }
2659 } else {
2660 gen_op_andl_A0_ffff();
2661 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2662 gen_op_addl_A0_seg(R_SS);
2663 }
2664 gen_op_st_T0_A0(s->dflag + 1 + s->mem_index);
2665 if (s->ss32 && !s->addseg)
2666 gen_op_mov_reg_A0(1, R_ESP);
2667 else
2668 gen_op_mov_reg_T1(s->ss32 + 1, R_ESP);
2669 }
2670}
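
/* Scalar model of the push generated above (a sketch): compute the
   decremented stack pointer first, store through it, then commit it,
   keeping the high half of ESP intact when the stack is 16-bit. */
static inline uint32_t push_esp_ref(uint32_t esp, int opsize, int ss32)
{
    uint32_t new_esp = esp - opsize;
    if (!ss32)
        new_esp = (esp & 0xffff0000u) | (new_esp & 0xffff);
    /* the store to SS:new_esp happens before ESP is written back */
    return new_esp;
}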
2671
2672/* generate a push. It depends on ss32, addseg and dflag */
2673/* slower version for T1, only used for call Ev */
2674static void gen_push_T1(DisasContext *s)
2675{
2676#ifdef TARGET_X86_64
2677 if (CODE64(s)) {
2678 gen_op_movq_A0_reg(R_ESP);
2679 if (s->dflag) {
2680 gen_op_addq_A0_im(-8);
2681 gen_op_st_T1_A0(OT_QUAD + s->mem_index);
2682 } else {
2683 gen_op_addq_A0_im(-2);
2684 gen_op_st_T1_A0(OT_WORD + s->mem_index);
2685 }
2686 gen_op_mov_reg_A0(2, R_ESP);
2687 } else
2688#endif
2689 {
2690 gen_op_movl_A0_reg(R_ESP);
2691 if (!s->dflag)
2692 gen_op_addl_A0_im(-2);
2693 else
2694 gen_op_addl_A0_im(-4);
2695 if (s->ss32) {
2696 if (s->addseg) {
2697 gen_op_addl_A0_seg(R_SS);
2698 }
2699 } else {
2700 gen_op_andl_A0_ffff();
2701 gen_op_addl_A0_seg(R_SS);
2702 }
2703 gen_op_st_T1_A0(s->dflag + 1 + s->mem_index);
2704
2705 if (s->ss32 && !s->addseg)
2706 gen_op_mov_reg_A0(1, R_ESP);
2707 else
2708 gen_stack_update(s, (-2) << s->dflag);
2709 }
2710}
2711
2712 /* a two-step pop is necessary for precise exceptions: the load may fault, so ESP is committed separately afterwards */
2713static void gen_pop_T0(DisasContext *s)
2714{
2715#ifdef TARGET_X86_64
2716 if (CODE64(s)) {
2717 gen_op_movq_A0_reg(R_ESP);
2718 gen_op_ld_T0_A0((s->dflag ? OT_QUAD : OT_WORD) + s->mem_index);
2719 } else
2720#endif
2721 {
2722 gen_op_movl_A0_reg(R_ESP);
2723 if (s->ss32) {
2724 if (s->addseg)
2725 gen_op_addl_A0_seg(R_SS);
2726 } else {
2727 gen_op_andl_A0_ffff();
2728 gen_op_addl_A0_seg(R_SS);
2729 }
2730 gen_op_ld_T0_A0(s->dflag + 1 + s->mem_index);
2731 }
2732}
2733
2734static void gen_pop_update(DisasContext *s)
2735{
2736#ifdef TARGET_X86_64
2737 if (CODE64(s) && s->dflag) {
2738 gen_stack_update(s, 8);
2739 } else
2740#endif
2741 {
2742 gen_stack_update(s, 2 << s->dflag);
2743 }
2744}
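
/* Schematic caller pattern that motivates the two-step pop: the load in
   gen_pop_T0() may raise a page fault, and the instruction must then be
   restartable with ESP unchanged, so gen_pop_update() commits the new
   ESP only after the value has been loaded. Illustration only: */
#if 0
    gen_pop_T0(s);              /* step 1: load from SS:ESP; may fault */
    gen_op_mov_reg_T0(ot, reg); /* consume the popped value */
    gen_pop_update(s);          /* step 2: only now advance ESP */
#endif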
2745
2746static void gen_stack_A0(DisasContext *s)
2747{
2748 gen_op_movl_A0_reg(R_ESP);
2749 if (!s->ss32)
2750 gen_op_andl_A0_ffff();
2751 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2752 if (s->addseg)
2753 gen_op_addl_A0_seg(R_SS);
2754}
2755
2756 /* NOTE: wrap-around with a 16-bit stack is not fully handled */
2757static void gen_pusha(DisasContext *s)
2758{
2759 int i;
2760 gen_op_movl_A0_reg(R_ESP);
2761 gen_op_addl_A0_im(-16 << s->dflag);
2762 if (!s->ss32)
2763 gen_op_andl_A0_ffff();
2764 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2765 if (s->addseg)
2766 gen_op_addl_A0_seg(R_SS);
2767 for(i = 0;i < 8; i++) {
2768 gen_op_mov_TN_reg(OT_LONG, 0, 7 - i);
2769 gen_op_st_T0_A0(OT_WORD + s->dflag + s->mem_index);
2770 gen_op_addl_A0_im(2 << s->dflag);
2771 }
2772 gen_op_mov_reg_T1(OT_WORD + s->ss32, R_ESP);
2773}
2774
2775 /* NOTE: wrap-around with a 16-bit stack is not fully handled */
2776static void gen_popa(DisasContext *s)
2777{
2778 int i;
2779 gen_op_movl_A0_reg(R_ESP);
2780 if (!s->ss32)
2781 gen_op_andl_A0_ffff();
2782 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2783 tcg_gen_addi_tl(cpu_T[1], cpu_T[1], 16 << s->dflag);
2784 if (s->addseg)
2785 gen_op_addl_A0_seg(R_SS);
2786 for(i = 0;i < 8; i++) {
2787 /* ESP is not reloaded */
2788 if (i != 3) {
2789 gen_op_ld_T0_A0(OT_WORD + s->dflag + s->mem_index);
2790 gen_op_mov_reg_T0(OT_WORD + s->dflag, 7 - i);
2791 }
2792 gen_op_addl_A0_im(2 << s->dflag);
2793 }
2794 gen_op_mov_reg_T1(OT_WORD + s->ss32, R_ESP);
2795}
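
/* Frame layout sketch for the two loops above (32-bit case), from the
   lowest address written upward; POPA reads the same slots back but
   skips reloading ESP: */
static const char *pusha_frame_ref[8] = {
    "EDI", "ESI", "EBP", "ESP (original value)", "EBX", "EDX", "ECX", "EAX"
};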
2796
2797static void gen_enter(DisasContext *s, int esp_addend, int level)
2798{
2799 int ot, opsize;
2800
2801 level &= 0x1f;
2802#ifdef TARGET_X86_64
2803 if (CODE64(s)) {
2804 ot = s->dflag ? OT_QUAD : OT_WORD;
2805 opsize = 1 << ot;
2806
2807 gen_op_movl_A0_reg(R_ESP);
2808 gen_op_addq_A0_im(-opsize);
2809 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2810
2811 /* push bp */
2812 gen_op_mov_TN_reg(OT_LONG, 0, R_EBP);
2813 gen_op_st_T0_A0(ot + s->mem_index);
2814 if (level) {
2815 /* XXX: must save state */
2816 gen_helper_enter64_level(tcg_const_i32(level),
2817 tcg_const_i32((ot == OT_QUAD)),
2818 cpu_T[1]);
2819 }
2820 gen_op_mov_reg_T1(ot, R_EBP);
2821 tcg_gen_addi_tl(cpu_T[1], cpu_T[1], -esp_addend + (-opsize * level));
2822 gen_op_mov_reg_T1(OT_QUAD, R_ESP);
2823 } else
2824#endif
2825 {
2826 ot = s->dflag + OT_WORD;
2827 opsize = 2 << s->dflag;
2828
2829 gen_op_movl_A0_reg(R_ESP);
2830 gen_op_addl_A0_im(-opsize);
2831 if (!s->ss32)
2832 gen_op_andl_A0_ffff();
2833 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2834 if (s->addseg)
2835 gen_op_addl_A0_seg(R_SS);
2836 /* push bp */
2837 gen_op_mov_TN_reg(OT_LONG, 0, R_EBP);
2838 gen_op_st_T0_A0(ot + s->mem_index);
2839 if (level) {
2840 /* XXX: must save state */
2841 gen_helper_enter_level(tcg_const_i32(level),
2842 tcg_const_i32(s->dflag),
2843 cpu_T[1]);
2844 }
2845 gen_op_mov_reg_T1(ot, R_EBP);
2846 tcg_gen_addi_tl(cpu_T[1], cpu_T[1], -esp_addend + (-opsize * level));
2847 gen_op_mov_reg_T1(OT_WORD + s->ss32, R_ESP);
2848 }
2849}
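
/* Scalar model of ENTER imm16,0 as generated above (level 0, 32-bit
   stack; a sketch with illustrative names): push EBP, point EBP at the
   saved copy, then allocate esp_addend bytes of locals. */
static inline void enter_ref(uint32_t *esp, uint32_t *ebp,
                             uint32_t esp_addend)
{
    *esp -= 4;           /* push old EBP (the store itself is omitted) */
    *ebp = *esp;         /* new frame pointer */
    *esp -= esp_addend;  /* reserve the local area */
}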
2850
2851static void gen_exception(DisasContext *s, int trapno, target_ulong cur_eip)
2852{
2853 if (s->cc_op != CC_OP_DYNAMIC)
2854 gen_op_set_cc_op(s->cc_op);
2855 gen_jmp_im(cur_eip);
2856 gen_helper_raise_exception(tcg_const_i32(trapno));
2857 s->is_jmp = DISAS_TB_JUMP;
2858}
2859
2860/* an interrupt is different from an exception because of the
2861 privilege checks */
2862static void gen_interrupt(DisasContext *s, int intno,
2863 target_ulong cur_eip, target_ulong next_eip)
2864{
2865 if (s->cc_op != CC_OP_DYNAMIC)
2866 gen_op_set_cc_op(s->cc_op);
2867 gen_jmp_im(cur_eip);
2868 gen_helper_raise_interrupt(tcg_const_i32(intno),
2869 tcg_const_i32(next_eip - cur_eip));
2870 s->is_jmp = DISAS_TB_JUMP;
2871}
2872
2873static void gen_debug(DisasContext *s, target_ulong cur_eip)
2874{
2875 if (s->cc_op != CC_OP_DYNAMIC)
2876 gen_op_set_cc_op(s->cc_op);
2877 gen_jmp_im(cur_eip);
2878 gen_helper_debug();
2879 s->is_jmp = DISAS_TB_JUMP;
2880}
2881
2882 /* generate a generic end of block. A trace exception is also
2883 generated if needed */
2884static void gen_eob(DisasContext *s)
2885{
2886 if (s->cc_op != CC_OP_DYNAMIC)
2887 gen_op_set_cc_op(s->cc_op);
2888 if (s->tb->flags & HF_INHIBIT_IRQ_MASK) {
2889 gen_helper_reset_inhibit_irq();
2890 }
2891 if (s->tb->flags & HF_RF_MASK) {
2892 gen_helper_reset_rf();
2893 }
2894 if ( s->singlestep_enabled
2895#ifdef VBOX
2896 && ( !(cpu_single_env->state & CPU_EMULATE_SINGLE_STEP)
2897 || !(s->prefix & (PREFIX_REPNZ | PREFIX_REPZ) ))
2898#endif
2899 ) {
2900 gen_helper_debug();
2901 } else if (s->tf) {
2902 gen_helper_single_step();
2903 } else {
2904 tcg_gen_exit_tb(0);
2905 }
2906 s->is_jmp = DISAS_TB_JUMP;
2907}
2908
2909/* generate a jump to eip. No segment change must happen before as a
2910 direct call to the next block may occur */
2911static void gen_jmp_tb(DisasContext *s, target_ulong eip, int tb_num)
2912{
2913 if (s->jmp_opt) {
2914 gen_update_cc_op(s);
2915 gen_goto_tb(s, tb_num, eip);
2916 s->is_jmp = DISAS_TB_JUMP;
2917 } else {
2918 gen_jmp_im(eip);
2919 gen_eob(s);
2920 }
2921}
2922
2923static void gen_jmp(DisasContext *s, target_ulong eip)
2924{
2925 gen_jmp_tb(s, eip, 0);
2926}
2927
2928static inline void gen_ldq_env_A0(int idx, int offset)
2929{
2930 int mem_index = (idx >> 2) - 1;
2931 tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0, mem_index);
2932 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset);
2933}
2934
2935static inline void gen_stq_env_A0(int idx, int offset)
2936{
2937 int mem_index = (idx >> 2) - 1;
2938 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset);
2939 tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0, mem_index);
2940}
2941
2942static inline void gen_ldo_env_A0(int idx, int offset)
2943{
2944 int mem_index = (idx >> 2) - 1;
2945 tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0, mem_index);
2946 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(0)));
2947 tcg_gen_addi_tl(cpu_tmp0, cpu_A0, 8);
2948 tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_tmp0, mem_index);
2949 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(1)));
2950}
2951
2952static inline void gen_sto_env_A0(int idx, int offset)
2953{
2954 int mem_index = (idx >> 2) - 1;
2955 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(0)));
2956 tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0, mem_index);
2957 tcg_gen_addi_tl(cpu_tmp0, cpu_A0, 8);
2958 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(1)));
2959 tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_tmp0, mem_index);
2960}
2961
2962static inline void gen_op_movo(int d_offset, int s_offset)
2963{
2964 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset);
2965 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset);
2966 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset + 8);
2967 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset + 8);
2968}
2969
2970static inline void gen_op_movq(int d_offset, int s_offset)
2971{
2972 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset);
2973 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset);
2974}
2975
2976static inline void gen_op_movl(int d_offset, int s_offset)
2977{
2978 tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env, s_offset);
2979 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, d_offset);
2980}
2981
2982static inline void gen_op_movq_env_0(int d_offset)
2983{
2984 tcg_gen_movi_i64(cpu_tmp1_i64, 0);
2985 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset);
2986}
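
/* The 128-bit XMM moves above are all composed from two 64-bit halves;
   a scalar sketch of the same idea: */
static inline void move128_ref(uint64_t dst[2], const uint64_t src[2])
{
    dst[0] = src[0];  /* XMM_Q(0): low quadword */
    dst[1] = src[1];  /* XMM_Q(1): high quadword */
}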
2987
2988#define SSE_SPECIAL ((void *)1)
2989#define SSE_DUMMY ((void *)2)
2990
2991#define MMX_OP2(x) { gen_helper_ ## x ## _mmx, gen_helper_ ## x ## _xmm }
2992#define SSE_FOP(x) { gen_helper_ ## x ## ps, gen_helper_ ## x ## pd, \
2993 gen_helper_ ## x ## ss, gen_helper_ ## x ## sd, }
2994
2995static void *sse_op_table1[256][4] = {
2996 /* 3DNow! extensions */
2997 [0x0e] = { SSE_DUMMY }, /* femms */
2998 [0x0f] = { SSE_DUMMY }, /* pf... */
2999 /* pure SSE operations */
3000 [0x10] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movups, movupd, movss, movsd */
3001 [0x11] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movups, movupd, movss, movsd */
3002 [0x12] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movlps, movlpd, movsldup, movddup */
3003 [0x13] = { SSE_SPECIAL, SSE_SPECIAL }, /* movlps, movlpd */
3004 [0x14] = { gen_helper_punpckldq_xmm, gen_helper_punpcklqdq_xmm },
3005 [0x15] = { gen_helper_punpckhdq_xmm, gen_helper_punpckhqdq_xmm },
3006 [0x16] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movhps, movhpd, movshdup */
3007 [0x17] = { SSE_SPECIAL, SSE_SPECIAL }, /* movhps, movhpd */
3008
3009 [0x28] = { SSE_SPECIAL, SSE_SPECIAL }, /* movaps, movapd */
3010 [0x29] = { SSE_SPECIAL, SSE_SPECIAL }, /* movaps, movapd */
3011 [0x2a] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvtpi2ps, cvtpi2pd, cvtsi2ss, cvtsi2sd */
3012 [0x2b] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movntps, movntpd, movntss, movntsd */
3013 [0x2c] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvttps2pi, cvttpd2pi, cvttss2si, cvttsd2si */
3014 [0x2d] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvtps2pi, cvtpd2pi, cvtss2si, cvtsd2si */
3015 [0x2e] = { gen_helper_ucomiss, gen_helper_ucomisd },
3016 [0x2f] = { gen_helper_comiss, gen_helper_comisd },
3017 [0x50] = { SSE_SPECIAL, SSE_SPECIAL }, /* movmskps, movmskpd */
3018 [0x51] = SSE_FOP(sqrt),
3019 [0x52] = { gen_helper_rsqrtps, NULL, gen_helper_rsqrtss, NULL },
3020 [0x53] = { gen_helper_rcpps, NULL, gen_helper_rcpss, NULL },
3021 [0x54] = { gen_helper_pand_xmm, gen_helper_pand_xmm }, /* andps, andpd */
3022 [0x55] = { gen_helper_pandn_xmm, gen_helper_pandn_xmm }, /* andnps, andnpd */
3023 [0x56] = { gen_helper_por_xmm, gen_helper_por_xmm }, /* orps, orpd */
3024 [0x57] = { gen_helper_pxor_xmm, gen_helper_pxor_xmm }, /* xorps, xorpd */
3025 [0x58] = SSE_FOP(add),
3026 [0x59] = SSE_FOP(mul),
3027 [0x5a] = { gen_helper_cvtps2pd, gen_helper_cvtpd2ps,
3028 gen_helper_cvtss2sd, gen_helper_cvtsd2ss },
3029 [0x5b] = { gen_helper_cvtdq2ps, gen_helper_cvtps2dq, gen_helper_cvttps2dq },
3030 [0x5c] = SSE_FOP(sub),
3031 [0x5d] = SSE_FOP(min),
3032 [0x5e] = SSE_FOP(div),
3033 [0x5f] = SSE_FOP(max),
3034
3035 [0xc2] = SSE_FOP(cmpeq),
3036 [0xc6] = { gen_helper_shufps, gen_helper_shufpd },
3037
3038 [0x38] = { SSE_SPECIAL, SSE_SPECIAL, NULL, SSE_SPECIAL }, /* SSSE3/SSE4 */
3039 [0x3a] = { SSE_SPECIAL, SSE_SPECIAL }, /* SSSE3/SSE4 */
3040
3041 /* MMX ops and their SSE extensions */
3042 [0x60] = MMX_OP2(punpcklbw),
3043 [0x61] = MMX_OP2(punpcklwd),
3044 [0x62] = MMX_OP2(punpckldq),
3045 [0x63] = MMX_OP2(packsswb),
3046 [0x64] = MMX_OP2(pcmpgtb),
3047 [0x65] = MMX_OP2(pcmpgtw),
3048 [0x66] = MMX_OP2(pcmpgtl),
3049 [0x67] = MMX_OP2(packuswb),
3050 [0x68] = MMX_OP2(punpckhbw),
3051 [0x69] = MMX_OP2(punpckhwd),
3052 [0x6a] = MMX_OP2(punpckhdq),
3053 [0x6b] = MMX_OP2(packssdw),
3054 [0x6c] = { NULL, gen_helper_punpcklqdq_xmm },
3055 [0x6d] = { NULL, gen_helper_punpckhqdq_xmm },
3056 [0x6e] = { SSE_SPECIAL, SSE_SPECIAL }, /* movd mm, ea */
3057 [0x6f] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movq, movdqa, movdqu */
3058 [0x70] = { gen_helper_pshufw_mmx,
3059 gen_helper_pshufd_xmm,
3060 gen_helper_pshufhw_xmm,
3061 gen_helper_pshuflw_xmm },
3062 [0x71] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftw */
3063 [0x72] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftd */
3064 [0x73] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftq */
3065 [0x74] = MMX_OP2(pcmpeqb),
3066 [0x75] = MMX_OP2(pcmpeqw),
3067 [0x76] = MMX_OP2(pcmpeql),
3068 [0x77] = { SSE_DUMMY }, /* emms */
3069 [0x78] = { NULL, SSE_SPECIAL, NULL, SSE_SPECIAL }, /* extrq_i, insertq_i */
3070 [0x79] = { NULL, gen_helper_extrq_r, NULL, gen_helper_insertq_r },
3071 [0x7c] = { NULL, gen_helper_haddpd, NULL, gen_helper_haddps },
3072 [0x7d] = { NULL, gen_helper_hsubpd, NULL, gen_helper_hsubps },
3073 [0x7e] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movd, movd, movq */
3074 [0x7f] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movq, movdqa, movdqu */
3075 [0xc4] = { SSE_SPECIAL, SSE_SPECIAL }, /* pinsrw */
3076 [0xc5] = { SSE_SPECIAL, SSE_SPECIAL }, /* pextrw */
3077 [0xd0] = { NULL, gen_helper_addsubpd, NULL, gen_helper_addsubps },
3078 [0xd1] = MMX_OP2(psrlw),
3079 [0xd2] = MMX_OP2(psrld),
3080 [0xd3] = MMX_OP2(psrlq),
3081 [0xd4] = MMX_OP2(paddq),
3082 [0xd5] = MMX_OP2(pmullw),
3083 [0xd6] = { NULL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movq, movq2dq, movdq2q */
3084 [0xd7] = { SSE_SPECIAL, SSE_SPECIAL }, /* pmovmskb */
3085 [0xd8] = MMX_OP2(psubusb),
3086 [0xd9] = MMX_OP2(psubusw),
3087 [0xda] = MMX_OP2(pminub),
3088 [0xdb] = MMX_OP2(pand),
3089 [0xdc] = MMX_OP2(paddusb),
3090 [0xdd] = MMX_OP2(paddusw),
3091 [0xde] = MMX_OP2(pmaxub),
3092 [0xdf] = MMX_OP2(pandn),
3093 [0xe0] = MMX_OP2(pavgb),
3094 [0xe1] = MMX_OP2(psraw),
3095 [0xe2] = MMX_OP2(psrad),
3096 [0xe3] = MMX_OP2(pavgw),
3097 [0xe4] = MMX_OP2(pmulhuw),
3098 [0xe5] = MMX_OP2(pmulhw),
3099 [0xe6] = { NULL, gen_helper_cvttpd2dq, gen_helper_cvtdq2pd, gen_helper_cvtpd2dq },
3100 [0xe7] = { SSE_SPECIAL, SSE_SPECIAL }, /* movntq, movntdq */
3101 [0xe8] = MMX_OP2(psubsb),
3102 [0xe9] = MMX_OP2(psubsw),
3103 [0xea] = MMX_OP2(pminsw),
3104 [0xeb] = MMX_OP2(por),
3105 [0xec] = MMX_OP2(paddsb),
3106 [0xed] = MMX_OP2(paddsw),
3107 [0xee] = MMX_OP2(pmaxsw),
3108 [0xef] = MMX_OP2(pxor),
3109 [0xf0] = { NULL, NULL, NULL, SSE_SPECIAL }, /* lddqu */
3110 [0xf1] = MMX_OP2(psllw),
3111 [0xf2] = MMX_OP2(pslld),
3112 [0xf3] = MMX_OP2(psllq),
3113 [0xf4] = MMX_OP2(pmuludq),
3114 [0xf5] = MMX_OP2(pmaddwd),
3115 [0xf6] = MMX_OP2(psadbw),
3116 [0xf7] = MMX_OP2(maskmov),
3117 [0xf8] = MMX_OP2(psubb),
3118 [0xf9] = MMX_OP2(psubw),
3119 [0xfa] = MMX_OP2(psubl),
3120 [0xfb] = MMX_OP2(psubq),
3121 [0xfc] = MMX_OP2(paddb),
3122 [0xfd] = MMX_OP2(paddw),
3123 [0xfe] = MMX_OP2(paddl),
3124};
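
/* Column selection for the table above, mirroring the prefix test at the
   top of gen_sse(); the helper name is illustrative only. */
static inline int sse_prefix_column_ref(int prefix)
{
    if (prefix & PREFIX_DATA)  return 1;  /* 66: packed double / full xmm */
    if (prefix & PREFIX_REPZ)  return 2;  /* F3: scalar single */
    if (prefix & PREFIX_REPNZ) return 3;  /* F2: scalar double */
    return 0;                             /* no prefix: MMX or packed single */
}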
3125
3126static void *sse_op_table2[3 * 8][2] = {
3127 [0 + 2] = MMX_OP2(psrlw),
3128 [0 + 4] = MMX_OP2(psraw),
3129 [0 + 6] = MMX_OP2(psllw),
3130 [8 + 2] = MMX_OP2(psrld),
3131 [8 + 4] = MMX_OP2(psrad),
3132 [8 + 6] = MMX_OP2(pslld),
3133 [16 + 2] = MMX_OP2(psrlq),
3134 [16 + 3] = { NULL, gen_helper_psrldq_xmm },
3135 [16 + 6] = MMX_OP2(psllq),
3136 [16 + 7] = { NULL, gen_helper_pslldq_xmm },
3137};
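
/* Row/column sketch for the table above, matching the lookup in the
   0x71/0x72/0x73 shift-by-immediate handler below: the opcode picks the
   8-entry block (0x71 word, 0x72 dword, 0x73 qword) and the ModRM reg
   field picks the entry within it. Hypothetical helper for illustration: */
static inline int sse_shift_table_index_ref(int b, int modrm)
{
    return ((b - 1) & 3) * 8 + ((modrm >> 3) & 7);
}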
3138
3139static void *sse_op_table3[4 * 3] = {
3140 gen_helper_cvtsi2ss,
3141 gen_helper_cvtsi2sd,
3142 X86_64_ONLY(gen_helper_cvtsq2ss),
3143 X86_64_ONLY(gen_helper_cvtsq2sd),
3144
3145 gen_helper_cvttss2si,
3146 gen_helper_cvttsd2si,
3147 X86_64_ONLY(gen_helper_cvttss2sq),
3148 X86_64_ONLY(gen_helper_cvttsd2sq),
3149
3150 gen_helper_cvtss2si,
3151 gen_helper_cvtsd2si,
3152 X86_64_ONLY(gen_helper_cvtss2sq),
3153 X86_64_ONLY(gen_helper_cvtsd2sq),
3154};
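
/* Layout sketch for the table above: three groups of four entries,
   group 0 = cvtsi2ss/sd, group 1 = truncating cvtt*2si, group 2 =
   rounding cvt*2si, indexed as below to match the call sites later in
   gen_sse() (illustrative helper only). */
static inline int sse_cvt_table_index_ref(int group, int is_64bit, int is_sd)
{
    return group * 4 + is_64bit * 2 + is_sd;
}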
3155
3156static void *sse_op_table4[8][4] = {
3157 SSE_FOP(cmpeq),
3158 SSE_FOP(cmplt),
3159 SSE_FOP(cmple),
3160 SSE_FOP(cmpunord),
3161 SSE_FOP(cmpneq),
3162 SSE_FOP(cmpnlt),
3163 SSE_FOP(cmpnle),
3164 SSE_FOP(cmpord),
3165};
3166
3167static void *sse_op_table5[256] = {
3168 [0x0c] = gen_helper_pi2fw,
3169 [0x0d] = gen_helper_pi2fd,
3170 [0x1c] = gen_helper_pf2iw,
3171 [0x1d] = gen_helper_pf2id,
3172 [0x8a] = gen_helper_pfnacc,
3173 [0x8e] = gen_helper_pfpnacc,
3174 [0x90] = gen_helper_pfcmpge,
3175 [0x94] = gen_helper_pfmin,
3176 [0x96] = gen_helper_pfrcp,
3177 [0x97] = gen_helper_pfrsqrt,
3178 [0x9a] = gen_helper_pfsub,
3179 [0x9e] = gen_helper_pfadd,
3180 [0xa0] = gen_helper_pfcmpgt,
3181 [0xa4] = gen_helper_pfmax,
3182 [0xa6] = gen_helper_movq, /* pfrcpit1; no need to actually increase precision */
3183 [0xa7] = gen_helper_movq, /* pfrsqit1 */
3184 [0xaa] = gen_helper_pfsubr,
3185 [0xae] = gen_helper_pfacc,
3186 [0xb0] = gen_helper_pfcmpeq,
3187 [0xb4] = gen_helper_pfmul,
3188 [0xb6] = gen_helper_movq, /* pfrcpit2 */
3189 [0xb7] = gen_helper_pmulhrw_mmx,
3190 [0xbb] = gen_helper_pswapd,
3191 [0xbf] = gen_helper_pavgb_mmx /* pavgusb */
3192};
3193
3194struct sse_op_helper_s {
3195 void *op[2]; uint32_t ext_mask;
3196};
3197#define SSSE3_OP(x) { MMX_OP2(x), CPUID_EXT_SSSE3 }
3198#define SSE41_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_SSE41 }
3199#define SSE42_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_SSE42 }
3200#define SSE41_SPECIAL { { NULL, SSE_SPECIAL }, CPUID_EXT_SSE41 }
3201static struct sse_op_helper_s sse_op_table6[256] = {
3202 [0x00] = SSSE3_OP(pshufb),
3203 [0x01] = SSSE3_OP(phaddw),
3204 [0x02] = SSSE3_OP(phaddd),
3205 [0x03] = SSSE3_OP(phaddsw),
3206 [0x04] = SSSE3_OP(pmaddubsw),
3207 [0x05] = SSSE3_OP(phsubw),
3208 [0x06] = SSSE3_OP(phsubd),
3209 [0x07] = SSSE3_OP(phsubsw),
3210 [0x08] = SSSE3_OP(psignb),
3211 [0x09] = SSSE3_OP(psignw),
3212 [0x0a] = SSSE3_OP(psignd),
3213 [0x0b] = SSSE3_OP(pmulhrsw),
3214 [0x10] = SSE41_OP(pblendvb),
3215 [0x14] = SSE41_OP(blendvps),
3216 [0x15] = SSE41_OP(blendvpd),
3217 [0x17] = SSE41_OP(ptest),
3218 [0x1c] = SSSE3_OP(pabsb),
3219 [0x1d] = SSSE3_OP(pabsw),
3220 [0x1e] = SSSE3_OP(pabsd),
3221 [0x20] = SSE41_OP(pmovsxbw),
3222 [0x21] = SSE41_OP(pmovsxbd),
3223 [0x22] = SSE41_OP(pmovsxbq),
3224 [0x23] = SSE41_OP(pmovsxwd),
3225 [0x24] = SSE41_OP(pmovsxwq),
3226 [0x25] = SSE41_OP(pmovsxdq),
3227 [0x28] = SSE41_OP(pmuldq),
3228 [0x29] = SSE41_OP(pcmpeqq),
3229 [0x2a] = SSE41_SPECIAL, /* movntdqa */
3230 [0x2b] = SSE41_OP(packusdw),
3231 [0x30] = SSE41_OP(pmovzxbw),
3232 [0x31] = SSE41_OP(pmovzxbd),
3233 [0x32] = SSE41_OP(pmovzxbq),
3234 [0x33] = SSE41_OP(pmovzxwd),
3235 [0x34] = SSE41_OP(pmovzxwq),
3236 [0x35] = SSE41_OP(pmovzxdq),
3237 [0x37] = SSE42_OP(pcmpgtq),
3238 [0x38] = SSE41_OP(pminsb),
3239 [0x39] = SSE41_OP(pminsd),
3240 [0x3a] = SSE41_OP(pminuw),
3241 [0x3b] = SSE41_OP(pminud),
3242 [0x3c] = SSE41_OP(pmaxsb),
3243 [0x3d] = SSE41_OP(pmaxsd),
3244 [0x3e] = SSE41_OP(pmaxuw),
3245 [0x3f] = SSE41_OP(pmaxud),
3246 [0x40] = SSE41_OP(pmulld),
3247 [0x41] = SSE41_OP(phminposuw),
3248};
3249
3250static struct sse_op_helper_s sse_op_table7[256] = {
3251 [0x08] = SSE41_OP(roundps),
3252 [0x09] = SSE41_OP(roundpd),
3253 [0x0a] = SSE41_OP(roundss),
3254 [0x0b] = SSE41_OP(roundsd),
3255 [0x0c] = SSE41_OP(blendps),
3256 [0x0d] = SSE41_OP(blendpd),
3257 [0x0e] = SSE41_OP(pblendw),
3258 [0x0f] = SSSE3_OP(palignr),
3259 [0x14] = SSE41_SPECIAL, /* pextrb */
3260 [0x15] = SSE41_SPECIAL, /* pextrw */
3261 [0x16] = SSE41_SPECIAL, /* pextrd/pextrq */
3262 [0x17] = SSE41_SPECIAL, /* extractps */
3263 [0x20] = SSE41_SPECIAL, /* pinsrb */
3264 [0x21] = SSE41_SPECIAL, /* insertps */
3265 [0x22] = SSE41_SPECIAL, /* pinsrd/pinsrq */
3266 [0x40] = SSE41_OP(dpps),
3267 [0x41] = SSE41_OP(dppd),
3268 [0x42] = SSE41_OP(mpsadbw),
3269 [0x60] = SSE42_OP(pcmpestrm),
3270 [0x61] = SSE42_OP(pcmpestri),
3271 [0x62] = SSE42_OP(pcmpistrm),
3272 [0x63] = SSE42_OP(pcmpistri),
3273};
3274
3275static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r)
3276{
3277 int b1, op1_offset, op2_offset, is_xmm, val, ot;
3278 int modrm, mod, rm, reg, reg_addr, offset_addr;
3279 void *sse_op2;
3280
3281 b &= 0xff;
3282 if (s->prefix & PREFIX_DATA)
3283 b1 = 1;
3284 else if (s->prefix & PREFIX_REPZ)
3285 b1 = 2;
3286 else if (s->prefix & PREFIX_REPNZ)
3287 b1 = 3;
3288 else
3289 b1 = 0;
3290 sse_op2 = sse_op_table1[b][b1];
3291 if (!sse_op2)
3292 goto illegal_op;
3293 if ((b <= 0x5f && b >= 0x10) || b == 0xc6 || b == 0xc2) {
3294 is_xmm = 1;
3295 } else {
3296 if (b1 == 0) {
3297 /* MMX case */
3298 is_xmm = 0;
3299 } else {
3300 is_xmm = 1;
3301 }
3302 }
3303 /* simple MMX/SSE operation */
3304 if (s->flags & HF_TS_MASK) {
3305 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
3306 return;
3307 }
3308 if (s->flags & HF_EM_MASK) {
3309 illegal_op:
3310 gen_exception(s, EXCP06_ILLOP, pc_start - s->cs_base);
3311 return;
3312 }
3313 if (is_xmm && !(s->flags & HF_OSFXSR_MASK))
3314 if ((b != 0x38 && b != 0x3a) || (s->prefix & PREFIX_DATA))
3315 goto illegal_op;
3316 if (b == 0x0e) {
3317 if (!(s->cpuid_ext2_features & CPUID_EXT2_3DNOW))
3318 goto illegal_op;
3319 /* femms */
3320 gen_helper_emms();
3321 return;
3322 }
3323 if (b == 0x77) {
3324 /* emms */
3325 gen_helper_emms();
3326 return;
3327 }
3328 /* prepare MMX state (XXX: optimize by storing fptt and fptags in
3329 the static cpu state) */
3330 if (!is_xmm) {
3331 gen_helper_enter_mmx();
3332 }
3333
3334 modrm = ldub_code(s->pc++);
3335 reg = ((modrm >> 3) & 7);
3336 if (is_xmm)
3337 reg |= rex_r;
3338 mod = (modrm >> 6) & 3;
3339 if (sse_op2 == SSE_SPECIAL) {
3340 b |= (b1 << 8);
3341 switch(b) {
3342 case 0x0e7: /* movntq */
3343 if (mod == 3)
3344 goto illegal_op;
3345 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3346 gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,fpregs[reg].mmx));
3347 break;
3348 case 0x1e7: /* movntdq */
3349 case 0x02b: /* movntps */
3350 case 0x12b: /* movntpd */
3351 if (mod == 3)
3352 goto illegal_op;
3353 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3354 gen_sto_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
3355 break;
3356 case 0x3f0: /* lddqu */
3357 if (mod == 3)
3358 goto illegal_op;
3359 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3360 gen_ldo_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
3361 break;
3362 case 0x22b: /* movntss */
3363 case 0x32b: /* movntsd */
3364 if (mod == 3)
3365 goto illegal_op;
3366 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3367 if (b1 & 1) {
3368 gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,
3369 xmm_regs[reg]));
3370 } else {
3371 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
3372 xmm_regs[reg].XMM_L(0)));
3373 gen_op_st_T0_A0(OT_LONG + s->mem_index);
3374 }
3375 break;
3376 case 0x6e: /* movd mm, ea */
3377#ifdef TARGET_X86_64
3378 if (s->dflag == 2) {
3379 gen_ldst_modrm(s, modrm, OT_QUAD, OR_TMP0, 0);
3380 tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,fpregs[reg].mmx));
3381 } else
3382#endif
3383 {
3384 gen_ldst_modrm(s, modrm, OT_LONG, OR_TMP0, 0);
3385 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3386 offsetof(CPUX86State,fpregs[reg].mmx));
3387 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
3388 gen_helper_movl_mm_T0_mmx(cpu_ptr0, cpu_tmp2_i32);
3389 }
3390 break;
3391 case 0x16e: /* movd xmm, ea */
3392#ifdef TARGET_X86_64
3393 if (s->dflag == 2) {
3394 gen_ldst_modrm(s, modrm, OT_QUAD, OR_TMP0, 0);
3395 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3396 offsetof(CPUX86State,xmm_regs[reg]));
3397 gen_helper_movq_mm_T0_xmm(cpu_ptr0, cpu_T[0]);
3398 } else
3399#endif
3400 {
3401 gen_ldst_modrm(s, modrm, OT_LONG, OR_TMP0, 0);
3402 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3403 offsetof(CPUX86State,xmm_regs[reg]));
3404 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
3405 gen_helper_movl_mm_T0_xmm(cpu_ptr0, cpu_tmp2_i32);
3406 }
3407 break;
3408 case 0x6f: /* movq mm, ea */
3409 if (mod != 3) {
3410 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3411 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,fpregs[reg].mmx));
3412 } else {
3413 rm = (modrm & 7);
3414 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env,
3415 offsetof(CPUX86State,fpregs[rm].mmx));
3416 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env,
3417 offsetof(CPUX86State,fpregs[reg].mmx));
3418 }
3419 break;
3420 case 0x010: /* movups */
3421 case 0x110: /* movupd */
3422 case 0x028: /* movaps */
3423 case 0x128: /* movapd */
3424 case 0x16f: /* movdqa xmm, ea */
3425 case 0x26f: /* movdqu xmm, ea */
3426 if (mod != 3) {
3427 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3428 gen_ldo_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
3429 } else {
3430 rm = (modrm & 7) | REX_B(s);
3431 gen_op_movo(offsetof(CPUX86State,xmm_regs[reg]),
3432 offsetof(CPUX86State,xmm_regs[rm]));
3433 }
3434 break;
3435 case 0x210: /* movss xmm, ea */
3436 if (mod != 3) {
3437 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3438 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
3439 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
3440 gen_op_movl_T0_0();
3441 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)));
3442 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)));
3443 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)));
3444 } else {
3445 rm = (modrm & 7) | REX_B(s);
3446 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)),
3447 offsetof(CPUX86State,xmm_regs[rm].XMM_L(0)));
3448 }
3449 break;
3450 case 0x310: /* movsd xmm, ea */
3451 if (mod != 3) {
3452 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3453 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3454 gen_op_movl_T0_0();
3455 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)));
3456 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)));
3457 } else {
3458 rm = (modrm & 7) | REX_B(s);
3459 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
3460 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
3461 }
3462 break;
3463 case 0x012: /* movlps */
3464 case 0x112: /* movlpd */
3465 if (mod != 3) {
3466 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3467 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3468 } else {
3469 /* movhlps */
3470 rm = (modrm & 7) | REX_B(s);
3471 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
3472 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(1)));
3473 }
3474 break;
3475 case 0x212: /* movsldup */
3476 if (mod != 3) {
3477 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3478 gen_ldo_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
3479 } else {
3480 rm = (modrm & 7) | REX_B(s);
3481 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)),
3482 offsetof(CPUX86State,xmm_regs[rm].XMM_L(0)));
3483 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)),
3484 offsetof(CPUX86State,xmm_regs[rm].XMM_L(2)));
3485 }
3486 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)),
3487 offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
3488 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)),
3489 offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)));
3490 break;
3491 case 0x312: /* movddup */
3492 if (mod != 3) {
3493 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3494 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3495 } else {
3496 rm = (modrm & 7) | REX_B(s);
3497 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
3498 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
3499 }
3500 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)),
3501 offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3502 break;
3503 case 0x016: /* movhps */
3504 case 0x116: /* movhpd */
3505 if (mod != 3) {
3506 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3507 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)));
3508 } else {
3509 /* movlhps */
3510 rm = (modrm & 7) | REX_B(s);
3511 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)),
3512 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
3513 }
3514 break;
3515 case 0x216: /* movshdup */
3516 if (mod != 3) {
3517 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3518 gen_ldo_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
3519 } else {
3520 rm = (modrm & 7) | REX_B(s);
3521 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)),
3522 offsetof(CPUX86State,xmm_regs[rm].XMM_L(1)));
3523 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)),
3524 offsetof(CPUX86State,xmm_regs[rm].XMM_L(3)));
3525 }
3526 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)),
3527 offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)));
3528 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)),
3529 offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)));
3530 break;
3531 case 0x178:
3532 case 0x378:
3533 {
3534 int bit_index, field_length;
3535
3536 if (b1 == 1 && reg != 0)
3537 goto illegal_op;
3538 field_length = ldub_code(s->pc++) & 0x3F;
3539 bit_index = ldub_code(s->pc++) & 0x3F;
3540 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3541 offsetof(CPUX86State,xmm_regs[reg]));
3542 if (b1 == 1)
3543 gen_helper_extrq_i(cpu_ptr0, tcg_const_i32(bit_index),
3544 tcg_const_i32(field_length));
3545 else
3546 gen_helper_insertq_i(cpu_ptr0, tcg_const_i32(bit_index),
3547 tcg_const_i32(field_length));
3548 }
3549 break;
3550 case 0x7e: /* movd ea, mm */
3551#ifdef TARGET_X86_64
3552 if (s->dflag == 2) {
3553 tcg_gen_ld_i64(cpu_T[0], cpu_env,
3554 offsetof(CPUX86State,fpregs[reg].mmx));
3555 gen_ldst_modrm(s, modrm, OT_QUAD, OR_TMP0, 1);
3556 } else
3557#endif
3558 {
3559 tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
3560 offsetof(CPUX86State,fpregs[reg].mmx.MMX_L(0)));
3561 gen_ldst_modrm(s, modrm, OT_LONG, OR_TMP0, 1);
3562 }
3563 break;
3564 case 0x17e: /* movd ea, xmm */
3565#ifdef TARGET_X86_64
3566 if (s->dflag == 2) {
3567 tcg_gen_ld_i64(cpu_T[0], cpu_env,
3568 offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3569 gen_ldst_modrm(s, modrm, OT_QUAD, OR_TMP0, 1);
3570 } else
3571#endif
3572 {
3573 tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
3574 offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
3575 gen_ldst_modrm(s, modrm, OT_LONG, OR_TMP0, 1);
3576 }
3577 break;
3578 case 0x27e: /* movq xmm, ea */
3579 if (mod != 3) {
3580 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3581 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3582 } else {
3583 rm = (modrm & 7) | REX_B(s);
3584 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
3585 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
3586 }
3587 gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)));
3588 break;
3589 case 0x7f: /* movq ea, mm */
3590 if (mod != 3) {
3591 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3592 gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,fpregs[reg].mmx));
3593 } else {
3594 rm = (modrm & 7);
3595 gen_op_movq(offsetof(CPUX86State,fpregs[rm].mmx),
3596 offsetof(CPUX86State,fpregs[reg].mmx));
3597 }
3598 break;
3599 case 0x011: /* movups */
3600 case 0x111: /* movupd */
3601 case 0x029: /* movaps */
3602 case 0x129: /* movapd */
3603 case 0x17f: /* movdqa ea, xmm */
3604 case 0x27f: /* movdqu ea, xmm */
3605 if (mod != 3) {
3606 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3607 gen_sto_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
3608 } else {
3609 rm = (modrm & 7) | REX_B(s);
3610 gen_op_movo(offsetof(CPUX86State,xmm_regs[rm]),
3611 offsetof(CPUX86State,xmm_regs[reg]));
3612 }
3613 break;
3614 case 0x211: /* movss ea, xmm */
3615 if (mod != 3) {
3616 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3617 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
3618 gen_op_st_T0_A0(OT_LONG + s->mem_index);
3619 } else {
3620 rm = (modrm & 7) | REX_B(s);
3621 gen_op_movl(offsetof(CPUX86State,xmm_regs[rm].XMM_L(0)),
3622 offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
3623 }
3624 break;
3625 case 0x311: /* movsd ea, xmm */
3626 if (mod != 3) {
3627 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3628 gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3629 } else {
3630 rm = (modrm & 7) | REX_B(s);
3631 gen_op_movq(offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)),
3632 offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3633 }
3634 break;
3635 case 0x013: /* movlps */
3636 case 0x113: /* movlpd */
3637 if (mod != 3) {
3638 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3639 gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3640 } else {
3641 goto illegal_op;
3642 }
3643 break;
3644 case 0x017: /* movhps */
3645 case 0x117: /* movhpd */
3646 if (mod != 3) {
3647 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3648 gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)));
3649 } else {
3650 goto illegal_op;
3651 }
3652 break;
3653 case 0x71: /* shift mm, im */
3654 case 0x72:
3655 case 0x73:
3656 case 0x171: /* shift xmm, im */
3657 case 0x172:
3658 case 0x173:
3659 if (b1 >= 2) {
3660 goto illegal_op;
3661 }
3662 val = ldub_code(s->pc++);
3663 if (is_xmm) {
3664 gen_op_movl_T0_im(val);
3665 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(0)));
3666 gen_op_movl_T0_0();
3667 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(1)));
3668 op1_offset = offsetof(CPUX86State,xmm_t0);
3669 } else {
3670 gen_op_movl_T0_im(val);
3671 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,mmx_t0.MMX_L(0)));
3672 gen_op_movl_T0_0();
3673 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,mmx_t0.MMX_L(1)));
3674 op1_offset = offsetof(CPUX86State,mmx_t0);
3675 }
3676 sse_op2 = sse_op_table2[((b - 1) & 3) * 8 + (((modrm >> 3)) & 7)][b1];
3677 if (!sse_op2)
3678 goto illegal_op;
3679 if (is_xmm) {
3680 rm = (modrm & 7) | REX_B(s);
3681 op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
3682 } else {
3683 rm = (modrm & 7);
3684 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
3685 }
3686 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op2_offset);
3687 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op1_offset);
3688 ((void (*)(TCGv_ptr, TCGv_ptr))sse_op2)(cpu_ptr0, cpu_ptr1);
3689 break;
3690 case 0x050: /* movmskps */
3691 rm = (modrm & 7) | REX_B(s);
3692 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3693 offsetof(CPUX86State,xmm_regs[rm]));
3694 gen_helper_movmskps(cpu_tmp2_i32, cpu_ptr0);
3695 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
3696 gen_op_mov_reg_T0(OT_LONG, reg);
3697 break;
3698 case 0x150: /* movmskpd */
3699 rm = (modrm & 7) | REX_B(s);
3700 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3701 offsetof(CPUX86State,xmm_regs[rm]));
3702 gen_helper_movmskpd(cpu_tmp2_i32, cpu_ptr0);
3703 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
3704 gen_op_mov_reg_T0(OT_LONG, reg);
3705 break;
3706 case 0x02a: /* cvtpi2ps */
3707 case 0x12a: /* cvtpi2pd */
3708 gen_helper_enter_mmx();
3709 if (mod != 3) {
3710 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3711 op2_offset = offsetof(CPUX86State,mmx_t0);
3712 gen_ldq_env_A0(s->mem_index, op2_offset);
3713 } else {
3714 rm = (modrm & 7);
3715 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
3716 }
3717 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
3718 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3719 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
3720 switch(b >> 8) {
3721 case 0x0:
3722 gen_helper_cvtpi2ps(cpu_ptr0, cpu_ptr1);
3723 break;
3724 default:
3725 case 0x1:
3726 gen_helper_cvtpi2pd(cpu_ptr0, cpu_ptr1);
3727 break;
3728 }
3729 break;
3730 case 0x22a: /* cvtsi2ss */
3731 case 0x32a: /* cvtsi2sd */
3732 ot = (s->dflag == 2) ? OT_QUAD : OT_LONG;
3733 gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0);
3734 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
3735 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3736 sse_op2 = sse_op_table3[(s->dflag == 2) * 2 + ((b >> 8) - 2)];
3737 if (ot == OT_LONG) {
3738 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
3739 ((void (*)(TCGv_ptr, TCGv_i32))sse_op2)(cpu_ptr0, cpu_tmp2_i32);
3740 } else {
3741 ((void (*)(TCGv_ptr, TCGv))sse_op2)(cpu_ptr0, cpu_T[0]);
3742 }
3743 break;
3744 case 0x02c: /* cvttps2pi */
3745 case 0x12c: /* cvttpd2pi */
3746 case 0x02d: /* cvtps2pi */
3747 case 0x12d: /* cvtpd2pi */
3748 gen_helper_enter_mmx();
3749 if (mod != 3) {
3750 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3751 op2_offset = offsetof(CPUX86State,xmm_t0);
3752 gen_ldo_env_A0(s->mem_index, op2_offset);
3753 } else {
3754 rm = (modrm & 7) | REX_B(s);
3755 op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
3756 }
3757 op1_offset = offsetof(CPUX86State,fpregs[reg & 7].mmx);
3758 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3759 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
3760 switch(b) {
3761 case 0x02c:
3762 gen_helper_cvttps2pi(cpu_ptr0, cpu_ptr1);
3763 break;
3764 case 0x12c:
3765 gen_helper_cvttpd2pi(cpu_ptr0, cpu_ptr1);
3766 break;
3767 case 0x02d:
3768 gen_helper_cvtps2pi(cpu_ptr0, cpu_ptr1);
3769 break;
3770 case 0x12d:
3771 gen_helper_cvtpd2pi(cpu_ptr0, cpu_ptr1);
3772 break;
3773 }
3774 break;
3775 case 0x22c: /* cvttss2si */
3776 case 0x32c: /* cvttsd2si */
3777 case 0x22d: /* cvtss2si */
3778 case 0x32d: /* cvtsd2si */
3779 ot = (s->dflag == 2) ? OT_QUAD : OT_LONG;
3780 if (mod != 3) {
3781 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3782 if ((b >> 8) & 1) {
3783 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_t0.XMM_Q(0)));
3784 } else {
3785 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
3786 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(0)));
3787 }
3788 op2_offset = offsetof(CPUX86State,xmm_t0);
3789 } else {
3790 rm = (modrm & 7) | REX_B(s);
3791 op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
3792 }
3793 sse_op2 = sse_op_table3[(s->dflag == 2) * 2 + ((b >> 8) - 2) + 4 +
3794 (b & 1) * 4];
3795 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op2_offset);
3796 if (ot == OT_LONG) {
3797 ((void (*)(TCGv_i32, TCGv_ptr))sse_op2)(cpu_tmp2_i32, cpu_ptr0);
3798 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
3799 } else {
3800 ((void (*)(TCGv, TCGv_ptr))sse_op2)(cpu_T[0], cpu_ptr0);
3801 }
3802 gen_op_mov_reg_T0(ot, reg);
3803 break;
3804 case 0xc4: /* pinsrw */
3805 case 0x1c4:
3806 s->rip_offset = 1;
3807 gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0);
3808 val = ldub_code(s->pc++);
3809 if (b1) {
3810 val &= 7;
3811 tcg_gen_st16_tl(cpu_T[0], cpu_env,
3812 offsetof(CPUX86State,xmm_regs[reg].XMM_W(val)));
3813 } else {
3814 val &= 3;
3815 tcg_gen_st16_tl(cpu_T[0], cpu_env,
3816 offsetof(CPUX86State,fpregs[reg].mmx.MMX_W(val)));
3817 }
3818 break;
3819 case 0xc5: /* pextrw */
3820 case 0x1c5:
3821 if (mod != 3)
3822 goto illegal_op;
3823 ot = (s->dflag == 2) ? OT_QUAD : OT_LONG;
3824 val = ldub_code(s->pc++);
3825 if (b1) {
3826 val &= 7;
3827 rm = (modrm & 7) | REX_B(s);
3828 tcg_gen_ld16u_tl(cpu_T[0], cpu_env,
3829 offsetof(CPUX86State,xmm_regs[rm].XMM_W(val)));
3830 } else {
3831 val &= 3;
3832 rm = (modrm & 7);
3833 tcg_gen_ld16u_tl(cpu_T[0], cpu_env,
3834 offsetof(CPUX86State,fpregs[rm].mmx.MMX_W(val)));
3835 }
3836 reg = ((modrm >> 3) & 7) | rex_r;
3837 gen_op_mov_reg_T0(ot, reg);
3838 break;
3839 case 0x1d6: /* movq ea, xmm */
3840 if (mod != 3) {
3841 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3842 gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3843 } else {
3844 rm = (modrm & 7) | REX_B(s);
3845 gen_op_movq(offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)),
3846 offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3847 gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[rm].XMM_Q(1)));
3848 }
3849 break;
3850 case 0x2d6: /* movq2dq */
3851 gen_helper_enter_mmx();
3852 rm = (modrm & 7);
3853 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
3854 offsetof(CPUX86State,fpregs[rm].mmx));
3855 gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)));
3856 break;
3857 case 0x3d6: /* movdq2q */
3858 gen_helper_enter_mmx();
3859 rm = (modrm & 7) | REX_B(s);
3860 gen_op_movq(offsetof(CPUX86State,fpregs[reg & 7].mmx),
3861 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
3862 break;
3863 case 0xd7: /* pmovmskb */
3864 case 0x1d7:
3865 if (mod != 3)
3866 goto illegal_op;
3867 if (b1) {
3868 rm = (modrm & 7) | REX_B(s);
3869 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, offsetof(CPUX86State,xmm_regs[rm]));
3870 gen_helper_pmovmskb_xmm(cpu_tmp2_i32, cpu_ptr0);
3871 } else {
3872 rm = (modrm & 7);
3873 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, offsetof(CPUX86State,fpregs[rm].mmx));
3874 gen_helper_pmovmskb_mmx(cpu_tmp2_i32, cpu_ptr0);
3875 }
3876 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
3877 reg = ((modrm >> 3) & 7) | rex_r;
3878 gen_op_mov_reg_T0(OT_LONG, reg);
3879 break;
3880 case 0x138:
3881 if (s->prefix & PREFIX_REPNZ)
3882 goto crc32;
3883 case 0x038:
3884 b = modrm;
3885 modrm = ldub_code(s->pc++);
3886 rm = modrm & 7;
3887 reg = ((modrm >> 3) & 7) | rex_r;
3888 mod = (modrm >> 6) & 3;
3889 if (b1 >= 2) {
3890 goto illegal_op;
3891 }
3892
3893 sse_op2 = sse_op_table6[b].op[b1];
3894 if (!sse_op2)
3895 goto illegal_op;
3896 if (!(s->cpuid_ext_features & sse_op_table6[b].ext_mask))
3897 goto illegal_op;
3898
3899 if (b1) {
3900 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
3901 if (mod == 3) {
3902 op2_offset = offsetof(CPUX86State,xmm_regs[rm | REX_B(s)]);
3903 } else {
3904 op2_offset = offsetof(CPUX86State,xmm_t0);
3905 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3906 switch (b) {
3907 case 0x20: case 0x30: /* pmovsxbw, pmovzxbw */
3908 case 0x23: case 0x33: /* pmovsxwd, pmovzxwd */
3909 case 0x25: case 0x35: /* pmovsxdq, pmovzxdq */
3910 gen_ldq_env_A0(s->mem_index, op2_offset +
3911 offsetof(XMMReg, XMM_Q(0)));
3912 break;
3913 case 0x21: case 0x31: /* pmovsxbd, pmovzxbd */
3914 case 0x24: case 0x34: /* pmovsxwq, pmovzxwq */
3915 tcg_gen_qemu_ld32u(cpu_tmp0, cpu_A0,
3916 (s->mem_index >> 2) - 1);
3917 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_tmp0);
3918 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, op2_offset +
3919 offsetof(XMMReg, XMM_L(0)));
3920 break;
3921 case 0x22: case 0x32: /* pmovsxbq, pmovzxbq */
3922 tcg_gen_qemu_ld16u(cpu_tmp0, cpu_A0,
3923 (s->mem_index >> 2) - 1);
3924 tcg_gen_st16_tl(cpu_tmp0, cpu_env, op2_offset +
3925 offsetof(XMMReg, XMM_W(0)));
3926 break;
3927 case 0x2a: /* movntdqa */
3928 gen_ldo_env_A0(s->mem_index, op1_offset);
3929 return;
3930 default:
3931 gen_ldo_env_A0(s->mem_index, op2_offset);
3932 }
3933 }
3934 } else {
3935 op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
3936 if (mod == 3) {
3937 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
3938 } else {
3939 op2_offset = offsetof(CPUX86State,mmx_t0);
3940 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3941 gen_ldq_env_A0(s->mem_index, op2_offset);
3942 }
3943 }
3944 if (sse_op2 == SSE_SPECIAL)
3945 goto illegal_op;
3946
3947 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3948 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
3949 ((void (*)(TCGv_ptr, TCGv_ptr))sse_op2)(cpu_ptr0, cpu_ptr1);
3950
3951 if (b == 0x17)
3952 s->cc_op = CC_OP_EFLAGS;
3953 break;
3954 case 0x338: /* crc32 */
3955 crc32:
3956 b = modrm;
3957 modrm = ldub_code(s->pc++);
3958 reg = ((modrm >> 3) & 7) | rex_r;
3959
3960 if (b != 0xf0 && b != 0xf1)
3961 goto illegal_op;
3962 if (!(s->cpuid_ext_features & CPUID_EXT_SSE42))
3963 goto illegal_op;
3964
3965 if (b == 0xf0)
3966 ot = OT_BYTE;
3967 else if (b == 0xf1 && s->dflag != 2) {
3968 if (s->prefix & PREFIX_DATA)
3969 ot = OT_WORD;
3970 else
3971 ot = OT_LONG;
3972 } else
3973 ot = OT_QUAD;
3974
3975 gen_op_mov_TN_reg(OT_LONG, 0, reg);
3976 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
3977 gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0);
3978 gen_helper_crc32(cpu_T[0], cpu_tmp2_i32,
3979 cpu_T[0], tcg_const_i32(8 << ot));
3980
3981 ot = (s->dflag == 2) ? OT_QUAD : OT_LONG;
3982 gen_op_mov_reg_T0(ot, reg);
3983 break;
3984 case 0x03a:
3985 case 0x13a:
3986 b = modrm;
3987 modrm = ldub_code(s->pc++);
3988 rm = modrm & 7;
3989 reg = ((modrm >> 3) & 7) | rex_r;
3990 mod = (modrm >> 6) & 3;
3991 if (b1 >= 2) {
3992 goto illegal_op;
3993 }
3994
3995 sse_op2 = sse_op_table7[b].op[b1];
3996 if (!sse_op2)
3997 goto illegal_op;
3998 if (!(s->cpuid_ext_features & sse_op_table7[b].ext_mask))
3999 goto illegal_op;
4000
4001 if (sse_op2 == SSE_SPECIAL) {
4002 ot = (s->dflag == 2) ? OT_QUAD : OT_LONG;
4003 rm = (modrm & 7) | REX_B(s);
4004 if (mod != 3)
4005 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
4006 reg = ((modrm >> 3) & 7) | rex_r;
4007 val = ldub_code(s->pc++);
4008 switch (b) {
4009 case 0x14: /* pextrb */
4010 tcg_gen_ld8u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
4011 xmm_regs[reg].XMM_B(val & 15)));
4012 if (mod == 3)
4013 gen_op_mov_reg_T0(ot, rm);
4014 else
4015 tcg_gen_qemu_st8(cpu_T[0], cpu_A0,
4016 (s->mem_index >> 2) - 1);
4017 break;
4018 case 0x15: /* pextrw */
4019 tcg_gen_ld16u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
4020 xmm_regs[reg].XMM_W(val & 7)));
4021 if (mod == 3)
4022 gen_op_mov_reg_T0(ot, rm);
4023 else
4024 tcg_gen_qemu_st16(cpu_T[0], cpu_A0,
4025 (s->mem_index >> 2) - 1);
4026 break;
4027 case 0x16:
4028 if (ot == OT_LONG) { /* pextrd */
4029 tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env,
4030 offsetof(CPUX86State,
4031 xmm_regs[reg].XMM_L(val & 3)));
4032 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
4033 if (mod == 3)
4034 gen_op_mov_reg_v(ot, rm, cpu_T[0]);
4035 else
4036 tcg_gen_qemu_st32(cpu_T[0], cpu_A0,
4037 (s->mem_index >> 2) - 1);
4038 } else { /* pextrq */
4039#ifdef TARGET_X86_64
4040 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env,
4041 offsetof(CPUX86State,
4042 xmm_regs[reg].XMM_Q(val & 1)));
4043 if (mod == 3)
4044 gen_op_mov_reg_v(ot, rm, cpu_tmp1_i64);
4045 else
4046 tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0,
4047 (s->mem_index >> 2) - 1);
4048#else
4049 goto illegal_op;
4050#endif
4051 }
4052 break;
4053 case 0x17: /* extractps */
4054 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
4055 xmm_regs[reg].XMM_L(val & 3)));
4056 if (mod == 3)
4057 gen_op_mov_reg_T0(ot, rm);
4058 else
4059 tcg_gen_qemu_st32(cpu_T[0], cpu_A0,
4060 (s->mem_index >> 2) - 1);
4061 break;
4062 case 0x20: /* pinsrb */
4063 if (mod == 3)
4064 gen_op_mov_TN_reg(OT_LONG, 0, rm);
4065 else
4066 tcg_gen_qemu_ld8u(cpu_tmp0, cpu_A0,
4067 (s->mem_index >> 2) - 1);
4068 tcg_gen_st8_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State,
4069 xmm_regs[reg].XMM_B(val & 15)));
4070 break;
4071 case 0x21: /* insertps */
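                /* INSERTPS imm8: bits 7:6 select the source dword (register
                   form only), bits 5:4 the destination dword, bits 3:0 a
                   zero mask applied to the destination. */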
4072 if (mod == 3) {
4073 tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env,
4074 offsetof(CPUX86State,xmm_regs[rm]
4075 .XMM_L((val >> 6) & 3)));
4076 } else {
4077 tcg_gen_qemu_ld32u(cpu_tmp0, cpu_A0,
4078 (s->mem_index >> 2) - 1);
4079 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_tmp0);
4080 }
4081 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env,
4082 offsetof(CPUX86State,xmm_regs[reg]
4083 .XMM_L((val >> 4) & 3)));
4084 if ((val >> 0) & 1)
4085 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4086 cpu_env, offsetof(CPUX86State,
4087 xmm_regs[reg].XMM_L(0)));
4088 if ((val >> 1) & 1)
4089 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4090 cpu_env, offsetof(CPUX86State,
4091 xmm_regs[reg].XMM_L(1)));
4092 if ((val >> 2) & 1)
4093 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4094 cpu_env, offsetof(CPUX86State,
4095 xmm_regs[reg].XMM_L(2)));
4096 if ((val >> 3) & 1)
4097 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4098 cpu_env, offsetof(CPUX86State,
4099 xmm_regs[reg].XMM_L(3)));
4100 break;
4101 case 0x22:
4102 if (ot == OT_LONG) { /* pinsrd */
4103 if (mod == 3)
4104 gen_op_mov_v_reg(ot, cpu_tmp0, rm);
4105 else
4106 tcg_gen_qemu_ld32u(cpu_tmp0, cpu_A0,
4107 (s->mem_index >> 2) - 1);
4108 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_tmp0);
4109 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env,
4110 offsetof(CPUX86State,
4111 xmm_regs[reg].XMM_L(val & 3)));
4112 } else { /* pinsrq */
4113#ifdef TARGET_X86_64
4114 if (mod == 3)
4115 gen_op_mov_v_reg(ot, cpu_tmp1_i64, rm);
4116 else
4117 tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0,
4118 (s->mem_index >> 2) - 1);
4119 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env,
4120 offsetof(CPUX86State,
4121 xmm_regs[reg].XMM_Q(val & 1)));
4122#else
4123 goto illegal_op;
4124#endif
4125 }
4126 break;
4127 }
4128 return;
4129 }
4130
4131 if (b1) {
4132 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
4133 if (mod == 3) {
4134 op2_offset = offsetof(CPUX86State,xmm_regs[rm | REX_B(s)]);
4135 } else {
4136 op2_offset = offsetof(CPUX86State,xmm_t0);
4137 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
4138 gen_ldo_env_A0(s->mem_index, op2_offset);
4139 }
4140 } else {
4141 op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
4142 if (mod == 3) {
4143 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
4144 } else {
4145 op2_offset = offsetof(CPUX86State,mmx_t0);
4146 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
4147 gen_ldq_env_A0(s->mem_index, op2_offset);
4148 }
4149 }
4150 val = ldub_code(s->pc++);
4151
4152 if ((b & 0xfc) == 0x60) { /* pcmpXstrX */
4153 s->cc_op = CC_OP_EFLAGS;
4154
4155 if (s->dflag == 2)
4156 /* The helper must use entire 64-bit gp registers */
4157 val |= 1 << 8;
4158 }
4159
4160 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4161 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
4162 ((void (*)(TCGv_ptr, TCGv_ptr, TCGv_i32))sse_op2)(cpu_ptr0, cpu_ptr1, tcg_const_i32(val));
4163 break;
4164 default:
4165 goto illegal_op;
4166 }
4167 } else {
4168 /* generic MMX or SSE operation */
4169 switch(b) {
4170 case 0x70: /* pshufx insn */
4171 case 0xc6: /* shufps/shufpd insn */
4172 case 0xc2: /* compare insns */
4173 s->rip_offset = 1;
4174 break;
4175 default:
4176 break;
4177 }
4178 if (is_xmm) {
4179 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
4180 if (mod != 3) {
4181 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
4182 op2_offset = offsetof(CPUX86State,xmm_t0);
4183 if (b1 >= 2 && ((b >= 0x50 && b <= 0x5f && b != 0x5b) ||
4184 b == 0xc2)) {
4185 /* special case for scalar SSE instructions: only 32/64 bits are loaded */
4186 if (b1 == 2) {
4187 /* 32 bit access */
4188 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
4189 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(0)));
4190 } else {
4191 /* 64 bit access */
4192 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_t0.XMM_D(0)));
4193 }
4194 } else {
4195 gen_ldo_env_A0(s->mem_index, op2_offset);
4196 }
4197 } else {
4198 rm = (modrm & 7) | REX_B(s);
4199 op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
4200 }
4201 } else {
4202 op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
4203 if (mod != 3) {
4204 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
4205 op2_offset = offsetof(CPUX86State,mmx_t0);
4206 gen_ldq_env_A0(s->mem_index, op2_offset);
4207 } else {
4208 rm = (modrm & 7);
4209 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
4210 }
4211 }
4212 switch(b) {
4213 case 0x0f: /* 3DNow! data insns */
4214 if (!(s->cpuid_ext2_features & CPUID_EXT2_3DNOW))
4215 goto illegal_op;
4216 val = ldub_code(s->pc++);
4217 sse_op2 = sse_op_table5[val];
4218 if (!sse_op2)
4219 goto illegal_op;
4220 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4221 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
4222 ((void (*)(TCGv_ptr, TCGv_ptr))sse_op2)(cpu_ptr0, cpu_ptr1);
4223 break;
4224 case 0x70: /* pshufx insn */
4225 case 0xc6: /* shufps/shufpd insn */
4226 val = ldub_code(s->pc++);
4227 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4228 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
4229 ((void (*)(TCGv_ptr, TCGv_ptr, TCGv_i32))sse_op2)(cpu_ptr0, cpu_ptr1, tcg_const_i32(val));
4230 break;
4231 case 0xc2:
4232 /* compare insns */
4233 val = ldub_code(s->pc++);
4234 if (val >= 8)
4235 goto illegal_op;
4236 sse_op2 = sse_op_table4[val][b1];
4237 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4238 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
4239 ((void (*)(TCGv_ptr, TCGv_ptr))sse_op2)(cpu_ptr0, cpu_ptr1);
4240 break;
4241 case 0xf7:
4242 /* maskmov: implicit store through DS:rDI, so we must prepare A0 */
4243 if (mod != 3)
4244 goto illegal_op;
4245#ifdef TARGET_X86_64
4246 if (s->aflag == 2) {
4247 gen_op_movq_A0_reg(R_EDI);
4248 } else
4249#endif
4250 {
4251 gen_op_movl_A0_reg(R_EDI);
4252 if (s->aflag == 0)
4253 gen_op_andl_A0_ffff();
4254 }
4255 gen_add_A0_ds_seg(s);
4256
4257 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4258 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
4259 ((void (*)(TCGv_ptr, TCGv_ptr, TCGv))sse_op2)(cpu_ptr0, cpu_ptr1, cpu_A0);
4260 break;
4261 default:
4262 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4263 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
4264 ((void (*)(TCGv_ptr, TCGv_ptr))sse_op2)(cpu_ptr0, cpu_ptr1);
4265 break;
4266 }
4267 if (b == 0x2e || b == 0x2f) {
4268 s->cc_op = CC_OP_EFLAGS;
4269 }
4270 }
4271}
4272
4273#ifdef VBOX
4274/* Checks whether this is an invalid lock sequence. Only a few instructions
4275 can be used together with the lock prefix, and of those only the
4276 forms that write a memory operand. So, this is kind of annoying
4277 work to do...
4278 The AMD manual lists the following instructions.
4279 ADC
4280 ADD
4281 AND
4282 BTC
4283 BTR
4284 BTS
4285 CMPXCHG
4286 CMPXCHG8B
4287 CMPXCHG16B
4288 DEC
4289 INC
4290 NEG
4291 NOT
4292 OR
4293 SBB
4294 SUB
4295 XADD
4296 XCHG
4297 XOR */
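/* E.g. "lock add [mem], eax" is legal, while the register-destination
   form "lock add eax, ebx" (or "lock mov" etc.) must raise #UD. */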
4298static bool is_invalid_lock_sequence(DisasContext *s, target_ulong pc_start, int b)
4299{
4300 target_ulong pc = s->pc;
4301 int modrm, mod, op;
4302
4303 /* X={8,16,32,64} Y={16,32,64} */
4304 switch (b)
4305 {
4306 /* /2: ADC reg/memX, immX */
4307 /* /0: ADD reg/memX, immX */
4308 /* /4: AND reg/memX, immX */
4309 /* /1: OR reg/memX, immX */
4310 /* /3: SBB reg/memX, immX */
4311 /* /5: SUB reg/memX, immX */
4312 /* /6: XOR reg/memX, immX */
4313 case 0x80:
4314 case 0x81:
4315 case 0x83:
4316 modrm = ldub_code(pc++);
4317 op = (modrm >> 3) & 7;
4318 if (op == 7) /* /7: CMP */
4319 break;
4320 mod = (modrm >> 6) & 3;
4321 if (mod == 3) /* register destination */
4322 break;
4323 return false;
4324
4325 case 0x10: /* /r: ADC reg/mem8, reg8 */
4326 case 0x11: /* /r: ADC reg/memX, regY */
4327 case 0x00: /* /r: ADD reg/mem8, reg8 */
4328 case 0x01: /* /r: ADD reg/memX, regY */
4329 case 0x20: /* /r: AND reg/mem8, reg8 */
4330 case 0x21: /* /r: AND reg/memY, regY */
4331 case 0x08: /* /r: OR reg/mem8, reg8 */
4332 case 0x09: /* /r: OR reg/memY, regY */
4333 case 0x18: /* /r: SBB reg/mem8, reg8 */
4334 case 0x19: /* /r: SBB reg/memY, regY */
4335 case 0x28: /* /r: SUB reg/mem8, reg8 */
4336 case 0x29: /* /r: SUB reg/memY, regY */
4337 case 0x86: /* /r: XCHG reg/mem8, reg8 or XCHG reg8, reg/mem8 */
4338 case 0x87: /* /r: XCHG reg/memY, regY or XCHG regY, reg/memY */
4339 case 0x30: /* /r: XOR reg/mem8, reg8 */
4340 case 0x31: /* /r: XOR reg/memY, regY */
4341 modrm = ldub_code(pc++);
4342 mod = (modrm >> 6) & 3;
4343 if (mod == 3) /* register destination */
4344 break;
4345 return false;
4346
4347 /* /1: DEC reg/memX */
4348 /* /0: INC reg/memX */
4349 case 0xfe:
4350 case 0xff:
4351 modrm = ldub_code(pc++);
4352 mod = (modrm >> 6) & 3;
4353 if (mod == 3) /* register destination */
4354 break;
4355 return false;
4356
4357 /* /3: NEG reg/memX */
4358 /* /2: NOT reg/memX */
4359 case 0xf6:
4360 case 0xf7:
4361 modrm = ldub_code(pc++);
4362 mod = (modrm >> 6) & 3;
4363 if (mod == 3) /* register destination */
4364 break;
4365 return false;
4366
4367 case 0x0f:
4368 b = ldub_code(pc++);
4369 switch (b)
4370 {
4371 /* /7: BTC reg/memY, imm8 */
4372 /* /6: BTR reg/memY, imm8 */
4373 /* /5: BTS reg/memY, imm8 */
4374 case 0xba:
4375 modrm = ldub_code(pc++);
4376 op = (modrm >> 3) & 7;
4377 if (op < 5)
4378 break;
4379 mod = (modrm >> 6) & 3;
4380 if (mod == 3) /* register destination */
4381 break;
4382 return false;
4383
4384 case 0xbb: /* /r: BTC reg/memY, regY */
4385 case 0xb3: /* /r: BTR reg/memY, regY */
4386 case 0xab: /* /r: BTS reg/memY, regY */
4387 case 0xb0: /* /r: CMPXCHG reg/mem8, reg8 */
4388 case 0xb1: /* /r: CMPXCHG reg/memY, regY */
4389 case 0xc0: /* /r: XADD reg/mem8, reg8 */
4390 case 0xc1: /* /r: XADD reg/memY, regY */
4391 modrm = ldub_code(pc++);
4392 mod = (modrm >> 6) & 3;
4393 if (mod == 3) /* register destination */
4394 break;
4395 return false;
4396
4397 /* /1: CMPXCHG8B mem64 or CMPXCHG16B mem128 */
4398 case 0xc7:
4399 modrm = ldub_code(pc++);
4400 op = (modrm >> 3) & 7;
4401 if (op != 1)
4402 break;
4403 return false;
4404 }
4405 break;
4406 }
4407
4408 /* Illegal sequence. s->pc is past the lock prefix, and that
4409 is sufficient for the TB, I think. */
4410 Log(("illegal lock sequence %RGv (b=%#x)\n", pc_start, b));
4411 return true;
4412}
4413#endif /* VBOX */
4414
4415/* convert one instruction. s->is_jmp is set if the translation must
4416 be stopped. Return the next pc value */
4417static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
4418{
4419 int b, prefixes, aflag, dflag;
4420 int shift, ot;
4421 int modrm, reg, rm, mod, reg_addr, op, opreg, offset_addr, val;
4422 target_ulong next_eip, tval;
4423 int rex_w, rex_r;
4424
4425 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP)))
4426 tcg_gen_debug_insn_start(pc_start);
4427 s->pc = pc_start;
4428 prefixes = 0;
4429 aflag = s->code32;
4430 dflag = s->code32;
4431 s->override = -1;
4432 rex_w = -1;
4433 rex_r = 0;
4434#ifdef TARGET_X86_64
4435 s->rex_x = 0;
4436 s->rex_b = 0;
4437 x86_64_hregs = 0;
4438#endif
4439 s->rip_offset = 0; /* for relative ip address */
4440#ifdef VBOX
4441 /* nike: seems to only slow things down */
4442# if 0
4443 /* Always update EIP. Otherwise one must be very careful with generated code that can raise exceptions. */
4444
4445 gen_update_eip(pc_start - s->cs_base);
4446# endif
4447#endif /* VBOX */
4448
4449 next_byte:
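    /* Prefix bytes loop back here one at a time; b finally holds the
       first real opcode byte. */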
4450 b = ldub_code(s->pc);
4451 s->pc++;
4452 /* check prefixes */
4453#ifdef TARGET_X86_64
4454 if (CODE64(s)) {
4455 switch (b) {
4456 case 0xf3:
4457 prefixes |= PREFIX_REPZ;
4458 goto next_byte;
4459 case 0xf2:
4460 prefixes |= PREFIX_REPNZ;
4461 goto next_byte;
4462 case 0xf0:
4463 prefixes |= PREFIX_LOCK;
4464 goto next_byte;
4465 case 0x2e:
4466 s->override = R_CS;
4467 goto next_byte;
4468 case 0x36:
4469 s->override = R_SS;
4470 goto next_byte;
4471 case 0x3e:
4472 s->override = R_DS;
4473 goto next_byte;
4474 case 0x26:
4475 s->override = R_ES;
4476 goto next_byte;
4477 case 0x64:
4478 s->override = R_FS;
4479 goto next_byte;
4480 case 0x65:
4481 s->override = R_GS;
4482 goto next_byte;
4483 case 0x66:
4484 prefixes |= PREFIX_DATA;
4485 goto next_byte;
4486 case 0x67:
4487 prefixes |= PREFIX_ADR;
4488 goto next_byte;
4489 case 0x40 ... 0x4f:
4490 /* REX prefix */
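            /* REX is 0100WRXB: W selects 64-bit operand size; R, X and B
               are kept pre-shifted to bit 3 so they can be ORed straight
               into the modrm reg, SIB index and rm/base register numbers. */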
4491 rex_w = (b >> 3) & 1;
4492 rex_r = (b & 0x4) << 1;
4493 s->rex_x = (b & 0x2) << 2;
4494 REX_B(s) = (b & 0x1) << 3;
4495 x86_64_hregs = 1; /* select uniform byte register addressing */
4496 goto next_byte;
4497 }
4498 if (rex_w == 1) {
4499 /* 0x66 is ignored if rex.w is set */
4500 dflag = 2;
4501 } else {
4502 if (prefixes & PREFIX_DATA)
4503 dflag ^= 1;
4504 }
4505 if (!(prefixes & PREFIX_ADR))
4506 aflag = 2;
4507 } else
4508#endif
4509 {
4510 switch (b) {
4511 case 0xf3:
4512 prefixes |= PREFIX_REPZ;
4513 goto next_byte;
4514 case 0xf2:
4515 prefixes |= PREFIX_REPNZ;
4516 goto next_byte;
4517 case 0xf0:
4518 prefixes |= PREFIX_LOCK;
4519 goto next_byte;
4520 case 0x2e:
4521 s->override = R_CS;
4522 goto next_byte;
4523 case 0x36:
4524 s->override = R_SS;
4525 goto next_byte;
4526 case 0x3e:
4527 s->override = R_DS;
4528 goto next_byte;
4529 case 0x26:
4530 s->override = R_ES;
4531 goto next_byte;
4532 case 0x64:
4533 s->override = R_FS;
4534 goto next_byte;
4535 case 0x65:
4536 s->override = R_GS;
4537 goto next_byte;
4538 case 0x66:
4539 prefixes |= PREFIX_DATA;
4540 goto next_byte;
4541 case 0x67:
4542 prefixes |= PREFIX_ADR;
4543 goto next_byte;
4544 }
4545 if (prefixes & PREFIX_DATA)
4546 dflag ^= 1;
4547 if (prefixes & PREFIX_ADR)
4548 aflag ^= 1;
4549 }
4550
4551 s->prefix = prefixes;
4552 s->aflag = aflag;
4553 s->dflag = dflag;
4554
4555 /* lock generation */
4556#ifndef VBOX
4557 if (prefixes & PREFIX_LOCK)
4558 gen_helper_lock();
4559#else /* VBOX */
4560 if (prefixes & PREFIX_LOCK) {
4561 if (is_invalid_lock_sequence(s, pc_start, b)) {
4562 gen_exception(s, EXCP06_ILLOP, pc_start - s->cs_base);
4563 return s->pc;
4564 }
4565 gen_helper_lock();
4566 }
4567#endif /* VBOX */
4568
4569 /* now check op code */
4570 reswitch:
4571 switch(b) {
4572 case 0x0f:
4573 /**************************/
4574 /* extended op code */
4575 b = ldub_code(s->pc++) | 0x100;
4576 goto reswitch;
4577
4578 /**************************/
4579 /* arith & logic */
4580 case 0x00 ... 0x05:
4581 case 0x08 ... 0x0d:
4582 case 0x10 ... 0x15:
4583 case 0x18 ... 0x1d:
4584 case 0x20 ... 0x25:
4585 case 0x28 ... 0x2d:
4586 case 0x30 ... 0x35:
4587 case 0x38 ... 0x3d:
4588 {
4589 int op, f, val;
4590 op = (b >> 3) & 7;
4591 f = (b >> 1) & 3;
4592
4593 if ((b & 1) == 0)
4594 ot = OT_BYTE;
4595 else
4596 ot = dflag + OT_WORD;
4597
4598 switch(f) {
4599 case 0: /* OP Ev, Gv */
4600 modrm = ldub_code(s->pc++);
4601 reg = ((modrm >> 3) & 7) | rex_r;
4602 mod = (modrm >> 6) & 3;
4603 rm = (modrm & 7) | REX_B(s);
4604 if (mod != 3) {
4605 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
4606 opreg = OR_TMP0;
4607 } else if (op == OP_XORL && rm == reg) {
4608 xor_zero:
4609 /* xor reg, reg optimisation */
4610 gen_op_movl_T0_0();
4611 s->cc_op = CC_OP_LOGICB + ot;
4612 gen_op_mov_reg_T0(ot, reg);
4613 gen_op_update1_cc();
4614 break;
4615 } else {
4616 opreg = rm;
4617 }
4618 gen_op_mov_TN_reg(ot, 1, reg);
4619 gen_op(s, op, ot, opreg);
4620 break;
4621 case 1: /* OP Gv, Ev */
4622 modrm = ldub_code(s->pc++);
4623 mod = (modrm >> 6) & 3;
4624 reg = ((modrm >> 3) & 7) | rex_r;
4625 rm = (modrm & 7) | REX_B(s);
4626 if (mod != 3) {
4627 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
4628 gen_op_ld_T1_A0(ot + s->mem_index);
4629 } else if (op == OP_XORL && rm == reg) {
4630 goto xor_zero;
4631 } else {
4632 gen_op_mov_TN_reg(ot, 1, rm);
4633 }
4634 gen_op(s, op, ot, reg);
4635 break;
4636 case 2: /* OP A, Iv */
4637 val = insn_get(s, ot);
4638 gen_op_movl_T1_im(val);
4639 gen_op(s, op, ot, OR_EAX);
4640 break;
4641 }
4642 }
4643 break;
4644
4645 case 0x82:
4646 if (CODE64(s))
4647 goto illegal_op;
4648 case 0x80: /* GRP1 */
4649 case 0x81:
4650 case 0x83:
4651 {
4652 int val;
4653
4654 if ((b & 1) == 0)
4655 ot = OT_BYTE;
4656 else
4657 ot = dflag + OT_WORD;
4658
4659 modrm = ldub_code(s->pc++);
4660 mod = (modrm >> 6) & 3;
4661 rm = (modrm & 7) | REX_B(s);
4662 op = (modrm >> 3) & 7;
4663
4664 if (mod != 3) {
4665 if (b == 0x83)
4666 s->rip_offset = 1;
4667 else
4668 s->rip_offset = insn_const_size(ot);
4669 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
4670 opreg = OR_TMP0;
4671 } else {
4672 opreg = rm;
4673 }
4674
4675 switch(b) {
4676 default:
4677 case 0x80:
4678 case 0x81:
4679 case 0x82:
4680 val = insn_get(s, ot);
4681 break;
4682 case 0x83:
4683 val = (int8_t)insn_get(s, OT_BYTE);
4684 break;
4685 }
4686 gen_op_movl_T1_im(val);
4687 gen_op(s, op, ot, opreg);
4688 }
4689 break;
4690
4691 /**************************/
4692 /* inc, dec, and other misc arith */
4693 case 0x40 ... 0x47: /* inc Gv */
4694 ot = dflag ? OT_LONG : OT_WORD;
4695 gen_inc(s, ot, OR_EAX + (b & 7), 1);
4696 break;
4697 case 0x48 ... 0x4f: /* dec Gv */
4698 ot = dflag ? OT_LONG : OT_WORD;
4699 gen_inc(s, ot, OR_EAX + (b & 7), -1);
4700 break;
4701 case 0xf6: /* GRP3 */
4702 case 0xf7:
4703 if ((b & 1) == 0)
4704 ot = OT_BYTE;
4705 else
4706 ot = dflag + OT_WORD;
4707
4708 modrm = ldub_code(s->pc++);
4709 mod = (modrm >> 6) & 3;
4710 rm = (modrm & 7) | REX_B(s);
4711 op = (modrm >> 3) & 7;
4712 if (mod != 3) {
4713 if (op == 0)
4714 s->rip_offset = insn_const_size(ot);
4715 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
4716 gen_op_ld_T0_A0(ot + s->mem_index);
4717 } else {
4718 gen_op_mov_TN_reg(ot, 0, rm);
4719 }
4720
4721 switch(op) {
4722 case 0: /* test */
4723 val = insn_get(s, ot);
4724 gen_op_movl_T1_im(val);
4725 gen_op_testl_T0_T1_cc();
4726 s->cc_op = CC_OP_LOGICB + ot;
4727 break;
4728 case 2: /* not */
4729 tcg_gen_not_tl(cpu_T[0], cpu_T[0]);
4730 if (mod != 3) {
4731 gen_op_st_T0_A0(ot + s->mem_index);
4732 } else {
4733 gen_op_mov_reg_T0(ot, rm);
4734 }
4735 break;
4736 case 3: /* neg */
4737 tcg_gen_neg_tl(cpu_T[0], cpu_T[0]);
4738 if (mod != 3) {
4739 gen_op_st_T0_A0(ot + s->mem_index);
4740 } else {
4741 gen_op_mov_reg_T0(ot, rm);
4742 }
4743 gen_op_update_neg_cc();
4744 s->cc_op = CC_OP_SUBB + ot;
4745 break;
4746 case 4: /* mul */
4747 switch(ot) {
4748 case OT_BYTE:
4749 gen_op_mov_TN_reg(OT_BYTE, 1, R_EAX);
4750 tcg_gen_ext8u_tl(cpu_T[0], cpu_T[0]);
4751 tcg_gen_ext8u_tl(cpu_T[1], cpu_T[1]);
4752 /* XXX: use 32 bit mul which could be faster */
4753 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4754 gen_op_mov_reg_T0(OT_WORD, R_EAX);
4755 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4756 tcg_gen_andi_tl(cpu_cc_src, cpu_T[0], 0xff00);
4757 s->cc_op = CC_OP_MULB;
4758 break;
4759 case OT_WORD:
4760 gen_op_mov_TN_reg(OT_WORD, 1, R_EAX);
4761 tcg_gen_ext16u_tl(cpu_T[0], cpu_T[0]);
4762 tcg_gen_ext16u_tl(cpu_T[1], cpu_T[1]);
4763 /* XXX: use 32 bit mul which could be faster */
4764 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4765 gen_op_mov_reg_T0(OT_WORD, R_EAX);
4766 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4767 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 16);
4768 gen_op_mov_reg_T0(OT_WORD, R_EDX);
4769 tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
4770 s->cc_op = CC_OP_MULW;
4771 break;
4772 default:
4773 case OT_LONG:
4774#ifdef TARGET_X86_64
4775 gen_op_mov_TN_reg(OT_LONG, 1, R_EAX);
4776 tcg_gen_ext32u_tl(cpu_T[0], cpu_T[0]);
4777 tcg_gen_ext32u_tl(cpu_T[1], cpu_T[1]);
4778 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4779 gen_op_mov_reg_T0(OT_LONG, R_EAX);
4780 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4781 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 32);
4782 gen_op_mov_reg_T0(OT_LONG, R_EDX);
4783 tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
4784#else
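            /* 32-bit target: there is no 64-bit tl type, so widen both
               operands to i64, multiply, and split the product between
               EAX (low half) and EDX (high half, which also drives CF/OF). */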
4785 {
4786 TCGv_i64 t0, t1;
4787 t0 = tcg_temp_new_i64();
4788 t1 = tcg_temp_new_i64();
4789 gen_op_mov_TN_reg(OT_LONG, 1, R_EAX);
4790 tcg_gen_extu_i32_i64(t0, cpu_T[0]);
4791 tcg_gen_extu_i32_i64(t1, cpu_T[1]);
4792 tcg_gen_mul_i64(t0, t0, t1);
4793 tcg_gen_trunc_i64_i32(cpu_T[0], t0);
4794 gen_op_mov_reg_T0(OT_LONG, R_EAX);
4795 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4796 tcg_gen_shri_i64(t0, t0, 32);
4797 tcg_gen_trunc_i64_i32(cpu_T[0], t0);
4798 gen_op_mov_reg_T0(OT_LONG, R_EDX);
4799 tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
4800 }
4801#endif
4802 s->cc_op = CC_OP_MULL;
4803 break;
4804#ifdef TARGET_X86_64
4805 case OT_QUAD:
4806 gen_helper_mulq_EAX_T0(cpu_T[0]);
4807 s->cc_op = CC_OP_MULQ;
4808 break;
4809#endif
4810 }
4811 break;
4812 case 5: /* imul */
4813 switch(ot) {
4814 case OT_BYTE:
4815 gen_op_mov_TN_reg(OT_BYTE, 1, R_EAX);
4816 tcg_gen_ext8s_tl(cpu_T[0], cpu_T[0]);
4817 tcg_gen_ext8s_tl(cpu_T[1], cpu_T[1]);
4818 /* XXX: use 32 bit mul which could be faster */
4819 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4820 gen_op_mov_reg_T0(OT_WORD, R_EAX);
4821 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4822 tcg_gen_ext8s_tl(cpu_tmp0, cpu_T[0]);
4823 tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
4824 s->cc_op = CC_OP_MULB;
4825 break;
4826 case OT_WORD:
4827 gen_op_mov_TN_reg(OT_WORD, 1, R_EAX);
4828 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
4829 tcg_gen_ext16s_tl(cpu_T[1], cpu_T[1]);
4830 /* XXX: use 32 bit mul which could be faster */
4831 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4832 gen_op_mov_reg_T0(OT_WORD, R_EAX);
4833 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4834 tcg_gen_ext16s_tl(cpu_tmp0, cpu_T[0]);
4835 tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
4836 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 16);
4837 gen_op_mov_reg_T0(OT_WORD, R_EDX);
4838 s->cc_op = CC_OP_MULW;
4839 break;
4840 default:
4841 case OT_LONG:
4842#ifdef TARGET_X86_64
4843 gen_op_mov_TN_reg(OT_LONG, 1, R_EAX);
4844 tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
4845 tcg_gen_ext32s_tl(cpu_T[1], cpu_T[1]);
4846 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4847 gen_op_mov_reg_T0(OT_LONG, R_EAX);
4848 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4849 tcg_gen_ext32s_tl(cpu_tmp0, cpu_T[0]);
4850 tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
4851 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 32);
4852 gen_op_mov_reg_T0(OT_LONG, R_EDX);
4853#else
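            /* 32-bit target: sign-extend to i64 and multiply; CC_SRC ends
               up as high_half - sign(low_half), non-zero iff the signed
               result overflows 32 bits (sets CF/OF). */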
4854 {
4855 TCGv_i64 t0, t1;
4856 t0 = tcg_temp_new_i64();
4857 t1 = tcg_temp_new_i64();
4858 gen_op_mov_TN_reg(OT_LONG, 1, R_EAX);
4859 tcg_gen_ext_i32_i64(t0, cpu_T[0]);
4860 tcg_gen_ext_i32_i64(t1, cpu_T[1]);
4861 tcg_gen_mul_i64(t0, t0, t1);
4862 tcg_gen_trunc_i64_i32(cpu_T[0], t0);
4863 gen_op_mov_reg_T0(OT_LONG, R_EAX);
4864 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4865 tcg_gen_sari_tl(cpu_tmp0, cpu_T[0], 31);
4866 tcg_gen_shri_i64(t0, t0, 32);
4867 tcg_gen_trunc_i64_i32(cpu_T[0], t0);
4868 gen_op_mov_reg_T0(OT_LONG, R_EDX);
4869 tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
4870 }
4871#endif
4872 s->cc_op = CC_OP_MULL;
4873 break;
4874#ifdef TARGET_X86_64
4875 case OT_QUAD:
4876 gen_helper_imulq_EAX_T0(cpu_T[0]);
4877 s->cc_op = CC_OP_MULQ;
4878 break;
4879#endif
4880 }
4881 break;
4882 case 6: /* div */
4883 switch(ot) {
4884 case OT_BYTE:
4885 gen_jmp_im(pc_start - s->cs_base);
4886 gen_helper_divb_AL(cpu_T[0]);
4887 break;
4888 case OT_WORD:
4889 gen_jmp_im(pc_start - s->cs_base);
4890 gen_helper_divw_AX(cpu_T[0]);
4891 break;
4892 default:
4893 case OT_LONG:
4894 gen_jmp_im(pc_start - s->cs_base);
4895 gen_helper_divl_EAX(cpu_T[0]);
4896 break;
4897#ifdef TARGET_X86_64
4898 case OT_QUAD:
4899 gen_jmp_im(pc_start - s->cs_base);
4900 gen_helper_divq_EAX(cpu_T[0]);
4901 break;
4902#endif
4903 }
4904 break;
4905 case 7: /* idiv */
4906 switch(ot) {
4907 case OT_BYTE:
4908 gen_jmp_im(pc_start - s->cs_base);
4909 gen_helper_idivb_AL(cpu_T[0]);
4910 break;
4911 case OT_WORD:
4912 gen_jmp_im(pc_start - s->cs_base);
4913 gen_helper_idivw_AX(cpu_T[0]);
4914 break;
4915 default:
4916 case OT_LONG:
4917 gen_jmp_im(pc_start - s->cs_base);
4918 gen_helper_idivl_EAX(cpu_T[0]);
4919 break;
4920#ifdef TARGET_X86_64
4921 case OT_QUAD:
4922 gen_jmp_im(pc_start - s->cs_base);
4923 gen_helper_idivq_EAX(cpu_T[0]);
4924 break;
4925#endif
4926 }
4927 break;
4928 default:
4929 goto illegal_op;
4930 }
4931 break;
4932
4933 case 0xfe: /* GRP4 */
4934 case 0xff: /* GRP5 */
4935 if ((b & 1) == 0)
4936 ot = OT_BYTE;
4937 else
4938 ot = dflag + OT_WORD;
4939
4940 modrm = ldub_code(s->pc++);
4941 mod = (modrm >> 6) & 3;
4942 rm = (modrm & 7) | REX_B(s);
4943 op = (modrm >> 3) & 7;
4944 if (op >= 2 && b == 0xfe) {
4945 goto illegal_op;
4946 }
4947 if (CODE64(s)) {
4948 if (op == 2 || op == 4) {
4949 /* operand size for jumps is 64 bit */
4950 ot = OT_QUAD;
4951 } else if (op == 3 || op == 5) {
4952 ot = dflag ? OT_LONG + (rex_w == 1) : OT_WORD;
4953 } else if (op == 6) {
4954 /* default push size is 64 bit */
4955 ot = dflag ? OT_QUAD : OT_WORD;
4956 }
4957 }
4958 if (mod != 3) {
4959 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
4960 if (op >= 2 && op != 3 && op != 5)
4961 gen_op_ld_T0_A0(ot + s->mem_index);
4962 } else {
4963 gen_op_mov_TN_reg(ot, 0, rm);
4964 }
4965
4966 switch(op) {
4967 case 0: /* inc Ev */
4968 if (mod != 3)
4969 opreg = OR_TMP0;
4970 else
4971 opreg = rm;
4972 gen_inc(s, ot, opreg, 1);
4973 break;
4974 case 1: /* dec Ev */
4975 if (mod != 3)
4976 opreg = OR_TMP0;
4977 else
4978 opreg = rm;
4979 gen_inc(s, ot, opreg, -1);
4980 break;
4981 case 2: /* call Ev */
4982 /* XXX: optimize if memory (no 'and' is necessary) */
4983#ifdef VBOX_WITH_CALL_RECORD
4984 if (s->record_call)
4985 gen_op_record_call();
4986#endif
4987 if (s->dflag == 0)
4988 gen_op_andl_T0_ffff();
4989 next_eip = s->pc - s->cs_base;
4990 gen_movtl_T1_im(next_eip);
4991 gen_push_T1(s);
4992 gen_op_jmp_T0();
4993 gen_eob(s);
4994 break;
4995 case 3: /* lcall Ev */
4996 gen_op_ld_T1_A0(ot + s->mem_index);
4997 gen_add_A0_im(s, 1 << (ot - OT_WORD + 1));
4998 gen_op_ldu_T0_A0(OT_WORD + s->mem_index);
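            /* T1 now holds the new EIP, T0 the new CS selector. */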
4999 do_lcall:
5000 if (s->pe && !s->vm86) {
5001 if (s->cc_op != CC_OP_DYNAMIC)
5002 gen_op_set_cc_op(s->cc_op);
5003 gen_jmp_im(pc_start - s->cs_base);
5004 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5005 gen_helper_lcall_protected(cpu_tmp2_i32, cpu_T[1],
5006 tcg_const_i32(dflag),
5007 tcg_const_i32(s->pc - pc_start));
5008 } else {
5009 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5010 gen_helper_lcall_real(cpu_tmp2_i32, cpu_T[1],
5011 tcg_const_i32(dflag),
5012 tcg_const_i32(s->pc - s->cs_base));
5013 }
5014 gen_eob(s);
5015 break;
5016 case 4: /* jmp Ev */
5017 if (s->dflag == 0)
5018 gen_op_andl_T0_ffff();
5019 gen_op_jmp_T0();
5020 gen_eob(s);
5021 break;
5022 case 5: /* ljmp Ev */
5023 gen_op_ld_T1_A0(ot + s->mem_index);
5024 gen_add_A0_im(s, 1 << (ot - OT_WORD + 1));
5025 gen_op_ldu_T0_A0(OT_WORD + s->mem_index);
5026 do_ljmp:
5027 if (s->pe && !s->vm86) {
5028 if (s->cc_op != CC_OP_DYNAMIC)
5029 gen_op_set_cc_op(s->cc_op);
5030 gen_jmp_im(pc_start - s->cs_base);
5031 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5032 gen_helper_ljmp_protected(cpu_tmp2_i32, cpu_T[1],
5033 tcg_const_i32(s->pc - pc_start));
5034 } else {
5035 gen_op_movl_seg_T0_vm(R_CS);
5036 gen_op_movl_T0_T1();
5037 gen_op_jmp_T0();
5038 }
5039 gen_eob(s);
5040 break;
5041 case 6: /* push Ev */
5042 gen_push_T0(s);
5043 break;
5044 default:
5045 goto illegal_op;
5046 }
5047 break;
5048
5049 case 0x84: /* test Ev, Gv */
5050 case 0x85:
5051 if ((b & 1) == 0)
5052 ot = OT_BYTE;
5053 else
5054 ot = dflag + OT_WORD;
5055
5056 modrm = ldub_code(s->pc++);
5057 reg = ((modrm >> 3) & 7) | rex_r;
5058
5059 gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0);
5060 gen_op_mov_TN_reg(ot, 1, reg);
5061 gen_op_testl_T0_T1_cc();
5062 s->cc_op = CC_OP_LOGICB + ot;
5063 break;
5064
5065 case 0xa8: /* test eAX, Iv */
5066 case 0xa9:
5067 if ((b & 1) == 0)
5068 ot = OT_BYTE;
5069 else
5070 ot = dflag + OT_WORD;
5071 val = insn_get(s, ot);
5072
5073 gen_op_mov_TN_reg(ot, 0, OR_EAX);
5074 gen_op_movl_T1_im(val);
5075 gen_op_testl_T0_T1_cc();
5076 s->cc_op = CC_OP_LOGICB + ot;
5077 break;
5078
5079 case 0x98: /* CWDE/CBW */
5080#ifdef TARGET_X86_64
5081 if (dflag == 2) {
5082 gen_op_mov_TN_reg(OT_LONG, 0, R_EAX);
5083 tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
5084 gen_op_mov_reg_T0(OT_QUAD, R_EAX);
5085 } else
5086#endif
5087 if (dflag == 1) {
5088 gen_op_mov_TN_reg(OT_WORD, 0, R_EAX);
5089 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
5090 gen_op_mov_reg_T0(OT_LONG, R_EAX);
5091 } else {
5092 gen_op_mov_TN_reg(OT_BYTE, 0, R_EAX);
5093 tcg_gen_ext8s_tl(cpu_T[0], cpu_T[0]);
5094 gen_op_mov_reg_T0(OT_WORD, R_EAX);
5095 }
5096 break;
5097 case 0x99: /* CDQ/CWD */
5098#ifdef TARGET_X86_64
5099 if (dflag == 2) {
5100 gen_op_mov_TN_reg(OT_QUAD, 0, R_EAX);
5101 tcg_gen_sari_tl(cpu_T[0], cpu_T[0], 63);
5102 gen_op_mov_reg_T0(OT_QUAD, R_EDX);
5103 } else
5104#endif
5105 if (dflag == 1) {
5106 gen_op_mov_TN_reg(OT_LONG, 0, R_EAX);
5107 tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
5108 tcg_gen_sari_tl(cpu_T[0], cpu_T[0], 31);
5109 gen_op_mov_reg_T0(OT_LONG, R_EDX);
5110 } else {
5111 gen_op_mov_TN_reg(OT_WORD, 0, R_EAX);
5112 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
5113 tcg_gen_sari_tl(cpu_T[0], cpu_T[0], 15);
5114 gen_op_mov_reg_T0(OT_WORD, R_EDX);
5115 }
5116 break;
5117 case 0x1af: /* imul Gv, Ev */
5118 case 0x69: /* imul Gv, Ev, I */
5119 case 0x6b:
5120 ot = dflag + OT_WORD;
5121 modrm = ldub_code(s->pc++);
5122 reg = ((modrm >> 3) & 7) | rex_r;
5123 if (b == 0x69)
5124 s->rip_offset = insn_const_size(ot);
5125 else if (b == 0x6b)
5126 s->rip_offset = 1;
5127 gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0);
5128 if (b == 0x69) {
5129 val = insn_get(s, ot);
5130 gen_op_movl_T1_im(val);
5131 } else if (b == 0x6b) {
5132 val = (int8_t)insn_get(s, OT_BYTE);
5133 gen_op_movl_T1_im(val);
5134 } else {
5135 gen_op_mov_TN_reg(ot, 1, reg);
5136 }
5137
5138#ifdef TARGET_X86_64
5139 if (ot == OT_QUAD) {
5140 gen_helper_imulq_T0_T1(cpu_T[0], cpu_T[0], cpu_T[1]);
5141 } else
5142#endif
5143 if (ot == OT_LONG) {
5144#ifdef TARGET_X86_64
5145 tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
5146 tcg_gen_ext32s_tl(cpu_T[1], cpu_T[1]);
5147 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
5148 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
5149 tcg_gen_ext32s_tl(cpu_tmp0, cpu_T[0]);
5150 tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
5151#else
5152 {
5153 TCGv_i64 t0, t1;
5154 t0 = tcg_temp_new_i64();
5155 t1 = tcg_temp_new_i64();
5156 tcg_gen_ext_i32_i64(t0, cpu_T[0]);
5157 tcg_gen_ext_i32_i64(t1, cpu_T[1]);
5158 tcg_gen_mul_i64(t0, t0, t1);
5159 tcg_gen_trunc_i64_i32(cpu_T[0], t0);
5160 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
5161 tcg_gen_sari_tl(cpu_tmp0, cpu_T[0], 31);
5162 tcg_gen_shri_i64(t0, t0, 32);
5163 tcg_gen_trunc_i64_i32(cpu_T[1], t0);
5164 tcg_gen_sub_tl(cpu_cc_src, cpu_T[1], cpu_tmp0);
5165 }
5166#endif
5167 } else {
5168 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
5169 tcg_gen_ext16s_tl(cpu_T[1], cpu_T[1]);
5170 /* XXX: use 32 bit mul which could be faster */
5171 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
5172 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
5173 tcg_gen_ext16s_tl(cpu_tmp0, cpu_T[0]);
5174 tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
5175 }
5176 gen_op_mov_reg_T0(ot, reg);
5177 s->cc_op = CC_OP_MULB + ot;
5178 break;
5179 case 0x1c0:
5180 case 0x1c1: /* xadd Ev, Gv */
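        /* XADD exchanges and adds: dest' = dest + src, src' = old dest. */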
5181 if ((b & 1) == 0)
5182 ot = OT_BYTE;
5183 else
5184 ot = dflag + OT_WORD;
5185 modrm = ldub_code(s->pc++);
5186 reg = ((modrm >> 3) & 7) | rex_r;
5187 mod = (modrm >> 6) & 3;
5188 if (mod == 3) {
5189 rm = (modrm & 7) | REX_B(s);
5190 gen_op_mov_TN_reg(ot, 0, reg);
5191 gen_op_mov_TN_reg(ot, 1, rm);
5192 gen_op_addl_T0_T1();
5193 gen_op_mov_reg_T1(ot, reg);
5194 gen_op_mov_reg_T0(ot, rm);
5195 } else {
5196 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
5197 gen_op_mov_TN_reg(ot, 0, reg);
5198 gen_op_ld_T1_A0(ot + s->mem_index);
5199 gen_op_addl_T0_T1();
5200 gen_op_st_T0_A0(ot + s->mem_index);
5201 gen_op_mov_reg_T1(ot, reg);
5202 }
5203 gen_op_update2_cc();
5204 s->cc_op = CC_OP_ADDB + ot;
5205 break;
5206 case 0x1b0:
5207 case 0x1b1: /* cmpxchg Ev, Gv */
5208 {
5209 int label1, label2;
5210 TCGv t0, t1, t2, a0;
5211
5212 if ((b & 1) == 0)
5213 ot = OT_BYTE;
5214 else
5215 ot = dflag + OT_WORD;
5216 modrm = ldub_code(s->pc++);
5217 reg = ((modrm >> 3) & 7) | rex_r;
5218 mod = (modrm >> 6) & 3;
5219 t0 = tcg_temp_local_new();
5220 t1 = tcg_temp_local_new();
5221 t2 = tcg_temp_local_new();
5222 a0 = tcg_temp_local_new();
5223 gen_op_mov_v_reg(ot, t1, reg);
5224 if (mod == 3) {
5225 rm = (modrm & 7) | REX_B(s);
5226 gen_op_mov_v_reg(ot, t0, rm);
5227 } else {
5228 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
5229 tcg_gen_mov_tl(a0, cpu_A0);
5230 gen_op_ld_v(ot + s->mem_index, t0, a0);
5231 rm = 0; /* avoid warning */
5232 }
5233 label1 = gen_new_label();
5234 tcg_gen_sub_tl(t2, cpu_regs[R_EAX], t0);
5235 gen_extu(ot, t2);
5236 tcg_gen_brcondi_tl(TCG_COND_EQ, t2, 0, label1);
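        /* Branch taken (accumulator == destination): success, store the
           new value.  Fall through: failure, EAX gets the old value. */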
5237 if (mod == 3) {
5238 label2 = gen_new_label();
5239 gen_op_mov_reg_v(ot, R_EAX, t0);
5240 tcg_gen_br(label2);
5241 gen_set_label(label1);
5242 gen_op_mov_reg_v(ot, rm, t1);
5243 gen_set_label(label2);
5244 } else {
5245 tcg_gen_mov_tl(t1, t0);
5246 gen_op_mov_reg_v(ot, R_EAX, t0);
5247 gen_set_label(label1);
5248 /* always store */
5249 gen_op_st_v(ot + s->mem_index, t1, a0);
5250 }
5251 tcg_gen_mov_tl(cpu_cc_src, t0);
5252 tcg_gen_mov_tl(cpu_cc_dst, t2);
5253 s->cc_op = CC_OP_SUBB + ot;
5254 tcg_temp_free(t0);
5255 tcg_temp_free(t1);
5256 tcg_temp_free(t2);
5257 tcg_temp_free(a0);
5258 }
5259 break;
5260 case 0x1c7: /* cmpxchg8b */
5261 modrm = ldub_code(s->pc++);
5262 mod = (modrm >> 6) & 3;
5263 if ((mod == 3) || ((modrm & 0x38) != 0x8))
5264 goto illegal_op;
5265#ifdef TARGET_X86_64
5266 if (dflag == 2) {
5267 if (!(s->cpuid_ext_features & CPUID_EXT_CX16))
5268 goto illegal_op;
5269 gen_jmp_im(pc_start - s->cs_base);
5270 if (s->cc_op != CC_OP_DYNAMIC)
5271 gen_op_set_cc_op(s->cc_op);
5272 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
5273 gen_helper_cmpxchg16b(cpu_A0);
5274 } else
5275#endif
5276 {
5277 if (!(s->cpuid_features & CPUID_CX8))
5278 goto illegal_op;
5279 gen_jmp_im(pc_start - s->cs_base);
5280 if (s->cc_op != CC_OP_DYNAMIC)
5281 gen_op_set_cc_op(s->cc_op);
5282 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
5283 gen_helper_cmpxchg8b(cpu_A0);
5284 }
5285 s->cc_op = CC_OP_EFLAGS;
5286 break;
5287
5288 /**************************/
5289 /* push/pop */
5290 case 0x50 ... 0x57: /* push */
5291 gen_op_mov_TN_reg(OT_LONG, 0, (b & 7) | REX_B(s));
5292 gen_push_T0(s);
5293 break;
5294 case 0x58 ... 0x5f: /* pop */
5295 if (CODE64(s)) {
5296 ot = dflag ? OT_QUAD : OT_WORD;
5297 } else {
5298 ot = dflag + OT_WORD;
5299 }
5300 gen_pop_T0(s);
5301 /* NOTE: order is important for pop %sp */
5302 gen_pop_update(s);
5303 gen_op_mov_reg_T0(ot, (b & 7) | REX_B(s));
5304 break;
5305 case 0x60: /* pusha */
5306 if (CODE64(s))
5307 goto illegal_op;
5308 gen_pusha(s);
5309 break;
5310 case 0x61: /* popa */
5311 if (CODE64(s))
5312 goto illegal_op;
5313 gen_popa(s);
5314 break;
5315 case 0x68: /* push Iv */
5316 case 0x6a:
5317 if (CODE64(s)) {
5318 ot = dflag ? OT_QUAD : OT_WORD;
5319 } else {
5320 ot = dflag + OT_WORD;
5321 }
5322 if (b == 0x68)
5323 val = insn_get(s, ot);
5324 else
5325 val = (int8_t)insn_get(s, OT_BYTE);
5326 gen_op_movl_T0_im(val);
5327 gen_push_T0(s);
5328 break;
5329 case 0x8f: /* pop Ev */
5330 if (CODE64(s)) {
5331 ot = dflag ? OT_QUAD : OT_WORD;
5332 } else {
5333 ot = dflag + OT_WORD;
5334 }
5335 modrm = ldub_code(s->pc++);
5336 mod = (modrm >> 6) & 3;
5337 gen_pop_T0(s);
5338 if (mod == 3) {
5339 /* NOTE: order is important for pop %sp */
5340 gen_pop_update(s);
5341 rm = (modrm & 7) | REX_B(s);
5342 gen_op_mov_reg_T0(ot, rm);
5343 } else {
5344 /* NOTE: order is important too for MMU exceptions */
5345 s->popl_esp_hack = 1 << ot;
5346 gen_ldst_modrm(s, modrm, ot, OR_TMP0, 1);
5347 s->popl_esp_hack = 0;
5348 gen_pop_update(s);
5349 }
5350 break;
5351 case 0xc8: /* enter */
5352 {
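            /* ENTER: imm16 frame size, imm8 nesting level. */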
5353 int level;
5354 val = lduw_code(s->pc);
5355 s->pc += 2;
5356 level = ldub_code(s->pc++);
5357 gen_enter(s, val, level);
5358 }
5359 break;
5360 case 0xc9: /* leave */
5361 /* XXX: exception not precise (ESP is updated before potential exception) */
5362 if (CODE64(s)) {
5363 gen_op_mov_TN_reg(OT_QUAD, 0, R_EBP);
5364 gen_op_mov_reg_T0(OT_QUAD, R_ESP);
5365 } else if (s->ss32) {
5366 gen_op_mov_TN_reg(OT_LONG, 0, R_EBP);
5367 gen_op_mov_reg_T0(OT_LONG, R_ESP);
5368 } else {
5369 gen_op_mov_TN_reg(OT_WORD, 0, R_EBP);
5370 gen_op_mov_reg_T0(OT_WORD, R_ESP);
5371 }
5372 gen_pop_T0(s);
5373 if (CODE64(s)) {
5374 ot = dflag ? OT_QUAD : OT_WORD;
5375 } else {
5376 ot = dflag + OT_WORD;
5377 }
5378 gen_op_mov_reg_T0(ot, R_EBP);
5379 gen_pop_update(s);
5380 break;
5381 case 0x06: /* push es */
5382 case 0x0e: /* push cs */
5383 case 0x16: /* push ss */
5384 case 0x1e: /* push ds */
5385 if (CODE64(s))
5386 goto illegal_op;
5387 gen_op_movl_T0_seg(b >> 3);
5388 gen_push_T0(s);
5389 break;
5390 case 0x1a0: /* push fs */
5391 case 0x1a8: /* push gs */
5392 gen_op_movl_T0_seg((b >> 3) & 7);
5393 gen_push_T0(s);
5394 break;
5395 case 0x07: /* pop es */
5396 case 0x17: /* pop ss */
5397 case 0x1f: /* pop ds */
5398 if (CODE64(s))
5399 goto illegal_op;
5400 reg = b >> 3;
5401 gen_pop_T0(s);
5402 gen_movl_seg_T0(s, reg, pc_start - s->cs_base);
5403 gen_pop_update(s);
5404 if (reg == R_SS) {
5405 /* if reg == SS, inhibit interrupts/trace. */
5406 /* If several instructions disable interrupts, only the
5407 _first_ does it */
5408 if (!(s->tb->flags & HF_INHIBIT_IRQ_MASK))
5409 gen_helper_set_inhibit_irq();
5410 s->tf = 0;
5411 }
5412 if (s->is_jmp) {
5413 gen_jmp_im(s->pc - s->cs_base);
5414 gen_eob(s);
5415 }
5416 break;
5417 case 0x1a1: /* pop fs */
5418 case 0x1a9: /* pop gs */
5419 gen_pop_T0(s);
5420 gen_movl_seg_T0(s, (b >> 3) & 7, pc_start - s->cs_base);
5421 gen_pop_update(s);
5422 if (s->is_jmp) {
5423 gen_jmp_im(s->pc - s->cs_base);
5424 gen_eob(s);
5425 }
5426 break;
5427
5428 /**************************/
5429 /* mov */
5430 case 0x88:
5431 case 0x89: /* mov Gv, Ev */
5432 if ((b & 1) == 0)
5433 ot = OT_BYTE;
5434 else
5435 ot = dflag + OT_WORD;
5436 modrm = ldub_code(s->pc++);
5437 reg = ((modrm >> 3) & 7) | rex_r;
5438
5439 /* generate a generic store */
5440 gen_ldst_modrm(s, modrm, ot, reg, 1);
5441 break;
5442 case 0xc6:
5443 case 0xc7: /* mov Ev, Iv */
5444 if ((b & 1) == 0)
5445 ot = OT_BYTE;
5446 else
5447 ot = dflag + OT_WORD;
5448 modrm = ldub_code(s->pc++);
5449 mod = (modrm >> 6) & 3;
5450 if (mod != 3) {
5451 s->rip_offset = insn_const_size(ot);
5452 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
5453 }
5454 val = insn_get(s, ot);
5455 gen_op_movl_T0_im(val);
5456 if (mod != 3)
5457 gen_op_st_T0_A0(ot + s->mem_index);
5458 else
5459 gen_op_mov_reg_T0(ot, (modrm & 7) | REX_B(s));
5460 break;
5461 case 0x8a:
5462 case 0x8b: /* mov Ev, Gv */
5463#ifdef VBOX /* dtrace hot fix */
5464 if (prefixes & PREFIX_LOCK)
5465 goto illegal_op;
5466#endif
5467 if ((b & 1) == 0)
5468 ot = OT_BYTE;
5469 else
5470 ot = OT_WORD + dflag;
5471 modrm = ldub_code(s->pc++);
5472 reg = ((modrm >> 3) & 7) | rex_r;
5473
5474 gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0);
5475 gen_op_mov_reg_T0(ot, reg);
5476 break;
5477 case 0x8e: /* mov seg, Gv */
5478 modrm = ldub_code(s->pc++);
5479 reg = (modrm >> 3) & 7;
5480 if (reg >= 6 || reg == R_CS)
5481 goto illegal_op;
5482 gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0);
5483 gen_movl_seg_T0(s, reg, pc_start - s->cs_base);
5484 if (reg == R_SS) {
5485 /* if reg == SS, inhibit interrupts/trace */
5486 /* If several instructions disable interrupts, only the
5487 _first_ does it */
5488 if (!(s->tb->flags & HF_INHIBIT_IRQ_MASK))
5489 gen_helper_set_inhibit_irq();
5490 s->tf = 0;
5491 }
5492 if (s->is_jmp) {
5493 gen_jmp_im(s->pc - s->cs_base);
5494 gen_eob(s);
5495 }
5496 break;
5497 case 0x8c: /* mov Gv, seg */
5498 modrm = ldub_code(s->pc++);
5499 reg = (modrm >> 3) & 7;
5500 mod = (modrm >> 6) & 3;
5501 if (reg >= 6)
5502 goto illegal_op;
5503 gen_op_movl_T0_seg(reg);
5504 if (mod == 3)
5505 ot = OT_WORD + dflag;
5506 else
5507 ot = OT_WORD;
5508 gen_ldst_modrm(s, modrm, ot, OR_TMP0, 1);
5509 break;
5510
5511 case 0x1b6: /* movzbS Gv, Eb */
5512 case 0x1b7: /* movzwS Gv, Eb */
5513 case 0x1be: /* movsbS Gv, Eb */
5514 case 0x1bf: /* movswS Gv, Eb */
5515 {
5516 int d_ot;
5517 /* d_ot is the size of destination */
5518 d_ot = dflag + OT_WORD;
5519 /* ot is the size of source */
5520 ot = (b & 1) + OT_BYTE;
5521 modrm = ldub_code(s->pc++);
5522 reg = ((modrm >> 3) & 7) | rex_r;
5523 mod = (modrm >> 6) & 3;
5524 rm = (modrm & 7) | REX_B(s);
5525
5526 if (mod == 3) {
5527 gen_op_mov_TN_reg(ot, 0, rm);
5528 switch(ot | (b & 8)) {
5529 case OT_BYTE:
5530 tcg_gen_ext8u_tl(cpu_T[0], cpu_T[0]);
5531 break;
5532 case OT_BYTE | 8:
5533 tcg_gen_ext8s_tl(cpu_T[0], cpu_T[0]);
5534 break;
5535 case OT_WORD:
5536 tcg_gen_ext16u_tl(cpu_T[0], cpu_T[0]);
5537 break;
5538 default:
5539 case OT_WORD | 8:
5540 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
5541 break;
5542 }
5543 gen_op_mov_reg_T0(d_ot, reg);
5544 } else {
5545 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
5546 if (b & 8) {
5547 gen_op_lds_T0_A0(ot + s->mem_index);
5548 } else {
5549 gen_op_ldu_T0_A0(ot + s->mem_index);
5550 }
5551 gen_op_mov_reg_T0(d_ot, reg);
5552 }
5553 }
5554 break;
5555
5556 case 0x8d: /* lea */
5557 ot = dflag + OT_WORD;
5558 modrm = ldub_code(s->pc++);
5559 mod = (modrm >> 6) & 3;
5560 if (mod == 3)
5561 goto illegal_op;
5562 reg = ((modrm >> 3) & 7) | rex_r;
5563 /* we must ensure that no segment is added */
5564 s->override = -1;
5565 val = s->addseg;
5566 s->addseg = 0;
5567 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
5568 s->addseg = val;
5569 gen_op_mov_reg_A0(ot - OT_WORD, reg);
5570 break;
5571
5572 case 0xa0: /* mov EAX, Ov */
5573 case 0xa1:
5574 case 0xa2: /* mov Ov, EAX */
5575 case 0xa3:
5576 {
5577 target_ulong offset_addr;
5578
5579 if ((b & 1) == 0)
5580 ot = OT_BYTE;
5581 else
5582 ot = dflag + OT_WORD;
5583#ifdef TARGET_X86_64
5584 if (s->aflag == 2) {
5585 offset_addr = ldq_code(s->pc);
5586 s->pc += 8;
5587 gen_op_movq_A0_im(offset_addr);
5588 } else
5589#endif
5590 {
5591 if (s->aflag) {
5592 offset_addr = insn_get(s, OT_LONG);
5593 } else {
5594 offset_addr = insn_get(s, OT_WORD);
5595 }
5596 gen_op_movl_A0_im(offset_addr);
5597 }
5598 gen_add_A0_ds_seg(s);
5599 if ((b & 2) == 0) {
5600 gen_op_ld_T0_A0(ot + s->mem_index);
5601 gen_op_mov_reg_T0(ot, R_EAX);
5602 } else {
5603 gen_op_mov_TN_reg(ot, 0, R_EAX);
5604 gen_op_st_T0_A0(ot + s->mem_index);
5605 }
5606 }
5607 break;
5608 case 0xd7: /* xlat */
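        /* XLAT: AL = mem8[seg:rBX + zero-extended AL] */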
5609#ifdef TARGET_X86_64
5610 if (s->aflag == 2) {
5611 gen_op_movq_A0_reg(R_EBX);
5612 gen_op_mov_TN_reg(OT_QUAD, 0, R_EAX);
5613 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xff);
5614 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_T[0]);
5615 } else
5616#endif
5617 {
5618 gen_op_movl_A0_reg(R_EBX);
5619 gen_op_mov_TN_reg(OT_LONG, 0, R_EAX);
5620 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xff);
5621 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_T[0]);
5622 if (s->aflag == 0)
5623 gen_op_andl_A0_ffff();
5624 else
5625 tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
5626 }
5627 gen_add_A0_ds_seg(s);
5628 gen_op_ldu_T0_A0(OT_BYTE + s->mem_index);
5629 gen_op_mov_reg_T0(OT_BYTE, R_EAX);
5630 break;
5631 case 0xb0 ... 0xb7: /* mov R, Ib */
5632 val = insn_get(s, OT_BYTE);
5633 gen_op_movl_T0_im(val);
5634 gen_op_mov_reg_T0(OT_BYTE, (b & 7) | REX_B(s));
5635 break;
5636 case 0xb8 ... 0xbf: /* mov R, Iv */
5637#ifdef TARGET_X86_64
5638 if (dflag == 2) {
5639 uint64_t tmp;
5640 /* 64 bit case */
5641 tmp = ldq_code(s->pc);
5642 s->pc += 8;
5643 reg = (b & 7) | REX_B(s);
5644 gen_movtl_T0_im(tmp);
5645 gen_op_mov_reg_T0(OT_QUAD, reg);
5646 } else
5647#endif
5648 {
5649 ot = dflag ? OT_LONG : OT_WORD;
5650 val = insn_get(s, ot);
5651 reg = (b & 7) | REX_B(s);
5652 gen_op_movl_T0_im(val);
5653 gen_op_mov_reg_T0(ot, reg);
5654 }
5655 break;
5656
5657 case 0x91 ... 0x97: /* xchg R, EAX */
5658 do_xchg_reg_eax:
5659 ot = dflag + OT_WORD;
5660 reg = (b & 7) | REX_B(s);
5661 rm = R_EAX;
5662 goto do_xchg_reg;
5663 case 0x86:
5664 case 0x87: /* xchg Ev, Gv */
5665 if ((b & 1) == 0)
5666 ot = OT_BYTE;
5667 else
5668 ot = dflag + OT_WORD;
5669 modrm = ldub_code(s->pc++);
5670 reg = ((modrm >> 3) & 7) | rex_r;
5671 mod = (modrm >> 6) & 3;
5672 if (mod == 3) {
5673 rm = (modrm & 7) | REX_B(s);
5674 do_xchg_reg:
5675 gen_op_mov_TN_reg(ot, 0, reg);
5676 gen_op_mov_TN_reg(ot, 1, rm);
5677 gen_op_mov_reg_T0(ot, rm);
5678 gen_op_mov_reg_T1(ot, reg);
5679 } else {
5680 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
5681 gen_op_mov_TN_reg(ot, 0, reg);
5682 /* for xchg, lock is implicit */
5683 if (!(prefixes & PREFIX_LOCK))
5684 gen_helper_lock();
5685 gen_op_ld_T1_A0(ot + s->mem_index);
5686 gen_op_st_T0_A0(ot + s->mem_index);
5687 if (!(prefixes & PREFIX_LOCK))
5688 gen_helper_unlock();
5689 gen_op_mov_reg_T1(ot, reg);
5690 }
5691 break;
5692 case 0xc4: /* les Gv */
5693 if (CODE64(s))
5694 goto illegal_op;
5695 op = R_ES;
5696 goto do_lxx;
5697 case 0xc5: /* lds Gv */
5698 if (CODE64(s))
5699 goto illegal_op;
5700 op = R_DS;
5701 goto do_lxx;
5702 case 0x1b2: /* lss Gv */
5703 op = R_SS;
5704 goto do_lxx;
5705 case 0x1b4: /* lfs Gv */
5706 op = R_FS;
5707 goto do_lxx;
5708 case 0x1b5: /* lgs Gv */
5709 op = R_GS;
5710 do_lxx:
5711 ot = dflag ? OT_LONG : OT_WORD;
5712 modrm = ldub_code(s->pc++);
5713 reg = ((modrm >> 3) & 7) | rex_r;
5714 mod = (modrm >> 6) & 3;
5715 if (mod == 3)
5716 goto illegal_op;
5717 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
5718 gen_op_ld_T1_A0(ot + s->mem_index);
5719 gen_add_A0_im(s, 1 << (ot - OT_WORD + 1));
5720 /* load the segment first to handle exceptions properly */
5721 gen_op_ldu_T0_A0(OT_WORD + s->mem_index);
5722 gen_movl_seg_T0(s, op, pc_start - s->cs_base);
5723 /* then put the data */
5724 gen_op_mov_reg_T1(ot, reg);
5725 if (s->is_jmp) {
5726 gen_jmp_im(s->pc - s->cs_base);
5727 gen_eob(s);
5728 }
5729 break;
5730
5731 /************************/
5732 /* shifts */
5733 case 0xc0:
5734 case 0xc1:
5735 /* shift Ev,Ib */
5736 shift = 2;
5737 grp2:
5738 {
5739 if ((b & 1) == 0)
5740 ot = OT_BYTE;
5741 else
5742 ot = dflag + OT_WORD;
5743
5744 modrm = ldub_code(s->pc++);
5745 mod = (modrm >> 6) & 3;
5746 op = (modrm >> 3) & 7;
5747
5748 if (mod != 3) {
5749 if (shift == 2) {
5750 s->rip_offset = 1;
5751 }
5752 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
5753 opreg = OR_TMP0;
5754 } else {
5755 opreg = (modrm & 7) | REX_B(s);
5756 }
5757
5758 /* simpler op */
5759 if (shift == 0) {
5760 gen_shift(s, op, ot, opreg, OR_ECX);
5761 } else {
5762 if (shift == 2) {
5763 shift = ldub_code(s->pc++);
5764 }
5765 gen_shifti(s, op, ot, opreg, shift);
5766 }
5767 }
5768 break;
5769 case 0xd0:
5770 case 0xd1:
5771 /* shift Ev,1 */
5772 shift = 1;
5773 goto grp2;
5774 case 0xd2:
5775 case 0xd3:
5776 /* shift Ev,cl */
5777 shift = 0;
5778 goto grp2;
5779
5780 case 0x1a4: /* shld imm */
5781 op = 0;
5782 shift = 1;
5783 goto do_shiftd;
5784 case 0x1a5: /* shld cl */
5785 op = 0;
5786 shift = 0;
5787 goto do_shiftd;
5788 case 0x1ac: /* shrd imm */
5789 op = 1;
5790 shift = 1;
5791 goto do_shiftd;
5792 case 0x1ad: /* shrd cl */
5793 op = 1;
5794 shift = 0;
5795 do_shiftd:
5796 ot = dflag + OT_WORD;
5797 modrm = ldub_code(s->pc++);
5798 mod = (modrm >> 6) & 3;
5799 rm = (modrm & 7) | REX_B(s);
5800 reg = ((modrm >> 3) & 7) | rex_r;
5801 if (mod != 3) {
5802 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
5803 opreg = OR_TMP0;
5804 } else {
5805 opreg = rm;
5806 }
5807 gen_op_mov_TN_reg(ot, 1, reg);
5808
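        /* cpu_T3 receives the shift count: immediate byte or CL. */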
5809 if (shift) {
5810 val = ldub_code(s->pc++);
5811 tcg_gen_movi_tl(cpu_T3, val);
5812 } else {
5813 tcg_gen_mov_tl(cpu_T3, cpu_regs[R_ECX]);
5814 }
5815 gen_shiftd_rm_T1_T3(s, ot, opreg, op);
5816 break;
5817
5818 /************************/
5819 /* floats */
5820 case 0xd8 ... 0xdf:
5821 if (s->flags & (HF_EM_MASK | HF_TS_MASK)) {
5822 /* if CR0.EM or CR0.TS are set, generate an FPU exception */
5823 /* XXX: what to do if illegal op? */
5824 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
5825 break;
5826 }
5827 modrm = ldub_code(s->pc++);
5828 mod = (modrm >> 6) & 3;
5829 rm = modrm & 7;
5830 op = ((b & 7) << 3) | ((modrm >> 3) & 7);
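        /* 6-bit x87 opcode index: low 3 bits of the opcode byte (d8..df)
           in the high part, modrm reg field in the low part. */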
5831 if (mod != 3) {
5832 /* memory op */
5833 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
5834 switch(op) {
5835 case 0x00 ... 0x07: /* fxxxs */
5836 case 0x10 ... 0x17: /* fixxxl */
5837 case 0x20 ... 0x27: /* fxxxl */
5838 case 0x30 ... 0x37: /* fixxx */
5839 {
5840 int op1;
5841 op1 = op & 7;
5842
5843 switch(op >> 4) {
5844 case 0:
5845 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
5846 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5847 gen_helper_flds_FT0(cpu_tmp2_i32);
5848 break;
5849 case 1:
5850 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
5851 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5852 gen_helper_fildl_FT0(cpu_tmp2_i32);
5853 break;
5854 case 2:
5855 tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0,
5856 (s->mem_index >> 2) - 1);
5857 gen_helper_fldl_FT0(cpu_tmp1_i64);
5858 break;
5859 case 3:
5860 default:
5861 gen_op_lds_T0_A0(OT_WORD + s->mem_index);
5862 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5863 gen_helper_fildl_FT0(cpu_tmp2_i32);
5864 break;
5865 }
5866
5867 gen_helper_fp_arith_ST0_FT0(op1);
5868 if (op1 == 3) {
5869 /* fcomp needs pop */
5870 gen_helper_fpop();
5871 }
5872 }
5873 break;
5874 case 0x08: /* flds */
5875 case 0x0a: /* fsts */
5876 case 0x0b: /* fstps */
5877 case 0x18 ... 0x1b: /* fildl, fisttpl, fistl, fistpl */
5878 case 0x28 ... 0x2b: /* fldl, fisttpll, fstl, fstpl */
5879 case 0x38 ... 0x3b: /* filds, fisttps, fists, fistps */
5880 switch(op & 7) {
5881 case 0:
5882 switch(op >> 4) {
5883 case 0:
5884 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
5885 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5886 gen_helper_flds_ST0(cpu_tmp2_i32);
5887 break;
5888 case 1:
5889 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
5890 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5891 gen_helper_fildl_ST0(cpu_tmp2_i32);
5892 break;
5893 case 2:
5894 tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0,
5895 (s->mem_index >> 2) - 1);
5896 gen_helper_fldl_ST0(cpu_tmp1_i64);
5897 break;
5898 case 3:
5899 default:
5900 gen_op_lds_T0_A0(OT_WORD + s->mem_index);
5901 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5902 gen_helper_fildl_ST0(cpu_tmp2_i32);
5903 break;
5904 }
5905 break;
5906 case 1:
5907 /* XXX: the corresponding CPUID bit must be tested! */
5908 switch(op >> 4) {
5909 case 1:
5910 gen_helper_fisttl_ST0(cpu_tmp2_i32);
5911 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
5912 gen_op_st_T0_A0(OT_LONG + s->mem_index);
5913 break;
5914 case 2:
5915 gen_helper_fisttll_ST0(cpu_tmp1_i64);
5916 tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0,
5917 (s->mem_index >> 2) - 1);
5918 break;
5919 case 3:
5920 default:
5921 gen_helper_fistt_ST0(cpu_tmp2_i32);
5922 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
5923 gen_op_st_T0_A0(OT_WORD + s->mem_index);
5924 break;
5925 }
5926 gen_helper_fpop();
5927 break;
5928 default:
5929 switch(op >> 4) {
5930 case 0:
5931 gen_helper_fsts_ST0(cpu_tmp2_i32);
5932 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
5933 gen_op_st_T0_A0(OT_LONG + s->mem_index);
5934 break;
5935 case 1:
5936 gen_helper_fistl_ST0(cpu_tmp2_i32);
5937 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
5938 gen_op_st_T0_A0(OT_LONG + s->mem_index);
5939 break;
5940 case 2:
5941 gen_helper_fstl_ST0(cpu_tmp1_i64);
5942 tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0,
5943 (s->mem_index >> 2) - 1);
5944 break;
5945 case 3:
5946 default:
5947 gen_helper_fist_ST0(cpu_tmp2_i32);
5948 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
5949 gen_op_st_T0_A0(OT_WORD + s->mem_index);
5950 break;
5951 }
5952 if ((op & 7) == 3)
5953 gen_helper_fpop();
5954 break;
5955 }
5956 break;
5957 case 0x0c: /* fldenv mem */
5958 if (s->cc_op != CC_OP_DYNAMIC)
5959 gen_op_set_cc_op(s->cc_op);
5960 gen_jmp_im(pc_start - s->cs_base);
5961 gen_helper_fldenv(
5962 cpu_A0, tcg_const_i32(s->dflag));
5963 break;
5964 case 0x0d: /* fldcw mem */
5965 gen_op_ld_T0_A0(OT_WORD + s->mem_index);
5966 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5967 gen_helper_fldcw(cpu_tmp2_i32);
5968 break;
5969 case 0x0e: /* fnstenv mem */
5970 if (s->cc_op != CC_OP_DYNAMIC)
5971 gen_op_set_cc_op(s->cc_op);
5972 gen_jmp_im(pc_start - s->cs_base);
5973 gen_helper_fstenv(cpu_A0, tcg_const_i32(s->dflag));
5974 break;
5975 case 0x0f: /* fnstcw mem */
5976 gen_helper_fnstcw(cpu_tmp2_i32);
5977 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
5978 gen_op_st_T0_A0(OT_WORD + s->mem_index);
5979 break;
5980 case 0x1d: /* fldt mem */
5981 if (s->cc_op != CC_OP_DYNAMIC)
5982 gen_op_set_cc_op(s->cc_op);
5983 gen_jmp_im(pc_start - s->cs_base);
5984 gen_helper_fldt_ST0(cpu_A0);
5985 break;
5986 case 0x1f: /* fstpt mem */
5987 if (s->cc_op != CC_OP_DYNAMIC)
5988 gen_op_set_cc_op(s->cc_op);
5989 gen_jmp_im(pc_start - s->cs_base);
5990 gen_helper_fstt_ST0(cpu_A0);
5991 gen_helper_fpop();
5992 break;
5993 case 0x2c: /* frstor mem */
5994 if (s->cc_op != CC_OP_DYNAMIC)
5995 gen_op_set_cc_op(s->cc_op);
5996 gen_jmp_im(pc_start - s->cs_base);
5997 gen_helper_frstor(cpu_A0, tcg_const_i32(s->dflag));
5998 break;
5999 case 0x2e: /* fnsave mem */
6000 if (s->cc_op != CC_OP_DYNAMIC)
6001 gen_op_set_cc_op(s->cc_op);
6002 gen_jmp_im(pc_start - s->cs_base);
6003 gen_helper_fsave(cpu_A0, tcg_const_i32(s->dflag));
6004 break;
6005 case 0x2f: /* fnstsw mem */
6006 gen_helper_fnstsw(cpu_tmp2_i32);
6007 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
6008 gen_op_st_T0_A0(OT_WORD + s->mem_index);
6009 break;
6010 case 0x3c: /* fbld */
6011 if (s->cc_op != CC_OP_DYNAMIC)
6012 gen_op_set_cc_op(s->cc_op);
6013 gen_jmp_im(pc_start - s->cs_base);
6014 gen_helper_fbld_ST0(cpu_A0);
6015 break;
6016 case 0x3e: /* fbstp */
6017 if (s->cc_op != CC_OP_DYNAMIC)
6018 gen_op_set_cc_op(s->cc_op);
6019 gen_jmp_im(pc_start - s->cs_base);
6020 gen_helper_fbst_ST0(cpu_A0);
6021 gen_helper_fpop();
6022 break;
6023 case 0x3d: /* fildll */
6024 tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0,
6025 (s->mem_index >> 2) - 1);
6026 gen_helper_fildll_ST0(cpu_tmp1_i64);
6027 break;
6028 case 0x3f: /* fistpll */
6029 gen_helper_fistll_ST0(cpu_tmp1_i64);
6030 tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0,
6031 (s->mem_index >> 2) - 1);
6032 gen_helper_fpop();
6033 break;
6034 default:
6035 goto illegal_op;
6036 }
6037 } else {
6038 /* register float ops */
6039 opreg = rm;
6040
6041 switch(op) {
6042 case 0x08: /* fld sti */
6043 gen_helper_fpush();
6044 gen_helper_fmov_ST0_STN(tcg_const_i32((opreg + 1) & 7));
6045 break;
6046 case 0x09: /* fxchg sti */
6047 case 0x29: /* fxchg4 sti, undocumented op */
6048 case 0x39: /* fxchg7 sti, undocumented op */
6049 gen_helper_fxchg_ST0_STN(tcg_const_i32(opreg));
6050 break;
6051 case 0x0a: /* grp d9/2 */
6052 switch(rm) {
6053 case 0: /* fnop */
6054 /* check exceptions (FreeBSD FPU probe) */
6055 if (s->cc_op != CC_OP_DYNAMIC)
6056 gen_op_set_cc_op(s->cc_op);
6057 gen_jmp_im(pc_start - s->cs_base);
6058 gen_helper_fwait();
6059 break;
6060 default:
6061 goto illegal_op;
6062 }
6063 break;
6064 case 0x0c: /* grp d9/4 */
6065 switch(rm) {
6066 case 0: /* fchs */
6067 gen_helper_fchs_ST0();
6068 break;
6069 case 1: /* fabs */
6070 gen_helper_fabs_ST0();
6071 break;
6072 case 4: /* ftst */
6073 gen_helper_fldz_FT0();
6074 gen_helper_fcom_ST0_FT0();
6075 break;
6076 case 5: /* fxam */
6077 gen_helper_fxam_ST0();
6078 break;
6079 default:
6080 goto illegal_op;
6081 }
6082 break;
6083 case 0x0d: /* grp d9/5 */
6084 {
6085 switch(rm) {
6086 case 0:
6087 gen_helper_fpush();
6088 gen_helper_fld1_ST0();
6089 break;
6090 case 1:
6091 gen_helper_fpush();
6092 gen_helper_fldl2t_ST0();
6093 break;
6094 case 2:
6095 gen_helper_fpush();
6096 gen_helper_fldl2e_ST0();
6097 break;
6098 case 3:
6099 gen_helper_fpush();
6100 gen_helper_fldpi_ST0();
6101 break;
6102 case 4:
6103 gen_helper_fpush();
6104 gen_helper_fldlg2_ST0();
6105 break;
6106 case 5:
6107 gen_helper_fpush();
6108 gen_helper_fldln2_ST0();
6109 break;
6110 case 6:
6111 gen_helper_fpush();
6112 gen_helper_fldz_ST0();
6113 break;
6114 default:
6115 goto illegal_op;
6116 }
6117 }
6118 break;
6119 case 0x0e: /* grp d9/6 */
6120 switch(rm) {
6121 case 0: /* f2xm1 */
6122 gen_helper_f2xm1();
6123 break;
6124 case 1: /* fyl2x */
6125 gen_helper_fyl2x();
6126 break;
6127 case 2: /* fptan */
6128 gen_helper_fptan();
6129 break;
6130 case 3: /* fpatan */
6131 gen_helper_fpatan();
6132 break;
6133 case 4: /* fxtract */
6134 gen_helper_fxtract();
6135 break;
6136 case 5: /* fprem1 */
6137 gen_helper_fprem1();
6138 break;
6139 case 6: /* fdecstp */
6140 gen_helper_fdecstp();
6141 break;
6142 default:
6143 case 7: /* fincstp */
6144 gen_helper_fincstp();
6145 break;
6146 }
6147 break;
6148 case 0x0f: /* grp d9/7 */
6149 switch(rm) {
6150 case 0: /* fprem */
6151 gen_helper_fprem();
6152 break;
6153 case 1: /* fyl2xp1 */
6154 gen_helper_fyl2xp1();
6155 break;
6156 case 2: /* fsqrt */
6157 gen_helper_fsqrt();
6158 break;
6159 case 3: /* fsincos */
6160 gen_helper_fsincos();
6161 break;
6162 case 5: /* fscale */
6163 gen_helper_fscale();
6164 break;
6165 case 4: /* frndint */
6166 gen_helper_frndint();
6167 break;
6168 case 6: /* fsin */
6169 gen_helper_fsin();
6170 break;
6171 default:
6172 case 7: /* fcos */
6173 gen_helper_fcos();
6174 break;
6175 }
6176 break;
6177 case 0x00: case 0x01: case 0x04 ... 0x07: /* fxxx st, sti */
6178 case 0x20: case 0x21: case 0x24 ... 0x27: /* fxxx sti, st */
6179 case 0x30: case 0x31: case 0x34 ... 0x37: /* fxxxp sti, st */
6180 {
6181 int op1;
6182
6183 op1 = op & 7;
6184 if (op >= 0x20) {
6185 gen_helper_fp_arith_STN_ST0(op1, opreg);
6186 if (op >= 0x30)
6187 gen_helper_fpop();
6188 } else {
6189 gen_helper_fmov_FT0_STN(tcg_const_i32(opreg));
6190 gen_helper_fp_arith_ST0_FT0(op1);
6191 }
6192 }
6193 break;
6194 case 0x02: /* fcom */
6195 case 0x22: /* fcom2, undocumented op */
6196 gen_helper_fmov_FT0_STN(tcg_const_i32(opreg));
6197 gen_helper_fcom_ST0_FT0();
6198 break;
6199 case 0x03: /* fcomp */
6200 case 0x23: /* fcomp3, undocumented op */
6201 case 0x32: /* fcomp5, undocumented op */
6202 gen_helper_fmov_FT0_STN(tcg_const_i32(opreg));
6203 gen_helper_fcom_ST0_FT0();
6204 gen_helper_fpop();
6205 break;
6206 case 0x15: /* da/5 */
6207 switch(rm) {
6208 case 1: /* fucompp */
6209 gen_helper_fmov_FT0_STN(tcg_const_i32(1));
6210 gen_helper_fucom_ST0_FT0();
6211 gen_helper_fpop();
6212 gen_helper_fpop();
6213 break;
6214 default:
6215 goto illegal_op;
6216 }
6217 break;
6218 case 0x1c:
6219 switch(rm) {
6220 case 0: /* feni (287 only, just do nop here) */
6221 break;
6222 case 1: /* fdisi (287 only, just do nop here) */
6223 break;
6224 case 2: /* fclex */
6225 gen_helper_fclex();
6226 break;
6227 case 3: /* fninit */
6228 gen_helper_fninit();
6229 break;
6230 case 4: /* fsetpm (287 only, just do nop here) */
6231 break;
6232 default:
6233 goto illegal_op;
6234 }
6235 break;
6236 case 0x1d: /* fucomi */
6237 if (s->cc_op != CC_OP_DYNAMIC)
6238 gen_op_set_cc_op(s->cc_op);
6239 gen_helper_fmov_FT0_STN(tcg_const_i32(opreg));
6240 gen_helper_fucomi_ST0_FT0();
6241 s->cc_op = CC_OP_EFLAGS;
6242 break;
6243 case 0x1e: /* fcomi */
6244 if (s->cc_op != CC_OP_DYNAMIC)
6245 gen_op_set_cc_op(s->cc_op);
6246 gen_helper_fmov_FT0_STN(tcg_const_i32(opreg));
6247 gen_helper_fcomi_ST0_FT0();
6248 s->cc_op = CC_OP_EFLAGS;
6249 break;
6250 case 0x28: /* ffree sti */
6251 gen_helper_ffree_STN(tcg_const_i32(opreg));
6252 break;
6253 case 0x2a: /* fst sti */
6254 gen_helper_fmov_STN_ST0(tcg_const_i32(opreg));
6255 break;
6256 case 0x2b: /* fstp sti */
6257 case 0x0b: /* fstp1 sti, undocumented op */
6258 case 0x3a: /* fstp8 sti, undocumented op */
6259 case 0x3b: /* fstp9 sti, undocumented op */
6260 gen_helper_fmov_STN_ST0(tcg_const_i32(opreg));
6261 gen_helper_fpop();
6262 break;
6263 case 0x2c: /* fucom st(i) */
6264 gen_helper_fmov_FT0_STN(tcg_const_i32(opreg));
6265 gen_helper_fucom_ST0_FT0();
6266 break;
6267 case 0x2d: /* fucomp st(i) */
6268 gen_helper_fmov_FT0_STN(tcg_const_i32(opreg));
6269 gen_helper_fucom_ST0_FT0();
6270 gen_helper_fpop();
6271 break;
6272 case 0x33: /* de/3 */
6273 switch(rm) {
6274 case 1: /* fcompp */
6275 gen_helper_fmov_FT0_STN(tcg_const_i32(1));
6276 gen_helper_fcom_ST0_FT0();
6277 gen_helper_fpop();
6278 gen_helper_fpop();
6279 break;
6280 default:
6281 goto illegal_op;
6282 }
6283 break;
6284 case 0x38: /* ffreep sti, undocumented op */
6285 gen_helper_ffree_STN(tcg_const_i32(opreg));
6286 gen_helper_fpop();
6287 break;
6288 case 0x3c: /* df/4 */
6289 switch(rm) {
6290 case 0:
6291 gen_helper_fnstsw(cpu_tmp2_i32);
6292 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
6293 gen_op_mov_reg_T0(OT_WORD, R_EAX);
6294 break;
6295 default:
6296 goto illegal_op;
6297 }
6298 break;
6299 case 0x3d: /* fucomip */
6300 if (s->cc_op != CC_OP_DYNAMIC)
6301 gen_op_set_cc_op(s->cc_op);
6302 gen_helper_fmov_FT0_STN(tcg_const_i32(opreg));
6303 gen_helper_fucomi_ST0_FT0();
6304 gen_helper_fpop();
6305 s->cc_op = CC_OP_EFLAGS;
6306 break;
6307 case 0x3e: /* fcomip */
6308 if (s->cc_op != CC_OP_DYNAMIC)
6309 gen_op_set_cc_op(s->cc_op);
6310 gen_helper_fmov_FT0_STN(tcg_const_i32(opreg));
6311 gen_helper_fcomi_ST0_FT0();
6312 gen_helper_fpop();
6313 s->cc_op = CC_OP_EFLAGS;
6314 break;
6315 case 0x10 ... 0x13: /* fcmovxx */
6316 case 0x18 ... 0x1b:
6317 {
6318 int op1, l1;
6319 static const uint8_t fcmov_cc[8] = {
6320 (JCC_B << 1),
6321 (JCC_Z << 1),
6322 (JCC_BE << 1),
6323 (JCC_P << 1),
6324 };
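                    /* The two low opcode bits index the tested condition
                       (B, Z, BE, P); opcode bit 3 distinguishes the DA
                       (move if condition) group from the DB (move if not
                       condition) group.  The jcc below jumps over the fmov
                       when the move condition does not hold, hence the
                       inverted low bit of op1. */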
6325 op1 = fcmov_cc[op & 3] | (((op >> 3) & 1) ^ 1);
6326 l1 = gen_new_label();
6327 gen_jcc1(s, s->cc_op, op1, l1);
6328 gen_helper_fmov_ST0_STN(tcg_const_i32(opreg));
6329 gen_set_label(l1);
6330 }
6331 break;
6332 default:
6333 goto illegal_op;
6334 }
6335 }
6336 break;
6337 /************************/
6338 /* string ops */
6339
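        /* For all string ops below, the low opcode bit selects a byte
           operation, otherwise the size comes from dflag (word/long/quad).
           A REP/REPZ/REPNZ prefix routes to the gen_repz_* generators,
           which emit a single iteration plus a conditional jump back to
           the instruction, so each pass through the TB does one step. */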
6340 case 0xa4: /* movsS */
6341 case 0xa5:
6342 if ((b & 1) == 0)
6343 ot = OT_BYTE;
6344 else
6345 ot = dflag + OT_WORD;
6346
6347 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6348 gen_repz_movs(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
6349 } else {
6350 gen_movs(s, ot);
6351 }
6352 break;
6353
6354 case 0xaa: /* stosS */
6355 case 0xab:
6356 if ((b & 1) == 0)
6357 ot = OT_BYTE;
6358 else
6359 ot = dflag + OT_WORD;
6360
6361 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6362 gen_repz_stos(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
6363 } else {
6364 gen_stos(s, ot);
6365 }
6366 break;
6367 case 0xac: /* lodsS */
6368 case 0xad:
6369 if ((b & 1) == 0)
6370 ot = OT_BYTE;
6371 else
6372 ot = dflag + OT_WORD;
6373 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6374 gen_repz_lods(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
6375 } else {
6376 gen_lods(s, ot);
6377 }
6378 break;
6379 case 0xae: /* scasS */
6380 case 0xaf:
6381 if ((b & 1) == 0)
6382 ot = OT_BYTE;
6383 else
6384 ot = dflag + OT_WORD;
6385 if (prefixes & PREFIX_REPNZ) {
6386 gen_repz_scas(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 1);
6387 } else if (prefixes & PREFIX_REPZ) {
6388 gen_repz_scas(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 0);
6389 } else {
6390 gen_scas(s, ot);
6391 s->cc_op = CC_OP_SUBB + ot;
6392 }
6393 break;
6394
6395 case 0xa6: /* cmpsS */
6396 case 0xa7:
6397 if ((b & 1) == 0)
6398 ot = OT_BYTE;
6399 else
6400 ot = dflag + OT_WORD;
6401 if (prefixes & PREFIX_REPNZ) {
6402 gen_repz_cmps(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 1);
6403 } else if (prefixes & PREFIX_REPZ) {
6404 gen_repz_cmps(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 0);
6405 } else {
6406 gen_cmps(s, ot);
6407 s->cc_op = CC_OP_SUBB + ot;
6408 }
6409 break;
6410 case 0x6c: /* insS */
6411 case 0x6d:
6412 if ((b & 1) == 0)
6413 ot = OT_BYTE;
6414 else
6415 ot = dflag ? OT_LONG : OT_WORD;
6416 gen_op_mov_TN_reg(OT_WORD, 0, R_EDX);
6417 gen_op_andl_T0_ffff();
6418 gen_check_io(s, ot, pc_start - s->cs_base,
6419 SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes) | 4);
6420 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6421 gen_repz_ins(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
6422 } else {
6423 gen_ins(s, ot);
6424 if (use_icount) {
6425 gen_jmp(s, s->pc - s->cs_base);
6426 }
6427 }
6428 break;
6429 case 0x6e: /* outsS */
6430 case 0x6f:
6431 if ((b & 1) == 0)
6432 ot = OT_BYTE;
6433 else
6434 ot = dflag ? OT_LONG : OT_WORD;
6435 gen_op_mov_TN_reg(OT_WORD, 0, R_EDX);
6436 gen_op_andl_T0_ffff();
6437 gen_check_io(s, ot, pc_start - s->cs_base,
6438 svm_is_rep(prefixes) | 4);
6439 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6440 gen_repz_outs(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
6441 } else {
6442 gen_outs(s, ot);
6443 if (use_icount) {
6444 gen_jmp(s, s->pc - s->cs_base);
6445 }
6446 }
6447 break;
6448
6449 /************************/
6450 /* port I/O */
6451
6452 case 0xe4:
6453 case 0xe5:
6454 if ((b & 1) == 0)
6455 ot = OT_BYTE;
6456 else
6457 ot = dflag ? OT_LONG : OT_WORD;
6458 val = ldub_code(s->pc++);
6459 gen_op_movl_T0_im(val);
6460 gen_check_io(s, ot, pc_start - s->cs_base,
6461 SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes));
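            /* With instruction counting (icount) enabled, an I/O access
               must be the last operation in a translation block: the
               helper is bracketed by gen_io_start()/gen_io_end() and the
               block is terminated with a jump to the next insn. */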
6462 if (use_icount)
6463 gen_io_start();
6464 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
6465 gen_helper_in_func(ot, cpu_T[1], cpu_tmp2_i32);
6466 gen_op_mov_reg_T1(ot, R_EAX);
6467 if (use_icount) {
6468 gen_io_end();
6469 gen_jmp(s, s->pc - s->cs_base);
6470 }
6471 break;
6472 case 0xe6:
6473 case 0xe7:
6474 if ((b & 1) == 0)
6475 ot = OT_BYTE;
6476 else
6477 ot = dflag ? OT_LONG : OT_WORD;
6478 val = ldub_code(s->pc++);
6479 gen_op_movl_T0_im(val);
6480 gen_check_io(s, ot, pc_start - s->cs_base,
6481 svm_is_rep(prefixes));
6482 gen_op_mov_TN_reg(ot, 1, R_EAX);
6483
6484 if (use_icount)
6485 gen_io_start();
6486 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
6487 tcg_gen_andi_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0xffff);
6488 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[1]);
6489 gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);
6490 if (use_icount) {
6491 gen_io_end();
6492 gen_jmp(s, s->pc - s->cs_base);
6493 }
6494 break;
6495 case 0xec:
6496 case 0xed:
6497 if ((b & 1) == 0)
6498 ot = OT_BYTE;
6499 else
6500 ot = dflag ? OT_LONG : OT_WORD;
6501 gen_op_mov_TN_reg(OT_WORD, 0, R_EDX);
6502 gen_op_andl_T0_ffff();
6503 gen_check_io(s, ot, pc_start - s->cs_base,
6504 SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes));
6505 if (use_icount)
6506 gen_io_start();
6507 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
6508 gen_helper_in_func(ot, cpu_T[1], cpu_tmp2_i32);
6509 gen_op_mov_reg_T1(ot, R_EAX);
6510 if (use_icount) {
6511 gen_io_end();
6512 gen_jmp(s, s->pc - s->cs_base);
6513 }
6514 break;
6515 case 0xee:
6516 case 0xef:
6517 if ((b & 1) == 0)
6518 ot = OT_BYTE;
6519 else
6520 ot = dflag ? OT_LONG : OT_WORD;
6521 gen_op_mov_TN_reg(OT_WORD, 0, R_EDX);
6522 gen_op_andl_T0_ffff();
6523 gen_check_io(s, ot, pc_start - s->cs_base,
6524 svm_is_rep(prefixes));
6525 gen_op_mov_TN_reg(ot, 1, R_EAX);
6526
6527 if (use_icount)
6528 gen_io_start();
6529 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
6530 tcg_gen_andi_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0xffff);
6531 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[1]);
6532 gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);
6533 if (use_icount) {
6534 gen_io_end();
6535 gen_jmp(s, s->pc - s->cs_base);
6536 }
6537 break;
6538
6539 /************************/
6540 /* control */
6541 case 0xc2: /* ret im */
6542 val = ldsw_code(s->pc);
6543 s->pc += 2;
6544 gen_pop_T0(s);
6545 if (CODE64(s) && s->dflag)
6546 s->dflag = 2;
6547 gen_stack_update(s, val + (2 << s->dflag));
6548 if (s->dflag == 0)
6549 gen_op_andl_T0_ffff();
6550 gen_op_jmp_T0();
6551 gen_eob(s);
6552 break;
6553 case 0xc3: /* ret */
6554 gen_pop_T0(s);
6555 gen_pop_update(s);
6556 if (s->dflag == 0)
6557 gen_op_andl_T0_ffff();
6558 gen_op_jmp_T0();
6559 gen_eob(s);
6560 break;
6561 case 0xca: /* lret im */
6562 val = ldsw_code(s->pc);
6563 s->pc += 2;
6564 do_lret:
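            /* Protected-mode far returns need selector and privilege
               checks (and possibly a stack switch), so they go through a
               helper; real and vm86 mode simply pop the offset and the CS
               selector off the stack inline. */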
6565 if (s->pe && !s->vm86) {
6566 if (s->cc_op != CC_OP_DYNAMIC)
6567 gen_op_set_cc_op(s->cc_op);
6568 gen_jmp_im(pc_start - s->cs_base);
6569 gen_helper_lret_protected(tcg_const_i32(s->dflag),
6570 tcg_const_i32(val));
6571 } else {
6572 gen_stack_A0(s);
6573 /* pop offset */
6574 gen_op_ld_T0_A0(1 + s->dflag + s->mem_index);
6575 if (s->dflag == 0)
6576 gen_op_andl_T0_ffff();
6577 /* NOTE: keeping EIP updated is not a problem in case of
6578 exception */
6579 gen_op_jmp_T0();
6580 /* pop selector */
6581 gen_op_addl_A0_im(2 << s->dflag);
6582 gen_op_ld_T0_A0(1 + s->dflag + s->mem_index);
6583 gen_op_movl_seg_T0_vm(R_CS);
6584 /* add stack offset */
6585 gen_stack_update(s, val + (4 << s->dflag));
6586 }
6587 gen_eob(s);
6588 break;
6589 case 0xcb: /* lret */
6590 val = 0;
6591 goto do_lret;
6592 case 0xcf: /* iret */
6593 gen_svm_check_intercept(s, pc_start, SVM_EXIT_IRET);
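            /* Three cases: real mode uses the plain helper; vm86 mode
               requires IOPL 3 (or, in the VBox build, VME with a 16-bit
               operand size) and raises #GP otherwise; protected mode uses
               the full privilege-checking helper.  The successful paths
               reload EFLAGS, hence cc_op = CC_OP_EFLAGS. */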
6594 if (!s->pe) {
6595 /* real mode */
6596 gen_helper_iret_real(tcg_const_i32(s->dflag));
6597 s->cc_op = CC_OP_EFLAGS;
6598 } else if (s->vm86) {
6599#ifdef VBOX
6600 if (s->iopl != 3 && (!s->vme || s->dflag)) {
6601#else
6602 if (s->iopl != 3) {
6603#endif
6604 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6605 } else {
6606 gen_helper_iret_real(tcg_const_i32(s->dflag));
6607 s->cc_op = CC_OP_EFLAGS;
6608 }
6609 } else {
6610 if (s->cc_op != CC_OP_DYNAMIC)
6611 gen_op_set_cc_op(s->cc_op);
6612 gen_jmp_im(pc_start - s->cs_base);
6613 gen_helper_iret_protected(tcg_const_i32(s->dflag),
6614 tcg_const_i32(s->pc - s->cs_base));
6615 s->cc_op = CC_OP_EFLAGS;
6616 }
6617 gen_eob(s);
6618 break;
6619 case 0xe8: /* call im */
6620 {
6621 if (dflag)
6622 tval = (int32_t)insn_get(s, OT_LONG);
6623 else
6624 tval = (int16_t)insn_get(s, OT_WORD);
6625 next_eip = s->pc - s->cs_base;
6626 tval += next_eip;
6627 if (s->dflag == 0)
6628 tval &= 0xffff;
6629 else if(!CODE64(s))
6630 tval &= 0xffffffff;
6631 gen_movtl_T0_im(next_eip);
6632 gen_push_T0(s);
6633 gen_jmp(s, tval);
6634 }
6635 break;
6636 case 0x9a: /* lcall im */
6637 {
6638 unsigned int selector, offset;
6639
6640 if (CODE64(s))
6641 goto illegal_op;
6642 ot = dflag ? OT_LONG : OT_WORD;
6643 offset = insn_get(s, ot);
6644 selector = insn_get(s, OT_WORD);
6645
6646 gen_op_movl_T0_im(selector);
6647 gen_op_movl_T1_imu(offset);
6648 }
6649 goto do_lcall;
6650 case 0xe9: /* jmp im */
6651 if (dflag)
6652 tval = (int32_t)insn_get(s, OT_LONG);
6653 else
6654 tval = (int16_t)insn_get(s, OT_WORD);
6655 tval += s->pc - s->cs_base;
6656 if (s->dflag == 0)
6657 tval &= 0xffff;
6658 else if(!CODE64(s))
6659 tval &= 0xffffffff;
6660 gen_jmp(s, tval);
6661 break;
6662 case 0xea: /* ljmp im */
6663 {
6664 unsigned int selector, offset;
6665
6666 if (CODE64(s))
6667 goto illegal_op;
6668 ot = dflag ? OT_LONG : OT_WORD;
6669 offset = insn_get(s, ot);
6670 selector = insn_get(s, OT_WORD);
6671
6672 gen_op_movl_T0_im(selector);
6673 gen_op_movl_T1_imu(offset);
6674 }
6675 goto do_ljmp;
6676 case 0xeb: /* jmp Jb */
6677 tval = (int8_t)insn_get(s, OT_BYTE);
6678 tval += s->pc - s->cs_base;
6679 if (s->dflag == 0)
6680 tval &= 0xffff;
6681 gen_jmp(s, tval);
6682 break;
6683 case 0x70 ... 0x7f: /* jcc Jb */
6684 tval = (int8_t)insn_get(s, OT_BYTE);
6685 goto do_jcc;
6686 case 0x180 ... 0x18f: /* jcc Jv */
6687 if (dflag) {
6688 tval = (int32_t)insn_get(s, OT_LONG);
6689 } else {
6690 tval = (int16_t)insn_get(s, OT_WORD);
6691 }
6692 do_jcc:
6693 next_eip = s->pc - s->cs_base;
6694 tval += next_eip;
6695 if (s->dflag == 0)
6696 tval &= 0xffff;
6697 gen_jcc(s, b, tval, next_eip);
6698 break;
6699
6700 case 0x190 ... 0x19f: /* setcc Gv */
6701 modrm = ldub_code(s->pc++);
6702 gen_setcc(s, b);
6703 gen_ldst_modrm(s, modrm, OT_BYTE, OR_TMP0, 1);
6704 break;
6705 case 0x140 ... 0x14f: /* cmov Gv, Ev */
6706 {
6707 int l1;
6708 TCGv t0;
6709
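                /* CMOV always reads the source operand, even when the
                   condition is false.  In 64-bit mode a 32-bit cmov also
                   zero-extends the destination register unconditionally,
                   which is why the ext32u below sits after the label. */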
6710 ot = dflag + OT_WORD;
6711 modrm = ldub_code(s->pc++);
6712 reg = ((modrm >> 3) & 7) | rex_r;
6713 mod = (modrm >> 6) & 3;
6714 t0 = tcg_temp_local_new();
6715 if (mod != 3) {
6716 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
6717 gen_op_ld_v(ot + s->mem_index, t0, cpu_A0);
6718 } else {
6719 rm = (modrm & 7) | REX_B(s);
6720 gen_op_mov_v_reg(ot, t0, rm);
6721 }
6722#ifdef TARGET_X86_64
6723 if (ot == OT_LONG) {
6724 /* XXX: specific Intel behaviour ? */
6725 l1 = gen_new_label();
6726 gen_jcc1(s, s->cc_op, b ^ 1, l1);
6727 tcg_gen_mov_tl(cpu_regs[reg], t0);
6728 gen_set_label(l1);
6729 tcg_gen_ext32u_tl(cpu_regs[reg], cpu_regs[reg]);
6730 } else
6731#endif
6732 {
6733 l1 = gen_new_label();
6734 gen_jcc1(s, s->cc_op, b ^ 1, l1);
6735 gen_op_mov_reg_v(ot, reg, t0);
6736 gen_set_label(l1);
6737 }
6738 tcg_temp_free(t0);
6739 }
6740 break;
6741
6742 /************************/
6743 /* flags */
6744 case 0x9c: /* pushf */
6745 gen_svm_check_intercept(s, pc_start, SVM_EXIT_PUSHF);
6746#ifdef VBOX
6747 if (s->vm86 && s->iopl != 3 && (!s->vme || s->dflag)) {
6748#else
6749 if (s->vm86 && s->iopl != 3) {
6750#endif
6751 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6752 } else {
6753 if (s->cc_op != CC_OP_DYNAMIC)
6754 gen_op_set_cc_op(s->cc_op);
6755#ifdef VBOX
6756 if (s->vm86 && s->vme && s->iopl != 3)
6757 gen_helper_read_eflags_vme(cpu_T[0]);
6758 else
6759#endif
6760 gen_helper_read_eflags(cpu_T[0]);
6761 gen_push_T0(s);
6762 }
6763 break;
6764 case 0x9d: /* popf */
6765 gen_svm_check_intercept(s, pc_start, SVM_EXIT_POPF);
6766#ifdef VBOX
6767 if (s->vm86 && s->iopl != 3 && (!s->vme || s->dflag)) {
6768#else
6769 if (s->vm86 && s->iopl != 3) {
6770#endif
6771 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6772 } else {
6773 gen_pop_T0(s);
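            /* Which EFLAGS bits POPF may modify depends on privilege:
               CPL 0 may also change IOPL, CPL <= IOPL may change IF, and
               anything else may touch neither.  The 16-bit forms mask the
               update to the low word; the VBox VME path virtualizes IF
               through VIF instead. */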
6774 if (s->cpl == 0) {
6775 if (s->dflag) {
6776 gen_helper_write_eflags(cpu_T[0],
6777 tcg_const_i32((TF_MASK | AC_MASK | ID_MASK | NT_MASK | IF_MASK | IOPL_MASK)));
6778 } else {
6779 gen_helper_write_eflags(cpu_T[0],
6780 tcg_const_i32((TF_MASK | AC_MASK | ID_MASK | NT_MASK | IF_MASK | IOPL_MASK) & 0xffff));
6781 }
6782 } else {
6783 if (s->cpl <= s->iopl) {
6784 if (s->dflag) {
6785 gen_helper_write_eflags(cpu_T[0],
6786 tcg_const_i32((TF_MASK | AC_MASK | ID_MASK | NT_MASK | IF_MASK)));
6787 } else {
6788 gen_helper_write_eflags(cpu_T[0],
6789 tcg_const_i32((TF_MASK | AC_MASK | ID_MASK | NT_MASK | IF_MASK) & 0xffff));
6790 }
6791 } else {
6792 if (s->dflag) {
6793 gen_helper_write_eflags(cpu_T[0],
6794 tcg_const_i32((TF_MASK | AC_MASK | ID_MASK | NT_MASK)));
6795 } else {
6796#ifdef VBOX
6797 if (s->vm86 && s->vme)
6798 gen_helper_write_eflags_vme(cpu_T[0]);
6799 else
6800#endif
6801 gen_helper_write_eflags(cpu_T[0],
6802 tcg_const_i32((TF_MASK | AC_MASK | ID_MASK | NT_MASK) & 0xffff));
6803 }
6804 }
6805 }
6806 gen_pop_update(s);
6807 s->cc_op = CC_OP_EFLAGS;
6808 /* abort translation because TF flag may change */
6809 gen_jmp_im(s->pc - s->cs_base);
6810 gen_eob(s);
6811 }
6812 break;
6813 case 0x9e: /* sahf */
6814 if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
6815 goto illegal_op;
6816 gen_op_mov_TN_reg(OT_BYTE, 0, R_AH);
6817 if (s->cc_op != CC_OP_DYNAMIC)
6818 gen_op_set_cc_op(s->cc_op);
6819 gen_compute_eflags(cpu_cc_src);
6820 tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, CC_O);
6821 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], CC_S | CC_Z | CC_A | CC_P | CC_C);
6822 tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, cpu_T[0]);
6823 s->cc_op = CC_OP_EFLAGS;
6824 break;
6825 case 0x9f: /* lahf */
6826 if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
6827 goto illegal_op;
6828 if (s->cc_op != CC_OP_DYNAMIC)
6829 gen_op_set_cc_op(s->cc_op);
6830 gen_compute_eflags(cpu_T[0]);
6831 /* Note: gen_compute_eflags() only gives the condition codes */
6832 tcg_gen_ori_tl(cpu_T[0], cpu_T[0], 0x02);
6833 gen_op_mov_reg_T0(OT_BYTE, R_AH);
6834 break;
6835 case 0xf5: /* cmc */
6836 if (s->cc_op != CC_OP_DYNAMIC)
6837 gen_op_set_cc_op(s->cc_op);
6838 gen_compute_eflags(cpu_cc_src);
6839 tcg_gen_xori_tl(cpu_cc_src, cpu_cc_src, CC_C);
6840 s->cc_op = CC_OP_EFLAGS;
6841 break;
6842 case 0xf8: /* clc */
6843 if (s->cc_op != CC_OP_DYNAMIC)
6844 gen_op_set_cc_op(s->cc_op);
6845 gen_compute_eflags(cpu_cc_src);
6846 tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_C);
6847 s->cc_op = CC_OP_EFLAGS;
6848 break;
6849 case 0xf9: /* stc */
6850 if (s->cc_op != CC_OP_DYNAMIC)
6851 gen_op_set_cc_op(s->cc_op);
6852 gen_compute_eflags(cpu_cc_src);
6853 tcg_gen_ori_tl(cpu_cc_src, cpu_cc_src, CC_C);
6854 s->cc_op = CC_OP_EFLAGS;
6855 break;
6856 case 0xfc: /* cld */
6857 tcg_gen_movi_i32(cpu_tmp2_i32, 1);
6858 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUState, df));
6859 break;
6860 case 0xfd: /* std */
6861 tcg_gen_movi_i32(cpu_tmp2_i32, -1);
6862 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUState, df));
6863 break;
6864
6865 /************************/
6866 /* bit operations */
6867 case 0x1ba: /* bt/bts/btr/btc Gv, im */
6868 ot = dflag + OT_WORD;
6869 modrm = ldub_code(s->pc++);
6870 op = (modrm >> 3) & 7;
6871 mod = (modrm >> 6) & 3;
6872 rm = (modrm & 7) | REX_B(s);
6873 if (mod != 3) {
6874 s->rip_offset = 1;
6875 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
6876 gen_op_ld_T0_A0(ot + s->mem_index);
6877 } else {
6878 gen_op_mov_TN_reg(ot, 0, rm);
6879 }
6880 /* load shift */
6881 val = ldub_code(s->pc++);
6882 gen_op_movl_T1_im(val);
6883 if (op < 4)
6884 goto illegal_op;
6885 op -= 4;
6886 goto bt_op;
6887 case 0x1a3: /* bt Gv, Ev */
6888 op = 0;
6889 goto do_btx;
6890 case 0x1ab: /* bts */
6891 op = 1;
6892 goto do_btx;
6893 case 0x1b3: /* btr */
6894 op = 2;
6895 goto do_btx;
6896 case 0x1bb: /* btc */
6897 op = 3;
6898 do_btx:
6899 ot = dflag + OT_WORD;
6900 modrm = ldub_code(s->pc++);
6901 reg = ((modrm >> 3) & 7) | rex_r;
6902 mod = (modrm >> 6) & 3;
6903 rm = (modrm & 7) | REX_B(s);
6904 gen_op_mov_TN_reg(OT_LONG, 1, reg);
6905 if (mod != 3) {
6906 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
6907 /* specific case: we need to add a displacement */
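                /* The bit index may reach beyond the addressed operand:
                   sign-extend it, divide by the operand width in bits
                   (sar by 3 + ot) to get an element index, then scale by
                   the element size in bytes (shl by ot) to get the byte
                   displacement added to A0. */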
6908 gen_exts(ot, cpu_T[1]);
6909 tcg_gen_sari_tl(cpu_tmp0, cpu_T[1], 3 + ot);
6910 tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, ot);
6911 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
6912 gen_op_ld_T0_A0(ot + s->mem_index);
6913 } else {
6914 gen_op_mov_TN_reg(ot, 0, rm);
6915 }
6916 bt_op:
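            /* Reduce the bit index modulo the operand width; op selects
               bt/bts/btr/btc.  Each variant leaves the tested bit in
               bit 0 of cc_src (via cpu_tmp4 for the writing forms), and
               CC_OP_SARB + ot later derives CF from that bit. */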
6917 tcg_gen_andi_tl(cpu_T[1], cpu_T[1], (1 << (3 + ot)) - 1);
6918 switch(op) {
6919 case 0:
6920 tcg_gen_shr_tl(cpu_cc_src, cpu_T[0], cpu_T[1]);
6921 tcg_gen_movi_tl(cpu_cc_dst, 0);
6922 break;
6923 case 1:
6924 tcg_gen_shr_tl(cpu_tmp4, cpu_T[0], cpu_T[1]);
6925 tcg_gen_movi_tl(cpu_tmp0, 1);
6926 tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T[1]);
6927 tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
6928 break;
6929 case 2:
6930 tcg_gen_shr_tl(cpu_tmp4, cpu_T[0], cpu_T[1]);
6931 tcg_gen_movi_tl(cpu_tmp0, 1);
6932 tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T[1]);
6933 tcg_gen_not_tl(cpu_tmp0, cpu_tmp0);
6934 tcg_gen_and_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
6935 break;
6936 default:
6937 case 3:
6938 tcg_gen_shr_tl(cpu_tmp4, cpu_T[0], cpu_T[1]);
6939 tcg_gen_movi_tl(cpu_tmp0, 1);
6940 tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T[1]);
6941 tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
6942 break;
6943 }
6944 s->cc_op = CC_OP_SARB + ot;
6945 if (op != 0) {
6946 if (mod != 3)
6947 gen_op_st_T0_A0(ot + s->mem_index);
6948 else
6949 gen_op_mov_reg_T0(ot, rm);
6950 tcg_gen_mov_tl(cpu_cc_src, cpu_tmp4);
6951 tcg_gen_movi_tl(cpu_cc_dst, 0);
6952 }
6953 break;
6954 case 0x1bc: /* bsf */
6955 case 0x1bd: /* bsr */
6956 {
6957 int label1;
6958 TCGv t0;
6959
6960 ot = dflag + OT_WORD;
6961 modrm = ldub_code(s->pc++);
6962 reg = ((modrm >> 3) & 7) | rex_r;
6963 gen_ldst_modrm(s,modrm, ot, OR_TMP0, 0);
6964 gen_extu(ot, cpu_T[0]);
6965 t0 = tcg_temp_local_new();
6966 tcg_gen_mov_tl(t0, cpu_T[0]);
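                /* With a REPZ prefix and the ABM CPUID bit, F3 0F BD is
                   LZCNT, which is defined for a zero input.  Plain
                   bsf/bsr leave the destination unchanged when the input
                   is zero: the brcond skips the helper, and cc_dst stays
                   0 so that CC_OP_LOGIC* reports ZF set. */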
6967 if ((b & 1) && (prefixes & PREFIX_REPZ) &&
6968 (s->cpuid_ext3_features & CPUID_EXT3_ABM)) {
6969 switch(ot) {
6970 case OT_WORD: gen_helper_lzcnt(cpu_T[0], t0,
6971 tcg_const_i32(16)); break;
6972 case OT_LONG: gen_helper_lzcnt(cpu_T[0], t0,
6973 tcg_const_i32(32)); break;
6974 case OT_QUAD: gen_helper_lzcnt(cpu_T[0], t0,
6975 tcg_const_i32(64)); break;
6976 }
6977 gen_op_mov_reg_T0(ot, reg);
6978 } else {
6979 label1 = gen_new_label();
6980 tcg_gen_movi_tl(cpu_cc_dst, 0);
6981 tcg_gen_brcondi_tl(TCG_COND_EQ, t0, 0, label1);
6982 if (b & 1) {
6983 gen_helper_bsr(cpu_T[0], t0);
6984 } else {
6985 gen_helper_bsf(cpu_T[0], t0);
6986 }
6987 gen_op_mov_reg_T0(ot, reg);
6988 tcg_gen_movi_tl(cpu_cc_dst, 1);
6989 gen_set_label(label1);
6990 tcg_gen_discard_tl(cpu_cc_src);
6991 s->cc_op = CC_OP_LOGICB + ot;
6992 }
6993 tcg_temp_free(t0);
6994 }
6995 break;
6996 /************************/
6997 /* bcd */
6998 case 0x27: /* daa */
6999 if (CODE64(s))
7000 goto illegal_op;
7001 if (s->cc_op != CC_OP_DYNAMIC)
7002 gen_op_set_cc_op(s->cc_op);
7003 gen_helper_daa();
7004 s->cc_op = CC_OP_EFLAGS;
7005 break;
7006 case 0x2f: /* das */
7007 if (CODE64(s))
7008 goto illegal_op;
7009 if (s->cc_op != CC_OP_DYNAMIC)
7010 gen_op_set_cc_op(s->cc_op);
7011 gen_helper_das();
7012 s->cc_op = CC_OP_EFLAGS;
7013 break;
7014 case 0x37: /* aaa */
7015 if (CODE64(s))
7016 goto illegal_op;
7017 if (s->cc_op != CC_OP_DYNAMIC)
7018 gen_op_set_cc_op(s->cc_op);
7019 gen_helper_aaa();
7020 s->cc_op = CC_OP_EFLAGS;
7021 break;
7022 case 0x3f: /* aas */
7023 if (CODE64(s))
7024 goto illegal_op;
7025 if (s->cc_op != CC_OP_DYNAMIC)
7026 gen_op_set_cc_op(s->cc_op);
7027 gen_helper_aas();
7028 s->cc_op = CC_OP_EFLAGS;
7029 break;
7030 case 0xd4: /* aam */
7031 if (CODE64(s))
7032 goto illegal_op;
7033 val = ldub_code(s->pc++);
7034 if (val == 0) {
7035 gen_exception(s, EXCP00_DIVZ, pc_start - s->cs_base);
7036 } else {
7037 gen_helper_aam(tcg_const_i32(val));
7038 s->cc_op = CC_OP_LOGICB;
7039 }
7040 break;
7041 case 0xd5: /* aad */
7042 if (CODE64(s))
7043 goto illegal_op;
7044 val = ldub_code(s->pc++);
7045 gen_helper_aad(tcg_const_i32(val));
7046 s->cc_op = CC_OP_LOGICB;
7047 break;
7048 /************************/
7049 /* misc */
7050 case 0x90: /* nop */
7051 /* XXX: correct lock test for all insn */
7052 if (prefixes & PREFIX_LOCK) {
7053 goto illegal_op;
7054 }
7055 /* If REX_B is set, then this is xchg eax, r8d, not a nop. */
7056 if (REX_B(s)) {
7057 goto do_xchg_reg_eax;
7058 }
7059 if (prefixes & PREFIX_REPZ) {
7060 gen_svm_check_intercept(s, pc_start, SVM_EXIT_PAUSE);
7061 }
7062 break;
7063 case 0x9b: /* fwait */
7064 if ((s->flags & (HF_MP_MASK | HF_TS_MASK)) ==
7065 (HF_MP_MASK | HF_TS_MASK)) {
7066 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
7067 } else {
7068 if (s->cc_op != CC_OP_DYNAMIC)
7069 gen_op_set_cc_op(s->cc_op);
7070 gen_jmp_im(pc_start - s->cs_base);
7071 gen_helper_fwait();
7072 }
7073 break;
7074 case 0xcc: /* int3 */
7075#ifdef VBOX
7076 if (s->vm86 && s->iopl != 3 && !s->vme) {
7077 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7078 } else
7079#endif
7080 gen_interrupt(s, EXCP03_INT3, pc_start - s->cs_base, s->pc - s->cs_base);
7081 break;
7082 case 0xcd: /* int N */
7083 val = ldub_code(s->pc++);
7084#ifdef VBOX
7085 if (s->vm86 && s->iopl != 3 && !s->vme) {
7086#else
7087 if (s->vm86 && s->iopl != 3) {
7088#endif
7089 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7090 } else {
7091 gen_interrupt(s, val, pc_start - s->cs_base, s->pc - s->cs_base);
7092 }
7093 break;
7094 case 0xce: /* into */
7095 if (CODE64(s))
7096 goto illegal_op;
7097 if (s->cc_op != CC_OP_DYNAMIC)
7098 gen_op_set_cc_op(s->cc_op);
7099 gen_jmp_im(pc_start - s->cs_base);
7100 gen_helper_into(tcg_const_i32(s->pc - pc_start));
7101 break;
7102#ifdef WANT_ICEBP
7103 case 0xf1: /* icebp (undocumented, exits to external debugger) */
7104 gen_svm_check_intercept(s, pc_start, SVM_EXIT_ICEBP);
7105#if 1
7106 gen_debug(s, pc_start - s->cs_base);
7107#else
7108 /* start debug */
7109 tb_flush(cpu_single_env);
7110 cpu_set_log(CPU_LOG_INT | CPU_LOG_TB_IN_ASM);
7111#endif
7112 break;
7113#endif
7114 case 0xfa: /* cli */
7115 if (!s->vm86) {
7116 if (s->cpl <= s->iopl) {
7117 gen_helper_cli();
7118 } else {
7119 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7120 }
7121 } else {
7122 if (s->iopl == 3) {
7123 gen_helper_cli();
7124#ifdef VBOX
7125 } else if (s->iopl != 3 && s->vme) {
7126 gen_helper_cli_vme();
7127#endif
7128 } else {
7129 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7130 }
7131 }
7132 break;
7133 case 0xfb: /* sti */
7134 if (!s->vm86) {
7135 if (s->cpl <= s->iopl) {
7136 gen_sti:
7137 gen_helper_sti();
7138                    /* interrupts are not recognized until after the insn following sti */
7139                    /* if several consecutive insns inhibit interrupts, only the
7140                       _first_ one actually sets the inhibit flag */
7141 if (!(s->tb->flags & HF_INHIBIT_IRQ_MASK))
7142 gen_helper_set_inhibit_irq();
7143 /* give a chance to handle pending irqs */
7144 gen_jmp_im(s->pc - s->cs_base);
7145 gen_eob(s);
7146 } else {
7147 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7148 }
7149 } else {
7150 if (s->iopl == 3) {
7151 goto gen_sti;
7152#ifdef VBOX
7153 } else if (s->iopl != 3 && s->vme) {
7154 gen_helper_sti_vme();
7155 /* give a chance to handle pending irqs */
7156 gen_jmp_im(s->pc - s->cs_base);
7157 gen_eob(s);
7158#endif
7159 } else {
7160 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7161 }
7162 }
7163 break;
7164 case 0x62: /* bound */
7165 if (CODE64(s))
7166 goto illegal_op;
7167 ot = dflag ? OT_LONG : OT_WORD;
7168 modrm = ldub_code(s->pc++);
7169 reg = (modrm >> 3) & 7;
7170 mod = (modrm >> 6) & 3;
7171 if (mod == 3)
7172 goto illegal_op;
7173 gen_op_mov_TN_reg(ot, 0, reg);
7174 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
7175 gen_jmp_im(pc_start - s->cs_base);
7176 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
7177 if (ot == OT_WORD)
7178 gen_helper_boundw(cpu_A0, cpu_tmp2_i32);
7179 else
7180 gen_helper_boundl(cpu_A0, cpu_tmp2_i32);
7181 break;
7182 case 0x1c8 ... 0x1cf: /* bswap reg */
7183 reg = (b & 7) | REX_B(s);
7184#ifdef TARGET_X86_64
7185 if (dflag == 2) {
7186 gen_op_mov_TN_reg(OT_QUAD, 0, reg);
7187 tcg_gen_bswap64_i64(cpu_T[0], cpu_T[0]);
7188 gen_op_mov_reg_T0(OT_QUAD, reg);
7189 } else
7190#endif
7191 {
7192 gen_op_mov_TN_reg(OT_LONG, 0, reg);
7193 tcg_gen_ext32u_tl(cpu_T[0], cpu_T[0]);
7194 tcg_gen_bswap32_tl(cpu_T[0], cpu_T[0]);
7195 gen_op_mov_reg_T0(OT_LONG, reg);
7196 }
7197 break;
7198 case 0xd6: /* salc */
7199 if (CODE64(s))
7200 goto illegal_op;
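            /* Undocumented SALC: AL = CF ? 0xff : 0x00.  The computed
               carry (0 or 1) is negated to yield 0 or -1, whose low byte
               lands in AL. */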
7201 if (s->cc_op != CC_OP_DYNAMIC)
7202 gen_op_set_cc_op(s->cc_op);
7203 gen_compute_eflags_c(cpu_T[0]);
7204 tcg_gen_neg_tl(cpu_T[0], cpu_T[0]);
7205 gen_op_mov_reg_T0(OT_BYTE, R_EAX);
7206 break;
7207 case 0xe0: /* loopnz */
7208 case 0xe1: /* loopz */
7209 case 0xe2: /* loop */
7210 case 0xe3: /* jecxz */
7211 {
7212 int l1, l2, l3;
7213
7214 tval = (int8_t)insn_get(s, OT_BYTE);
7215 next_eip = s->pc - s->cs_base;
7216 tval += next_eip;
7217 if (s->dflag == 0)
7218 tval &= 0xffff;
7219
7220 l1 = gen_new_label();
7221 l2 = gen_new_label();
7222 l3 = gen_new_label();
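                /* l1 is the branch-taken target, l2 the common join
                   point, and l3 the early exit used by loopnz/loopz when
                   ECX hits zero before the flag test. */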
7223 b &= 3;
7224 switch(b) {
7225 case 0: /* loopnz */
7226 case 1: /* loopz */
7227 if (s->cc_op != CC_OP_DYNAMIC)
7228 gen_op_set_cc_op(s->cc_op);
7229 gen_op_add_reg_im(s->aflag, R_ECX, -1);
7230 gen_op_jz_ecx(s->aflag, l3);
7231 gen_compute_eflags(cpu_tmp0);
7232 tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, CC_Z);
7233 if (b == 0) {
7234 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_tmp0, 0, l1);
7235 } else {
7236 tcg_gen_brcondi_tl(TCG_COND_NE, cpu_tmp0, 0, l1);
7237 }
7238 break;
7239 case 2: /* loop */
7240 gen_op_add_reg_im(s->aflag, R_ECX, -1);
7241 gen_op_jnz_ecx(s->aflag, l1);
7242 break;
7243 default:
7244 case 3: /* jcxz */
7245 gen_op_jz_ecx(s->aflag, l1);
7246 break;
7247 }
7248
7249 gen_set_label(l3);
7250 gen_jmp_im(next_eip);
7251 tcg_gen_br(l2);
7252
7253 gen_set_label(l1);
7254 gen_jmp_im(tval);
7255 gen_set_label(l2);
7256 gen_eob(s);
7257 }
7258 break;
7259 case 0x130: /* wrmsr */
7260 case 0x132: /* rdmsr */
7261 if (s->cpl != 0) {
7262 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7263 } else {
7264 if (s->cc_op != CC_OP_DYNAMIC)
7265 gen_op_set_cc_op(s->cc_op);
7266 gen_jmp_im(pc_start - s->cs_base);
7267 if (b & 2) {
7268 gen_helper_rdmsr();
7269 } else {
7270 gen_helper_wrmsr();
7271 }
7272 }
7273 break;
7274 case 0x131: /* rdtsc */
7275 if (s->cc_op != CC_OP_DYNAMIC)
7276 gen_op_set_cc_op(s->cc_op);
7277 gen_jmp_im(pc_start - s->cs_base);
7278 if (use_icount)
7279 gen_io_start();
7280 gen_helper_rdtsc();
7281 if (use_icount) {
7282 gen_io_end();
7283 gen_jmp(s, s->pc - s->cs_base);
7284 }
7285 break;
7286 case 0x133: /* rdpmc */
7287 if (s->cc_op != CC_OP_DYNAMIC)
7288 gen_op_set_cc_op(s->cc_op);
7289 gen_jmp_im(pc_start - s->cs_base);
7290 gen_helper_rdpmc();
7291 break;
7292 case 0x134: /* sysenter */
7293#ifndef VBOX
7294        /* On Intel CPUs, SYSENTER remains valid in 64-bit mode */
7295 if (CODE64(s) && cpu_single_env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1)
7296#else
7297 if ( !(cpu_single_env->cpuid_features & CPUID_SEP)
7298 || ( IS_LONG_MODE(s)
7299 && CPUMGetGuestCpuVendor(cpu_single_env->pVM) != CPUMCPUVENDOR_INTEL))
7300#endif
7301 goto illegal_op;
7302 if (!s->pe) {
7303 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7304 } else {
7305 gen_update_cc_op(s);
7306 gen_jmp_im(pc_start - s->cs_base);
7307 gen_helper_sysenter();
7308 gen_eob(s);
7309 }
7310 break;
7311 case 0x135: /* sysexit */
7312#ifndef VBOX
7313        /* On Intel CPUs, SYSEXIT remains valid in 64-bit mode */
7314 if (CODE64(s) && cpu_single_env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1)
7315#else
7316 if ( !(cpu_single_env->cpuid_features & CPUID_SEP)
7317 || ( IS_LONG_MODE(s)
7318 && CPUMGetGuestCpuVendor(cpu_single_env->pVM) != CPUMCPUVENDOR_INTEL))
7319#endif
7320 goto illegal_op;
7321 if (!s->pe) {
7322 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7323 } else {
7324 gen_update_cc_op(s);
7325 gen_jmp_im(pc_start - s->cs_base);
7326 gen_helper_sysexit(tcg_const_i32(dflag));
7327 gen_eob(s);
7328 }
7329 break;
7330#ifdef TARGET_X86_64
7331 case 0x105: /* syscall */
7332 /* XXX: is it usable in real mode ? */
7333 gen_update_cc_op(s);
7334 gen_jmp_im(pc_start - s->cs_base);
7335 gen_helper_syscall(tcg_const_i32(s->pc - pc_start));
7336 gen_eob(s);
7337 break;
7338 case 0x107: /* sysret */
7339 if (!s->pe) {
7340 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7341 } else {
7342 gen_update_cc_op(s);
7343 gen_jmp_im(pc_start - s->cs_base);
7344 gen_helper_sysret(tcg_const_i32(s->dflag));
7345 /* condition codes are modified only in long mode */
7346 if (s->lma)
7347 s->cc_op = CC_OP_EFLAGS;
7348 gen_eob(s);
7349 }
7350 break;
7351#endif
7352 case 0x1a2: /* cpuid */
7353 if (s->cc_op != CC_OP_DYNAMIC)
7354 gen_op_set_cc_op(s->cc_op);
7355 gen_jmp_im(pc_start - s->cs_base);
7356 gen_helper_cpuid();
7357 break;
7358 case 0xf4: /* hlt */
7359 if (s->cpl != 0) {
7360 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7361 } else {
7362 if (s->cc_op != CC_OP_DYNAMIC)
7363 gen_op_set_cc_op(s->cc_op);
7364 gen_jmp_im(pc_start - s->cs_base);
7365 gen_helper_hlt(tcg_const_i32(s->pc - pc_start));
7366 s->is_jmp = DISAS_TB_JUMP;
7367 }
7368 break;
7369 case 0x100:
7370 modrm = ldub_code(s->pc++);
7371 mod = (modrm >> 6) & 3;
7372 op = (modrm >> 3) & 7;
7373 switch(op) {
7374 case 0: /* sldt */
7375 if (!s->pe || s->vm86)
7376 goto illegal_op;
7377 gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_READ);
7378 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,ldt.selector));
7379 ot = OT_WORD;
7380 if (mod == 3)
7381 ot += s->dflag;
7382 gen_ldst_modrm(s, modrm, ot, OR_TMP0, 1);
7383 break;
7384 case 2: /* lldt */
7385 if (!s->pe || s->vm86)
7386 goto illegal_op;
7387 if (s->cpl != 0) {
7388 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7389 } else {
7390 gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_WRITE);
7391 gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0);
7392 gen_jmp_im(pc_start - s->cs_base);
7393 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
7394 gen_helper_lldt(cpu_tmp2_i32);
7395 }
7396 break;
7397 case 1: /* str */
7398 if (!s->pe || s->vm86)
7399 goto illegal_op;
7400 gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_READ);
7401 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,tr.selector));
7402 ot = OT_WORD;
7403 if (mod == 3)
7404 ot += s->dflag;
7405 gen_ldst_modrm(s, modrm, ot, OR_TMP0, 1);
7406 break;
7407 case 3: /* ltr */
7408 if (!s->pe || s->vm86)
7409 goto illegal_op;
7410 if (s->cpl != 0) {
7411 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7412 } else {
7413 gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_WRITE);
7414 gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0);
7415 gen_jmp_im(pc_start - s->cs_base);
7416 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
7417 gen_helper_ltr(cpu_tmp2_i32);
7418 }
7419 break;
7420 case 4: /* verr */
7421 case 5: /* verw */
7422 if (!s->pe || s->vm86)
7423 goto illegal_op;
7424 gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0);
7425 if (s->cc_op != CC_OP_DYNAMIC)
7426 gen_op_set_cc_op(s->cc_op);
7427 if (op == 4)
7428 gen_helper_verr(cpu_T[0]);
7429 else
7430 gen_helper_verw(cpu_T[0]);
7431 s->cc_op = CC_OP_EFLAGS;
7432 break;
7433 default:
7434 goto illegal_op;
7435 }
7436 break;
7437 case 0x101:
7438 modrm = ldub_code(s->pc++);
7439 mod = (modrm >> 6) & 3;
7440 op = (modrm >> 3) & 7;
7441 rm = modrm & 7;
7442 switch(op) {
7443 case 0: /* sgdt */
7444 if (mod == 3)
7445 goto illegal_op;
7446 gen_svm_check_intercept(s, pc_start, SVM_EXIT_GDTR_READ);
7447 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
7448 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, gdt.limit));
7449 gen_op_st_T0_A0(OT_WORD + s->mem_index);
7450 gen_add_A0_im(s, 2);
7451 tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, gdt.base));
7452 if (!s->dflag)
7453 gen_op_andl_T0_im(0xffffff);
7454 gen_op_st_T0_A0(CODE64(s) + OT_LONG + s->mem_index);
7455 break;
7456 case 1:
7457 if (mod == 3) {
7458 switch (rm) {
7459 case 0: /* monitor */
7460 if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) ||
7461 s->cpl != 0)
7462 goto illegal_op;
7463 if (s->cc_op != CC_OP_DYNAMIC)
7464 gen_op_set_cc_op(s->cc_op);
7465 gen_jmp_im(pc_start - s->cs_base);
7466#ifdef TARGET_X86_64
7467 if (s->aflag == 2) {
7468 gen_op_movq_A0_reg(R_EAX);
7469 } else
7470#endif
7471 {
7472 gen_op_movl_A0_reg(R_EAX);
7473 if (s->aflag == 0)
7474 gen_op_andl_A0_ffff();
7475 }
7476 gen_add_A0_ds_seg(s);
7477 gen_helper_monitor(cpu_A0);
7478 break;
7479 case 1: /* mwait */
7480 if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) ||
7481 s->cpl != 0)
7482 goto illegal_op;
7483 gen_update_cc_op(s);
7484 gen_jmp_im(pc_start - s->cs_base);
7485 gen_helper_mwait(tcg_const_i32(s->pc - pc_start));
7486 gen_eob(s);
7487 break;
7488 default:
7489 goto illegal_op;
7490 }
7491 } else { /* sidt */
7492 gen_svm_check_intercept(s, pc_start, SVM_EXIT_IDTR_READ);
7493 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
7494 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, idt.limit));
7495 gen_op_st_T0_A0(OT_WORD + s->mem_index);
7496 gen_add_A0_im(s, 2);
7497 tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, idt.base));
7498 if (!s->dflag)
7499 gen_op_andl_T0_im(0xffffff);
7500 gen_op_st_T0_A0(CODE64(s) + OT_LONG + s->mem_index);
7501 }
7502 break;
7503 case 2: /* lgdt */
7504 case 3: /* lidt */
7505 if (mod == 3) {
7506 if (s->cc_op != CC_OP_DYNAMIC)
7507 gen_op_set_cc_op(s->cc_op);
7508 gen_jmp_im(pc_start - s->cs_base);
7509 switch(rm) {
7510 case 0: /* VMRUN */
7511 if (!(s->flags & HF_SVME_MASK) || !s->pe)
7512 goto illegal_op;
7513 if (s->cpl != 0) {
7514 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7515 break;
7516 } else {
7517 gen_helper_vmrun(tcg_const_i32(s->aflag),
7518 tcg_const_i32(s->pc - pc_start));
7519 tcg_gen_exit_tb(0);
7520 s->is_jmp = DISAS_TB_JUMP;
7521 }
7522 break;
7523 case 1: /* VMMCALL */
7524 if (!(s->flags & HF_SVME_MASK))
7525 goto illegal_op;
7526 gen_helper_vmmcall();
7527 break;
7528 case 2: /* VMLOAD */
7529 if (!(s->flags & HF_SVME_MASK) || !s->pe)
7530 goto illegal_op;
7531 if (s->cpl != 0) {
7532 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7533 break;
7534 } else {
7535 gen_helper_vmload(tcg_const_i32(s->aflag));
7536 }
7537 break;
7538 case 3: /* VMSAVE */
7539 if (!(s->flags & HF_SVME_MASK) || !s->pe)
7540 goto illegal_op;
7541 if (s->cpl != 0) {
7542 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7543 break;
7544 } else {
7545 gen_helper_vmsave(tcg_const_i32(s->aflag));
7546 }
7547 break;
7548 case 4: /* STGI */
7549 if ((!(s->flags & HF_SVME_MASK) &&
7550 !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT)) ||
7551 !s->pe)
7552 goto illegal_op;
7553 if (s->cpl != 0) {
7554 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7555 break;
7556 } else {
7557 gen_helper_stgi();
7558 }
7559 break;
7560 case 5: /* CLGI */
7561 if (!(s->flags & HF_SVME_MASK) || !s->pe)
7562 goto illegal_op;
7563 if (s->cpl != 0) {
7564 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7565 break;
7566 } else {
7567 gen_helper_clgi();
7568 }
7569 break;
7570 case 6: /* SKINIT */
7571 if ((!(s->flags & HF_SVME_MASK) &&
7572 !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT)) ||
7573 !s->pe)
7574 goto illegal_op;
7575 gen_helper_skinit();
7576 break;
7577 case 7: /* INVLPGA */
7578 if (!(s->flags & HF_SVME_MASK) || !s->pe)
7579 goto illegal_op;
7580 if (s->cpl != 0) {
7581 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7582 break;
7583 } else {
7584 gen_helper_invlpga(tcg_const_i32(s->aflag));
7585 }
7586 break;
7587 default:
7588 goto illegal_op;
7589 }
7590 } else if (s->cpl != 0) {
7591 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7592 } else {
7593 gen_svm_check_intercept(s, pc_start,
7594 op==2 ? SVM_EXIT_GDTR_WRITE : SVM_EXIT_IDTR_WRITE);
7595 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
7596 gen_op_ld_T1_A0(OT_WORD + s->mem_index);
7597 gen_add_A0_im(s, 2);
7598 gen_op_ld_T0_A0(CODE64(s) + OT_LONG + s->mem_index);
7599 if (!s->dflag)
7600 gen_op_andl_T0_im(0xffffff);
7601 if (op == 2) {
7602 tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,gdt.base));
7603 tcg_gen_st32_tl(cpu_T[1], cpu_env, offsetof(CPUX86State,gdt.limit));
7604 } else {
7605 tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,idt.base));
7606 tcg_gen_st32_tl(cpu_T[1], cpu_env, offsetof(CPUX86State,idt.limit));
7607 }
7608 }
7609 break;
7610 case 4: /* smsw */
7611 gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_CR0);
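            /* smsw reads only the low 32 bits of CR0; on a big-endian
               host with a 64-bit target those live at offset +4 inside
               the 64-bit cr[0] field. */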
7612#if defined TARGET_X86_64 && defined HOST_WORDS_BIGENDIAN
7613 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,cr[0]) + 4);
7614#else
7615 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,cr[0]));
7616#endif
7617 gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 1);
7618 break;
7619 case 6: /* lmsw */
7620 if (s->cpl != 0) {
7621 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7622 } else {
7623 gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0);
7624 gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0);
7625 gen_helper_lmsw(cpu_T[0]);
7626 gen_jmp_im(s->pc - s->cs_base);
7627 gen_eob(s);
7628 }
7629 break;
7630 case 7:
7631 if (mod != 3) { /* invlpg */
7632 if (s->cpl != 0) {
7633 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7634 } else {
7635 if (s->cc_op != CC_OP_DYNAMIC)
7636 gen_op_set_cc_op(s->cc_op);
7637 gen_jmp_im(pc_start - s->cs_base);
7638 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
7639 gen_helper_invlpg(cpu_A0);
7640 gen_jmp_im(s->pc - s->cs_base);
7641 gen_eob(s);
7642 }
7643 } else {
7644 switch (rm) {
7645 case 0: /* swapgs */
7646#ifdef TARGET_X86_64
7647 if (CODE64(s)) {
7648 if (s->cpl != 0) {
7649 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7650 } else {
7651 tcg_gen_ld_tl(cpu_T[0], cpu_env,
7652 offsetof(CPUX86State,segs[R_GS].base));
7653 tcg_gen_ld_tl(cpu_T[1], cpu_env,
7654 offsetof(CPUX86State,kernelgsbase));
7655 tcg_gen_st_tl(cpu_T[1], cpu_env,
7656 offsetof(CPUX86State,segs[R_GS].base));
7657 tcg_gen_st_tl(cpu_T[0], cpu_env,
7658 offsetof(CPUX86State,kernelgsbase));
7659 }
7660 } else
7661#endif
7662 {
7663 goto illegal_op;
7664 }
7665 break;
7666 case 1: /* rdtscp */
7667 if (!(s->cpuid_ext2_features & CPUID_EXT2_RDTSCP))
7668 goto illegal_op;
7669 if (s->cc_op != CC_OP_DYNAMIC)
7670 gen_op_set_cc_op(s->cc_op);
7671 gen_jmp_im(pc_start - s->cs_base);
7672 if (use_icount)
7673 gen_io_start();
7674 gen_helper_rdtscp();
7675 if (use_icount) {
7676 gen_io_end();
7677 gen_jmp(s, s->pc - s->cs_base);
7678 }
7679 break;
7680 default:
7681 goto illegal_op;
7682 }
7683 }
7684 break;
7685 default:
7686 goto illegal_op;
7687 }
7688 break;
7689 case 0x108: /* invd */
7690 case 0x109: /* wbinvd */
7691 if (s->cpl != 0) {
7692 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7693 } else {
7694 gen_svm_check_intercept(s, pc_start, (b & 2) ? SVM_EXIT_INVD : SVM_EXIT_WBINVD);
7695 /* nothing to do */
7696 }
7697 break;
7698 case 0x63: /* arpl or movslS (x86_64) */
7699#ifdef TARGET_X86_64
7700 if (CODE64(s)) {
7701 int d_ot;
7702 /* d_ot is the size of destination */
7703 d_ot = dflag + OT_WORD;
7704
7705 modrm = ldub_code(s->pc++);
7706 reg = ((modrm >> 3) & 7) | rex_r;
7707 mod = (modrm >> 6) & 3;
7708 rm = (modrm & 7) | REX_B(s);
7709
7710 if (mod == 3) {
7711 gen_op_mov_TN_reg(OT_LONG, 0, rm);
7712 /* sign extend */
7713 if (d_ot == OT_QUAD)
7714 tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
7715 gen_op_mov_reg_T0(d_ot, reg);
7716 } else {
7717 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
7718 if (d_ot == OT_QUAD) {
7719 gen_op_lds_T0_A0(OT_LONG + s->mem_index);
7720 } else {
7721 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
7722 }
7723 gen_op_mov_reg_T0(d_ot, reg);
7724 }
7725 } else
7726#endif
7727 {
7728 int label1;
7729 TCGv t0, t1, t2, a0;
7730
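                /* ARPL: if the RPL (low two bits) of the destination
                   selector is below the RPL of the source, raise it to
                   the source RPL and set ZF, otherwise clear ZF.  t2
                   holds the CC_Z bit merged into EFLAGS below. */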
7731 if (!s->pe || s->vm86)
7732 goto illegal_op;
7733 t0 = tcg_temp_local_new();
7734 t1 = tcg_temp_local_new();
7735 t2 = tcg_temp_local_new();
7736 ot = OT_WORD;
7737 modrm = ldub_code(s->pc++);
7738 reg = (modrm >> 3) & 7;
7739 mod = (modrm >> 6) & 3;
7740 rm = modrm & 7;
7741 if (mod != 3) {
7742 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
7743 gen_op_ld_v(ot + s->mem_index, t0, cpu_A0);
7744 a0 = tcg_temp_local_new();
7745 tcg_gen_mov_tl(a0, cpu_A0);
7746 } else {
7747 gen_op_mov_v_reg(ot, t0, rm);
7748 TCGV_UNUSED(a0);
7749 }
7750 gen_op_mov_v_reg(ot, t1, reg);
7751 tcg_gen_andi_tl(cpu_tmp0, t0, 3);
7752 tcg_gen_andi_tl(t1, t1, 3);
7753 tcg_gen_movi_tl(t2, 0);
7754 label1 = gen_new_label();
7755 tcg_gen_brcond_tl(TCG_COND_GE, cpu_tmp0, t1, label1);
7756 tcg_gen_andi_tl(t0, t0, ~3);
7757 tcg_gen_or_tl(t0, t0, t1);
7758 tcg_gen_movi_tl(t2, CC_Z);
7759 gen_set_label(label1);
7760 if (mod != 3) {
7761 gen_op_st_v(ot + s->mem_index, t0, a0);
7762 tcg_temp_free(a0);
7763 } else {
7764 gen_op_mov_reg_v(ot, rm, t0);
7765 }
7766 if (s->cc_op != CC_OP_DYNAMIC)
7767 gen_op_set_cc_op(s->cc_op);
7768 gen_compute_eflags(cpu_cc_src);
7769 tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_Z);
7770 tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t2);
7771 s->cc_op = CC_OP_EFLAGS;
7772 tcg_temp_free(t0);
7773 tcg_temp_free(t1);
7774 tcg_temp_free(t2);
7775 }
7776 break;
7777 case 0x102: /* lar */
7778 case 0x103: /* lsl */
7779 {
7780 int label1;
7781 TCGv t0;
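                /* The lar/lsl helpers validate the selector and set ZF
                   in cc_src on success; the destination register is only
                   written when ZF is set, hence the conditional branch on
                   CC_Z below. */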
7782 if (!s->pe || s->vm86)
7783 goto illegal_op;
7784 ot = dflag ? OT_LONG : OT_WORD;
7785 modrm = ldub_code(s->pc++);
7786 reg = ((modrm >> 3) & 7) | rex_r;
7787 gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0);
7788 t0 = tcg_temp_local_new();
7789 if (s->cc_op != CC_OP_DYNAMIC)
7790 gen_op_set_cc_op(s->cc_op);
7791 if (b == 0x102)
7792 gen_helper_lar(t0, cpu_T[0]);
7793 else
7794 gen_helper_lsl(t0, cpu_T[0]);
7795 tcg_gen_andi_tl(cpu_tmp0, cpu_cc_src, CC_Z);
7796 label1 = gen_new_label();
7797 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_tmp0, 0, label1);
7798 gen_op_mov_reg_v(ot, reg, t0);
7799 gen_set_label(label1);
7800 s->cc_op = CC_OP_EFLAGS;
7801 tcg_temp_free(t0);
7802 }
7803 break;
7804 case 0x118:
7805 modrm = ldub_code(s->pc++);
7806 mod = (modrm >> 6) & 3;
7807 op = (modrm >> 3) & 7;
7808 switch(op) {
7809 case 0: /* prefetchnta */
7810        case 1: /* prefetcht0 */
7811        case 2: /* prefetcht1 */
7812        case 3: /* prefetcht2 */
7813 if (mod == 3)
7814 goto illegal_op;
7815 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
7816 /* nothing more to do */
7817 break;
7818 default: /* nop (multi byte) */
7819 gen_nop_modrm(s, modrm);
7820 break;
7821 }
7822 break;
7823 case 0x119 ... 0x11f: /* nop (multi byte) */
7824 modrm = ldub_code(s->pc++);
7825 gen_nop_modrm(s, modrm);
7826 break;
7827 case 0x120: /* mov reg, crN */
7828 case 0x122: /* mov crN, reg */
7829 if (s->cpl != 0) {
7830 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7831 } else {
7832 modrm = ldub_code(s->pc++);
7833#ifndef VBOX /* mod bits are always understood to be 11 (0xc0) regardless of actual content; see AMD manuals */
7834 if ((modrm & 0xc0) != 0xc0)
7835 goto illegal_op;
7836#endif
7837 rm = (modrm & 7) | REX_B(s);
7838 reg = ((modrm >> 3) & 7) | rex_r;
7839 if (CODE64(s))
7840 ot = OT_QUAD;
7841 else
7842 ot = OT_LONG;
7843 if ((prefixes & PREFIX_LOCK) && (reg == 0) &&
7844 (s->cpuid_ext3_features & CPUID_EXT3_CR8LEG)) {
7845 reg = 8;
7846 }
7847 switch(reg) {
7848 case 0:
7849 case 2:
7850 case 3:
7851 case 4:
7852 case 8:
7853 if (s->cc_op != CC_OP_DYNAMIC)
7854 gen_op_set_cc_op(s->cc_op);
7855 gen_jmp_im(pc_start - s->cs_base);
7856 if (b & 2) {
7857 gen_op_mov_TN_reg(ot, 0, rm);
7858 gen_helper_write_crN(tcg_const_i32(reg), cpu_T[0]);
7859 gen_jmp_im(s->pc - s->cs_base);
7860 gen_eob(s);
7861 } else {
7862 gen_helper_read_crN(cpu_T[0], tcg_const_i32(reg));
7863 gen_op_mov_reg_T0(ot, rm);
7864 }
7865 break;
7866 default:
7867 goto illegal_op;
7868 }
7869 }
7870 break;
7871 case 0x121: /* mov reg, drN */
7872 case 0x123: /* mov drN, reg */
7873 if (s->cpl != 0) {
7874 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7875 } else {
7876 modrm = ldub_code(s->pc++);
7877#ifndef VBOX /* mod bits are always understood to be 11 (0xc0) regardless of actual content; see AMD manuals */
7878 if ((modrm & 0xc0) != 0xc0)
7879 goto illegal_op;
7880#endif
7881 rm = (modrm & 7) | REX_B(s);
7882 reg = ((modrm >> 3) & 7) | rex_r;
7883 if (CODE64(s))
7884 ot = OT_QUAD;
7885 else
7886 ot = OT_LONG;
7887 /* XXX: do it dynamically with CR4.DE bit */
7888 if (reg == 4 || reg == 5 || reg >= 8)
7889 goto illegal_op;
7890 if (b & 2) {
7891 gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_DR0 + reg);
7892 gen_op_mov_TN_reg(ot, 0, rm);
7893 gen_helper_movl_drN_T0(tcg_const_i32(reg), cpu_T[0]);
7894 gen_jmp_im(s->pc - s->cs_base);
7895 gen_eob(s);
7896 } else {
7897 gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_DR0 + reg);
7898 tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,dr[reg]));
7899 gen_op_mov_reg_T0(ot, rm);
7900 }
7901 }
7902 break;
7903 case 0x106: /* clts */
7904 if (s->cpl != 0) {
7905 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7906 } else {
7907 gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0);
7908 gen_helper_clts();
7909 /* abort block because static cpu state changed */
7910 gen_jmp_im(s->pc - s->cs_base);
7911 gen_eob(s);
7912 }
7913 break;
7914 /* MMX/3DNow!/SSE/SSE2/SSE3/SSSE3/SSE4 support */
7915 case 0x1c3: /* MOVNTI reg, mem */
7916 if (!(s->cpuid_features & CPUID_SSE2))
7917 goto illegal_op;
7918 ot = s->dflag == 2 ? OT_QUAD : OT_LONG;
7919 modrm = ldub_code(s->pc++);
7920 mod = (modrm >> 6) & 3;
7921 if (mod == 3)
7922 goto illegal_op;
7923 reg = ((modrm >> 3) & 7) | rex_r;
7924 /* generate a generic store */
7925 gen_ldst_modrm(s, modrm, ot, reg, 1);
7926 break;
7927 case 0x1ae:
7928 modrm = ldub_code(s->pc++);
7929 mod = (modrm >> 6) & 3;
7930 op = (modrm >> 3) & 7;
7931 switch(op) {
7932 case 0: /* fxsave */
7933 if (mod == 3 || !(s->cpuid_features & CPUID_FXSR) ||
7934 (s->prefix & PREFIX_LOCK))
7935 goto illegal_op;
7936 if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
7937 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
7938 break;
7939 }
7940 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
7941 if (s->cc_op != CC_OP_DYNAMIC)
7942 gen_op_set_cc_op(s->cc_op);
7943 gen_jmp_im(pc_start - s->cs_base);
7944 gen_helper_fxsave(cpu_A0, tcg_const_i32((s->dflag == 2)));
7945 break;
7946 case 1: /* fxrstor */
7947 if (mod == 3 || !(s->cpuid_features & CPUID_FXSR) ||
7948 (s->prefix & PREFIX_LOCK))
7949 goto illegal_op;
7950 if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
7951 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
7952 break;
7953 }
7954 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
7955 if (s->cc_op != CC_OP_DYNAMIC)
7956 gen_op_set_cc_op(s->cc_op);
7957 gen_jmp_im(pc_start - s->cs_base);
7958 gen_helper_fxrstor(cpu_A0, tcg_const_i32((s->dflag == 2)));
7959 break;
7960 case 2: /* ldmxcsr */
7961 case 3: /* stmxcsr */
7962 if (s->flags & HF_TS_MASK) {
7963 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
7964 break;
7965 }
7966 if ((s->flags & HF_EM_MASK) || !(s->flags & HF_OSFXSR_MASK) ||
7967 mod == 3)
7968 goto illegal_op;
7969 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
7970 if (op == 2) {
7971 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
7972 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, mxcsr));
7973 } else {
7974 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, mxcsr));
7975 gen_op_st_T0_A0(OT_LONG + s->mem_index);
7976 }
7977 break;
7978 case 5: /* lfence */
7979 case 6: /* mfence */
7980 if ((modrm & 0xc7) != 0xc0 || !(s->cpuid_features & CPUID_SSE))
7981 goto illegal_op;
7982 break;
7983 case 7: /* sfence / clflush */
7984 if ((modrm & 0xc7) == 0xc0) {
7985 /* sfence */
7986 /* XXX: also check for cpuid_ext2_features & CPUID_EXT2_EMMX */
7987 if (!(s->cpuid_features & CPUID_SSE))
7988 goto illegal_op;
7989 } else {
7990 /* clflush */
7991 if (!(s->cpuid_features & CPUID_CLFLUSH))
7992 goto illegal_op;
7993 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
7994 }
7995 break;
7996 default:
7997 goto illegal_op;
7998 }
7999 break;
8000 case 0x10d: /* 3DNow! prefetch(w) */
8001 modrm = ldub_code(s->pc++);
8002 mod = (modrm >> 6) & 3;
8003 if (mod == 3)
8004 goto illegal_op;
8005 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
8006 /* ignore for now */
8007 break;
8008 case 0x1aa: /* rsm */
8009 gen_svm_check_intercept(s, pc_start, SVM_EXIT_RSM);
8010 if (!(s->flags & HF_SMM_MASK))
8011 goto illegal_op;
8012 gen_update_cc_op(s);
8013 gen_jmp_im(s->pc - s->cs_base);
8014 gen_helper_rsm();
8015 gen_eob(s);
8016 break;
8017 case 0x1b8: /* SSE4.2 popcnt */
8018 if ((prefixes & (PREFIX_REPZ | PREFIX_LOCK | PREFIX_REPNZ)) !=
8019 PREFIX_REPZ)
8020 goto illegal_op;
8021 if (!(s->cpuid_ext_features & CPUID_EXT_POPCNT))
8022 goto illegal_op;
8023
8024 modrm = ldub_code(s->pc++);
8025 reg = ((modrm >> 3) & 7);
8026
8027 if (s->prefix & PREFIX_DATA)
8028 ot = OT_WORD;
8029 else if (s->dflag != 2)
8030 ot = OT_LONG;
8031 else
8032 ot = OT_QUAD;
8033
8034 gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0);
8035 gen_helper_popcnt(cpu_T[0], cpu_T[0], tcg_const_i32(ot));
8036 gen_op_mov_reg_T0(ot, reg);
8037
8038 s->cc_op = CC_OP_EFLAGS;
8039 break;
8040 case 0x10e ... 0x10f:
8041 /* 3DNow! instructions, ignore prefixes */
8042 s->prefix &= ~(PREFIX_REPZ | PREFIX_REPNZ | PREFIX_DATA);
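        /* fall through - the 3DNow! opcodes share the gen_sse() dispatch
           with the SSE/SSE2/SSSE3 ranges below */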
8043 case 0x110 ... 0x117:
8044 case 0x128 ... 0x12f:
8045 case 0x138 ... 0x13a:
8046 case 0x150 ... 0x179:
8047 case 0x17c ... 0x17f:
8048 case 0x1c2:
8049 case 0x1c4 ... 0x1c6:
8050 case 0x1d0 ... 0x1fe:
8051 gen_sse(s, b, pc_start, rex_r);
8052 break;
8053 default:
8054 goto illegal_op;
8055 }
8056 /* lock generation */
8057 if (s->prefix & PREFIX_LOCK)
8058 gen_helper_unlock();
8059 return s->pc;
8060 illegal_op:
8061 if (s->prefix & PREFIX_LOCK)
8062 gen_helper_unlock();
8063 /* XXX: ensure that no lock was generated */
8064 gen_exception(s, EXCP06_ILLOP, pc_start - s->cs_base);
8065 return s->pc;
8066}
8067
8068void optimize_flags_init(void)
8069{
8070#if TCG_TARGET_REG_BITS == 32
8071 assert(sizeof(CCTable) == (1 << 3));
8072#else
8073 assert(sizeof(CCTable) == (1 << 4));
8074#endif
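    /* The asserts above pin sizeof(CCTable) to a power of two, presumably
       so that generated code can index the table with a shift instead of
       a multiply. */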
8075 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
8076 cpu_cc_op = tcg_global_mem_new_i32(TCG_AREG0,
8077 offsetof(CPUState, cc_op), "cc_op");
8078 cpu_cc_src = tcg_global_mem_new(TCG_AREG0, offsetof(CPUState, cc_src),
8079 "cc_src");
8080 cpu_cc_dst = tcg_global_mem_new(TCG_AREG0, offsetof(CPUState, cc_dst),
8081 "cc_dst");
8082 cpu_cc_tmp = tcg_global_mem_new(TCG_AREG0, offsetof(CPUState, cc_tmp),
8083 "cc_tmp");
8084
8085#ifdef TARGET_X86_64
8086 cpu_regs[R_EAX] = tcg_global_mem_new_i64(TCG_AREG0,
8087 offsetof(CPUState, regs[R_EAX]), "rax");
8088 cpu_regs[R_ECX] = tcg_global_mem_new_i64(TCG_AREG0,
8089 offsetof(CPUState, regs[R_ECX]), "rcx");
8090 cpu_regs[R_EDX] = tcg_global_mem_new_i64(TCG_AREG0,
8091 offsetof(CPUState, regs[R_EDX]), "rdx");
8092 cpu_regs[R_EBX] = tcg_global_mem_new_i64(TCG_AREG0,
8093 offsetof(CPUState, regs[R_EBX]), "rbx");
8094 cpu_regs[R_ESP] = tcg_global_mem_new_i64(TCG_AREG0,
8095 offsetof(CPUState, regs[R_ESP]), "rsp");
8096 cpu_regs[R_EBP] = tcg_global_mem_new_i64(TCG_AREG0,
8097 offsetof(CPUState, regs[R_EBP]), "rbp");
8098 cpu_regs[R_ESI] = tcg_global_mem_new_i64(TCG_AREG0,
8099 offsetof(CPUState, regs[R_ESI]), "rsi");
8100 cpu_regs[R_EDI] = tcg_global_mem_new_i64(TCG_AREG0,
8101 offsetof(CPUState, regs[R_EDI]), "rdi");
8102 cpu_regs[8] = tcg_global_mem_new_i64(TCG_AREG0,
8103 offsetof(CPUState, regs[8]), "r8");
8104 cpu_regs[9] = tcg_global_mem_new_i64(TCG_AREG0,
8105 offsetof(CPUState, regs[9]), "r9");
8106 cpu_regs[10] = tcg_global_mem_new_i64(TCG_AREG0,
8107 offsetof(CPUState, regs[10]), "r10");
8108 cpu_regs[11] = tcg_global_mem_new_i64(TCG_AREG0,
8109 offsetof(CPUState, regs[11]), "r11");
8110 cpu_regs[12] = tcg_global_mem_new_i64(TCG_AREG0,
8111 offsetof(CPUState, regs[12]), "r12");
8112 cpu_regs[13] = tcg_global_mem_new_i64(TCG_AREG0,
8113 offsetof(CPUState, regs[13]), "r13");
8114 cpu_regs[14] = tcg_global_mem_new_i64(TCG_AREG0,
8115 offsetof(CPUState, regs[14]), "r14");
8116 cpu_regs[15] = tcg_global_mem_new_i64(TCG_AREG0,
8117 offsetof(CPUState, regs[15]), "r15");
8118#else
8119 cpu_regs[R_EAX] = tcg_global_mem_new_i32(TCG_AREG0,
8120 offsetof(CPUState, regs[R_EAX]), "eax");
8121 cpu_regs[R_ECX] = tcg_global_mem_new_i32(TCG_AREG0,
8122 offsetof(CPUState, regs[R_ECX]), "ecx");
8123 cpu_regs[R_EDX] = tcg_global_mem_new_i32(TCG_AREG0,
8124 offsetof(CPUState, regs[R_EDX]), "edx");
8125 cpu_regs[R_EBX] = tcg_global_mem_new_i32(TCG_AREG0,
8126 offsetof(CPUState, regs[R_EBX]), "ebx");
8127 cpu_regs[R_ESP] = tcg_global_mem_new_i32(TCG_AREG0,
8128 offsetof(CPUState, regs[R_ESP]), "esp");
8129 cpu_regs[R_EBP] = tcg_global_mem_new_i32(TCG_AREG0,
8130 offsetof(CPUState, regs[R_EBP]), "ebp");
8131 cpu_regs[R_ESI] = tcg_global_mem_new_i32(TCG_AREG0,
8132 offsetof(CPUState, regs[R_ESI]), "esi");
8133 cpu_regs[R_EDI] = tcg_global_mem_new_i32(TCG_AREG0,
8134 offsetof(CPUState, regs[R_EDI]), "edi");
8135#endif
8136
8137 /* register helpers */
8138#define GEN_HELPER 2
8139#include "helper.h"
8140}
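
/* A minimal sketch of the registration pattern above (illustrative only;
 * cpu_eip_example and example_register_global are hypothetical names, not
 * globals this file defines): each guest register lives in a fixed CPUState
 * slot, and tcg_global_mem_new() exposes that slot to TCG under a symbolic
 * name. */
#if 0
static TCGv cpu_eip_example;

static void example_register_global(void)
{
    /* TCG_AREG0 holds the CPUState pointer; the offset selects the slot. */
    cpu_eip_example = tcg_global_mem_new(TCG_AREG0,
                                         offsetof(CPUState, eip), "eip");
}
#endif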
8141
8142/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
8143 basic block 'tb'. If search_pc is TRUE, also generate PC
8144 information for each intermediate instruction. */
8145static inline void gen_intermediate_code_internal(CPUState *env,
8146 TranslationBlock *tb,
8147 int search_pc)
8148{
8149 DisasContext dc1, *dc = &dc1;
8150 target_ulong pc_ptr;
8151 uint16_t *gen_opc_end;
8152 CPUBreakpoint *bp;
8153 int j, lj;
8154 uint64_t flags;
8155 target_ulong pc_start;
8156 target_ulong cs_base;
8157 int num_insns;
8158 int max_insns;
8159#ifdef VBOX
8160 int const singlestep = env->state & CPU_EMULATE_SINGLE_STEP;
8161#endif
8162
8163 /* generate intermediate code */
8164 pc_start = tb->pc;
8165 cs_base = tb->cs_base;
8166 flags = tb->flags;
8167
8168 dc->pe = (flags >> HF_PE_SHIFT) & 1;
8169 dc->code32 = (flags >> HF_CS32_SHIFT) & 1;
8170 dc->ss32 = (flags >> HF_SS32_SHIFT) & 1;
8171 dc->addseg = (flags >> HF_ADDSEG_SHIFT) & 1;
8172 dc->f_st = 0;
8173 dc->vm86 = (flags >> VM_SHIFT) & 1;
8174#ifdef VBOX
8175 dc->vme = !!(env->cr[4] & CR4_VME_MASK);
8176 dc->pvi = !!(env->cr[4] & CR4_PVI_MASK);
8177# ifdef VBOX_WITH_CALL_RECORD
8178 if ( !(env->state & CPU_RAW_RING0)
8179 && (env->cr[0] & CR0_PG_MASK)
8180 && !(env->eflags & X86_EFL_IF)
8181 && dc->code32)
8182 dc->record_call = 1;
8183 else
8184 dc->record_call = 0;
8185# endif
8186#endif /* VBOX */
8187 dc->cpl = (flags >> HF_CPL_SHIFT) & 3;
8188 dc->iopl = (flags >> IOPL_SHIFT) & 3;
8189 dc->tf = (flags >> TF_SHIFT) & 1;
8190 dc->singlestep_enabled = env->singlestep_enabled;
8191 dc->cc_op = CC_OP_DYNAMIC;
8192 dc->cs_base = cs_base;
8193 dc->tb = tb;
8194 dc->popl_esp_hack = 0;
 8195 /* select memory access functions: mem_index encodes the MMU index (user for CPL 3, kernel otherwise) that the load/store ops combine with the access size; 0 means no soft MMU */
8196 dc->mem_index = 0;
8197 if (flags & HF_SOFTMMU_MASK) {
8198 if (dc->cpl == 3)
8199 dc->mem_index = 2 * 4;
8200 else
8201 dc->mem_index = 1 * 4;
8202 }
8203 dc->cpuid_features = env->cpuid_features;
8204 dc->cpuid_ext_features = env->cpuid_ext_features;
8205 dc->cpuid_ext2_features = env->cpuid_ext2_features;
8206 dc->cpuid_ext3_features = env->cpuid_ext3_features;
8207#ifdef TARGET_X86_64
8208 dc->lma = (flags >> HF_LMA_SHIFT) & 1;
8209 dc->code64 = (flags >> HF_CS64_SHIFT) & 1;
8210#endif
8211 dc->flags = flags;
8212 dc->jmp_opt = !(dc->tf || env->singlestep_enabled ||
8213 (flags & HF_INHIBIT_IRQ_MASK)
8214#ifndef CONFIG_SOFTMMU
8215 || (flags & HF_SOFTMMU_MASK)
8216#endif
8217 );
8218#if 0
8219 /* check addseg logic */
8220 if (!dc->addseg && (dc->vm86 || !dc->pe || !dc->code32))
8221 printf("ERROR addseg\n");
8222#endif
8223
8224 cpu_T[0] = tcg_temp_new();
8225 cpu_T[1] = tcg_temp_new();
8226 cpu_A0 = tcg_temp_new();
8227 cpu_T3 = tcg_temp_new();
8228
8229 cpu_tmp0 = tcg_temp_new();
8230 cpu_tmp1_i64 = tcg_temp_new_i64();
8231 cpu_tmp2_i32 = tcg_temp_new_i32();
8232 cpu_tmp3_i32 = tcg_temp_new_i32();
8233 cpu_tmp4 = tcg_temp_new();
8234 cpu_tmp5 = tcg_temp_new();
8235 cpu_ptr0 = tcg_temp_new_ptr();
8236 cpu_ptr1 = tcg_temp_new_ptr();
8237
8238 gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
8239
8240 dc->is_jmp = DISAS_NEXT;
8241 pc_ptr = pc_start;
8242 lj = -1;
8243 num_insns = 0;
8244 max_insns = tb->cflags & CF_COUNT_MASK;
8245 if (max_insns == 0)
8246 max_insns = CF_COUNT_MASK;
8247
8248 gen_icount_start();
8249 for(;;) {
8250 if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
8251 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
8252 if (bp->pc == pc_ptr &&
8253 !((bp->flags & BP_CPU) && (tb->flags & HF_RF_MASK))) {
8254 gen_debug(dc, pc_ptr - dc->cs_base);
8255 break;
8256 }
8257 }
8258 }
8259 if (search_pc) {
8260 j = gen_opc_ptr - gen_opc_buf;
8261 if (lj < j) {
8262 lj++;
8263 while (lj < j)
8264 gen_opc_instr_start[lj++] = 0;
8265 }
8266 gen_opc_pc[lj] = pc_ptr;
8267 gen_opc_cc_op[lj] = dc->cc_op;
8268 gen_opc_instr_start[lj] = 1;
8269 gen_opc_icount[lj] = num_insns;
8270 }
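        /* The gen_opc_pc[] / gen_opc_cc_op[] side tables filled above are
         * what gen_pc_load() walks later to restore the guest eip and the
         * lazy cc_op state for precise exceptions. */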
8271 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
8272 gen_io_start();
8273
8274 pc_ptr = disas_insn(dc, pc_ptr);
8275 num_insns++;
8276 /* stop translation if indicated */
8277 if (dc->is_jmp)
8278 break;
8279#ifdef VBOX
8280# ifdef DEBUG
8281/*
8282 if(cpu_check_code_raw(env, pc_ptr, env->hflags | (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK))) == ERROR_SUCCESS)
8283 {
8284 //should never happen as the jump to the patch code terminates the translation block
8285 dprintf(("QEmu is about to execute instructions in our patch block at %08X!!\n", pc_ptr));
8286 }
8287*/
8288# endif /* DEBUG */
8289 if (env->state & CPU_EMULATE_SINGLE_INSTR)
8290 {
8291 env->state &= ~CPU_EMULATE_SINGLE_INSTR;
8292 gen_jmp_im(pc_ptr - dc->cs_base);
8293 gen_eob(dc);
8294 break;
8295 }
8296#endif /* VBOX */
8297
 8298 /* in single-step mode, we generate only one instruction and
 8299 then raise an exception */
 8300 /* if irqs were inhibited with HF_INHIBIT_IRQ_MASK, we clear
 8301 the flag and abort the translation to give the irqs a
 8302 chance to happen */
8303 if (dc->tf || dc->singlestep_enabled ||
8304 (flags & HF_INHIBIT_IRQ_MASK)) {
8305 gen_jmp_im(pc_ptr - dc->cs_base);
8306 gen_eob(dc);
8307 break;
8308 }
 8309 /* stop generation as well if the translation gets too long */
8310 if (gen_opc_ptr >= gen_opc_end ||
8311 (pc_ptr - pc_start) >= (TARGET_PAGE_SIZE - 32) ||
8312 num_insns >= max_insns) {
8313 gen_jmp_im(pc_ptr - dc->cs_base);
8314 gen_eob(dc);
8315 break;
8316 }
8317 if (singlestep) {
8318 gen_jmp_im(pc_ptr - dc->cs_base);
8319 gen_eob(dc);
8320 break;
8321 }
8322 }
8323 if (tb->cflags & CF_LAST_IO)
8324 gen_io_end();
8325 gen_icount_end(tb, num_insns);
8326 *gen_opc_ptr = INDEX_op_end;
 8327 /* make sure the trailing instr_start slots are zero-filled */
8328 if (search_pc) {
8329 j = gen_opc_ptr - gen_opc_buf;
8330 lj++;
8331 while (lj <= j)
8332 gen_opc_instr_start[lj++] = 0;
8333 }
8334
8335#ifdef DEBUG_DISAS
8336 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
8337 int disas_flags;
8338 qemu_log("----------------\n");
8339 qemu_log("IN: %s\n", lookup_symbol(pc_start));
8340#ifdef TARGET_X86_64
8341 if (dc->code64)
8342 disas_flags = 2;
8343 else
8344#endif
8345 disas_flags = !dc->code32;
8346 log_target_disas(pc_start, pc_ptr - pc_start, disas_flags);
8347 qemu_log("\n");
8348 }
8349#endif
8350
8351 if (!search_pc) {
8352 tb->size = pc_ptr - pc_start;
8353 tb->icount = num_insns;
8354 }
8355}
8356
8357void gen_intermediate_code(CPUState *env, TranslationBlock *tb)
8358{
8359 gen_intermediate_code_internal(env, tb, 0);
8360}
8361
8362void gen_intermediate_code_pc(CPUState *env, TranslationBlock *tb)
8363{
8364 gen_intermediate_code_internal(env, tb, 1);
8365}
8366
8367void gen_pc_load(CPUState *env, TranslationBlock *tb,
8368 uintptr_t searched_pc, int pc_pos, void *puc)
8369{
8370 int cc_op;
8371#ifdef DEBUG_DISAS
8372 if (qemu_loglevel_mask(CPU_LOG_TB_OP)) {
8373 int i;
8374 qemu_log("RESTORE:\n");
 8375 for(i = 0; i <= pc_pos; i++) {
8376 if (gen_opc_instr_start[i]) {
8377 qemu_log("0x%04x: " TARGET_FMT_lx "\n", i, gen_opc_pc[i]);
8378 }
8379 }
8380 qemu_log("spc=0x%08lx pc_pos=0x%x eip=" TARGET_FMT_lx " cs_base=%x\n",
8381 searched_pc, pc_pos, gen_opc_pc[pc_pos] - tb->cs_base,
8382 (uint32_t)tb->cs_base);
8383 }
8384#endif
8385 env->eip = gen_opc_pc[pc_pos] - tb->cs_base;
8386 cc_op = gen_opc_cc_op[pc_pos];
8387 if (cc_op != CC_OP_DYNAMIC)
8388 env->cc_op = cc_op;
8389}
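
/* A usage sketch (illustrative only; example_restore_state is a hypothetical
 * helper, not part of this file): when an exception hits inside a translated
 * block, the generic restore path re-translates the TB with search_pc set so
 * the side tables are rebuilt, then calls gen_pc_load() with the op index
 * that matches the faulting host PC. */
#if 0
static void example_restore_state(CPUState *env, TranslationBlock *tb,
                                  uintptr_t searched_pc, int pc_pos)
{
    gen_intermediate_code_pc(env, tb);               /* rebuild side tables */
    gen_pc_load(env, tb, searched_pc, pc_pos, NULL); /* restore eip, cc_op */
}
#endif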