VirtualBox

source: vbox/trunk/src/recompiler/target-i386/translate.c@44399

Last change on this file since 44399 was 42601, checked in by vboxsync, 12 years ago

REM: Initial changes to make it work (seemingly) with MinGW-w64.

  • Property svn:eol-style set to native
File size: 269.7 KB
/*
 * i386 translation
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

/*
 * Oracle LGPL Disclaimer: For the avoidance of doubt, except that if any license choice
 * other than GPL or LGPL is available it will apply instead, Oracle elects to use only
 * the Lesser General Public License version 2.1 (LGPLv2) at this time for any software where
 * a choice of LGPL license versions is made available with the language indicating
 * that LGPLv2 or any later version may be used, or where a choice of which version
 * of the LGPL is applied is otherwise unspecified.
 */

#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#ifndef VBOX
#include <inttypes.h>
#include <signal.h>
#endif /* !VBOX */

#include "cpu.h"
#include "exec-all.h"
#include "disas.h"
#include "tcg-op.h"

#include "helper.h"
#define GEN_HELPER 1
#include "helper.h"

#define PREFIX_REPZ   0x01
#define PREFIX_REPNZ  0x02
#define PREFIX_LOCK   0x04
#define PREFIX_DATA   0x08
#define PREFIX_ADR    0x10

#ifdef TARGET_X86_64
#define X86_64_ONLY(x) x
#define X86_64_DEF(...) __VA_ARGS__
#define CODE64(s) ((s)->code64)
#define REX_X(s) ((s)->rex_x)
#define REX_B(s) ((s)->rex_b)
# ifdef VBOX
# define IS_LONG_MODE(s) ((s)->lma)
# endif
/* XXX: gcc generates push/pop in some opcodes, so we cannot use them */
#if 1
#define BUGGY_64(x) NULL
#endif
#else
#define X86_64_ONLY(x) NULL
#define X86_64_DEF(...)
#define CODE64(s) 0
#define REX_X(s) 0
#define REX_B(s) 0
# ifdef VBOX
# define IS_LONG_MODE(s) 0
# endif
#endif

//#define MACRO_TEST 1

/* global register indexes */
static TCGv_ptr cpu_env;
static TCGv cpu_A0, cpu_cc_src, cpu_cc_dst, cpu_cc_tmp;
static TCGv_i32 cpu_cc_op;
static TCGv cpu_regs[CPU_NB_REGS];
/* local temps */
static TCGv cpu_T[2], cpu_T3;
/* local register indexes (only used inside old micro ops) */
static TCGv cpu_tmp0, cpu_tmp4;
static TCGv_ptr cpu_ptr0, cpu_ptr1;
static TCGv_i32 cpu_tmp2_i32, cpu_tmp3_i32;
static TCGv_i64 cpu_tmp1_i64;
static TCGv cpu_tmp5;

static uint8_t gen_opc_cc_op[OPC_BUF_SIZE];

#include "gen-icount.h"

#ifdef TARGET_X86_64
static int x86_64_hregs;
#endif

#ifdef VBOX

/* Special/override code readers to hide patched code. */

uint8_t ldub_code_raw(target_ulong pc)
{
    uint8_t b;

    if (!remR3GetOpcode(cpu_single_env, pc, &b))
        b = ldub_code(pc);
    return b;
}
# define ldub_code(a) ldub_code_raw(a)

uint16_t lduw_code_raw(target_ulong pc)
{
    uint16_t u16;
    u16  = (uint16_t)ldub_code_raw(pc);
    u16 |= (uint16_t)ldub_code_raw(pc + 1) << 8;
    return u16;
}
# define lduw_code(a) lduw_code_raw(a)


uint32_t ldl_code_raw(target_ulong pc)
{
    uint32_t u32;
    u32  = (uint32_t)ldub_code_raw(pc);
    u32 |= (uint32_t)ldub_code_raw(pc + 1) << 8;
    u32 |= (uint32_t)ldub_code_raw(pc + 2) << 16;
    u32 |= (uint32_t)ldub_code_raw(pc + 3) << 24;
    return u32;
}
# define ldl_code(a) ldl_code_raw(a)

#endif /* VBOX */

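/* Note on the readers above: the 16- and 32-bit variants are deliberately
   composed from byte-wise ldub_code_raw() calls (little-endian assembly),
   so a wide fetch that straddles patched and unpatched bytes still sees
   the original guest code for every byte that remR3GetOpcode() knows
   about. */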
typedef struct DisasContext {
    /* current insn context */
    int override; /* -1 if no override */
    int prefix;
    int aflag, dflag;
    target_ulong pc; /* pc = eip + cs_base */
    int is_jmp; /* 1 means jump (stop translation), 2 means CPU
                   static state change (stop translation) */
    /* current block context */
    target_ulong cs_base; /* base of CS segment */
    int pe;     /* protected mode */
    int code32; /* 32 bit code segment */
#ifdef TARGET_X86_64
    int lma;    /* long mode active */
    int code64; /* 64 bit code segment */
    int rex_x, rex_b;
#endif
    int ss32;   /* 32 bit stack segment */
    int cc_op;  /* current CC operation */
    int addseg; /* non zero if either DS/ES/SS have a non zero base */
    int f_st;   /* currently unused */
    int vm86;   /* vm86 mode */
#ifdef VBOX
    int vme;    /* CR4.VME */
    int pvi;    /* CR4.PVI */
    int record_call;    /* record calls for CSAM or not? */
#endif
    int cpl;
    int iopl;
    int tf;     /* TF cpu flag */
    int singlestep_enabled; /* "hardware" single step enabled */
    int jmp_opt; /* use direct block chaining for direct jumps */
    int mem_index; /* select memory access functions */
    uint64_t flags; /* all execution flags */
    struct TranslationBlock *tb;
    int popl_esp_hack; /* for correct popl with esp base handling */
    int rip_offset; /* only used in x86_64, but left for simplicity */
    int cpuid_features;
    int cpuid_ext_features;
    int cpuid_ext2_features;
    int cpuid_ext3_features;
} DisasContext;

static void gen_eob(DisasContext *s);
static void gen_jmp(DisasContext *s, target_ulong eip);
static void gen_jmp_tb(DisasContext *s, target_ulong eip, int tb_num);

#ifdef VBOX
static void gen_check_external_event(void);
#endif

/* i386 arith/logic operations */
enum {
    OP_ADDL,
    OP_ORL,
    OP_ADCL,
    OP_SBBL,
    OP_ANDL,
    OP_SUBL,
    OP_XORL,
    OP_CMPL,
};

/* i386 shift ops */
enum {
    OP_ROL,
    OP_ROR,
    OP_RCL,
    OP_RCR,
    OP_SHL,
    OP_SHR,
    OP_SHL1, /* undocumented */
    OP_SAR = 7,
};

enum {
    JCC_O,
    JCC_B,
    JCC_Z,
    JCC_BE,
    JCC_S,
    JCC_P,
    JCC_L,
    JCC_LE,
};

/* operand size */
enum {
    OT_BYTE = 0,
    OT_WORD,
    OT_LONG,
    OT_QUAD,
};

enum {
    /* I386 int registers */
    OR_EAX,   /* MUST be even numbered */
    OR_ECX,
    OR_EDX,
    OR_EBX,
    OR_ESP,
    OR_EBP,
    OR_ESI,
    OR_EDI,

    OR_TMP0 = 16,    /* temporary operand register */
    OR_TMP1,
    OR_A0, /* temporary register used when doing address evaluation */
};
static inline void gen_op_movl_T0_0(void)
{
    tcg_gen_movi_tl(cpu_T[0], 0);
}

static inline void gen_op_movl_T0_im(int32_t val)
{
    tcg_gen_movi_tl(cpu_T[0], val);
}

static inline void gen_op_movl_T0_imu(uint32_t val)
{
    tcg_gen_movi_tl(cpu_T[0], val);
}

static inline void gen_op_movl_T1_im(int32_t val)
{
    tcg_gen_movi_tl(cpu_T[1], val);
}

static inline void gen_op_movl_T1_imu(uint32_t val)
{
    tcg_gen_movi_tl(cpu_T[1], val);
}

static inline void gen_op_movl_A0_im(uint32_t val)
{
    tcg_gen_movi_tl(cpu_A0, val);
}

#ifdef TARGET_X86_64
static inline void gen_op_movq_A0_im(int64_t val)
{
    tcg_gen_movi_tl(cpu_A0, val);
}
#endif

static inline void gen_movtl_T0_im(target_ulong val)
{
    tcg_gen_movi_tl(cpu_T[0], val);
}

static inline void gen_movtl_T1_im(target_ulong val)
{
    tcg_gen_movi_tl(cpu_T[1], val);
}

static inline void gen_op_andl_T0_ffff(void)
{
    tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xffff);
}

static inline void gen_op_andl_T0_im(uint32_t val)
{
    tcg_gen_andi_tl(cpu_T[0], cpu_T[0], val);
}

static inline void gen_op_movl_T0_T1(void)
{
    tcg_gen_mov_tl(cpu_T[0], cpu_T[1]);
}

static inline void gen_op_andl_A0_ffff(void)
{
    tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffff);
}

#ifdef TARGET_X86_64

#define NB_OP_SIZES 4

#else /* !TARGET_X86_64 */

#define NB_OP_SIZES 3

#endif /* !TARGET_X86_64 */

#if defined(HOST_WORDS_BIGENDIAN)
#define REG_B_OFFSET (sizeof(target_ulong) - 1)
#define REG_H_OFFSET (sizeof(target_ulong) - 2)
#define REG_W_OFFSET (sizeof(target_ulong) - 2)
#define REG_L_OFFSET (sizeof(target_ulong) - 4)
#define REG_LH_OFFSET (sizeof(target_ulong) - 8)
#else
#define REG_B_OFFSET 0
#define REG_H_OFFSET 1
#define REG_W_OFFSET 0
#define REG_L_OFFSET 0
#define REG_LH_OFFSET 4
#endif

static inline void gen_op_mov_reg_v(int ot, int reg, TCGv t0)
{
    TCGv tmp;

    switch(ot) {
    case OT_BYTE:
        tmp = tcg_temp_new();
        tcg_gen_ext8u_tl(tmp, t0);
        if (reg < 4 X86_64_DEF( || reg >= 8 || x86_64_hregs)) {
            tcg_gen_andi_tl(cpu_regs[reg], cpu_regs[reg], ~0xff);
            tcg_gen_or_tl(cpu_regs[reg], cpu_regs[reg], tmp);
        } else {
            tcg_gen_shli_tl(tmp, tmp, 8);
            tcg_gen_andi_tl(cpu_regs[reg - 4], cpu_regs[reg - 4], ~0xff00);
            tcg_gen_or_tl(cpu_regs[reg - 4], cpu_regs[reg - 4], tmp);
        }
        tcg_temp_free(tmp);
        break;
    case OT_WORD:
        tmp = tcg_temp_new();
        tcg_gen_ext16u_tl(tmp, t0);
        tcg_gen_andi_tl(cpu_regs[reg], cpu_regs[reg], ~0xffff);
        tcg_gen_or_tl(cpu_regs[reg], cpu_regs[reg], tmp);
        tcg_temp_free(tmp);
        break;
    default: /* XXX this shouldn't be reached; abort? */
    case OT_LONG:
        /* For x86_64, this sets the higher half of register to zero.
           For i386, this is equivalent to a mov. */
        tcg_gen_ext32u_tl(cpu_regs[reg], t0);
        break;
#ifdef TARGET_X86_64
    case OT_QUAD:
        tcg_gen_mov_tl(cpu_regs[reg], t0);
        break;
#endif
    }
}

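/* Byte-register layout note: without a REX prefix, operand numbers 4-7
   select the legacy high-byte registers AH/CH/DH/BH, i.e. bits 8-15 of
   regs[reg - 4]; that is what the "else" branch in the OT_BYTE case
   above implements.  With a REX prefix (x86_64_hregs), 4-7 select
   SPL/BPL/SIL/DIL instead, which live in the low byte like any other
   register. */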
static inline void gen_op_mov_reg_T0(int ot, int reg)
{
    gen_op_mov_reg_v(ot, reg, cpu_T[0]);
}

static inline void gen_op_mov_reg_T1(int ot, int reg)
{
    gen_op_mov_reg_v(ot, reg, cpu_T[1]);
}

static inline void gen_op_mov_reg_A0(int size, int reg)
{
    TCGv tmp;

    switch(size) {
    case 0:
        tmp = tcg_temp_new();
        tcg_gen_ext16u_tl(tmp, cpu_A0);
        tcg_gen_andi_tl(cpu_regs[reg], cpu_regs[reg], ~0xffff);
        tcg_gen_or_tl(cpu_regs[reg], cpu_regs[reg], tmp);
        tcg_temp_free(tmp);
        break;
    default: /* XXX this shouldn't be reached; abort? */
    case 1:
        /* For x86_64, this sets the higher half of register to zero.
           For i386, this is equivalent to a mov. */
        tcg_gen_ext32u_tl(cpu_regs[reg], cpu_A0);
        break;
#ifdef TARGET_X86_64
    case 2:
        tcg_gen_mov_tl(cpu_regs[reg], cpu_A0);
        break;
#endif
    }
}

static inline void gen_op_mov_v_reg(int ot, TCGv t0, int reg)
{
    switch(ot) {
    case OT_BYTE:
        if (reg < 4 X86_64_DEF( || reg >= 8 || x86_64_hregs)) {
            goto std_case;
        } else {
            tcg_gen_shri_tl(t0, cpu_regs[reg - 4], 8);
            tcg_gen_ext8u_tl(t0, t0);
        }
        break;
    default:
    std_case:
        tcg_gen_mov_tl(t0, cpu_regs[reg]);
        break;
    }
}

static inline void gen_op_mov_TN_reg(int ot, int t_index, int reg)
{
    gen_op_mov_v_reg(ot, cpu_T[t_index], reg);
}

static inline void gen_op_movl_A0_reg(int reg)
{
    tcg_gen_mov_tl(cpu_A0, cpu_regs[reg]);
}

static inline void gen_op_addl_A0_im(int32_t val)
{
    tcg_gen_addi_tl(cpu_A0, cpu_A0, val);
#ifdef TARGET_X86_64
    tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
#endif
}

#ifdef TARGET_X86_64
static inline void gen_op_addq_A0_im(int64_t val)
{
    tcg_gen_addi_tl(cpu_A0, cpu_A0, val);
}
#endif

static void gen_add_A0_im(DisasContext *s, int val)
{
#ifdef TARGET_X86_64
    if (CODE64(s))
        gen_op_addq_A0_im(val);
    else
#endif
        gen_op_addl_A0_im(val);
}

static inline void gen_op_addl_T0_T1(void)
{
    tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
}

static inline void gen_op_jmp_T0(void)
{
    tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUState, eip));
}

static inline void gen_op_add_reg_im(int size, int reg, int32_t val)
{
    switch(size) {
    case 0:
        tcg_gen_addi_tl(cpu_tmp0, cpu_regs[reg], val);
        tcg_gen_ext16u_tl(cpu_tmp0, cpu_tmp0);
        tcg_gen_andi_tl(cpu_regs[reg], cpu_regs[reg], ~0xffff);
        tcg_gen_or_tl(cpu_regs[reg], cpu_regs[reg], cpu_tmp0);
        break;
    case 1:
        tcg_gen_addi_tl(cpu_tmp0, cpu_regs[reg], val);
        /* For x86_64, this sets the higher half of register to zero.
           For i386, this is equivalent to a nop. */
        tcg_gen_ext32u_tl(cpu_tmp0, cpu_tmp0);
        tcg_gen_mov_tl(cpu_regs[reg], cpu_tmp0);
        break;
#ifdef TARGET_X86_64
    case 2:
        tcg_gen_addi_tl(cpu_regs[reg], cpu_regs[reg], val);
        break;
#endif
    }
}

static inline void gen_op_add_reg_T0(int size, int reg)
{
    switch(size) {
    case 0:
        tcg_gen_add_tl(cpu_tmp0, cpu_regs[reg], cpu_T[0]);
        tcg_gen_ext16u_tl(cpu_tmp0, cpu_tmp0);
        tcg_gen_andi_tl(cpu_regs[reg], cpu_regs[reg], ~0xffff);
        tcg_gen_or_tl(cpu_regs[reg], cpu_regs[reg], cpu_tmp0);
        break;
    case 1:
        tcg_gen_add_tl(cpu_tmp0, cpu_regs[reg], cpu_T[0]);
        /* For x86_64, this sets the higher half of register to zero.
           For i386, this is equivalent to a nop. */
        tcg_gen_ext32u_tl(cpu_tmp0, cpu_tmp0);
        tcg_gen_mov_tl(cpu_regs[reg], cpu_tmp0);
        break;
#ifdef TARGET_X86_64
    case 2:
        tcg_gen_add_tl(cpu_regs[reg], cpu_regs[reg], cpu_T[0]);
        break;
#endif
    }
}

static inline void gen_op_set_cc_op(int32_t val)
{
    tcg_gen_movi_i32(cpu_cc_op, val);
}

static inline void gen_op_addl_A0_reg_sN(int shift, int reg)
{
    tcg_gen_mov_tl(cpu_tmp0, cpu_regs[reg]);
    if (shift != 0)
        tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, shift);
    tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
    /* For x86_64, this sets the higher half of register to zero.
       For i386, this is equivalent to a nop. */
    tcg_gen_ext32u_tl(cpu_A0, cpu_A0);
}

#ifdef VBOX
DECLINLINE(void) gen_op_seg_check(int reg, bool keepA0)
{
    /* Segments don't seem to get out of sync; if they in fact do, enable the code below. */
# ifdef FORCE_SEGMENT_SYNC
# if 1
    TCGv t0;

    /* Considering the poor quality of the TCG optimizer, it is better to call the helper directly. */
    t0 = tcg_temp_local_new(TCG_TYPE_TL);
    tcg_gen_movi_tl(t0, reg);
    tcg_gen_helper_0_1(helper_sync_seg, t0);
    tcg_temp_free(t0);
# else
    /* Our segments could be outdated, so check the newselector field to see if an update is really needed. */
    int skip_label;
    TCGv t0, a0;

    /* For segments other than GS this check is a waste of time; also, TCG cannot
       cope with this code for data/stack segments, as it expects a live cpu_T[0]. */
    if (reg != R_GS)
        return;

    if (keepA0)
    {
        /* we need to store old cpu_A0 */
        a0 = tcg_temp_local_new(TCG_TYPE_TL);
        tcg_gen_mov_tl(a0, cpu_A0);
    }

    skip_label = gen_new_label();
    t0 = tcg_temp_local_new(TCG_TYPE_TL);

    tcg_gen_ld32u_tl(t0, cpu_env, offsetof(CPUState, segs[reg].newselector) + REG_L_OFFSET);
    tcg_gen_brcondi_i32(TCG_COND_EQ, t0, 0, skip_label);
    tcg_gen_ld32u_tl(t0, cpu_env, offsetof(CPUState, eflags) + REG_L_OFFSET);
    tcg_gen_andi_tl(t0, t0, VM_MASK);
    tcg_gen_brcondi_i32(TCG_COND_NE, t0, 0, skip_label);
    tcg_gen_movi_tl(t0, reg);

    tcg_gen_helper_0_1(helper_sync_seg, t0);

    tcg_temp_free(t0);

    gen_set_label(skip_label);
    if (keepA0)
    {
        tcg_gen_mov_tl(cpu_A0, a0);
        tcg_temp_free(a0);
    }
# endif /* 0 */
# endif /* FORCE_SEGMENT_SYNC */
}
#endif /* VBOX */

static inline void gen_op_movl_A0_seg(int reg)
{
#ifdef VBOX
    gen_op_seg_check(reg, false);
#endif
    tcg_gen_ld32u_tl(cpu_A0, cpu_env, offsetof(CPUState, segs[reg].base) + REG_L_OFFSET);
}

static inline void gen_op_addl_A0_seg(int reg)
{
#ifdef VBOX
    gen_op_seg_check(reg, true);
#endif
    tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, segs[reg].base));
    tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
#ifdef TARGET_X86_64
    tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
#endif
}

#ifdef TARGET_X86_64
static inline void gen_op_movq_A0_seg(int reg)
{
#ifdef VBOX
    gen_op_seg_check(reg, false);
#endif
    tcg_gen_ld_tl(cpu_A0, cpu_env, offsetof(CPUState, segs[reg].base));
}

static inline void gen_op_addq_A0_seg(int reg)
{
#ifdef VBOX
    gen_op_seg_check(reg, true);
#endif
    tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, segs[reg].base));
    tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
}

static inline void gen_op_movq_A0_reg(int reg)
{
    tcg_gen_mov_tl(cpu_A0, cpu_regs[reg]);
}

static inline void gen_op_addq_A0_reg_sN(int shift, int reg)
{
    tcg_gen_mov_tl(cpu_tmp0, cpu_regs[reg]);
    if (shift != 0)
        tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, shift);
    tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
}
#endif

static inline void gen_op_lds_T0_A0(int idx)
{
    int mem_index = (idx >> 2) - 1;
    switch(idx & 3) {
    case 0:
        tcg_gen_qemu_ld8s(cpu_T[0], cpu_A0, mem_index);
        break;
    case 1:
        tcg_gen_qemu_ld16s(cpu_T[0], cpu_A0, mem_index);
        break;
    default:
    case 2:
        tcg_gen_qemu_ld32s(cpu_T[0], cpu_A0, mem_index);
        break;
    }
}

static inline void gen_op_ld_v(int idx, TCGv t0, TCGv a0)
{
    int mem_index = (idx >> 2) - 1;
    switch(idx & 3) {
    case 0:
        tcg_gen_qemu_ld8u(t0, a0, mem_index);
        break;
    case 1:
        tcg_gen_qemu_ld16u(t0, a0, mem_index);
        break;
    case 2:
        tcg_gen_qemu_ld32u(t0, a0, mem_index);
        break;
    default:
    case 3:
        /* Should never happen on 32-bit targets. */
#ifdef TARGET_X86_64
        tcg_gen_qemu_ld64(t0, a0, mem_index);
#endif
        break;
    }
}

/* XXX: always use ldu or lds */
static inline void gen_op_ld_T0_A0(int idx)
{
    gen_op_ld_v(idx, cpu_T[0], cpu_A0);
}

static inline void gen_op_ldu_T0_A0(int idx)
{
    gen_op_ld_v(idx, cpu_T[0], cpu_A0);
}

static inline void gen_op_ld_T1_A0(int idx)
{
    gen_op_ld_v(idx, cpu_T[1], cpu_A0);
}

static inline void gen_op_st_v(int idx, TCGv t0, TCGv a0)
{
    int mem_index = (idx >> 2) - 1;
    switch(idx & 3) {
    case 0:
        tcg_gen_qemu_st8(t0, a0, mem_index);
        break;
    case 1:
        tcg_gen_qemu_st16(t0, a0, mem_index);
        break;
    case 2:
        tcg_gen_qemu_st32(t0, a0, mem_index);
        break;
    default:
    case 3:
        /* Should never happen on 32-bit targets. */
#ifdef TARGET_X86_64
        tcg_gen_qemu_st64(t0, a0, mem_index);
#endif
        break;
    }
}

static inline void gen_op_st_T0_A0(int idx)
{
    gen_op_st_v(idx, cpu_T[0], cpu_A0);
}

static inline void gen_op_st_T1_A0(int idx)
{
    gen_op_st_v(idx, cpu_T[1], cpu_A0);
}

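/* Encoding note for the load/store helpers above: callers pass
   "ot + s->mem_index", so the low two bits of idx select the operand
   size (OT_BYTE..OT_QUAD) while the remaining bits carry a biased
   memory index ((mem_index + 1) << 2) that selects the kernel/user
   access functions; "(idx >> 2) - 1" recovers the TCG mem_index. */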
#ifdef VBOX

static void gen_check_external_event(void)
{
# if 1
    /** @todo: once TCG codegen improves, we may want to use the
       version from the #else branch below. */
    gen_helper_check_external_event();
# else
    int skip_label;
    TCGv t0;

    skip_label = gen_new_label();
    t0 = tcg_temp_local_new(TCG_TYPE_TL);
    /* t0 = cpu_tmp0; */

    tcg_gen_ld32u_tl(t0, cpu_env, offsetof(CPUState, interrupt_request));
    /* Keep in sync with helper_check_external_event() */
    tcg_gen_andi_tl(t0, t0,
                    CPU_INTERRUPT_EXTERNAL_EXIT
                    | CPU_INTERRUPT_EXTERNAL_TIMER
                    | CPU_INTERRUPT_EXTERNAL_DMA
                    | CPU_INTERRUPT_EXTERNAL_HARD);
    /** @todo: predict branch as taken */
    tcg_gen_brcondi_i32(TCG_COND_EQ, t0, 0, skip_label);
    tcg_temp_free(t0);

    gen_helper_check_external_event();

    gen_set_label(skip_label);
# endif
}

#endif /* VBOX */

static inline void gen_jmp_im(target_ulong pc)
{
    tcg_gen_movi_tl(cpu_tmp0, pc);
    tcg_gen_st_tl(cpu_tmp0, cpu_env, offsetof(CPUState, eip));
}

#ifdef VBOX
DECLINLINE(void) gen_update_eip(target_ulong pc)
{
    gen_jmp_im(pc);
# ifdef VBOX_DUMP_STATE
    gen_helper_dump_state();
# endif
}
#endif /* VBOX */

static inline void gen_string_movl_A0_ESI(DisasContext *s)
{
    int override;

    override = s->override;
#ifdef TARGET_X86_64
    if (s->aflag == 2) {
        if (override >= 0) {
            gen_op_movq_A0_seg(override);
            gen_op_addq_A0_reg_sN(0, R_ESI);
        } else {
            gen_op_movq_A0_reg(R_ESI);
        }
    } else
#endif
    if (s->aflag) {
        /* 32 bit address */
        if (s->addseg && override < 0)
            override = R_DS;
        if (override >= 0) {
            gen_op_movl_A0_seg(override);
            gen_op_addl_A0_reg_sN(0, R_ESI);
        } else {
            gen_op_movl_A0_reg(R_ESI);
        }
    } else {
        /* 16 bit address, always override */
        if (override < 0)
            override = R_DS;
        gen_op_movl_A0_reg(R_ESI);
        gen_op_andl_A0_ffff();
        gen_op_addl_A0_seg(override);
    }
}

static inline void gen_string_movl_A0_EDI(DisasContext *s)
{
#ifdef TARGET_X86_64
    if (s->aflag == 2) {
        gen_op_movq_A0_reg(R_EDI);
    } else
#endif
    if (s->aflag) {
        if (s->addseg) {
            gen_op_movl_A0_seg(R_ES);
            gen_op_addl_A0_reg_sN(0, R_EDI);
        } else {
            gen_op_movl_A0_reg(R_EDI);
        }
    } else {
        gen_op_movl_A0_reg(R_EDI);
        gen_op_andl_A0_ffff();
        gen_op_addl_A0_seg(R_ES);
    }
}

static inline void gen_op_movl_T0_Dshift(int ot)
{
    tcg_gen_ld32s_tl(cpu_T[0], cpu_env, offsetof(CPUState, df));
    tcg_gen_shli_tl(cpu_T[0], cpu_T[0], ot);
}

static void gen_extu(int ot, TCGv reg)
{
    switch(ot) {
    case OT_BYTE:
        tcg_gen_ext8u_tl(reg, reg);
        break;
    case OT_WORD:
        tcg_gen_ext16u_tl(reg, reg);
        break;
    case OT_LONG:
        tcg_gen_ext32u_tl(reg, reg);
        break;
    default:
        break;
    }
}

static void gen_exts(int ot, TCGv reg)
{
    switch(ot) {
    case OT_BYTE:
        tcg_gen_ext8s_tl(reg, reg);
        break;
    case OT_WORD:
        tcg_gen_ext16s_tl(reg, reg);
        break;
    case OT_LONG:
        tcg_gen_ext32s_tl(reg, reg);
        break;
    default:
        break;
    }
}

static inline void gen_op_jnz_ecx(int size, int label1)
{
    tcg_gen_mov_tl(cpu_tmp0, cpu_regs[R_ECX]);
    gen_extu(size + 1, cpu_tmp0);
    tcg_gen_brcondi_tl(TCG_COND_NE, cpu_tmp0, 0, label1);
}

static inline void gen_op_jz_ecx(int size, int label1)
{
    tcg_gen_mov_tl(cpu_tmp0, cpu_regs[R_ECX]);
    gen_extu(size + 1, cpu_tmp0);
    tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_tmp0, 0, label1);
}

static void gen_helper_in_func(int ot, TCGv v, TCGv_i32 n)
{
    switch (ot) {
    case 0: gen_helper_inb(v, n); break;
    case 1: gen_helper_inw(v, n); break;
    case 2: gen_helper_inl(v, n); break;
    }
}

static void gen_helper_out_func(int ot, TCGv_i32 v, TCGv_i32 n)
{
    switch (ot) {
    case 0: gen_helper_outb(v, n); break;
    case 1: gen_helper_outw(v, n); break;
    case 2: gen_helper_outl(v, n); break;
    }
}

static void gen_check_io(DisasContext *s, int ot, target_ulong cur_eip,
                         uint32_t svm_flags)
{
    int state_saved;
    target_ulong next_eip;

    state_saved = 0;
    if (s->pe && (s->cpl > s->iopl || s->vm86)) {
        if (s->cc_op != CC_OP_DYNAMIC)
            gen_op_set_cc_op(s->cc_op);
        gen_jmp_im(cur_eip);
        state_saved = 1;
        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
        switch (ot) {
        case 0: gen_helper_check_iob(cpu_tmp2_i32); break;
        case 1: gen_helper_check_iow(cpu_tmp2_i32); break;
        case 2: gen_helper_check_iol(cpu_tmp2_i32); break;
        }
    }
    if(s->flags & HF_SVMI_MASK) {
        if (!state_saved) {
            if (s->cc_op != CC_OP_DYNAMIC)
                gen_op_set_cc_op(s->cc_op);
            gen_jmp_im(cur_eip);
        }
        svm_flags |= (1 << (4 + ot));
        next_eip = s->pc - s->cs_base;
        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
        gen_helper_svm_check_io(cpu_tmp2_i32, tcg_const_i32(svm_flags),
                                tcg_const_i32(next_eip - cur_eip));
    }
}

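/* Note on the checks above: in protected mode, I/O with CPL > IOPL (or
   any I/O access in vm86 mode) must consult the TSS I/O permission
   bitmap, so the check_io helpers are called to raise #GP when the port
   is not accessible; EIP and cc_op are flushed first so a possible
   exception unwinds to a consistent guest state. */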
static inline void gen_movs(DisasContext *s, int ot)
{
    gen_string_movl_A0_ESI(s);
    gen_op_ld_T0_A0(ot + s->mem_index);
    gen_string_movl_A0_EDI(s);
    gen_op_st_T0_A0(ot + s->mem_index);
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_ESI);
    gen_op_add_reg_T0(s->aflag, R_EDI);
}

static inline void gen_update_cc_op(DisasContext *s)
{
    if (s->cc_op != CC_OP_DYNAMIC) {
        gen_op_set_cc_op(s->cc_op);
        s->cc_op = CC_OP_DYNAMIC;
    }
}

static void gen_op_update1_cc(void)
{
    tcg_gen_discard_tl(cpu_cc_src);
    tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
}

static void gen_op_update2_cc(void)
{
    tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);
    tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
}

static inline void gen_op_cmpl_T0_T1_cc(void)
{
    tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);
    tcg_gen_sub_tl(cpu_cc_dst, cpu_T[0], cpu_T[1]);
}

static inline void gen_op_testl_T0_T1_cc(void)
{
    tcg_gen_discard_tl(cpu_cc_src);
    tcg_gen_and_tl(cpu_cc_dst, cpu_T[0], cpu_T[1]);
}

static void gen_op_update_neg_cc(void)
{
    tcg_gen_neg_tl(cpu_cc_src, cpu_T[0]);
    tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
}

/* compute eflags.C to reg */
static void gen_compute_eflags_c(TCGv reg)
{
    gen_helper_cc_compute_c(cpu_tmp2_i32, cpu_cc_op);
    tcg_gen_extu_i32_tl(reg, cpu_tmp2_i32);
}

/* compute all eflags to cc_src */
static void gen_compute_eflags(TCGv reg)
{
    gen_helper_cc_compute_all(cpu_tmp2_i32, cpu_cc_op);
    tcg_gen_extu_i32_tl(reg, cpu_tmp2_i32);
}

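/* Background on the lazy-flags scheme used throughout this file: instead
   of computing EFLAGS after every instruction, the translator records
   the last flag-setting operation in cc_op together with its operands in
   cc_src/cc_dst.  The helpers above materialize the carry flag or the
   whole flags word on demand from that triple, so straight-line code
   that never reads the flags pays almost nothing for them. */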
static inline void gen_setcc_slow_T0(DisasContext *s, int jcc_op)
{
    if (s->cc_op != CC_OP_DYNAMIC)
        gen_op_set_cc_op(s->cc_op);
    switch(jcc_op) {
    case JCC_O:
        gen_compute_eflags(cpu_T[0]);
        tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 11);
        tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 1);
        break;
    case JCC_B:
        gen_compute_eflags_c(cpu_T[0]);
        break;
    case JCC_Z:
        gen_compute_eflags(cpu_T[0]);
        tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 6);
        tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 1);
        break;
    case JCC_BE:
        gen_compute_eflags(cpu_tmp0);
        tcg_gen_shri_tl(cpu_T[0], cpu_tmp0, 6);
        tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
        tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 1);
        break;
    case JCC_S:
        gen_compute_eflags(cpu_T[0]);
        tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 7);
        tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 1);
        break;
    case JCC_P:
        gen_compute_eflags(cpu_T[0]);
        tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 2);
        tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 1);
        break;
    case JCC_L:
        gen_compute_eflags(cpu_tmp0);
        tcg_gen_shri_tl(cpu_T[0], cpu_tmp0, 11); /* CC_O */
        tcg_gen_shri_tl(cpu_tmp0, cpu_tmp0, 7); /* CC_S */
        tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
        tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 1);
        break;
    default:
    case JCC_LE:
        gen_compute_eflags(cpu_tmp0);
        tcg_gen_shri_tl(cpu_T[0], cpu_tmp0, 11); /* CC_O */
        tcg_gen_shri_tl(cpu_tmp4, cpu_tmp0, 7); /* CC_S */
        tcg_gen_shri_tl(cpu_tmp0, cpu_tmp0, 6); /* CC_Z */
        tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_tmp4);
        tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
        tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 1);
        break;
    }
}

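/* The shift counts above are the EFLAGS bit positions: CF is bit 0,
   PF bit 2, ZF bit 6, SF bit 7 and OF bit 11.  The signed conditions
   are derived from them, e.g. JCC_L ("less") is SF != OF, hence the
   shift-then-xor sequence in that case. */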
/* return true if setcc_slow is not needed (WARNING: must be kept in
   sync with gen_jcc1) */
static int is_fast_jcc_case(DisasContext *s, int b)
{
    int jcc_op;
    jcc_op = (b >> 1) & 7;
    switch(s->cc_op) {
        /* we optimize the cmp/jcc case */
    case CC_OP_SUBB:
    case CC_OP_SUBW:
    case CC_OP_SUBL:
    case CC_OP_SUBQ:
        if (jcc_op == JCC_O || jcc_op == JCC_P)
            goto slow_jcc;
        break;

        /* some jumps are easy to compute */
    case CC_OP_ADDB:
    case CC_OP_ADDW:
    case CC_OP_ADDL:
    case CC_OP_ADDQ:

    case CC_OP_LOGICB:
    case CC_OP_LOGICW:
    case CC_OP_LOGICL:
    case CC_OP_LOGICQ:

    case CC_OP_INCB:
    case CC_OP_INCW:
    case CC_OP_INCL:
    case CC_OP_INCQ:

    case CC_OP_DECB:
    case CC_OP_DECW:
    case CC_OP_DECL:
    case CC_OP_DECQ:

    case CC_OP_SHLB:
    case CC_OP_SHLW:
    case CC_OP_SHLL:
    case CC_OP_SHLQ:
        if (jcc_op != JCC_Z && jcc_op != JCC_S)
            goto slow_jcc;
        break;
    default:
    slow_jcc:
        return 0;
    }
    return 1;
}

/* generate a conditional jump to label 'l1' according to jump opcode
   value 'b'. In the fast case, T0 is guaranteed not to be used. */
static inline void gen_jcc1(DisasContext *s, int cc_op, int b, int l1)
{
    int inv, jcc_op, size, cond;
    TCGv t0;

    inv = b & 1;
    jcc_op = (b >> 1) & 7;

    switch(cc_op) {
        /* we optimize the cmp/jcc case */
    case CC_OP_SUBB:
    case CC_OP_SUBW:
    case CC_OP_SUBL:
    case CC_OP_SUBQ:

        size = cc_op - CC_OP_SUBB;
        switch(jcc_op) {
        case JCC_Z:
        fast_jcc_z:
            switch(size) {
            case 0:
                tcg_gen_andi_tl(cpu_tmp0, cpu_cc_dst, 0xff);
                t0 = cpu_tmp0;
                break;
            case 1:
                tcg_gen_andi_tl(cpu_tmp0, cpu_cc_dst, 0xffff);
                t0 = cpu_tmp0;
                break;
#ifdef TARGET_X86_64
            case 2:
                tcg_gen_andi_tl(cpu_tmp0, cpu_cc_dst, 0xffffffff);
                t0 = cpu_tmp0;
                break;
#endif
            default:
                t0 = cpu_cc_dst;
                break;
            }
            tcg_gen_brcondi_tl(inv ? TCG_COND_NE : TCG_COND_EQ, t0, 0, l1);
            break;
        case JCC_S:
        fast_jcc_s:
            switch(size) {
            case 0:
                tcg_gen_andi_tl(cpu_tmp0, cpu_cc_dst, 0x80);
                tcg_gen_brcondi_tl(inv ? TCG_COND_EQ : TCG_COND_NE, cpu_tmp0,
                                   0, l1);
                break;
            case 1:
                tcg_gen_andi_tl(cpu_tmp0, cpu_cc_dst, 0x8000);
                tcg_gen_brcondi_tl(inv ? TCG_COND_EQ : TCG_COND_NE, cpu_tmp0,
                                   0, l1);
                break;
#ifdef TARGET_X86_64
            case 2:
                tcg_gen_andi_tl(cpu_tmp0, cpu_cc_dst, 0x80000000);
                tcg_gen_brcondi_tl(inv ? TCG_COND_EQ : TCG_COND_NE, cpu_tmp0,
                                   0, l1);
                break;
#endif
            default:
                tcg_gen_brcondi_tl(inv ? TCG_COND_GE : TCG_COND_LT, cpu_cc_dst,
                                   0, l1);
                break;
            }
            break;

        case JCC_B:
            cond = inv ? TCG_COND_GEU : TCG_COND_LTU;
            goto fast_jcc_b;
        case JCC_BE:
            cond = inv ? TCG_COND_GTU : TCG_COND_LEU;
        fast_jcc_b:
            tcg_gen_add_tl(cpu_tmp4, cpu_cc_dst, cpu_cc_src);
            switch(size) {
            case 0:
                t0 = cpu_tmp0;
                tcg_gen_andi_tl(cpu_tmp4, cpu_tmp4, 0xff);
                tcg_gen_andi_tl(t0, cpu_cc_src, 0xff);
                break;
            case 1:
                t0 = cpu_tmp0;
                tcg_gen_andi_tl(cpu_tmp4, cpu_tmp4, 0xffff);
                tcg_gen_andi_tl(t0, cpu_cc_src, 0xffff);
                break;
#ifdef TARGET_X86_64
            case 2:
                t0 = cpu_tmp0;
                tcg_gen_andi_tl(cpu_tmp4, cpu_tmp4, 0xffffffff);
                tcg_gen_andi_tl(t0, cpu_cc_src, 0xffffffff);
                break;
#endif
            default:
                t0 = cpu_cc_src;
                break;
            }
            tcg_gen_brcond_tl(cond, cpu_tmp4, t0, l1);
            break;

        case JCC_L:
            cond = inv ? TCG_COND_GE : TCG_COND_LT;
            goto fast_jcc_l;
        case JCC_LE:
            cond = inv ? TCG_COND_GT : TCG_COND_LE;
        fast_jcc_l:
            tcg_gen_add_tl(cpu_tmp4, cpu_cc_dst, cpu_cc_src);
            switch(size) {
            case 0:
                t0 = cpu_tmp0;
                tcg_gen_ext8s_tl(cpu_tmp4, cpu_tmp4);
                tcg_gen_ext8s_tl(t0, cpu_cc_src);
                break;
            case 1:
                t0 = cpu_tmp0;
                tcg_gen_ext16s_tl(cpu_tmp4, cpu_tmp4);
                tcg_gen_ext16s_tl(t0, cpu_cc_src);
                break;
#ifdef TARGET_X86_64
            case 2:
                t0 = cpu_tmp0;
                tcg_gen_ext32s_tl(cpu_tmp4, cpu_tmp4);
                tcg_gen_ext32s_tl(t0, cpu_cc_src);
                break;
#endif
            default:
                t0 = cpu_cc_src;
                break;
            }
            tcg_gen_brcond_tl(cond, cpu_tmp4, t0, l1);
            break;

        default:
            goto slow_jcc;
        }
        break;

        /* some jumps are easy to compute */
    case CC_OP_ADDB:
    case CC_OP_ADDW:
    case CC_OP_ADDL:
    case CC_OP_ADDQ:

    case CC_OP_ADCB:
    case CC_OP_ADCW:
    case CC_OP_ADCL:
    case CC_OP_ADCQ:

    case CC_OP_SBBB:
    case CC_OP_SBBW:
    case CC_OP_SBBL:
    case CC_OP_SBBQ:

    case CC_OP_LOGICB:
    case CC_OP_LOGICW:
    case CC_OP_LOGICL:
    case CC_OP_LOGICQ:

    case CC_OP_INCB:
    case CC_OP_INCW:
    case CC_OP_INCL:
    case CC_OP_INCQ:

    case CC_OP_DECB:
    case CC_OP_DECW:
    case CC_OP_DECL:
    case CC_OP_DECQ:

    case CC_OP_SHLB:
    case CC_OP_SHLW:
    case CC_OP_SHLL:
    case CC_OP_SHLQ:

    case CC_OP_SARB:
    case CC_OP_SARW:
    case CC_OP_SARL:
    case CC_OP_SARQ:
        switch(jcc_op) {
        case JCC_Z:
            size = (cc_op - CC_OP_ADDB) & 3;
            goto fast_jcc_z;
        case JCC_S:
            size = (cc_op - CC_OP_ADDB) & 3;
            goto fast_jcc_s;
        default:
            goto slow_jcc;
        }
        break;
    default:
    slow_jcc:
        gen_setcc_slow_T0(s, jcc_op);
        tcg_gen_brcondi_tl(inv ? TCG_COND_EQ : TCG_COND_NE,
                           cpu_T[0], 0, l1);
        break;
    }
}

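/* Worked example of the cmp/jcc fast path above: after "cmp a, b" the
   lazy flags hold cc_dst = a - b and cc_src = b, so the first operand is
   recovered as cc_dst + cc_src.  A following "jb" (unsigned below,
   JCC_B) then becomes a single TCG brcond comparing a LTU b, with no
   EFLAGS materialization at all. */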
/* XXX: does not work with gdbstub "ice" single step - not a
   serious problem */
static int gen_jz_ecx_string(DisasContext *s, target_ulong next_eip)
{
    int l1, l2;

    l1 = gen_new_label();
    l2 = gen_new_label();
    gen_op_jnz_ecx(s->aflag, l1);
    gen_set_label(l2);
    gen_jmp_tb(s, next_eip, 1);
    gen_set_label(l1);
    return l2;
}

static inline void gen_stos(DisasContext *s, int ot)
{
    gen_op_mov_TN_reg(OT_LONG, 0, R_EAX);
    gen_string_movl_A0_EDI(s);
    gen_op_st_T0_A0(ot + s->mem_index);
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_EDI);
}

static inline void gen_lods(DisasContext *s, int ot)
{
    gen_string_movl_A0_ESI(s);
    gen_op_ld_T0_A0(ot + s->mem_index);
    gen_op_mov_reg_T0(ot, R_EAX);
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_ESI);
}

static inline void gen_scas(DisasContext *s, int ot)
{
    gen_op_mov_TN_reg(OT_LONG, 0, R_EAX);
    gen_string_movl_A0_EDI(s);
    gen_op_ld_T1_A0(ot + s->mem_index);
    gen_op_cmpl_T0_T1_cc();
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_EDI);
}

static inline void gen_cmps(DisasContext *s, int ot)
{
    gen_string_movl_A0_ESI(s);
    gen_op_ld_T0_A0(ot + s->mem_index);
    gen_string_movl_A0_EDI(s);
    gen_op_ld_T1_A0(ot + s->mem_index);
    gen_op_cmpl_T0_T1_cc();
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_ESI);
    gen_op_add_reg_T0(s->aflag, R_EDI);
}

static inline void gen_ins(DisasContext *s, int ot)
{
    if (use_icount)
        gen_io_start();
    gen_string_movl_A0_EDI(s);
    /* Note: we must do this dummy write first to be restartable in
       case of page fault. */
    gen_op_movl_T0_0();
    gen_op_st_T0_A0(ot + s->mem_index);
    gen_op_mov_TN_reg(OT_WORD, 1, R_EDX);
    tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[1]);
    tcg_gen_andi_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0xffff);
    gen_helper_in_func(ot, cpu_T[0], cpu_tmp2_i32);
    gen_op_st_T0_A0(ot + s->mem_index);
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_EDI);
    if (use_icount)
        gen_io_end();
}

static inline void gen_outs(DisasContext *s, int ot)
{
    if (use_icount)
        gen_io_start();
    gen_string_movl_A0_ESI(s);
    gen_op_ld_T0_A0(ot + s->mem_index);

    gen_op_mov_TN_reg(OT_WORD, 1, R_EDX);
    tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[1]);
    tcg_gen_andi_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0xffff);
    tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[0]);
    gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);

    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_ESI);
    if (use_icount)
        gen_io_end();
}

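/* The gen_io_start()/gen_io_end() bracketing in gen_ins/gen_outs exists
   for icount mode: when instruction counting is enabled, I/O accesses
   must happen at an exact instruction count, so the bracket forces the
   translation block to end right after the I/O operation. */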
/* same method as Valgrind: we generate jumps to current or next
   instruction */
#define GEN_REPZ(op)                                                          \
static inline void gen_repz_ ## op(DisasContext *s, int ot,                   \
                                 target_ulong cur_eip, target_ulong next_eip) \
{                                                                             \
    int l2;                                                                   \
    gen_update_cc_op(s);                                                      \
    l2 = gen_jz_ecx_string(s, next_eip);                                      \
    gen_ ## op(s, ot);                                                        \
    gen_op_add_reg_im(s->aflag, R_ECX, -1);                                   \
    /* a loop would cause two single step exceptions if ECX = 1               \
       before rep string_insn */                                              \
    if (!s->jmp_opt)                                                          \
        gen_op_jz_ecx(s->aflag, l2);                                          \
    gen_jmp(s, cur_eip);                                                      \
}

#define GEN_REPZ2(op)                                                         \
static inline void gen_repz_ ## op(DisasContext *s, int ot,                   \
                                   target_ulong cur_eip,                      \
                                   target_ulong next_eip,                     \
                                   int nz)                                    \
{                                                                             \
    int l2;                                                                   \
    gen_update_cc_op(s);                                                      \
    l2 = gen_jz_ecx_string(s, next_eip);                                      \
    gen_ ## op(s, ot);                                                        \
    gen_op_add_reg_im(s->aflag, R_ECX, -1);                                   \
    gen_op_set_cc_op(CC_OP_SUBB + ot);                                        \
    gen_jcc1(s, CC_OP_SUBB + ot, (JCC_Z << 1) | (nz ^ 1), l2);                \
    if (!s->jmp_opt)                                                          \
        gen_op_jz_ecx(s->aflag, l2);                                          \
    gen_jmp(s, cur_eip);                                                      \
}

GEN_REPZ(movs)
GEN_REPZ(stos)
GEN_REPZ(lods)
GEN_REPZ(ins)
GEN_REPZ(outs)
GEN_REPZ2(scas)
GEN_REPZ2(cmps)

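/* Shape of the generated rep loop: instead of emitting a real loop, one
   iteration of the string operation is translated, ECX is decremented,
   and control jumps back to cur_eip so the same instruction is
   re-entered; gen_jz_ecx_string supplies the early exit to next_eip when
   ECX is already zero.  GEN_REPZ2 additionally tests ZF for the
   repz/repnz forms of scas and cmps. */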
static void gen_helper_fp_arith_ST0_FT0(int op)
{
    switch (op) {
    case 0: gen_helper_fadd_ST0_FT0(); break;
    case 1: gen_helper_fmul_ST0_FT0(); break;
    case 2: gen_helper_fcom_ST0_FT0(); break;
    case 3: gen_helper_fcom_ST0_FT0(); break;
    case 4: gen_helper_fsub_ST0_FT0(); break;
    case 5: gen_helper_fsubr_ST0_FT0(); break;
    case 6: gen_helper_fdiv_ST0_FT0(); break;
    case 7: gen_helper_fdivr_ST0_FT0(); break;
    }
}

/* NOTE the exception in "r" op ordering */
static void gen_helper_fp_arith_STN_ST0(int op, int opreg)
{
    TCGv_i32 tmp = tcg_const_i32(opreg);
    switch (op) {
    case 0: gen_helper_fadd_STN_ST0(tmp); break;
    case 1: gen_helper_fmul_STN_ST0(tmp); break;
    case 4: gen_helper_fsubr_STN_ST0(tmp); break;
    case 5: gen_helper_fsub_STN_ST0(tmp); break;
    case 6: gen_helper_fdivr_STN_ST0(tmp); break;
    case 7: gen_helper_fdiv_STN_ST0(tmp); break;
    }
}

/* if d == OR_TMP0, it means memory operand (address in A0) */
static void gen_op(DisasContext *s1, int op, int ot, int d)
{
    if (d != OR_TMP0) {
        gen_op_mov_TN_reg(ot, 0, d);
    } else {
        gen_op_ld_T0_A0(ot + s1->mem_index);
    }
    switch(op) {
    case OP_ADCL:
        if (s1->cc_op != CC_OP_DYNAMIC)
            gen_op_set_cc_op(s1->cc_op);
        gen_compute_eflags_c(cpu_tmp4);
        tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_tmp4);
        if (d != OR_TMP0)
            gen_op_mov_reg_T0(ot, d);
        else
            gen_op_st_T0_A0(ot + s1->mem_index);
        tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);
        tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_tmp4);
        tcg_gen_shli_i32(cpu_tmp2_i32, cpu_tmp2_i32, 2);
        tcg_gen_addi_i32(cpu_cc_op, cpu_tmp2_i32, CC_OP_ADDB + ot);
        s1->cc_op = CC_OP_DYNAMIC;
        break;
    case OP_SBBL:
        if (s1->cc_op != CC_OP_DYNAMIC)
            gen_op_set_cc_op(s1->cc_op);
        gen_compute_eflags_c(cpu_tmp4);
        tcg_gen_sub_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        tcg_gen_sub_tl(cpu_T[0], cpu_T[0], cpu_tmp4);
        if (d != OR_TMP0)
            gen_op_mov_reg_T0(ot, d);
        else
            gen_op_st_T0_A0(ot + s1->mem_index);
        tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);
        tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_tmp4);
        tcg_gen_shli_i32(cpu_tmp2_i32, cpu_tmp2_i32, 2);
        tcg_gen_addi_i32(cpu_cc_op, cpu_tmp2_i32, CC_OP_SUBB + ot);
        s1->cc_op = CC_OP_DYNAMIC;
        break;
    case OP_ADDL:
        gen_op_addl_T0_T1();
        if (d != OR_TMP0)
            gen_op_mov_reg_T0(ot, d);
        else
            gen_op_st_T0_A0(ot + s1->mem_index);
        gen_op_update2_cc();
        s1->cc_op = CC_OP_ADDB + ot;
        break;
    case OP_SUBL:
        tcg_gen_sub_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        if (d != OR_TMP0)
            gen_op_mov_reg_T0(ot, d);
        else
            gen_op_st_T0_A0(ot + s1->mem_index);
        gen_op_update2_cc();
        s1->cc_op = CC_OP_SUBB + ot;
        break;
    default:
    case OP_ANDL:
        tcg_gen_and_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        if (d != OR_TMP0)
            gen_op_mov_reg_T0(ot, d);
        else
            gen_op_st_T0_A0(ot + s1->mem_index);
        gen_op_update1_cc();
        s1->cc_op = CC_OP_LOGICB + ot;
        break;
    case OP_ORL:
        tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        if (d != OR_TMP0)
            gen_op_mov_reg_T0(ot, d);
        else
            gen_op_st_T0_A0(ot + s1->mem_index);
        gen_op_update1_cc();
        s1->cc_op = CC_OP_LOGICB + ot;
        break;
    case OP_XORL:
        tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        if (d != OR_TMP0)
            gen_op_mov_reg_T0(ot, d);
        else
            gen_op_st_T0_A0(ot + s1->mem_index);
        gen_op_update1_cc();
        s1->cc_op = CC_OP_LOGICB + ot;
        break;
    case OP_CMPL:
        gen_op_cmpl_T0_T1_cc();
        s1->cc_op = CC_OP_SUBB + ot;
        break;
    }
}

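/* Note on the ADC/SBB cases above: the computed carry (0 or 1) is
   shifted left by 2 and added to CC_OP_ADDB + ot (resp. CC_OP_SUBB + ot),
   relying on the CC_OP_* enum placing the four size variants of ADC/SBB
   directly after those of ADD/SUB; cc_op therefore ends up as the
   matching ADC/SBB op exactly when the carry was set, and must be
   treated as dynamic afterwards. */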
/* if d == OR_TMP0, it means memory operand (address in A0) */
static void gen_inc(DisasContext *s1, int ot, int d, int c)
{
    if (d != OR_TMP0)
        gen_op_mov_TN_reg(ot, 0, d);
    else
        gen_op_ld_T0_A0(ot + s1->mem_index);
    if (s1->cc_op != CC_OP_DYNAMIC)
        gen_op_set_cc_op(s1->cc_op);
    if (c > 0) {
        tcg_gen_addi_tl(cpu_T[0], cpu_T[0], 1);
        s1->cc_op = CC_OP_INCB + ot;
    } else {
        tcg_gen_addi_tl(cpu_T[0], cpu_T[0], -1);
        s1->cc_op = CC_OP_DECB + ot;
    }
    if (d != OR_TMP0)
        gen_op_mov_reg_T0(ot, d);
    else
        gen_op_st_T0_A0(ot + s1->mem_index);
    gen_compute_eflags_c(cpu_cc_src);
    tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
}

static void gen_shift_rm_T1(DisasContext *s, int ot, int op1,
                            int is_right, int is_arith)
{
    target_ulong mask;
    int shift_label;
    TCGv t0, t1;

    if (ot == OT_QUAD)
        mask = 0x3f;
    else
        mask = 0x1f;

    /* load */
    if (op1 == OR_TMP0)
        gen_op_ld_T0_A0(ot + s->mem_index);
    else
        gen_op_mov_TN_reg(ot, 0, op1);

    tcg_gen_andi_tl(cpu_T[1], cpu_T[1], mask);

    tcg_gen_addi_tl(cpu_tmp5, cpu_T[1], -1);

    if (is_right) {
        if (is_arith) {
            gen_exts(ot, cpu_T[0]);
            tcg_gen_sar_tl(cpu_T3, cpu_T[0], cpu_tmp5);
            tcg_gen_sar_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        } else {
            gen_extu(ot, cpu_T[0]);
            tcg_gen_shr_tl(cpu_T3, cpu_T[0], cpu_tmp5);
            tcg_gen_shr_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        }
    } else {
        tcg_gen_shl_tl(cpu_T3, cpu_T[0], cpu_tmp5);
        tcg_gen_shl_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
    }

    /* store */
    if (op1 == OR_TMP0)
        gen_op_st_T0_A0(ot + s->mem_index);
    else
        gen_op_mov_reg_T0(ot, op1);

    /* update eflags if non zero shift */
    if (s->cc_op != CC_OP_DYNAMIC)
        gen_op_set_cc_op(s->cc_op);

    /* XXX: inefficient */
    t0 = tcg_temp_local_new();
    t1 = tcg_temp_local_new();

    tcg_gen_mov_tl(t0, cpu_T[0]);
    tcg_gen_mov_tl(t1, cpu_T3);

    shift_label = gen_new_label();
    tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_T[1], 0, shift_label);

    tcg_gen_mov_tl(cpu_cc_src, t1);
    tcg_gen_mov_tl(cpu_cc_dst, t0);
    if (is_right)
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_SARB + ot);
    else
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_SHLB + ot);

    gen_set_label(shift_label);
    s->cc_op = CC_OP_DYNAMIC; /* cannot predict flags after */

    tcg_temp_free(t0);
    tcg_temp_free(t1);
}

static void gen_shift_rm_im(DisasContext *s, int ot, int op1, int op2,
                            int is_right, int is_arith)
{
    int mask;

    if (ot == OT_QUAD)
        mask = 0x3f;
    else
        mask = 0x1f;

    /* load */
    if (op1 == OR_TMP0)
        gen_op_ld_T0_A0(ot + s->mem_index);
    else
        gen_op_mov_TN_reg(ot, 0, op1);

    op2 &= mask;
    if (op2 != 0) {
        if (is_right) {
            if (is_arith) {
                gen_exts(ot, cpu_T[0]);
                tcg_gen_sari_tl(cpu_tmp4, cpu_T[0], op2 - 1);
                tcg_gen_sari_tl(cpu_T[0], cpu_T[0], op2);
            } else {
                gen_extu(ot, cpu_T[0]);
                tcg_gen_shri_tl(cpu_tmp4, cpu_T[0], op2 - 1);
                tcg_gen_shri_tl(cpu_T[0], cpu_T[0], op2);
            }
        } else {
            tcg_gen_shli_tl(cpu_tmp4, cpu_T[0], op2 - 1);
            tcg_gen_shli_tl(cpu_T[0], cpu_T[0], op2);
        }
    }

    /* store */
    if (op1 == OR_TMP0)
        gen_op_st_T0_A0(ot + s->mem_index);
    else
        gen_op_mov_reg_T0(ot, op1);

    /* update eflags if non zero shift */
    if (op2 != 0) {
        tcg_gen_mov_tl(cpu_cc_src, cpu_tmp4);
        tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
        if (is_right)
            s->cc_op = CC_OP_SARB + ot;
        else
            s->cc_op = CC_OP_SHLB + ot;
    }
}

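/* x86 semantics behind both shift variants: a shift count of zero must
   leave EFLAGS untouched, which is why the count is masked (0x1f, or
   0x3f for 64-bit operands) and the flag update is skipped at translate
   time (immediate form) or branched around at run time (CL form).  The
   value shifted by count - 1 is kept so the lazy-flag state holds the
   bit that becomes CF. */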
static inline void tcg_gen_lshift(TCGv ret, TCGv arg1, target_long arg2)
{
    if (arg2 >= 0)
        tcg_gen_shli_tl(ret, arg1, arg2);
    else
        tcg_gen_shri_tl(ret, arg1, -arg2);
}

static void gen_rot_rm_T1(DisasContext *s, int ot, int op1,
                          int is_right)
{
    target_ulong mask;
    int label1, label2, data_bits;
    TCGv t0, t1, t2, a0;

    /* XXX: inefficient, but we must use local temps */
    t0 = tcg_temp_local_new();
    t1 = tcg_temp_local_new();
    t2 = tcg_temp_local_new();
    a0 = tcg_temp_local_new();

    if (ot == OT_QUAD)
        mask = 0x3f;
    else
        mask = 0x1f;

    /* load */
    if (op1 == OR_TMP0) {
        tcg_gen_mov_tl(a0, cpu_A0);
        gen_op_ld_v(ot + s->mem_index, t0, a0);
    } else {
        gen_op_mov_v_reg(ot, t0, op1);
    }

    tcg_gen_mov_tl(t1, cpu_T[1]);

    tcg_gen_andi_tl(t1, t1, mask);

    /* Must test zero case to avoid using undefined behaviour in TCG
       shifts. */
    label1 = gen_new_label();
    tcg_gen_brcondi_tl(TCG_COND_EQ, t1, 0, label1);

    if (ot <= OT_WORD)
        tcg_gen_andi_tl(cpu_tmp0, t1, (1 << (3 + ot)) - 1);
    else
        tcg_gen_mov_tl(cpu_tmp0, t1);

    gen_extu(ot, t0);
    tcg_gen_mov_tl(t2, t0);

    data_bits = 8 << ot;
    /* XXX: rely on behaviour of shifts when operand 2 overflows (XXX:
       fix TCG definition) */
    if (is_right) {
        tcg_gen_shr_tl(cpu_tmp4, t0, cpu_tmp0);
        tcg_gen_subfi_tl(cpu_tmp0, data_bits, cpu_tmp0);
        tcg_gen_shl_tl(t0, t0, cpu_tmp0);
    } else {
        tcg_gen_shl_tl(cpu_tmp4, t0, cpu_tmp0);
        tcg_gen_subfi_tl(cpu_tmp0, data_bits, cpu_tmp0);
        tcg_gen_shr_tl(t0, t0, cpu_tmp0);
    }
    tcg_gen_or_tl(t0, t0, cpu_tmp4);

    gen_set_label(label1);
    /* store */
    if (op1 == OR_TMP0) {
        gen_op_st_v(ot + s->mem_index, t0, a0);
    } else {
        gen_op_mov_reg_v(ot, op1, t0);
    }

    /* update eflags */
    if (s->cc_op != CC_OP_DYNAMIC)
        gen_op_set_cc_op(s->cc_op);

    label2 = gen_new_label();
    tcg_gen_brcondi_tl(TCG_COND_EQ, t1, 0, label2);

    gen_compute_eflags(cpu_cc_src);
    tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~(CC_O | CC_C));
    tcg_gen_xor_tl(cpu_tmp0, t2, t0);
    tcg_gen_lshift(cpu_tmp0, cpu_tmp0, 11 - (data_bits - 1));
    tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, CC_O);
    tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, cpu_tmp0);
    if (is_right) {
        tcg_gen_shri_tl(t0, t0, data_bits - 1);
    }
    tcg_gen_andi_tl(t0, t0, CC_C);
    tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t0);

    tcg_gen_discard_tl(cpu_cc_dst);
    tcg_gen_movi_i32(cpu_cc_op, CC_OP_EFLAGS);

    gen_set_label(label2);
    s->cc_op = CC_OP_DYNAMIC; /* cannot predict flags after */

    tcg_temp_free(t0);
    tcg_temp_free(t1);
    tcg_temp_free(t2);
    tcg_temp_free(a0);
}

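/* Flag computation in the rotate path above: rotates only touch CF and
   OF.  CF is taken from the bit that ends up in the carry position
   (result bit 0 after a left rotate, the MSB shifted down for a right
   rotate), and OF is derived from (old ^ new), repositioned with
   tcg_gen_lshift so that the operand's MSB lands on EFLAGS bit 11. */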
static void gen_rot_rm_im(DisasContext *s, int ot, int op1, int op2,
                          int is_right)
{
    int mask;
    int data_bits;
    TCGv t0, t1, a0;

    /* XXX: inefficient, but we must use local temps */
    t0 = tcg_temp_local_new();
    t1 = tcg_temp_local_new();
    a0 = tcg_temp_local_new();

    if (ot == OT_QUAD)
        mask = 0x3f;
    else
        mask = 0x1f;

    /* load */
    if (op1 == OR_TMP0) {
        tcg_gen_mov_tl(a0, cpu_A0);
        gen_op_ld_v(ot + s->mem_index, t0, a0);
    } else {
        gen_op_mov_v_reg(ot, t0, op1);
    }

    gen_extu(ot, t0);
    tcg_gen_mov_tl(t1, t0);

    op2 &= mask;
    data_bits = 8 << ot;
    if (op2 != 0) {
        int shift = op2 & ((1 << (3 + ot)) - 1);
        if (is_right) {
            tcg_gen_shri_tl(cpu_tmp4, t0, shift);
            tcg_gen_shli_tl(t0, t0, data_bits - shift);
        }
        else {
            tcg_gen_shli_tl(cpu_tmp4, t0, shift);
            tcg_gen_shri_tl(t0, t0, data_bits - shift);
        }
        tcg_gen_or_tl(t0, t0, cpu_tmp4);
    }

    /* store */
    if (op1 == OR_TMP0) {
        gen_op_st_v(ot + s->mem_index, t0, a0);
    } else {
        gen_op_mov_reg_v(ot, op1, t0);
    }

    if (op2 != 0) {
        /* update eflags */
        if (s->cc_op != CC_OP_DYNAMIC)
            gen_op_set_cc_op(s->cc_op);

        gen_compute_eflags(cpu_cc_src);
        tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~(CC_O | CC_C));
        tcg_gen_xor_tl(cpu_tmp0, t1, t0);
        tcg_gen_lshift(cpu_tmp0, cpu_tmp0, 11 - (data_bits - 1));
        tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, CC_O);
        tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, cpu_tmp0);
        if (is_right) {
            tcg_gen_shri_tl(t0, t0, data_bits - 1);
        }
        tcg_gen_andi_tl(t0, t0, CC_C);
        tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t0);

        tcg_gen_discard_tl(cpu_cc_dst);
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_EFLAGS);
        s->cc_op = CC_OP_EFLAGS;
    }

    tcg_temp_free(t0);
    tcg_temp_free(t1);
    tcg_temp_free(a0);
}

/* XXX: add faster immediate = 1 case */
static void gen_rotc_rm_T1(DisasContext *s, int ot, int op1,
                           int is_right)
{
    int label1;

    if (s->cc_op != CC_OP_DYNAMIC)
        gen_op_set_cc_op(s->cc_op);

    /* load */
    if (op1 == OR_TMP0)
        gen_op_ld_T0_A0(ot + s->mem_index);
    else
        gen_op_mov_TN_reg(ot, 0, op1);

    if (is_right) {
        switch (ot) {
        case 0: gen_helper_rcrb(cpu_T[0], cpu_T[0], cpu_T[1]); break;
        case 1: gen_helper_rcrw(cpu_T[0], cpu_T[0], cpu_T[1]); break;
        case 2: gen_helper_rcrl(cpu_T[0], cpu_T[0], cpu_T[1]); break;
#ifdef TARGET_X86_64
        case 3: gen_helper_rcrq(cpu_T[0], cpu_T[0], cpu_T[1]); break;
#endif
        }
    } else {
        switch (ot) {
        case 0: gen_helper_rclb(cpu_T[0], cpu_T[0], cpu_T[1]); break;
        case 1: gen_helper_rclw(cpu_T[0], cpu_T[0], cpu_T[1]); break;
        case 2: gen_helper_rcll(cpu_T[0], cpu_T[0], cpu_T[1]); break;
#ifdef TARGET_X86_64
        case 3: gen_helper_rclq(cpu_T[0], cpu_T[0], cpu_T[1]); break;
#endif
        }
    }
    /* store */
    if (op1 == OR_TMP0)
        gen_op_st_T0_A0(ot + s->mem_index);
    else
        gen_op_mov_reg_T0(ot, op1);

    /* update eflags */
    label1 = gen_new_label();
    tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_cc_tmp, -1, label1);

    tcg_gen_mov_tl(cpu_cc_src, cpu_cc_tmp);
    tcg_gen_discard_tl(cpu_cc_dst);
    tcg_gen_movi_i32(cpu_cc_op, CC_OP_EFLAGS);

    gen_set_label(label1);
    s->cc_op = CC_OP_DYNAMIC; /* cannot predict flags after */
}

/* XXX: add faster immediate case */
static void gen_shiftd_rm_T1_T3(DisasContext *s, int ot, int op1,
                                int is_right)
{
    int label1, label2, data_bits;
    target_ulong mask;
    TCGv t0, t1, t2, a0;

    t0 = tcg_temp_local_new();
    t1 = tcg_temp_local_new();
    t2 = tcg_temp_local_new();
    a0 = tcg_temp_local_new();

    if (ot == OT_QUAD)
        mask = 0x3f;
    else
        mask = 0x1f;

    /* load */
    if (op1 == OR_TMP0) {
        tcg_gen_mov_tl(a0, cpu_A0);
        gen_op_ld_v(ot + s->mem_index, t0, a0);
    } else {
        gen_op_mov_v_reg(ot, t0, op1);
    }

    tcg_gen_andi_tl(cpu_T3, cpu_T3, mask);

    tcg_gen_mov_tl(t1, cpu_T[1]);
    tcg_gen_mov_tl(t2, cpu_T3);

    /* Must test zero case to avoid using undefined behaviour in TCG
       shifts. */
    label1 = gen_new_label();
    tcg_gen_brcondi_tl(TCG_COND_EQ, t2, 0, label1);

    tcg_gen_addi_tl(cpu_tmp5, t2, -1);
    if (ot == OT_WORD) {
        /* Note: we implement the Intel behaviour for shift count > 16 */
        if (is_right) {
            tcg_gen_andi_tl(t0, t0, 0xffff);
            tcg_gen_shli_tl(cpu_tmp0, t1, 16);
            tcg_gen_or_tl(t0, t0, cpu_tmp0);
            tcg_gen_ext32u_tl(t0, t0);

            tcg_gen_shr_tl(cpu_tmp4, t0, cpu_tmp5);

            /* only needed if count > 16, but a test would complicate things */
            tcg_gen_subfi_tl(cpu_tmp5, 32, t2);
            tcg_gen_shl_tl(cpu_tmp0, t0, cpu_tmp5);

            tcg_gen_shr_tl(t0, t0, t2);

            tcg_gen_or_tl(t0, t0, cpu_tmp0);
        } else {
            /* XXX: not optimal */
            tcg_gen_andi_tl(t0, t0, 0xffff);
            tcg_gen_shli_tl(t1, t1, 16);
            tcg_gen_or_tl(t1, t1, t0);
            tcg_gen_ext32u_tl(t1, t1);

            tcg_gen_shl_tl(cpu_tmp4, t0, cpu_tmp5);
            tcg_gen_subfi_tl(cpu_tmp0, 32, cpu_tmp5);
            tcg_gen_shr_tl(cpu_tmp5, t1, cpu_tmp0);
            tcg_gen_or_tl(cpu_tmp4, cpu_tmp4, cpu_tmp5);

            tcg_gen_shl_tl(t0, t0, t2);
            tcg_gen_subfi_tl(cpu_tmp5, 32, t2);
            tcg_gen_shr_tl(t1, t1, cpu_tmp5);
            tcg_gen_or_tl(t0, t0, t1);
        }
    } else {
        data_bits = 8 << ot;
        if (is_right) {
            if (ot == OT_LONG)
                tcg_gen_ext32u_tl(t0, t0);

            tcg_gen_shr_tl(cpu_tmp4, t0, cpu_tmp5);

            tcg_gen_shr_tl(t0, t0, t2);
            tcg_gen_subfi_tl(cpu_tmp5, data_bits, t2);
            tcg_gen_shl_tl(t1, t1, cpu_tmp5);
            tcg_gen_or_tl(t0, t0, t1);

        } else {
            if (ot == OT_LONG)
                tcg_gen_ext32u_tl(t1, t1);

            tcg_gen_shl_tl(cpu_tmp4, t0, cpu_tmp5);

            tcg_gen_shl_tl(t0, t0, t2);
            tcg_gen_subfi_tl(cpu_tmp5, data_bits, t2);
            tcg_gen_shr_tl(t1, t1, cpu_tmp5);
            tcg_gen_or_tl(t0, t0, t1);
        }
    }
    tcg_gen_mov_tl(t1, cpu_tmp4);

    gen_set_label(label1);
    /* store */
    if (op1 == OR_TMP0) {
        gen_op_st_v(ot + s->mem_index, t0, a0);
    } else {
        gen_op_mov_reg_v(ot, op1, t0);
    }

    /* update eflags */
    if (s->cc_op != CC_OP_DYNAMIC)
        gen_op_set_cc_op(s->cc_op);

    label2 = gen_new_label();
    tcg_gen_brcondi_tl(TCG_COND_EQ, t2, 0, label2);

    tcg_gen_mov_tl(cpu_cc_src, t1);
    tcg_gen_mov_tl(cpu_cc_dst, t0);
    if (is_right) {
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_SARB + ot);
    } else {
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_SHLB + ot);
    }
    gen_set_label(label2);
    s->cc_op = CC_OP_DYNAMIC; /* cannot predict flags after */

    tcg_temp_free(t0);
    tcg_temp_free(t1);
    tcg_temp_free(t2);
    tcg_temp_free(a0);
}

static void gen_shift(DisasContext *s1, int op, int ot, int d, int s)
{
    if (s != OR_TMP1)
        gen_op_mov_TN_reg(ot, 1, s);
    switch(op) {
    case OP_ROL:
        gen_rot_rm_T1(s1, ot, d, 0);
        break;
    case OP_ROR:
        gen_rot_rm_T1(s1, ot, d, 1);
        break;
    case OP_SHL:
    case OP_SHL1:
        gen_shift_rm_T1(s1, ot, d, 0, 0);
        break;
    case OP_SHR:
        gen_shift_rm_T1(s1, ot, d, 1, 0);
        break;
    case OP_SAR:
        gen_shift_rm_T1(s1, ot, d, 1, 1);
        break;
    case OP_RCL:
        gen_rotc_rm_T1(s1, ot, d, 0);
        break;
    case OP_RCR:
        gen_rotc_rm_T1(s1, ot, d, 1);
        break;
    }
}

static void gen_shifti(DisasContext *s1, int op, int ot, int d, int c)
{
    switch(op) {
    case OP_ROL:
        gen_rot_rm_im(s1, ot, d, c, 0);
        break;
    case OP_ROR:
        gen_rot_rm_im(s1, ot, d, c, 1);
        break;
    case OP_SHL:
    case OP_SHL1:
        gen_shift_rm_im(s1, ot, d, c, 0, 0);
        break;
    case OP_SHR:
        gen_shift_rm_im(s1, ot, d, c, 1, 0);
        break;
    case OP_SAR:
        gen_shift_rm_im(s1, ot, d, c, 1, 1);
        break;
    default:
        /* currently not optimized */
        gen_op_movl_T1_im(c);
        gen_shift(s1, op, ot, d, OR_TMP1);
        break;
    }
}

2144static void gen_lea_modrm(DisasContext *s, int modrm, int *reg_ptr, int *offset_ptr)
2145{
2146 target_long disp;
2147 int havesib;
2148 int base;
2149 int index;
2150 int scale;
2151 int opreg;
2152 int mod, rm, code, override, must_add_seg;
2153
2154 override = s->override;
2155 must_add_seg = s->addseg;
2156 if (override >= 0)
2157 must_add_seg = 1;
2158 mod = (modrm >> 6) & 3;
2159 rm = modrm & 7;
2160
2161 if (s->aflag) {
2162
2163 havesib = 0;
2164 base = rm;
2165 index = 0;
2166 scale = 0;
2167
2168 if (base == 4) {
2169 havesib = 1;
2170 code = ldub_code(s->pc++);
2171 scale = (code >> 6) & 3;
2172 index = ((code >> 3) & 7) | REX_X(s);
2173 base = (code & 7);
2174 }
2175 base |= REX_B(s);
2176
2177 switch (mod) {
2178 case 0:
2179 if ((base & 7) == 5) {
2180 base = -1;
2181 disp = (int32_t)ldl_code(s->pc);
2182 s->pc += 4;
2183 if (CODE64(s) && !havesib) {
2184 disp += s->pc + s->rip_offset;
2185 }
2186 } else {
2187 disp = 0;
2188 }
2189 break;
2190 case 1:
2191 disp = (int8_t)ldub_code(s->pc++);
2192 break;
2193 default:
2194 case 2:
2195#ifdef VBOX
2196 disp = (int32_t)ldl_code(s->pc);
2197#else
2198 disp = ldl_code(s->pc);
2199#endif
2200 s->pc += 4;
2201 break;
2202 }
2203
2204 if (base >= 0) {
2205 /* for correct popl handling with esp */
2206 if (base == 4 && s->popl_esp_hack)
2207 disp += s->popl_esp_hack;
2208#ifdef TARGET_X86_64
2209 if (s->aflag == 2) {
2210 gen_op_movq_A0_reg(base);
2211 if (disp != 0) {
2212 gen_op_addq_A0_im(disp);
2213 }
2214 } else
2215#endif
2216 {
2217 gen_op_movl_A0_reg(base);
2218 if (disp != 0)
2219 gen_op_addl_A0_im(disp);
2220 }
2221 } else {
2222#ifdef TARGET_X86_64
2223 if (s->aflag == 2) {
2224 gen_op_movq_A0_im(disp);
2225 } else
2226#endif
2227 {
2228 gen_op_movl_A0_im(disp);
2229 }
2230 }
2231 /* index == 4 means no index */
2232 if (havesib && (index != 4)) {
2233#ifdef TARGET_X86_64
2234 if (s->aflag == 2) {
2235 gen_op_addq_A0_reg_sN(scale, index);
2236 } else
2237#endif
2238 {
2239 gen_op_addl_A0_reg_sN(scale, index);
2240 }
2241 }
2242 if (must_add_seg) {
2243 if (override < 0) {
2244 if (base == R_EBP || base == R_ESP)
2245 override = R_SS;
2246 else
2247 override = R_DS;
2248 }
2249#ifdef TARGET_X86_64
2250 if (s->aflag == 2) {
2251 gen_op_addq_A0_seg(override);
2252 } else
2253#endif
2254 {
2255 gen_op_addl_A0_seg(override);
2256 }
2257 }
2258 } else {
2259 switch (mod) {
2260 case 0:
2261 if (rm == 6) {
2262 disp = lduw_code(s->pc);
2263 s->pc += 2;
2264 gen_op_movl_A0_im(disp);
2265 rm = 0; /* avoid SS override */
2266 goto no_rm;
2267 } else {
2268 disp = 0;
2269 }
2270 break;
2271 case 1:
2272 disp = (int8_t)ldub_code(s->pc++);
2273 break;
2274 default:
2275 case 2:
2276 disp = lduw_code(s->pc);
2277 s->pc += 2;
2278 break;
2279 }
2280 switch(rm) {
2281 case 0:
2282 gen_op_movl_A0_reg(R_EBX);
2283 gen_op_addl_A0_reg_sN(0, R_ESI);
2284 break;
2285 case 1:
2286 gen_op_movl_A0_reg(R_EBX);
2287 gen_op_addl_A0_reg_sN(0, R_EDI);
2288 break;
2289 case 2:
2290 gen_op_movl_A0_reg(R_EBP);
2291 gen_op_addl_A0_reg_sN(0, R_ESI);
2292 break;
2293 case 3:
2294 gen_op_movl_A0_reg(R_EBP);
2295 gen_op_addl_A0_reg_sN(0, R_EDI);
2296 break;
2297 case 4:
2298 gen_op_movl_A0_reg(R_ESI);
2299 break;
2300 case 5:
2301 gen_op_movl_A0_reg(R_EDI);
2302 break;
2303 case 6:
2304 gen_op_movl_A0_reg(R_EBP);
2305 break;
2306 default:
2307 case 7:
2308 gen_op_movl_A0_reg(R_EBX);
2309 break;
2310 }
2311 if (disp != 0)
2312 gen_op_addl_A0_im(disp);
2313 gen_op_andl_A0_ffff();
2314 no_rm:
2315 if (must_add_seg) {
2316 if (override < 0) {
2317 if (rm == 2 || rm == 3 || rm == 6)
2318 override = R_SS;
2319 else
2320 override = R_DS;
2321 }
2322 gen_op_addl_A0_seg(override);
2323 }
2324 }
2325
2326 opreg = OR_A0;
2327 disp = 0;
2328 *reg_ptr = opreg;
2329 *offset_ptr = disp;
2330}
2331
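/* skip the addressing bytes of a ModRM operand without generating any
   code; used for the hint NOPs, which take a memory operand but only
   need s->pc advanced past the displacement. */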
2332static void gen_nop_modrm(DisasContext *s, int modrm)
2333{
2334 int mod, rm, base, code;
2335
2336 mod = (modrm >> 6) & 3;
2337 if (mod == 3)
2338 return;
2339 rm = modrm & 7;
2340
2341 if (s->aflag) {
2342
2343 base = rm;
2344
2345 if (base == 4) {
2346 code = ldub_code(s->pc++);
2347 base = (code & 7);
2348 }
2349
2350 switch (mod) {
2351 case 0:
2352 if (base == 5) {
2353 s->pc += 4;
2354 }
2355 break;
2356 case 1:
2357 s->pc++;
2358 break;
2359 default:
2360 case 2:
2361 s->pc += 4;
2362 break;
2363 }
2364 } else {
2365 switch (mod) {
2366 case 0:
2367 if (rm == 6) {
2368 s->pc += 2;
2369 }
2370 break;
2371 case 1:
2372 s->pc++;
2373 break;
2374 default:
2375 case 2:
2376 s->pc += 2;
2377 break;
2378 }
2379 }
2380}
2381
2382/* used for LEA and MOV AX, mem */
2383static void gen_add_A0_ds_seg(DisasContext *s)
2384{
2385 int override, must_add_seg;
2386 must_add_seg = s->addseg;
2387 override = R_DS;
2388 if (s->override >= 0) {
2389 override = s->override;
2390 must_add_seg = 1;
2391 }
2392 if (must_add_seg) {
2393#ifdef TARGET_X86_64
2394 if (CODE64(s)) {
2395 gen_op_addq_A0_seg(override);
2396 } else
2397#endif
2398 {
2399 gen_op_addl_A0_seg(override);
2400 }
2401 }
2402}
2403
2404/* generate modrm memory load or store of 'reg'. TMP0 is used if reg ==
2405 OR_TMP0 */
2406static void gen_ldst_modrm(DisasContext *s, int modrm, int ot, int reg, int is_store)
2407{
2408 int mod, rm, opreg, disp;
2409
2410 mod = (modrm >> 6) & 3;
2411 rm = (modrm & 7) | REX_B(s);
2412 if (mod == 3) {
2413 if (is_store) {
2414 if (reg != OR_TMP0)
2415 gen_op_mov_TN_reg(ot, 0, reg);
2416 gen_op_mov_reg_T0(ot, rm);
2417 } else {
2418 gen_op_mov_TN_reg(ot, 0, rm);
2419 if (reg != OR_TMP0)
2420 gen_op_mov_reg_T0(ot, reg);
2421 }
2422 } else {
2423 gen_lea_modrm(s, modrm, &opreg, &disp);
2424 if (is_store) {
2425 if (reg != OR_TMP0)
2426 gen_op_mov_TN_reg(ot, 0, reg);
2427 gen_op_st_T0_A0(ot + s->mem_index);
2428 } else {
2429 gen_op_ld_T0_A0(ot + s->mem_index);
2430 if (reg != OR_TMP0)
2431 gen_op_mov_reg_T0(ot, reg);
2432 }
2433 }
2434}
2435
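/* fetch an immediate operand of size 'ot' at s->pc and advance past it;
   there is no OT_QUAD case because full 64-bit immediates are handled
   separately by the caller, which sign-extends 32-bit values as needed. */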
2436static inline uint32_t insn_get(DisasContext *s, int ot)
2437{
2438 uint32_t ret;
2439
2440 switch(ot) {
2441 case OT_BYTE:
2442 ret = ldub_code(s->pc);
2443 s->pc++;
2444 break;
2445 case OT_WORD:
2446 ret = lduw_code(s->pc);
2447 s->pc += 2;
2448 break;
2449 default:
2450 case OT_LONG:
2451 ret = ldl_code(s->pc);
2452 s->pc += 4;
2453 break;
2454 }
2455 return ret;
2456}
2457
2458static inline int insn_const_size(unsigned int ot)
2459{
2460 if (ot <= OT_LONG)
2461 return 1 << ot;
2462 else
2463 return 4;
2464}
2465
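/* chain to the TB containing 'eip' when it lies on the same page(s) as
   the current one: tcg_gen_goto_tb emits a patchable direct jump, and
   the (tb + tb_num) value returned through tcg_gen_exit_tb tells the
   caller which of the two jump slots to patch.  Cross-page jumps end
   the block with gen_eob so that page protections are rechecked. */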
2466static inline void gen_goto_tb(DisasContext *s, int tb_num, target_ulong eip)
2467{
2468 TranslationBlock *tb;
2469 target_ulong pc;
2470
2471 pc = s->cs_base + eip;
2472 tb = s->tb;
2473 /* NOTE: we handle the case where the TB spans two pages here */
2474 if ((pc & TARGET_PAGE_MASK) == (tb->pc & TARGET_PAGE_MASK) ||
2475 (pc & TARGET_PAGE_MASK) == ((s->pc - 1) & TARGET_PAGE_MASK)) {
2476#ifdef VBOX
2477 gen_check_external_event();
2478#endif /* VBOX */
2479 /* jump to same page: we can use a direct jump */
2480 tcg_gen_goto_tb(tb_num);
2481 gen_jmp_im(eip);
2482 tcg_gen_exit_tb((intptr_t)tb + tb_num);
2483 } else {
2484 /* jump to another page: currently not optimized */
2485 gen_jmp_im(eip);
2486 gen_eob(s);
2487 }
2488}
2489
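/* conditional jump: when TB chaining is allowed, both arms become
   chainable gen_goto_tb exits; otherwise explicit branches select the
   new eip and the block ends with gen_eob. */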
2490static inline void gen_jcc(DisasContext *s, int b,
2491 target_ulong val, target_ulong next_eip)
2492{
2493 int l1, l2, cc_op;
2494
2495 cc_op = s->cc_op;
2496 gen_update_cc_op(s);
2497 if (s->jmp_opt) {
2498 l1 = gen_new_label();
2499 gen_jcc1(s, cc_op, b, l1);
2500
2501 gen_goto_tb(s, 0, next_eip);
2502
2503 gen_set_label(l1);
2504 gen_goto_tb(s, 1, val);
2505 s->is_jmp = DISAS_TB_JUMP;
2506 } else {
2507
2508 l1 = gen_new_label();
2509 l2 = gen_new_label();
2510 gen_jcc1(s, cc_op, b, l1);
2511
2512 gen_jmp_im(next_eip);
2513 tcg_gen_br(l2);
2514
2515 gen_set_label(l1);
2516 gen_jmp_im(val);
2517 gen_set_label(l2);
2518 gen_eob(s);
2519 }
2520}
2521
2522static void gen_setcc(DisasContext *s, int b)
2523{
2524 int inv, jcc_op, l1;
2525 TCGv t0;
2526
2527 if (is_fast_jcc_case(s, b)) {
2528 /* nominal case: we use a jump */
2529 /* XXX: make it faster by adding new instructions in TCG */
2530 t0 = tcg_temp_local_new();
2531 tcg_gen_movi_tl(t0, 0);
2532 l1 = gen_new_label();
2533 gen_jcc1(s, s->cc_op, b ^ 1, l1);
2534 tcg_gen_movi_tl(t0, 1);
2535 gen_set_label(l1);
2536 tcg_gen_mov_tl(cpu_T[0], t0);
2537 tcg_temp_free(t0);
2538 } else {
2539 /* slow case: it is more efficient not to generate a jump,
2540 although it is questionable whether this optimization is
2541 worth it */
2542 inv = b & 1;
2543 jcc_op = (b >> 1) & 7;
2544 gen_setcc_slow_T0(s, jcc_op);
2545 if (inv) {
2546 tcg_gen_xori_tl(cpu_T[0], cpu_T[0], 1);
2547 }
2548 }
2549}
2550
2551static inline void gen_op_movl_T0_seg(int seg_reg)
2552{
2553 tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
2554 offsetof(CPUX86State,segs[seg_reg].selector));
2555}
2556
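/* real mode / VM86 segment load: no descriptor table lookup and no
   checks, the base is simply the selector shifted left by 4. */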
2557static inline void gen_op_movl_seg_T0_vm(int seg_reg)
2558{
2559 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xffff);
2560 tcg_gen_st32_tl(cpu_T[0], cpu_env,
2561 offsetof(CPUX86State,segs[seg_reg].selector));
2562 tcg_gen_shli_tl(cpu_T[0], cpu_T[0], 4);
2563 tcg_gen_st_tl(cpu_T[0], cpu_env,
2564 offsetof(CPUX86State,segs[seg_reg].base));
2565}
2566
2567/* move T0 to seg_reg and compute if the CPU state may change. Never
2568 call this function with seg_reg == R_CS */
2569static void gen_movl_seg_T0(DisasContext *s, int seg_reg, target_ulong cur_eip)
2570{
2571 if (s->pe && !s->vm86) {
2572 /* XXX: optimize by finding processor state dynamically */
2573 if (s->cc_op != CC_OP_DYNAMIC)
2574 gen_op_set_cc_op(s->cc_op);
2575 gen_jmp_im(cur_eip);
2576 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
2577 gen_helper_load_seg(tcg_const_i32(seg_reg), cpu_tmp2_i32);
2578 /* abort translation because the addseg value may change or
2579 because ss32 may change. For R_SS, translation must always
2580 stop, as special handling is needed to inhibit hardware
2581 interrupts for the next instruction */
2582 if (seg_reg == R_SS || (s->code32 && seg_reg < R_FS))
2583 s->is_jmp = DISAS_TB_JUMP;
2584 } else {
2585 gen_op_movl_seg_T0_vm(seg_reg);
2586 if (seg_reg == R_SS)
2587 s->is_jmp = DISAS_TB_JUMP;
2588 }
2589}
2590
2591static inline int svm_is_rep(int prefixes)
2592{
2593 return ((prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) ? 8 : 0);
2594}
2595
2596static inline void
2597gen_svm_check_intercept_param(DisasContext *s, target_ulong pc_start,
2598 uint32_t type, uint64_t param)
2599{
2600 /* fast path: SVM not active */
2601 if (likely(!(s->flags & HF_SVMI_MASK)))
2602 return;
2603 if (s->cc_op != CC_OP_DYNAMIC)
2604 gen_op_set_cc_op(s->cc_op);
2605 gen_jmp_im(pc_start - s->cs_base);
2606 gen_helper_svm_check_intercept_param(tcg_const_i32(type),
2607 tcg_const_i64(param));
2608}
2609
2610static inline void
2611gen_svm_check_intercept(DisasContext *s, target_ulong pc_start, uint64_t type)
2612{
2613 gen_svm_check_intercept_param(s, pc_start, type, 0);
2614}
2615
2616static inline void gen_stack_update(DisasContext *s, int addend)
2617{
2618#ifdef TARGET_X86_64
2619 if (CODE64(s)) {
2620 gen_op_add_reg_im(2, R_ESP, addend);
2621 } else
2622#endif
2623 if (s->ss32) {
2624 gen_op_add_reg_im(1, R_ESP, addend);
2625 } else {
2626 gen_op_add_reg_im(0, R_ESP, addend);
2627 }
2628}
2629
2630/* generate a push. It depends on ss32, addseg and dflag */
2631static void gen_push_T0(DisasContext *s)
2632{
2633#ifdef TARGET_X86_64
2634 if (CODE64(s)) {
2635 gen_op_movq_A0_reg(R_ESP);
2636 if (s->dflag) {
2637 gen_op_addq_A0_im(-8);
2638 gen_op_st_T0_A0(OT_QUAD + s->mem_index);
2639 } else {
2640 gen_op_addq_A0_im(-2);
2641 gen_op_st_T0_A0(OT_WORD + s->mem_index);
2642 }
2643 gen_op_mov_reg_A0(2, R_ESP);
2644 } else
2645#endif
2646 {
2647 gen_op_movl_A0_reg(R_ESP);
2648 if (!s->dflag)
2649 gen_op_addl_A0_im(-2);
2650 else
2651 gen_op_addl_A0_im(-4);
2652 if (s->ss32) {
2653 if (s->addseg) {
2654 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2655 gen_op_addl_A0_seg(R_SS);
2656 }
2657 } else {
2658 gen_op_andl_A0_ffff();
2659 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2660 gen_op_addl_A0_seg(R_SS);
2661 }
2662 gen_op_st_T0_A0(s->dflag + 1 + s->mem_index);
2663 if (s->ss32 && !s->addseg)
2664 gen_op_mov_reg_A0(1, R_ESP);
2665 else
2666 gen_op_mov_reg_T1(s->ss32 + 1, R_ESP);
2667 }
2668}
2669
2670/* generate a push. It depends on ss32, addseg and dflag */
2671/* slower version for T1, only used for call Ev */
2672static void gen_push_T1(DisasContext *s)
2673{
2674#ifdef TARGET_X86_64
2675 if (CODE64(s)) {
2676 gen_op_movq_A0_reg(R_ESP);
2677 if (s->dflag) {
2678 gen_op_addq_A0_im(-8);
2679 gen_op_st_T1_A0(OT_QUAD + s->mem_index);
2680 } else {
2681 gen_op_addq_A0_im(-2);
2682 gen_op_st_T1_A0(OT_WORD + s->mem_index); /* the value to push is in T1, not T0 */
2683 }
2684 gen_op_mov_reg_A0(2, R_ESP);
2685 } else
2686#endif
2687 {
2688 gen_op_movl_A0_reg(R_ESP);
2689 if (!s->dflag)
2690 gen_op_addl_A0_im(-2);
2691 else
2692 gen_op_addl_A0_im(-4);
2693 if (s->ss32) {
2694 if (s->addseg) {
2695 gen_op_addl_A0_seg(R_SS);
2696 }
2697 } else {
2698 gen_op_andl_A0_ffff();
2699 gen_op_addl_A0_seg(R_SS);
2700 }
2701 gen_op_st_T1_A0(s->dflag + 1 + s->mem_index);
2702
2703 if (s->ss32 && !s->addseg)
2704 gen_op_mov_reg_A0(1, R_ESP);
2705 else
2706 gen_stack_update(s, (-2) << s->dflag);
2707 }
2708}
2709
2710 /* a two-step pop is necessary for precise exceptions: the load may fault, so ESP is only updated afterwards by gen_pop_update */
2711static void gen_pop_T0(DisasContext *s)
2712{
2713#ifdef TARGET_X86_64
2714 if (CODE64(s)) {
2715 gen_op_movq_A0_reg(R_ESP);
2716 gen_op_ld_T0_A0((s->dflag ? OT_QUAD : OT_WORD) + s->mem_index);
2717 } else
2718#endif
2719 {
2720 gen_op_movl_A0_reg(R_ESP);
2721 if (s->ss32) {
2722 if (s->addseg)
2723 gen_op_addl_A0_seg(R_SS);
2724 } else {
2725 gen_op_andl_A0_ffff();
2726 gen_op_addl_A0_seg(R_SS);
2727 }
2728 gen_op_ld_T0_A0(s->dflag + 1 + s->mem_index);
2729 }
2730}
2731
2732static void gen_pop_update(DisasContext *s)
2733{
2734#ifdef TARGET_X86_64
2735 if (CODE64(s) && s->dflag) {
2736 gen_stack_update(s, 8);
2737 } else
2738#endif
2739 {
2740 gen_stack_update(s, 2 << s->dflag);
2741 }
2742}
2743
2744static void gen_stack_A0(DisasContext *s)
2745{
2746 gen_op_movl_A0_reg(R_ESP);
2747 if (!s->ss32)
2748 gen_op_andl_A0_ffff();
2749 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2750 if (s->addseg)
2751 gen_op_addl_A0_seg(R_SS);
2752}
2753
2754 /* NOTE: 16-bit wrap-around of ESP is not fully handled */
2755static void gen_pusha(DisasContext *s)
2756{
2757 int i;
2758 gen_op_movl_A0_reg(R_ESP);
2759 gen_op_addl_A0_im(-16 << s->dflag);
2760 if (!s->ss32)
2761 gen_op_andl_A0_ffff();
2762 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2763 if (s->addseg)
2764 gen_op_addl_A0_seg(R_SS);
2765 for(i = 0;i < 8; i++) {
2766 gen_op_mov_TN_reg(OT_LONG, 0, 7 - i);
2767 gen_op_st_T0_A0(OT_WORD + s->dflag + s->mem_index);
2768 gen_op_addl_A0_im(2 << s->dflag);
2769 }
2770 gen_op_mov_reg_T1(OT_WORD + s->ss32, R_ESP);
2771}
2772
2773 /* NOTE: 16-bit wrap-around of ESP is not fully handled */
2774static void gen_popa(DisasContext *s)
2775{
2776 int i;
2777 gen_op_movl_A0_reg(R_ESP);
2778 if (!s->ss32)
2779 gen_op_andl_A0_ffff();
2780 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2781 tcg_gen_addi_tl(cpu_T[1], cpu_T[1], 16 << s->dflag);
2782 if (s->addseg)
2783 gen_op_addl_A0_seg(R_SS);
2784 for(i = 0;i < 8; i++) {
2785 /* ESP is not reloaded */
2786 if (i != 3) {
2787 gen_op_ld_T0_A0(OT_WORD + s->dflag + s->mem_index);
2788 gen_op_mov_reg_T0(OT_WORD + s->dflag, 7 - i);
2789 }
2790 gen_op_addl_A0_im(2 << s->dflag);
2791 }
2792 gen_op_mov_reg_T1(OT_WORD + s->ss32, R_ESP);
2793}
2794
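/* ENTER: push EBP, let a helper copy the 'level' outer frame pointers,
   point EBP at the saved value, and reserve esp_addend bytes of locals
   below the copied pointers. */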
2795static void gen_enter(DisasContext *s, int esp_addend, int level)
2796{
2797 int ot, opsize;
2798
2799 level &= 0x1f;
2800#ifdef TARGET_X86_64
2801 if (CODE64(s)) {
2802 ot = s->dflag ? OT_QUAD : OT_WORD;
2803 opsize = 1 << ot;
2804
2805 gen_op_movl_A0_reg(R_ESP);
2806 gen_op_addq_A0_im(-opsize);
2807 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2808
2809 /* push bp */
2810 gen_op_mov_TN_reg(OT_LONG, 0, R_EBP);
2811 gen_op_st_T0_A0(ot + s->mem_index);
2812 if (level) {
2813 /* XXX: must save state */
2814 gen_helper_enter64_level(tcg_const_i32(level),
2815 tcg_const_i32((ot == OT_QUAD)),
2816 cpu_T[1]);
2817 }
2818 gen_op_mov_reg_T1(ot, R_EBP);
2819 tcg_gen_addi_tl(cpu_T[1], cpu_T[1], -esp_addend + (-opsize * level));
2820 gen_op_mov_reg_T1(OT_QUAD, R_ESP);
2821 } else
2822#endif
2823 {
2824 ot = s->dflag + OT_WORD;
2825 opsize = 2 << s->dflag;
2826
2827 gen_op_movl_A0_reg(R_ESP);
2828 gen_op_addl_A0_im(-opsize);
2829 if (!s->ss32)
2830 gen_op_andl_A0_ffff();
2831 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2832 if (s->addseg)
2833 gen_op_addl_A0_seg(R_SS);
2834 /* push bp */
2835 gen_op_mov_TN_reg(OT_LONG, 0, R_EBP);
2836 gen_op_st_T0_A0(ot + s->mem_index);
2837 if (level) {
2838 /* XXX: must save state */
2839 gen_helper_enter_level(tcg_const_i32(level),
2840 tcg_const_i32(s->dflag),
2841 cpu_T[1]);
2842 }
2843 gen_op_mov_reg_T1(ot, R_EBP);
2844 tcg_gen_addi_tl(cpu_T[1], cpu_T[1], -esp_addend + (-opsize * level));
2845 gen_op_mov_reg_T1(OT_WORD + s->ss32, R_ESP);
2846 }
2847}
2848
2849static void gen_exception(DisasContext *s, int trapno, target_ulong cur_eip)
2850{
2851 if (s->cc_op != CC_OP_DYNAMIC)
2852 gen_op_set_cc_op(s->cc_op);
2853 gen_jmp_im(cur_eip);
2854 gen_helper_raise_exception(tcg_const_i32(trapno));
2855 s->is_jmp = DISAS_TB_JUMP;
2856}
2857
2858/* an interrupt is different from an exception because of the
2859 privilege checks */
2860static void gen_interrupt(DisasContext *s, int intno,
2861 target_ulong cur_eip, target_ulong next_eip)
2862{
2863 if (s->cc_op != CC_OP_DYNAMIC)
2864 gen_op_set_cc_op(s->cc_op);
2865 gen_jmp_im(cur_eip);
2866 gen_helper_raise_interrupt(tcg_const_i32(intno),
2867 tcg_const_i32(next_eip - cur_eip));
2868 s->is_jmp = DISAS_TB_JUMP;
2869}
2870
2871static void gen_debug(DisasContext *s, target_ulong cur_eip)
2872{
2873 if (s->cc_op != CC_OP_DYNAMIC)
2874 gen_op_set_cc_op(s->cc_op);
2875 gen_jmp_im(cur_eip);
2876 gen_helper_debug();
2877 s->is_jmp = DISAS_TB_JUMP;
2878}
2879
2880 /* generate a generic end of block. A trace exception is also
2881 generated if needed */
2882static void gen_eob(DisasContext *s)
2883{
2884 if (s->cc_op != CC_OP_DYNAMIC)
2885 gen_op_set_cc_op(s->cc_op);
2886 if (s->tb->flags & HF_INHIBIT_IRQ_MASK) {
2887 gen_helper_reset_inhibit_irq();
2888 }
2889 if (s->tb->flags & HF_RF_MASK) {
2890 gen_helper_reset_rf();
2891 }
2892 if ( s->singlestep_enabled
2893#ifdef VBOX
2894 && ( !(cpu_single_env->state & CPU_EMULATE_SINGLE_STEP)
2895 || !(s->prefix & (PREFIX_REPNZ | PREFIX_REPZ) ))
2896#endif
2897 ) {
2898 gen_helper_debug();
2899 } else if (s->tf) {
2900 gen_helper_single_step();
2901 } else {
2902 tcg_gen_exit_tb(0);
2903 }
2904 s->is_jmp = DISAS_TB_JUMP;
2905}
2906
2907 /* generate a jump to eip. No segment change may happen beforehand,
2908 since a direct jump to the next block may occur */
2909static void gen_jmp_tb(DisasContext *s, target_ulong eip, int tb_num)
2910{
2911 if (s->jmp_opt) {
2912 gen_update_cc_op(s);
2913 gen_goto_tb(s, tb_num, eip);
2914 s->is_jmp = DISAS_TB_JUMP;
2915 } else {
2916 gen_jmp_im(eip);
2917 gen_eob(s);
2918 }
2919}
2920
2921static void gen_jmp(DisasContext *s, target_ulong eip)
2922{
2923 gen_jmp_tb(s, eip, 0);
2924}
2925
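/* helpers moving 64-bit (q) and 128-bit (o) values between guest memory
   at A0 and fields of CPUX86State; the softmmu memory index is
   recovered from the mem_index cookie as (idx >> 2) - 1. */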
2926static inline void gen_ldq_env_A0(int idx, int offset)
2927{
2928 int mem_index = (idx >> 2) - 1;
2929 tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0, mem_index);
2930 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset);
2931}
2932
2933static inline void gen_stq_env_A0(int idx, int offset)
2934{
2935 int mem_index = (idx >> 2) - 1;
2936 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset);
2937 tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0, mem_index);
2938}
2939
2940static inline void gen_ldo_env_A0(int idx, int offset)
2941{
2942 int mem_index = (idx >> 2) - 1;
2943 tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0, mem_index);
2944 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(0)));
2945 tcg_gen_addi_tl(cpu_tmp0, cpu_A0, 8);
2946 tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_tmp0, mem_index);
2947 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(1)));
2948}
2949
2950static inline void gen_sto_env_A0(int idx, int offset)
2951{
2952 int mem_index = (idx >> 2) - 1;
2953 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(0)));
2954 tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0, mem_index);
2955 tcg_gen_addi_tl(cpu_tmp0, cpu_A0, 8);
2956 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(1)));
2957 tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_tmp0, mem_index);
2958}
2959
2960static inline void gen_op_movo(int d_offset, int s_offset)
2961{
2962 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset);
2963 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset);
2964 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset + 8);
2965 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset + 8);
2966}
2967
2968static inline void gen_op_movq(int d_offset, int s_offset)
2969{
2970 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset);
2971 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset);
2972}
2973
2974static inline void gen_op_movl(int d_offset, int s_offset)
2975{
2976 tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env, s_offset);
2977 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, d_offset);
2978}
2979
2980static inline void gen_op_movq_env_0(int d_offset)
2981{
2982 tcg_gen_movi_i64(cpu_tmp1_i64, 0);
2983 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset);
2984}
2985
2986#define SSE_SPECIAL ((void *)1)
2987#define SSE_DUMMY ((void *)2)
2988
2989#define MMX_OP2(x) { gen_helper_ ## x ## _mmx, gen_helper_ ## x ## _xmm }
2990#define SSE_FOP(x) { gen_helper_ ## x ## ps, gen_helper_ ## x ## pd, \
2991 gen_helper_ ## x ## ss, gen_helper_ ## x ## sd, }
2992
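/* main SSE dispatch table, indexed by the opcode byte following 0F and
   by the mandatory prefix (b1: 0 = none, 1 = 66, 2 = F3, 3 = F2).
   NULL entries are illegal, SSE_SPECIAL entries are decoded by hand in
   gen_sse, and SSE_DUMMY marks femms/emms. */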
2993static void *sse_op_table1[256][4] = {
2994 /* 3DNow! extensions */
2995 [0x0e] = { SSE_DUMMY }, /* femms */
2996 [0x0f] = { SSE_DUMMY }, /* pf... */
2997 /* pure SSE operations */
2998 [0x10] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movups, movupd, movss, movsd */
2999 [0x11] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movups, movupd, movss, movsd */
3000 [0x12] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movlps, movlpd, movsldup, movddup */
3001 [0x13] = { SSE_SPECIAL, SSE_SPECIAL }, /* movlps, movlpd */
3002 [0x14] = { gen_helper_punpckldq_xmm, gen_helper_punpcklqdq_xmm },
3003 [0x15] = { gen_helper_punpckhdq_xmm, gen_helper_punpckhqdq_xmm },
3004 [0x16] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movhps, movhpd, movshdup */
3005 [0x17] = { SSE_SPECIAL, SSE_SPECIAL }, /* movhps, movhpd */
3006
3007 [0x28] = { SSE_SPECIAL, SSE_SPECIAL }, /* movaps, movapd */
3008 [0x29] = { SSE_SPECIAL, SSE_SPECIAL }, /* movaps, movapd */
3009 [0x2a] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvtpi2ps, cvtpi2pd, cvtsi2ss, cvtsi2sd */
3010 [0x2b] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movntps, movntpd, movntss, movntsd */
3011 [0x2c] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvttps2pi, cvttpd2pi, cvttss2si, cvttsd2si */
3012 [0x2d] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvtps2pi, cvtpd2pi, cvtss2si, cvtsd2si */
3013 [0x2e] = { gen_helper_ucomiss, gen_helper_ucomisd },
3014 [0x2f] = { gen_helper_comiss, gen_helper_comisd },
3015 [0x50] = { SSE_SPECIAL, SSE_SPECIAL }, /* movmskps, movmskpd */
3016 [0x51] = SSE_FOP(sqrt),
3017 [0x52] = { gen_helper_rsqrtps, NULL, gen_helper_rsqrtss, NULL },
3018 [0x53] = { gen_helper_rcpps, NULL, gen_helper_rcpss, NULL },
3019 [0x54] = { gen_helper_pand_xmm, gen_helper_pand_xmm }, /* andps, andpd */
3020 [0x55] = { gen_helper_pandn_xmm, gen_helper_pandn_xmm }, /* andnps, andnpd */
3021 [0x56] = { gen_helper_por_xmm, gen_helper_por_xmm }, /* orps, orpd */
3022 [0x57] = { gen_helper_pxor_xmm, gen_helper_pxor_xmm }, /* xorps, xorpd */
3023 [0x58] = SSE_FOP(add),
3024 [0x59] = SSE_FOP(mul),
3025 [0x5a] = { gen_helper_cvtps2pd, gen_helper_cvtpd2ps,
3026 gen_helper_cvtss2sd, gen_helper_cvtsd2ss },
3027 [0x5b] = { gen_helper_cvtdq2ps, gen_helper_cvtps2dq, gen_helper_cvttps2dq },
3028 [0x5c] = SSE_FOP(sub),
3029 [0x5d] = SSE_FOP(min),
3030 [0x5e] = SSE_FOP(div),
3031 [0x5f] = SSE_FOP(max),
3032
3033 [0xc2] = SSE_FOP(cmpeq),
3034 [0xc6] = { gen_helper_shufps, gen_helper_shufpd },
3035
3036 [0x38] = { SSE_SPECIAL, SSE_SPECIAL, NULL, SSE_SPECIAL }, /* SSSE3/SSE4 */
3037 [0x3a] = { SSE_SPECIAL, SSE_SPECIAL }, /* SSSE3/SSE4 */
3038
3039 /* MMX ops and their SSE extensions */
3040 [0x60] = MMX_OP2(punpcklbw),
3041 [0x61] = MMX_OP2(punpcklwd),
3042 [0x62] = MMX_OP2(punpckldq),
3043 [0x63] = MMX_OP2(packsswb),
3044 [0x64] = MMX_OP2(pcmpgtb),
3045 [0x65] = MMX_OP2(pcmpgtw),
3046 [0x66] = MMX_OP2(pcmpgtl),
3047 [0x67] = MMX_OP2(packuswb),
3048 [0x68] = MMX_OP2(punpckhbw),
3049 [0x69] = MMX_OP2(punpckhwd),
3050 [0x6a] = MMX_OP2(punpckhdq),
3051 [0x6b] = MMX_OP2(packssdw),
3052 [0x6c] = { NULL, gen_helper_punpcklqdq_xmm },
3053 [0x6d] = { NULL, gen_helper_punpckhqdq_xmm },
3054 [0x6e] = { SSE_SPECIAL, SSE_SPECIAL }, /* movd mm, ea */
3055 [0x6f] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movq, movdqa, movdqu */
3056 [0x70] = { gen_helper_pshufw_mmx,
3057 gen_helper_pshufd_xmm,
3058 gen_helper_pshufhw_xmm,
3059 gen_helper_pshuflw_xmm },
3060 [0x71] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftw */
3061 [0x72] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftd */
3062 [0x73] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftq */
3063 [0x74] = MMX_OP2(pcmpeqb),
3064 [0x75] = MMX_OP2(pcmpeqw),
3065 [0x76] = MMX_OP2(pcmpeql),
3066 [0x77] = { SSE_DUMMY }, /* emms */
3067 [0x78] = { NULL, SSE_SPECIAL, NULL, SSE_SPECIAL }, /* extrq_i, insertq_i */
3068 [0x79] = { NULL, gen_helper_extrq_r, NULL, gen_helper_insertq_r },
3069 [0x7c] = { NULL, gen_helper_haddpd, NULL, gen_helper_haddps },
3070 [0x7d] = { NULL, gen_helper_hsubpd, NULL, gen_helper_hsubps },
3071 [0x7e] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movd, movd, movq */
3072 [0x7f] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movq, movdqa, movdqu */
3073 [0xc4] = { SSE_SPECIAL, SSE_SPECIAL }, /* pinsrw */
3074 [0xc5] = { SSE_SPECIAL, SSE_SPECIAL }, /* pextrw */
3075 [0xd0] = { NULL, gen_helper_addsubpd, NULL, gen_helper_addsubps },
3076 [0xd1] = MMX_OP2(psrlw),
3077 [0xd2] = MMX_OP2(psrld),
3078 [0xd3] = MMX_OP2(psrlq),
3079 [0xd4] = MMX_OP2(paddq),
3080 [0xd5] = MMX_OP2(pmullw),
3081 [0xd6] = { NULL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movq, movq2dq, movdq2q */
3082 [0xd7] = { SSE_SPECIAL, SSE_SPECIAL }, /* pmovmskb */
3083 [0xd8] = MMX_OP2(psubusb),
3084 [0xd9] = MMX_OP2(psubusw),
3085 [0xda] = MMX_OP2(pminub),
3086 [0xdb] = MMX_OP2(pand),
3087 [0xdc] = MMX_OP2(paddusb),
3088 [0xdd] = MMX_OP2(paddusw),
3089 [0xde] = MMX_OP2(pmaxub),
3090 [0xdf] = MMX_OP2(pandn),
3091 [0xe0] = MMX_OP2(pavgb),
3092 [0xe1] = MMX_OP2(psraw),
3093 [0xe2] = MMX_OP2(psrad),
3094 [0xe3] = MMX_OP2(pavgw),
3095 [0xe4] = MMX_OP2(pmulhuw),
3096 [0xe5] = MMX_OP2(pmulhw),
3097 [0xe6] = { NULL, gen_helper_cvttpd2dq, gen_helper_cvtdq2pd, gen_helper_cvtpd2dq },
3098 [0xe7] = { SSE_SPECIAL, SSE_SPECIAL }, /* movntq, movntdq */
3099 [0xe8] = MMX_OP2(psubsb),
3100 [0xe9] = MMX_OP2(psubsw),
3101 [0xea] = MMX_OP2(pminsw),
3102 [0xeb] = MMX_OP2(por),
3103 [0xec] = MMX_OP2(paddsb),
3104 [0xed] = MMX_OP2(paddsw),
3105 [0xee] = MMX_OP2(pmaxsw),
3106 [0xef] = MMX_OP2(pxor),
3107 [0xf0] = { NULL, NULL, NULL, SSE_SPECIAL }, /* lddqu */
3108 [0xf1] = MMX_OP2(psllw),
3109 [0xf2] = MMX_OP2(pslld),
3110 [0xf3] = MMX_OP2(psllq),
3111 [0xf4] = MMX_OP2(pmuludq),
3112 [0xf5] = MMX_OP2(pmaddwd),
3113 [0xf6] = MMX_OP2(psadbw),
3114 [0xf7] = MMX_OP2(maskmov),
3115 [0xf8] = MMX_OP2(psubb),
3116 [0xf9] = MMX_OP2(psubw),
3117 [0xfa] = MMX_OP2(psubl),
3118 [0xfb] = MMX_OP2(psubq),
3119 [0xfc] = MMX_OP2(paddb),
3120 [0xfd] = MMX_OP2(paddw),
3121 [0xfe] = MMX_OP2(paddl),
3122};
3123
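/* shift-by-immediate group (0F 71/72/73): one row of eight slots per
   operand width (word, dword, qword), the slot within a row selected by
   the ModRM reg field (2 = srl, 4 = sra, 6 = sll, plus the xmm-only
   psrldq/pslldq at slots 3 and 7 of the qword row). */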
3124static void *sse_op_table2[3 * 8][2] = {
3125 [0 + 2] = MMX_OP2(psrlw),
3126 [0 + 4] = MMX_OP2(psraw),
3127 [0 + 6] = MMX_OP2(psllw),
3128 [8 + 2] = MMX_OP2(psrld),
3129 [8 + 4] = MMX_OP2(psrad),
3130 [8 + 6] = MMX_OP2(pslld),
3131 [16 + 2] = MMX_OP2(psrlq),
3132 [16 + 3] = { NULL, gen_helper_psrldq_xmm },
3133 [16 + 6] = MMX_OP2(psllq),
3134 [16 + 7] = { NULL, gen_helper_pslldq_xmm },
3135};
3136
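/* scalar int<->float conversions in three groups of four (cvtsi2ss/sd,
   cvttss/sd2si, cvtss/sd2si), each with a 32-bit and a 64-bit integer
   form; the 64-bit entries exist only on x86_64. */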
3137static void *sse_op_table3[4 * 3] = {
3138 gen_helper_cvtsi2ss,
3139 gen_helper_cvtsi2sd,
3140 X86_64_ONLY(gen_helper_cvtsq2ss),
3141 X86_64_ONLY(gen_helper_cvtsq2sd),
3142
3143 gen_helper_cvttss2si,
3144 gen_helper_cvttsd2si,
3145 X86_64_ONLY(gen_helper_cvttss2sq),
3146 X86_64_ONLY(gen_helper_cvttsd2sq),
3147
3148 gen_helper_cvtss2si,
3149 gen_helper_cvtsd2si,
3150 X86_64_ONLY(gen_helper_cvtss2sq),
3151 X86_64_ONLY(gen_helper_cvtsd2sq),
3152};
3153
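/* CMPccPS/PD/SS/SD: the low three bits of the imm8 select one of the
   eight compare predicates; each row holds the ps/pd/ss/sd variants. */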
3154static void *sse_op_table4[8][4] = {
3155 SSE_FOP(cmpeq),
3156 SSE_FOP(cmplt),
3157 SSE_FOP(cmple),
3158 SSE_FOP(cmpunord),
3159 SSE_FOP(cmpneq),
3160 SSE_FOP(cmpnlt),
3161 SSE_FOP(cmpnle),
3162 SSE_FOP(cmpord),
3163};
3164
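/* 3DNow! operations, indexed by the imm8 suffix byte that follows the
   operands in the 0F 0F encoding. */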
3165static void *sse_op_table5[256] = {
3166 [0x0c] = gen_helper_pi2fw,
3167 [0x0d] = gen_helper_pi2fd,
3168 [0x1c] = gen_helper_pf2iw,
3169 [0x1d] = gen_helper_pf2id,
3170 [0x8a] = gen_helper_pfnacc,
3171 [0x8e] = gen_helper_pfpnacc,
3172 [0x90] = gen_helper_pfcmpge,
3173 [0x94] = gen_helper_pfmin,
3174 [0x96] = gen_helper_pfrcp,
3175 [0x97] = gen_helper_pfrsqrt,
3176 [0x9a] = gen_helper_pfsub,
3177 [0x9e] = gen_helper_pfadd,
3178 [0xa0] = gen_helper_pfcmpgt,
3179 [0xa4] = gen_helper_pfmax,
3180 [0xa6] = gen_helper_movq, /* pfrcpit1; no need to actually increase precision */
3181 [0xa7] = gen_helper_movq, /* pfrsqit1 */
3182 [0xaa] = gen_helper_pfsubr,
3183 [0xae] = gen_helper_pfacc,
3184 [0xb0] = gen_helper_pfcmpeq,
3185 [0xb4] = gen_helper_pfmul,
3186 [0xb6] = gen_helper_movq, /* pfrcpit2 */
3187 [0xb7] = gen_helper_pmulhrw_mmx,
3188 [0xbb] = gen_helper_pswapd,
3189 [0xbf] = gen_helper_pavgb_mmx /* pavgusb */
3190};
3191
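/* three-byte opcodes (0F 38 and 0F 3A): each entry carries an MMX/XMM
   helper pair plus the CPUID feature bit that must be present for the
   instruction to be legal. */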
3192struct sse_op_helper_s {
3193 void *op[2]; uint32_t ext_mask;
3194};
3195#define SSSE3_OP(x) { MMX_OP2(x), CPUID_EXT_SSSE3 }
3196#define SSE41_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_SSE41 }
3197#define SSE42_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_SSE42 }
3198#define SSE41_SPECIAL { { NULL, SSE_SPECIAL }, CPUID_EXT_SSE41 }
3199static struct sse_op_helper_s sse_op_table6[256] = {
3200 [0x00] = SSSE3_OP(pshufb),
3201 [0x01] = SSSE3_OP(phaddw),
3202 [0x02] = SSSE3_OP(phaddd),
3203 [0x03] = SSSE3_OP(phaddsw),
3204 [0x04] = SSSE3_OP(pmaddubsw),
3205 [0x05] = SSSE3_OP(phsubw),
3206 [0x06] = SSSE3_OP(phsubd),
3207 [0x07] = SSSE3_OP(phsubsw),
3208 [0x08] = SSSE3_OP(psignb),
3209 [0x09] = SSSE3_OP(psignw),
3210 [0x0a] = SSSE3_OP(psignd),
3211 [0x0b] = SSSE3_OP(pmulhrsw),
3212 [0x10] = SSE41_OP(pblendvb),
3213 [0x14] = SSE41_OP(blendvps),
3214 [0x15] = SSE41_OP(blendvpd),
3215 [0x17] = SSE41_OP(ptest),
3216 [0x1c] = SSSE3_OP(pabsb),
3217 [0x1d] = SSSE3_OP(pabsw),
3218 [0x1e] = SSSE3_OP(pabsd),
3219 [0x20] = SSE41_OP(pmovsxbw),
3220 [0x21] = SSE41_OP(pmovsxbd),
3221 [0x22] = SSE41_OP(pmovsxbq),
3222 [0x23] = SSE41_OP(pmovsxwd),
3223 [0x24] = SSE41_OP(pmovsxwq),
3224 [0x25] = SSE41_OP(pmovsxdq),
3225 [0x28] = SSE41_OP(pmuldq),
3226 [0x29] = SSE41_OP(pcmpeqq),
3227 [0x2a] = SSE41_SPECIAL, /* movntdqa */
3228 [0x2b] = SSE41_OP(packusdw),
3229 [0x30] = SSE41_OP(pmovzxbw),
3230 [0x31] = SSE41_OP(pmovzxbd),
3231 [0x32] = SSE41_OP(pmovzxbq),
3232 [0x33] = SSE41_OP(pmovzxwd),
3233 [0x34] = SSE41_OP(pmovzxwq),
3234 [0x35] = SSE41_OP(pmovzxdq),
3235 [0x37] = SSE42_OP(pcmpgtq),
3236 [0x38] = SSE41_OP(pminsb),
3237 [0x39] = SSE41_OP(pminsd),
3238 [0x3a] = SSE41_OP(pminuw),
3239 [0x3b] = SSE41_OP(pminud),
3240 [0x3c] = SSE41_OP(pmaxsb),
3241 [0x3d] = SSE41_OP(pmaxsd),
3242 [0x3e] = SSE41_OP(pmaxuw),
3243 [0x3f] = SSE41_OP(pmaxud),
3244 [0x40] = SSE41_OP(pmulld),
3245 [0x41] = SSE41_OP(phminposuw),
3246};
3247
3248static struct sse_op_helper_s sse_op_table7[256] = {
3249 [0x08] = SSE41_OP(roundps),
3250 [0x09] = SSE41_OP(roundpd),
3251 [0x0a] = SSE41_OP(roundss),
3252 [0x0b] = SSE41_OP(roundsd),
3253 [0x0c] = SSE41_OP(blendps),
3254 [0x0d] = SSE41_OP(blendpd),
3255 [0x0e] = SSE41_OP(pblendw),
3256 [0x0f] = SSSE3_OP(palignr),
3257 [0x14] = SSE41_SPECIAL, /* pextrb */
3258 [0x15] = SSE41_SPECIAL, /* pextrw */
3259 [0x16] = SSE41_SPECIAL, /* pextrd/pextrq */
3260 [0x17] = SSE41_SPECIAL, /* extractps */
3261 [0x20] = SSE41_SPECIAL, /* pinsrb */
3262 [0x21] = SSE41_SPECIAL, /* insertps */
3263 [0x22] = SSE41_SPECIAL, /* pinsrd/pinsrq */
3264 [0x40] = SSE41_OP(dpps),
3265 [0x41] = SSE41_OP(dppd),
3266 [0x42] = SSE41_OP(mpsadbw),
3267 [0x60] = SSE42_OP(pcmpestrm),
3268 [0x61] = SSE42_OP(pcmpestri),
3269 [0x62] = SSE42_OP(pcmpistrm),
3270 [0x63] = SSE42_OP(pcmpistri),
3271};
3272
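/* decode and generate code for one MMX/SSE/3DNow! instruction; 'b' is
   the opcode byte following 0F, already fetched by the caller. */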
3273static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r)
3274{
3275 int b1, op1_offset, op2_offset, is_xmm, val, ot;
3276 int modrm, mod, rm, reg, reg_addr, offset_addr;
3277 void *sse_op2;
3278
3279 b &= 0xff;
3280 if (s->prefix & PREFIX_DATA)
3281 b1 = 1;
3282 else if (s->prefix & PREFIX_REPZ)
3283 b1 = 2;
3284 else if (s->prefix & PREFIX_REPNZ)
3285 b1 = 3;
3286 else
3287 b1 = 0;
3288 sse_op2 = sse_op_table1[b][b1];
3289 if (!sse_op2)
3290 goto illegal_op;
3291 if ((b <= 0x5f && b >= 0x10) || b == 0xc6 || b == 0xc2) {
3292 is_xmm = 1;
3293 } else {
3294 if (b1 == 0) {
3295 /* MMX case */
3296 is_xmm = 0;
3297 } else {
3298 is_xmm = 1;
3299 }
3300 }
3301 /* simple MMX/SSE operation */
3302 if (s->flags & HF_TS_MASK) {
3303 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
3304 return;
3305 }
3306 if (s->flags & HF_EM_MASK) {
3307 illegal_op:
3308 gen_exception(s, EXCP06_ILLOP, pc_start - s->cs_base);
3309 return;
3310 }
3311 if (is_xmm && !(s->flags & HF_OSFXSR_MASK))
3312 if ((b != 0x38 && b != 0x3a) || (s->prefix & PREFIX_DATA))
3313 goto illegal_op;
3314 if (b == 0x0e) {
3315 if (!(s->cpuid_ext2_features & CPUID_EXT2_3DNOW))
3316 goto illegal_op;
3317 /* femms */
3318 gen_helper_emms();
3319 return;
3320 }
3321 if (b == 0x77) {
3322 /* emms */
3323 gen_helper_emms();
3324 return;
3325 }
3326 /* prepare MMX state (XXX: optimize by storing fptt and fptags in
3327 the static cpu state) */
3328 if (!is_xmm) {
3329 gen_helper_enter_mmx();
3330 }
3331
3332 modrm = ldub_code(s->pc++);
3333 reg = ((modrm >> 3) & 7);
3334 if (is_xmm)
3335 reg |= rex_r;
3336 mod = (modrm >> 6) & 3;
3337 if (sse_op2 == SSE_SPECIAL) {
3338 b |= (b1 << 8);
3339 switch(b) {
3340 case 0x0e7: /* movntq */
3341 if (mod == 3)
3342 goto illegal_op;
3343 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3344 gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,fpregs[reg].mmx));
3345 break;
3346 case 0x1e7: /* movntdq */
3347 case 0x02b: /* movntps */
3348 case 0x12b: /* movntpd */
3349 if (mod == 3)
3350 goto illegal_op;
3351 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3352 gen_sto_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
3353 break;
3354 case 0x3f0: /* lddqu */
3355 if (mod == 3)
3356 goto illegal_op;
3357 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3358 gen_ldo_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
3359 break;
3360 case 0x22b: /* movntss */
3361 case 0x32b: /* movntsd */
3362 if (mod == 3)
3363 goto illegal_op;
3364 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3365 if (b1 & 1) {
3366 gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,
3367 xmm_regs[reg]));
3368 } else {
3369 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
3370 xmm_regs[reg].XMM_L(0)));
3371 gen_op_st_T0_A0(OT_LONG + s->mem_index);
3372 }
3373 break;
3374 case 0x6e: /* movd mm, ea */
3375#ifdef TARGET_X86_64
3376 if (s->dflag == 2) {
3377 gen_ldst_modrm(s, modrm, OT_QUAD, OR_TMP0, 0);
3378 tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,fpregs[reg].mmx));
3379 } else
3380#endif
3381 {
3382 gen_ldst_modrm(s, modrm, OT_LONG, OR_TMP0, 0);
3383 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3384 offsetof(CPUX86State,fpregs[reg].mmx));
3385 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
3386 gen_helper_movl_mm_T0_mmx(cpu_ptr0, cpu_tmp2_i32);
3387 }
3388 break;
3389 case 0x16e: /* movd xmm, ea */
3390#ifdef TARGET_X86_64
3391 if (s->dflag == 2) {
3392 gen_ldst_modrm(s, modrm, OT_QUAD, OR_TMP0, 0);
3393 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3394 offsetof(CPUX86State,xmm_regs[reg]));
3395 gen_helper_movq_mm_T0_xmm(cpu_ptr0, cpu_T[0]);
3396 } else
3397#endif
3398 {
3399 gen_ldst_modrm(s, modrm, OT_LONG, OR_TMP0, 0);
3400 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3401 offsetof(CPUX86State,xmm_regs[reg]));
3402 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
3403 gen_helper_movl_mm_T0_xmm(cpu_ptr0, cpu_tmp2_i32);
3404 }
3405 break;
3406 case 0x6f: /* movq mm, ea */
3407 if (mod != 3) {
3408 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3409 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,fpregs[reg].mmx));
3410 } else {
3411 rm = (modrm & 7);
3412 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env,
3413 offsetof(CPUX86State,fpregs[rm].mmx));
3414 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env,
3415 offsetof(CPUX86State,fpregs[reg].mmx));
3416 }
3417 break;
3418 case 0x010: /* movups */
3419 case 0x110: /* movupd */
3420 case 0x028: /* movaps */
3421 case 0x128: /* movapd */
3422 case 0x16f: /* movdqa xmm, ea */
3423 case 0x26f: /* movdqu xmm, ea */
3424 if (mod != 3) {
3425 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3426 gen_ldo_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
3427 } else {
3428 rm = (modrm & 7) | REX_B(s);
3429 gen_op_movo(offsetof(CPUX86State,xmm_regs[reg]),
3430 offsetof(CPUX86State,xmm_regs[rm]));
3431 }
3432 break;
3433 case 0x210: /* movss xmm, ea */
3434 if (mod != 3) {
3435 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3436 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
3437 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
3438 gen_op_movl_T0_0();
3439 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)));
3440 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)));
3441 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)));
3442 } else {
3443 rm = (modrm & 7) | REX_B(s);
3444 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)),
3445 offsetof(CPUX86State,xmm_regs[rm].XMM_L(0)));
3446 }
3447 break;
3448 case 0x310: /* movsd xmm, ea */
3449 if (mod != 3) {
3450 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3451 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3452 gen_op_movl_T0_0();
3453 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)));
3454 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)));
3455 } else {
3456 rm = (modrm & 7) | REX_B(s);
3457 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
3458 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
3459 }
3460 break;
3461 case 0x012: /* movlps */
3462 case 0x112: /* movlpd */
3463 if (mod != 3) {
3464 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3465 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3466 } else {
3467 /* movhlps */
3468 rm = (modrm & 7) | REX_B(s);
3469 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
3470 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(1)));
3471 }
3472 break;
3473 case 0x212: /* movsldup */
3474 if (mod != 3) {
3475 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3476 gen_ldo_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
3477 } else {
3478 rm = (modrm & 7) | REX_B(s);
3479 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)),
3480 offsetof(CPUX86State,xmm_regs[rm].XMM_L(0)));
3481 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)),
3482 offsetof(CPUX86State,xmm_regs[rm].XMM_L(2)));
3483 }
3484 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)),
3485 offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
3486 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)),
3487 offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)));
3488 break;
3489 case 0x312: /* movddup */
3490 if (mod != 3) {
3491 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3492 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3493 } else {
3494 rm = (modrm & 7) | REX_B(s);
3495 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
3496 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
3497 }
3498 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)),
3499 offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3500 break;
3501 case 0x016: /* movhps */
3502 case 0x116: /* movhpd */
3503 if (mod != 3) {
3504 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3505 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)));
3506 } else {
3507 /* movlhps */
3508 rm = (modrm & 7) | REX_B(s);
3509 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)),
3510 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
3511 }
3512 break;
3513 case 0x216: /* movshdup */
3514 if (mod != 3) {
3515 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3516 gen_ldo_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
3517 } else {
3518 rm = (modrm & 7) | REX_B(s);
3519 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)),
3520 offsetof(CPUX86State,xmm_regs[rm].XMM_L(1)));
3521 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)),
3522 offsetof(CPUX86State,xmm_regs[rm].XMM_L(3)));
3523 }
3524 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)),
3525 offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)));
3526 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)),
3527 offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)));
3528 break;
3529 case 0x178:
3530 case 0x378:
3531 {
3532 int bit_index, field_length;
3533
3534 if (b1 == 1 && reg != 0)
3535 goto illegal_op;
3536 field_length = ldub_code(s->pc++) & 0x3F;
3537 bit_index = ldub_code(s->pc++) & 0x3F;
3538 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3539 offsetof(CPUX86State,xmm_regs[reg]));
3540 if (b1 == 1)
3541 gen_helper_extrq_i(cpu_ptr0, tcg_const_i32(bit_index),
3542 tcg_const_i32(field_length));
3543 else
3544 gen_helper_insertq_i(cpu_ptr0, tcg_const_i32(bit_index),
3545 tcg_const_i32(field_length));
3546 }
3547 break;
3548 case 0x7e: /* movd ea, mm */
3549#ifdef TARGET_X86_64
3550 if (s->dflag == 2) {
3551 tcg_gen_ld_i64(cpu_T[0], cpu_env,
3552 offsetof(CPUX86State,fpregs[reg].mmx));
3553 gen_ldst_modrm(s, modrm, OT_QUAD, OR_TMP0, 1);
3554 } else
3555#endif
3556 {
3557 tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
3558 offsetof(CPUX86State,fpregs[reg].mmx.MMX_L(0)));
3559 gen_ldst_modrm(s, modrm, OT_LONG, OR_TMP0, 1);
3560 }
3561 break;
3562 case 0x17e: /* movd ea, xmm */
3563#ifdef TARGET_X86_64
3564 if (s->dflag == 2) {
3565 tcg_gen_ld_i64(cpu_T[0], cpu_env,
3566 offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3567 gen_ldst_modrm(s, modrm, OT_QUAD, OR_TMP0, 1);
3568 } else
3569#endif
3570 {
3571 tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
3572 offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
3573 gen_ldst_modrm(s, modrm, OT_LONG, OR_TMP0, 1);
3574 }
3575 break;
3576 case 0x27e: /* movq xmm, ea */
3577 if (mod != 3) {
3578 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3579 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3580 } else {
3581 rm = (modrm & 7) | REX_B(s);
3582 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
3583 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
3584 }
3585 gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)));
3586 break;
3587 case 0x7f: /* movq ea, mm */
3588 if (mod != 3) {
3589 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3590 gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,fpregs[reg].mmx));
3591 } else {
3592 rm = (modrm & 7);
3593 gen_op_movq(offsetof(CPUX86State,fpregs[rm].mmx),
3594 offsetof(CPUX86State,fpregs[reg].mmx));
3595 }
3596 break;
3597 case 0x011: /* movups */
3598 case 0x111: /* movupd */
3599 case 0x029: /* movaps */
3600 case 0x129: /* movapd */
3601 case 0x17f: /* movdqa ea, xmm */
3602 case 0x27f: /* movdqu ea, xmm */
3603 if (mod != 3) {
3604 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3605 gen_sto_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
3606 } else {
3607 rm = (modrm & 7) | REX_B(s);
3608 gen_op_movo(offsetof(CPUX86State,xmm_regs[rm]),
3609 offsetof(CPUX86State,xmm_regs[reg]));
3610 }
3611 break;
3612 case 0x211: /* movss ea, xmm */
3613 if (mod != 3) {
3614 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3615 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
3616 gen_op_st_T0_A0(OT_LONG + s->mem_index);
3617 } else {
3618 rm = (modrm & 7) | REX_B(s);
3619 gen_op_movl(offsetof(CPUX86State,xmm_regs[rm].XMM_L(0)),
3620 offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
3621 }
3622 break;
3623 case 0x311: /* movsd ea, xmm */
3624 if (mod != 3) {
3625 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3626 gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3627 } else {
3628 rm = (modrm & 7) | REX_B(s);
3629 gen_op_movq(offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)),
3630 offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3631 }
3632 break;
3633 case 0x013: /* movlps */
3634 case 0x113: /* movlpd */
3635 if (mod != 3) {
3636 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3637 gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3638 } else {
3639 goto illegal_op;
3640 }
3641 break;
3642 case 0x017: /* movhps */
3643 case 0x117: /* movhpd */
3644 if (mod != 3) {
3645 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3646 gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)));
3647 } else {
3648 goto illegal_op;
3649 }
3650 break;
3651 case 0x71: /* shift mm, im */
3652 case 0x72:
3653 case 0x73:
3654 case 0x171: /* shift xmm, im */
3655 case 0x172:
3656 case 0x173:
3657 if (b1 >= 2) {
3658 goto illegal_op;
3659 }
3660 val = ldub_code(s->pc++);
3661 if (is_xmm) {
3662 gen_op_movl_T0_im(val);
3663 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(0)));
3664 gen_op_movl_T0_0();
3665 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(1)));
3666 op1_offset = offsetof(CPUX86State,xmm_t0);
3667 } else {
3668 gen_op_movl_T0_im(val);
3669 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,mmx_t0.MMX_L(0)));
3670 gen_op_movl_T0_0();
3671 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,mmx_t0.MMX_L(1)));
3672 op1_offset = offsetof(CPUX86State,mmx_t0);
3673 }
3674 sse_op2 = sse_op_table2[((b - 1) & 3) * 8 + ((modrm >> 3) & 7)][b1];
3675 if (!sse_op2)
3676 goto illegal_op;
3677 if (is_xmm) {
3678 rm = (modrm & 7) | REX_B(s);
3679 op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
3680 } else {
3681 rm = (modrm & 7);
3682 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
3683 }
3684 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op2_offset);
3685 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op1_offset);
3686 ((void (*)(TCGv_ptr, TCGv_ptr))sse_op2)(cpu_ptr0, cpu_ptr1);
3687 break;
3688 case 0x050: /* movmskps */
3689 rm = (modrm & 7) | REX_B(s);
3690 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3691 offsetof(CPUX86State,xmm_regs[rm]));
3692 gen_helper_movmskps(cpu_tmp2_i32, cpu_ptr0);
3693 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
3694 gen_op_mov_reg_T0(OT_LONG, reg);
3695 break;
3696 case 0x150: /* movmskpd */
3697 rm = (modrm & 7) | REX_B(s);
3698 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3699 offsetof(CPUX86State,xmm_regs[rm]));
3700 gen_helper_movmskpd(cpu_tmp2_i32, cpu_ptr0);
3701 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
3702 gen_op_mov_reg_T0(OT_LONG, reg);
3703 break;
3704 case 0x02a: /* cvtpi2ps */
3705 case 0x12a: /* cvtpi2pd */
3706 gen_helper_enter_mmx();
3707 if (mod != 3) {
3708 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3709 op2_offset = offsetof(CPUX86State,mmx_t0);
3710 gen_ldq_env_A0(s->mem_index, op2_offset);
3711 } else {
3712 rm = (modrm & 7);
3713 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
3714 }
3715 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
3716 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3717 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
3718 switch(b >> 8) {
3719 case 0x0:
3720 gen_helper_cvtpi2ps(cpu_ptr0, cpu_ptr1);
3721 break;
3722 default:
3723 case 0x1:
3724 gen_helper_cvtpi2pd(cpu_ptr0, cpu_ptr1);
3725 break;
3726 }
3727 break;
3728 case 0x22a: /* cvtsi2ss */
3729 case 0x32a: /* cvtsi2sd */
3730 ot = (s->dflag == 2) ? OT_QUAD : OT_LONG;
3731 gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0);
3732 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
3733 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3734 sse_op2 = sse_op_table3[(s->dflag == 2) * 2 + ((b >> 8) - 2)];
3735 if (ot == OT_LONG) {
3736 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
3737 ((void (*)(TCGv_ptr, TCGv_i32))sse_op2)(cpu_ptr0, cpu_tmp2_i32);
3738 } else {
3739 ((void (*)(TCGv_ptr, TCGv))sse_op2)(cpu_ptr0, cpu_T[0]);
3740 }
3741 break;
3742 case 0x02c: /* cvttps2pi */
3743 case 0x12c: /* cvttpd2pi */
3744 case 0x02d: /* cvtps2pi */
3745 case 0x12d: /* cvtpd2pi */
3746 gen_helper_enter_mmx();
3747 if (mod != 3) {
3748 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3749 op2_offset = offsetof(CPUX86State,xmm_t0);
3750 gen_ldo_env_A0(s->mem_index, op2_offset);
3751 } else {
3752 rm = (modrm & 7) | REX_B(s);
3753 op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
3754 }
3755 op1_offset = offsetof(CPUX86State,fpregs[reg & 7].mmx);
3756 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3757 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
3758 switch(b) {
3759 case 0x02c:
3760 gen_helper_cvttps2pi(cpu_ptr0, cpu_ptr1);
3761 break;
3762 case 0x12c:
3763 gen_helper_cvttpd2pi(cpu_ptr0, cpu_ptr1);
3764 break;
3765 case 0x02d:
3766 gen_helper_cvtps2pi(cpu_ptr0, cpu_ptr1);
3767 break;
3768 case 0x12d:
3769 gen_helper_cvtpd2pi(cpu_ptr0, cpu_ptr1);
3770 break;
3771 }
3772 break;
3773 case 0x22c: /* cvttss2si */
3774 case 0x32c: /* cvttsd2si */
3775 case 0x22d: /* cvtss2si */
3776 case 0x32d: /* cvtsd2si */
3777 ot = (s->dflag == 2) ? OT_QUAD : OT_LONG;
3778 if (mod != 3) {
3779 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3780 if ((b >> 8) & 1) {
3781 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_t0.XMM_Q(0)));
3782 } else {
3783 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
3784 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(0)));
3785 }
3786 op2_offset = offsetof(CPUX86State,xmm_t0);
3787 } else {
3788 rm = (modrm & 7) | REX_B(s);
3789 op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
3790 }
3791 sse_op2 = sse_op_table3[(s->dflag == 2) * 2 + ((b >> 8) - 2) + 4 +
3792 (b & 1) * 4];
3793 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op2_offset);
3794 if (ot == OT_LONG) {
3795 ((void (*)(TCGv_i32, TCGv_ptr))sse_op2)(cpu_tmp2_i32, cpu_ptr0);
3796 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
3797 } else {
3798 ((void (*)(TCGv, TCGv_ptr))sse_op2)(cpu_T[0], cpu_ptr0);
3799 }
3800 gen_op_mov_reg_T0(ot, reg);
3801 break;
3802 case 0xc4: /* pinsrw */
3803 case 0x1c4:
3804 s->rip_offset = 1;
3805 gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0);
3806 val = ldub_code(s->pc++);
3807 if (b1) {
3808 val &= 7;
3809 tcg_gen_st16_tl(cpu_T[0], cpu_env,
3810 offsetof(CPUX86State,xmm_regs[reg].XMM_W(val)));
3811 } else {
3812 val &= 3;
3813 tcg_gen_st16_tl(cpu_T[0], cpu_env,
3814 offsetof(CPUX86State,fpregs[reg].mmx.MMX_W(val)));
3815 }
3816 break;
3817 case 0xc5: /* pextrw */
3818 case 0x1c5:
3819 if (mod != 3)
3820 goto illegal_op;
3821 ot = (s->dflag == 2) ? OT_QUAD : OT_LONG;
3822 val = ldub_code(s->pc++);
3823 if (b1) {
3824 val &= 7;
3825 rm = (modrm & 7) | REX_B(s);
3826 tcg_gen_ld16u_tl(cpu_T[0], cpu_env,
3827 offsetof(CPUX86State,xmm_regs[rm].XMM_W(val)));
3828 } else {
3829 val &= 3;
3830 rm = (modrm & 7);
3831 tcg_gen_ld16u_tl(cpu_T[0], cpu_env,
3832 offsetof(CPUX86State,fpregs[rm].mmx.MMX_W(val)));
3833 }
3834 reg = ((modrm >> 3) & 7) | rex_r;
3835 gen_op_mov_reg_T0(ot, reg);
3836 break;
3837 case 0x1d6: /* movq ea, xmm */
3838 if (mod != 3) {
3839 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3840 gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3841 } else {
3842 rm = (modrm & 7) | REX_B(s);
3843 gen_op_movq(offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)),
3844 offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3845 gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[rm].XMM_Q(1)));
3846 }
3847 break;
3848 case 0x2d6: /* movq2dq */
3849 gen_helper_enter_mmx();
3850 rm = (modrm & 7);
3851 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
3852 offsetof(CPUX86State,fpregs[rm].mmx));
3853 gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)));
3854 break;
3855 case 0x3d6: /* movdq2q */
3856 gen_helper_enter_mmx();
3857 rm = (modrm & 7) | REX_B(s);
3858 gen_op_movq(offsetof(CPUX86State,fpregs[reg & 7].mmx),
3859 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
3860 break;
3861 case 0xd7: /* pmovmskb */
3862 case 0x1d7:
3863 if (mod != 3)
3864 goto illegal_op;
3865 if (b1) {
3866 rm = (modrm & 7) | REX_B(s);
3867 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, offsetof(CPUX86State,xmm_regs[rm]));
3868 gen_helper_pmovmskb_xmm(cpu_tmp2_i32, cpu_ptr0);
3869 } else {
3870 rm = (modrm & 7);
3871 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, offsetof(CPUX86State,fpregs[rm].mmx));
3872 gen_helper_pmovmskb_mmx(cpu_tmp2_i32, cpu_ptr0);
3873 }
3874 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
3875 reg = ((modrm >> 3) & 7) | rex_r;
3876 gen_op_mov_reg_T0(OT_LONG, reg);
3877 break;
3878 case 0x138:
3879 if (s->prefix & PREFIX_REPNZ)
3880 goto crc32;
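/* fall through */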
3881 case 0x038:
3882 b = modrm;
3883 modrm = ldub_code(s->pc++);
3884 rm = modrm & 7;
3885 reg = ((modrm >> 3) & 7) | rex_r;
3886 mod = (modrm >> 6) & 3;
3887 if (b1 >= 2) {
3888 goto illegal_op;
3889 }
3890
3891 sse_op2 = sse_op_table6[b].op[b1];
3892 if (!sse_op2)
3893 goto illegal_op;
3894 if (!(s->cpuid_ext_features & sse_op_table6[b].ext_mask))
3895 goto illegal_op;
3896
3897 if (b1) {
3898 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
3899 if (mod == 3) {
3900 op2_offset = offsetof(CPUX86State,xmm_regs[rm | REX_B(s)]);
3901 } else {
3902 op2_offset = offsetof(CPUX86State,xmm_t0);
3903 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3904 switch (b) {
3905 case 0x20: case 0x30: /* pmovsxbw, pmovzxbw */
3906 case 0x23: case 0x33: /* pmovsxwd, pmovzxwd */
3907 case 0x25: case 0x35: /* pmovsxdq, pmovzxdq */
3908 gen_ldq_env_A0(s->mem_index, op2_offset +
3909 offsetof(XMMReg, XMM_Q(0)));
3910 break;
3911 case 0x21: case 0x31: /* pmovsxbd, pmovzxbd */
3912 case 0x24: case 0x34: /* pmovsxwq, pmovzxwq */
3913 tcg_gen_qemu_ld32u(cpu_tmp0, cpu_A0,
3914 (s->mem_index >> 2) - 1);
3915 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_tmp0);
3916 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, op2_offset +
3917 offsetof(XMMReg, XMM_L(0)));
3918 break;
3919 case 0x22: case 0x32: /* pmovsxbq, pmovzxbq */
3920 tcg_gen_qemu_ld16u(cpu_tmp0, cpu_A0,
3921 (s->mem_index >> 2) - 1);
3922 tcg_gen_st16_tl(cpu_tmp0, cpu_env, op2_offset +
3923 offsetof(XMMReg, XMM_W(0)));
3924 break;
3925 case 0x2a: /* movntdqa */
3926 gen_ldo_env_A0(s->mem_index, op1_offset);
3927 return;
3928 default:
3929 gen_ldo_env_A0(s->mem_index, op2_offset);
3930 }
3931 }
3932 } else {
3933 op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
3934 if (mod == 3) {
3935 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
3936 } else {
3937 op2_offset = offsetof(CPUX86State,mmx_t0);
3938 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3939 gen_ldq_env_A0(s->mem_index, op2_offset);
3940 }
3941 }
3942 if (sse_op2 == SSE_SPECIAL)
3943 goto illegal_op;
3944
3945 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3946 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
3947 ((void (*)(TCGv_ptr, TCGv_ptr))sse_op2)(cpu_ptr0, cpu_ptr1);
3948
3949 if (b == 0x17)
3950 s->cc_op = CC_OP_EFLAGS;
3951 break;
3952 case 0x338: /* crc32 */
3953 crc32:
3954 b = modrm;
3955 modrm = ldub_code(s->pc++);
3956 reg = ((modrm >> 3) & 7) | rex_r;
3957
3958 if (b != 0xf0 && b != 0xf1)
3959 goto illegal_op;
3960 if (!(s->cpuid_ext_features & CPUID_EXT_SSE42))
3961 goto illegal_op;
3962
3963 if (b == 0xf0)
3964 ot = OT_BYTE;
3965 else if (b == 0xf1 && s->dflag == 2)
3966 ot = OT_QUAD;
3967 else if (s->prefix & PREFIX_DATA)
3968 ot = OT_WORD;
3969 else
3970 ot = OT_LONG;
3971 /* (b is 0xf0 or 0xf1 here; anything else was rejected above) */
3972
3973 gen_op_mov_TN_reg(OT_LONG, 0, reg);
3974 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
3975 gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0);
3976 gen_helper_crc32(cpu_T[0], cpu_tmp2_i32,
3977 cpu_T[0], tcg_const_i32(8 << ot));
3978
3979 ot = (s->dflag == 2) ? OT_QUAD : OT_LONG;
3980 gen_op_mov_reg_T0(ot, reg);
3981 break;
3982 case 0x03a:
3983 case 0x13a:
3984 b = modrm;
3985 modrm = ldub_code(s->pc++);
3986 rm = modrm & 7;
3987 reg = ((modrm >> 3) & 7) | rex_r;
3988 mod = (modrm >> 6) & 3;
3989 if (b1 >= 2) {
3990 goto illegal_op;
3991 }
3992
3993 sse_op2 = sse_op_table7[b].op[b1];
3994 if (!sse_op2)
3995 goto illegal_op;
3996 if (!(s->cpuid_ext_features & sse_op_table7[b].ext_mask))
3997 goto illegal_op;
3998
3999 if (sse_op2 == SSE_SPECIAL) {
4000 ot = (s->dflag == 2) ? OT_QUAD : OT_LONG;
4001 rm = (modrm & 7) | REX_B(s);
4002 if (mod != 3)
4003 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
4004 reg = ((modrm >> 3) & 7) | rex_r;
4005 val = ldub_code(s->pc++);
4006 switch (b) {
4007 case 0x14: /* pextrb */
4008 tcg_gen_ld8u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
4009 xmm_regs[reg].XMM_B(val & 15)));
4010 if (mod == 3)
4011 gen_op_mov_reg_T0(ot, rm);
4012 else
4013 tcg_gen_qemu_st8(cpu_T[0], cpu_A0,
4014 (s->mem_index >> 2) - 1);
4015 break;
4016 case 0x15: /* pextrw */
4017 tcg_gen_ld16u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
4018 xmm_regs[reg].XMM_W(val & 7)));
4019 if (mod == 3)
4020 gen_op_mov_reg_T0(ot, rm);
4021 else
4022 tcg_gen_qemu_st16(cpu_T[0], cpu_A0,
4023 (s->mem_index >> 2) - 1);
4024 break;
4025 case 0x16:
4026 if (ot == OT_LONG) { /* pextrd */
4027 tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env,
4028 offsetof(CPUX86State,
4029 xmm_regs[reg].XMM_L(val & 3)));
4030 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
4031 if (mod == 3)
4032 gen_op_mov_reg_v(ot, rm, cpu_T[0]);
4033 else
4034 tcg_gen_qemu_st32(cpu_T[0], cpu_A0,
4035 (s->mem_index >> 2) - 1);
4036 } else { /* pextrq */
4037#ifdef TARGET_X86_64
4038 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env,
4039 offsetof(CPUX86State,
4040 xmm_regs[reg].XMM_Q(val & 1)));
4041 if (mod == 3)
4042 gen_op_mov_reg_v(ot, rm, cpu_tmp1_i64);
4043 else
4044 tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0,
4045 (s->mem_index >> 2) - 1);
4046#else
4047 goto illegal_op;
4048#endif
4049 }
4050 break;
4051 case 0x17: /* extractps */
4052 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
4053 xmm_regs[reg].XMM_L(val & 3)));
4054 if (mod == 3)
4055 gen_op_mov_reg_T0(ot, rm);
4056 else
4057 tcg_gen_qemu_st32(cpu_T[0], cpu_A0,
4058 (s->mem_index >> 2) - 1);
4059 break;
4060 case 0x20: /* pinsrb */
4061 if (mod == 3)
4062 gen_op_mov_TN_reg(OT_LONG, 0, rm);
4063 else
4064 tcg_gen_qemu_ld8u(cpu_tmp0, cpu_A0,
4065 (s->mem_index >> 2) - 1);
4066 tcg_gen_st8_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State,
4067 xmm_regs[reg].XMM_B(val & 15)));
4068 break;
4069 case 0x21: /* insertps */
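/* INSERTPS imm8 layout: bits 7:6 select the source dword (register form
   only), bits 5:4 the destination dword, and bits 3:0 form a zero-mask
   applied to the destination dwords afterwards. */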
4070 if (mod == 3) {
4071 tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env,
4072 offsetof(CPUX86State,xmm_regs[rm]
4073 .XMM_L((val >> 6) & 3)));
4074 } else {
4075 tcg_gen_qemu_ld32u(cpu_tmp0, cpu_A0,
4076 (s->mem_index >> 2) - 1);
4077 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_tmp0);
4078 }
4079 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env,
4080 offsetof(CPUX86State,xmm_regs[reg]
4081 .XMM_L((val >> 4) & 3)));
4082 if ((val >> 0) & 1)
4083 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4084 cpu_env, offsetof(CPUX86State,
4085 xmm_regs[reg].XMM_L(0)));
4086 if ((val >> 1) & 1)
4087 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4088 cpu_env, offsetof(CPUX86State,
4089 xmm_regs[reg].XMM_L(1)));
4090 if ((val >> 2) & 1)
4091 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4092 cpu_env, offsetof(CPUX86State,
4093 xmm_regs[reg].XMM_L(2)));
4094 if ((val >> 3) & 1)
4095 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4096 cpu_env, offsetof(CPUX86State,
4097 xmm_regs[reg].XMM_L(3)));
4098 break;
4099 case 0x22:
4100 if (ot == OT_LONG) { /* pinsrd */
4101 if (mod == 3)
4102 gen_op_mov_v_reg(ot, cpu_tmp0, rm);
4103 else
4104 tcg_gen_qemu_ld32u(cpu_tmp0, cpu_A0,
4105 (s->mem_index >> 2) - 1);
4106 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_tmp0);
4107 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env,
4108 offsetof(CPUX86State,
4109 xmm_regs[reg].XMM_L(val & 3)));
4110 } else { /* pinsrq */
4111#ifdef TARGET_X86_64
4112 if (mod == 3)
4113 gen_op_mov_v_reg(ot, cpu_tmp1_i64, rm);
4114 else
4115 tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0,
4116 (s->mem_index >> 2) - 1);
4117 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env,
4118 offsetof(CPUX86State,
4119 xmm_regs[reg].XMM_Q(val & 1)));
4120#else
4121 goto illegal_op;
4122#endif
4123 }
4124 break;
4125 }
4126 return;
4127 }
4128
4129 if (b1) {
4130 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
4131 if (mod == 3) {
4132 op2_offset = offsetof(CPUX86State,xmm_regs[rm | REX_B(s)]);
4133 } else {
4134 op2_offset = offsetof(CPUX86State,xmm_t0);
4135 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
4136 gen_ldo_env_A0(s->mem_index, op2_offset);
4137 }
4138 } else {
4139 op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
4140 if (mod == 3) {
4141 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
4142 } else {
4143 op2_offset = offsetof(CPUX86State,mmx_t0);
4144 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
4145 gen_ldq_env_A0(s->mem_index, op2_offset);
4146 }
4147 }
4148 val = ldub_code(s->pc++);
4149
4150 if ((b & 0xfc) == 0x60) { /* pcmpXstrX */
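/* 0x60..0x63: pcmpestrm, pcmpestri, pcmpistrm, pcmpistri */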
4151 s->cc_op = CC_OP_EFLAGS;
4152
4153 if (s->dflag == 2)
4154 /* The helper must use entire 64-bit gp registers */
4155 val |= 1 << 8;
4156 }
4157
4158 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4159 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
4160 ((void (*)(TCGv_ptr, TCGv_ptr, TCGv_i32))sse_op2)(cpu_ptr0, cpu_ptr1, tcg_const_i32(val));
4161 break;
4162 default:
4163 goto illegal_op;
4164 }
4165 } else {
4166 /* generic MMX or SSE operation */
4167 switch(b) {
4168 case 0x70: /* pshufx insn */
4169 case 0xc6: /* shufps/shufpd insn */
4170 case 0xc2: /* compare insns */
4171 s->rip_offset = 1;
4172 break;
4173 default:
4174 break;
4175 }
4176 if (is_xmm) {
4177 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
4178 if (mod != 3) {
4179 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
4180 op2_offset = offsetof(CPUX86State,xmm_t0);
4181 if (b1 >= 2 && ((b >= 0x50 && b <= 0x5f && b != 0x5b) ||
4182 b == 0xc2)) {
4183 /* special case for scalar SSE insns (F3/F2 prefixed: xxxss/xxxsd) */
4184 if (b1 == 2) {
4185 /* 32 bit access */
4186 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
4187 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(0)));
4188 } else {
4189 /* 64 bit access */
4190 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_t0.XMM_D(0)));
4191 }
4192 } else {
4193 gen_ldo_env_A0(s->mem_index, op2_offset);
4194 }
4195 } else {
4196 rm = (modrm & 7) | REX_B(s);
4197 op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
4198 }
4199 } else {
4200 op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
4201 if (mod != 3) {
4202 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
4203 op2_offset = offsetof(CPUX86State,mmx_t0);
4204 gen_ldq_env_A0(s->mem_index, op2_offset);
4205 } else {
4206 rm = (modrm & 7);
4207 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
4208 }
4209 }
4210 switch(b) {
4211 case 0x0f: /* 3DNow! data insns */
4212 if (!(s->cpuid_ext2_features & CPUID_EXT2_3DNOW))
4213 goto illegal_op;
4214 val = ldub_code(s->pc++);
4215 sse_op2 = sse_op_table5[val];
4216 if (!sse_op2)
4217 goto illegal_op;
4218 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4219 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
4220 ((void (*)(TCGv_ptr, TCGv_ptr))sse_op2)(cpu_ptr0, cpu_ptr1);
4221 break;
4222 case 0x70: /* pshufx insn */
4223 case 0xc6: /* shufps/shufpd insn */
4224 val = ldub_code(s->pc++);
4225 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4226 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
4227 ((void (*)(TCGv_ptr, TCGv_ptr, TCGv_i32))sse_op2)(cpu_ptr0, cpu_ptr1, tcg_const_i32(val));
4228 break;
4229 case 0xc2:
4230 /* compare insns */
4231 val = ldub_code(s->pc++);
4232 if (val >= 8)
4233 goto illegal_op;
4234 sse_op2 = sse_op_table4[val][b1];
4235 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4236 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
4237 ((void (*)(TCGv_ptr, TCGv_ptr))sse_op2)(cpu_ptr0, cpu_ptr1);
4238 break;
4239 case 0xf7:
4240 /* maskmovq/maskmovdqu: the store goes implicitly to DS:(E/R)DI (segment-overridable), so we must prepare A0 */
4241 if (mod != 3)
4242 goto illegal_op;
4243#ifdef TARGET_X86_64
4244 if (s->aflag == 2) {
4245 gen_op_movq_A0_reg(R_EDI);
4246 } else
4247#endif
4248 {
4249 gen_op_movl_A0_reg(R_EDI);
4250 if (s->aflag == 0)
4251 gen_op_andl_A0_ffff();
4252 }
4253 gen_add_A0_ds_seg(s);
4254
4255 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4256 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
4257 ((void (*)(TCGv_ptr, TCGv_ptr, TCGv))sse_op2)(cpu_ptr0, cpu_ptr1, cpu_A0);
4258 break;
4259 default:
4260 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4261 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
4262 ((void (*)(TCGv_ptr, TCGv_ptr))sse_op2)(cpu_ptr0, cpu_ptr1);
4263 break;
4264 }
4265 if (b == 0x2e || b == 0x2f) {
4266 s->cc_op = CC_OP_EFLAGS;
4267 }
4268 }
4269}
4270
4271#ifdef VBOX
4272/* Checks if it's an invalid lock sequence. Only a few instructions
4273 can be used together with the lock prefix, and of those only the
4274 forms that write a memory operand. So, this is kind of annoying
4275 work to do...
4276 The AMD manual lists the following instructions.
4277 ADC
4278 ADD
4279 AND
4280 BTC
4281 BTR
4282 BTS
4283 CMPXCHG
4284 CMPXCHG8B
4285 CMPXCHG16B
4286 DEC
4287 INC
4288 NEG
4289 NOT
4290 OR
4291 SBB
4292 SUB
4293 XADD
4294 XCHG
4295 XOR */
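/* For illustration: "lock add [mem], eax" (F0 01 /r with mod != 3) is a
   valid sequence, whereas "lock add eax, ebx" (mod == 3) or "lock mov"
   must raise #UD; the decoder below mirrors exactly that rule. */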
4296static bool is_invalid_lock_sequence(DisasContext *s, target_ulong pc_start, int b)
4297{
4298 target_ulong pc = s->pc;
4299 int modrm, mod, op;
4300
4301 /* X={8,16,32,64} Y={16,32,64} */
4302 switch (b)
4303 {
4304 /* /2: ADC reg/memX, immX */
4305 /* /0: ADD reg/memX, immX */
4306 /* /4: AND reg/memX, immX */
4307 /* /1: OR reg/memX, immX */
4308 /* /3: SBB reg/memX, immX */
4309 /* /5: SUB reg/memX, immX */
4310 /* /6: XOR reg/memX, immX */
4311 case 0x80:
4312 case 0x81:
4313 case 0x83:
4314 modrm = ldub_code(pc++);
4315 op = (modrm >> 3) & 7;
4316 if (op == 7) /* /7: CMP */
4317 break;
4318 mod = (modrm >> 6) & 3;
4319 if (mod == 3) /* register destination */
4320 break;
4321 return false;
4322
4323 case 0x10: /* /r: ADC reg/mem8, reg8 */
4324 case 0x11: /* /r: ADC reg/memX, regY */
4325 case 0x00: /* /r: ADD reg/mem8, reg8 */
4326 case 0x01: /* /r: ADD reg/memX, regY */
4327 case 0x20: /* /r: AND reg/mem8, reg8 */
4328 case 0x21: /* /r: AND reg/memY, regY */
4329 case 0x08: /* /r: OR reg/mem8, reg8 */
4330 case 0x09: /* /r: OR reg/memY, regY */
4331 case 0x18: /* /r: SBB reg/mem8, reg8 */
4332 case 0x19: /* /r: SBB reg/memY, regY */
4333 case 0x28: /* /r: SUB reg/mem8, reg8 */
4334 case 0x29: /* /r: SUB reg/memY, regY */
4335 case 0x86: /* /r: XCHG reg/mem8, reg8 or XCHG reg8, reg/mem8 */
4336 case 0x87: /* /r: XCHG reg/memY, regY or XCHG regY, reg/memY */
4337 case 0x30: /* /r: XOR reg/mem8, reg8 */
4338 case 0x31: /* /r: XOR reg/memY, regY */
4339 modrm = ldub_code(pc++);
4340 mod = (modrm >> 6) & 3;
4341 if (mod == 3) /* register destination */
4342 break;
4343 return false;
4344
4345 /* /1: DEC reg/memX */
4346 /* /0: INC reg/memX */
4347 case 0xfe:
4348 case 0xff:
4349 modrm = ldub_code(pc++);
4350 mod = (modrm >> 6) & 3;
4351 if (mod == 3) /* register destination */
4352 break;
4353 return false;
4354
4355 /* /3: NEG reg/memX */
4356 /* /2: NOT reg/memX */
4357 case 0xf6:
4358 case 0xf7:
4359 modrm = ldub_code(pc++);
4360 mod = (modrm >> 6) & 3;
4361 if (mod == 3) /* register destination */
4362 break;
4363 return false;
4364
4365 case 0x0f:
4366 b = ldub_code(pc++);
4367 switch (b)
4368 {
4369 /* /7: BTC reg/memY, imm8 */
4370 /* /6: BTR reg/memY, imm8 */
4371 /* /5: BTS reg/memY, imm8 */
4372 case 0xba:
4373 modrm = ldub_code(pc++);
4374 op = (modrm >> 3) & 7;
4375 if (op < 5)
4376 break;
4377 mod = (modrm >> 6) & 3;
4378 if (mod == 3) /* register destination */
4379 break;
4380 return false;
4381
4382 case 0xbb: /* /r: BTC reg/memY, regY */
4383 case 0xb3: /* /r: BTR reg/memY, regY */
4384 case 0xab: /* /r: BTS reg/memY, regY */
4385 case 0xb0: /* /r: CMPXCHG reg/mem8, reg8 */
4386 case 0xb1: /* /r: CMPXCHG reg/memY, regY */
4387 case 0xc0: /* /r: XADD reg/mem8, reg8 */
4388 case 0xc1: /* /r: XADD reg/memY, regY */
4389 modrm = ldub_code(pc++);
4390 mod = (modrm >> 6) & 3;
4391 if (mod == 3) /* register destination */
4392 break;
4393 return false;
4394
4395 /* /1: CMPXCHG8B mem64 or CMPXCHG16B mem128 */
4396 case 0xc7:
4397 modrm = ldub_code(pc++);
4398 op = (modrm >> 3) & 7;
4399 if (op != 1)
4400 break;
4401 return false;
4402 }
4403 break;
4404 }
4405
4406 /* Illegal sequence. s->pc is already past the lock prefix, and that
4407 is sufficient for the TB, I think. */
4408 Log(("illegal lock sequence %RGv (b=%#x)\n", pc_start, b));
4409 return true;
4410}
4411#endif /* VBOX */
4412
4413/* convert one instruction. s->is_jmp is set if the translation must
4414 be stopped. Return the next pc value */
4415static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
4416{
4417 int b, prefixes, aflag, dflag;
4418 int shift, ot;
4419 int modrm, reg, rm, mod, reg_addr, op, opreg, offset_addr, val;
4420 target_ulong next_eip, tval;
4421 int rex_w, rex_r;
4422
4423 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP)))
4424 tcg_gen_debug_insn_start(pc_start);
4425 s->pc = pc_start;
4426 prefixes = 0;
4427 aflag = s->code32;
4428 dflag = s->code32;
4429 s->override = -1;
4430 rex_w = -1;
4431 rex_r = 0;
4432#ifdef TARGET_X86_64
4433 s->rex_x = 0;
4434 s->rex_b = 0;
4435 x86_64_hregs = 0;
4436#endif
4437 s->rip_offset = 0; /* for relative ip address */
4438#ifdef VBOX
4439 /* nike: seems to only slow things down */
4440# if 0
4441 /* Always update EIP. Otherwise one must be very careful with generated code that can raise exceptions. */
4442
4443 gen_update_eip(pc_start - s->cs_base);
4444# endif
4445#endif /* VBOX */
4446
4447 next_byte:
4448 b = ldub_code(s->pc);
4449 s->pc++;
4450 /* check prefixes */
4451#ifdef TARGET_X86_64
4452 if (CODE64(s)) {
4453 switch (b) {
4454 case 0xf3:
4455 prefixes |= PREFIX_REPZ;
4456 goto next_byte;
4457 case 0xf2:
4458 prefixes |= PREFIX_REPNZ;
4459 goto next_byte;
4460 case 0xf0:
4461 prefixes |= PREFIX_LOCK;
4462 goto next_byte;
4463 case 0x2e:
4464 s->override = R_CS;
4465 goto next_byte;
4466 case 0x36:
4467 s->override = R_SS;
4468 goto next_byte;
4469 case 0x3e:
4470 s->override = R_DS;
4471 goto next_byte;
4472 case 0x26:
4473 s->override = R_ES;
4474 goto next_byte;
4475 case 0x64:
4476 s->override = R_FS;
4477 goto next_byte;
4478 case 0x65:
4479 s->override = R_GS;
4480 goto next_byte;
4481 case 0x66:
4482 prefixes |= PREFIX_DATA;
4483 goto next_byte;
4484 case 0x67:
4485 prefixes |= PREFIX_ADR;
4486 goto next_byte;
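/* A REX prefix (0x40..0x4F) encodes 0100WRXB. The shifts below turn each
   of the R/X/B bits into the value 8 so it can simply be OR'ed into the
   3-bit reg/index/base fields, extending them to 8..15 (e.g. 48 89 c8 is
   "mov rax, rcx" thanks to REX.W). */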
4487 case 0x40 ... 0x4f:
4488 /* REX prefix */
4489 rex_w = (b >> 3) & 1;
4490 rex_r = (b & 0x4) << 1;
4491 s->rex_x = (b & 0x2) << 2;
4492 REX_B(s) = (b & 0x1) << 3;
4493 x86_64_hregs = 1; /* select uniform byte register addressing */
4494 goto next_byte;
4495 }
4496 if (rex_w == 1) {
4497 /* 0x66 is ignored if rex.w is set */
4498 dflag = 2;
4499 } else {
4500 if (prefixes & PREFIX_DATA)
4501 dflag ^= 1;
4502 }
4503 if (!(prefixes & PREFIX_ADR))
4504 aflag = 2;
4505 } else
4506#endif
4507 {
4508 switch (b) {
4509 case 0xf3:
4510 prefixes |= PREFIX_REPZ;
4511 goto next_byte;
4512 case 0xf2:
4513 prefixes |= PREFIX_REPNZ;
4514 goto next_byte;
4515 case 0xf0:
4516 prefixes |= PREFIX_LOCK;
4517 goto next_byte;
4518 case 0x2e:
4519 s->override = R_CS;
4520 goto next_byte;
4521 case 0x36:
4522 s->override = R_SS;
4523 goto next_byte;
4524 case 0x3e:
4525 s->override = R_DS;
4526 goto next_byte;
4527 case 0x26:
4528 s->override = R_ES;
4529 goto next_byte;
4530 case 0x64:
4531 s->override = R_FS;
4532 goto next_byte;
4533 case 0x65:
4534 s->override = R_GS;
4535 goto next_byte;
4536 case 0x66:
4537 prefixes |= PREFIX_DATA;
4538 goto next_byte;
4539 case 0x67:
4540 prefixes |= PREFIX_ADR;
4541 goto next_byte;
4542 }
4543 if (prefixes & PREFIX_DATA)
4544 dflag ^= 1;
4545 if (prefixes & PREFIX_ADR)
4546 aflag ^= 1;
4547 }
4548
4549 s->prefix = prefixes;
4550 s->aflag = aflag;
4551 s->dflag = dflag;
4552
4553 /* lock generation */
4554#ifndef VBOX
4555 if (prefixes & PREFIX_LOCK)
4556 gen_helper_lock();
4557#else /* VBOX */
4558 if (prefixes & PREFIX_LOCK) {
4559 if (is_invalid_lock_sequence(s, pc_start, b)) {
4560 gen_exception(s, EXCP06_ILLOP, pc_start - s->cs_base);
4561 return s->pc;
4562 }
4563 gen_helper_lock();
4564 }
4565#endif /* VBOX */
4566
4567 /* now check op code */
4568 reswitch:
4569 switch(b) {
4570 case 0x0f:
4571 /**************************/
4572 /* extended op code */
4573 b = ldub_code(s->pc++) | 0x100;
4574 goto reswitch;
4575
4576 /**************************/
4577 /* arith & logic */
4578 case 0x00 ... 0x05:
4579 case 0x08 ... 0x0d:
4580 case 0x10 ... 0x15:
4581 case 0x18 ... 0x1d:
4582 case 0x20 ... 0x25:
4583 case 0x28 ... 0x2d:
4584 case 0x30 ... 0x35:
4585 case 0x38 ... 0x3d:
4586 {
4587 int op, f, val;
4588 op = (b >> 3) & 7;
4589 f = (b >> 1) & 3;
4590
4591 if ((b & 1) == 0)
4592 ot = OT_BYTE;
4593 else
4594 ot = dflag + OT_WORD;
4595
4596 switch(f) {
4597 case 0: /* OP Ev, Gv */
4598 modrm = ldub_code(s->pc++);
4599 reg = ((modrm >> 3) & 7) | rex_r;
4600 mod = (modrm >> 6) & 3;
4601 rm = (modrm & 7) | REX_B(s);
4602 if (mod != 3) {
4603 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
4604 opreg = OR_TMP0;
4605 } else if (op == OP_XORL && rm == reg) {
4606 xor_zero:
4607 /* xor reg, reg optimisation */
4608 gen_op_movl_T0_0();
4609 s->cc_op = CC_OP_LOGICB + ot;
4610 gen_op_mov_reg_T0(ot, reg);
4611 gen_op_update1_cc();
4612 break;
4613 } else {
4614 opreg = rm;
4615 }
4616 gen_op_mov_TN_reg(ot, 1, reg);
4617 gen_op(s, op, ot, opreg);
4618 break;
4619 case 1: /* OP Gv, Ev */
4620 modrm = ldub_code(s->pc++);
4621 mod = (modrm >> 6) & 3;
4622 reg = ((modrm >> 3) & 7) | rex_r;
4623 rm = (modrm & 7) | REX_B(s);
4624 if (mod != 3) {
4625 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
4626 gen_op_ld_T1_A0(ot + s->mem_index);
4627 } else if (op == OP_XORL && rm == reg) {
4628 goto xor_zero;
4629 } else {
4630 gen_op_mov_TN_reg(ot, 1, rm);
4631 }
4632 gen_op(s, op, ot, reg);
4633 break;
4634 case 2: /* OP A, Iv */
4635 val = insn_get(s, ot);
4636 gen_op_movl_T1_im(val);
4637 gen_op(s, op, ot, OR_EAX);
4638 break;
4639 }
4640 }
4641 break;
4642
4643 case 0x82:
4644 if (CODE64(s))
4645 goto illegal_op;
4646 case 0x80: /* GRP1 */
4647 case 0x81:
4648 case 0x83:
4649 {
4650 int val;
4651
4652 if ((b & 1) == 0)
4653 ot = OT_BYTE;
4654 else
4655 ot = dflag + OT_WORD;
4656
4657 modrm = ldub_code(s->pc++);
4658 mod = (modrm >> 6) & 3;
4659 rm = (modrm & 7) | REX_B(s);
4660 op = (modrm >> 3) & 7;
4661
4662 if (mod != 3) {
4663 if (b == 0x83)
4664 s->rip_offset = 1;
4665 else
4666 s->rip_offset = insn_const_size(ot);
4667 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
4668 opreg = OR_TMP0;
4669 } else {
4670 opreg = rm;
4671 }
4672
4673 switch(b) {
4674 default:
4675 case 0x80:
4676 case 0x81:
4677 case 0x82:
4678 val = insn_get(s, ot);
4679 break;
4680 case 0x83:
4681 val = (int8_t)insn_get(s, OT_BYTE);
4682 break;
4683 }
4684 gen_op_movl_T1_im(val);
4685 gen_op(s, op, ot, opreg);
4686 }
4687 break;
4688
4689 /**************************/
4690 /* inc, dec, and other misc arith */
4691 case 0x40 ... 0x47: /* inc Gv */
4692 ot = dflag ? OT_LONG : OT_WORD;
4693 gen_inc(s, ot, OR_EAX + (b & 7), 1);
4694 break;
4695 case 0x48 ... 0x4f: /* dec Gv */
4696 ot = dflag ? OT_LONG : OT_WORD;
4697 gen_inc(s, ot, OR_EAX + (b & 7), -1);
4698 break;
4699 case 0xf6: /* GRP3 */
4700 case 0xf7:
4701 if ((b & 1) == 0)
4702 ot = OT_BYTE;
4703 else
4704 ot = dflag + OT_WORD;
4705
4706 modrm = ldub_code(s->pc++);
4707 mod = (modrm >> 6) & 3;
4708 rm = (modrm & 7) | REX_B(s);
4709 op = (modrm >> 3) & 7;
4710 if (mod != 3) {
4711 if (op == 0)
4712 s->rip_offset = insn_const_size(ot);
4713 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
4714 gen_op_ld_T0_A0(ot + s->mem_index);
4715 } else {
4716 gen_op_mov_TN_reg(ot, 0, rm);
4717 }
4718
4719 switch(op) {
4720 case 0: /* test */
4721 val = insn_get(s, ot);
4722 gen_op_movl_T1_im(val);
4723 gen_op_testl_T0_T1_cc();
4724 s->cc_op = CC_OP_LOGICB + ot;
4725 break;
4726 case 2: /* not */
4727 tcg_gen_not_tl(cpu_T[0], cpu_T[0]);
4728 if (mod != 3) {
4729 gen_op_st_T0_A0(ot + s->mem_index);
4730 } else {
4731 gen_op_mov_reg_T0(ot, rm);
4732 }
4733 break;
4734 case 3: /* neg */
4735 tcg_gen_neg_tl(cpu_T[0], cpu_T[0]);
4736 if (mod != 3) {
4737 gen_op_st_T0_A0(ot + s->mem_index);
4738 } else {
4739 gen_op_mov_reg_T0(ot, rm);
4740 }
4741 gen_op_update_neg_cc();
4742 s->cc_op = CC_OP_SUBB + ot;
4743 break;
4744 case 4: /* mul */
4745 switch(ot) {
4746 case OT_BYTE:
4747 gen_op_mov_TN_reg(OT_BYTE, 1, R_EAX);
4748 tcg_gen_ext8u_tl(cpu_T[0], cpu_T[0]);
4749 tcg_gen_ext8u_tl(cpu_T[1], cpu_T[1]);
4750 /* XXX: use 32 bit mul which could be faster */
4751 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4752 gen_op_mov_reg_T0(OT_WORD, R_EAX);
4753 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4754 tcg_gen_andi_tl(cpu_cc_src, cpu_T[0], 0xff00);
4755 s->cc_op = CC_OP_MULB;
4756 break;
4757 case OT_WORD:
4758 gen_op_mov_TN_reg(OT_WORD, 1, R_EAX);
4759 tcg_gen_ext16u_tl(cpu_T[0], cpu_T[0]);
4760 tcg_gen_ext16u_tl(cpu_T[1], cpu_T[1]);
4761 /* XXX: use 32 bit mul which could be faster */
4762 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4763 gen_op_mov_reg_T0(OT_WORD, R_EAX);
4764 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4765 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 16);
4766 gen_op_mov_reg_T0(OT_WORD, R_EDX);
4767 tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
4768 s->cc_op = CC_OP_MULW;
4769 break;
4770 default:
4771 case OT_LONG:
4772#ifdef TARGET_X86_64
4773 gen_op_mov_TN_reg(OT_LONG, 1, R_EAX);
4774 tcg_gen_ext32u_tl(cpu_T[0], cpu_T[0]);
4775 tcg_gen_ext32u_tl(cpu_T[1], cpu_T[1]);
4776 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4777 gen_op_mov_reg_T0(OT_LONG, R_EAX);
4778 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4779 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 32);
4780 gen_op_mov_reg_T0(OT_LONG, R_EDX);
4781 tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
4782#else
4783 {
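/* 32x32 -> 64-bit unsigned multiply on a 32-bit target: widen both
   operands to i64; the low half of the product goes to EAX, the high
   half to EDX */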
4784 TCGv_i64 t0, t1;
4785 t0 = tcg_temp_new_i64();
4786 t1 = tcg_temp_new_i64();
4787 gen_op_mov_TN_reg(OT_LONG, 1, R_EAX);
4788 tcg_gen_extu_i32_i64(t0, cpu_T[0]);
4789 tcg_gen_extu_i32_i64(t1, cpu_T[1]);
4790 tcg_gen_mul_i64(t0, t0, t1);
4791 tcg_gen_trunc_i64_i32(cpu_T[0], t0);
4792 gen_op_mov_reg_T0(OT_LONG, R_EAX);
4793 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4794 tcg_gen_shri_i64(t0, t0, 32);
4795 tcg_gen_trunc_i64_i32(cpu_T[0], t0);
4796 gen_op_mov_reg_T0(OT_LONG, R_EDX);
4797 tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
4798 }
4799#endif
4800 s->cc_op = CC_OP_MULL;
4801 break;
4802#ifdef TARGET_X86_64
4803 case OT_QUAD:
4804 gen_helper_mulq_EAX_T0(cpu_T[0]);
4805 s->cc_op = CC_OP_MULQ;
4806 break;
4807#endif
4808 }
4809 break;
4810 case 5: /* imul */
4811 switch(ot) {
4812 case OT_BYTE:
4813 gen_op_mov_TN_reg(OT_BYTE, 1, R_EAX);
4814 tcg_gen_ext8s_tl(cpu_T[0], cpu_T[0]);
4815 tcg_gen_ext8s_tl(cpu_T[1], cpu_T[1]);
4816 /* XXX: use 32 bit mul which could be faster */
4817 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4818 gen_op_mov_reg_T0(OT_WORD, R_EAX);
4819 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4820 tcg_gen_ext8s_tl(cpu_tmp0, cpu_T[0]);
4821 tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
4822 s->cc_op = CC_OP_MULB;
4823 break;
4824 case OT_WORD:
4825 gen_op_mov_TN_reg(OT_WORD, 1, R_EAX);
4826 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
4827 tcg_gen_ext16s_tl(cpu_T[1], cpu_T[1]);
4828 /* XXX: use 32 bit mul which could be faster */
4829 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4830 gen_op_mov_reg_T0(OT_WORD, R_EAX);
4831 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4832 tcg_gen_ext16s_tl(cpu_tmp0, cpu_T[0]);
4833 tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
4834 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 16);
4835 gen_op_mov_reg_T0(OT_WORD, R_EDX);
4836 s->cc_op = CC_OP_MULW;
4837 break;
4838 default:
4839 case OT_LONG:
4840#ifdef TARGET_X86_64
4841 gen_op_mov_TN_reg(OT_LONG, 1, R_EAX);
4842 tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
4843 tcg_gen_ext32s_tl(cpu_T[1], cpu_T[1]);
4844 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4845 gen_op_mov_reg_T0(OT_LONG, R_EAX);
4846 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4847 tcg_gen_ext32s_tl(cpu_tmp0, cpu_T[0]);
4848 tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
4849 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 32);
4850 gen_op_mov_reg_T0(OT_LONG, R_EDX);
4851#else
4852 {
4853 TCGv_i64 t0, t1;
4854 t0 = tcg_temp_new_i64();
4855 t1 = tcg_temp_new_i64();
4856 gen_op_mov_TN_reg(OT_LONG, 1, R_EAX);
4857 tcg_gen_ext_i32_i64(t0, cpu_T[0]);
4858 tcg_gen_ext_i32_i64(t1, cpu_T[1]);
4859 tcg_gen_mul_i64(t0, t0, t1);
4860 tcg_gen_trunc_i64_i32(cpu_T[0], t0);
4861 gen_op_mov_reg_T0(OT_LONG, R_EAX);
4862 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4863 tcg_gen_sari_tl(cpu_tmp0, cpu_T[0], 31);
4864 tcg_gen_shri_i64(t0, t0, 32);
4865 tcg_gen_trunc_i64_i32(cpu_T[0], t0);
4866 gen_op_mov_reg_T0(OT_LONG, R_EDX);
4867 tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
4868 }
4869#endif
4870 s->cc_op = CC_OP_MULL;
4871 break;
4872#ifdef TARGET_X86_64
4873 case OT_QUAD:
4874 gen_helper_imulq_EAX_T0(cpu_T[0]);
4875 s->cc_op = CC_OP_MULQ;
4876 break;
4877#endif
4878 }
4879 break;
4880 case 6: /* div */
4881 switch(ot) {
4882 case OT_BYTE:
4883 gen_jmp_im(pc_start - s->cs_base);
4884 gen_helper_divb_AL(cpu_T[0]);
4885 break;
4886 case OT_WORD:
4887 gen_jmp_im(pc_start - s->cs_base);
4888 gen_helper_divw_AX(cpu_T[0]);
4889 break;
4890 default:
4891 case OT_LONG:
4892 gen_jmp_im(pc_start - s->cs_base);
4893 gen_helper_divl_EAX(cpu_T[0]);
4894 break;
4895#ifdef TARGET_X86_64
4896 case OT_QUAD:
4897 gen_jmp_im(pc_start - s->cs_base);
4898 gen_helper_divq_EAX(cpu_T[0]);
4899 break;
4900#endif
4901 }
4902 break;
4903 case 7: /* idiv */
4904 switch(ot) {
4905 case OT_BYTE:
4906 gen_jmp_im(pc_start - s->cs_base);
4907 gen_helper_idivb_AL(cpu_T[0]);
4908 break;
4909 case OT_WORD:
4910 gen_jmp_im(pc_start - s->cs_base);
4911 gen_helper_idivw_AX(cpu_T[0]);
4912 break;
4913 default:
4914 case OT_LONG:
4915 gen_jmp_im(pc_start - s->cs_base);
4916 gen_helper_idivl_EAX(cpu_T[0]);
4917 break;
4918#ifdef TARGET_X86_64
4919 case OT_QUAD:
4920 gen_jmp_im(pc_start - s->cs_base);
4921 gen_helper_idivq_EAX(cpu_T[0]);
4922 break;
4923#endif
4924 }
4925 break;
4926 default:
4927 goto illegal_op;
4928 }
4929 break;
4930
4931 case 0xfe: /* GRP4 */
4932 case 0xff: /* GRP5 */
4933 if ((b & 1) == 0)
4934 ot = OT_BYTE;
4935 else
4936 ot = dflag + OT_WORD;
4937
4938 modrm = ldub_code(s->pc++);
4939 mod = (modrm >> 6) & 3;
4940 rm = (modrm & 7) | REX_B(s);
4941 op = (modrm >> 3) & 7;
4942 if (op >= 2 && b == 0xfe) {
4943 goto illegal_op;
4944 }
4945 if (CODE64(s)) {
4946 if (op == 2 || op == 4) {
4947 /* operand size for jumps is 64 bit */
4948 ot = OT_QUAD;
4949 } else if (op == 3 || op == 5) {
4950 ot = dflag ? OT_LONG + (rex_w == 1) : OT_WORD;
4951 } else if (op == 6) {
4952 /* default push size is 64 bit */
4953 ot = dflag ? OT_QUAD : OT_WORD;
4954 }
4955 }
4956 if (mod != 3) {
4957 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
4958 if (op >= 2 && op != 3 && op != 5)
4959 gen_op_ld_T0_A0(ot + s->mem_index);
4960 } else {
4961 gen_op_mov_TN_reg(ot, 0, rm);
4962 }
4963
4964 switch(op) {
4965 case 0: /* inc Ev */
4966 if (mod != 3)
4967 opreg = OR_TMP0;
4968 else
4969 opreg = rm;
4970 gen_inc(s, ot, opreg, 1);
4971 break;
4972 case 1: /* dec Ev */
4973 if (mod != 3)
4974 opreg = OR_TMP0;
4975 else
4976 opreg = rm;
4977 gen_inc(s, ot, opreg, -1);
4978 break;
4979 case 2: /* call Ev */
4980 /* XXX: optimize if memory (no 'and' is necessary) */
4981#ifdef VBOX_WITH_CALL_RECORD
4982 if (s->record_call)
4983 gen_op_record_call();
4984#endif
4985 if (s->dflag == 0)
4986 gen_op_andl_T0_ffff();
4987 next_eip = s->pc - s->cs_base;
4988 gen_movtl_T1_im(next_eip);
4989 gen_push_T1(s);
4990 gen_op_jmp_T0();
4991 gen_eob(s);
4992 break;
4993 case 3: /* lcall Ev */
4994 gen_op_ld_T1_A0(ot + s->mem_index);
4995 gen_add_A0_im(s, 1 << (ot - OT_WORD + 1));
4996 gen_op_ldu_T0_A0(OT_WORD + s->mem_index);
4997 do_lcall:
4998 if (s->pe && !s->vm86) {
4999 if (s->cc_op != CC_OP_DYNAMIC)
5000 gen_op_set_cc_op(s->cc_op);
5001 gen_jmp_im(pc_start - s->cs_base);
5002 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5003 gen_helper_lcall_protected(cpu_tmp2_i32, cpu_T[1],
5004 tcg_const_i32(dflag),
5005 tcg_const_i32(s->pc - pc_start));
5006 } else {
5007 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5008 gen_helper_lcall_real(cpu_tmp2_i32, cpu_T[1],
5009 tcg_const_i32(dflag),
5010 tcg_const_i32(s->pc - s->cs_base));
5011 }
5012 gen_eob(s);
5013 break;
5014 case 4: /* jmp Ev */
5015 if (s->dflag == 0)
5016 gen_op_andl_T0_ffff();
5017 gen_op_jmp_T0();
5018 gen_eob(s);
5019 break;
5020 case 5: /* ljmp Ev */
5021 gen_op_ld_T1_A0(ot + s->mem_index);
5022 gen_add_A0_im(s, 1 << (ot - OT_WORD + 1));
5023 gen_op_ldu_T0_A0(OT_WORD + s->mem_index);
5024 do_ljmp:
5025 if (s->pe && !s->vm86) {
5026 if (s->cc_op != CC_OP_DYNAMIC)
5027 gen_op_set_cc_op(s->cc_op);
5028 gen_jmp_im(pc_start - s->cs_base);
5029 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5030 gen_helper_ljmp_protected(cpu_tmp2_i32, cpu_T[1],
5031 tcg_const_i32(s->pc - pc_start));
5032 } else {
5033 gen_op_movl_seg_T0_vm(R_CS);
5034 gen_op_movl_T0_T1();
5035 gen_op_jmp_T0();
5036 }
5037 gen_eob(s);
5038 break;
5039 case 6: /* push Ev */
5040 gen_push_T0(s);
5041 break;
5042 default:
5043 goto illegal_op;
5044 }
5045 break;
5046
5047 case 0x84: /* test Ev, Gv */
5048 case 0x85:
5049 if ((b & 1) == 0)
5050 ot = OT_BYTE;
5051 else
5052 ot = dflag + OT_WORD;
5053
5054 modrm = ldub_code(s->pc++);
5055 reg = ((modrm >> 3) & 7) | rex_r;
5056
5057 gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0);
5058 gen_op_mov_TN_reg(ot, 1, reg);
5059 gen_op_testl_T0_T1_cc();
5060 s->cc_op = CC_OP_LOGICB + ot;
5061 break;
5062
5063 case 0xa8: /* test eAX, Iv */
5064 case 0xa9:
5065 if ((b & 1) == 0)
5066 ot = OT_BYTE;
5067 else
5068 ot = dflag + OT_WORD;
5069 val = insn_get(s, ot);
5070
5071 gen_op_mov_TN_reg(ot, 0, OR_EAX);
5072 gen_op_movl_T1_im(val);
5073 gen_op_testl_T0_T1_cc();
5074 s->cc_op = CC_OP_LOGICB + ot;
5075 break;
5076
5077 case 0x98: /* CWDE/CBW */
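/* sign-extend within the accumulator: AL->AX, AX->EAX or EAX->RAX,
   depending on the operand size */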
5078#ifdef TARGET_X86_64
5079 if (dflag == 2) {
5080 gen_op_mov_TN_reg(OT_LONG, 0, R_EAX);
5081 tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
5082 gen_op_mov_reg_T0(OT_QUAD, R_EAX);
5083 } else
5084#endif
5085 if (dflag == 1) {
5086 gen_op_mov_TN_reg(OT_WORD, 0, R_EAX);
5087 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
5088 gen_op_mov_reg_T0(OT_LONG, R_EAX);
5089 } else {
5090 gen_op_mov_TN_reg(OT_BYTE, 0, R_EAX);
5091 tcg_gen_ext8s_tl(cpu_T[0], cpu_T[0]);
5092 gen_op_mov_reg_T0(OT_WORD, R_EAX);
5093 }
5094 break;
5095 case 0x99: /* CDQ/CWD */
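/* sign-extend the accumulator into DX:AX / EDX:EAX / RDX:RAX by
   replicating its sign bit into the data register */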
5096#ifdef TARGET_X86_64
5097 if (dflag == 2) {
5098 gen_op_mov_TN_reg(OT_QUAD, 0, R_EAX);
5099 tcg_gen_sari_tl(cpu_T[0], cpu_T[0], 63);
5100 gen_op_mov_reg_T0(OT_QUAD, R_EDX);
5101 } else
5102#endif
5103 if (dflag == 1) {
5104 gen_op_mov_TN_reg(OT_LONG, 0, R_EAX);
5105 tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
5106 tcg_gen_sari_tl(cpu_T[0], cpu_T[0], 31);
5107 gen_op_mov_reg_T0(OT_LONG, R_EDX);
5108 } else {
5109 gen_op_mov_TN_reg(OT_WORD, 0, R_EAX);
5110 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
5111 tcg_gen_sari_tl(cpu_T[0], cpu_T[0], 15);
5112 gen_op_mov_reg_T0(OT_WORD, R_EDX);
5113 }
5114 break;
5115 case 0x1af: /* imul Gv, Ev */
5116 case 0x69: /* imul Gv, Ev, I */
5117 case 0x6b:
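/* three forms share this body: IMUL Gv,Ev (0F AF), IMUL Gv,Ev,Iz (69)
   and IMUL Gv,Ev,Ib (6B) */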
5118 ot = dflag + OT_WORD;
5119 modrm = ldub_code(s->pc++);
5120 reg = ((modrm >> 3) & 7) | rex_r;
5121 if (b == 0x69)
5122 s->rip_offset = insn_const_size(ot);
5123 else if (b == 0x6b)
5124 s->rip_offset = 1;
5125 gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0);
5126 if (b == 0x69) {
5127 val = insn_get(s, ot);
5128 gen_op_movl_T1_im(val);
5129 } else if (b == 0x6b) {
5130 val = (int8_t)insn_get(s, OT_BYTE);
5131 gen_op_movl_T1_im(val);
5132 } else {
5133 gen_op_mov_TN_reg(ot, 1, reg);
5134 }
5135
5136#ifdef TARGET_X86_64
5137 if (ot == OT_QUAD) {
5138 gen_helper_imulq_T0_T1(cpu_T[0], cpu_T[0], cpu_T[1]);
5139 } else
5140#endif
5141 if (ot == OT_LONG) {
5142#ifdef TARGET_X86_64
5143 tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
5144 tcg_gen_ext32s_tl(cpu_T[1], cpu_T[1]);
5145 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
5146 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
5147 tcg_gen_ext32s_tl(cpu_tmp0, cpu_T[0]);
5148 tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
5149#else
5150 {
5151 TCGv_i64 t0, t1;
5152 t0 = tcg_temp_new_i64();
5153 t1 = tcg_temp_new_i64();
5154 tcg_gen_ext_i32_i64(t0, cpu_T[0]);
5155 tcg_gen_ext_i32_i64(t1, cpu_T[1]);
5156 tcg_gen_mul_i64(t0, t0, t1);
5157 tcg_gen_trunc_i64_i32(cpu_T[0], t0);
5158 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
5159 tcg_gen_sari_tl(cpu_tmp0, cpu_T[0], 31);
5160 tcg_gen_shri_i64(t0, t0, 32);
5161 tcg_gen_trunc_i64_i32(cpu_T[1], t0);
5162 tcg_gen_sub_tl(cpu_cc_src, cpu_T[1], cpu_tmp0);
5163 }
5164#endif
5165 } else {
5166 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
5167 tcg_gen_ext16s_tl(cpu_T[1], cpu_T[1]);
5168 /* XXX: use 32 bit mul which could be faster */
5169 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
5170 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
5171 tcg_gen_ext16s_tl(cpu_tmp0, cpu_T[0]);
5172 tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
5173 }
5174 gen_op_mov_reg_T0(ot, reg);
5175 s->cc_op = CC_OP_MULB + ot;
5176 break;
5177 case 0x1c0:
5178 case 0x1c1: /* xadd Ev, Gv */
5179 if ((b & 1) == 0)
5180 ot = OT_BYTE;
5181 else
5182 ot = dflag + OT_WORD;
5183 modrm = ldub_code(s->pc++);
5184 reg = ((modrm >> 3) & 7) | rex_r;
5185 mod = (modrm >> 6) & 3;
5186 if (mod == 3) {
5187 rm = (modrm & 7) | REX_B(s);
5188 gen_op_mov_TN_reg(ot, 0, reg);
5189 gen_op_mov_TN_reg(ot, 1, rm);
5190 gen_op_addl_T0_T1();
5191 gen_op_mov_reg_T1(ot, reg);
5192 gen_op_mov_reg_T0(ot, rm);
5193 } else {
5194 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
5195 gen_op_mov_TN_reg(ot, 0, reg);
5196 gen_op_ld_T1_A0(ot + s->mem_index);
5197 gen_op_addl_T0_T1();
5198 gen_op_st_T0_A0(ot + s->mem_index);
5199 gen_op_mov_reg_T1(ot, reg);
5200 }
5201 gen_op_update2_cc();
5202 s->cc_op = CC_OP_ADDB + ot;
5203 break;
5204 case 0x1b0:
5205 case 0x1b1: /* cmpxchg Ev, Gv */
5206 {
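/* CMPXCHG: if the accumulator equals the destination, dest := src,
   else accumulator := dest; ZF comes out of the SUB cc_op set below.
   Note that the memory form always issues the store (rewriting the old
   value when the compare fails), presumably so a LOCK'ed access stays
   a single read-modify-write. */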
5207 int label1, label2;
5208 TCGv t0, t1, t2, a0;
5209
5210 if ((b & 1) == 0)
5211 ot = OT_BYTE;
5212 else
5213 ot = dflag + OT_WORD;
5214 modrm = ldub_code(s->pc++);
5215 reg = ((modrm >> 3) & 7) | rex_r;
5216 mod = (modrm >> 6) & 3;
5217 t0 = tcg_temp_local_new();
5218 t1 = tcg_temp_local_new();
5219 t2 = tcg_temp_local_new();
5220 a0 = tcg_temp_local_new();
5221 gen_op_mov_v_reg(ot, t1, reg);
5222 if (mod == 3) {
5223 rm = (modrm & 7) | REX_B(s);
5224 gen_op_mov_v_reg(ot, t0, rm);
5225 } else {
5226 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
5227 tcg_gen_mov_tl(a0, cpu_A0);
5228 gen_op_ld_v(ot + s->mem_index, t0, a0);
5229 rm = 0; /* avoid warning */
5230 }
5231 label1 = gen_new_label();
5232 tcg_gen_sub_tl(t2, cpu_regs[R_EAX], t0);
5233 gen_extu(ot, t2);
5234 tcg_gen_brcondi_tl(TCG_COND_EQ, t2, 0, label1);
5235 if (mod == 3) {
5236 label2 = gen_new_label();
5237 gen_op_mov_reg_v(ot, R_EAX, t0);
5238 tcg_gen_br(label2);
5239 gen_set_label(label1);
5240 gen_op_mov_reg_v(ot, rm, t1);
5241 gen_set_label(label2);
5242 } else {
5243 tcg_gen_mov_tl(t1, t0);
5244 gen_op_mov_reg_v(ot, R_EAX, t0);
5245 gen_set_label(label1);
5246 /* always store */
5247 gen_op_st_v(ot + s->mem_index, t1, a0);
5248 }
5249 tcg_gen_mov_tl(cpu_cc_src, t0);
5250 tcg_gen_mov_tl(cpu_cc_dst, t2);
5251 s->cc_op = CC_OP_SUBB + ot;
5252 tcg_temp_free(t0);
5253 tcg_temp_free(t1);
5254 tcg_temp_free(t2);
5255 tcg_temp_free(a0);
5256 }
5257 break;
5258 case 0x1c7: /* cmpxchg8b */
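/* CMPXCHG8B m64 (CMPXCHG16B m128 with REX.W): requires a memory operand
   encoded with /1, gated on CPUID CX8/CX16 below */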
5259 modrm = ldub_code(s->pc++);
5260 mod = (modrm >> 6) & 3;
5261 if ((mod == 3) || ((modrm & 0x38) != 0x8))
5262 goto illegal_op;
5263#ifdef TARGET_X86_64
5264 if (dflag == 2) {
5265 if (!(s->cpuid_ext_features & CPUID_EXT_CX16))
5266 goto illegal_op;
5267 gen_jmp_im(pc_start - s->cs_base);
5268 if (s->cc_op != CC_OP_DYNAMIC)
5269 gen_op_set_cc_op(s->cc_op);
5270 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
5271 gen_helper_cmpxchg16b(cpu_A0);
5272 } else
5273#endif
5274 {
5275 if (!(s->cpuid_features & CPUID_CX8))
5276 goto illegal_op;
5277 gen_jmp_im(pc_start - s->cs_base);
5278 if (s->cc_op != CC_OP_DYNAMIC)
5279 gen_op_set_cc_op(s->cc_op);
5280 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
5281 gen_helper_cmpxchg8b(cpu_A0);
5282 }
5283 s->cc_op = CC_OP_EFLAGS;
5284 break;
5285
5286 /**************************/
5287 /* push/pop */
5288 case 0x50 ... 0x57: /* push */
5289 gen_op_mov_TN_reg(OT_LONG, 0, (b & 7) | REX_B(s));
5290 gen_push_T0(s);
5291 break;
5292 case 0x58 ... 0x5f: /* pop */
5293 if (CODE64(s)) {
5294 ot = dflag ? OT_QUAD : OT_WORD;
5295 } else {
5296 ot = dflag + OT_WORD;
5297 }
5298 gen_pop_T0(s);
5299 /* NOTE: order is important for pop %sp */
5300 gen_pop_update(s);
5301 gen_op_mov_reg_T0(ot, (b & 7) | REX_B(s));
5302 break;
5303 case 0x60: /* pusha */
5304 if (CODE64(s))
5305 goto illegal_op;
5306 gen_pusha(s);
5307 break;
5308 case 0x61: /* popa */
5309 if (CODE64(s))
5310 goto illegal_op;
5311 gen_popa(s);
5312 break;
5313 case 0x68: /* push Iv */
5314 case 0x6a:
5315 if (CODE64(s)) {
5316 ot = dflag ? OT_QUAD : OT_WORD;
5317 } else {
5318 ot = dflag + OT_WORD;
5319 }
5320 if (b == 0x68)
5321 val = insn_get(s, ot);
5322 else
5323 val = (int8_t)insn_get(s, OT_BYTE);
5324 gen_op_movl_T0_im(val);
5325 gen_push_T0(s);
5326 break;
5327 case 0x8f: /* pop Ev */
5328 if (CODE64(s)) {
5329 ot = dflag ? OT_QUAD : OT_WORD;
5330 } else {
5331 ot = dflag + OT_WORD;
5332 }
5333 modrm = ldub_code(s->pc++);
5334 mod = (modrm >> 6) & 3;
5335 gen_pop_T0(s);
5336 if (mod == 3) {
5337 /* NOTE: order is important for pop %sp */
5338 gen_pop_update(s);
5339 rm = (modrm & 7) | REX_B(s);
5340 gen_op_mov_reg_T0(ot, rm);
5341 } else {
5342 /* NOTE: order is important too for MMU exceptions */
5343 s->popl_esp_hack = 1 << ot;
5344 gen_ldst_modrm(s, modrm, ot, OR_TMP0, 1);
5345 s->popl_esp_hack = 0;
5346 gen_pop_update(s);
5347 }
5348 break;
5349 case 0xc8: /* enter */
5350 {
5351 int level;
5352 val = lduw_code(s->pc);
5353 s->pc += 2;
5354 level = ldub_code(s->pc++);
5355 gen_enter(s, val, level);
5356 }
5357 break;
5358 case 0xc9: /* leave */
5359 /* XXX: exception not precise (ESP is updated before potential exception) */
5360 if (CODE64(s)) {
5361 gen_op_mov_TN_reg(OT_QUAD, 0, R_EBP);
5362 gen_op_mov_reg_T0(OT_QUAD, R_ESP);
5363 } else if (s->ss32) {
5364 gen_op_mov_TN_reg(OT_LONG, 0, R_EBP);
5365 gen_op_mov_reg_T0(OT_LONG, R_ESP);
5366 } else {
5367 gen_op_mov_TN_reg(OT_WORD, 0, R_EBP);
5368 gen_op_mov_reg_T0(OT_WORD, R_ESP);
5369 }
5370 gen_pop_T0(s);
5371 if (CODE64(s)) {
5372 ot = dflag ? OT_QUAD : OT_WORD;
5373 } else {
5374 ot = dflag + OT_WORD;
5375 }
5376 gen_op_mov_reg_T0(ot, R_EBP);
5377 gen_pop_update(s);
5378 break;
5379 case 0x06: /* push es */
5380 case 0x0e: /* push cs */
5381 case 0x16: /* push ss */
5382 case 0x1e: /* push ds */
5383 if (CODE64(s))
5384 goto illegal_op;
5385 gen_op_movl_T0_seg(b >> 3);
5386 gen_push_T0(s);
5387 break;
5388 case 0x1a0: /* push fs */
5389 case 0x1a8: /* push gs */
5390 gen_op_movl_T0_seg((b >> 3) & 7);
5391 gen_push_T0(s);
5392 break;
5393 case 0x07: /* pop es */
5394 case 0x17: /* pop ss */
5395 case 0x1f: /* pop ds */
5396 if (CODE64(s))
5397 goto illegal_op;
5398 reg = b >> 3;
5399 gen_pop_T0(s);
5400 gen_movl_seg_T0(s, reg, pc_start - s->cs_base);
5401 gen_pop_update(s);
5402 if (reg == R_SS) {
5403 /* if reg == SS, inhibit interrupts/trace. */
5404 /* If several instructions disable interrupts, only the
5405 _first_ does it */
5406 if (!(s->tb->flags & HF_INHIBIT_IRQ_MASK))
5407 gen_helper_set_inhibit_irq();
5408 s->tf = 0;
5409 }
5410 if (s->is_jmp) {
5411 gen_jmp_im(s->pc - s->cs_base);
5412 gen_eob(s);
5413 }
5414 break;
5415 case 0x1a1: /* pop fs */
5416 case 0x1a9: /* pop gs */
5417 gen_pop_T0(s);
5418 gen_movl_seg_T0(s, (b >> 3) & 7, pc_start - s->cs_base);
5419 gen_pop_update(s);
5420 if (s->is_jmp) {
5421 gen_jmp_im(s->pc - s->cs_base);
5422 gen_eob(s);
5423 }
5424 break;
5425
5426 /**************************/
5427 /* mov */
5428 case 0x88:
5429 case 0x89: /* mov Ev, Gv */
5430 if ((b & 1) == 0)
5431 ot = OT_BYTE;
5432 else
5433 ot = dflag + OT_WORD;
5434 modrm = ldub_code(s->pc++);
5435 reg = ((modrm >> 3) & 7) | rex_r;
5436
5437 /* generate a generic store */
5438 gen_ldst_modrm(s, modrm, ot, reg, 1);
5439 break;
5440 case 0xc6:
5441 case 0xc7: /* mov Ev, Iv */
5442 if ((b & 1) == 0)
5443 ot = OT_BYTE;
5444 else
5445 ot = dflag + OT_WORD;
5446 modrm = ldub_code(s->pc++);
5447 mod = (modrm >> 6) & 3;
5448 if (mod != 3) {
5449 s->rip_offset = insn_const_size(ot);
5450 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
5451 }
5452 val = insn_get(s, ot);
5453 gen_op_movl_T0_im(val);
5454 if (mod != 3)
5455 gen_op_st_T0_A0(ot + s->mem_index);
5456 else
5457 gen_op_mov_reg_T0(ot, (modrm & 7) | REX_B(s));
5458 break;
5459 case 0x8a:
5460 case 0x8b: /* mov Gv, Ev */
5461#ifdef VBOX /* dtrace hot fix */
5462 if (prefixes & PREFIX_LOCK)
5463 goto illegal_op;
5464#endif
5465 if ((b & 1) == 0)
5466 ot = OT_BYTE;
5467 else
5468 ot = OT_WORD + dflag;
5469 modrm = ldub_code(s->pc++);
5470 reg = ((modrm >> 3) & 7) | rex_r;
5471
5472 gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0);
5473 gen_op_mov_reg_T0(ot, reg);
5474 break;
5475 case 0x8e: /* mov seg, Ew */
5476 modrm = ldub_code(s->pc++);
5477 reg = (modrm >> 3) & 7;
5478 if (reg >= 6 || reg == R_CS)
5479 goto illegal_op;
5480 gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0);
5481 gen_movl_seg_T0(s, reg, pc_start - s->cs_base);
5482 if (reg == R_SS) {
5483 /* if reg == SS, inhibit interrupts/trace */
5484 /* If several instructions disable interrupts, only the
5485 _first_ does it */
5486 if (!(s->tb->flags & HF_INHIBIT_IRQ_MASK))
5487 gen_helper_set_inhibit_irq();
5488 s->tf = 0;
5489 }
5490 if (s->is_jmp) {
5491 gen_jmp_im(s->pc - s->cs_base);
5492 gen_eob(s);
5493 }
5494 break;
5495 case 0x8c: /* mov Ev, seg */
5496 modrm = ldub_code(s->pc++);
5497 reg = (modrm >> 3) & 7;
5498 mod = (modrm >> 6) & 3;
5499 if (reg >= 6)
5500 goto illegal_op;
5501 gen_op_movl_T0_seg(reg);
5502 if (mod == 3)
5503 ot = OT_WORD + dflag;
5504 else
5505 ot = OT_WORD;
5506 gen_ldst_modrm(s, modrm, ot, OR_TMP0, 1);
5507 break;
5508
5509 case 0x1b6: /* movzbS Gv, Eb */
5510 case 0x1b7: /* movzwS Gv, Ew */
5511 case 0x1be: /* movsbS Gv, Eb */
5512 case 0x1bf: /* movswS Gv, Ew */
5513 {
5514 int d_ot;
5515 /* d_ot is the size of destination */
5516 d_ot = dflag + OT_WORD;
5517 /* ot is the size of source */
5518 ot = (b & 1) + OT_BYTE;
5519 modrm = ldub_code(s->pc++);
5520 reg = ((modrm >> 3) & 7) | rex_r;
5521 mod = (modrm >> 6) & 3;
5522 rm = (modrm & 7) | REX_B(s);
5523
5524 if (mod == 3) {
5525 gen_op_mov_TN_reg(ot, 0, rm);
5526 switch(ot | (b & 8)) {
5527 case OT_BYTE:
5528 tcg_gen_ext8u_tl(cpu_T[0], cpu_T[0]);
5529 break;
5530 case OT_BYTE | 8:
5531 tcg_gen_ext8s_tl(cpu_T[0], cpu_T[0]);
5532 break;
5533 case OT_WORD:
5534 tcg_gen_ext16u_tl(cpu_T[0], cpu_T[0]);
5535 break;
5536 default:
5537 case OT_WORD | 8:
5538 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
5539 break;
5540 }
5541 gen_op_mov_reg_T0(d_ot, reg);
5542 } else {
5543 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
5544 if (b & 8) {
5545 gen_op_lds_T0_A0(ot + s->mem_index);
5546 } else {
5547 gen_op_ldu_T0_A0(ot + s->mem_index);
5548 }
5549 gen_op_mov_reg_T0(d_ot, reg);
5550 }
5551 }
5552 break;
5553
5554 case 0x8d: /* lea */
5555 ot = dflag + OT_WORD;
5556 modrm = ldub_code(s->pc++);
5557 mod = (modrm >> 6) & 3;
5558 if (mod == 3)
5559 goto illegal_op;
5560 reg = ((modrm >> 3) & 7) | rex_r;
5561 /* LEA yields the bare effective address, so we must ensure that no segment base is added */
5562 s->override = -1;
5563 val = s->addseg;
5564 s->addseg = 0;
5565 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
5566 s->addseg = val;
5567 gen_op_mov_reg_A0(ot - OT_WORD, reg);
5568 break;
5569
5570 case 0xa0: /* mov EAX, Ov */
5571 case 0xa1:
5572 case 0xa2: /* mov Ov, EAX */
5573 case 0xa3:
5574 {
5575 target_ulong offset_addr;
5576
5577 if ((b & 1) == 0)
5578 ot = OT_BYTE;
5579 else
5580 ot = dflag + OT_WORD;
5581#ifdef TARGET_X86_64
5582 if (s->aflag == 2) {
5583 offset_addr = ldq_code(s->pc);
5584 s->pc += 8;
5585 gen_op_movq_A0_im(offset_addr);
5586 } else
5587#endif
5588 {
5589 if (s->aflag) {
5590 offset_addr = insn_get(s, OT_LONG);
5591 } else {
5592 offset_addr = insn_get(s, OT_WORD);
5593 }
5594 gen_op_movl_A0_im(offset_addr);
5595 }
5596 gen_add_A0_ds_seg(s);
5597 if ((b & 2) == 0) {
5598 gen_op_ld_T0_A0(ot + s->mem_index);
5599 gen_op_mov_reg_T0(ot, R_EAX);
5600 } else {
5601 gen_op_mov_TN_reg(ot, 0, R_EAX);
5602 gen_op_st_T0_A0(ot + s->mem_index);
5603 }
5604 }
5605 break;
5606 case 0xd7: /* xlat */
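/* XLAT: AL := mem8[seg:(E/R)BX + zero-extended AL], the classic
   256-byte table lookup */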
5607#ifdef TARGET_X86_64
5608 if (s->aflag == 2) {
5609 gen_op_movq_A0_reg(R_EBX);
5610 gen_op_mov_TN_reg(OT_QUAD, 0, R_EAX);
5611 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xff);
5612 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_T[0]);
5613 } else
5614#endif
5615 {
5616 gen_op_movl_A0_reg(R_EBX);
5617 gen_op_mov_TN_reg(OT_LONG, 0, R_EAX);
5618 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xff);
5619 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_T[0]);
5620 if (s->aflag == 0)
5621 gen_op_andl_A0_ffff();
5622 else
5623 tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
5624 }
5625 gen_add_A0_ds_seg(s);
5626 gen_op_ldu_T0_A0(OT_BYTE + s->mem_index);
5627 gen_op_mov_reg_T0(OT_BYTE, R_EAX);
5628 break;
5629 case 0xb0 ... 0xb7: /* mov R, Ib */
5630 val = insn_get(s, OT_BYTE);
5631 gen_op_movl_T0_im(val);
5632 gen_op_mov_reg_T0(OT_BYTE, (b & 7) | REX_B(s));
5633 break;
5634 case 0xb8 ... 0xbf: /* mov R, Iv */
5635#ifdef TARGET_X86_64
5636 if (dflag == 2) {
5637 uint64_t tmp;
5638 /* 64 bit case */
5639 tmp = ldq_code(s->pc);
5640 s->pc += 8;
5641 reg = (b & 7) | REX_B(s);
5642 gen_movtl_T0_im(tmp);
5643 gen_op_mov_reg_T0(OT_QUAD, reg);
5644 } else
5645#endif
5646 {
5647 ot = dflag ? OT_LONG : OT_WORD;
5648 val = insn_get(s, ot);
5649 reg = (b & 7) | REX_B(s);
5650 gen_op_movl_T0_im(val);
5651 gen_op_mov_reg_T0(ot, reg);
5652 }
5653 break;
5654
5655 case 0x91 ... 0x97: /* xchg R, EAX */
5656 do_xchg_reg_eax:
5657 ot = dflag + OT_WORD;
5658 reg = (b & 7) | REX_B(s);
5659 rm = R_EAX;
5660 goto do_xchg_reg;
5661 case 0x86:
5662 case 0x87: /* xchg Ev, Gv */
5663 if ((b & 1) == 0)
5664 ot = OT_BYTE;
5665 else
5666 ot = dflag + OT_WORD;
5667 modrm = ldub_code(s->pc++);
5668 reg = ((modrm >> 3) & 7) | rex_r;
5669 mod = (modrm >> 6) & 3;
5670 if (mod == 3) {
5671 rm = (modrm & 7) | REX_B(s);
5672 do_xchg_reg:
5673 gen_op_mov_TN_reg(ot, 0, reg);
5674 gen_op_mov_TN_reg(ot, 1, rm);
5675 gen_op_mov_reg_T0(ot, rm);
5676 gen_op_mov_reg_T1(ot, reg);
5677 } else {
5678 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
5679 gen_op_mov_TN_reg(ot, 0, reg);
5680 /* for xchg, lock is implicit */
5681 if (!(prefixes & PREFIX_LOCK))
5682 gen_helper_lock();
5683 gen_op_ld_T1_A0(ot + s->mem_index);
5684 gen_op_st_T0_A0(ot + s->mem_index);
5685 if (!(prefixes & PREFIX_LOCK))
5686 gen_helper_unlock();
5687 gen_op_mov_reg_T1(ot, reg);
5688 }
5689 break;
5690 case 0xc4: /* les Gv */
5691 if (CODE64(s))
5692 goto illegal_op;
5693 op = R_ES;
5694 goto do_lxx;
5695 case 0xc5: /* lds Gv */
5696 if (CODE64(s))
5697 goto illegal_op;
5698 op = R_DS;
5699 goto do_lxx;
5700 case 0x1b2: /* lss Gv */
5701 op = R_SS;
5702 goto do_lxx;
5703 case 0x1b4: /* lfs Gv */
5704 op = R_FS;
5705 goto do_lxx;
5706 case 0x1b5: /* lgs Gv */
5707 op = R_GS;
5708 do_lxx:
5709 ot = dflag ? OT_LONG : OT_WORD;
5710 modrm = ldub_code(s->pc++);
5711 reg = ((modrm >> 3) & 7) | rex_r;
5712 mod = (modrm >> 6) & 3;
5713 if (mod == 3)
5714 goto illegal_op;
5715 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
5716 gen_op_ld_T1_A0(ot + s->mem_index);
5717 gen_add_A0_im(s, 1 << (ot - OT_WORD + 1));
5718 /* load the segment first to handle exceptions properly */
5719 gen_op_ldu_T0_A0(OT_WORD + s->mem_index);
5720 gen_movl_seg_T0(s, op, pc_start - s->cs_base);
5721 /* then put the data */
5722 gen_op_mov_reg_T1(ot, reg);
5723 if (s->is_jmp) {
5724 gen_jmp_im(s->pc - s->cs_base);
5725 gen_eob(s);
5726 }
5727 break;
5728
5729 /************************/
5730 /* shifts */
5731 case 0xc0:
5732 case 0xc1:
5733 /* shift Ev,Ib */
5734 shift = 2;
5735 grp2:
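/* common GRP2 body: shift == 2 -> count from imm8 (C0/C1),
   shift == 1 -> count is 1 (D0/D1), shift == 0 -> count in CL (D2/D3) */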
5736 {
5737 if ((b & 1) == 0)
5738 ot = OT_BYTE;
5739 else
5740 ot = dflag + OT_WORD;
5741
5742 modrm = ldub_code(s->pc++);
5743 mod = (modrm >> 6) & 3;
5744 op = (modrm >> 3) & 7;
5745
5746 if (mod != 3) {
5747 if (shift == 2) {
5748 s->rip_offset = 1;
5749 }
5750 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
5751 opreg = OR_TMP0;
5752 } else {
5753 opreg = (modrm & 7) | REX_B(s);
5754 }
5755
5756 /* simpler op */
5757 if (shift == 0) {
5758 gen_shift(s, op, ot, opreg, OR_ECX);
5759 } else {
5760 if (shift == 2) {
5761 shift = ldub_code(s->pc++);
5762 }
5763 gen_shifti(s, op, ot, opreg, shift);
5764 }
5765 }
5766 break;
5767 case 0xd0:
5768 case 0xd1:
5769 /* shift Ev,1 */
5770 shift = 1;
5771 goto grp2;
5772 case 0xd2:
5773 case 0xd3:
5774 /* shift Ev,cl */
5775 shift = 0;
5776 goto grp2;
5777
5778 case 0x1a4: /* shld imm */
5779 op = 0;
5780 shift = 1;
5781 goto do_shiftd;
5782 case 0x1a5: /* shld cl */
5783 op = 0;
5784 shift = 0;
5785 goto do_shiftd;
5786 case 0x1ac: /* shrd imm */
5787 op = 1;
5788 shift = 1;
5789 goto do_shiftd;
5790 case 0x1ad: /* shrd cl */
5791 op = 1;
5792 shift = 0;
5793 do_shiftd:
5794 ot = dflag + OT_WORD;
5795 modrm = ldub_code(s->pc++);
5796 mod = (modrm >> 6) & 3;
5797 rm = (modrm & 7) | REX_B(s);
5798 reg = ((modrm >> 3) & 7) | rex_r;
5799 if (mod != 3) {
5800 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
5801 opreg = OR_TMP0;
5802 } else {
5803 opreg = rm;
5804 }
5805 gen_op_mov_TN_reg(ot, 1, reg);
5806
5807 if (shift) {
5808 val = ldub_code(s->pc++);
5809 tcg_gen_movi_tl(cpu_T3, val);
5810 } else {
5811 tcg_gen_mov_tl(cpu_T3, cpu_regs[R_ECX]);
5812 }
5813 gen_shiftd_rm_T1_T3(s, ot, opreg, op);
5814 break;
5815
5816 /************************/
5817 /* floats */
5818 case 0xd8 ... 0xdf:
5819 if (s->flags & (HF_EM_MASK | HF_TS_MASK)) {
5820 /* if CR0.EM or CR0.TS are set, generate an FPU exception */
5821 /* XXX: what to do if illegal op ? */
5822 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
5823 break;
5824 }
5825 modrm = ldub_code(s->pc++);
5826 mod = (modrm >> 6) & 3;
5827 rm = modrm & 7;
5828 op = ((b & 7) << 3) | ((modrm >> 3) & 7);
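/* 6-bit dispatch index: high 3 bits from the D8..DF opcode byte,
   low 3 bits from the modrm reg field */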
5829 if (mod != 3) {
5830 /* memory op */
5831 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
5832 switch(op) {
5833 case 0x00 ... 0x07: /* fxxxs */
5834 case 0x10 ... 0x17: /* fixxxl */
5835 case 0x20 ... 0x27: /* fxxxl */
5836 case 0x30 ... 0x37: /* fixxx */
5837 {
5838 int op1;
5839 op1 = op & 7;
5840
5841 switch(op >> 4) {
5842 case 0:
5843 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
5844 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5845 gen_helper_flds_FT0(cpu_tmp2_i32);
5846 break;
5847 case 1:
5848 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
5849 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5850 gen_helper_fildl_FT0(cpu_tmp2_i32);
5851 break;
5852 case 2:
5853 tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0,
5854 (s->mem_index >> 2) - 1);
5855 gen_helper_fldl_FT0(cpu_tmp1_i64);
5856 break;
5857 case 3:
5858 default:
5859 gen_op_lds_T0_A0(OT_WORD + s->mem_index);
5860 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5861 gen_helper_fildl_FT0(cpu_tmp2_i32);
5862 break;
5863 }
5864
5865 gen_helper_fp_arith_ST0_FT0(op1);
5866 if (op1 == 3) {
5867 /* fcomp needs pop */
5868 gen_helper_fpop();
5869 }
5870 }
5871 break;
5872 case 0x08: /* flds */
5873 case 0x0a: /* fsts */
5874 case 0x0b: /* fstps */
5875 case 0x18 ... 0x1b: /* fildl, fisttpl, fistl, fistpl */
5876 case 0x28 ... 0x2b: /* fldl, fisttpll, fstl, fstpl */
5877 case 0x38 ... 0x3b: /* filds, fisttps, fists, fistps */
5878 switch(op & 7) {
5879 case 0:
5880 switch(op >> 4) {
5881 case 0:
5882 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
5883 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5884 gen_helper_flds_ST0(cpu_tmp2_i32);
5885 break;
5886 case 1:
5887 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
5888 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5889 gen_helper_fildl_ST0(cpu_tmp2_i32);
5890 break;
5891 case 2:
5892 tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0,
5893 (s->mem_index >> 2) - 1);
5894 gen_helper_fldl_ST0(cpu_tmp1_i64);
5895 break;
5896 case 3:
5897 default:
5898 gen_op_lds_T0_A0(OT_WORD + s->mem_index);
5899 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5900 gen_helper_fildl_ST0(cpu_tmp2_i32);
5901 break;
5902 }
5903 break;
5904 case 1:
5905 /* XXX: the corresponding CPUID bit (SSE3, for fisttp) must be tested! */
5906 switch(op >> 4) {
5907 case 1:
5908 gen_helper_fisttl_ST0(cpu_tmp2_i32);
5909 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
5910 gen_op_st_T0_A0(OT_LONG + s->mem_index);
5911 break;
5912 case 2:
5913 gen_helper_fisttll_ST0(cpu_tmp1_i64);
5914 tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0,
5915 (s->mem_index >> 2) - 1);
5916 break;
5917 case 3:
5918 default:
5919 gen_helper_fistt_ST0(cpu_tmp2_i32);
5920 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
5921 gen_op_st_T0_A0(OT_WORD + s->mem_index);
5922 break;
5923 }
5924 gen_helper_fpop();
5925 break;
5926 default:
5927 switch(op >> 4) {
5928 case 0:
5929 gen_helper_fsts_ST0(cpu_tmp2_i32);
5930 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
5931 gen_op_st_T0_A0(OT_LONG + s->mem_index);
5932 break;
5933 case 1:
5934 gen_helper_fistl_ST0(cpu_tmp2_i32);
5935 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
5936 gen_op_st_T0_A0(OT_LONG + s->mem_index);
5937 break;
5938 case 2:
5939 gen_helper_fstl_ST0(cpu_tmp1_i64);
5940 tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0,
5941 (s->mem_index >> 2) - 1);
5942 break;
5943 case 3:
5944 default:
5945 gen_helper_fist_ST0(cpu_tmp2_i32);
5946 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
5947 gen_op_st_T0_A0(OT_WORD + s->mem_index);
5948 break;
5949 }
5950 if ((op & 7) == 3)
5951 gen_helper_fpop();
5952 break;
5953 }
5954 break;
5955 case 0x0c: /* fldenv mem */
5956 if (s->cc_op != CC_OP_DYNAMIC)
5957 gen_op_set_cc_op(s->cc_op);
5958 gen_jmp_im(pc_start - s->cs_base);
5959 gen_helper_fldenv(
5960 cpu_A0, tcg_const_i32(s->dflag));
5961 break;
5962 case 0x0d: /* fldcw mem */
5963 gen_op_ld_T0_A0(OT_WORD + s->mem_index);
5964 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5965 gen_helper_fldcw(cpu_tmp2_i32);
5966 break;
5967 case 0x0e: /* fnstenv mem */
5968 if (s->cc_op != CC_OP_DYNAMIC)
5969 gen_op_set_cc_op(s->cc_op);
5970 gen_jmp_im(pc_start - s->cs_base);
5971 gen_helper_fstenv(cpu_A0, tcg_const_i32(s->dflag));
5972 break;
5973 case 0x0f: /* fnstcw mem */
5974 gen_helper_fnstcw(cpu_tmp2_i32);
5975 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
5976 gen_op_st_T0_A0(OT_WORD + s->mem_index);
5977 break;
5978 case 0x1d: /* fldt mem */
5979 if (s->cc_op != CC_OP_DYNAMIC)
5980 gen_op_set_cc_op(s->cc_op);
5981 gen_jmp_im(pc_start - s->cs_base);
5982 gen_helper_fldt_ST0(cpu_A0);
5983 break;
5984 case 0x1f: /* fstpt mem */
5985 if (s->cc_op != CC_OP_DYNAMIC)
5986 gen_op_set_cc_op(s->cc_op);
5987 gen_jmp_im(pc_start - s->cs_base);
5988 gen_helper_fstt_ST0(cpu_A0);
5989 gen_helper_fpop();
5990 break;
5991 case 0x2c: /* frstor mem */
5992 if (s->cc_op != CC_OP_DYNAMIC)
5993 gen_op_set_cc_op(s->cc_op);
5994 gen_jmp_im(pc_start - s->cs_base);
5995 gen_helper_frstor(cpu_A0, tcg_const_i32(s->dflag));
5996 break;
5997 case 0x2e: /* fnsave mem */
5998 if (s->cc_op != CC_OP_DYNAMIC)
5999 gen_op_set_cc_op(s->cc_op);
6000 gen_jmp_im(pc_start - s->cs_base);
6001 gen_helper_fsave(cpu_A0, tcg_const_i32(s->dflag));
6002 break;
6003 case 0x2f: /* fnstsw mem */
6004 gen_helper_fnstsw(cpu_tmp2_i32);
6005 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
6006 gen_op_st_T0_A0(OT_WORD + s->mem_index);
6007 break;
6008 case 0x3c: /* fbld */
6009 if (s->cc_op != CC_OP_DYNAMIC)
6010 gen_op_set_cc_op(s->cc_op);
6011 gen_jmp_im(pc_start - s->cs_base);
6012 gen_helper_fbld_ST0(cpu_A0);
6013 break;
6014 case 0x3e: /* fbstp */
6015 if (s->cc_op != CC_OP_DYNAMIC)
6016 gen_op_set_cc_op(s->cc_op);
6017 gen_jmp_im(pc_start - s->cs_base);
6018 gen_helper_fbst_ST0(cpu_A0);
6019 gen_helper_fpop();
6020 break;
6021 case 0x3d: /* fildll */
6022 tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0,
6023 (s->mem_index >> 2) - 1);
6024 gen_helper_fildll_ST0(cpu_tmp1_i64);
6025 break;
6026 case 0x3f: /* fistpll */
6027 gen_helper_fistll_ST0(cpu_tmp1_i64);
6028 tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0,
6029 (s->mem_index >> 2) - 1);
6030 gen_helper_fpop();
6031 break;
6032 default:
6033 goto illegal_op;
6034 }
6035 } else {
6036 /* register float ops */
6037 opreg = rm;
6038
6039 switch(op) {
6040 case 0x08: /* fld sti */
6041 gen_helper_fpush();
6042 gen_helper_fmov_ST0_STN(tcg_const_i32((opreg + 1) & 7));
6043 break;
6044 case 0x09: /* fxchg sti */
6045 case 0x29: /* fxchg4 sti, undocumented op */
6046 case 0x39: /* fxchg7 sti, undocumented op */
6047 gen_helper_fxchg_ST0_STN(tcg_const_i32(opreg));
6048 break;
6049 case 0x0a: /* grp d9/2 */
6050 switch(rm) {
6051 case 0: /* fnop */
6052 /* check exceptions (FreeBSD FPU probe) */
6053 if (s->cc_op != CC_OP_DYNAMIC)
6054 gen_op_set_cc_op(s->cc_op);
6055 gen_jmp_im(pc_start - s->cs_base);
6056 gen_helper_fwait();
6057 break;
6058 default:
6059 goto illegal_op;
6060 }
6061 break;
6062 case 0x0c: /* grp d9/4 */
6063 switch(rm) {
6064 case 0: /* fchs */
6065 gen_helper_fchs_ST0();
6066 break;
6067 case 1: /* fabs */
6068 gen_helper_fabs_ST0();
6069 break;
6070 case 4: /* ftst */
6071 gen_helper_fldz_FT0();
6072 gen_helper_fcom_ST0_FT0();
6073 break;
6074 case 5: /* fxam */
6075 gen_helper_fxam_ST0();
6076 break;
6077 default:
6078 goto illegal_op;
6079 }
6080 break;
6081 case 0x0d: /* grp d9/5 */
6082 {
6083 switch(rm) {
6084 case 0:
6085 gen_helper_fpush();
6086 gen_helper_fld1_ST0();
6087 break;
6088 case 1:
6089 gen_helper_fpush();
6090 gen_helper_fldl2t_ST0();
6091 break;
6092 case 2:
6093 gen_helper_fpush();
6094 gen_helper_fldl2e_ST0();
6095 break;
6096 case 3:
6097 gen_helper_fpush();
6098 gen_helper_fldpi_ST0();
6099 break;
6100 case 4:
6101 gen_helper_fpush();
6102 gen_helper_fldlg2_ST0();
6103 break;
6104 case 5:
6105 gen_helper_fpush();
6106 gen_helper_fldln2_ST0();
6107 break;
6108 case 6:
6109 gen_helper_fpush();
6110 gen_helper_fldz_ST0();
6111 break;
6112 default:
6113 goto illegal_op;
6114 }
6115 }
6116 break;
6117 case 0x0e: /* grp d9/6 */
6118 switch(rm) {
6119 case 0: /* f2xm1 */
6120 gen_helper_f2xm1();
6121 break;
6122 case 1: /* fyl2x */
6123 gen_helper_fyl2x();
6124 break;
6125 case 2: /* fptan */
6126 gen_helper_fptan();
6127 break;
6128 case 3: /* fpatan */
6129 gen_helper_fpatan();
6130 break;
6131 case 4: /* fxtract */
6132 gen_helper_fxtract();
6133 break;
6134 case 5: /* fprem1 */
6135 gen_helper_fprem1();
6136 break;
6137 case 6: /* fdecstp */
6138 gen_helper_fdecstp();
6139 break;
6140 default:
6141 case 7: /* fincstp */
6142 gen_helper_fincstp();
6143 break;
6144 }
6145 break;
6146 case 0x0f: /* grp d9/7 */
6147 switch(rm) {
6148 case 0: /* fprem */
6149 gen_helper_fprem();
6150 break;
6151 case 1: /* fyl2xp1 */
6152 gen_helper_fyl2xp1();
6153 break;
6154 case 2: /* fsqrt */
6155 gen_helper_fsqrt();
6156 break;
6157 case 3: /* fsincos */
6158 gen_helper_fsincos();
6159 break;
6160 case 5: /* fscale */
6161 gen_helper_fscale();
6162 break;
6163 case 4: /* frndint */
6164 gen_helper_frndint();
6165 break;
6166 case 6: /* fsin */
6167 gen_helper_fsin();
6168 break;
6169 default:
6170 case 7: /* fcos */
6171 gen_helper_fcos();
6172 break;
6173 }
6174 break;
6175 case 0x00: case 0x01: case 0x04 ... 0x07: /* fxxx st, sti */
6176 case 0x20: case 0x21: case 0x24 ... 0x27: /* fxxx sti, st */
6177 case 0x30: case 0x31: case 0x34 ... 0x37: /* fxxxp sti, st */
6178 {
6179 int op1;
6180
6181 op1 = op & 7;
6182 if (op >= 0x20) {
6183 gen_helper_fp_arith_STN_ST0(op1, opreg);
6184 if (op >= 0x30)
6185 gen_helper_fpop();
6186 } else {
6187 gen_helper_fmov_FT0_STN(tcg_const_i32(opreg));
6188 gen_helper_fp_arith_ST0_FT0(op1);
6189 }
6190 }
6191 break;
6192 case 0x02: /* fcom */
6193 case 0x22: /* fcom2, undocumented op */
6194 gen_helper_fmov_FT0_STN(tcg_const_i32(opreg));
6195 gen_helper_fcom_ST0_FT0();
6196 break;
6197 case 0x03: /* fcomp */
6198 case 0x23: /* fcomp3, undocumented op */
6199 case 0x32: /* fcomp5, undocumented op */
6200 gen_helper_fmov_FT0_STN(tcg_const_i32(opreg));
6201 gen_helper_fcom_ST0_FT0();
6202 gen_helper_fpop();
6203 break;
6204 case 0x15: /* da/5 */
6205 switch(rm) {
6206 case 1: /* fucompp */
6207 gen_helper_fmov_FT0_STN(tcg_const_i32(1));
6208 gen_helper_fucom_ST0_FT0();
6209 gen_helper_fpop();
6210 gen_helper_fpop();
6211 break;
6212 default:
6213 goto illegal_op;
6214 }
6215 break;
6216 case 0x1c:
6217 switch(rm) {
6218 case 0: /* feni (287 only, just do nop here) */
6219 break;
6220 case 1: /* fdisi (287 only, just do nop here) */
6221 break;
6222 case 2: /* fclex */
6223 gen_helper_fclex();
6224 break;
6225 case 3: /* fninit */
6226 gen_helper_fninit();
6227 break;
6228 case 4: /* fsetpm (287 only, just do nop here) */
6229 break;
6230 default:
6231 goto illegal_op;
6232 }
6233 break;
6234 case 0x1d: /* fucomi */
6235 if (s->cc_op != CC_OP_DYNAMIC)
6236 gen_op_set_cc_op(s->cc_op);
6237 gen_helper_fmov_FT0_STN(tcg_const_i32(opreg));
6238 gen_helper_fucomi_ST0_FT0();
6239 s->cc_op = CC_OP_EFLAGS;
6240 break;
6241 case 0x1e: /* fcomi */
6242 if (s->cc_op != CC_OP_DYNAMIC)
6243 gen_op_set_cc_op(s->cc_op);
6244 gen_helper_fmov_FT0_STN(tcg_const_i32(opreg));
6245 gen_helper_fcomi_ST0_FT0();
6246 s->cc_op = CC_OP_EFLAGS;
6247 break;
6248 case 0x28: /* ffree sti */
6249 gen_helper_ffree_STN(tcg_const_i32(opreg));
6250 break;
6251 case 0x2a: /* fst sti */
6252 gen_helper_fmov_STN_ST0(tcg_const_i32(opreg));
6253 break;
6254 case 0x2b: /* fstp sti */
6255 case 0x0b: /* fstp1 sti, undocumented op */
6256 case 0x3a: /* fstp8 sti, undocumented op */
6257 case 0x3b: /* fstp9 sti, undocumented op */
6258 gen_helper_fmov_STN_ST0(tcg_const_i32(opreg));
6259 gen_helper_fpop();
6260 break;
6261 case 0x2c: /* fucom st(i) */
6262 gen_helper_fmov_FT0_STN(tcg_const_i32(opreg));
6263 gen_helper_fucom_ST0_FT0();
6264 break;
6265 case 0x2d: /* fucomp st(i) */
6266 gen_helper_fmov_FT0_STN(tcg_const_i32(opreg));
6267 gen_helper_fucom_ST0_FT0();
6268 gen_helper_fpop();
6269 break;
6270 case 0x33: /* de/3 */
6271 switch(rm) {
6272 case 1: /* fcompp */
6273 gen_helper_fmov_FT0_STN(tcg_const_i32(1));
6274 gen_helper_fcom_ST0_FT0();
6275 gen_helper_fpop();
6276 gen_helper_fpop();
6277 break;
6278 default:
6279 goto illegal_op;
6280 }
6281 break;
6282 case 0x38: /* ffreep sti, undocumented op */
6283 gen_helper_ffree_STN(tcg_const_i32(opreg));
6284 gen_helper_fpop();
6285 break;
6286 case 0x3c: /* df/4 */
6287 switch(rm) {
6288 case 0:
6289 gen_helper_fnstsw(cpu_tmp2_i32);
6290 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
6291 gen_op_mov_reg_T0(OT_WORD, R_EAX);
6292 break;
6293 default:
6294 goto illegal_op;
6295 }
6296 break;
6297 case 0x3d: /* fucomip */
6298 if (s->cc_op != CC_OP_DYNAMIC)
6299 gen_op_set_cc_op(s->cc_op);
6300 gen_helper_fmov_FT0_STN(tcg_const_i32(opreg));
6301 gen_helper_fucomi_ST0_FT0();
6302 gen_helper_fpop();
6303 s->cc_op = CC_OP_EFLAGS;
6304 break;
6305 case 0x3e: /* fcomip */
6306 if (s->cc_op != CC_OP_DYNAMIC)
6307 gen_op_set_cc_op(s->cc_op);
6308 gen_helper_fmov_FT0_STN(tcg_const_i32(opreg));
6309 gen_helper_fcomi_ST0_FT0();
6310 gen_helper_fpop();
6311 s->cc_op = CC_OP_EFLAGS;
6312 break;
6313 case 0x10 ... 0x13: /* fcmovxx */
6314 case 0x18 ... 0x1b:
6315 {
6316 int op1, l1;
6317 static const uint8_t fcmov_cc[8] = {
6318 (JCC_B << 1),
6319 (JCC_Z << 1),
6320 (JCC_BE << 1),
6321 (JCC_P << 1),
6322 };
6323 op1 = fcmov_cc[op & 3] | (((op >> 3) & 1) ^ 1);
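                    /* fcmov_cc maps the two low opcode bits to a jcc
                       condition; bit 0 of op1 is the jcc negate bit, chosen
                       so that gen_jcc1 branches to l1 exactly when the fcmov
                       condition is false, skipping the register move. */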
6324 l1 = gen_new_label();
6325 gen_jcc1(s, s->cc_op, op1, l1);
6326 gen_helper_fmov_ST0_STN(tcg_const_i32(opreg));
6327 gen_set_label(l1);
6328 }
6329 break;
6330 default:
6331 goto illegal_op;
6332 }
6333 }
6334 break;
6335 /************************/
6336 /* string ops */
6337
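    /* Common shape of the string instructions below: opcode bit 0 selects
       byte vs. word/dword/qword operands (via dflag), and a REP prefix
       dispatches to a gen_repz_* helper that emits an ECX-driven loop and
       ends the translation block, while the unprefixed forms generate a
       single inline iteration. */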
6338 case 0xa4: /* movsS */
6339 case 0xa5:
6340 if ((b & 1) == 0)
6341 ot = OT_BYTE;
6342 else
6343 ot = dflag + OT_WORD;
6344
6345 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6346 gen_repz_movs(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
6347 } else {
6348 gen_movs(s, ot);
6349 }
6350 break;
6351
6352 case 0xaa: /* stosS */
6353 case 0xab:
6354 if ((b & 1) == 0)
6355 ot = OT_BYTE;
6356 else
6357 ot = dflag + OT_WORD;
6358
6359 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6360 gen_repz_stos(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
6361 } else {
6362 gen_stos(s, ot);
6363 }
6364 break;
6365 case 0xac: /* lodsS */
6366 case 0xad:
6367 if ((b & 1) == 0)
6368 ot = OT_BYTE;
6369 else
6370 ot = dflag + OT_WORD;
6371 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6372 gen_repz_lods(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
6373 } else {
6374 gen_lods(s, ot);
6375 }
6376 break;
6377 case 0xae: /* scasS */
6378 case 0xaf:
6379 if ((b & 1) == 0)
6380 ot = OT_BYTE;
6381 else
6382 ot = dflag + OT_WORD;
6383 if (prefixes & PREFIX_REPNZ) {
6384 gen_repz_scas(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 1);
6385 } else if (prefixes & PREFIX_REPZ) {
6386 gen_repz_scas(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 0);
6387 } else {
6388 gen_scas(s, ot);
6389 s->cc_op = CC_OP_SUBB + ot;
6390 }
6391 break;
6392
6393 case 0xa6: /* cmpsS */
6394 case 0xa7:
6395 if ((b & 1) == 0)
6396 ot = OT_BYTE;
6397 else
6398 ot = dflag + OT_WORD;
6399 if (prefixes & PREFIX_REPNZ) {
6400 gen_repz_cmps(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 1);
6401 } else if (prefixes & PREFIX_REPZ) {
6402 gen_repz_cmps(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 0);
6403 } else {
6404 gen_cmps(s, ot);
6405 s->cc_op = CC_OP_SUBB + ot;
6406 }
6407 break;
6408 case 0x6c: /* insS */
6409 case 0x6d:
6410 if ((b & 1) == 0)
6411 ot = OT_BYTE;
6412 else
6413 ot = dflag ? OT_LONG : OT_WORD;
6414 gen_op_mov_TN_reg(OT_WORD, 0, R_EDX);
6415 gen_op_andl_T0_ffff();
6416 gen_check_io(s, ot, pc_start - s->cs_base,
6417 SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes) | 4);
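        /* The bare 4 here is, going by the SVM IOIO exit-info layout, the
           "string instruction" bit (SVM_IOIO_STR_SHIFT == 2), while
           SVM_IOIO_TYPE_MASK marks the access as an input. */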
6418 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6419 gen_repz_ins(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
6420 } else {
6421 gen_ins(s, ot);
6422 if (use_icount) {
6423 gen_jmp(s, s->pc - s->cs_base);
6424 }
6425 }
6426 break;
6427 case 0x6e: /* outsS */
6428 case 0x6f:
6429 if ((b & 1) == 0)
6430 ot = OT_BYTE;
6431 else
6432 ot = dflag ? OT_LONG : OT_WORD;
6433 gen_op_mov_TN_reg(OT_WORD, 0, R_EDX);
6434 gen_op_andl_T0_ffff();
6435 gen_check_io(s, ot, pc_start - s->cs_base,
6436 svm_is_rep(prefixes) | 4);
6437 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6438 gen_repz_outs(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
6439 } else {
6440 gen_outs(s, ot);
6441 if (use_icount) {
6442 gen_jmp(s, s->pc - s->cs_base);
6443 }
6444 }
6445 break;
6446
6447 /************************/
6448 /* port I/O */
6449
6450 case 0xe4:
6451 case 0xe5:
6452 if ((b & 1) == 0)
6453 ot = OT_BYTE;
6454 else
6455 ot = dflag ? OT_LONG : OT_WORD;
6456 val = ldub_code(s->pc++);
6457 gen_op_movl_T0_im(val);
6458 gen_check_io(s, ot, pc_start - s->cs_base,
6459 SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes));
6460 if (use_icount)
6461 gen_io_start();
6462 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
6463 gen_helper_in_func(ot, cpu_T[1], cpu_tmp2_i32);
6464 gen_op_mov_reg_T1(ot, R_EAX);
6465 if (use_icount) {
6466 gen_io_end();
6467 gen_jmp(s, s->pc - s->cs_base);
6468 }
6469 break;
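        /* icount pattern used by all the port I/O cases: gen_io_start()/
           gen_io_end() bracket the helper so the emulated instruction
           counter stays exact across the access, and the block is ended
           with a jump so pending timers/interrupts get serviced. */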
6470 case 0xe6:
6471 case 0xe7:
6472 if ((b & 1) == 0)
6473 ot = OT_BYTE;
6474 else
6475 ot = dflag ? OT_LONG : OT_WORD;
6476 val = ldub_code(s->pc++);
6477 gen_op_movl_T0_im(val);
6478 gen_check_io(s, ot, pc_start - s->cs_base,
6479 svm_is_rep(prefixes));
6480 gen_op_mov_TN_reg(ot, 1, R_EAX);
6481
6482 if (use_icount)
6483 gen_io_start();
6484 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
6485 tcg_gen_andi_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0xffff);
6486 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[1]);
6487 gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);
6488 if (use_icount) {
6489 gen_io_end();
6490 gen_jmp(s, s->pc - s->cs_base);
6491 }
6492 break;
6493 case 0xec:
6494 case 0xed:
6495 if ((b & 1) == 0)
6496 ot = OT_BYTE;
6497 else
6498 ot = dflag ? OT_LONG : OT_WORD;
6499 gen_op_mov_TN_reg(OT_WORD, 0, R_EDX);
6500 gen_op_andl_T0_ffff();
6501 gen_check_io(s, ot, pc_start - s->cs_base,
6502 SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes));
6503 if (use_icount)
6504 gen_io_start();
6505 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
6506 gen_helper_in_func(ot, cpu_T[1], cpu_tmp2_i32);
6507 gen_op_mov_reg_T1(ot, R_EAX);
6508 if (use_icount) {
6509 gen_io_end();
6510 gen_jmp(s, s->pc - s->cs_base);
6511 }
6512 break;
6513 case 0xee:
6514 case 0xef:
6515 if ((b & 1) == 0)
6516 ot = OT_BYTE;
6517 else
6518 ot = dflag ? OT_LONG : OT_WORD;
6519 gen_op_mov_TN_reg(OT_WORD, 0, R_EDX);
6520 gen_op_andl_T0_ffff();
6521 gen_check_io(s, ot, pc_start - s->cs_base,
6522 svm_is_rep(prefixes));
6523 gen_op_mov_TN_reg(ot, 1, R_EAX);
6524
6525 if (use_icount)
6526 gen_io_start();
6527 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
6528 tcg_gen_andi_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0xffff);
6529 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[1]);
6530 gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);
6531 if (use_icount) {
6532 gen_io_end();
6533 gen_jmp(s, s->pc - s->cs_base);
6534 }
6535 break;
6536
6537 /************************/
6538 /* control */
6539 case 0xc2: /* ret im */
6540 val = ldsw_code(s->pc);
6541 s->pc += 2;
6542 gen_pop_T0(s);
6543 if (CODE64(s) && s->dflag)
6544 s->dflag = 2;
6545 gen_stack_update(s, val + (2 << s->dflag));
6546 if (s->dflag == 0)
6547 gen_op_andl_T0_ffff();
6548 gen_op_jmp_T0();
6549 gen_eob(s);
6550 break;
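        /* Stack arithmetic sketch: dflag is 0/1/2 for 16/32/64-bit operand
           size, so (2 << s->dflag) is the byte size of the popped return
           address; in 64-bit mode the dflag adjustment above forces any
           non-16-bit near ret to a 64-bit pop. */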
6551 case 0xc3: /* ret */
6552 gen_pop_T0(s);
6553 gen_pop_update(s);
6554 if (s->dflag == 0)
6555 gen_op_andl_T0_ffff();
6556 gen_op_jmp_T0();
6557 gen_eob(s);
6558 break;
6559 case 0xca: /* lret im */
6560 val = ldsw_code(s->pc);
6561 s->pc += 2;
6562 do_lret:
6563 if (s->pe && !s->vm86) {
6564 if (s->cc_op != CC_OP_DYNAMIC)
6565 gen_op_set_cc_op(s->cc_op);
6566 gen_jmp_im(pc_start - s->cs_base);
6567 gen_helper_lret_protected(tcg_const_i32(s->dflag),
6568 tcg_const_i32(val));
6569 } else {
6570 gen_stack_A0(s);
6571 /* pop offset */
6572 gen_op_ld_T0_A0(1 + s->dflag + s->mem_index);
6573 if (s->dflag == 0)
6574 gen_op_andl_T0_ffff();
6575 /* NOTE: keeping EIP updated is not a problem in case of
6576 exception */
6577 gen_op_jmp_T0();
6578 /* pop selector */
6579 gen_op_addl_A0_im(2 << s->dflag);
6580 gen_op_ld_T0_A0(1 + s->dflag + s->mem_index);
6581 gen_op_movl_seg_T0_vm(R_CS);
6582 /* add stack offset */
6583 gen_stack_update(s, val + (4 << s->dflag));
6584 }
6585 gen_eob(s);
6586 break;
6587 case 0xcb: /* lret */
6588 val = 0;
6589 goto do_lret;
6590 case 0xcf: /* iret */
6591 gen_svm_check_intercept(s, pc_start, SVM_EXIT_IRET);
6592 if (!s->pe) {
6593 /* real mode */
6594 gen_helper_iret_real(tcg_const_i32(s->dflag));
6595 s->cc_op = CC_OP_EFLAGS;
6596 } else if (s->vm86) {
6597#ifdef VBOX
6598 if (s->iopl != 3 && (!s->vme || s->dflag)) {
6599#else
6600 if (s->iopl != 3) {
6601#endif
6602 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6603 } else {
6604 gen_helper_iret_real(tcg_const_i32(s->dflag));
6605 s->cc_op = CC_OP_EFLAGS;
6606 }
6607 } else {
6608 if (s->cc_op != CC_OP_DYNAMIC)
6609 gen_op_set_cc_op(s->cc_op);
6610 gen_jmp_im(pc_start - s->cs_base);
6611 gen_helper_iret_protected(tcg_const_i32(s->dflag),
6612 tcg_const_i32(s->pc - s->cs_base));
6613 s->cc_op = CC_OP_EFLAGS;
6614 }
6615 gen_eob(s);
6616 break;
6617 case 0xe8: /* call im */
6618 {
6619 if (dflag)
6620 tval = (int32_t)insn_get(s, OT_LONG);
6621 else
6622 tval = (int16_t)insn_get(s, OT_WORD);
6623 next_eip = s->pc - s->cs_base;
6624 tval += next_eip;
6625 if (s->dflag == 0)
6626 tval &= 0xffff;
6627 else if(!CODE64(s))
6628 tval &= 0xffffffff;
6629 gen_movtl_T0_im(next_eip);
6630 gen_push_T0(s);
6631 gen_jmp(s, tval);
6632 }
6633 break;
6634 case 0x9a: /* lcall im */
6635 {
6636 unsigned int selector, offset;
6637
6638 if (CODE64(s))
6639 goto illegal_op;
6640 ot = dflag ? OT_LONG : OT_WORD;
6641 offset = insn_get(s, ot);
6642 selector = insn_get(s, OT_WORD);
6643
6644 gen_op_movl_T0_im(selector);
6645 gen_op_movl_T1_imu(offset);
6646 }
6647 goto do_lcall;
6648 case 0xe9: /* jmp im */
6649 if (dflag)
6650 tval = (int32_t)insn_get(s, OT_LONG);
6651 else
6652 tval = (int16_t)insn_get(s, OT_WORD);
6653 tval += s->pc - s->cs_base;
6654 if (s->dflag == 0)
6655 tval &= 0xffff;
6656 else if(!CODE64(s))
6657 tval &= 0xffffffff;
6658 gen_jmp(s, tval);
6659 break;
6660 case 0xea: /* ljmp im */
6661 {
6662 unsigned int selector, offset;
6663
6664 if (CODE64(s))
6665 goto illegal_op;
6666 ot = dflag ? OT_LONG : OT_WORD;
6667 offset = insn_get(s, ot);
6668 selector = insn_get(s, OT_WORD);
6669
6670 gen_op_movl_T0_im(selector);
6671 gen_op_movl_T1_imu(offset);
6672 }
6673 goto do_ljmp;
6674 case 0xeb: /* jmp Jb */
6675 tval = (int8_t)insn_get(s, OT_BYTE);
6676 tval += s->pc - s->cs_base;
6677 if (s->dflag == 0)
6678 tval &= 0xffff;
6679 gen_jmp(s, tval);
6680 break;
6681 case 0x70 ... 0x7f: /* jcc Jb */
6682 tval = (int8_t)insn_get(s, OT_BYTE);
6683 goto do_jcc;
6684 case 0x180 ... 0x18f: /* jcc Jv */
6685 if (dflag) {
6686 tval = (int32_t)insn_get(s, OT_LONG);
6687 } else {
6688 tval = (int16_t)insn_get(s, OT_WORD);
6689 }
6690 do_jcc:
6691 next_eip = s->pc - s->cs_base;
6692 tval += next_eip;
6693 if (s->dflag == 0)
6694 tval &= 0xffff;
6695 gen_jcc(s, b, tval, next_eip);
6696 break;
6697
6698 case 0x190 ... 0x19f: /* setcc Gv */
6699 modrm = ldub_code(s->pc++);
6700 gen_setcc(s, b);
6701 gen_ldst_modrm(s, modrm, OT_BYTE, OR_TMP0, 1);
6702 break;
6703 case 0x140 ... 0x14f: /* cmov Gv, Ev */
6704 {
6705 int l1;
6706 TCGv t0;
6707
6708 ot = dflag + OT_WORD;
6709 modrm = ldub_code(s->pc++);
6710 reg = ((modrm >> 3) & 7) | rex_r;
6711 mod = (modrm >> 6) & 3;
6712 t0 = tcg_temp_local_new();
6713 if (mod != 3) {
6714 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
6715 gen_op_ld_v(ot + s->mem_index, t0, cpu_A0);
6716 } else {
6717 rm = (modrm & 7) | REX_B(s);
6718 gen_op_mov_v_reg(ot, t0, rm);
6719 }
6720#ifdef TARGET_X86_64
6721 if (ot == OT_LONG) {
6722 /* XXX: specific Intel behaviour ? */
6723 l1 = gen_new_label();
6724 gen_jcc1(s, s->cc_op, b ^ 1, l1);
6725 tcg_gen_mov_tl(cpu_regs[reg], t0);
6726 gen_set_label(l1);
6727 tcg_gen_ext32u_tl(cpu_regs[reg], cpu_regs[reg]);
6728 } else
6729#endif
6730 {
6731 l1 = gen_new_label();
6732 gen_jcc1(s, s->cc_op, b ^ 1, l1);
6733 gen_op_mov_reg_v(ot, reg, t0);
6734 gen_set_label(l1);
6735 }
6736 tcg_temp_free(t0);
6737 }
6738 break;
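        /* The 64-bit OT_LONG path above writes the destination register
           unconditionally: a 32-bit cmov in long mode zero-extends the
           destination to 64 bits even when the condition is false, which
           the trailing ext32u reproduces. */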
6739
6740 /************************/
6741 /* flags */
6742 case 0x9c: /* pushf */
6743 gen_svm_check_intercept(s, pc_start, SVM_EXIT_PUSHF);
6744#ifdef VBOX
6745 if (s->vm86 && s->iopl != 3 && (!s->vme || s->dflag)) {
6746#else
6747 if (s->vm86 && s->iopl != 3) {
6748#endif
6749 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6750 } else {
6751 if (s->cc_op != CC_OP_DYNAMIC)
6752 gen_op_set_cc_op(s->cc_op);
6753#ifdef VBOX
6754 if (s->vm86 && s->vme && s->iopl != 3)
6755 gen_helper_read_eflags_vme(cpu_T[0]);
6756 else
6757#endif
6758 gen_helper_read_eflags(cpu_T[0]);
6759 gen_push_T0(s);
6760 }
6761 break;
6762 case 0x9d: /* popf */
6763 gen_svm_check_intercept(s, pc_start, SVM_EXIT_POPF);
6764#ifdef VBOX
6765 if (s->vm86 && s->iopl != 3 && (!s->vme || s->dflag)) {
6766#else
6767 if (s->vm86 && s->iopl != 3) {
6768#endif
6769 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6770 } else {
6771 gen_pop_T0(s);
6772 if (s->cpl == 0) {
6773 if (s->dflag) {
6774 gen_helper_write_eflags(cpu_T[0],
6775 tcg_const_i32((TF_MASK | AC_MASK | ID_MASK | NT_MASK | IF_MASK | IOPL_MASK)));
6776 } else {
6777 gen_helper_write_eflags(cpu_T[0],
6778 tcg_const_i32((TF_MASK | AC_MASK | ID_MASK | NT_MASK | IF_MASK | IOPL_MASK) & 0xffff));
6779 }
6780 } else {
6781 if (s->cpl <= s->iopl) {
6782 if (s->dflag) {
6783 gen_helper_write_eflags(cpu_T[0],
6784 tcg_const_i32((TF_MASK | AC_MASK | ID_MASK | NT_MASK | IF_MASK)));
6785 } else {
6786 gen_helper_write_eflags(cpu_T[0],
6787 tcg_const_i32((TF_MASK | AC_MASK | ID_MASK | NT_MASK | IF_MASK) & 0xffff));
6788 }
6789 } else {
6790 if (s->dflag) {
6791 gen_helper_write_eflags(cpu_T[0],
6792 tcg_const_i32((TF_MASK | AC_MASK | ID_MASK | NT_MASK)));
6793 } else {
6794#ifdef VBOX
6795 if (s->vm86 && s->vme)
6796 gen_helper_write_eflags_vme(cpu_T[0]);
6797 else
6798#endif
6799 gen_helper_write_eflags(cpu_T[0],
6800 tcg_const_i32((TF_MASK | AC_MASK | ID_MASK | NT_MASK) & 0xffff));
6801 }
6802 }
6803 }
6804 gen_pop_update(s);
6805 s->cc_op = CC_OP_EFLAGS;
6806 /* abort translation because TF flag may change */
6807 gen_jmp_im(s->pc - s->cs_base);
6808 gen_eob(s);
6809 }
6810 break;
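        /* Mask ladder summary for popf: CPL 0 may rewrite everything
           including IOPL, CPL <= IOPL may still toggle IF, and an
           unprivileged popf silently preserves both IF and IOPL; the
           16-bit forms truncate each mask to the low word. */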
6811 case 0x9e: /* sahf */
6812 if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
6813 goto illegal_op;
6814 gen_op_mov_TN_reg(OT_BYTE, 0, R_AH);
6815 if (s->cc_op != CC_OP_DYNAMIC)
6816 gen_op_set_cc_op(s->cc_op);
6817 gen_compute_eflags(cpu_cc_src);
6818 tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, CC_O);
6819 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], CC_S | CC_Z | CC_A | CC_P | CC_C);
6820 tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, cpu_T[0]);
6821 s->cc_op = CC_OP_EFLAGS;
6822 break;
6823 case 0x9f: /* lahf */
6824 if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
6825 goto illegal_op;
6826 if (s->cc_op != CC_OP_DYNAMIC)
6827 gen_op_set_cc_op(s->cc_op);
6828 gen_compute_eflags(cpu_T[0]);
6829 /* Note: gen_compute_eflags() only gives the condition codes */
6830 tcg_gen_ori_tl(cpu_T[0], cpu_T[0], 0x02);
6831 gen_op_mov_reg_T0(OT_BYTE, R_AH);
6832 break;
6833 case 0xf5: /* cmc */
6834 if (s->cc_op != CC_OP_DYNAMIC)
6835 gen_op_set_cc_op(s->cc_op);
6836 gen_compute_eflags(cpu_cc_src);
6837 tcg_gen_xori_tl(cpu_cc_src, cpu_cc_src, CC_C);
6838 s->cc_op = CC_OP_EFLAGS;
6839 break;
6840 case 0xf8: /* clc */
6841 if (s->cc_op != CC_OP_DYNAMIC)
6842 gen_op_set_cc_op(s->cc_op);
6843 gen_compute_eflags(cpu_cc_src);
6844 tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_C);
6845 s->cc_op = CC_OP_EFLAGS;
6846 break;
6847 case 0xf9: /* stc */
6848 if (s->cc_op != CC_OP_DYNAMIC)
6849 gen_op_set_cc_op(s->cc_op);
6850 gen_compute_eflags(cpu_cc_src);
6851 tcg_gen_ori_tl(cpu_cc_src, cpu_cc_src, CC_C);
6852 s->cc_op = CC_OP_EFLAGS;
6853 break;
6854 case 0xfc: /* cld */
6855 tcg_gen_movi_i32(cpu_tmp2_i32, 1);
6856 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUState, df));
6857 break;
6858 case 0xfd: /* std */
6859 tcg_gen_movi_i32(cpu_tmp2_i32, -1);
6860 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUState, df));
6861 break;
6862
6863 /************************/
6864 /* bit operations */
6865 case 0x1ba: /* bt/bts/btr/btc Gv, im */
6866 ot = dflag + OT_WORD;
6867 modrm = ldub_code(s->pc++);
6868 op = (modrm >> 3) & 7;
6869 mod = (modrm >> 6) & 3;
6870 rm = (modrm & 7) | REX_B(s);
6871 if (mod != 3) {
6872 s->rip_offset = 1;
6873 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
6874 gen_op_ld_T0_A0(ot + s->mem_index);
6875 } else {
6876 gen_op_mov_TN_reg(ot, 0, rm);
6877 }
6878 /* load shift */
6879 val = ldub_code(s->pc++);
6880 gen_op_movl_T1_im(val);
6881 if (op < 4)
6882 goto illegal_op;
6883 op -= 4;
6884 goto bt_op;
6885 case 0x1a3: /* bt Gv, Ev */
6886 op = 0;
6887 goto do_btx;
6888 case 0x1ab: /* bts */
6889 op = 1;
6890 goto do_btx;
6891 case 0x1b3: /* btr */
6892 op = 2;
6893 goto do_btx;
6894 case 0x1bb: /* btc */
6895 op = 3;
6896 do_btx:
6897 ot = dflag + OT_WORD;
6898 modrm = ldub_code(s->pc++);
6899 reg = ((modrm >> 3) & 7) | rex_r;
6900 mod = (modrm >> 6) & 3;
6901 rm = (modrm & 7) | REX_B(s);
6902 gen_op_mov_TN_reg(OT_LONG, 1, reg);
6903 if (mod != 3) {
6904 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
6905 /* specific case: we need to add a displacement */
6906 gen_exts(ot, cpu_T[1]);
6907 tcg_gen_sari_tl(cpu_tmp0, cpu_T[1], 3 + ot);
6908 tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, ot);
6909 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
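                /* With a memory operand, bt/bts/btr/btc address a bit
                   string rather than a single word: the sign-extended bit
                   offset is divided by the operand width in bits (sari by
                   3 + ot) and scaled back to bytes (shli by ot) to form a
                   displacement added to A0 before the load. */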
6910 gen_op_ld_T0_A0(ot + s->mem_index);
6911 } else {
6912 gen_op_mov_TN_reg(ot, 0, rm);
6913 }
6914 bt_op:
6915 tcg_gen_andi_tl(cpu_T[1], cpu_T[1], (1 << (3 + ot)) - 1);
6916 switch(op) {
6917 case 0:
6918 tcg_gen_shr_tl(cpu_cc_src, cpu_T[0], cpu_T[1]);
6919 tcg_gen_movi_tl(cpu_cc_dst, 0);
6920 break;
6921 case 1:
6922 tcg_gen_shr_tl(cpu_tmp4, cpu_T[0], cpu_T[1]);
6923 tcg_gen_movi_tl(cpu_tmp0, 1);
6924 tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T[1]);
6925 tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
6926 break;
6927 case 2:
6928 tcg_gen_shr_tl(cpu_tmp4, cpu_T[0], cpu_T[1]);
6929 tcg_gen_movi_tl(cpu_tmp0, 1);
6930 tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T[1]);
6931 tcg_gen_not_tl(cpu_tmp0, cpu_tmp0);
6932 tcg_gen_and_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
6933 break;
6934 default:
6935 case 3:
6936 tcg_gen_shr_tl(cpu_tmp4, cpu_T[0], cpu_T[1]);
6937 tcg_gen_movi_tl(cpu_tmp0, 1);
6938 tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T[1]);
6939 tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
6940 break;
6941 }
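        /* Each branch above leaves the tested bit in bit 0 of cc_src
           (directly for plain bt, via cpu_tmp4 and the copy below for the
           writing forms); CC_OP_SARB + ot then lets the lazy-flags code
           derive CF from that low bit. */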
6942 s->cc_op = CC_OP_SARB + ot;
6943 if (op != 0) {
6944 if (mod != 3)
6945 gen_op_st_T0_A0(ot + s->mem_index);
6946 else
6947 gen_op_mov_reg_T0(ot, rm);
6948 tcg_gen_mov_tl(cpu_cc_src, cpu_tmp4);
6949 tcg_gen_movi_tl(cpu_cc_dst, 0);
6950 }
6951 break;
6952 case 0x1bc: /* bsf */
6953 case 0x1bd: /* bsr */
6954 {
6955 int label1;
6956 TCGv t0;
6957
6958 ot = dflag + OT_WORD;
6959 modrm = ldub_code(s->pc++);
6960 reg = ((modrm >> 3) & 7) | rex_r;
 6961             gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0);
6962 gen_extu(ot, cpu_T[0]);
6963 t0 = tcg_temp_local_new();
6964 tcg_gen_mov_tl(t0, cpu_T[0]);
6965 if ((b & 1) && (prefixes & PREFIX_REPZ) &&
6966 (s->cpuid_ext3_features & CPUID_EXT3_ABM)) {
6967 switch(ot) {
6968 case OT_WORD: gen_helper_lzcnt(cpu_T[0], t0,
6969 tcg_const_i32(16)); break;
6970 case OT_LONG: gen_helper_lzcnt(cpu_T[0], t0,
6971 tcg_const_i32(32)); break;
6972 case OT_QUAD: gen_helper_lzcnt(cpu_T[0], t0,
6973 tcg_const_i32(64)); break;
6974 }
6975 gen_op_mov_reg_T0(ot, reg);
6976 } else {
6977 label1 = gen_new_label();
6978 tcg_gen_movi_tl(cpu_cc_dst, 0);
6979 tcg_gen_brcondi_tl(TCG_COND_EQ, t0, 0, label1);
6980 if (b & 1) {
6981 gen_helper_bsr(cpu_T[0], t0);
6982 } else {
6983 gen_helper_bsf(cpu_T[0], t0);
6984 }
6985 gen_op_mov_reg_T0(ot, reg);
6986 tcg_gen_movi_tl(cpu_cc_dst, 1);
6987 gen_set_label(label1);
6988 tcg_gen_discard_tl(cpu_cc_src);
6989 s->cc_op = CC_OP_LOGICB + ot;
6990 }
6991 tcg_temp_free(t0);
6992 }
6993 break;
6994 /************************/
6995 /* bcd */
6996 case 0x27: /* daa */
6997 if (CODE64(s))
6998 goto illegal_op;
6999 if (s->cc_op != CC_OP_DYNAMIC)
7000 gen_op_set_cc_op(s->cc_op);
7001 gen_helper_daa();
7002 s->cc_op = CC_OP_EFLAGS;
7003 break;
7004 case 0x2f: /* das */
7005 if (CODE64(s))
7006 goto illegal_op;
7007 if (s->cc_op != CC_OP_DYNAMIC)
7008 gen_op_set_cc_op(s->cc_op);
7009 gen_helper_das();
7010 s->cc_op = CC_OP_EFLAGS;
7011 break;
7012 case 0x37: /* aaa */
7013 if (CODE64(s))
7014 goto illegal_op;
7015 if (s->cc_op != CC_OP_DYNAMIC)
7016 gen_op_set_cc_op(s->cc_op);
7017 gen_helper_aaa();
7018 s->cc_op = CC_OP_EFLAGS;
7019 break;
7020 case 0x3f: /* aas */
7021 if (CODE64(s))
7022 goto illegal_op;
7023 if (s->cc_op != CC_OP_DYNAMIC)
7024 gen_op_set_cc_op(s->cc_op);
7025 gen_helper_aas();
7026 s->cc_op = CC_OP_EFLAGS;
7027 break;
7028 case 0xd4: /* aam */
7029 if (CODE64(s))
7030 goto illegal_op;
7031 val = ldub_code(s->pc++);
7032 if (val == 0) {
7033 gen_exception(s, EXCP00_DIVZ, pc_start - s->cs_base);
7034 } else {
7035 gen_helper_aam(tcg_const_i32(val));
7036 s->cc_op = CC_OP_LOGICB;
7037 }
7038 break;
7039 case 0xd5: /* aad */
7040 if (CODE64(s))
7041 goto illegal_op;
7042 val = ldub_code(s->pc++);
7043 gen_helper_aad(tcg_const_i32(val));
7044 s->cc_op = CC_OP_LOGICB;
7045 break;
7046 /************************/
7047 /* misc */
7048 case 0x90: /* nop */
7049 /* XXX: correct lock test for all insn */
7050 if (prefixes & PREFIX_LOCK) {
7051 goto illegal_op;
7052 }
7053 /* If REX_B is set, then this is xchg eax, r8d, not a nop. */
7054 if (REX_B(s)) {
7055 goto do_xchg_reg_eax;
7056 }
7057 if (prefixes & PREFIX_REPZ) {
7058 gen_svm_check_intercept(s, pc_start, SVM_EXIT_PAUSE);
7059 }
7060 break;
7061 case 0x9b: /* fwait */
7062 if ((s->flags & (HF_MP_MASK | HF_TS_MASK)) ==
7063 (HF_MP_MASK | HF_TS_MASK)) {
7064 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
7065 } else {
7066 if (s->cc_op != CC_OP_DYNAMIC)
7067 gen_op_set_cc_op(s->cc_op);
7068 gen_jmp_im(pc_start - s->cs_base);
7069 gen_helper_fwait();
7070 }
7071 break;
7072 case 0xcc: /* int3 */
7073#ifdef VBOX
7074 if (s->vm86 && s->iopl != 3 && !s->vme) {
7075 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7076 } else
7077#endif
7078 gen_interrupt(s, EXCP03_INT3, pc_start - s->cs_base, s->pc - s->cs_base);
7079 break;
7080 case 0xcd: /* int N */
7081 val = ldub_code(s->pc++);
7082#ifdef VBOX
7083 if (s->vm86 && s->iopl != 3 && !s->vme) {
7084#else
7085 if (s->vm86 && s->iopl != 3) {
7086#endif
7087 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7088 } else {
7089 gen_interrupt(s, val, pc_start - s->cs_base, s->pc - s->cs_base);
7090 }
7091 break;
7092 case 0xce: /* into */
7093 if (CODE64(s))
7094 goto illegal_op;
7095 if (s->cc_op != CC_OP_DYNAMIC)
7096 gen_op_set_cc_op(s->cc_op);
7097 gen_jmp_im(pc_start - s->cs_base);
7098 gen_helper_into(tcg_const_i32(s->pc - pc_start));
7099 break;
7100#ifdef WANT_ICEBP
7101 case 0xf1: /* icebp (undocumented, exits to external debugger) */
7102 gen_svm_check_intercept(s, pc_start, SVM_EXIT_ICEBP);
7103#if 1
7104 gen_debug(s, pc_start - s->cs_base);
7105#else
7106 /* start debug */
7107 tb_flush(cpu_single_env);
7108 cpu_set_log(CPU_LOG_INT | CPU_LOG_TB_IN_ASM);
7109#endif
7110 break;
7111#endif
7112 case 0xfa: /* cli */
7113 if (!s->vm86) {
7114 if (s->cpl <= s->iopl) {
7115 gen_helper_cli();
7116 } else {
7117 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7118 }
7119 } else {
7120 if (s->iopl == 3) {
7121 gen_helper_cli();
7122#ifdef VBOX
7123 } else if (s->iopl != 3 && s->vme) {
7124 gen_helper_cli_vme();
7125#endif
7126 } else {
7127 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7128 }
7129 }
7130 break;
7131 case 0xfb: /* sti */
7132 if (!s->vm86) {
7133 if (s->cpl <= s->iopl) {
7134 gen_sti:
7135 gen_helper_sti();
 7136             /* interrupts are enabled only after the first insn following sti */
 7137             /* if several consecutive instructions inhibit interrupts,
 7138                only the _first_ one actually sets the inhibit flag */
7139 if (!(s->tb->flags & HF_INHIBIT_IRQ_MASK))
7140 gen_helper_set_inhibit_irq();
7141 /* give a chance to handle pending irqs */
7142 gen_jmp_im(s->pc - s->cs_base);
7143 gen_eob(s);
7144 } else {
7145 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7146 }
7147 } else {
7148 if (s->iopl == 3) {
7149 goto gen_sti;
7150#ifdef VBOX
7151 } else if (s->iopl != 3 && s->vme) {
7152 gen_helper_sti_vme();
7153 /* give a chance to handle pending irqs */
7154 gen_jmp_im(s->pc - s->cs_base);
7155 gen_eob(s);
7156#endif
7157 } else {
7158 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7159 }
7160 }
7161 break;
7162 case 0x62: /* bound */
7163 if (CODE64(s))
7164 goto illegal_op;
7165 ot = dflag ? OT_LONG : OT_WORD;
7166 modrm = ldub_code(s->pc++);
7167 reg = (modrm >> 3) & 7;
7168 mod = (modrm >> 6) & 3;
7169 if (mod == 3)
7170 goto illegal_op;
7171 gen_op_mov_TN_reg(ot, 0, reg);
7172 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
7173 gen_jmp_im(pc_start - s->cs_base);
7174 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
7175 if (ot == OT_WORD)
7176 gen_helper_boundw(cpu_A0, cpu_tmp2_i32);
7177 else
7178 gen_helper_boundl(cpu_A0, cpu_tmp2_i32);
7179 break;
7180 case 0x1c8 ... 0x1cf: /* bswap reg */
7181 reg = (b & 7) | REX_B(s);
7182#ifdef TARGET_X86_64
7183 if (dflag == 2) {
7184 gen_op_mov_TN_reg(OT_QUAD, 0, reg);
7185 tcg_gen_bswap64_i64(cpu_T[0], cpu_T[0]);
7186 gen_op_mov_reg_T0(OT_QUAD, reg);
7187 } else
7188#endif
7189 {
7190 gen_op_mov_TN_reg(OT_LONG, 0, reg);
7191 tcg_gen_ext32u_tl(cpu_T[0], cpu_T[0]);
7192 tcg_gen_bswap32_tl(cpu_T[0], cpu_T[0]);
7193 gen_op_mov_reg_T0(OT_LONG, reg);
7194 }
7195 break;
7196 case 0xd6: /* salc */
7197 if (CODE64(s))
7198 goto illegal_op;
7199 if (s->cc_op != CC_OP_DYNAMIC)
7200 gen_op_set_cc_op(s->cc_op);
7201 gen_compute_eflags_c(cpu_T[0]);
7202 tcg_gen_neg_tl(cpu_T[0], cpu_T[0]);
7203 gen_op_mov_reg_T0(OT_BYTE, R_EAX);
7204 break;
7205 case 0xe0: /* loopnz */
7206 case 0xe1: /* loopz */
7207 case 0xe2: /* loop */
7208 case 0xe3: /* jecxz */
7209 {
7210 int l1, l2, l3;
7211
7212 tval = (int8_t)insn_get(s, OT_BYTE);
7213 next_eip = s->pc - s->cs_base;
7214 tval += next_eip;
7215 if (s->dflag == 0)
7216 tval &= 0xffff;
7217
7218 l1 = gen_new_label();
7219 l2 = gen_new_label();
7220 l3 = gen_new_label();
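            /* Label roles in the generated code: l1 is the taken target
               (jump to tval), l3 is the not-taken fall-through used by the
               loopz/loopnz ZF test, and both paths rejoin at l2 before
               gen_eob() closes the block. */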
7221 b &= 3;
7222 switch(b) {
7223 case 0: /* loopnz */
7224 case 1: /* loopz */
7225 if (s->cc_op != CC_OP_DYNAMIC)
7226 gen_op_set_cc_op(s->cc_op);
7227 gen_op_add_reg_im(s->aflag, R_ECX, -1);
7228 gen_op_jz_ecx(s->aflag, l3);
7229 gen_compute_eflags(cpu_tmp0);
7230 tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, CC_Z);
7231 if (b == 0) {
7232 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_tmp0, 0, l1);
7233 } else {
7234 tcg_gen_brcondi_tl(TCG_COND_NE, cpu_tmp0, 0, l1);
7235 }
7236 break;
7237 case 2: /* loop */
7238 gen_op_add_reg_im(s->aflag, R_ECX, -1);
7239 gen_op_jnz_ecx(s->aflag, l1);
7240 break;
7241 default:
7242 case 3: /* jcxz */
7243 gen_op_jz_ecx(s->aflag, l1);
7244 break;
7245 }
7246
7247 gen_set_label(l3);
7248 gen_jmp_im(next_eip);
7249 tcg_gen_br(l2);
7250
7251 gen_set_label(l1);
7252 gen_jmp_im(tval);
7253 gen_set_label(l2);
7254 gen_eob(s);
7255 }
7256 break;
7257 case 0x130: /* wrmsr */
7258 case 0x132: /* rdmsr */
7259 if (s->cpl != 0) {
7260 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7261 } else {
7262 if (s->cc_op != CC_OP_DYNAMIC)
7263 gen_op_set_cc_op(s->cc_op);
7264 gen_jmp_im(pc_start - s->cs_base);
7265 if (b & 2) {
7266 gen_helper_rdmsr();
7267 } else {
7268 gen_helper_wrmsr();
7269 }
7270 }
7271 break;
7272 case 0x131: /* rdtsc */
7273 if (s->cc_op != CC_OP_DYNAMIC)
7274 gen_op_set_cc_op(s->cc_op);
7275 gen_jmp_im(pc_start - s->cs_base);
7276 if (use_icount)
7277 gen_io_start();
7278 gen_helper_rdtsc();
7279 if (use_icount) {
7280 gen_io_end();
7281 gen_jmp(s, s->pc - s->cs_base);
7282 }
7283 break;
7284 case 0x133: /* rdpmc */
7285 if (s->cc_op != CC_OP_DYNAMIC)
7286 gen_op_set_cc_op(s->cc_op);
7287 gen_jmp_im(pc_start - s->cs_base);
7288 gen_helper_rdpmc();
7289 break;
7290 case 0x134: /* sysenter */
7291#ifndef VBOX
 7292         /* On Intel CPUs SYSENTER remains valid in 64-bit mode; AMD CPUs raise #UD */
7293 if (CODE64(s) && cpu_single_env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1)
7294#else
7295 if ( !(cpu_single_env->cpuid_features & CPUID_SEP)
7296 || ( IS_LONG_MODE(s)
7297 && CPUMGetGuestCpuVendor(cpu_single_env->pVM) != CPUMCPUVENDOR_INTEL))
7298#endif
7299 goto illegal_op;
7300 if (!s->pe) {
7301 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7302 } else {
7303 gen_update_cc_op(s);
7304 gen_jmp_im(pc_start - s->cs_base);
7305 gen_helper_sysenter();
7306 gen_eob(s);
7307 }
7308 break;
7309 case 0x135: /* sysexit */
7310#ifndef VBOX
 7311         /* On Intel CPUs SYSEXIT remains valid in 64-bit mode; AMD CPUs raise #UD */
7312 if (CODE64(s) && cpu_single_env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1)
7313#else
7314 if ( !(cpu_single_env->cpuid_features & CPUID_SEP)
7315 || ( IS_LONG_MODE(s)
7316 && CPUMGetGuestCpuVendor(cpu_single_env->pVM) != CPUMCPUVENDOR_INTEL))
7317#endif
7318 goto illegal_op;
7319 if (!s->pe) {
7320 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7321 } else {
7322 gen_update_cc_op(s);
7323 gen_jmp_im(pc_start - s->cs_base);
7324 gen_helper_sysexit(tcg_const_i32(dflag));
7325 gen_eob(s);
7326 }
7327 break;
7328#ifdef TARGET_X86_64
7329 case 0x105: /* syscall */
7330 /* XXX: is it usable in real mode ? */
7331 gen_update_cc_op(s);
7332 gen_jmp_im(pc_start - s->cs_base);
7333 gen_helper_syscall(tcg_const_i32(s->pc - pc_start));
7334 gen_eob(s);
7335 break;
7336 case 0x107: /* sysret */
7337 if (!s->pe) {
7338 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7339 } else {
7340 gen_update_cc_op(s);
7341 gen_jmp_im(pc_start - s->cs_base);
7342 gen_helper_sysret(tcg_const_i32(s->dflag));
7343 /* condition codes are modified only in long mode */
7344 if (s->lma)
7345 s->cc_op = CC_OP_EFLAGS;
7346 gen_eob(s);
7347 }
7348 break;
7349#endif
7350 case 0x1a2: /* cpuid */
7351 if (s->cc_op != CC_OP_DYNAMIC)
7352 gen_op_set_cc_op(s->cc_op);
7353 gen_jmp_im(pc_start - s->cs_base);
7354 gen_helper_cpuid();
7355 break;
7356 case 0xf4: /* hlt */
7357 if (s->cpl != 0) {
7358 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7359 } else {
7360 if (s->cc_op != CC_OP_DYNAMIC)
7361 gen_op_set_cc_op(s->cc_op);
7362 gen_jmp_im(pc_start - s->cs_base);
7363 gen_helper_hlt(tcg_const_i32(s->pc - pc_start));
7364 s->is_jmp = DISAS_TB_JUMP;
7365 }
7366 break;
7367 case 0x100:
7368 modrm = ldub_code(s->pc++);
7369 mod = (modrm >> 6) & 3;
7370 op = (modrm >> 3) & 7;
7371 switch(op) {
7372 case 0: /* sldt */
7373 if (!s->pe || s->vm86)
7374 goto illegal_op;
7375 gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_READ);
7376 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,ldt.selector));
7377 ot = OT_WORD;
7378 if (mod == 3)
7379 ot += s->dflag;
7380 gen_ldst_modrm(s, modrm, ot, OR_TMP0, 1);
7381 break;
7382 case 2: /* lldt */
7383 if (!s->pe || s->vm86)
7384 goto illegal_op;
7385 if (s->cpl != 0) {
7386 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7387 } else {
7388 gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_WRITE);
7389 gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0);
7390 gen_jmp_im(pc_start - s->cs_base);
7391 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
7392 gen_helper_lldt(cpu_tmp2_i32);
7393 }
7394 break;
7395 case 1: /* str */
7396 if (!s->pe || s->vm86)
7397 goto illegal_op;
7398 gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_READ);
7399 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,tr.selector));
7400 ot = OT_WORD;
7401 if (mod == 3)
7402 ot += s->dflag;
7403 gen_ldst_modrm(s, modrm, ot, OR_TMP0, 1);
7404 break;
7405 case 3: /* ltr */
7406 if (!s->pe || s->vm86)
7407 goto illegal_op;
7408 if (s->cpl != 0) {
7409 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7410 } else {
7411 gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_WRITE);
7412 gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0);
7413 gen_jmp_im(pc_start - s->cs_base);
7414 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
7415 gen_helper_ltr(cpu_tmp2_i32);
7416 }
7417 break;
7418 case 4: /* verr */
7419 case 5: /* verw */
7420 if (!s->pe || s->vm86)
7421 goto illegal_op;
7422 gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0);
7423 if (s->cc_op != CC_OP_DYNAMIC)
7424 gen_op_set_cc_op(s->cc_op);
7425 if (op == 4)
7426 gen_helper_verr(cpu_T[0]);
7427 else
7428 gen_helper_verw(cpu_T[0]);
7429 s->cc_op = CC_OP_EFLAGS;
7430 break;
7431 default:
7432 goto illegal_op;
7433 }
7434 break;
7435 case 0x101:
7436 modrm = ldub_code(s->pc++);
7437 mod = (modrm >> 6) & 3;
7438 op = (modrm >> 3) & 7;
7439 rm = modrm & 7;
7440 switch(op) {
7441 case 0: /* sgdt */
7442 if (mod == 3)
7443 goto illegal_op;
7444 gen_svm_check_intercept(s, pc_start, SVM_EXIT_GDTR_READ);
7445 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
7446 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, gdt.limit));
7447 gen_op_st_T0_A0(OT_WORD + s->mem_index);
7448 gen_add_A0_im(s, 2);
7449 tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, gdt.base));
7450 if (!s->dflag)
7451 gen_op_andl_T0_im(0xffffff);
7452 gen_op_st_T0_A0(CODE64(s) + OT_LONG + s->mem_index);
7453 break;
7454 case 1:
7455 if (mod == 3) {
7456 switch (rm) {
7457 case 0: /* monitor */
7458 if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) ||
7459 s->cpl != 0)
7460 goto illegal_op;
7461 if (s->cc_op != CC_OP_DYNAMIC)
7462 gen_op_set_cc_op(s->cc_op);
7463 gen_jmp_im(pc_start - s->cs_base);
7464#ifdef TARGET_X86_64
7465 if (s->aflag == 2) {
7466 gen_op_movq_A0_reg(R_EAX);
7467 } else
7468#endif
7469 {
7470 gen_op_movl_A0_reg(R_EAX);
7471 if (s->aflag == 0)
7472 gen_op_andl_A0_ffff();
7473 }
7474 gen_add_A0_ds_seg(s);
7475 gen_helper_monitor(cpu_A0);
7476 break;
7477 case 1: /* mwait */
7478 if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) ||
7479 s->cpl != 0)
7480 goto illegal_op;
7481 gen_update_cc_op(s);
7482 gen_jmp_im(pc_start - s->cs_base);
7483 gen_helper_mwait(tcg_const_i32(s->pc - pc_start));
7484 gen_eob(s);
7485 break;
7486 default:
7487 goto illegal_op;
7488 }
7489 } else { /* sidt */
7490 gen_svm_check_intercept(s, pc_start, SVM_EXIT_IDTR_READ);
7491 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
7492 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, idt.limit));
7493 gen_op_st_T0_A0(OT_WORD + s->mem_index);
7494 gen_add_A0_im(s, 2);
7495 tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, idt.base));
7496 if (!s->dflag)
7497 gen_op_andl_T0_im(0xffffff);
7498 gen_op_st_T0_A0(CODE64(s) + OT_LONG + s->mem_index);
7499 }
7500 break;
7501 case 2: /* lgdt */
7502 case 3: /* lidt */
7503 if (mod == 3) {
7504 if (s->cc_op != CC_OP_DYNAMIC)
7505 gen_op_set_cc_op(s->cc_op);
7506 gen_jmp_im(pc_start - s->cs_base);
7507 switch(rm) {
7508 case 0: /* VMRUN */
7509 if (!(s->flags & HF_SVME_MASK) || !s->pe)
7510 goto illegal_op;
7511 if (s->cpl != 0) {
7512 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7513 break;
7514 } else {
7515 gen_helper_vmrun(tcg_const_i32(s->aflag),
7516 tcg_const_i32(s->pc - pc_start));
7517 tcg_gen_exit_tb(0);
7518 s->is_jmp = DISAS_TB_JUMP;
7519 }
7520 break;
7521 case 1: /* VMMCALL */
7522 if (!(s->flags & HF_SVME_MASK))
7523 goto illegal_op;
7524 gen_helper_vmmcall();
7525 break;
7526 case 2: /* VMLOAD */
7527 if (!(s->flags & HF_SVME_MASK) || !s->pe)
7528 goto illegal_op;
7529 if (s->cpl != 0) {
7530 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7531 break;
7532 } else {
7533 gen_helper_vmload(tcg_const_i32(s->aflag));
7534 }
7535 break;
7536 case 3: /* VMSAVE */
7537 if (!(s->flags & HF_SVME_MASK) || !s->pe)
7538 goto illegal_op;
7539 if (s->cpl != 0) {
7540 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7541 break;
7542 } else {
7543 gen_helper_vmsave(tcg_const_i32(s->aflag));
7544 }
7545 break;
7546 case 4: /* STGI */
7547 if ((!(s->flags & HF_SVME_MASK) &&
7548 !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT)) ||
7549 !s->pe)
7550 goto illegal_op;
7551 if (s->cpl != 0) {
7552 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7553 break;
7554 } else {
7555 gen_helper_stgi();
7556 }
7557 break;
7558 case 5: /* CLGI */
7559 if (!(s->flags & HF_SVME_MASK) || !s->pe)
7560 goto illegal_op;
7561 if (s->cpl != 0) {
7562 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7563 break;
7564 } else {
7565 gen_helper_clgi();
7566 }
7567 break;
7568 case 6: /* SKINIT */
7569 if ((!(s->flags & HF_SVME_MASK) &&
7570 !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT)) ||
7571 !s->pe)
7572 goto illegal_op;
7573 gen_helper_skinit();
7574 break;
7575 case 7: /* INVLPGA */
7576 if (!(s->flags & HF_SVME_MASK) || !s->pe)
7577 goto illegal_op;
7578 if (s->cpl != 0) {
7579 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7580 break;
7581 } else {
7582 gen_helper_invlpga(tcg_const_i32(s->aflag));
7583 }
7584 break;
7585 default:
7586 goto illegal_op;
7587 }
7588 } else if (s->cpl != 0) {
7589 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7590 } else {
7591 gen_svm_check_intercept(s, pc_start,
7592 op==2 ? SVM_EXIT_GDTR_WRITE : SVM_EXIT_IDTR_WRITE);
7593 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
7594 gen_op_ld_T1_A0(OT_WORD + s->mem_index);
7595 gen_add_A0_im(s, 2);
7596 gen_op_ld_T0_A0(CODE64(s) + OT_LONG + s->mem_index);
7597 if (!s->dflag)
7598 gen_op_andl_T0_im(0xffffff);
7599 if (op == 2) {
7600 tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,gdt.base));
7601 tcg_gen_st32_tl(cpu_T[1], cpu_env, offsetof(CPUX86State,gdt.limit));
7602 } else {
7603 tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,idt.base));
7604 tcg_gen_st32_tl(cpu_T[1], cpu_env, offsetof(CPUX86State,idt.limit));
7605 }
7606 }
7607 break;
7608 case 4: /* smsw */
7609 gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_CR0);
7610#if defined TARGET_X86_64 && defined HOST_WORDS_BIGENDIAN
7611 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,cr[0]) + 4);
7612#else
7613 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,cr[0]));
7614#endif
7615 gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 1);
7616 break;
7617 case 6: /* lmsw */
7618 if (s->cpl != 0) {
7619 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7620 } else {
7621 gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0);
7622 gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0);
7623 gen_helper_lmsw(cpu_T[0]);
7624 gen_jmp_im(s->pc - s->cs_base);
7625 gen_eob(s);
7626 }
7627 break;
7628 case 7:
7629 if (mod != 3) { /* invlpg */
7630 if (s->cpl != 0) {
7631 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7632 } else {
7633 if (s->cc_op != CC_OP_DYNAMIC)
7634 gen_op_set_cc_op(s->cc_op);
7635 gen_jmp_im(pc_start - s->cs_base);
7636 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
7637 gen_helper_invlpg(cpu_A0);
7638 gen_jmp_im(s->pc - s->cs_base);
7639 gen_eob(s);
7640 }
7641 } else {
7642 switch (rm) {
7643 case 0: /* swapgs */
7644#ifdef TARGET_X86_64
7645 if (CODE64(s)) {
7646 if (s->cpl != 0) {
7647 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7648 } else {
7649 tcg_gen_ld_tl(cpu_T[0], cpu_env,
7650 offsetof(CPUX86State,segs[R_GS].base));
7651 tcg_gen_ld_tl(cpu_T[1], cpu_env,
7652 offsetof(CPUX86State,kernelgsbase));
7653 tcg_gen_st_tl(cpu_T[1], cpu_env,
7654 offsetof(CPUX86State,segs[R_GS].base));
7655 tcg_gen_st_tl(cpu_T[0], cpu_env,
7656 offsetof(CPUX86State,kernelgsbase));
7657 }
7658 } else
7659#endif
7660 {
7661 goto illegal_op;
7662 }
7663 break;
7664 case 1: /* rdtscp */
7665 if (!(s->cpuid_ext2_features & CPUID_EXT2_RDTSCP))
7666 goto illegal_op;
7667 if (s->cc_op != CC_OP_DYNAMIC)
7668 gen_op_set_cc_op(s->cc_op);
7669 gen_jmp_im(pc_start - s->cs_base);
7670 if (use_icount)
7671 gen_io_start();
7672 gen_helper_rdtscp();
7673 if (use_icount) {
7674 gen_io_end();
7675 gen_jmp(s, s->pc - s->cs_base);
7676 }
7677 break;
7678 default:
7679 goto illegal_op;
7680 }
7681 }
7682 break;
7683 default:
7684 goto illegal_op;
7685 }
7686 break;
7687 case 0x108: /* invd */
7688 case 0x109: /* wbinvd */
7689 if (s->cpl != 0) {
7690 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7691 } else {
7692 gen_svm_check_intercept(s, pc_start, (b & 2) ? SVM_EXIT_INVD : SVM_EXIT_WBINVD);
7693 /* nothing to do */
7694 }
7695 break;
7696 case 0x63: /* arpl or movslS (x86_64) */
7697#ifdef TARGET_X86_64
7698 if (CODE64(s)) {
7699 int d_ot;
 7700             /* d_ot is the size of the destination */
7701 d_ot = dflag + OT_WORD;
7702
7703 modrm = ldub_code(s->pc++);
7704 reg = ((modrm >> 3) & 7) | rex_r;
7705 mod = (modrm >> 6) & 3;
7706 rm = (modrm & 7) | REX_B(s);
7707
7708 if (mod == 3) {
7709 gen_op_mov_TN_reg(OT_LONG, 0, rm);
7710 /* sign extend */
7711 if (d_ot == OT_QUAD)
7712 tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
7713 gen_op_mov_reg_T0(d_ot, reg);
7714 } else {
7715 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
7716 if (d_ot == OT_QUAD) {
7717 gen_op_lds_T0_A0(OT_LONG + s->mem_index);
7718 } else {
7719 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
7720 }
7721 gen_op_mov_reg_T0(d_ot, reg);
7722 }
7723 } else
7724#endif
7725 {
7726 int label1;
7727 TCGv t0, t1, t2, a0;
7728
7729 if (!s->pe || s->vm86)
7730 goto illegal_op;
7731 t0 = tcg_temp_local_new();
7732 t1 = tcg_temp_local_new();
7733 t2 = tcg_temp_local_new();
7734 ot = OT_WORD;
7735 modrm = ldub_code(s->pc++);
7736 reg = (modrm >> 3) & 7;
7737 mod = (modrm >> 6) & 3;
7738 rm = modrm & 7;
7739 if (mod != 3) {
7740 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
7741 gen_op_ld_v(ot + s->mem_index, t0, cpu_A0);
7742 a0 = tcg_temp_local_new();
7743 tcg_gen_mov_tl(a0, cpu_A0);
7744 } else {
7745 gen_op_mov_v_reg(ot, t0, rm);
7746 TCGV_UNUSED(a0);
7747 }
7748 gen_op_mov_v_reg(ot, t1, reg);
7749 tcg_gen_andi_tl(cpu_tmp0, t0, 3);
7750 tcg_gen_andi_tl(t1, t1, 3);
7751 tcg_gen_movi_tl(t2, 0);
7752 label1 = gen_new_label();
7753 tcg_gen_brcond_tl(TCG_COND_GE, cpu_tmp0, t1, label1);
7754 tcg_gen_andi_tl(t0, t0, ~3);
7755 tcg_gen_or_tl(t0, t0, t1);
7756 tcg_gen_movi_tl(t2, CC_Z);
7757 gen_set_label(label1);
7758 if (mod != 3) {
7759 gen_op_st_v(ot + s->mem_index, t0, a0);
7760 tcg_temp_free(a0);
7761 } else {
7762 gen_op_mov_reg_v(ot, rm, t0);
7763 }
7764 if (s->cc_op != CC_OP_DYNAMIC)
7765 gen_op_set_cc_op(s->cc_op);
7766 gen_compute_eflags(cpu_cc_src);
7767 tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_Z);
7768 tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t2);
7769 s->cc_op = CC_OP_EFLAGS;
7770 tcg_temp_free(t0);
7771 tcg_temp_free(t1);
7772 tcg_temp_free(t2);
7773 }
7774 break;
7775 case 0x102: /* lar */
7776 case 0x103: /* lsl */
7777 {
7778 int label1;
7779 TCGv t0;
7780 if (!s->pe || s->vm86)
7781 goto illegal_op;
7782 ot = dflag ? OT_LONG : OT_WORD;
7783 modrm = ldub_code(s->pc++);
7784 reg = ((modrm >> 3) & 7) | rex_r;
7785 gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0);
7786 t0 = tcg_temp_local_new();
7787 if (s->cc_op != CC_OP_DYNAMIC)
7788 gen_op_set_cc_op(s->cc_op);
7789 if (b == 0x102)
7790 gen_helper_lar(t0, cpu_T[0]);
7791 else
7792 gen_helper_lsl(t0, cpu_T[0]);
7793 tcg_gen_andi_tl(cpu_tmp0, cpu_cc_src, CC_Z);
7794 label1 = gen_new_label();
7795 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_tmp0, 0, label1);
7796 gen_op_mov_reg_v(ot, reg, t0);
7797 gen_set_label(label1);
7798 s->cc_op = CC_OP_EFLAGS;
7799 tcg_temp_free(t0);
7800 }
7801 break;
7802 case 0x118:
7803 modrm = ldub_code(s->pc++);
7804 mod = (modrm >> 6) & 3;
7805 op = (modrm >> 3) & 7;
7806 switch(op) {
7807 case 0: /* prefetchnta */
 7808             case 1: /* prefetcht0 */
 7809             case 2: /* prefetcht1 */
 7810             case 3: /* prefetcht2 */
7811 if (mod == 3)
7812 goto illegal_op;
7813 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
7814 /* nothing more to do */
7815 break;
7816 default: /* nop (multi byte) */
7817 gen_nop_modrm(s, modrm);
7818 break;
7819 }
7820 break;
7821 case 0x119 ... 0x11f: /* nop (multi byte) */
7822 modrm = ldub_code(s->pc++);
7823 gen_nop_modrm(s, modrm);
7824 break;
7825 case 0x120: /* mov reg, crN */
7826 case 0x122: /* mov crN, reg */
7827 if (s->cpl != 0) {
7828 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7829 } else {
7830 modrm = ldub_code(s->pc++);
7831#ifndef VBOX /* mod bits are always understood to be 11 (0xc0) regardless of actual content; see AMD manuals */
7832 if ((modrm & 0xc0) != 0xc0)
7833 goto illegal_op;
7834#endif
7835 rm = (modrm & 7) | REX_B(s);
7836 reg = ((modrm >> 3) & 7) | rex_r;
7837 if (CODE64(s))
7838 ot = OT_QUAD;
7839 else
7840 ot = OT_LONG;
7841 if ((prefixes & PREFIX_LOCK) && (reg == 0) &&
7842 (s->cpuid_ext3_features & CPUID_EXT3_CR8LEG)) {
7843 reg = 8;
7844 }
7845 switch(reg) {
7846 case 0:
7847 case 2:
7848 case 3:
7849 case 4:
7850 case 8:
7851 if (s->cc_op != CC_OP_DYNAMIC)
7852 gen_op_set_cc_op(s->cc_op);
7853 gen_jmp_im(pc_start - s->cs_base);
7854 if (b & 2) {
7855 gen_op_mov_TN_reg(ot, 0, rm);
7856 gen_helper_write_crN(tcg_const_i32(reg), cpu_T[0]);
7857 gen_jmp_im(s->pc - s->cs_base);
7858 gen_eob(s);
7859 } else {
7860 gen_helper_read_crN(cpu_T[0], tcg_const_i32(reg));
7861 gen_op_mov_reg_T0(ot, rm);
7862 }
7863 break;
7864 default:
7865 goto illegal_op;
7866 }
7867 }
7868 break;
7869 case 0x121: /* mov reg, drN */
7870 case 0x123: /* mov drN, reg */
7871 if (s->cpl != 0) {
7872 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7873 } else {
7874 modrm = ldub_code(s->pc++);
7875#ifndef VBOX /* mod bits are always understood to be 11 (0xc0) regardless of actual content; see AMD manuals */
7876 if ((modrm & 0xc0) != 0xc0)
7877 goto illegal_op;
7878#endif
7879 rm = (modrm & 7) | REX_B(s);
7880 reg = ((modrm >> 3) & 7) | rex_r;
7881 if (CODE64(s))
7882 ot = OT_QUAD;
7883 else
7884 ot = OT_LONG;
7885 /* XXX: do it dynamically with CR4.DE bit */
7886 if (reg == 4 || reg == 5 || reg >= 8)
7887 goto illegal_op;
7888 if (b & 2) {
7889 gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_DR0 + reg);
7890 gen_op_mov_TN_reg(ot, 0, rm);
7891 gen_helper_movl_drN_T0(tcg_const_i32(reg), cpu_T[0]);
7892 gen_jmp_im(s->pc - s->cs_base);
7893 gen_eob(s);
7894 } else {
7895 gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_DR0 + reg);
7896 tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,dr[reg]));
7897 gen_op_mov_reg_T0(ot, rm);
7898 }
7899 }
7900 break;
7901 case 0x106: /* clts */
7902 if (s->cpl != 0) {
7903 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7904 } else {
7905 gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0);
7906 gen_helper_clts();
7907 /* abort block because static cpu state changed */
7908 gen_jmp_im(s->pc - s->cs_base);
7909 gen_eob(s);
7910 }
7911 break;
7912 /* MMX/3DNow!/SSE/SSE2/SSE3/SSSE3/SSE4 support */
7913 case 0x1c3: /* MOVNTI reg, mem */
7914 if (!(s->cpuid_features & CPUID_SSE2))
7915 goto illegal_op;
7916 ot = s->dflag == 2 ? OT_QUAD : OT_LONG;
7917 modrm = ldub_code(s->pc++);
7918 mod = (modrm >> 6) & 3;
7919 if (mod == 3)
7920 goto illegal_op;
7921 reg = ((modrm >> 3) & 7) | rex_r;
7922 /* generate a generic store */
7923 gen_ldst_modrm(s, modrm, ot, reg, 1);
7924 break;
7925 case 0x1ae:
7926 modrm = ldub_code(s->pc++);
7927 mod = (modrm >> 6) & 3;
7928 op = (modrm >> 3) & 7;
7929 switch(op) {
7930 case 0: /* fxsave */
7931 if (mod == 3 || !(s->cpuid_features & CPUID_FXSR) ||
7932 (s->prefix & PREFIX_LOCK))
7933 goto illegal_op;
7934 if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
7935 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
7936 break;
7937 }
7938 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
7939 if (s->cc_op != CC_OP_DYNAMIC)
7940 gen_op_set_cc_op(s->cc_op);
7941 gen_jmp_im(pc_start - s->cs_base);
7942 gen_helper_fxsave(cpu_A0, tcg_const_i32((s->dflag == 2)));
7943 break;
7944 case 1: /* fxrstor */
7945 if (mod == 3 || !(s->cpuid_features & CPUID_FXSR) ||
7946 (s->prefix & PREFIX_LOCK))
7947 goto illegal_op;
7948 if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
7949 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
7950 break;
7951 }
7952 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
7953 if (s->cc_op != CC_OP_DYNAMIC)
7954 gen_op_set_cc_op(s->cc_op);
7955 gen_jmp_im(pc_start - s->cs_base);
7956 gen_helper_fxrstor(cpu_A0, tcg_const_i32((s->dflag == 2)));
7957 break;
7958 case 2: /* ldmxcsr */
7959 case 3: /* stmxcsr */
7960 if (s->flags & HF_TS_MASK) {
7961 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
7962 break;
7963 }
7964 if ((s->flags & HF_EM_MASK) || !(s->flags & HF_OSFXSR_MASK) ||
7965 mod == 3)
7966 goto illegal_op;
7967 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
7968 if (op == 2) {
7969 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
7970 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, mxcsr));
7971 } else {
7972 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, mxcsr));
7973 gen_op_st_T0_A0(OT_LONG + s->mem_index);
7974 }
7975 break;
7976 case 5: /* lfence */
7977 case 6: /* mfence */
7978 if ((modrm & 0xc7) != 0xc0 || !(s->cpuid_features & CPUID_SSE))
7979 goto illegal_op;
7980 break;
7981 case 7: /* sfence / clflush */
7982 if ((modrm & 0xc7) == 0xc0) {
7983 /* sfence */
7984 /* XXX: also check for cpuid_ext2_features & CPUID_EXT2_EMMX */
7985 if (!(s->cpuid_features & CPUID_SSE))
7986 goto illegal_op;
7987 } else {
7988 /* clflush */
7989 if (!(s->cpuid_features & CPUID_CLFLUSH))
7990 goto illegal_op;
7991 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
7992 }
7993 break;
7994 default:
7995 goto illegal_op;
7996 }
7997 break;
7998 case 0x10d: /* 3DNow! prefetch(w) */
7999 modrm = ldub_code(s->pc++);
8000 mod = (modrm >> 6) & 3;
8001 if (mod == 3)
8002 goto illegal_op;
8003 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
8004 /* ignore for now */
8005 break;
8006 case 0x1aa: /* rsm */
8007 gen_svm_check_intercept(s, pc_start, SVM_EXIT_RSM);
8008 if (!(s->flags & HF_SMM_MASK))
8009 goto illegal_op;
8010 gen_update_cc_op(s);
8011 gen_jmp_im(s->pc - s->cs_base);
8012 gen_helper_rsm();
8013 gen_eob(s);
8014 break;
8015 case 0x1b8: /* SSE4.2 popcnt */
8016 if ((prefixes & (PREFIX_REPZ | PREFIX_LOCK | PREFIX_REPNZ)) !=
8017 PREFIX_REPZ)
8018 goto illegal_op;
8019 if (!(s->cpuid_ext_features & CPUID_EXT_POPCNT))
8020 goto illegal_op;
8021
8022 modrm = ldub_code(s->pc++);
8023 reg = ((modrm >> 3) & 7);
8024
8025 if (s->prefix & PREFIX_DATA)
8026 ot = OT_WORD;
8027 else if (s->dflag != 2)
8028 ot = OT_LONG;
8029 else
8030 ot = OT_QUAD;
8031
8032 gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0);
8033 gen_helper_popcnt(cpu_T[0], cpu_T[0], tcg_const_i32(ot));
8034 gen_op_mov_reg_T0(ot, reg);
8035
8036 s->cc_op = CC_OP_EFLAGS;
8037 break;
8038 case 0x10e ... 0x10f:
8039 /* 3DNow! instructions, ignore prefixes */
8040 s->prefix &= ~(PREFIX_REPZ | PREFIX_REPNZ | PREFIX_DATA);
8041 case 0x110 ... 0x117:
8042 case 0x128 ... 0x12f:
8043 case 0x138 ... 0x13a:
8044 case 0x150 ... 0x179:
8045 case 0x17c ... 0x17f:
8046 case 0x1c2:
8047 case 0x1c4 ... 0x1c6:
8048 case 0x1d0 ... 0x1fe:
8049 gen_sse(s, b, pc_start, rex_r);
8050 break;
8051 default:
8052 goto illegal_op;
8053 }
8054 /* lock generation */
8055 if (s->prefix & PREFIX_LOCK)
8056 gen_helper_unlock();
8057 return s->pc;
8058 illegal_op:
8059 if (s->prefix & PREFIX_LOCK)
8060 gen_helper_unlock();
8061 /* XXX: ensure that no lock was generated */
8062 gen_exception(s, EXCP06_ILLOP, pc_start - s->cs_base);
8063 return s->pc;
8064}
8065
8066void optimize_flags_init(void)
8067{
8068#if TCG_TARGET_REG_BITS == 32
8069 assert(sizeof(CCTable) == (1 << 3));
8070#else
8071 assert(sizeof(CCTable) == (1 << 4));
8072#endif
8073 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
8074 cpu_cc_op = tcg_global_mem_new_i32(TCG_AREG0,
8075 offsetof(CPUState, cc_op), "cc_op");
8076 cpu_cc_src = tcg_global_mem_new(TCG_AREG0, offsetof(CPUState, cc_src),
8077 "cc_src");
8078 cpu_cc_dst = tcg_global_mem_new(TCG_AREG0, offsetof(CPUState, cc_dst),
8079 "cc_dst");
8080 cpu_cc_tmp = tcg_global_mem_new(TCG_AREG0, offsetof(CPUState, cc_tmp),
8081 "cc_tmp");
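    /* These globals back the lazy condition-code scheme: cc_op records
       which operation last set the flags while cc_src/cc_dst hold its
       operands, and gen_compute_eflags() materializes EFLAGS from them
       only when an instruction actually needs the flags. */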
8082
8083#ifdef TARGET_X86_64
8084 cpu_regs[R_EAX] = tcg_global_mem_new_i64(TCG_AREG0,
8085 offsetof(CPUState, regs[R_EAX]), "rax");
8086 cpu_regs[R_ECX] = tcg_global_mem_new_i64(TCG_AREG0,
8087 offsetof(CPUState, regs[R_ECX]), "rcx");
8088 cpu_regs[R_EDX] = tcg_global_mem_new_i64(TCG_AREG0,
8089 offsetof(CPUState, regs[R_EDX]), "rdx");
8090 cpu_regs[R_EBX] = tcg_global_mem_new_i64(TCG_AREG0,
8091 offsetof(CPUState, regs[R_EBX]), "rbx");
8092 cpu_regs[R_ESP] = tcg_global_mem_new_i64(TCG_AREG0,
8093 offsetof(CPUState, regs[R_ESP]), "rsp");
8094 cpu_regs[R_EBP] = tcg_global_mem_new_i64(TCG_AREG0,
8095 offsetof(CPUState, regs[R_EBP]), "rbp");
8096 cpu_regs[R_ESI] = tcg_global_mem_new_i64(TCG_AREG0,
8097 offsetof(CPUState, regs[R_ESI]), "rsi");
8098 cpu_regs[R_EDI] = tcg_global_mem_new_i64(TCG_AREG0,
8099 offsetof(CPUState, regs[R_EDI]), "rdi");
8100 cpu_regs[8] = tcg_global_mem_new_i64(TCG_AREG0,
8101 offsetof(CPUState, regs[8]), "r8");
8102 cpu_regs[9] = tcg_global_mem_new_i64(TCG_AREG0,
8103 offsetof(CPUState, regs[9]), "r9");
8104 cpu_regs[10] = tcg_global_mem_new_i64(TCG_AREG0,
8105 offsetof(CPUState, regs[10]), "r10");
8106 cpu_regs[11] = tcg_global_mem_new_i64(TCG_AREG0,
8107 offsetof(CPUState, regs[11]), "r11");
8108 cpu_regs[12] = tcg_global_mem_new_i64(TCG_AREG0,
8109 offsetof(CPUState, regs[12]), "r12");
8110 cpu_regs[13] = tcg_global_mem_new_i64(TCG_AREG0,
8111 offsetof(CPUState, regs[13]), "r13");
8112 cpu_regs[14] = tcg_global_mem_new_i64(TCG_AREG0,
8113 offsetof(CPUState, regs[14]), "r14");
8114 cpu_regs[15] = tcg_global_mem_new_i64(TCG_AREG0,
8115 offsetof(CPUState, regs[15]), "r15");
8116#else
8117 cpu_regs[R_EAX] = tcg_global_mem_new_i32(TCG_AREG0,
8118 offsetof(CPUState, regs[R_EAX]), "eax");
8119 cpu_regs[R_ECX] = tcg_global_mem_new_i32(TCG_AREG0,
8120 offsetof(CPUState, regs[R_ECX]), "ecx");
8121 cpu_regs[R_EDX] = tcg_global_mem_new_i32(TCG_AREG0,
8122 offsetof(CPUState, regs[R_EDX]), "edx");
8123 cpu_regs[R_EBX] = tcg_global_mem_new_i32(TCG_AREG0,
8124 offsetof(CPUState, regs[R_EBX]), "ebx");
8125 cpu_regs[R_ESP] = tcg_global_mem_new_i32(TCG_AREG0,
8126 offsetof(CPUState, regs[R_ESP]), "esp");
8127 cpu_regs[R_EBP] = tcg_global_mem_new_i32(TCG_AREG0,
8128 offsetof(CPUState, regs[R_EBP]), "ebp");
8129 cpu_regs[R_ESI] = tcg_global_mem_new_i32(TCG_AREG0,
8130 offsetof(CPUState, regs[R_ESI]), "esi");
8131 cpu_regs[R_EDI] = tcg_global_mem_new_i32(TCG_AREG0,
8132 offsetof(CPUState, regs[R_EDI]), "edi");
8133#endif
8134
8135 /* register helpers */
8136#define GEN_HELPER 2
8137#include "helper.h"
8138}
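
/* A rough sketch of what the declarations above buy us: each TCG global
   created with tcg_global_mem_new*() aliases a fixed slot of CPUState,
   reachable from TCG_AREG0 (the env base pointer), e.g.

       cpu_regs[R_EAX]  <->  ((CPUState *)TCG_AREG0)->regs[R_EAX]

   so generated code can keep guest registers in host registers and sync
   them back to env only at block boundaries and helper calls. */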

/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
   basic block 'tb'. If search_pc is TRUE, also generate PC
   information for each intermediate instruction. */
static inline void gen_intermediate_code_internal(CPUState *env,
                                                  TranslationBlock *tb,
                                                  int search_pc)
{
    DisasContext dc1, *dc = &dc1;
    target_ulong pc_ptr;
    uint16_t *gen_opc_end;
    CPUBreakpoint *bp;
    int j, lj;
    uint64_t flags;
    target_ulong pc_start;
    target_ulong cs_base;
    int num_insns;
    int max_insns;
#ifdef VBOX
    int const singlestep = env->state & CPU_EMULATE_SINGLE_STEP;
#endif

    /* generate intermediate code */
    pc_start = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;

    dc->pe = (flags >> HF_PE_SHIFT) & 1;
    dc->code32 = (flags >> HF_CS32_SHIFT) & 1;
    dc->ss32 = (flags >> HF_SS32_SHIFT) & 1;
    dc->addseg = (flags >> HF_ADDSEG_SHIFT) & 1;
    dc->f_st = 0;
    dc->vm86 = (flags >> VM_SHIFT) & 1;
#ifdef VBOX
    dc->vme = !!(env->cr[4] & CR4_VME_MASK);
    dc->pvi = !!(env->cr[4] & CR4_PVI_MASK);
# ifdef VBOX_WITH_CALL_RECORD
    if (   !(env->state & CPU_RAW_RING0)
        && (env->cr[0] & CR0_PG_MASK)
        && !(env->eflags & X86_EFL_IF)
        && dc->code32)
        dc->record_call = 1;
    else
        dc->record_call = 0;
# endif
#endif /* VBOX */
    dc->cpl = (flags >> HF_CPL_SHIFT) & 3;
    dc->iopl = (flags >> IOPL_SHIFT) & 3;
    dc->tf = (flags >> TF_SHIFT) & 1;
    dc->singlestep_enabled = env->singlestep_enabled;
    dc->cc_op = CC_OP_DYNAMIC;
    dc->cs_base = cs_base;
    dc->tb = tb;
    dc->popl_esp_hack = 0;
    /* select memory access functions */
    dc->mem_index = 0;
    if (flags & HF_SOFTMMU_MASK) {
        if (dc->cpl == 3)
            dc->mem_index = 2 * 4;
        else
            dc->mem_index = 1 * 4;
    }
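    /* Note: the factor of four appears to pack the MMU mode into the upper
       bits of mem_index, leaving the low two bits free for the access size;
       treat the exact encoding as an implementation detail of the ld/st
       generators rather than a stable contract. */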
    dc->cpuid_features = env->cpuid_features;
    dc->cpuid_ext_features = env->cpuid_ext_features;
    dc->cpuid_ext2_features = env->cpuid_ext2_features;
    dc->cpuid_ext3_features = env->cpuid_ext3_features;
#ifdef TARGET_X86_64
    dc->lma = (flags >> HF_LMA_SHIFT) & 1;
    dc->code64 = (flags >> HF_CS64_SHIFT) & 1;
#endif
    dc->flags = flags;
    dc->jmp_opt = !(dc->tf || env->singlestep_enabled ||
                    (flags & HF_INHIBIT_IRQ_MASK)
#ifndef CONFIG_SOFTMMU
                    || (flags & HF_SOFTMMU_MASK)
#endif
                    );
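    /* When jmp_opt is set, direct jumps may be compiled into TB chaining;
       single-stepping or pending IRQ inhibition forces a full exit after
       every block so the execution loop regains control promptly. */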
#if 0
    /* check addseg logic */
    if (!dc->addseg && (dc->vm86 || !dc->pe || !dc->code32))
        printf("ERROR addseg\n");
#endif

    cpu_T[0] = tcg_temp_new();
    cpu_T[1] = tcg_temp_new();
    cpu_A0 = tcg_temp_new();
    cpu_T3 = tcg_temp_new();

    cpu_tmp0 = tcg_temp_new();
    cpu_tmp1_i64 = tcg_temp_new_i64();
    cpu_tmp2_i32 = tcg_temp_new_i32();
    cpu_tmp3_i32 = tcg_temp_new_i32();
    cpu_tmp4 = tcg_temp_new();
    cpu_tmp5 = tcg_temp_new();
    cpu_ptr0 = tcg_temp_new_ptr();
    cpu_ptr1 = tcg_temp_new_ptr();

    gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;

    dc->is_jmp = DISAS_NEXT;
    pc_ptr = pc_start;
    lj = -1;
    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0)
        max_insns = CF_COUNT_MASK;

    gen_icount_start();
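    /* Main translation loop: one guest instruction per iteration, until a
       block-ending instruction is reached or one of the limits below fires. */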
    for(;;) {
        if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
            QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
                if (bp->pc == pc_ptr &&
                    !((bp->flags & BP_CPU) && (tb->flags & HF_RF_MASK))) {
                    gen_debug(dc, pc_ptr - dc->cs_base);
                    break;
                }
            }
        }
        if (search_pc) {
            j = gen_opc_ptr - gen_opc_buf;
            if (lj < j) {
                lj++;
                while (lj < j)
                    gen_opc_instr_start[lj++] = 0;
            }
            gen_opc_pc[lj] = pc_ptr;
            gen_opc_cc_op[lj] = dc->cc_op;
            gen_opc_instr_start[lj] = 1;
            gen_opc_icount[lj] = num_insns;
        }
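        /* The gen_opc_* arrays record, per TCG opcode index, the guest PC
           and cc_op state each instruction started with; gen_pc_load()
           below uses them to map a host PC back to guest state. */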
        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
            gen_io_start();

        pc_ptr = disas_insn(dc, pc_ptr);
        num_insns++;
        /* stop translation if indicated */
        if (dc->is_jmp)
            break;
#ifdef VBOX
# ifdef DEBUG
/*
        if (cpu_check_code_raw(env, pc_ptr, env->hflags | (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK))) == ERROR_SUCCESS)
        {
            // should never happen as the jump to the patch code terminates the translation block
            dprintf(("QEmu is about to execute instructions in our patch block at %08X!!\n", pc_ptr));
        }
*/
# endif /* DEBUG */
        if (env->state & CPU_EMULATE_SINGLE_INSTR)
        {
            env->state &= ~CPU_EMULATE_SINGLE_INSTR;
            gen_jmp_im(pc_ptr - dc->cs_base);
            gen_eob(dc);
            break;
        }
#endif /* VBOX */

        /* if in single-step mode, we generate only one instruction and
           generate an exception */
        /* if IRQs were inhibited with HF_INHIBIT_IRQ_MASK, we clear
           the flag and abort the translation to give the IRQs a
           chance to happen */
        if (dc->tf || dc->singlestep_enabled ||
            (flags & HF_INHIBIT_IRQ_MASK)) {
            gen_jmp_im(pc_ptr - dc->cs_base);
            gen_eob(dc);
            break;
        }
        /* if the translation grows too long, stop generation too */
        if (gen_opc_ptr >= gen_opc_end ||
            (pc_ptr - pc_start) >= (TARGET_PAGE_SIZE - 32) ||
            num_insns >= max_insns) {
            gen_jmp_im(pc_ptr - dc->cs_base);
            gen_eob(dc);
            break;
        }
        if (singlestep) {
            gen_jmp_im(pc_ptr - dc->cs_base);
            gen_eob(dc);
            break;
        }
    }
    if (tb->cflags & CF_LAST_IO)
        gen_io_end();
    gen_icount_end(tb, num_insns);
    *gen_opc_ptr = INDEX_op_end;
    /* don't forget to fill in the last values */
    if (search_pc) {
        j = gen_opc_ptr - gen_opc_buf;
        lj++;
        while (lj <= j)
            gen_opc_instr_start[lj++] = 0;
    }

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        int disas_flags;
        qemu_log("----------------\n");
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
#ifdef TARGET_X86_64
        if (dc->code64)
            disas_flags = 2;
        else
#endif
            disas_flags = !dc->code32;
        log_target_disas(pc_start, pc_ptr - pc_start, disas_flags);
        qemu_log("\n");
    }
#endif

    if (!search_pc) {
        tb->size = pc_ptr - pc_start;
        tb->icount = num_insns;
    }
}

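/* Public entry points: plain translation, and the search_pc variant that
   additionally records the PC mapping consumed by gen_pc_load() below. */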
void gen_intermediate_code(CPUState *env, TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 0);
}

void gen_intermediate_code_pc(CPUState *env, TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 1);
}

void gen_pc_load(CPUState *env, TranslationBlock *tb,
                 uintptr_t searched_pc, int pc_pos, void *puc)
{
    int cc_op;
#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_OP)) {
        int i;
        qemu_log("RESTORE:\n");
        for (i = 0; i <= pc_pos; i++) {
            if (gen_opc_instr_start[i]) {
                qemu_log("0x%04x: " TARGET_FMT_lx "\n", i, gen_opc_pc[i]);
            }
        }
        qemu_log("spc=0x%08lx pc_pos=0x%x eip=" TARGET_FMT_lx " cs_base=%x\n",
                 searched_pc, pc_pos, gen_opc_pc[pc_pos] - tb->cs_base,
                 (uint32_t)tb->cs_base);
    }
#endif
    env->eip = gen_opc_pc[pc_pos] - tb->cs_base;
    cc_op = gen_opc_cc_op[pc_pos];
    if (cc_op != CC_OP_DYNAMIC)
        env->cc_op = cc_op;
}
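
/* gen_pc_load() is called when CPU state must be rewound to a guest
   instruction boundary, e.g. after a fault inside a translated block.
   A sketch of a typical caller (find_opc_index is hypothetical; the real
   lookup lives in the generic state-restore machinery that retranslates
   the block with search_pc set):

       int j = find_opc_index(tb, searched_pc);   // hypothetical helper
       gen_pc_load(env, tb, searched_pc, j, NULL);

   This restores env->eip and, when the flags computation was static at
   that point, env->cc_op as well. */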