VirtualBox

source: vbox/trunk/src/recompiler_new/target-i386/op_helper.c@ 15945

Last change on this file since 15945 was 15903, checked in by vboxsync, 16 years ago: REM: fixed bug in VME POPF Michal found

1/*
2 * i386 helpers
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21/*
22 * Sun LGPL Disclaimer: For the avoidance of doubt, except that if any license choice
23 * other than GPL or LGPL is available it will apply instead, Sun elects to use only
24 * the Lesser General Public License version 2.1 (LGPLv2) at this time for any software where
25 * a choice of LGPL license versions is made available with the language indicating
26 * that LGPLv2 or any later version may be used, or where a choice of which version
27 * of the LGPL is applied is otherwise unspecified.
28 */
29#define CPU_NO_GLOBAL_REGS
30#include "exec.h"
31#include "host-utils.h"
32
33#ifdef VBOX
34# ifdef VBOX_WITH_VMI
35# include <VBox/parav.h>
36# endif
37#include "qemu-common.h"
38#include <math.h>
39#include "tcg.h"
40#endif
41//#define DEBUG_PCALL
42
43#if 0
44#define raise_exception_err(a, b)\
45do {\
46 if (logfile)\
47 fprintf(logfile, "raise_exception line=%d\n", __LINE__);\
48 (raise_exception_err)(a, b);\
49} while (0)
50#endif
51
52const uint8_t parity_table[256] = {
53 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
54 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
55 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
56 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
57 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
58 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
59 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
60 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
61 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
62 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
63 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
64 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
65 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
66 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
67 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
68 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
69 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
70 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
71 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
72 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
73 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
74 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
75 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
76 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
77 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
78 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
79 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
80 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
81 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
82 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
83 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
84 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
85};
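/* Note: parity_table[b] is CC_P exactly when the byte value b has an even
 * number of set bits, matching the x86 PF definition (PF only reflects the
 * low 8 bits of a result).  A table like this could be generated roughly as:
 *
 *   int i, bits;
 *   for (i = 0; i < 256; i++) {
 *       bits = (i & 1) + ((i >> 1) & 1) + ((i >> 2) & 1) + ((i >> 3) & 1)
 *            + ((i >> 4) & 1) + ((i >> 5) & 1) + ((i >> 6) & 1) + ((i >> 7) & 1);
 *       table[i] = (bits & 1) ? 0 : CC_P;   /* 'table' is just a sketch name */
 *   }
 */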
86
87/* modulo 17 table */
88const uint8_t rclw_table[32] = {
89 0, 1, 2, 3, 4, 5, 6, 7,
90 8, 9,10,11,12,13,14,15,
91 16, 0, 1, 2, 3, 4, 5, 6,
92 7, 8, 9,10,11,12,13,14,
93};
94
95/* modulo 9 table */
96const uint8_t rclb_table[32] = {
97 0, 1, 2, 3, 4, 5, 6, 7,
98 8, 0, 1, 2, 3, 4, 5, 6,
99 7, 8, 0, 1, 2, 3, 4, 5,
100 6, 7, 8, 0, 1, 2, 3, 4,
101};
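/* Note: RCL/RCR rotate through CF, so an 8-bit rotate effectively acts on
 * 9 bits and a 16-bit rotate on 17 bits.  These tables reduce a masked shift
 * count (0..31) to count % 9 and count % 17 respectively, e.g.
 * rclb_table[10] == 1 and rclw_table[20] == 3, letting the rotate helpers
 * avoid a runtime modulo. */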
102
103const CPU86_LDouble f15rk[7] =
104{
105 0.00000000000000000000L,
106 1.00000000000000000000L,
107 3.14159265358979323851L, /*pi*/
108 0.30102999566398119523L, /*lg2*/
109 0.69314718055994530943L, /*ln2*/
110 1.44269504088896340739L, /*l2e*/
111 3.32192809488736234781L, /*l2t*/
112};
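/* Note: these are the constants loaded by the x87 load-constant instructions
 * (FLDZ, FLD1, FLDPI, FLDLG2, FLDLN2, FLDL2E, FLDL2T); the load helpers
 * elsewhere in this file just copy an entry, e.g. FLDPI boils down to
 * ST0 = f15rk[2]. */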
113
114/* broken thread support */
115
116spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;
117
118void helper_lock(void)
119{
120 spin_lock(&global_cpu_lock);
121}
122
123void helper_unlock(void)
124{
125 spin_unlock(&global_cpu_lock);
126}
127
128void helper_write_eflags(target_ulong t0, uint32_t update_mask)
129{
130 load_eflags(t0, update_mask);
131}
132
133target_ulong helper_read_eflags(void)
134{
135 uint32_t eflags;
136 eflags = cc_table[CC_OP].compute_all();
137 eflags |= (DF & DF_MASK);
138 eflags |= env->eflags & ~(VM_MASK | RF_MASK);
139 return eflags;
140}
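/* Note: the arithmetic flags are evaluated lazily, so reading EFLAGS
 * recombines three sources: the flags computed from the current CC_OP, the
 * direction flag kept separately in DF, and the remaining bits held in
 * env->eflags (with VM and RF masked out, as a PUSHF-style read must not
 * report them). */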
141
142#ifdef VBOX
143void helper_write_eflags_vme(target_ulong t0)
144{
145 unsigned int new_eflags = t0;
146
147 assert(env->eflags & (1<<VM_SHIFT));
148
149 /* if virtual interrupt pending and (virtual) interrupts will be enabled -> #GP */
150 /* if TF will be set -> #GP */
151 if ( ((new_eflags & IF_MASK) && (env->eflags & VIP_MASK))
152 || (new_eflags & TF_MASK)) {
153 raise_exception(EXCP0D_GPF);
154 } else {
155 load_eflags(new_eflags,
156 (TF_MASK | AC_MASK | ID_MASK | NT_MASK) & 0xffff);
157
158 if (new_eflags & IF_MASK) {
159 env->eflags |= VIF_MASK;
160 } else {
161 env->eflags &= ~VIF_MASK;
162 }
163 }
164}
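/* Note: this is the VME (CR4.VME=1) variant of POPF for virtual-8086 mode
 * with IOPL < 3: instead of faulting, the IF bit of the popped image is
 * routed to VIF while the real IF and IOPL stay unchanged, and #GP is only
 * raised if TF would be set or if interrupts would be (virtually) enabled
 * while VIP is pending. */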
165
166target_ulong helper_read_eflags_vme(void)
167{
168 uint32_t eflags;
169 eflags = cc_table[CC_OP].compute_all();
170 eflags |= (DF & DF_MASK);
171 eflags |= env->eflags & ~(VM_MASK | RF_MASK);
172 if (env->eflags & VIF_MASK)
173 eflags |= IF_MASK;
174 else
175 eflags &= ~IF_MASK;
176
177 /* According to the AMD manual, this should be read with IOPL == 3 */
178 eflags |= (3 << IOPL_SHIFT);
179
180 /* We only use helper_read_eflags_vme() in 16-bit mode */
181 return eflags & 0xffff;
182}
183
184void helper_dump_state()
185{
186 LogRel(("CS:EIP=%08x:%08x, FLAGS=%08x\n", env->segs[R_CS].base, env->eip, env->eflags));
187 LogRel(("EAX=%08x\tECX=%08x\tEDX=%08x\tEBX=%08x\n",
188 (uint32_t)env->regs[R_EAX], (uint32_t)env->regs[R_ECX],
189 (uint32_t)env->regs[R_EDX], (uint32_t)env->regs[R_EBX]));
190 LogRel(("ESP=%08x\tEBP=%08x\tESI=%08x\tEDI=%08x\n",
191 (uint32_t)env->regs[R_ESP], (uint32_t)env->regs[R_EBP],
192 (uint32_t)env->regs[R_ESI], (uint32_t)env->regs[R_EDI]));
193}
194#endif
195
196/* return non-zero on error */
197#ifndef VBOX
198static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
199#else /* VBOX */
200DECLINLINE(int) load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
201#endif /* VBOX */
202 int selector)
203{
204 SegmentCache *dt;
205 int index;
206 target_ulong ptr;
207
208#ifdef VBOX
209 /* Trying to load a selector with RPL=1 while in ring 0? */
210 if ((env->hflags & HF_CPL_MASK) == 0 && (selector & 3) == 1 && (env->state & CPU_RAW_RING0))
211 {
212 Log(("RPL 1 -> sel %04X -> %04X\n", selector, selector & 0xfffc));
213 selector = selector & 0xfffc;
214 }
215#endif
216
217 if (selector & 0x4)
218 dt = &env->ldt;
219 else
220 dt = &env->gdt;
221 index = selector & ~7;
222 if ((index + 7) > dt->limit)
223 return -1;
224 ptr = dt->base + index;
225 *e1_ptr = ldl_kernel(ptr);
226 *e2_ptr = ldl_kernel(ptr + 4);
227 return 0;
228}
229
230#ifndef VBOX
231static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
232#else /* VBOX */
233DECLINLINE(unsigned int) get_seg_limit(uint32_t e1, uint32_t e2)
234#endif /* VBOX */
235{
236 unsigned int limit;
237 limit = (e1 & 0xffff) | (e2 & 0x000f0000);
238 if (e2 & DESC_G_MASK)
239 limit = (limit << 12) | 0xfff;
240 return limit;
241}
242
243#ifndef VBOX
244static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
245#else /* VBOX */
246DECLINLINE(uint32_t) get_seg_base(uint32_t e1, uint32_t e2)
247#endif /* VBOX */
248{
249 return ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
250}
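/* Note: e1/e2 are the low and high 32-bit words of a descriptor.  The base is
 * scattered over e1[31:16], e2[7:0] and e2[31:24]; the limit is e1[15:0] plus
 * e2[19:16], scaled by 4K when the granularity bit is set.  For example, a
 * flat 4GB data descriptor with e1=0x0000ffff, e2=0x00cf9300 decodes to
 * base 0 and limit 0xffffffff. */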
251
252#ifndef VBOX
253static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
254#else /* VBOX */
255DECLINLINE(void) load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
256#endif /* VBOX */
257{
258 sc->base = get_seg_base(e1, e2);
259 sc->limit = get_seg_limit(e1, e2);
260 sc->flags = e2;
261}
262
263/* init the segment cache in vm86 mode. */
264#ifndef VBOX
265static inline void load_seg_vm(int seg, int selector)
266#else /* VBOX */
267DECLINLINE(void) load_seg_vm(int seg, int selector)
268#endif /* VBOX */
269{
270 selector &= 0xffff;
271#ifdef VBOX
272 unsigned flags = DESC_P_MASK | DESC_S_MASK | DESC_W_MASK;
273
274 if (seg == R_CS)
275 flags |= DESC_CS_MASK;
276
277 cpu_x86_load_seg_cache(env, seg, selector,
278 (selector << 4), 0xffff, flags);
279#else
280 cpu_x86_load_seg_cache(env, seg, selector,
281 (selector << 4), 0xffff, 0);
282#endif
283}
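/* Note: in vm86 mode a segment register is just a paragraph number, so the
 * cached descriptor is synthesized as base = selector << 4 with a 64K limit
 * (e.g. loading 0xb800 gives base 0xb8000).  The VBOX variant additionally
 * marks the entry present and writable (code for CS) so later checks on the
 * cached flags behave sensibly. */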
284
285#ifndef VBOX
286static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
287#else /* VBOX */
288DECLINLINE(void) get_ss_esp_from_tss(uint32_t *ss_ptr,
289#endif /* VBOX */
290 uint32_t *esp_ptr, int dpl)
291{
292#ifndef VBOX
293 int type, index, shift;
294#else
295 unsigned int type, index, shift;
296#endif
297
298#if 0
299 {
300 int i;
301 printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
302 for(i=0;i<env->tr.limit;i++) {
303 printf("%02x ", env->tr.base[i]);
304 if ((i & 7) == 7) printf("\n");
305 }
306 printf("\n");
307 }
308#endif
309
310 if (!(env->tr.flags & DESC_P_MASK))
311 cpu_abort(env, "invalid tss");
312 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
313 if ((type & 7) != 1)
314 cpu_abort(env, "invalid tss type");
315 shift = type >> 3;
316 index = (dpl * 4 + 2) << shift;
317 if (index + (4 << shift) - 1 > env->tr.limit)
318 raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
319 if (shift == 0) {
320 *esp_ptr = lduw_kernel(env->tr.base + index);
321 *ss_ptr = lduw_kernel(env->tr.base + index + 2);
322 } else {
323 *esp_ptr = ldl_kernel(env->tr.base + index);
324 *ss_ptr = lduw_kernel(env->tr.base + index + 4);
325 }
326}
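/* Note: the inner-level stack pointers sit at fixed offsets in the TSS.  With
 * a 32-bit TSS (shift == 1) the index works out to 8*dpl + 4, i.e. ESP0 at
 * 0x04 and SS0 at 0x08, ESP1 at 0x0c and SS1 at 0x10, and so on; a 16-bit TSS
 * packs the same SP/SS pairs in 4-byte strides starting at offset 2. */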
327
328/* XXX: merge with load_seg() */
329static void tss_load_seg(int seg_reg, int selector)
330{
331 uint32_t e1, e2;
332 int rpl, dpl, cpl;
333
334#ifdef VBOX
335 e1 = e2 = 0;
336 cpl = env->hflags & HF_CPL_MASK;
337 /* Trying to load a selector with RPL=1 while in ring 0? */
338 if (cpl == 0 && (selector & 3) == 1 && (env->state & CPU_RAW_RING0))
339 {
340 Log(("RPL 1 -> sel %04X -> %04X\n", selector, selector & 0xfffc));
341 selector = selector & 0xfffc;
342 }
343#endif
344
345 if ((selector & 0xfffc) != 0) {
346 if (load_segment(&e1, &e2, selector) != 0)
347 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
348 if (!(e2 & DESC_S_MASK))
349 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
350 rpl = selector & 3;
351 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
352 cpl = env->hflags & HF_CPL_MASK;
353 if (seg_reg == R_CS) {
354 if (!(e2 & DESC_CS_MASK))
355 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
356 /* XXX: is it correct ? */
357 if (dpl != rpl)
358 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
359 if ((e2 & DESC_C_MASK) && dpl > rpl)
360 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
361 } else if (seg_reg == R_SS) {
362 /* SS must be writable data */
363 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
364 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
365 if (dpl != cpl || dpl != rpl)
366 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
367 } else {
368 /* not readable code */
369 if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
370 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
371 /* if data or non-conforming code, check the access rights */
372 if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
373 if (dpl < cpl || dpl < rpl)
374 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
375 }
376 }
377 if (!(e2 & DESC_P_MASK))
378 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
379 cpu_x86_load_seg_cache(env, seg_reg, selector,
380 get_seg_base(e1, e2),
381 get_seg_limit(e1, e2),
382 e2);
383 } else {
384 if (seg_reg == R_SS || seg_reg == R_CS)
385 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
386 }
387}
388
389#define SWITCH_TSS_JMP 0
390#define SWITCH_TSS_IRET 1
391#define SWITCH_TSS_CALL 2
392
393/* XXX: restore CPU state in registers (PowerPC case) */
394static void switch_tss(int tss_selector,
395 uint32_t e1, uint32_t e2, int source,
396 uint32_t next_eip)
397{
398 int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
399 target_ulong tss_base;
400 uint32_t new_regs[8], new_segs[6];
401 uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
402 uint32_t old_eflags, eflags_mask;
403 SegmentCache *dt;
404#ifndef VBOX
405 int index;
406#else
407 unsigned int index;
408#endif
409 target_ulong ptr;
410
411 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
412#ifdef DEBUG_PCALL
413 if (loglevel & CPU_LOG_PCALL)
414 fprintf(logfile, "switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);
415#endif
416
417#if defined(VBOX) && defined(DEBUG)
418 printf("switch_tss %x %x %x %d %08x\n", tss_selector, e1, e2, source, next_eip);
419#endif
420
421 /* if task gate, we read the TSS segment and we load it */
422 if (type == 5) {
423 if (!(e2 & DESC_P_MASK))
424 raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
425 tss_selector = e1 >> 16;
426 if (tss_selector & 4)
427 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
428 if (load_segment(&e1, &e2, tss_selector) != 0)
429 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
430 if (e2 & DESC_S_MASK)
431 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
432 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
433 if ((type & 7) != 1)
434 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
435 }
436
437 if (!(e2 & DESC_P_MASK))
438 raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
439
440 if (type & 8)
441 tss_limit_max = 103;
442 else
443 tss_limit_max = 43;
444 tss_limit = get_seg_limit(e1, e2);
445 tss_base = get_seg_base(e1, e2);
446 if ((tss_selector & 4) != 0 ||
447 tss_limit < tss_limit_max)
448 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
449 old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
450 if (old_type & 8)
451 old_tss_limit_max = 103;
452 else
453 old_tss_limit_max = 43;
454
455 /* read all the registers from the new TSS */
456 if (type & 8) {
457 /* 32 bit */
458 new_cr3 = ldl_kernel(tss_base + 0x1c);
459 new_eip = ldl_kernel(tss_base + 0x20);
460 new_eflags = ldl_kernel(tss_base + 0x24);
461 for(i = 0; i < 8; i++)
462 new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
463 for(i = 0; i < 6; i++)
464 new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
465 new_ldt = lduw_kernel(tss_base + 0x60);
466 new_trap = ldl_kernel(tss_base + 0x64);
467 } else {
468 /* 16 bit */
469 new_cr3 = 0;
470 new_eip = lduw_kernel(tss_base + 0x0e);
471 new_eflags = lduw_kernel(tss_base + 0x10);
472 for(i = 0; i < 8; i++)
473 new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
474 for(i = 0; i < 4; i++)
475 new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
476 new_ldt = lduw_kernel(tss_base + 0x2a);
477 new_segs[R_FS] = 0;
478 new_segs[R_GS] = 0;
479 new_trap = 0;
480 }
481
482 /* NOTE: we must avoid memory exceptions during the task switch,
483 so we make dummy accesses before */
484 /* XXX: it can still fail in some cases, so a bigger hack is
485 necessary to validate the TLB after having done the accesses */
486
487 v1 = ldub_kernel(env->tr.base);
488 v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
489 stb_kernel(env->tr.base, v1);
490 stb_kernel(env->tr.base + old_tss_limit_max, v2);
491
492 /* clear busy bit (it is restartable) */
493 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
494 target_ulong ptr;
495 uint32_t e2;
496 ptr = env->gdt.base + (env->tr.selector & ~7);
497 e2 = ldl_kernel(ptr + 4);
498 e2 &= ~DESC_TSS_BUSY_MASK;
499 stl_kernel(ptr + 4, e2);
500 }
501 old_eflags = compute_eflags();
502 if (source == SWITCH_TSS_IRET)
503 old_eflags &= ~NT_MASK;
504
505 /* save the current state in the old TSS */
506 if (type & 8) {
507 /* 32 bit */
508 stl_kernel(env->tr.base + 0x20, next_eip);
509 stl_kernel(env->tr.base + 0x24, old_eflags);
510 stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
511 stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
512 stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
513 stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
514 stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
515 stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
516 stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
517 stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
518 for(i = 0; i < 6; i++)
519 stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
520#if defined(VBOX) && defined(DEBUG)
521 printf("TSS 32 bits switch\n");
522 printf("Saving CS=%08X\n", env->segs[R_CS].selector);
523#endif
524 } else {
525 /* 16 bit */
526 stw_kernel(env->tr.base + 0x0e, next_eip);
527 stw_kernel(env->tr.base + 0x10, old_eflags);
528 stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
529 stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
530 stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
531 stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
532 stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
533 stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
534 stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
535 stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
536 for(i = 0; i < 4; i++)
537 stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
538 }
539
540 /* now if an exception occurs, it will occur in the next task
541 context */
542
543 if (source == SWITCH_TSS_CALL) {
544 stw_kernel(tss_base, env->tr.selector);
545 new_eflags |= NT_MASK;
546 }
547
548 /* set busy bit */
549 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
550 target_ulong ptr;
551 uint32_t e2;
552 ptr = env->gdt.base + (tss_selector & ~7);
553 e2 = ldl_kernel(ptr + 4);
554 e2 |= DESC_TSS_BUSY_MASK;
555 stl_kernel(ptr + 4, e2);
556 }
557
558 /* set the new CPU state */
559 /* from this point, any exception which occurs can give problems */
560 env->cr[0] |= CR0_TS_MASK;
561 env->hflags |= HF_TS_MASK;
562 env->tr.selector = tss_selector;
563 env->tr.base = tss_base;
564 env->tr.limit = tss_limit;
565 env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;
566
567 if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
568 cpu_x86_update_cr3(env, new_cr3);
569 }
570
571 /* load all registers without an exception, then reload them with
572 possible exception */
573 env->eip = new_eip;
574 eflags_mask = TF_MASK | AC_MASK | ID_MASK |
575 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
576 if (!(type & 8))
577 eflags_mask &= 0xffff;
578 load_eflags(new_eflags, eflags_mask);
579 /* XXX: what to do in 16 bit case ? */
580 EAX = new_regs[0];
581 ECX = new_regs[1];
582 EDX = new_regs[2];
583 EBX = new_regs[3];
584 ESP = new_regs[4];
585 EBP = new_regs[5];
586 ESI = new_regs[6];
587 EDI = new_regs[7];
588 if (new_eflags & VM_MASK) {
589 for(i = 0; i < 6; i++)
590 load_seg_vm(i, new_segs[i]);
591 /* in vm86, CPL is always 3 */
592 cpu_x86_set_cpl(env, 3);
593 } else {
594 /* CPL is set to the RPL of CS */
595 cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
596 /* first just selectors as the rest may trigger exceptions */
597 for(i = 0; i < 6; i++)
598 cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
599 }
600
601 env->ldt.selector = new_ldt & ~4;
602 env->ldt.base = 0;
603 env->ldt.limit = 0;
604 env->ldt.flags = 0;
605
606 /* load the LDT */
607 if (new_ldt & 4)
608 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
609
610 if ((new_ldt & 0xfffc) != 0) {
611 dt = &env->gdt;
612 index = new_ldt & ~7;
613 if ((index + 7) > dt->limit)
614 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
615 ptr = dt->base + index;
616 e1 = ldl_kernel(ptr);
617 e2 = ldl_kernel(ptr + 4);
618 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
619 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
620 if (!(e2 & DESC_P_MASK))
621 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
622 load_seg_cache_raw_dt(&env->ldt, e1, e2);
623 }
624
625 /* load the segments */
626 if (!(new_eflags & VM_MASK)) {
627 tss_load_seg(R_CS, new_segs[R_CS]);
628 tss_load_seg(R_SS, new_segs[R_SS]);
629 tss_load_seg(R_ES, new_segs[R_ES]);
630 tss_load_seg(R_DS, new_segs[R_DS]);
631 tss_load_seg(R_FS, new_segs[R_FS]);
632 tss_load_seg(R_GS, new_segs[R_GS]);
633 }
634
635 /* check that EIP is in the CS segment limits */
636 if (new_eip > env->segs[R_CS].limit) {
637 /* XXX: different exception if CALL ? */
638 raise_exception_err(EXCP0D_GPF, 0);
639 }
640}
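/* Note: rough outline of the hardware task switch above:
 *   1. resolve a task gate to its TSS descriptor and validate type/limit;
 *   2. read the whole new context from the new TSS (the old TSS is
 *      dummy-touched first so the later stores cannot fault halfway through);
 *   3. clear the busy bit on JMP/IRET, save the outgoing registers into the
 *      old TSS, set NT and the backlink on CALL, set the busy bit on JMP/CALL;
 *   4. install CR3, EFLAGS, the general registers, the LDT and finally the
 *      segment registers, any of which may still raise #TS/#GP in the new
 *      task's context. */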
641
642/* check if Port I/O is allowed in TSS */
643#ifndef VBOX
644static inline void check_io(int addr, int size)
645{
646 int io_offset, val, mask;
647
648#else /* VBOX */
649DECLINLINE(void) check_io(int addr, int size)
650{
651 int val, mask;
652 unsigned int io_offset;
653#endif /* VBOX */
654 /* TSS must be a valid 32 bit one */
655 if (!(env->tr.flags & DESC_P_MASK) ||
656 ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
657 env->tr.limit < 103)
658 goto fail;
659 io_offset = lduw_kernel(env->tr.base + 0x66);
660 io_offset += (addr >> 3);
661 /* Note: the check needs two bytes */
662 if ((io_offset + 1) > env->tr.limit)
663 goto fail;
664 val = lduw_kernel(env->tr.base + io_offset);
665 val >>= (addr & 7);
666 mask = (1 << size) - 1;
667 /* all bits must be zero to allow the I/O */
668 if ((val & mask) != 0) {
669 fail:
670 raise_exception_err(EXCP0D_GPF, 0);
671 }
672}
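/* Note: the permission check reads two bytes of the TSS I/O bitmap so that an
 * access spanning a byte boundary is covered.  For instance, a 1-byte access
 * to port 0x3f9 reads the 16 bits at io_offset + (0x3f9 >> 3) = io_offset +
 * 0x7f, shifts them right by (0x3f9 & 7) = 1 and requires the resulting low
 * bit to be clear. */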
673
674#ifdef VBOX
675/* Keep in sync with gen_check_external_event() */
676void helper_check_external_event()
677{
678 if ( (env->interrupt_request & ( CPU_INTERRUPT_EXTERNAL_EXIT
679 | CPU_INTERRUPT_EXTERNAL_TIMER
680 | CPU_INTERRUPT_EXTERNAL_DMA))
681 || ( (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD)
682 && (env->eflags & IF_MASK)
683 && !(env->hflags & HF_INHIBIT_IRQ_MASK) ) )
684 {
685 helper_external_event();
686 }
687
688}
689
690void helper_sync_seg(uint32_t reg)
691{
692 assert(env->segs[reg].newselector != 0);
693 sync_seg(env, reg, env->segs[reg].newselector);
694}
695#endif
696
697void helper_check_iob(uint32_t t0)
698{
699 check_io(t0, 1);
700}
701
702void helper_check_iow(uint32_t t0)
703{
704 check_io(t0, 2);
705}
706
707void helper_check_iol(uint32_t t0)
708{
709 check_io(t0, 4);
710}
711
712void helper_outb(uint32_t port, uint32_t data)
713{
714 cpu_outb(env, port, data & 0xff);
715}
716
717target_ulong helper_inb(uint32_t port)
718{
719 return cpu_inb(env, port);
720}
721
722void helper_outw(uint32_t port, uint32_t data)
723{
724 cpu_outw(env, port, data & 0xffff);
725}
726
727target_ulong helper_inw(uint32_t port)
728{
729 return cpu_inw(env, port);
730}
731
732void helper_outl(uint32_t port, uint32_t data)
733{
734 cpu_outl(env, port, data);
735}
736
737target_ulong helper_inl(uint32_t port)
738{
739 return cpu_inl(env, port);
740}
741
742#ifndef VBOX
743static inline unsigned int get_sp_mask(unsigned int e2)
744#else /* VBOX */
745DECLINLINE(unsigned int) get_sp_mask(unsigned int e2)
746#endif /* VBOX */
747{
748 if (e2 & DESC_B_MASK)
749 return 0xffffffff;
750 else
751 return 0xffff;
752}
753
754#ifdef TARGET_X86_64
755#define SET_ESP(val, sp_mask)\
756do {\
757 if ((sp_mask) == 0xffff)\
758 ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
759 else if ((sp_mask) == 0xffffffffLL)\
760 ESP = (uint32_t)(val);\
761 else\
762 ESP = (val);\
763} while (0)
764#else
765#define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
766#endif
767
768/* in 64-bit machines, this can overflow. So this segment addition macro
769 * can be used to trim the value to 32-bit whenever needed */
770#define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))
771
772/* XXX: add an is_user flag to have proper security support */
773#define PUSHW(ssp, sp, sp_mask, val)\
774{\
775 sp -= 2;\
776 stw_kernel((ssp) + (sp & (sp_mask)), (val));\
777}
778
779#define PUSHL(ssp, sp, sp_mask, val)\
780{\
781 sp -= 4;\
782 stl_kernel(SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val));\
783}
784
785#define POPW(ssp, sp, sp_mask, val)\
786{\
787 val = lduw_kernel((ssp) + (sp & (sp_mask)));\
788 sp += 2;\
789}
790
791#define POPL(ssp, sp, sp_mask, val)\
792{\
793 val = (uint32_t)ldl_kernel(SEG_ADDL(ssp, sp, sp_mask));\
794 sp += 4;\
795}
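/* Note: these macros work on a local copy of the stack pointer; sp_mask
 * selects 16-bit vs 32-bit stack arithmetic (derived from the SS descriptor's
 * B bit) and SEG_ADDL truncates the linear address to 32 bits so base+offset
 * cannot spill into the upper half of a 64-bit target_ulong.  Typical usage,
 * as in the interrupt code below:
 *
 *   esp = ESP;
 *   sp_mask = get_sp_mask(env->segs[R_SS].flags);
 *   ssp = env->segs[R_SS].base;
 *   PUSHL(ssp, esp, sp_mask, value);
 *   SET_ESP(esp, sp_mask);   /- commit the updated stack pointer -/
 */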
796
797/* protected mode interrupt */
798static void do_interrupt_protected(int intno, int is_int, int error_code,
799 unsigned int next_eip, int is_hw)
800{
801 SegmentCache *dt;
802 target_ulong ptr, ssp;
803 int type, dpl, selector, ss_dpl, cpl;
804 int has_error_code, new_stack, shift;
805 uint32_t e1, e2, offset, ss, esp, ss_e1, ss_e2;
806 uint32_t old_eip, sp_mask;
807
808#ifdef VBOX
809 ss = ss_e1 = ss_e2 = 0;
810# ifdef VBOX_WITH_VMI
811 if ( intno == 6
812 && PARAVIsBiosCall(env->pVM, (RTRCPTR)next_eip, env->regs[R_EAX]))
813 {
814 env->exception_index = EXCP_PARAV_CALL;
815 cpu_loop_exit();
816 }
817# endif
818 if (remR3NotifyTrap(env, intno, error_code, next_eip) != VINF_SUCCESS)
819 cpu_loop_exit();
820#endif
821
822 has_error_code = 0;
823 if (!is_int && !is_hw) {
824 switch(intno) {
825 case 8:
826 case 10:
827 case 11:
828 case 12:
829 case 13:
830 case 14:
831 case 17:
832 has_error_code = 1;
833 break;
834 }
835 }
836 if (is_int)
837 old_eip = next_eip;
838 else
839 old_eip = env->eip;
840
841 dt = &env->idt;
842#ifndef VBOX
843 if (intno * 8 + 7 > dt->limit)
844#else
845 if ((unsigned)intno * 8 + 7 > dt->limit)
846#endif
847 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
848 ptr = dt->base + intno * 8;
849 e1 = ldl_kernel(ptr);
850 e2 = ldl_kernel(ptr + 4);
851 /* check gate type */
852 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
853 switch(type) {
854 case 5: /* task gate */
855 /* must do that check here to return the correct error code */
856 if (!(e2 & DESC_P_MASK))
857 raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
858 switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
859 if (has_error_code) {
860 int type;
861 uint32_t mask;
862 /* push the error code */
863 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
864 shift = type >> 3;
865 if (env->segs[R_SS].flags & DESC_B_MASK)
866 mask = 0xffffffff;
867 else
868 mask = 0xffff;
869 esp = (ESP - (2 << shift)) & mask;
870 ssp = env->segs[R_SS].base + esp;
871 if (shift)
872 stl_kernel(ssp, error_code);
873 else
874 stw_kernel(ssp, error_code);
875 SET_ESP(esp, mask);
876 }
877 return;
878 case 6: /* 286 interrupt gate */
879 case 7: /* 286 trap gate */
880 case 14: /* 386 interrupt gate */
881 case 15: /* 386 trap gate */
882 break;
883 default:
884 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
885 break;
886 }
887 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
888 cpl = env->hflags & HF_CPL_MASK;
889 /* check privilege if software int */
890 if (is_int && dpl < cpl)
891 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
892 /* check valid bit */
893 if (!(e2 & DESC_P_MASK))
894 raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
895 selector = e1 >> 16;
896 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
897 if ((selector & 0xfffc) == 0)
898 raise_exception_err(EXCP0D_GPF, 0);
899
900 if (load_segment(&e1, &e2, selector) != 0)
901 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
902 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
903 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
904 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
905 if (dpl > cpl)
906 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
907 if (!(e2 & DESC_P_MASK))
908 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
909 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
910 /* to inner privilege */
911 get_ss_esp_from_tss(&ss, &esp, dpl);
912 if ((ss & 0xfffc) == 0)
913 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
914 if ((ss & 3) != dpl)
915 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
916 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
917 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
918 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
919 if (ss_dpl != dpl)
920 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
921 if (!(ss_e2 & DESC_S_MASK) ||
922 (ss_e2 & DESC_CS_MASK) ||
923 !(ss_e2 & DESC_W_MASK))
924 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
925 if (!(ss_e2 & DESC_P_MASK))
926#ifdef VBOX /* See page 3-477 of 253666.pdf */
927 raise_exception_err(EXCP0C_STACK, ss & 0xfffc);
928#else
929 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
930#endif
931 new_stack = 1;
932 sp_mask = get_sp_mask(ss_e2);
933 ssp = get_seg_base(ss_e1, ss_e2);
934#if defined(VBOX) && defined(DEBUG)
935 printf("new stack %04X:%08X gate dpl=%d\n", ss, esp, dpl);
936#endif
937 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
938 /* to same privilege */
939 if (env->eflags & VM_MASK)
940 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
941 new_stack = 0;
942 sp_mask = get_sp_mask(env->segs[R_SS].flags);
943 ssp = env->segs[R_SS].base;
944 esp = ESP;
945 dpl = cpl;
946 } else {
947 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
948 new_stack = 0; /* avoid warning */
949 sp_mask = 0; /* avoid warning */
950 ssp = 0; /* avoid warning */
951 esp = 0; /* avoid warning */
952 }
953
954 shift = type >> 3;
955
956#if 0
957 /* XXX: check that enough room is available */
958 push_size = 6 + (new_stack << 2) + (has_error_code << 1);
959 if (env->eflags & VM_MASK)
960 push_size += 8;
961 push_size <<= shift;
962#endif
963 if (shift == 1) {
964 if (new_stack) {
965 if (env->eflags & VM_MASK) {
966 PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
967 PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
968 PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
969 PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
970 }
971 PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
972 PUSHL(ssp, esp, sp_mask, ESP);
973 }
974 PUSHL(ssp, esp, sp_mask, compute_eflags());
975 PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
976 PUSHL(ssp, esp, sp_mask, old_eip);
977 if (has_error_code) {
978 PUSHL(ssp, esp, sp_mask, error_code);
979 }
980 } else {
981 if (new_stack) {
982 if (env->eflags & VM_MASK) {
983 PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
984 PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
985 PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
986 PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
987 }
988 PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
989 PUSHW(ssp, esp, sp_mask, ESP);
990 }
991 PUSHW(ssp, esp, sp_mask, compute_eflags());
992 PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
993 PUSHW(ssp, esp, sp_mask, old_eip);
994 if (has_error_code) {
995 PUSHW(ssp, esp, sp_mask, error_code);
996 }
997 }
998
999 if (new_stack) {
1000 if (env->eflags & VM_MASK) {
1001 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
1002 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
1003 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
1004 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
1005 }
1006 ss = (ss & ~3) | dpl;
1007 cpu_x86_load_seg_cache(env, R_SS, ss,
1008 ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
1009 }
1010 SET_ESP(esp, sp_mask);
1011
1012 selector = (selector & ~3) | dpl;
1013 cpu_x86_load_seg_cache(env, R_CS, selector,
1014 get_seg_base(e1, e2),
1015 get_seg_limit(e1, e2),
1016 e2);
1017 cpu_x86_set_cpl(env, dpl);
1018 env->eip = offset;
1019
1020 /* interrupt gate clear IF mask */
1021 if ((type & 1) == 0) {
1022 env->eflags &= ~IF_MASK;
1023 }
1024 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
1025}
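/* Note: the frame built above follows the architectural layout: for an
 * inner-privilege 32-bit gate the new stack receives SS, ESP, EFLAGS, CS, EIP
 * and optionally the error code; when the interrupt arrives from vm86 mode,
 * GS/FS/DS/ES are pushed as well and then cleared so the handler starts with
 * null data selectors. */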
1026#ifdef VBOX
1027
1028/* check if VME interrupt redirection is enabled in TSS */
1029DECLINLINE(bool) is_vme_irq_redirected(int intno)
1030{
1031 unsigned int io_offset, intredir_offset;
1032 unsigned char val, mask;
1033
1034 /* TSS must be a valid 32 bit one */
1035 if (!(env->tr.flags & DESC_P_MASK) ||
1036 ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
1037 env->tr.limit < 103)
1038 goto fail;
1039 io_offset = lduw_kernel(env->tr.base + 0x66);
1040 /* Make sure the io bitmap offset is valid; anything less than sizeof(VBOXTSS) means there's none. */
1041 if (io_offset < 0x68 + 0x20)
1042 io_offset = 0x68 + 0x20;
1043 /* the virtual interrupt redirection bitmap is located below the io bitmap */
1044 intredir_offset = io_offset - 0x20;
1045
1046 intredir_offset += (intno >> 3);
1047 if ((intredir_offset) > env->tr.limit)
1048 goto fail;
1049
1050 val = ldub_kernel(env->tr.base + intredir_offset);
1051 mask = 1 << (unsigned char)(intno & 7);
1052
1053 /* bit set means no redirection. */
1054 if ((val & mask) != 0) {
1055 return false;
1056 }
1057 return true;
1058
1059fail:
1060 raise_exception_err(EXCP0D_GPF, 0);
1061 return true;
1062}
1063
1064/* V86 mode software interrupt with CR4.VME=1 */
1065static void do_soft_interrupt_vme(int intno, int error_code, unsigned int next_eip)
1066{
1067 target_ulong ptr, ssp;
1068 int selector;
1069 uint32_t offset, esp;
1070 uint32_t old_cs, old_eflags;
1071 uint32_t iopl;
1072
1073 iopl = ((env->eflags >> IOPL_SHIFT) & 3);
1074
1075 if (!is_vme_irq_redirected(intno))
1076 {
1077 if (iopl == 3)
1078 {
1079 do_interrupt_protected(intno, 1, error_code, next_eip, 0);
1080 return;
1081 }
1082 else
1083 raise_exception_err(EXCP0D_GPF, 0);
1084 }
1085
1086 /* virtual mode idt is at linear address 0 */
1087 ptr = 0 + intno * 4;
1088 offset = lduw_kernel(ptr);
1089 selector = lduw_kernel(ptr + 2);
1090 esp = ESP;
1091 ssp = env->segs[R_SS].base;
1092 old_cs = env->segs[R_CS].selector;
1093
1094 old_eflags = compute_eflags();
1095 if (iopl < 3)
1096 {
1097 /* copy VIF into IF and set IOPL to 3 */
1098 if (env->eflags & VIF_MASK)
1099 old_eflags |= IF_MASK;
1100 else
1101 old_eflags &= ~IF_MASK;
1102
1103 old_eflags |= (3 << IOPL_SHIFT);
1104 }
1105
1106 /* XXX: use SS segment size ? */
1107 PUSHW(ssp, esp, 0xffff, old_eflags);
1108 PUSHW(ssp, esp, 0xffff, old_cs);
1109 PUSHW(ssp, esp, 0xffff, next_eip);
1110
1111 /* update processor state */
1112 ESP = (ESP & ~0xffff) | (esp & 0xffff);
1113 env->eip = offset;
1114 env->segs[R_CS].selector = selector;
1115 env->segs[R_CS].base = (selector << 4);
1116 env->eflags &= ~(TF_MASK | RF_MASK);
1117
1118 if (iopl < 3)
1119 env->eflags &= ~VIF_MASK;
1120 else
1121 env->eflags &= ~IF_MASK;
1122}
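/* Note: with CR4.VME set, a software INT n in vm86 mode whose bit is clear in
 * the TSS redirection bitmap is delivered through the real-mode style IVT at
 * linear address 0 (4-byte entries), pushing a 16-bit FLAGS image in which
 * VIF substitutes for IF and IOPL reads as 3 when IOPL < 3; otherwise the
 * interrupt falls back to the protected-mode path (IOPL == 3) or faults. */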
1123#endif /* VBOX */
1124
1125#ifdef TARGET_X86_64
1126
1127#define PUSHQ(sp, val)\
1128{\
1129 sp -= 8;\
1130 stq_kernel(sp, (val));\
1131}
1132
1133#define POPQ(sp, val)\
1134{\
1135 val = ldq_kernel(sp);\
1136 sp += 8;\
1137}
1138
1139#ifndef VBOX
1140static inline target_ulong get_rsp_from_tss(int level)
1141#else /* VBOX */
1142DECLINLINE(target_ulong) get_rsp_from_tss(int level)
1143#endif /* VBOX */
1144{
1145 int index;
1146
1147#if 0
1148 printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
1149 env->tr.base, env->tr.limit);
1150#endif
1151
1152 if (!(env->tr.flags & DESC_P_MASK))
1153 cpu_abort(env, "invalid tss");
1154 index = 8 * level + 4;
1155 if ((index + 7) > env->tr.limit)
1156 raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
1157 return ldq_kernel(env->tr.base + index);
1158}
1159
1160/* 64 bit interrupt */
1161static void do_interrupt64(int intno, int is_int, int error_code,
1162 target_ulong next_eip, int is_hw)
1163{
1164 SegmentCache *dt;
1165 target_ulong ptr;
1166 int type, dpl, selector, cpl, ist;
1167 int has_error_code, new_stack;
1168 uint32_t e1, e2, e3, ss;
1169 target_ulong old_eip, esp, offset;
1170
1171#ifdef VBOX
1172 if (remR3NotifyTrap(env, intno, error_code, next_eip) != VINF_SUCCESS)
1173 cpu_loop_exit();
1174#endif
1175
1176 has_error_code = 0;
1177 if (!is_int && !is_hw) {
1178 switch(intno) {
1179 case 8:
1180 case 10:
1181 case 11:
1182 case 12:
1183 case 13:
1184 case 14:
1185 case 17:
1186 has_error_code = 1;
1187 break;
1188 }
1189 }
1190 if (is_int)
1191 old_eip = next_eip;
1192 else
1193 old_eip = env->eip;
1194
1195 dt = &env->idt;
1196 if (intno * 16 + 15 > dt->limit)
1197 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
1198 ptr = dt->base + intno * 16;
1199 e1 = ldl_kernel(ptr);
1200 e2 = ldl_kernel(ptr + 4);
1201 e3 = ldl_kernel(ptr + 8);
1202 /* check gate type */
1203 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
1204 switch(type) {
1205 case 14: /* 386 interrupt gate */
1206 case 15: /* 386 trap gate */
1207 break;
1208 default:
1209 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
1210 break;
1211 }
1212 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1213 cpl = env->hflags & HF_CPL_MASK;
1214 /* check privilege if software int */
1215 if (is_int && dpl < cpl)
1216 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
1217 /* check valid bit */
1218 if (!(e2 & DESC_P_MASK))
1219 raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2);
1220 selector = e1 >> 16;
1221 offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
1222 ist = e2 & 7;
1223 if ((selector & 0xfffc) == 0)
1224 raise_exception_err(EXCP0D_GPF, 0);
1225
1226 if (load_segment(&e1, &e2, selector) != 0)
1227 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1228 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
1229 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1230 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1231 if (dpl > cpl)
1232 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1233 if (!(e2 & DESC_P_MASK))
1234 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
1235 if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))
1236 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1237 if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
1238 /* to inner privilege */
1239 if (ist != 0)
1240 esp = get_rsp_from_tss(ist + 3);
1241 else
1242 esp = get_rsp_from_tss(dpl);
1243 esp &= ~0xfLL; /* align stack */
1244 ss = 0;
1245 new_stack = 1;
1246 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
1247 /* to same privilege */
1248 if (env->eflags & VM_MASK)
1249 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1250 new_stack = 0;
1251 if (ist != 0)
1252 esp = get_rsp_from_tss(ist + 3);
1253 else
1254 esp = ESP;
1255 esp &= ~0xfLL; /* align stack */
1256 dpl = cpl;
1257 } else {
1258 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1259 new_stack = 0; /* avoid warning */
1260 esp = 0; /* avoid warning */
1261 }
1262
1263 PUSHQ(esp, env->segs[R_SS].selector);
1264 PUSHQ(esp, ESP);
1265 PUSHQ(esp, compute_eflags());
1266 PUSHQ(esp, env->segs[R_CS].selector);
1267 PUSHQ(esp, old_eip);
1268 if (has_error_code) {
1269 PUSHQ(esp, error_code);
1270 }
1271
1272 if (new_stack) {
1273 ss = 0 | dpl;
1274 cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
1275 }
1276 ESP = esp;
1277
1278 selector = (selector & ~3) | dpl;
1279 cpu_x86_load_seg_cache(env, R_CS, selector,
1280 get_seg_base(e1, e2),
1281 get_seg_limit(e1, e2),
1282 e2);
1283 cpu_x86_set_cpl(env, dpl);
1284 env->eip = offset;
1285
1286 /* interrupt gate clear IF mask */
1287 if ((type & 1) == 0) {
1288 env->eflags &= ~IF_MASK;
1289 }
1290 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
1291}
1292#endif
1293
1294#if defined(CONFIG_USER_ONLY)
1295void helper_syscall(int next_eip_addend)
1296{
1297 env->exception_index = EXCP_SYSCALL;
1298 env->exception_next_eip = env->eip + next_eip_addend;
1299 cpu_loop_exit();
1300}
1301#else
1302void helper_syscall(int next_eip_addend)
1303{
1304 int selector;
1305
1306 if (!(env->efer & MSR_EFER_SCE)) {
1307 raise_exception_err(EXCP06_ILLOP, 0);
1308 }
1309 selector = (env->star >> 32) & 0xffff;
1310#ifdef TARGET_X86_64
1311 if (env->hflags & HF_LMA_MASK) {
1312 int code64;
1313
1314 ECX = env->eip + next_eip_addend;
1315 env->regs[11] = compute_eflags();
1316
1317 code64 = env->hflags & HF_CS64_MASK;
1318
1319 cpu_x86_set_cpl(env, 0);
1320 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1321 0, 0xffffffff,
1322 DESC_G_MASK | DESC_P_MASK |
1323 DESC_S_MASK |
1324 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
1325 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1326 0, 0xffffffff,
1327 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1328 DESC_S_MASK |
1329 DESC_W_MASK | DESC_A_MASK);
1330 env->eflags &= ~env->fmask;
1331 load_eflags(env->eflags, 0);
1332 if (code64)
1333 env->eip = env->lstar;
1334 else
1335 env->eip = env->cstar;
1336 } else
1337#endif
1338 {
1339 ECX = (uint32_t)(env->eip + next_eip_addend);
1340
1341 cpu_x86_set_cpl(env, 0);
1342 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1343 0, 0xffffffff,
1344 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1345 DESC_S_MASK |
1346 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1347 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1348 0, 0xffffffff,
1349 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1350 DESC_S_MASK |
1351 DESC_W_MASK | DESC_A_MASK);
1352 env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
1353 env->eip = (uint32_t)env->star;
1354 }
1355}
1356#endif
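/* Note: SYSCALL takes its targets from MSRs: the new CS selector comes from
 * STAR[47:32] (SS is that value + 8), the 64-bit entry point from LSTAR (or
 * CSTAR for compatibility-mode code) and the flag bits to clear from FMASK,
 * while the return RIP goes to RCX and the old RFLAGS to R11; the legacy-mode
 * branch instead jumps to the low 32 bits of STAR. */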
1357
1358void helper_sysret(int dflag)
1359{
1360 int cpl, selector;
1361
1362 if (!(env->efer & MSR_EFER_SCE)) {
1363 raise_exception_err(EXCP06_ILLOP, 0);
1364 }
1365 cpl = env->hflags & HF_CPL_MASK;
1366 if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
1367 raise_exception_err(EXCP0D_GPF, 0);
1368 }
1369 selector = (env->star >> 48) & 0xffff;
1370#ifdef TARGET_X86_64
1371 if (env->hflags & HF_LMA_MASK) {
1372 if (dflag == 2) {
1373 cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
1374 0, 0xffffffff,
1375 DESC_G_MASK | DESC_P_MASK |
1376 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1377 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
1378 DESC_L_MASK);
1379 env->eip = ECX;
1380 } else {
1381 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1382 0, 0xffffffff,
1383 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1384 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1385 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1386 env->eip = (uint32_t)ECX;
1387 }
1388 cpu_x86_load_seg_cache(env, R_SS, selector + 8,
1389 0, 0xffffffff,
1390 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1391 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1392 DESC_W_MASK | DESC_A_MASK);
1393 load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
1394 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
1395 cpu_x86_set_cpl(env, 3);
1396 } else
1397#endif
1398 {
1399 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1400 0, 0xffffffff,
1401 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1402 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1403 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1404 env->eip = (uint32_t)ECX;
1405 cpu_x86_load_seg_cache(env, R_SS, selector + 8,
1406 0, 0xffffffff,
1407 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1408 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1409 DESC_W_MASK | DESC_A_MASK);
1410 env->eflags |= IF_MASK;
1411 cpu_x86_set_cpl(env, 3);
1412 }
1413#ifdef USE_KQEMU
1414 if (kqemu_is_ok(env)) {
1415 if (env->hflags & HF_LMA_MASK)
1416 CC_OP = CC_OP_EFLAGS;
1417 env->exception_index = -1;
1418 cpu_loop_exit();
1419 }
1420#endif
1421}
1422
1423#ifdef VBOX
1424/**
1425 * Checks and processes external VMM events.
1426 * Called by op_check_external_event() when any of the flags is set and can be serviced.
1427 */
1428void helper_external_event(void)
1429{
1430#if defined(RT_OS_DARWIN) && defined(VBOX_STRICT)
1431 uintptr_t uESP;
1432 __asm__ __volatile__("movl %%esp, %0" : "=r" (uESP));
1433 AssertMsg(!(uESP & 15), ("esp=%#p\n", uESP));
1434#endif
1435 /* Keep in sync with flags checked by gen_check_external_event() */
1436 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD)
1437 {
1438 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1439 ~CPU_INTERRUPT_EXTERNAL_HARD);
1440 cpu_interrupt(env, CPU_INTERRUPT_HARD);
1441 }
1442 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_EXIT)
1443 {
1444 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1445 ~CPU_INTERRUPT_EXTERNAL_EXIT);
1446 cpu_interrupt(env, CPU_INTERRUPT_EXIT);
1447 }
1448 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_DMA)
1449 {
1450 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1451 ~CPU_INTERRUPT_EXTERNAL_DMA);
1452 remR3DmaRun(env);
1453 }
1454 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_TIMER)
1455 {
1456 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1457 ~CPU_INTERRUPT_EXTERNAL_TIMER);
1458 remR3TimersRun(env);
1459 }
1460}
1461/* helper for recording call instruction addresses for later scanning */
1462void helper_record_call()
1463{
1464 if ( !(env->state & CPU_RAW_RING0)
1465 && (env->cr[0] & CR0_PG_MASK)
1466 && !(env->eflags & X86_EFL_IF))
1467 remR3RecordCall(env);
1468}
1469#endif /* VBOX */
1470
1471/* real mode interrupt */
1472static void do_interrupt_real(int intno, int is_int, int error_code,
1473 unsigned int next_eip)
1474{
1475 SegmentCache *dt;
1476 target_ulong ptr, ssp;
1477 int selector;
1478 uint32_t offset, esp;
1479 uint32_t old_cs, old_eip;
1480
1481 /* real mode (simpler !) */
1482 dt = &env->idt;
1483#ifndef VBOX
1484 if (intno * 4 + 3 > dt->limit)
1485#else
1486 if ((unsigned)intno * 4 + 3 > dt->limit)
1487#endif
1488 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
1489 ptr = dt->base + intno * 4;
1490 offset = lduw_kernel(ptr);
1491 selector = lduw_kernel(ptr + 2);
1492 esp = ESP;
1493 ssp = env->segs[R_SS].base;
1494 if (is_int)
1495 old_eip = next_eip;
1496 else
1497 old_eip = env->eip;
1498 old_cs = env->segs[R_CS].selector;
1499 /* XXX: use SS segment size ? */
1500 PUSHW(ssp, esp, 0xffff, compute_eflags());
1501 PUSHW(ssp, esp, 0xffff, old_cs);
1502 PUSHW(ssp, esp, 0xffff, old_eip);
1503
1504 /* update processor state */
1505 ESP = (ESP & ~0xffff) | (esp & 0xffff);
1506 env->eip = offset;
1507 env->segs[R_CS].selector = selector;
1508 env->segs[R_CS].base = (selector << 4);
1509 env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
1510}
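/* Note: real-mode delivery is essentially a far call through the IVT: entry n
 * occupies the 4 bytes at linear address n*4 (offset then segment), so e.g.
 * INT 0x10 fetches its vector from 0x40.  FLAGS, CS and IP are pushed as
 * 16-bit values and IF/TF/AC/RF are cleared. */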
1511
1512/* fake user mode interrupt */
1513void do_interrupt_user(int intno, int is_int, int error_code,
1514 target_ulong next_eip)
1515{
1516 SegmentCache *dt;
1517 target_ulong ptr;
1518 int dpl, cpl, shift;
1519 uint32_t e2;
1520
1521 dt = &env->idt;
1522 if (env->hflags & HF_LMA_MASK) {
1523 shift = 4;
1524 } else {
1525 shift = 3;
1526 }
1527 ptr = dt->base + (intno << shift);
1528 e2 = ldl_kernel(ptr + 4);
1529
1530 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1531 cpl = env->hflags & HF_CPL_MASK;
1532 /* check privilege if software int */
1533 if (is_int && dpl < cpl)
1534 raise_exception_err(EXCP0D_GPF, (intno << shift) + 2);
1535
1536 /* Since we emulate only user space, we cannot do more than
1537 exit the emulation with the suitable exception and error
1538 code */
1539 if (is_int)
1540 EIP = next_eip;
1541}
1542
1543/*
1544 * Begin execution of an interrupt. is_int is TRUE if coming from
1545 * the int instruction. next_eip is the EIP value AFTER the interrupt
1546 * instruction. It is only relevant if is_int is TRUE.
1547 */
1548void do_interrupt(int intno, int is_int, int error_code,
1549 target_ulong next_eip, int is_hw)
1550{
1551 if (loglevel & CPU_LOG_INT) {
1552 if ((env->cr[0] & CR0_PE_MASK)) {
1553 static int count;
1554 fprintf(logfile, "%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
1555 count, intno, error_code, is_int,
1556 env->hflags & HF_CPL_MASK,
1557 env->segs[R_CS].selector, EIP,
1558 (int)env->segs[R_CS].base + EIP,
1559 env->segs[R_SS].selector, ESP);
1560 if (intno == 0x0e) {
1561 fprintf(logfile, " CR2=" TARGET_FMT_lx, env->cr[2]);
1562 } else {
1563 fprintf(logfile, " EAX=" TARGET_FMT_lx, EAX);
1564 }
1565 fprintf(logfile, "\n");
1566 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
1567#if 0
1568 {
1569 int i;
1570 uint8_t *ptr;
1571 fprintf(logfile, " code=");
1572 ptr = env->segs[R_CS].base + env->eip;
1573 for(i = 0; i < 16; i++) {
1574 fprintf(logfile, " %02x", ldub(ptr + i));
1575 }
1576 fprintf(logfile, "\n");
1577 }
1578#endif
1579 count++;
1580 }
1581 }
1582 if (env->cr[0] & CR0_PE_MASK) {
1583#ifdef TARGET_X86_64
1584 if (env->hflags & HF_LMA_MASK) {
1585 do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
1586 } else
1587#endif
1588 {
1589#ifdef VBOX
1590 /* INT xx in v86 code with VME enabled? */
1591 if ( (env->eflags & VM_MASK)
1592 && (env->cr[4] & CR4_VME_MASK)
1593 && is_int
1594 && !is_hw
1595 && env->eip + 1 != next_eip /* single byte int 3 goes straight to the protected mode handler */
1596 )
1597 do_soft_interrupt_vme(intno, error_code, next_eip);
1598 else
1599#endif /* VBOX */
1600 do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
1601 }
1602 } else {
1603 do_interrupt_real(intno, is_int, error_code, next_eip);
1604 }
1605}
1606
1607/*
1608 * Check nested exceptions and change to double or triple fault if
1609 * needed. It should only be called if this is not an interrupt.
1610 * Returns the new exception number.
1611 */
1612static int check_exception(int intno, int *error_code)
1613{
1614 int first_contributory = env->old_exception == 0 ||
1615 (env->old_exception >= 10 &&
1616 env->old_exception <= 13);
1617 int second_contributory = intno == 0 ||
1618 (intno >= 10 && intno <= 13);
1619
1620 if (loglevel & CPU_LOG_INT)
1621 fprintf(logfile, "check_exception old: 0x%x new 0x%x\n",
1622 env->old_exception, intno);
1623
1624 if (env->old_exception == EXCP08_DBLE)
1625 cpu_abort(env, "triple fault");
1626
1627 if ((first_contributory && second_contributory)
1628 || (env->old_exception == EXCP0E_PAGE &&
1629 (second_contributory || (intno == EXCP0E_PAGE)))) {
1630 intno = EXCP08_DBLE;
1631 *error_code = 0;
1632 }
1633
1634 if (second_contributory || (intno == EXCP0E_PAGE) ||
1635 (intno == EXCP08_DBLE))
1636 env->old_exception = intno;
1637
1638 return intno;
1639}
1640
1641/*
1642 * Signal an interruption. It is executed in the main CPU loop.
1643 * is_int is TRUE if coming from the int instruction. next_eip is the
1644 * EIP value AFTER the interrupt instruction. It is only relevant if
1645 * is_int is TRUE.
1646 */
1647void raise_interrupt(int intno, int is_int, int error_code,
1648 int next_eip_addend)
1649{
1650#if defined(VBOX) && defined(DEBUG)
1651 NOT_DMIK(Log2(("raise_interrupt: %x %x %x %RGv\n", intno, is_int, error_code, env->eip + next_eip_addend)));
1652#endif
1653 if (!is_int) {
1654 helper_svm_check_intercept_param(SVM_EXIT_EXCP_BASE + intno, error_code);
1655 intno = check_exception(intno, &error_code);
1656 } else {
1657 helper_svm_check_intercept_param(SVM_EXIT_SWINT, 0);
1658 }
1659
1660 env->exception_index = intno;
1661 env->error_code = error_code;
1662 env->exception_is_int = is_int;
1663 env->exception_next_eip = env->eip + next_eip_addend;
1664 cpu_loop_exit();
1665}
1666
1667/* shortcuts to generate exceptions */
1668
1669void (raise_exception_err)(int exception_index, int error_code)
1670{
1671 raise_interrupt(exception_index, 0, error_code, 0);
1672}
1673
1674void raise_exception(int exception_index)
1675{
1676 raise_interrupt(exception_index, 0, 0, 0);
1677}
1678
1679/* SMM support */
1680
1681#if defined(CONFIG_USER_ONLY)
1682
1683void do_smm_enter(void)
1684{
1685}
1686
1687void helper_rsm(void)
1688{
1689}
1690
1691#else
1692
1693#ifdef TARGET_X86_64
1694#define SMM_REVISION_ID 0x00020064
1695#else
1696#define SMM_REVISION_ID 0x00020000
1697#endif
1698
1699void do_smm_enter(void)
1700{
1701 target_ulong sm_state;
1702 SegmentCache *dt;
1703 int i, offset;
1704
1705 if (loglevel & CPU_LOG_INT) {
1706 fprintf(logfile, "SMM: enter\n");
1707 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
1708 }
1709
1710 env->hflags |= HF_SMM_MASK;
1711 cpu_smm_update(env);
1712
1713 sm_state = env->smbase + 0x8000;
1714
1715#ifdef TARGET_X86_64
1716 for(i = 0; i < 6; i++) {
1717 dt = &env->segs[i];
1718 offset = 0x7e00 + i * 16;
1719 stw_phys(sm_state + offset, dt->selector);
1720 stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
1721 stl_phys(sm_state + offset + 4, dt->limit);
1722 stq_phys(sm_state + offset + 8, dt->base);
1723 }
1724
1725 stq_phys(sm_state + 0x7e68, env->gdt.base);
1726 stl_phys(sm_state + 0x7e64, env->gdt.limit);
1727
1728 stw_phys(sm_state + 0x7e70, env->ldt.selector);
1729 stq_phys(sm_state + 0x7e78, env->ldt.base);
1730 stl_phys(sm_state + 0x7e74, env->ldt.limit);
1731 stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);
1732
1733 stq_phys(sm_state + 0x7e88, env->idt.base);
1734 stl_phys(sm_state + 0x7e84, env->idt.limit);
1735
1736 stw_phys(sm_state + 0x7e90, env->tr.selector);
1737 stq_phys(sm_state + 0x7e98, env->tr.base);
1738 stl_phys(sm_state + 0x7e94, env->tr.limit);
1739 stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);
1740
1741 stq_phys(sm_state + 0x7ed0, env->efer);
1742
1743 stq_phys(sm_state + 0x7ff8, EAX);
1744 stq_phys(sm_state + 0x7ff0, ECX);
1745 stq_phys(sm_state + 0x7fe8, EDX);
1746 stq_phys(sm_state + 0x7fe0, EBX);
1747 stq_phys(sm_state + 0x7fd8, ESP);
1748 stq_phys(sm_state + 0x7fd0, EBP);
1749 stq_phys(sm_state + 0x7fc8, ESI);
1750 stq_phys(sm_state + 0x7fc0, EDI);
1751 for(i = 8; i < 16; i++)
1752 stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
1753 stq_phys(sm_state + 0x7f78, env->eip);
1754 stl_phys(sm_state + 0x7f70, compute_eflags());
1755 stl_phys(sm_state + 0x7f68, env->dr[6]);
1756 stl_phys(sm_state + 0x7f60, env->dr[7]);
1757
1758 stl_phys(sm_state + 0x7f48, env->cr[4]);
1759 stl_phys(sm_state + 0x7f50, env->cr[3]);
1760 stl_phys(sm_state + 0x7f58, env->cr[0]);
1761
1762 stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1763 stl_phys(sm_state + 0x7f00, env->smbase);
1764#else
1765 stl_phys(sm_state + 0x7ffc, env->cr[0]);
1766 stl_phys(sm_state + 0x7ff8, env->cr[3]);
1767 stl_phys(sm_state + 0x7ff4, compute_eflags());
1768 stl_phys(sm_state + 0x7ff0, env->eip);
1769 stl_phys(sm_state + 0x7fec, EDI);
1770 stl_phys(sm_state + 0x7fe8, ESI);
1771 stl_phys(sm_state + 0x7fe4, EBP);
1772 stl_phys(sm_state + 0x7fe0, ESP);
1773 stl_phys(sm_state + 0x7fdc, EBX);
1774 stl_phys(sm_state + 0x7fd8, EDX);
1775 stl_phys(sm_state + 0x7fd4, ECX);
1776 stl_phys(sm_state + 0x7fd0, EAX);
1777 stl_phys(sm_state + 0x7fcc, env->dr[6]);
1778 stl_phys(sm_state + 0x7fc8, env->dr[7]);
1779
1780 stl_phys(sm_state + 0x7fc4, env->tr.selector);
1781 stl_phys(sm_state + 0x7f64, env->tr.base);
1782 stl_phys(sm_state + 0x7f60, env->tr.limit);
1783 stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);
1784
1785 stl_phys(sm_state + 0x7fc0, env->ldt.selector);
1786 stl_phys(sm_state + 0x7f80, env->ldt.base);
1787 stl_phys(sm_state + 0x7f7c, env->ldt.limit);
1788 stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);
1789
1790 stl_phys(sm_state + 0x7f74, env->gdt.base);
1791 stl_phys(sm_state + 0x7f70, env->gdt.limit);
1792
1793 stl_phys(sm_state + 0x7f58, env->idt.base);
1794 stl_phys(sm_state + 0x7f54, env->idt.limit);
1795
1796 for(i = 0; i < 6; i++) {
1797 dt = &env->segs[i];
1798 if (i < 3)
1799 offset = 0x7f84 + i * 12;
1800 else
1801 offset = 0x7f2c + (i - 3) * 12;
1802 stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
1803 stl_phys(sm_state + offset + 8, dt->base);
1804 stl_phys(sm_state + offset + 4, dt->limit);
1805 stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
1806 }
1807 stl_phys(sm_state + 0x7f14, env->cr[4]);
1808
1809 stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1810 stl_phys(sm_state + 0x7ef8, env->smbase);
1811#endif
1812 /* init SMM cpu state */
1813
1814#ifdef TARGET_X86_64
1815 cpu_load_efer(env, 0);
1816#endif
1817 load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1818 env->eip = 0x00008000;
1819 cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
1820 0xffffffff, 0);
1821 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
1822 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
1823 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
1824 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
1825 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);
1826
1827 cpu_x86_update_cr0(env,
1828 env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK));
1829 cpu_x86_update_cr4(env, 0);
1830 env->dr[7] = 0x00000400;
1831 CC_OP = CC_OP_EFLAGS;
1832}
1833
1834void helper_rsm(void)
1835{
1836#ifdef VBOX
1837 cpu_abort(env, "helper_rsm");
1838#else /* !VBOX */
1839 target_ulong sm_state;
1842 int i, offset;
1843 uint32_t val;
1844
1845 sm_state = env->smbase + 0x8000;
1846#ifdef TARGET_X86_64
1847 cpu_load_efer(env, ldq_phys(sm_state + 0x7ed0));
1848
1849 for(i = 0; i < 6; i++) {
1850 offset = 0x7e00 + i * 16;
1851 cpu_x86_load_seg_cache(env, i,
1852 lduw_phys(sm_state + offset),
1853 ldq_phys(sm_state + offset + 8),
1854 ldl_phys(sm_state + offset + 4),
1855 (lduw_phys(sm_state + offset + 2) & 0xf0ff) << 8);
1856 }
1857
1858 env->gdt.base = ldq_phys(sm_state + 0x7e68);
1859 env->gdt.limit = ldl_phys(sm_state + 0x7e64);
1860
1861 env->ldt.selector = lduw_phys(sm_state + 0x7e70);
1862 env->ldt.base = ldq_phys(sm_state + 0x7e78);
1863 env->ldt.limit = ldl_phys(sm_state + 0x7e74);
1864 env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;
1865
1866 env->idt.base = ldq_phys(sm_state + 0x7e88);
1867 env->idt.limit = ldl_phys(sm_state + 0x7e84);
1868
1869 env->tr.selector = lduw_phys(sm_state + 0x7e90);
1870 env->tr.base = ldq_phys(sm_state + 0x7e98);
1871 env->tr.limit = ldl_phys(sm_state + 0x7e94);
1872 env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;
1873
1874 EAX = ldq_phys(sm_state + 0x7ff8);
1875 ECX = ldq_phys(sm_state + 0x7ff0);
1876 EDX = ldq_phys(sm_state + 0x7fe8);
1877 EBX = ldq_phys(sm_state + 0x7fe0);
1878 ESP = ldq_phys(sm_state + 0x7fd8);
1879 EBP = ldq_phys(sm_state + 0x7fd0);
1880 ESI = ldq_phys(sm_state + 0x7fc8);
1881 EDI = ldq_phys(sm_state + 0x7fc0);
1882 for(i = 8; i < 16; i++)
1883 env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
1884 env->eip = ldq_phys(sm_state + 0x7f78);
1885 load_eflags(ldl_phys(sm_state + 0x7f70),
1886 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1887 env->dr[6] = ldl_phys(sm_state + 0x7f68);
1888 env->dr[7] = ldl_phys(sm_state + 0x7f60);
1889
1890 cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
1891 cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
1892 cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));
1893
1894 val = ldl_phys(sm_state + 0x7efc); /* revision ID */
1895 if (val & 0x20000) {
1896 env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
1897 }
1898#else
1899 cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
1900 cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
1901 load_eflags(ldl_phys(sm_state + 0x7ff4),
1902 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1903 env->eip = ldl_phys(sm_state + 0x7ff0);
1904 EDI = ldl_phys(sm_state + 0x7fec);
1905 ESI = ldl_phys(sm_state + 0x7fe8);
1906 EBP = ldl_phys(sm_state + 0x7fe4);
1907 ESP = ldl_phys(sm_state + 0x7fe0);
1908 EBX = ldl_phys(sm_state + 0x7fdc);
1909 EDX = ldl_phys(sm_state + 0x7fd8);
1910 ECX = ldl_phys(sm_state + 0x7fd4);
1911 EAX = ldl_phys(sm_state + 0x7fd0);
1912 env->dr[6] = ldl_phys(sm_state + 0x7fcc);
1913 env->dr[7] = ldl_phys(sm_state + 0x7fc8);
1914
1915 env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
1916 env->tr.base = ldl_phys(sm_state + 0x7f64);
1917 env->tr.limit = ldl_phys(sm_state + 0x7f60);
1918 env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;
1919
1920 env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
1921 env->ldt.base = ldl_phys(sm_state + 0x7f80);
1922 env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
1923 env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;
1924
1925 env->gdt.base = ldl_phys(sm_state + 0x7f74);
1926 env->gdt.limit = ldl_phys(sm_state + 0x7f70);
1927
1928 env->idt.base = ldl_phys(sm_state + 0x7f58);
1929 env->idt.limit = ldl_phys(sm_state + 0x7f54);
1930
1931 for(i = 0; i < 6; i++) {
1932 if (i < 3)
1933 offset = 0x7f84 + i * 12;
1934 else
1935 offset = 0x7f2c + (i - 3) * 12;
1936 cpu_x86_load_seg_cache(env, i,
1937 ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
1938 ldl_phys(sm_state + offset + 8),
1939 ldl_phys(sm_state + offset + 4),
1940 (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
1941 }
1942 cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));
1943
1944 val = ldl_phys(sm_state + 0x7efc); /* revision ID */
1945 if (val & 0x20000) {
1946 env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
1947 }
1948#endif
1949 CC_OP = CC_OP_EFLAGS;
1950 env->hflags &= ~HF_SMM_MASK;
1951 cpu_smm_update(env);
1952
1953 if (loglevel & CPU_LOG_INT) {
1954 fprintf(logfile, "SMM: after RSM\n");
1955 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
1956 }
1957#endif /* !VBOX */
1958}
1959
1960#endif /* !CONFIG_USER_ONLY */
1961
1962
1963/* division, flags are undefined */
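/* Note: a quotient that does not fit in the destination register is reported
   with the same EXCP00_DIVZ exception as a zero divisor, matching the single
   #DE divide-error fault of the hardware. */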
1964
1965void helper_divb_AL(target_ulong t0)
1966{
1967 unsigned int num, den, q, r;
1968
1969 num = (EAX & 0xffff);
1970 den = (t0 & 0xff);
1971 if (den == 0) {
1972 raise_exception(EXCP00_DIVZ);
1973 }
1974 q = (num / den);
1975 if (q > 0xff)
1976 raise_exception(EXCP00_DIVZ);
1977 q &= 0xff;
1978 r = (num % den) & 0xff;
1979 EAX = (EAX & ~0xffff) | (r << 8) | q;
1980}
1981
1982void helper_idivb_AL(target_ulong t0)
1983{
1984 int num, den, q, r;
1985
1986 num = (int16_t)EAX;
1987 den = (int8_t)t0;
1988 if (den == 0) {
1989 raise_exception(EXCP00_DIVZ);
1990 }
1991 q = (num / den);
1992 if (q != (int8_t)q)
1993 raise_exception(EXCP00_DIVZ);
1994 q &= 0xff;
1995 r = (num % den) & 0xff;
1996 EAX = (EAX & ~0xffff) | (r << 8) | q;
1997}
1998
1999void helper_divw_AX(target_ulong t0)
2000{
2001 unsigned int num, den, q, r;
2002
2003 num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
2004 den = (t0 & 0xffff);
2005 if (den == 0) {
2006 raise_exception(EXCP00_DIVZ);
2007 }
2008 q = (num / den);
2009 if (q > 0xffff)
2010 raise_exception(EXCP00_DIVZ);
2011 q &= 0xffff;
2012 r = (num % den) & 0xffff;
2013 EAX = (EAX & ~0xffff) | q;
2014 EDX = (EDX & ~0xffff) | r;
2015}
2016
2017void helper_idivw_AX(target_ulong t0)
2018{
2019 int num, den, q, r;
2020
2021 num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
2022 den = (int16_t)t0;
2023 if (den == 0) {
2024 raise_exception(EXCP00_DIVZ);
2025 }
2026 q = (num / den);
2027 if (q != (int16_t)q)
2028 raise_exception(EXCP00_DIVZ);
2029 q &= 0xffff;
2030 r = (num % den) & 0xffff;
2031 EAX = (EAX & ~0xffff) | q;
2032 EDX = (EDX & ~0xffff) | r;
2033}
2034
2035void helper_divl_EAX(target_ulong t0)
2036{
2037 unsigned int den, r;
2038 uint64_t num, q;
2039
2040 num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
2041 den = t0;
2042 if (den == 0) {
2043 raise_exception(EXCP00_DIVZ);
2044 }
2045 q = (num / den);
2046 r = (num % den);
2047 if (q > 0xffffffff)
2048 raise_exception(EXCP00_DIVZ);
2049 EAX = (uint32_t)q;
2050 EDX = (uint32_t)r;
2051}
2052
2053void helper_idivl_EAX(target_ulong t0)
2054{
2055 int den, r;
2056 int64_t num, q;
2057
2058 num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
2059 den = t0;
2060 if (den == 0) {
2061 raise_exception(EXCP00_DIVZ);
2062 }
2063 q = (num / den);
2064 r = (num % den);
2065 if (q != (int32_t)q)
2066 raise_exception(EXCP00_DIVZ);
2067 EAX = (uint32_t)q;
2068 EDX = (uint32_t)r;
2069}
2070
2071/* bcd */
2072
2073/* XXX: exception */
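/* The XXX above: a real CPU raises #DE when the AAM immediate is zero,
   whereas this helper would perform a host division by zero. */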
2074void helper_aam(int base)
2075{
2076 int al, ah;
2077 al = EAX & 0xff;
2078 ah = al / base;
2079 al = al % base;
2080 EAX = (EAX & ~0xffff) | al | (ah << 8);
2081 CC_DST = al;
2082}
2083
2084void helper_aad(int base)
2085{
2086 int al, ah;
2087 al = EAX & 0xff;
2088 ah = (EAX >> 8) & 0xff;
2089 al = ((ah * base) + al) & 0xff;
2090 EAX = (EAX & ~0xffff) | al;
2091 CC_DST = al;
2092}
2093
2094void helper_aaa(void)
2095{
2096 int icarry;
2097 int al, ah, af;
2098 int eflags;
2099
2100 eflags = cc_table[CC_OP].compute_all();
2101 af = eflags & CC_A;
2102 al = EAX & 0xff;
2103 ah = (EAX >> 8) & 0xff;
2104
2105 icarry = (al > 0xf9);
2106 if (((al & 0x0f) > 9 ) || af) {
2107 al = (al + 6) & 0x0f;
2108 ah = (ah + 1 + icarry) & 0xff;
2109 eflags |= CC_C | CC_A;
2110 } else {
2111 eflags &= ~(CC_C | CC_A);
2112 al &= 0x0f;
2113 }
2114 EAX = (EAX & ~0xffff) | al | (ah << 8);
2115 CC_SRC = eflags;
2116 FORCE_RET();
2117}
2118
2119void helper_aas(void)
2120{
2121 int icarry;
2122 int al, ah, af;
2123 int eflags;
2124
2125 eflags = cc_table[CC_OP].compute_all();
2126 af = eflags & CC_A;
2127 al = EAX & 0xff;
2128 ah = (EAX >> 8) & 0xff;
2129
2130 icarry = (al < 6);
2131 if (((al & 0x0f) > 9 ) || af) {
2132 al = (al - 6) & 0x0f;
2133 ah = (ah - 1 - icarry) & 0xff;
2134 eflags |= CC_C | CC_A;
2135 } else {
2136 eflags &= ~(CC_C | CC_A);
2137 al &= 0x0f;
2138 }
2139 EAX = (EAX & ~0xffff) | al | (ah << 8);
2140 CC_SRC = eflags;
2141 FORCE_RET();
2142}
2143
2144void helper_daa(void)
2145{
2146 int al, af, cf;
2147 int eflags;
2148
2149 eflags = cc_table[CC_OP].compute_all();
2150 cf = eflags & CC_C;
2151 af = eflags & CC_A;
2152 al = EAX & 0xff;
2153
2154 eflags = 0;
2155 if (((al & 0x0f) > 9 ) || af) {
2156 al = (al + 6) & 0xff;
2157 eflags |= CC_A;
2158 }
2159 if ((al > 0x9f) || cf) {
2160 al = (al + 0x60) & 0xff;
2161 eflags |= CC_C;
2162 }
2163 EAX = (EAX & ~0xff) | al;
2164 /* well, speed is not an issue here, so we compute the flags by hand */
2165 eflags |= (al == 0) << 6; /* zf */
2166 eflags |= parity_table[al]; /* pf */
2167 eflags |= (al & 0x80); /* sf */
2168 CC_SRC = eflags;
2169 FORCE_RET();
2170}
2171
2172void helper_das(void)
2173{
2174 int al, al1, af, cf;
2175 int eflags;
2176
2177 eflags = cc_table[CC_OP].compute_all();
2178 cf = eflags & CC_C;
2179 af = eflags & CC_A;
2180 al = EAX & 0xff;
2181
2182 eflags = 0;
2183 al1 = al;
2184 if (((al & 0x0f) > 9 ) || af) {
2185 eflags |= CC_A;
2186 if (al < 6 || cf)
2187 eflags |= CC_C;
2188 al = (al - 6) & 0xff;
2189 }
2190 if ((al1 > 0x99) || cf) {
2191 al = (al - 0x60) & 0xff;
2192 eflags |= CC_C;
2193 }
2194 EAX = (EAX & ~0xff) | al;
2195 /* well, speed is not an issue here, so we compute the flags by hand */
2196 eflags |= (al == 0) << 6; /* zf */
2197 eflags |= parity_table[al]; /* pf */
2198 eflags |= (al & 0x80); /* sf */
2199 CC_SRC = eflags;
2200 FORCE_RET();
2201}
2202
2203void helper_into(int next_eip_addend)
2204{
2205 int eflags;
2206 eflags = cc_table[CC_OP].compute_all();
2207 if (eflags & CC_O) {
2208 raise_interrupt(EXCP04_INTO, 1, 0, next_eip_addend);
2209 }
2210}
2211
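/* CMPXCHG8B: compare EDX:EAX with the 64-bit memory operand; on a match
   store ECX:EBX and set ZF, otherwise load the old value into EDX:EAX and
   clear ZF.  The store in the failure path keeps the memory access pattern
   identical in both cases. */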
2212void helper_cmpxchg8b(target_ulong a0)
2213{
2214 uint64_t d;
2215 int eflags;
2216
2217 eflags = cc_table[CC_OP].compute_all();
2218 d = ldq(a0);
2219 if (d == (((uint64_t)EDX << 32) | (uint32_t)EAX)) {
2220 stq(a0, ((uint64_t)ECX << 32) | (uint32_t)EBX);
2221 eflags |= CC_Z;
2222 } else {
2223 /* always do the store */
2224 stq(a0, d);
2225 EDX = (uint32_t)(d >> 32);
2226 EAX = (uint32_t)d;
2227 eflags &= ~CC_Z;
2228 }
2229 CC_SRC = eflags;
2230}
2231
2232#ifdef TARGET_X86_64
2233void helper_cmpxchg16b(target_ulong a0)
2234{
2235 uint64_t d0, d1;
2236 int eflags;
2237
2238 if ((a0 & 0xf) != 0)
2239 raise_exception(EXCP0D_GPF);
2240 eflags = cc_table[CC_OP].compute_all();
2241 d0 = ldq(a0);
2242 d1 = ldq(a0 + 8);
2243 if (d0 == EAX && d1 == EDX) {
2244 stq(a0, EBX);
2245 stq(a0 + 8, ECX);
2246 eflags |= CC_Z;
2247 } else {
2248 /* always do the store */
2249 stq(a0, d0);
2250 stq(a0 + 8, d1);
2251 EDX = d1;
2252 EAX = d0;
2253 eflags &= ~CC_Z;
2254 }
2255 CC_SRC = eflags;
2256}
2257#endif
2258
2259void helper_single_step(void)
2260{
2261 env->dr[6] |= 0x4000;
2262 raise_exception(EXCP01_SSTP);
2263}
2264
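/* Without VBOX the CPUID leaves are synthesized from the cpuid_* fields of
   the CPU state below; with VBOX the request is forwarded to remR3CpuId so
   that the VMM supplies the values seen by the guest. */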
2265void helper_cpuid(void)
2266{
2267#ifndef VBOX
2268 uint32_t index;
2269
2270 helper_svm_check_intercept_param(SVM_EXIT_CPUID, 0);
2271
2272 index = (uint32_t)EAX;
2273 /* test if maximum index reached */
2274 if (index & 0x80000000) {
2275 if (index > env->cpuid_xlevel)
2276 index = env->cpuid_level;
2277 } else {
2278 if (index > env->cpuid_level)
2279 index = env->cpuid_level;
2280 }
2281
2282 switch(index) {
2283 case 0:
2284 EAX = env->cpuid_level;
2285 EBX = env->cpuid_vendor1;
2286 EDX = env->cpuid_vendor2;
2287 ECX = env->cpuid_vendor3;
2288 break;
2289 case 1:
2290 EAX = env->cpuid_version;
2291 EBX = (env->cpuid_apic_id << 24) | 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
2292 ECX = env->cpuid_ext_features;
2293 EDX = env->cpuid_features;
2294 break;
2295 case 2:
2296 /* cache info: needed for Pentium Pro compatibility */
2297 EAX = 1;
2298 EBX = 0;
2299 ECX = 0;
2300 EDX = 0x2c307d;
2301 break;
2302 case 4:
2303 /* cache info: needed for Core compatibility */
2304 switch (ECX) {
2305 case 0: /* L1 dcache info */
2306 EAX = 0x0000121;
2307 EBX = 0x1c0003f;
2308 ECX = 0x000003f;
2309 EDX = 0x0000001;
2310 break;
2311 case 1: /* L1 icache info */
2312 EAX = 0x0000122;
2313 EBX = 0x1c0003f;
2314 ECX = 0x000003f;
2315 EDX = 0x0000001;
2316 break;
2317 case 2: /* L2 cache info */
2318 EAX = 0x0000143;
2319 EBX = 0x3c0003f;
2320 ECX = 0x0000fff;
2321 EDX = 0x0000001;
2322 break;
2323 default: /* end of info */
2324 EAX = 0;
2325 EBX = 0;
2326 ECX = 0;
2327 EDX = 0;
2328 break;
2329 }
2330
2331 break;
2332 case 5:
2333 /* mwait info: needed for Core compatibility */
2334 EAX = 0; /* Smallest monitor-line size in bytes */
2335 EBX = 0; /* Largest monitor-line size in bytes */
2336 ECX = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
2337 EDX = 0;
2338 break;
2339 case 6:
2340 /* Thermal and Power Leaf */
2341 EAX = 0;
2342 EBX = 0;
2343 ECX = 0;
2344 EDX = 0;
2345 break;
2346 case 9:
2347 /* Direct Cache Access Information Leaf */
2348 EAX = 0; /* Bits 0-31 in DCA_CAP MSR */
2349 EBX = 0;
2350 ECX = 0;
2351 EDX = 0;
2352 break;
2353 case 0xA:
2354 /* Architectural Performance Monitoring Leaf */
2355 EAX = 0;
2356 EBX = 0;
2357 ECX = 0;
2358 EDX = 0;
2359 break;
2360 case 0x80000000:
2361 EAX = env->cpuid_xlevel;
2362 EBX = env->cpuid_vendor1;
2363 EDX = env->cpuid_vendor2;
2364 ECX = env->cpuid_vendor3;
2365 break;
2366 case 0x80000001:
2367 EAX = env->cpuid_features;
2368 EBX = 0;
2369 ECX = env->cpuid_ext3_features;
2370 EDX = env->cpuid_ext2_features;
2371 break;
2372 case 0x80000002:
2373 case 0x80000003:
2374 case 0x80000004:
2375 EAX = env->cpuid_model[(index - 0x80000002) * 4 + 0];
2376 EBX = env->cpuid_model[(index - 0x80000002) * 4 + 1];
2377 ECX = env->cpuid_model[(index - 0x80000002) * 4 + 2];
2378 EDX = env->cpuid_model[(index - 0x80000002) * 4 + 3];
2379 break;
2380 case 0x80000005:
2381 /* cache info (L1 cache) */
2382 EAX = 0x01ff01ff;
2383 EBX = 0x01ff01ff;
2384 ECX = 0x40020140;
2385 EDX = 0x40020140;
2386 break;
2387 case 0x80000006:
2388 /* cache info (L2 cache) */
2389 EAX = 0;
2390 EBX = 0x42004200;
2391 ECX = 0x02008140;
2392 EDX = 0;
2393 break;
2394 case 0x80000008:
2395 /* virtual & phys address size in low 2 bytes. */
2396/* XXX: This value must match the one used in the MMU code. */
2397 if (env->cpuid_ext2_features & CPUID_EXT2_LM) {
2398 /* 64 bit processor */
2399#if defined(USE_KQEMU)
2400 EAX = 0x00003020; /* 48 bits virtual, 32 bits physical */
2401#else
2402/* XXX: The physical address space is limited to 42 bits in exec.c. */
2403 EAX = 0x00003028; /* 48 bits virtual, 40 bits physical */
2404#endif
2405 } else {
2406#if defined(USE_KQEMU)
2407 EAX = 0x00000020; /* 32 bits physical */
2408#else
2409 if (env->cpuid_features & CPUID_PSE36)
2410 EAX = 0x00000024; /* 36 bits physical */
2411 else
2412 EAX = 0x00000020; /* 32 bits physical */
2413#endif
2414 }
2415 EBX = 0;
2416 ECX = 0;
2417 EDX = 0;
2418 break;
2419 case 0x8000000A:
2420 EAX = 0x00000001;
2421 EBX = 0;
2422 ECX = 0;
2423 EDX = 0;
2424 break;
2425 default:
2426 /* reserved values: zero */
2427 EAX = 0;
2428 EBX = 0;
2429 ECX = 0;
2430 EDX = 0;
2431 break;
2432 }
2433#else /* VBOX */
2434 remR3CpuId(env, EAX, &EAX, &EBX, &ECX, &EDX);
2435#endif /* VBOX */
2436}
2437
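/* ENTER with a non-zero nesting level (presumably the only case this helper
   is used for): copy level-1 frame pointers from the old frame onto the new
   stack and finally push t1, the new frame pointer; data32 selects 32-bit
   versus 16-bit stack slots. */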
2438void helper_enter_level(int level, int data32, target_ulong t1)
2439{
2440 target_ulong ssp;
2441 uint32_t esp_mask, esp, ebp;
2442
2443 esp_mask = get_sp_mask(env->segs[R_SS].flags);
2444 ssp = env->segs[R_SS].base;
2445 ebp = EBP;
2446 esp = ESP;
2447 if (data32) {
2448 /* 32 bit */
2449 esp -= 4;
2450 while (--level) {
2451 esp -= 4;
2452 ebp -= 4;
2453 stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
2454 }
2455 esp -= 4;
2456 stl(ssp + (esp & esp_mask), t1);
2457 } else {
2458 /* 16 bit */
2459 esp -= 2;
2460 while (--level) {
2461 esp -= 2;
2462 ebp -= 2;
2463 stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
2464 }
2465 esp -= 2;
2466 stw(ssp + (esp & esp_mask), t1);
2467 }
2468}
2469
2470#ifdef TARGET_X86_64
2471void helper_enter64_level(int level, int data64, target_ulong t1)
2472{
2473 target_ulong esp, ebp;
2474 ebp = EBP;
2475 esp = ESP;
2476
2477 if (data64) {
2478 /* 64 bit */
2479 esp -= 8;
2480 while (--level) {
2481 esp -= 8;
2482 ebp -= 8;
2483 stq(esp, ldq(ebp));
2484 }
2485 esp -= 8;
2486 stq(esp, t1);
2487 } else {
2488 /* 16 bit */
2489 esp -= 2;
2490 while (--level) {
2491 esp -= 2;
2492 ebp -= 2;
2493 stw(esp, lduw(ebp));
2494 }
2495 esp -= 2;
2496 stw(esp, t1);
2497 }
2498}
2499#endif
2500
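/* LLDT: the selector must reference the GDT (TI clear) and point at a
   present LDT system descriptor (type 2); in long mode the entry is 16 bytes
   wide, so the upper half of the base is taken from the third dword. */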
2501void helper_lldt(int selector)
2502{
2503 SegmentCache *dt;
2504 uint32_t e1, e2;
2505#ifndef VBOX
2506 int index, entry_limit;
2507#else
2508 unsigned int index, entry_limit;
2509#endif
2510 target_ulong ptr;
2511
2512#ifdef VBOX
2513 Log(("helper_lldt_T0: old ldtr=%RTsel {.base=%RGv, .limit=%RGv} new=%RTsel\n",
2514 (RTSEL)env->ldt.selector, (RTGCPTR)env->ldt.base, (RTGCPTR)env->ldt.limit, (RTSEL)(selector & 0xffff)));
2515#endif
2516
2517 selector &= 0xffff;
2518 if ((selector & 0xfffc) == 0) {
2519 /* XXX: NULL selector case: invalid LDT */
2520 env->ldt.base = 0;
2521 env->ldt.limit = 0;
2522 } else {
2523 if (selector & 0x4)
2524 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2525 dt = &env->gdt;
2526 index = selector & ~7;
2527#ifdef TARGET_X86_64
2528 if (env->hflags & HF_LMA_MASK)
2529 entry_limit = 15;
2530 else
2531#endif
2532 entry_limit = 7;
2533 if ((index + entry_limit) > dt->limit)
2534 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2535 ptr = dt->base + index;
2536 e1 = ldl_kernel(ptr);
2537 e2 = ldl_kernel(ptr + 4);
2538 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
2539 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2540 if (!(e2 & DESC_P_MASK))
2541 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2542#ifdef TARGET_X86_64
2543 if (env->hflags & HF_LMA_MASK) {
2544 uint32_t e3;
2545 e3 = ldl_kernel(ptr + 8);
2546 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2547 env->ldt.base |= (target_ulong)e3 << 32;
2548 } else
2549#endif
2550 {
2551 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2552 }
2553 }
2554 env->ldt.selector = selector;
2555#ifdef VBOX
2556 Log(("helper_lldt_T0: new ldtr=%RTsel {.base=%RGv, .limit=%RGv}\n",
2557 (RTSEL)env->ldt.selector, (RTGCPTR)env->ldt.base, (RTGCPTR)env->ldt.limit));
2558#endif
2559}
2560
2561void helper_ltr(int selector)
2562{
2563 SegmentCache *dt;
2564 uint32_t e1, e2;
2565#ifndef VBOX
2566 int index, type, entry_limit;
2567#else
2568 unsigned int index;
2569 int type, entry_limit;
2570#endif
2571 target_ulong ptr;
2572
2573#ifdef VBOX
2574 Log(("helper_ltr: old tr=%RTsel {.base=%RGv, .limit=%RGv, .flags=%RX32} new=%RTsel\n",
2575 (RTSEL)env->tr.selector, (RTGCPTR)env->tr.base, (RTGCPTR)env->tr.limit,
2576 env->tr.flags, (RTSEL)(selector & 0xffff)));
2577#endif
2578 selector &= 0xffff;
2579 if ((selector & 0xfffc) == 0) {
2580 /* NULL selector case: invalid TR */
2581 env->tr.base = 0;
2582 env->tr.limit = 0;
2583 env->tr.flags = 0;
2584 } else {
2585 if (selector & 0x4)
2586 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2587 dt = &env->gdt;
2588 index = selector & ~7;
2589#ifdef TARGET_X86_64
2590 if (env->hflags & HF_LMA_MASK)
2591 entry_limit = 15;
2592 else
2593#endif
2594 entry_limit = 7;
2595 if ((index + entry_limit) > dt->limit)
2596 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2597 ptr = dt->base + index;
2598 e1 = ldl_kernel(ptr);
2599 e2 = ldl_kernel(ptr + 4);
2600 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2601 if ((e2 & DESC_S_MASK) ||
2602 (type != 1 && type != 9))
2603 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2604 if (!(e2 & DESC_P_MASK))
2605 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2606#ifdef TARGET_X86_64
2607 if (env->hflags & HF_LMA_MASK) {
2608 uint32_t e3, e4;
2609 e3 = ldl_kernel(ptr + 8);
2610 e4 = ldl_kernel(ptr + 12);
2611 if ((e4 >> DESC_TYPE_SHIFT) & 0xf)
2612 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2613 load_seg_cache_raw_dt(&env->tr, e1, e2);
2614 env->tr.base |= (target_ulong)e3 << 32;
2615 } else
2616#endif
2617 {
2618 load_seg_cache_raw_dt(&env->tr, e1, e2);
2619 }
2620 e2 |= DESC_TSS_BUSY_MASK;
2621 stl_kernel(ptr + 4, e2);
2622 }
2623 env->tr.selector = selector;
2624#ifdef VBOX
2625 Log(("helper_ltr: new tr=%RTsel {.base=%RGv, .limit=%RGv, .flags=%RX32} new=%RTsel\n",
2626 (RTSEL)env->tr.selector, (RTGCPTR)env->tr.base, (RTGCPTR)env->tr.limit,
2627 env->tr.flags, (RTSEL)(selector & 0xffff)));
2628#endif
2629}
2630
2631/* only works if protected mode and not VM86. seg_reg must be != R_CS */
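/* SS additionally requires a writable data segment with RPL == DPL == CPL;
   the other registers accept readable segments, with the DPL >= max(CPL, RPL)
   check skipped for conforming code.  A not-present segment gives #SS for SS
   and #NP otherwise, and the accessed bit is set on success. */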
2632void helper_load_seg(int seg_reg, int selector)
2633{
2634 uint32_t e1, e2;
2635 int cpl, dpl, rpl;
2636 SegmentCache *dt;
2637#ifndef VBOX
2638 int index;
2639#else
2640 unsigned int index;
2641#endif
2642 target_ulong ptr;
2643
2644 selector &= 0xffff;
2645 cpl = env->hflags & HF_CPL_MASK;
2646
2647#ifdef VBOX
2648 /* Trying to load a selector with CPL=1? */
2649 if (cpl == 0 && (selector & 3) == 1 && (env->state & CPU_RAW_RING0))
2650 {
2651 Log(("RPL 1 -> sel %04X -> %04X\n", selector, selector & 0xfffc));
2652 selector = selector & 0xfffc;
2653 }
2654#endif
2655 if ((selector & 0xfffc) == 0) {
2656 /* null selector case */
2657 if (seg_reg == R_SS
2658#ifdef TARGET_X86_64
2659 && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
2660#endif
2661 )
2662 raise_exception_err(EXCP0D_GPF, 0);
2663 cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
2664 } else {
2665
2666 if (selector & 0x4)
2667 dt = &env->ldt;
2668 else
2669 dt = &env->gdt;
2670 index = selector & ~7;
2671 if ((index + 7) > dt->limit)
2672 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2673 ptr = dt->base + index;
2674 e1 = ldl_kernel(ptr);
2675 e2 = ldl_kernel(ptr + 4);
2676
2677 if (!(e2 & DESC_S_MASK))
2678 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2679 rpl = selector & 3;
2680 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2681 if (seg_reg == R_SS) {
2682 /* must be writable segment */
2683 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
2684 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2685 if (rpl != cpl || dpl != cpl)
2686 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2687 } else {
2688 /* must be readable segment */
2689 if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
2690 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2691
2692 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2693 /* if not conforming code, test rights */
2694 if (dpl < cpl || dpl < rpl)
2695 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2696 }
2697 }
2698
2699 if (!(e2 & DESC_P_MASK)) {
2700 if (seg_reg == R_SS)
2701 raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
2702 else
2703 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2704 }
2705
2706 /* set the access bit if not already set */
2707 if (!(e2 & DESC_A_MASK)) {
2708 e2 |= DESC_A_MASK;
2709 stl_kernel(ptr + 4, e2);
2710 }
2711
2712 cpu_x86_load_seg_cache(env, seg_reg, selector,
2713 get_seg_base(e1, e2),
2714 get_seg_limit(e1, e2),
2715 e2);
2716#if 0
2717 fprintf(logfile, "load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
2718 selector, (unsigned long)sc->base, sc->limit, sc->flags);
2719#endif
2720 }
2721}
2722
2723/* protected mode jump */
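/* Handles direct jumps to code segments as well as jumps through task gates
   and TSS descriptors (task switch) and through call gates, where the real
   target is taken from the gate's selector:offset. */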
2724void helper_ljmp_protected(int new_cs, target_ulong new_eip,
2725 int next_eip_addend)
2726{
2727 int gate_cs, type;
2728 uint32_t e1, e2, cpl, dpl, rpl, limit;
2729 target_ulong next_eip;
2730
2731#ifdef VBOX
2732 e1 = e2 = 0;
2733#endif
2734 if ((new_cs & 0xfffc) == 0)
2735 raise_exception_err(EXCP0D_GPF, 0);
2736 if (load_segment(&e1, &e2, new_cs) != 0)
2737 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2738 cpl = env->hflags & HF_CPL_MASK;
2739 if (e2 & DESC_S_MASK) {
2740 if (!(e2 & DESC_CS_MASK))
2741 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2742 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2743 if (e2 & DESC_C_MASK) {
2744 /* conforming code segment */
2745 if (dpl > cpl)
2746 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2747 } else {
2748 /* non conforming code segment */
2749 rpl = new_cs & 3;
2750 if (rpl > cpl)
2751 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2752 if (dpl != cpl)
2753 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2754 }
2755 if (!(e2 & DESC_P_MASK))
2756 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2757 limit = get_seg_limit(e1, e2);
2758 if (new_eip > limit &&
2759 !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK))
2760 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2761 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2762 get_seg_base(e1, e2), limit, e2);
2763 EIP = new_eip;
2764 } else {
2765 /* jump to call or task gate */
2766 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2767 rpl = new_cs & 3;
2768 cpl = env->hflags & HF_CPL_MASK;
2769 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2770 switch(type) {
2771 case 1: /* 286 TSS */
2772 case 9: /* 386 TSS */
2773 case 5: /* task gate */
2774 if (dpl < cpl || dpl < rpl)
2775 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2776 next_eip = env->eip + next_eip_addend;
2777 switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
2778 CC_OP = CC_OP_EFLAGS;
2779 break;
2780 case 4: /* 286 call gate */
2781 case 12: /* 386 call gate */
2782 if ((dpl < cpl) || (dpl < rpl))
2783 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2784 if (!(e2 & DESC_P_MASK))
2785 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2786 gate_cs = e1 >> 16;
2787 new_eip = (e1 & 0xffff);
2788 if (type == 12)
2789 new_eip |= (e2 & 0xffff0000);
2790 if (load_segment(&e1, &e2, gate_cs) != 0)
2791 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2792 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2793 /* must be code segment */
2794 if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
2795 (DESC_S_MASK | DESC_CS_MASK)))
2796 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2797 if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
2798 (!(e2 & DESC_C_MASK) && (dpl != cpl)))
2799 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2800 if (!(e2 & DESC_P_MASK))
2801#ifdef VBOX /* See page 3-514 of 253666.pdf */
2802 raise_exception_err(EXCP0B_NOSEG, gate_cs & 0xfffc);
2803#else
2804 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2805#endif
2806 limit = get_seg_limit(e1, e2);
2807 if (new_eip > limit)
2808 raise_exception_err(EXCP0D_GPF, 0);
2809 cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
2810 get_seg_base(e1, e2), limit, e2);
2811 EIP = new_eip;
2812 break;
2813 default:
2814 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2815 break;
2816 }
2817 }
2818}
2819
2820/* real mode call */
2821void helper_lcall_real(int new_cs, target_ulong new_eip1,
2822 int shift, int next_eip)
2823{
2824 int new_eip;
2825 uint32_t esp, esp_mask;
2826 target_ulong ssp;
2827
2828 new_eip = new_eip1;
2829 esp = ESP;
2830 esp_mask = get_sp_mask(env->segs[R_SS].flags);
2831 ssp = env->segs[R_SS].base;
2832 if (shift) {
2833 PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
2834 PUSHL(ssp, esp, esp_mask, next_eip);
2835 } else {
2836 PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
2837 PUSHW(ssp, esp, esp_mask, next_eip);
2838 }
2839
2840 SET_ESP(esp, esp_mask);
2841 env->eip = new_eip;
2842 env->segs[R_CS].selector = new_cs;
2843 env->segs[R_CS].base = (new_cs << 4);
2844}
2845
2846/* protected mode call */
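/* For a call gate to a more privileged non-conforming segment the new SS:ESP
   is fetched from the TSS and param_count stack words are copied from the
   caller's stack before the caller's CS:EIP is pushed on the new stack. */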
2847void helper_lcall_protected(int new_cs, target_ulong new_eip,
2848 int shift, int next_eip_addend)
2849{
2850 int new_stack, i;
2851 uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
2852 uint32_t ss, ss_e1, ss_e2, sp, type, ss_dpl, sp_mask;
2853 uint32_t val, limit, old_sp_mask;
2854 target_ulong ssp, old_ssp, next_eip;
2855
2856#ifdef VBOX
2857 ss = ss_e1 = ss_e2 = e1 = e2 = 0;
2858#endif
2859 next_eip = env->eip + next_eip_addend;
2860#ifdef DEBUG_PCALL
2861 if (loglevel & CPU_LOG_PCALL) {
2862 fprintf(logfile, "lcall %04x:%08x s=%d\n",
2863 new_cs, (uint32_t)new_eip, shift);
2864 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
2865 }
2866#endif
2867 if ((new_cs & 0xfffc) == 0)
2868 raise_exception_err(EXCP0D_GPF, 0);
2869 if (load_segment(&e1, &e2, new_cs) != 0)
2870 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2871 cpl = env->hflags & HF_CPL_MASK;
2872#ifdef DEBUG_PCALL
2873 if (loglevel & CPU_LOG_PCALL) {
2874 fprintf(logfile, "desc=%08x:%08x\n", e1, e2);
2875 }
2876#endif
2877 if (e2 & DESC_S_MASK) {
2878 if (!(e2 & DESC_CS_MASK))
2879 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2880 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2881 if (e2 & DESC_C_MASK) {
2882 /* conforming code segment */
2883 if (dpl > cpl)
2884 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2885 } else {
2886 /* non conforming code segment */
2887 rpl = new_cs & 3;
2888 if (rpl > cpl)
2889 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2890 if (dpl != cpl)
2891 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2892 }
2893 if (!(e2 & DESC_P_MASK))
2894 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2895
2896#ifdef TARGET_X86_64
2897 /* XXX: check 16/32 bit cases in long mode */
2898 if (shift == 2) {
2899 target_ulong rsp;
2900 /* 64 bit case */
2901 rsp = ESP;
2902 PUSHQ(rsp, env->segs[R_CS].selector);
2903 PUSHQ(rsp, next_eip);
2904 /* from this point, not restartable */
2905 ESP = rsp;
2906 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2907 get_seg_base(e1, e2),
2908 get_seg_limit(e1, e2), e2);
2909 EIP = new_eip;
2910 } else
2911#endif
2912 {
2913 sp = ESP;
2914 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2915 ssp = env->segs[R_SS].base;
2916 if (shift) {
2917 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2918 PUSHL(ssp, sp, sp_mask, next_eip);
2919 } else {
2920 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2921 PUSHW(ssp, sp, sp_mask, next_eip);
2922 }
2923
2924 limit = get_seg_limit(e1, e2);
2925 if (new_eip > limit)
2926 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2927 /* from this point, not restartable */
2928 SET_ESP(sp, sp_mask);
2929 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2930 get_seg_base(e1, e2), limit, e2);
2931 EIP = new_eip;
2932 }
2933 } else {
2934 /* check gate type */
2935 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
2936 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2937 rpl = new_cs & 3;
2938 switch(type) {
2939 case 1: /* available 286 TSS */
2940 case 9: /* available 386 TSS */
2941 case 5: /* task gate */
2942 if (dpl < cpl || dpl < rpl)
2943 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2944 switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
2945 CC_OP = CC_OP_EFLAGS;
2946 return;
2947 case 4: /* 286 call gate */
2948 case 12: /* 386 call gate */
2949 break;
2950 default:
2951 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2952 break;
2953 }
2954 shift = type >> 3;
2955
2956 if (dpl < cpl || dpl < rpl)
2957 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2958 /* check valid bit */
2959 if (!(e2 & DESC_P_MASK))
2960 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2961 selector = e1 >> 16;
2962 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
2963 param_count = e2 & 0x1f;
2964 if ((selector & 0xfffc) == 0)
2965 raise_exception_err(EXCP0D_GPF, 0);
2966
2967 if (load_segment(&e1, &e2, selector) != 0)
2968 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2969 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
2970 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2971 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2972 if (dpl > cpl)
2973 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2974 if (!(e2 & DESC_P_MASK))
2975 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2976
2977 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
2978 /* to inner privilege */
2979 get_ss_esp_from_tss(&ss, &sp, dpl);
2980#ifdef DEBUG_PCALL
2981 if (loglevel & CPU_LOG_PCALL)
2982 fprintf(logfile, "new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n",
2983 ss, sp, param_count, ESP);
2984#endif
2985 if ((ss & 0xfffc) == 0)
2986 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2987 if ((ss & 3) != dpl)
2988 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2989 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
2990 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2991 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2992 if (ss_dpl != dpl)
2993 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2994 if (!(ss_e2 & DESC_S_MASK) ||
2995 (ss_e2 & DESC_CS_MASK) ||
2996 !(ss_e2 & DESC_W_MASK))
2997 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2998 if (!(ss_e2 & DESC_P_MASK))
2999#ifdef VBOX /* See page 3-99 of 253666.pdf */
3000 raise_exception_err(EXCP0C_STACK, ss & 0xfffc);
3001#else
3002 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
3003#endif
3004
3005 // push_size = ((param_count * 2) + 8) << shift;
3006
3007 old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
3008 old_ssp = env->segs[R_SS].base;
3009
3010 sp_mask = get_sp_mask(ss_e2);
3011 ssp = get_seg_base(ss_e1, ss_e2);
3012 if (shift) {
3013 PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
3014 PUSHL(ssp, sp, sp_mask, ESP);
3015 for(i = param_count - 1; i >= 0; i--) {
3016 val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
3017 PUSHL(ssp, sp, sp_mask, val);
3018 }
3019 } else {
3020 PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
3021 PUSHW(ssp, sp, sp_mask, ESP);
3022 for(i = param_count - 1; i >= 0; i--) {
3023 val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
3024 PUSHW(ssp, sp, sp_mask, val);
3025 }
3026 }
3027 new_stack = 1;
3028 } else {
3029 /* to same privilege */
3030 sp = ESP;
3031 sp_mask = get_sp_mask(env->segs[R_SS].flags);
3032 ssp = env->segs[R_SS].base;
3033 // push_size = (4 << shift);
3034 new_stack = 0;
3035 }
3036
3037 if (shift) {
3038 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
3039 PUSHL(ssp, sp, sp_mask, next_eip);
3040 } else {
3041 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
3042 PUSHW(ssp, sp, sp_mask, next_eip);
3043 }
3044
3045 /* from this point, not restartable */
3046
3047 if (new_stack) {
3048 ss = (ss & ~3) | dpl;
3049 cpu_x86_load_seg_cache(env, R_SS, ss,
3050 ssp,
3051 get_seg_limit(ss_e1, ss_e2),
3052 ss_e2);
3053 }
3054
3055 selector = (selector & ~3) | dpl;
3056 cpu_x86_load_seg_cache(env, R_CS, selector,
3057 get_seg_base(e1, e2),
3058 get_seg_limit(e1, e2),
3059 e2);
3060 cpu_x86_set_cpl(env, dpl);
3061 SET_ESP(sp, sp_mask);
3062 EIP = offset;
3063 }
3064#ifdef USE_KQEMU
3065 if (kqemu_is_ok(env)) {
3066 env->exception_index = -1;
3067 cpu_loop_exit();
3068 }
3069#endif
3070}
3071
3072/* real and vm86 mode iret */
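/* The VBOX-specific fVME path implements CR4.VME IRET in virtual-8086 mode
   with IOPL < 3: IF is not restored from the popped image, VIF is updated
   from it instead, and #GP is raised if the popped image would set TF, or
   would set IF while VIP is pending. */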
3073void helper_iret_real(int shift)
3074{
3075 uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
3076 target_ulong ssp;
3077 int eflags_mask;
3078#ifdef VBOX
3079 bool fVME = false;
3080
3081 remR3TrapClear(env->pVM);
3082#endif /* VBOX */
3083
3084 sp_mask = 0xffff; /* XXXX: use SS segment size ? */
3085 sp = ESP;
3086 ssp = env->segs[R_SS].base;
3087 if (shift == 1) {
3088 /* 32 bits */
3089 POPL(ssp, sp, sp_mask, new_eip);
3090 POPL(ssp, sp, sp_mask, new_cs);
3091 new_cs &= 0xffff;
3092 POPL(ssp, sp, sp_mask, new_eflags);
3093 } else {
3094 /* 16 bits */
3095 POPW(ssp, sp, sp_mask, new_eip);
3096 POPW(ssp, sp, sp_mask, new_cs);
3097 POPW(ssp, sp, sp_mask, new_eflags);
3098 }
3099#ifdef VBOX
3100 if ( (env->eflags & VM_MASK)
3101 && ((env->eflags >> IOPL_SHIFT) & 3) != 3
3102 && (env->cr[4] & CR4_VME_MASK)) /* implied or else we would fault earlier */
3103 {
3104 fVME = true;
3105 /* if virtual interrupt pending and (virtual) interrupts will be enabled -> #GP */
3106 /* if TF will be set -> #GP */
3107 if ( ((new_eflags & IF_MASK) && (env->eflags & VIP_MASK))
3108 || (new_eflags & TF_MASK))
3109 raise_exception(EXCP0D_GPF);
3110 }
3111#endif /* VBOX */
3112 ESP = (ESP & ~sp_mask) | (sp & sp_mask);
3113 env->segs[R_CS].selector = new_cs;
3114 env->segs[R_CS].base = (new_cs << 4);
3115 env->eip = new_eip;
3116#ifdef VBOX
3117 if (fVME)
3118 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
3119 else
3120#endif
3121 if (env->eflags & VM_MASK)
3122 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
3123 else
3124 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
3125 if (shift == 0)
3126 eflags_mask &= 0xffff;
3127 load_eflags(new_eflags, eflags_mask);
3128 env->hflags2 &= ~HF2_NMI_MASK;
3129#ifdef VBOX
3130 if (fVME)
3131 {
3132 if (new_eflags & IF_MASK)
3133 env->eflags |= VIF_MASK;
3134 else
3135 env->eflags &= ~VIF_MASK;
3136 }
3137#endif /* VBOX */
3138}
3139
3140#ifndef VBOX
3141static inline void validate_seg(int seg_reg, int cpl)
3142#else /* VBOX */
3143DECLINLINE(void) validate_seg(int seg_reg, int cpl)
3144#endif /* VBOX */
3145{
3146 int dpl;
3147 uint32_t e2;
3148
3149 /* XXX: on x86_64, we do not want to nullify FS and GS because
3150 they may still contain a valid base. I would be interested to
3151 know how a real x86_64 CPU behaves */
3152 if ((seg_reg == R_FS || seg_reg == R_GS) &&
3153 (env->segs[seg_reg].selector & 0xfffc) == 0)
3154 return;
3155
3156 e2 = env->segs[seg_reg].flags;
3157 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3158 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
3159 /* data or non conforming code segment */
3160 if (dpl < cpl) {
3161 cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
3162 }
3163 }
3164}
3165
3166/* protected mode iret */
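/* Common worker for IRET and far RET: shift selects the operand size
   (0 = 16-bit, 1 = 32-bit, 2 = 64-bit), is_iret additionally pops EFLAGS,
   and addend releases the immediate stack bytes of a far RET imm16. */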
3167#ifndef VBOX
3168static inline void helper_ret_protected(int shift, int is_iret, int addend)
3169#else /* VBOX */
3170DECLINLINE(void) helper_ret_protected(int shift, int is_iret, int addend)
3171#endif /* VBOX */
3172{
3173 uint32_t new_cs, new_eflags, new_ss;
3174 uint32_t new_es, new_ds, new_fs, new_gs;
3175 uint32_t e1, e2, ss_e1, ss_e2;
3176 int cpl, dpl, rpl, eflags_mask, iopl;
3177 target_ulong ssp, sp, new_eip, new_esp, sp_mask;
3178
3179#ifdef VBOX
3180 ss_e1 = ss_e2 = e1 = e2 = 0;
3181#endif
3182
3183#ifdef TARGET_X86_64
3184 if (shift == 2)
3185 sp_mask = -1;
3186 else
3187#endif
3188 sp_mask = get_sp_mask(env->segs[R_SS].flags);
3189 sp = ESP;
3190 ssp = env->segs[R_SS].base;
3191 new_eflags = 0; /* avoid warning */
3192#ifdef TARGET_X86_64
3193 if (shift == 2) {
3194 POPQ(sp, new_eip);
3195 POPQ(sp, new_cs);
3196 new_cs &= 0xffff;
3197 if (is_iret) {
3198 POPQ(sp, new_eflags);
3199 }
3200 } else
3201#endif
3202 if (shift == 1) {
3203 /* 32 bits */
3204 POPL(ssp, sp, sp_mask, new_eip);
3205 POPL(ssp, sp, sp_mask, new_cs);
3206 new_cs &= 0xffff;
3207 if (is_iret) {
3208 POPL(ssp, sp, sp_mask, new_eflags);
3209#if defined(VBOX) && defined(DEBUG)
3210 printf("iret: new CS %04X\n", new_cs);
3211 printf("iret: new EIP %08X\n", (uint32_t)new_eip);
3212 printf("iret: new EFLAGS %08X\n", new_eflags);
3213 printf("iret: EAX=%08x\n", (uint32_t)EAX);
3214#endif
3215 if (new_eflags & VM_MASK)
3216 goto return_to_vm86;
3217 }
3218#ifdef VBOX
3219 if ((new_cs & 0x3) == 1 && (env->state & CPU_RAW_RING0))
3220 {
3221#ifdef DEBUG
3222 printf("RPL 1 -> new_cs %04X -> %04X\n", new_cs, new_cs & 0xfffc);
3223#endif
3224 new_cs = new_cs & 0xfffc;
3225 }
3226#endif
3227 } else {
3228 /* 16 bits */
3229 POPW(ssp, sp, sp_mask, new_eip);
3230 POPW(ssp, sp, sp_mask, new_cs);
3231 if (is_iret)
3232 POPW(ssp, sp, sp_mask, new_eflags);
3233 }
3234#ifdef DEBUG_PCALL
3235 if (loglevel & CPU_LOG_PCALL) {
3236 fprintf(logfile, "lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
3237 new_cs, new_eip, shift, addend);
3238 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
3239 }
3240#endif
3241 if ((new_cs & 0xfffc) == 0)
3242 {
3243#if defined(VBOX) && defined(DEBUG)
3244        printf("(new_cs & 0xfffc) == 0\n");
3245#endif
3246 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3247 }
3248 if (load_segment(&e1, &e2, new_cs) != 0)
3249 {
3250#if defined(VBOX) && defined(DEBUG)
3251 printf("load_segment failed\n");
3252#endif
3253 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3254 }
3255 if (!(e2 & DESC_S_MASK) ||
3256 !(e2 & DESC_CS_MASK))
3257 {
3258#if defined(VBOX) && defined(DEBUG)
3259 printf("e2 mask %08x\n", e2);
3260#endif
3261 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3262 }
3263 cpl = env->hflags & HF_CPL_MASK;
3264 rpl = new_cs & 3;
3265 if (rpl < cpl)
3266 {
3267#if defined(VBOX) && defined(DEBUG)
3268 printf("rpl < cpl (%d vs %d)\n", rpl, cpl);
3269#endif
3270 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3271 }
3272 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3273 if (e2 & DESC_C_MASK) {
3274 if (dpl > rpl)
3275 {
3276#if defined(VBOX) && defined(DEBUG)
3277 printf("dpl > rpl (%d vs %d)\n", dpl, rpl);
3278#endif
3279 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3280 }
3281 } else {
3282 if (dpl != rpl)
3283 {
3284#if defined(VBOX) && defined(DEBUG)
3285 printf("dpl != rpl (%d vs %d) e1=%x e2=%x\n", dpl, rpl, e1, e2);
3286#endif
3287 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3288 }
3289 }
3290 if (!(e2 & DESC_P_MASK))
3291 {
3292#if defined(VBOX) && defined(DEBUG)
3293 printf("DESC_P_MASK e2=%08x\n", e2);
3294#endif
3295 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
3296 }
3297
3298 sp += addend;
3299 if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
3300 ((env->hflags & HF_CS64_MASK) && !is_iret))) {
3301 /* return to same privilege level */
3302 cpu_x86_load_seg_cache(env, R_CS, new_cs,
3303 get_seg_base(e1, e2),
3304 get_seg_limit(e1, e2),
3305 e2);
3306 } else {
3307 /* return to different privilege level */
3308#ifdef TARGET_X86_64
3309 if (shift == 2) {
3310 POPQ(sp, new_esp);
3311 POPQ(sp, new_ss);
3312 new_ss &= 0xffff;
3313 } else
3314#endif
3315 if (shift == 1) {
3316 /* 32 bits */
3317 POPL(ssp, sp, sp_mask, new_esp);
3318 POPL(ssp, sp, sp_mask, new_ss);
3319 new_ss &= 0xffff;
3320 } else {
3321 /* 16 bits */
3322 POPW(ssp, sp, sp_mask, new_esp);
3323 POPW(ssp, sp, sp_mask, new_ss);
3324 }
3325#ifdef DEBUG_PCALL
3326 if (loglevel & CPU_LOG_PCALL) {
3327 fprintf(logfile, "new ss:esp=%04x:" TARGET_FMT_lx "\n",
3328 new_ss, new_esp);
3329 }
3330#endif
3331 if ((new_ss & 0xfffc) == 0) {
3332#ifdef TARGET_X86_64
3333 /* NULL ss is allowed in long mode if cpl != 3*/
3334 /* XXX: test CS64 ? */
3335 if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
3336 cpu_x86_load_seg_cache(env, R_SS, new_ss,
3337 0, 0xffffffff,
3338 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3339 DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
3340 DESC_W_MASK | DESC_A_MASK);
3341 ss_e2 = DESC_B_MASK; /* XXX: should not be needed ? */
3342 } else
3343#endif
3344 {
3345 raise_exception_err(EXCP0D_GPF, 0);
3346 }
3347 } else {
3348 if ((new_ss & 3) != rpl)
3349 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3350 if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
3351 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3352 if (!(ss_e2 & DESC_S_MASK) ||
3353 (ss_e2 & DESC_CS_MASK) ||
3354 !(ss_e2 & DESC_W_MASK))
3355 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3356 dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
3357 if (dpl != rpl)
3358 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3359 if (!(ss_e2 & DESC_P_MASK))
3360 raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
3361 cpu_x86_load_seg_cache(env, R_SS, new_ss,
3362 get_seg_base(ss_e1, ss_e2),
3363 get_seg_limit(ss_e1, ss_e2),
3364 ss_e2);
3365 }
3366
3367 cpu_x86_load_seg_cache(env, R_CS, new_cs,
3368 get_seg_base(e1, e2),
3369 get_seg_limit(e1, e2),
3370 e2);
3371 cpu_x86_set_cpl(env, rpl);
3372 sp = new_esp;
3373#ifdef TARGET_X86_64
3374 if (env->hflags & HF_CS64_MASK)
3375 sp_mask = -1;
3376 else
3377#endif
3378 sp_mask = get_sp_mask(ss_e2);
3379
3380 /* validate data segments */
3381 validate_seg(R_ES, rpl);
3382 validate_seg(R_DS, rpl);
3383 validate_seg(R_FS, rpl);
3384 validate_seg(R_GS, rpl);
3385
3386 sp += addend;
3387 }
3388 SET_ESP(sp, sp_mask);
3389 env->eip = new_eip;
3390 if (is_iret) {
3391 /* NOTE: 'cpl' is the _old_ CPL */
3392 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
3393 if (cpl == 0)
3394#ifdef VBOX
3395 eflags_mask |= IOPL_MASK | VIF_MASK | VIP_MASK;
3396#else
3397 eflags_mask |= IOPL_MASK;
3398#endif
3399 iopl = (env->eflags >> IOPL_SHIFT) & 3;
3400 if (cpl <= iopl)
3401 eflags_mask |= IF_MASK;
3402 if (shift == 0)
3403 eflags_mask &= 0xffff;
3404 load_eflags(new_eflags, eflags_mask);
3405 }
3406 return;
3407
3408 return_to_vm86:
3409 POPL(ssp, sp, sp_mask, new_esp);
3410 POPL(ssp, sp, sp_mask, new_ss);
3411 POPL(ssp, sp, sp_mask, new_es);
3412 POPL(ssp, sp, sp_mask, new_ds);
3413 POPL(ssp, sp, sp_mask, new_fs);
3414 POPL(ssp, sp, sp_mask, new_gs);
3415
3416 /* modify processor state */
3417 load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
3418 IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
3419 load_seg_vm(R_CS, new_cs & 0xffff);
3420 cpu_x86_set_cpl(env, 3);
3421 load_seg_vm(R_SS, new_ss & 0xffff);
3422 load_seg_vm(R_ES, new_es & 0xffff);
3423 load_seg_vm(R_DS, new_ds & 0xffff);
3424 load_seg_vm(R_FS, new_fs & 0xffff);
3425 load_seg_vm(R_GS, new_gs & 0xffff);
3426
3427 env->eip = new_eip & 0xffff;
3428 ESP = new_esp;
3429}
3430
3431void helper_iret_protected(int shift, int next_eip)
3432{
3433 int tss_selector, type;
3434 uint32_t e1, e2;
3435
3436#ifdef VBOX
3437 e1 = e2 = 0;
3438 remR3TrapClear(env->pVM);
3439#endif
3440
3441 /* specific case for TSS */
3442 if (env->eflags & NT_MASK) {
3443#ifdef TARGET_X86_64
3444 if (env->hflags & HF_LMA_MASK)
3445 raise_exception_err(EXCP0D_GPF, 0);
3446#endif
3447 tss_selector = lduw_kernel(env->tr.base + 0);
3448 if (tss_selector & 4)
3449 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
3450 if (load_segment(&e1, &e2, tss_selector) != 0)
3451 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
3452 type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
3453 /* NOTE: we check both segment and busy TSS */
3454 if (type != 3)
3455 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
3456 switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
3457 } else {
3458 helper_ret_protected(shift, 1, 0);
3459 }
3460 env->hflags2 &= ~HF2_NMI_MASK;
3461#ifdef USE_KQEMU
3462 if (kqemu_is_ok(env)) {
3463 CC_OP = CC_OP_EFLAGS;
3464 env->exception_index = -1;
3465 cpu_loop_exit();
3466 }
3467#endif
3468}
3469
3470void helper_lret_protected(int shift, int addend)
3471{
3472 helper_ret_protected(shift, 0, addend);
3473#ifdef USE_KQEMU
3474 if (kqemu_is_ok(env)) {
3475 env->exception_index = -1;
3476 cpu_loop_exit();
3477 }
3478#endif
3479}
3480
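/* SYSENTER: #GP(0) if IA32_SYSENTER_CS is zero, otherwise switch to CPL 0
   with flat CS/SS derived from that MSR and continue at SYSENTER_EIP with
   the stack at SYSENTER_ESP; VM, IF and RF are cleared. */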
3481void helper_sysenter(void)
3482{
3483 if (env->sysenter_cs == 0) {
3484 raise_exception_err(EXCP0D_GPF, 0);
3485 }
3486 env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
3487 cpu_x86_set_cpl(env, 0);
3488
3489#ifdef TARGET_X86_64
3490 if (env->hflags & HF_LMA_MASK) {
3491 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
3492 0, 0xffffffff,
3493 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3494 DESC_S_MASK |
3495 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
3496 } else
3497#endif
3498 {
3499 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
3500 0, 0xffffffff,
3501 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3502 DESC_S_MASK |
3503 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
3504 }
3505 cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
3506 0, 0xffffffff,
3507 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3508 DESC_S_MASK |
3509 DESC_W_MASK | DESC_A_MASK);
3510 ESP = env->sysenter_esp;
3511 EIP = env->sysenter_eip;
3512}
3513
3514void helper_sysexit(int dflag)
3515{
3516 int cpl;
3517
3518 cpl = env->hflags & HF_CPL_MASK;
3519 if (env->sysenter_cs == 0 || cpl != 0) {
3520 raise_exception_err(EXCP0D_GPF, 0);
3521 }
3522 cpu_x86_set_cpl(env, 3);
3523#ifdef TARGET_X86_64
3524 if (dflag == 2) {
3525 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) | 3,
3526 0, 0xffffffff,
3527 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3528 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3529 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
3530 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) | 3,
3531 0, 0xffffffff,
3532 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3533 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3534 DESC_W_MASK | DESC_A_MASK);
3535 } else
3536#endif
3537 {
3538 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
3539 0, 0xffffffff,
3540 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3541 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3542 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
3543 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
3544 0, 0xffffffff,
3545 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3546 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3547 DESC_W_MASK | DESC_A_MASK);
3548 }
3549 ESP = ECX;
3550 EIP = EDX;
3551#ifdef USE_KQEMU
3552 if (kqemu_is_ok(env)) {
3553 env->exception_index = -1;
3554 cpu_loop_exit();
3555 }
3556#endif
3557}
3558
3559#if defined(CONFIG_USER_ONLY)
3560target_ulong helper_read_crN(int reg)
3561{
3562 return 0;
3563}
3564
3565void helper_write_crN(int reg, target_ulong t0)
3566{
3567}
3568#else
3569target_ulong helper_read_crN(int reg)
3570{
3571 target_ulong val;
3572
3573 helper_svm_check_intercept_param(SVM_EXIT_READ_CR0 + reg, 0);
3574 switch(reg) {
3575 default:
3576 val = env->cr[reg];
3577 break;
3578 case 8:
3579 if (!(env->hflags2 & HF2_VINTR_MASK)) {
3580 val = cpu_get_apic_tpr(env);
3581 } else {
3582 val = env->v_tpr;
3583 }
3584 break;
3585 }
3586 return val;
3587}
3588
3589void helper_write_crN(int reg, target_ulong t0)
3590{
3591 helper_svm_check_intercept_param(SVM_EXIT_WRITE_CR0 + reg, 0);
3592 switch(reg) {
3593 case 0:
3594 cpu_x86_update_cr0(env, t0);
3595 break;
3596 case 3:
3597 cpu_x86_update_cr3(env, t0);
3598 break;
3599 case 4:
3600 cpu_x86_update_cr4(env, t0);
3601 break;
3602 case 8:
3603 if (!(env->hflags2 & HF2_VINTR_MASK)) {
3604 cpu_set_apic_tpr(env, t0);
3605 }
3606 env->v_tpr = t0 & 0x0f;
3607 break;
3608 default:
3609 env->cr[reg] = t0;
3610 break;
3611 }
3612}
3613#endif
3614
3615void helper_lmsw(target_ulong t0)
3616{
3617 /* only 4 lower bits of CR0 are modified. PE cannot be set to zero
3618 if already set to one. */
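    /* Keeping bit 0 in the preserved part (~0xe retains it) and OR-ing in the
       new low nibble means PE can be set here but never cleared. */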
3619 t0 = (env->cr[0] & ~0xe) | (t0 & 0xf);
3620 helper_write_crN(0, t0);
3621}
3622
3623void helper_clts(void)
3624{
3625 env->cr[0] &= ~CR0_TS_MASK;
3626 env->hflags &= ~HF_TS_MASK;
3627}
3628
3629/* XXX: do more */
3630void helper_movl_drN_T0(int reg, target_ulong t0)
3631{
3632 env->dr[reg] = t0;
3633}
3634
3635void helper_invlpg(target_ulong addr)
3636{
3637 helper_svm_check_intercept_param(SVM_EXIT_INVLPG, 0);
3638 tlb_flush_page(env, addr);
3639}
3640
3641void helper_rdtsc(void)
3642{
3643 uint64_t val;
3644
3645 if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3646 raise_exception(EXCP0D_GPF);
3647 }
3648 helper_svm_check_intercept_param(SVM_EXIT_RDTSC, 0);
3649
3650 val = cpu_get_tsc(env) + env->tsc_offset;
3651 EAX = (uint32_t)(val);
3652 EDX = (uint32_t)(val >> 32);
3653}
3654
3655#ifdef VBOX
3656void helper_rdtscp(void)
3657{
3658 uint64_t val;
3659 if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3660 raise_exception(EXCP0D_GPF);
3661 }
3662
3663 val = cpu_get_tsc(env);
3664 EAX = (uint32_t)(val);
3665 EDX = (uint32_t)(val >> 32);
3666 ECX = cpu_rdmsr(env, MSR_K8_TSC_AUX);
3667}
3668#endif
3669
3670void helper_rdpmc(void)
3671{
3672 if ((env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3673 raise_exception(EXCP0D_GPF);
3674 }
3675 helper_svm_check_intercept_param(SVM_EXIT_RDPMC, 0);
3676
3677 /* currently unimplemented */
3678 raise_exception_err(EXCP06_ILLOP, 0);
3679}
3680
3681#if defined(CONFIG_USER_ONLY)
3682void helper_wrmsr(void)
3683{
3684}
3685
3686void helper_rdmsr(void)
3687{
3688}
3689#else
3690void helper_wrmsr(void)
3691{
3692 uint64_t val;
3693
3694 helper_svm_check_intercept_param(SVM_EXIT_MSR, 1);
3695
3696 val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
3697
3698 switch((uint32_t)ECX) {
3699 case MSR_IA32_SYSENTER_CS:
3700 env->sysenter_cs = val & 0xffff;
3701 break;
3702 case MSR_IA32_SYSENTER_ESP:
3703 env->sysenter_esp = val;
3704 break;
3705 case MSR_IA32_SYSENTER_EIP:
3706 env->sysenter_eip = val;
3707 break;
3708 case MSR_IA32_APICBASE:
3709 cpu_set_apic_base(env, val);
3710 break;
3711 case MSR_EFER:
3712 {
3713 uint64_t update_mask;
3714 update_mask = 0;
3715 if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL)
3716 update_mask |= MSR_EFER_SCE;
3717 if (env->cpuid_ext2_features & CPUID_EXT2_LM)
3718 update_mask |= MSR_EFER_LME;
3719 if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
3720 update_mask |= MSR_EFER_FFXSR;
3721 if (env->cpuid_ext2_features & CPUID_EXT2_NX)
3722 update_mask |= MSR_EFER_NXE;
3723 if (env->cpuid_ext3_features & CPUID_EXT3_SVM)
3724 update_mask |= MSR_EFER_SVME;
3725 cpu_load_efer(env, (env->efer & ~update_mask) |
3726 (val & update_mask));
3727 }
3728 break;
3729 case MSR_STAR:
3730 env->star = val;
3731 break;
3732 case MSR_PAT:
3733 env->pat = val;
3734 break;
3735 case MSR_VM_HSAVE_PA:
3736 env->vm_hsave = val;
3737 break;
3738#ifdef TARGET_X86_64
3739 case MSR_LSTAR:
3740 env->lstar = val;
3741 break;
3742 case MSR_CSTAR:
3743 env->cstar = val;
3744 break;
3745 case MSR_FMASK:
3746 env->fmask = val;
3747 break;
3748 case MSR_FSBASE:
3749 env->segs[R_FS].base = val;
3750 break;
3751 case MSR_GSBASE:
3752 env->segs[R_GS].base = val;
3753 break;
3754 case MSR_KERNELGSBASE:
3755 env->kernelgsbase = val;
3756 break;
3757#endif
3758 default:
3759#ifndef VBOX
3760 /* XXX: exception ? */
3761 break;
3762#else /* VBOX */
3763 {
3764 uint32_t ecx = (uint32_t)ECX;
3765 /* In X2APIC specification this range is reserved for APIC control. */
3766 if (ecx >= MSR_APIC_RANGE_START && ecx < MSR_APIC_RANGE_END)
3767 cpu_apic_wrmsr(env, ecx, val);
3768 /** @todo else exception? */
3769 break;
3770 }
3771 case MSR_K8_TSC_AUX:
3772 cpu_wrmsr(env, MSR_K8_TSC_AUX, val);
3773 break;
3774#endif /* VBOX */
3775 }
3776}
3777
3778void helper_rdmsr(void)
3779{
3780 uint64_t val;
3781
3782 helper_svm_check_intercept_param(SVM_EXIT_MSR, 0);
3783
3784 switch((uint32_t)ECX) {
3785 case MSR_IA32_SYSENTER_CS:
3786 val = env->sysenter_cs;
3787 break;
3788 case MSR_IA32_SYSENTER_ESP:
3789 val = env->sysenter_esp;
3790 break;
3791 case MSR_IA32_SYSENTER_EIP:
3792 val = env->sysenter_eip;
3793 break;
3794 case MSR_IA32_APICBASE:
3795 val = cpu_get_apic_base(env);
3796 break;
3797 case MSR_EFER:
3798 val = env->efer;
3799 break;
3800 case MSR_STAR:
3801 val = env->star;
3802 break;
3803 case MSR_PAT:
3804 val = env->pat;
3805 break;
3806 case MSR_VM_HSAVE_PA:
3807 val = env->vm_hsave;
3808 break;
3809 case MSR_IA32_PERF_STATUS:
3810 /* tsc_increment_by_tick */
3811 val = 1000ULL;
3812 /* CPU multiplier */
3813 val |= (((uint64_t)4ULL) << 40);
3814 break;
3815#ifdef TARGET_X86_64
3816 case MSR_LSTAR:
3817 val = env->lstar;
3818 break;
3819 case MSR_CSTAR:
3820 val = env->cstar;
3821 break;
3822 case MSR_FMASK:
3823 val = env->fmask;
3824 break;
3825 case MSR_FSBASE:
3826 val = env->segs[R_FS].base;
3827 break;
3828 case MSR_GSBASE:
3829 val = env->segs[R_GS].base;
3830 break;
3831 case MSR_KERNELGSBASE:
3832 val = env->kernelgsbase;
3833 break;
3834#endif
3835#ifdef USE_KQEMU
3836 case MSR_QPI_COMMBASE:
3837 if (env->kqemu_enabled) {
3838 val = kqemu_comm_base;
3839 } else {
3840 val = 0;
3841 }
3842 break;
3843#endif
3844 default:
3845#ifndef VBOX
3846 /* XXX: exception ? */
3847 val = 0;
3848 break;
3849#else /* VBOX */
3850 {
3851 uint32_t ecx = (uint32_t)ECX;
3852 /* In X2APIC specification this range is reserved for APIC control. */
3853 if (ecx >= MSR_APIC_RANGE_START && ecx < MSR_APIC_RANGE_END)
3854 val = cpu_apic_rdmsr(env, ecx);
3855 else
3856 val = 0; /** @todo else exception? */
3857 break;
3858 }
3859 case MSR_K8_TSC_AUX:
3860 val = cpu_rdmsr(env, MSR_K8_TSC_AUX);
3861 break;
3862#endif /* VBOX */
3863 }
3864 EAX = (uint32_t)(val);
3865 EDX = (uint32_t)(val >> 32);
3866}
3867#endif
3868
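/* LSL and LAR below do not fault on bad selectors: success is reported
   through ZF (via CC_SRC), and privilege or type violations simply return
   with ZF cleared. */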
3869target_ulong helper_lsl(target_ulong selector1)
3870{
3871 unsigned int limit;
3872 uint32_t e1, e2, eflags, selector;
3873 int rpl, dpl, cpl, type;
3874
3875 selector = selector1 & 0xffff;
3876 eflags = cc_table[CC_OP].compute_all();
3877 if (load_segment(&e1, &e2, selector) != 0)
3878 goto fail;
3879 rpl = selector & 3;
3880 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3881 cpl = env->hflags & HF_CPL_MASK;
3882 if (e2 & DESC_S_MASK) {
3883 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3884 /* conforming */
3885 } else {
3886 if (dpl < cpl || dpl < rpl)
3887 goto fail;
3888 }
3889 } else {
3890 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
3891 switch(type) {
3892 case 1:
3893 case 2:
3894 case 3:
3895 case 9:
3896 case 11:
3897 break;
3898 default:
3899 goto fail;
3900 }
3901 if (dpl < cpl || dpl < rpl) {
3902 fail:
3903 CC_SRC = eflags & ~CC_Z;
3904 return 0;
3905 }
3906 }
3907 limit = get_seg_limit(e1, e2);
3908 CC_SRC = eflags | CC_Z;
3909 return limit;
3910}
3911
3912target_ulong helper_lar(target_ulong selector1)
3913{
3914 uint32_t e1, e2, eflags, selector;
3915 int rpl, dpl, cpl, type;
3916
3917 selector = selector1 & 0xffff;
3918 eflags = cc_table[CC_OP].compute_all();
3919 if ((selector & 0xfffc) == 0)
3920 goto fail;
3921 if (load_segment(&e1, &e2, selector) != 0)
3922 goto fail;
3923 rpl = selector & 3;
3924 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3925 cpl = env->hflags & HF_CPL_MASK;
3926 if (e2 & DESC_S_MASK) {
3927 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3928 /* conforming */
3929 } else {
3930 if (dpl < cpl || dpl < rpl)
3931 goto fail;
3932 }
3933 } else {
3934 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
3935 switch(type) {
3936 case 1:
3937 case 2:
3938 case 3:
3939 case 4:
3940 case 5:
3941 case 9:
3942 case 11:
3943 case 12:
3944 break;
3945 default:
3946 goto fail;
3947 }
3948 if (dpl < cpl || dpl < rpl) {
3949 fail:
3950 CC_SRC = eflags & ~CC_Z;
3951 return 0;
3952 }
3953 }
3954 CC_SRC = eflags | CC_Z;
3955 return e2 & 0x00f0ff00;
3956}
3957
3958void helper_verr(target_ulong selector1)
3959{
3960 uint32_t e1, e2, eflags, selector;
3961 int rpl, dpl, cpl;
3962
3963 selector = selector1 & 0xffff;
3964 eflags = cc_table[CC_OP].compute_all();
3965 if ((selector & 0xfffc) == 0)
3966 goto fail;
3967 if (load_segment(&e1, &e2, selector) != 0)
3968 goto fail;
3969 if (!(e2 & DESC_S_MASK))
3970 goto fail;
3971 rpl = selector & 3;
3972 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3973 cpl = env->hflags & HF_CPL_MASK;
3974 if (e2 & DESC_CS_MASK) {
3975 if (!(e2 & DESC_R_MASK))
3976 goto fail;
3977 if (!(e2 & DESC_C_MASK)) {
3978 if (dpl < cpl || dpl < rpl)
3979 goto fail;
3980 }
3981 } else {
3982 if (dpl < cpl || dpl < rpl) {
3983 fail:
3984 CC_SRC = eflags & ~CC_Z;
3985 return;
3986 }
3987 }
3988 CC_SRC = eflags | CC_Z;
3989}
3990
3991void helper_verw(target_ulong selector1)
3992{
3993 uint32_t e1, e2, eflags, selector;
3994 int rpl, dpl, cpl;
3995
3996 selector = selector1 & 0xffff;
3997 eflags = cc_table[CC_OP].compute_all();
3998 if ((selector & 0xfffc) == 0)
3999 goto fail;
4000 if (load_segment(&e1, &e2, selector) != 0)
4001 goto fail;
4002 if (!(e2 & DESC_S_MASK))
4003 goto fail;
4004 rpl = selector & 3;
4005 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
4006 cpl = env->hflags & HF_CPL_MASK;
4007 if (e2 & DESC_CS_MASK) {
4008 goto fail;
4009 } else {
4010 if (dpl < cpl || dpl < rpl)
4011 goto fail;
4012 if (!(e2 & DESC_W_MASK)) {
4013 fail:
4014 CC_SRC = eflags & ~CC_Z;
4015 return;
4016 }
4017 }
4018 CC_SRC = eflags | CC_Z;
4019}
4020
4021/* x87 FPU helpers */
4022
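/* Set the given exception bits in the FPU status word; if any of them is
   unmasked in the control word, also set the error summary and busy bits. */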
4023static void fpu_set_exception(int mask)
4024{
4025 env->fpus |= mask;
4026 if (env->fpus & (~env->fpuc & FPUC_EM))
4027 env->fpus |= FPUS_SE | FPUS_B;
4028}
4029
4030#ifndef VBOX
4031static inline CPU86_LDouble helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
4032#else /* VBOX */
4033DECLINLINE(CPU86_LDouble) helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
4034#endif /* VBOX */
4035{
4036 if (b == 0.0)
4037 fpu_set_exception(FPUS_ZE);
4038 return a / b;
4039}
4040
4041void fpu_raise_exception(void)
4042{
4043 if (env->cr[0] & CR0_NE_MASK) {
4044 raise_exception(EXCP10_COPR);
4045 }
4046#if !defined(CONFIG_USER_ONLY)
4047 else {
4048 cpu_set_ferr(env);
4049 }
4050#endif
4051}
4052
4053void helper_flds_FT0(uint32_t val)
4054{
4055 union {
4056 float32 f;
4057 uint32_t i;
4058 } u;
4059 u.i = val;
4060 FT0 = float32_to_floatx(u.f, &env->fp_status);
4061}
4062
4063void helper_fldl_FT0(uint64_t val)
4064{
4065 union {
4066 float64 f;
4067 uint64_t i;
4068 } u;
4069 u.i = val;
4070 FT0 = float64_to_floatx(u.f, &env->fp_status);
4071}
4072
4073void helper_fildl_FT0(int32_t val)
4074{
4075 FT0 = int32_to_floatx(val, &env->fp_status);
4076}
4077
4078void helper_flds_ST0(uint32_t val)
4079{
4080 int new_fpstt;
4081 union {
4082 float32 f;
4083 uint32_t i;
4084 } u;
4085 new_fpstt = (env->fpstt - 1) & 7;
4086 u.i = val;
4087 env->fpregs[new_fpstt].d = float32_to_floatx(u.f, &env->fp_status);
4088 env->fpstt = new_fpstt;
4089 env->fptags[new_fpstt] = 0; /* validate stack entry */
4090}
4091
4092void helper_fldl_ST0(uint64_t val)
4093{
4094 int new_fpstt;
4095 union {
4096 float64 f;
4097 uint64_t i;
4098 } u;
4099 new_fpstt = (env->fpstt - 1) & 7;
4100 u.i = val;
4101 env->fpregs[new_fpstt].d = float64_to_floatx(u.f, &env->fp_status);
4102 env->fpstt = new_fpstt;
4103 env->fptags[new_fpstt] = 0; /* validate stack entry */
4104}
4105
4106void helper_fildl_ST0(int32_t val)
4107{
4108 int new_fpstt;
4109 new_fpstt = (env->fpstt - 1) & 7;
4110 env->fpregs[new_fpstt].d = int32_to_floatx(val, &env->fp_status);
4111 env->fpstt = new_fpstt;
4112 env->fptags[new_fpstt] = 0; /* validate stack entry */
4113}
4114
4115void helper_fildll_ST0(int64_t val)
4116{
4117 int new_fpstt;
4118 new_fpstt = (env->fpstt - 1) & 7;
4119 env->fpregs[new_fpstt].d = int64_to_floatx(val, &env->fp_status);
4120 env->fpstt = new_fpstt;
4121 env->fptags[new_fpstt] = 0; /* validate stack entry */
4122}
4123
4124#ifndef VBOX
4125uint32_t helper_fsts_ST0(void)
4126#else
4127RTCCUINTREG helper_fsts_ST0(void)
4128#endif
4129{
4130 union {
4131 float32 f;
4132 uint32_t i;
4133 } u;
4134 u.f = floatx_to_float32(ST0, &env->fp_status);
4135 return u.i;
4136}
4137
4138uint64_t helper_fstl_ST0(void)
4139{
4140 union {
4141 float64 f;
4142 uint64_t i;
4143 } u;
4144 u.f = floatx_to_float64(ST0, &env->fp_status);
4145 return u.i;
4146}
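/* FIST (16-bit): values that do not fit into an int16 are stored as the
   integer indefinite value -32768 (0x8000). */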
4147#ifndef VBOX
4148int32_t helper_fist_ST0(void)
4149#else
4150RTCCINTREG helper_fist_ST0(void)
4151#endif
4152{
4153 int32_t val;
4154 val = floatx_to_int32(ST0, &env->fp_status);
4155 if (val != (int16_t)val)
4156 val = -32768;
4157 return val;
4158}
4159
4160#ifndef VBOX
4161int32_t helper_fistl_ST0(void)
4162#else
4163RTCCINTREG helper_fistl_ST0(void)
4164#endif
4165{
4166 int32_t val;
4167 val = floatx_to_int32(ST0, &env->fp_status);
4168 return val;
4169}
4170
4171int64_t helper_fistll_ST0(void)
4172{
4173 int64_t val;
4174 val = floatx_to_int64(ST0, &env->fp_status);
4175 return val;
4176}
4177
4178#ifndef VBOX
4179int32_t helper_fistt_ST0(void)
4180#else
4181RTCCINTREG helper_fistt_ST0(void)
4182#endif
4183{
4184 int32_t val;
4185 val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
4186 if (val != (int16_t)val)
4187 val = -32768;
4188 return val;
4189}
4190
4191#ifndef VBOX
4192int32_t helper_fisttl_ST0(void)
4193#else
4194RTCCINTREG helper_fisttl_ST0(void)
4195#endif
4196{
4197 int32_t val;
4198 val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
4199 return val;
4200}
4201
4202int64_t helper_fisttll_ST0(void)
4203{
4204 int64_t val;
4205 val = floatx_to_int64_round_to_zero(ST0, &env->fp_status);
4206 return val;
4207}
4208
4209void helper_fldt_ST0(target_ulong ptr)
4210{
4211 int new_fpstt;
4212 new_fpstt = (env->fpstt - 1) & 7;
4213 env->fpregs[new_fpstt].d = helper_fldt(ptr);
4214 env->fpstt = new_fpstt;
4215 env->fptags[new_fpstt] = 0; /* validate stack entry */
4216}
4217
4218void helper_fstt_ST0(target_ulong ptr)
4219{
4220 helper_fstt(ST0, ptr);
4221}
4222
4223void helper_fpush(void)
4224{
4225 fpush();
4226}
4227
4228void helper_fpop(void)
4229{
4230 fpop();
4231}
4232
4233void helper_fdecstp(void)
4234{
4235 env->fpstt = (env->fpstt - 1) & 7;
4236 env->fpus &= (~0x4700);
4237}
4238
4239void helper_fincstp(void)
4240{
4241 env->fpstt = (env->fpstt + 1) & 7;
4242 env->fpus &= (~0x4700);
4243}
4244
4245/* FPU move */
4246
4247void helper_ffree_STN(int st_index)
4248{
4249 env->fptags[(env->fpstt + st_index) & 7] = 1;
4250}
4251
4252void helper_fmov_ST0_FT0(void)
4253{
4254 ST0 = FT0;
4255}
4256
4257void helper_fmov_FT0_STN(int st_index)
4258{
4259 FT0 = ST(st_index);
4260}
4261
4262void helper_fmov_ST0_STN(int st_index)
4263{
4264 ST0 = ST(st_index);
4265}
4266
4267void helper_fmov_STN_ST0(int st_index)
4268{
4269 ST(st_index) = ST0;
4270}
4271
4272void helper_fxchg_ST0_STN(int st_index)
4273{
4274 CPU86_LDouble tmp;
4275 tmp = ST(st_index);
4276 ST(st_index) = ST0;
4277 ST0 = tmp;
4278}
4279
4280/* FPU operations */
4281
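/* Map floatx_compare() results (indexed by result + 1: less, equal,
   greater, unordered) onto the C3,C2,C0 condition code patterns. */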
4282static const int fcom_ccval[4] = {0x0100, 0x4000, 0x0000, 0x4500};
4283
4284void helper_fcom_ST0_FT0(void)
4285{
4286 int ret;
4287
4288 ret = floatx_compare(ST0, FT0, &env->fp_status);
4289 env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
4290 FORCE_RET();
4291}
4292
4293void helper_fucom_ST0_FT0(void)
4294{
4295 int ret;
4296
4297 ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
4298 env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
4299 FORCE_RET();
4300}
4301
4302static const int fcomi_ccval[4] = {CC_C, CC_Z, 0, CC_Z | CC_P | CC_C};
4303
4304void helper_fcomi_ST0_FT0(void)
4305{
4306 int eflags;
4307 int ret;
4308
4309 ret = floatx_compare(ST0, FT0, &env->fp_status);
4310 eflags = cc_table[CC_OP].compute_all();
4311 eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
4312 CC_SRC = eflags;
4313 FORCE_RET();
4314}
4315
4316void helper_fucomi_ST0_FT0(void)
4317{
4318 int eflags;
4319 int ret;
4320
4321 ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
4322 eflags = cc_table[CC_OP].compute_all();
4323 eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
4324 CC_SRC = eflags;
4325 FORCE_RET();
4326}
4327
4328void helper_fadd_ST0_FT0(void)
4329{
4330 ST0 += FT0;
4331}
4332
4333void helper_fmul_ST0_FT0(void)
4334{
4335 ST0 *= FT0;
4336}
4337
4338void helper_fsub_ST0_FT0(void)
4339{
4340 ST0 -= FT0;
4341}
4342
4343void helper_fsubr_ST0_FT0(void)
4344{
4345 ST0 = FT0 - ST0;
4346}
4347
4348void helper_fdiv_ST0_FT0(void)
4349{
4350 ST0 = helper_fdiv(ST0, FT0);
4351}
4352
4353void helper_fdivr_ST0_FT0(void)
4354{
4355 ST0 = helper_fdiv(FT0, ST0);
4356}
4357
4358/* fp operations between STN and ST0 */
4359
4360void helper_fadd_STN_ST0(int st_index)
4361{
4362 ST(st_index) += ST0;
4363}
4364
4365void helper_fmul_STN_ST0(int st_index)
4366{
4367 ST(st_index) *= ST0;
4368}
4369
4370void helper_fsub_STN_ST0(int st_index)
4371{
4372 ST(st_index) -= ST0;
4373}
4374
4375void helper_fsubr_STN_ST0(int st_index)
4376{
4377 CPU86_LDouble *p;
4378 p = &ST(st_index);
4379 *p = ST0 - *p;
4380}
4381
4382void helper_fdiv_STN_ST0(int st_index)
4383{
4384 CPU86_LDouble *p;
4385 p = &ST(st_index);
4386 *p = helper_fdiv(*p, ST0);
4387}
4388
4389void helper_fdivr_STN_ST0(int st_index)
4390{
4391 CPU86_LDouble *p;
4392 p = &ST(st_index);
4393 *p = helper_fdiv(ST0, *p);
4394}
4395
4396/* misc FPU operations */
4397void helper_fchs_ST0(void)
4398{
4399 ST0 = floatx_chs(ST0);
4400}
4401
4402void helper_fabs_ST0(void)
4403{
4404 ST0 = floatx_abs(ST0);
4405}
4406
4407void helper_fld1_ST0(void)
4408{
4409 ST0 = f15rk[1];
4410}
4411
4412void helper_fldl2t_ST0(void)
4413{
4414 ST0 = f15rk[6];
4415}
4416
4417void helper_fldl2e_ST0(void)
4418{
4419 ST0 = f15rk[5];
4420}
4421
4422void helper_fldpi_ST0(void)
4423{
4424 ST0 = f15rk[2];
4425}
4426
4427void helper_fldlg2_ST0(void)
4428{
4429 ST0 = f15rk[3];
4430}
4431
4432void helper_fldln2_ST0(void)
4433{
4434 ST0 = f15rk[4];
4435}
4436
4437void helper_fldz_ST0(void)
4438{
4439 ST0 = f15rk[0];
4440}
4441
4442void helper_fldz_FT0(void)
4443{
4444 FT0 = f15rk[0];
4445}
4446
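/* FNSTSW: return the status word with the current top-of-stack index
   merged into bits 11-13. */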
4447#ifndef VBOX
4448uint32_t helper_fnstsw(void)
4449#else
4450RTCCUINTREG helper_fnstsw(void)
4451#endif
4452{
4453 return (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
4454}
4455
4456#ifndef VBOX
4457uint32_t helper_fnstcw(void)
4458#else
4459RTCCUINTREG helper_fnstcw(void)
4460#endif
4461{
4462 return env->fpuc;
4463}
4464
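/* Propagate the rounding control (and, with FLOATX80, the precision
   control) bits of the FPU control word to the softfloat status. */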
4465static void update_fp_status(void)
4466{
4467 int rnd_type;
4468
4469 /* set rounding mode */
4470 switch(env->fpuc & RC_MASK) {
4471 default:
4472 case RC_NEAR:
4473 rnd_type = float_round_nearest_even;
4474 break;
4475 case RC_DOWN:
4476 rnd_type = float_round_down;
4477 break;
4478 case RC_UP:
4479 rnd_type = float_round_up;
4480 break;
4481 case RC_CHOP:
4482 rnd_type = float_round_to_zero;
4483 break;
4484 }
4485 set_float_rounding_mode(rnd_type, &env->fp_status);
4486#ifdef FLOATX80
4487 switch((env->fpuc >> 8) & 3) {
4488 case 0:
4489 rnd_type = 32;
4490 break;
4491 case 2:
4492 rnd_type = 64;
4493 break;
4494 case 3:
4495 default:
4496 rnd_type = 80;
4497 break;
4498 }
4499 set_floatx80_rounding_precision(rnd_type, &env->fp_status);
4500#endif
4501}
4502
4503void helper_fldcw(uint32_t val)
4504{
4505 env->fpuc = val;
4506 update_fp_status();
4507}
4508
4509void helper_fclex(void)
4510{
4511 env->fpus &= 0x7f00;
4512}
4513
4514void helper_fwait(void)
4515{
4516 if (env->fpus & FPUS_SE)
4517 fpu_raise_exception();
4518 FORCE_RET();
4519}
4520
4521void helper_fninit(void)
4522{
4523 env->fpus = 0;
4524 env->fpstt = 0;
4525 env->fpuc = 0x37f;
4526 env->fptags[0] = 1;
4527 env->fptags[1] = 1;
4528 env->fptags[2] = 1;
4529 env->fptags[3] = 1;
4530 env->fptags[4] = 1;
4531 env->fptags[5] = 1;
4532 env->fptags[6] = 1;
4533 env->fptags[7] = 1;
4534}
4535
4536/* BCD ops */
4537
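/* FBLD: load an 80-bit packed BCD value - 9 bytes of two decimal digits
   each (least significant byte first) plus a sign byte at offset 9. */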
4538void helper_fbld_ST0(target_ulong ptr)
4539{
4540 CPU86_LDouble tmp;
4541 uint64_t val;
4542 unsigned int v;
4543 int i;
4544
4545 val = 0;
4546 for(i = 8; i >= 0; i--) {
4547 v = ldub(ptr + i);
4548 val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
4549 }
4550 tmp = val;
4551 if (ldub(ptr + 9) & 0x80)
4552 tmp = -tmp;
4553 fpush();
4554 ST0 = tmp;
4555}
4556
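/* FBST: store ST0 as 80-bit packed BCD - sign byte at offset 9, then the
   decimal digits two per byte, zero-padding the remaining bytes. */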
4557void helper_fbst_ST0(target_ulong ptr)
4558{
4559 int v;
4560 target_ulong mem_ref, mem_end;
4561 int64_t val;
4562
4563 val = floatx_to_int64(ST0, &env->fp_status);
4564 mem_ref = ptr;
4565 mem_end = mem_ref + 9;
4566 if (val < 0) {
4567 stb(mem_end, 0x80);
4568 val = -val;
4569 } else {
4570 stb(mem_end, 0x00);
4571 }
4572 while (mem_ref < mem_end) {
4573 if (val == 0)
4574 break;
4575 v = val % 100;
4576 val = val / 100;
4577 v = ((v / 10) << 4) | (v % 10);
4578 stb(mem_ref++, v);
4579 }
4580 while (mem_ref < mem_end) {
4581 stb(mem_ref++, 0);
4582 }
4583}
4584
4585void helper_f2xm1(void)
4586{
4587 ST0 = pow(2.0,ST0) - 1.0;
4588}
4589
4590void helper_fyl2x(void)
4591{
4592 CPU86_LDouble fptemp;
4593
4594 fptemp = ST0;
4595 if (fptemp>0.0){
4596 fptemp = log(fptemp)/log(2.0); /* log2(ST) */
4597 ST1 *= fptemp;
4598 fpop();
4599 } else {
4600 env->fpus &= (~0x4700);
4601 env->fpus |= 0x400;
4602 }
4603}
4604
4605void helper_fptan(void)
4606{
4607 CPU86_LDouble fptemp;
4608
4609 fptemp = ST0;
4610 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4611 env->fpus |= 0x400;
4612 } else {
4613 ST0 = tan(fptemp);
4614 fpush();
4615 ST0 = 1.0;
4616 env->fpus &= (~0x400); /* C2 <-- 0 */
4617 /* the above code is for |arg| < 2**52 only */
4618 }
4619}
4620
4621void helper_fpatan(void)
4622{
4623 CPU86_LDouble fptemp, fpsrcop;
4624
4625 fpsrcop = ST1;
4626 fptemp = ST0;
4627 ST1 = atan2(fpsrcop,fptemp);
4628 fpop();
4629}
4630
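/* FXTRACT: replace ST0 with its unbiased exponent, then push the
   significand (exponent reset to the bias) as the new ST0. */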
4631void helper_fxtract(void)
4632{
4633 CPU86_LDoubleU temp;
4634 unsigned int expdif;
4635
4636 temp.d = ST0;
4637 expdif = EXPD(temp) - EXPBIAS;
4638 /* DP exponent bias */
4639 ST0 = expdif;
4640 fpush();
4641 BIASEXPONENT(temp);
4642 ST0 = temp.d;
4643}
4644
4645#ifdef VBOX
4646#ifdef _MSC_VER
4647/* MSC cannot divide by zero */
4648extern double _Nan;
4649#define NaN _Nan
4650#else
4651#define NaN (0.0 / 0.0)
4652#endif
4653#endif /* VBOX */
4654
4655void helper_fprem1(void)
4656{
4657 CPU86_LDouble dblq, fpsrcop, fptemp;
4658 CPU86_LDoubleU fpsrcop1, fptemp1;
4659 int expdif;
4660 signed long long int q;
4661
4662#ifndef VBOX /* Unfortunately, we cannot handle isinf/isnan easily in wrapper */
4663 if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
4664#else
4665 if ((ST0 != ST0) || (ST1 != ST1) || (ST1 == 0.0)) {
4666#endif
4667 ST0 = 0.0 / 0.0; /* NaN */
4668 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4669 return;
4670 }
4671
4672 fpsrcop = ST0;
4673 fptemp = ST1;
4674 fpsrcop1.d = fpsrcop;
4675 fptemp1.d = fptemp;
4676 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
4677
4678 if (expdif < 0) {
4679 /* optimisation? taken from the AMD docs */
4680 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4681 /* ST0 is unchanged */
4682 return;
4683 }
4684
4685 if (expdif < 53) {
4686 dblq = fpsrcop / fptemp;
4687 /* round dblq towards nearest integer */
4688 dblq = rint(dblq);
4689 ST0 = fpsrcop - fptemp * dblq;
4690
4691 /* convert dblq to q by truncating towards zero */
4692 if (dblq < 0.0)
4693 q = (signed long long int)(-dblq);
4694 else
4695 q = (signed long long int)dblq;
4696
4697 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4698 /* (C0,C3,C1) <-- (q2,q1,q0) */
4699 env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
4700 env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
4701 env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
4702 } else {
4703 env->fpus |= 0x400; /* C2 <-- 1 */
4704 fptemp = pow(2.0, expdif - 50);
4705 fpsrcop = (ST0 / ST1) / fptemp;
4706 /* fpsrcop = integer obtained by chopping */
4707 fpsrcop = (fpsrcop < 0.0) ?
4708 -(floor(fabs(fpsrcop))) : floor(fpsrcop);
4709 ST0 -= (ST1 * fpsrcop * fptemp);
4710 }
4711}
4712
4713void helper_fprem(void)
4714{
4715 CPU86_LDouble dblq, fpsrcop, fptemp;
4716 CPU86_LDoubleU fpsrcop1, fptemp1;
4717 int expdif;
4718 signed long long int q;
4719
4720#ifndef VBOX /* Unfortunately, we cannot easily handle isinf/isnan in wrapper */
4721 if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
4722#else
4723 if ((ST0 != ST0) || (ST1 != ST1) || (ST1 == 0.0)) {
4724#endif
4725 ST0 = 0.0 / 0.0; /* NaN */
4726 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4727 return;
4728 }
4729
4730 fpsrcop = (CPU86_LDouble)ST0;
4731 fptemp = (CPU86_LDouble)ST1;
4732 fpsrcop1.d = fpsrcop;
4733 fptemp1.d = fptemp;
4734 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
4735
4736 if (expdif < 0) {
4737 /* optimisation? taken from the AMD docs */
4738 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4739 /* ST0 is unchanged */
4740 return;
4741 }
4742
4743 if ( expdif < 53 ) {
4744 dblq = fpsrcop/*ST0*/ / fptemp/*ST1*/;
4745 /* round dblq towards zero */
4746 dblq = (dblq < 0.0) ? ceil(dblq) : floor(dblq);
4747 ST0 = fpsrcop/*ST0*/ - fptemp * dblq;
4748
4749 /* convert dblq to q by truncating towards zero */
4750 if (dblq < 0.0)
4751 q = (signed long long int)(-dblq);
4752 else
4753 q = (signed long long int)dblq;
4754
4755 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4756 /* (C0,C3,C1) <-- (q2,q1,q0) */
4757 env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
4758 env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
4759 env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
4760 } else {
4761 int N = 32 + (expdif % 32); /* as per AMD docs */
4762 env->fpus |= 0x400; /* C2 <-- 1 */
4763 fptemp = pow(2.0, (double)(expdif - N));
4764 fpsrcop = (ST0 / ST1) / fptemp;
4765 /* fpsrcop = integer obtained by chopping */
4766 fpsrcop = (fpsrcop < 0.0) ?
4767 -(floor(fabs(fpsrcop))) : floor(fpsrcop);
4768 ST0 -= (ST1 * fpsrcop * fptemp);
4769 }
4770}
4771
4772void helper_fyl2xp1(void)
4773{
4774 CPU86_LDouble fptemp;
4775
4776 fptemp = ST0;
4777 if ((fptemp+1.0)>0.0) {
4778 fptemp = log(fptemp+1.0) / log(2.0); /* log2(ST+1.0) */
4779 ST1 *= fptemp;
4780 fpop();
4781 } else {
4782 env->fpus &= (~0x4700);
4783 env->fpus |= 0x400;
4784 }
4785}
4786
4787void helper_fsqrt(void)
4788{
4789 CPU86_LDouble fptemp;
4790
4791 fptemp = ST0;
4792 if (fptemp<0.0) {
4793 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4794 env->fpus |= 0x400;
4795 }
4796 ST0 = sqrt(fptemp);
4797}
4798
4799void helper_fsincos(void)
4800{
4801 CPU86_LDouble fptemp;
4802
4803 fptemp = ST0;
4804 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4805 env->fpus |= 0x400;
4806 } else {
4807 ST0 = sin(fptemp);
4808 fpush();
4809 ST0 = cos(fptemp);
4810 env->fpus &= (~0x400); /* C2 <-- 0 */
4811 /* the above code is for |arg| < 2**63 only */
4812 }
4813}
4814
4815void helper_frndint(void)
4816{
4817 ST0 = floatx_round_to_int(ST0, &env->fp_status);
4818}
4819
4820void helper_fscale(void)
4821{
4822 ST0 = ldexp (ST0, (int)(ST1));
4823}
4824
4825void helper_fsin(void)
4826{
4827 CPU86_LDouble fptemp;
4828
4829 fptemp = ST0;
4830 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4831 env->fpus |= 0x400;
4832 } else {
4833 ST0 = sin(fptemp);
4834 env->fpus &= (~0x400); /* C2 <-- 0 */
4835 /* the above code is for |arg| < 2**53 only */
4836 }
4837}
4838
4839void helper_fcos(void)
4840{
4841 CPU86_LDouble fptemp;
4842
4843 fptemp = ST0;
4844 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4845 env->fpus |= 0x400;
4846 } else {
4847 ST0 = cos(fptemp);
4848 env->fpus &= (~0x400); /* C2 <-- 0 */
4849 /* the above code is for |arg| < 2**63 only */
4850 }
4851}
4852
4853void helper_fxam_ST0(void)
4854{
4855 CPU86_LDoubleU temp;
4856 int expdif;
4857
4858 temp.d = ST0;
4859
4860 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4861 if (SIGND(temp))
4862 env->fpus |= 0x200; /* C1 <-- 1 */
4863
4864 /* XXX: test fptags too */
4865 expdif = EXPD(temp);
4866 if (expdif == MAXEXPD) {
4867#ifdef USE_X86LDOUBLE
4868 if (MANTD(temp) == 0x8000000000000000ULL)
4869#else
4870 if (MANTD(temp) == 0)
4871#endif
4872 env->fpus |= 0x500 /*Infinity*/;
4873 else
4874 env->fpus |= 0x100 /*NaN*/;
4875 } else if (expdif == 0) {
4876 if (MANTD(temp) == 0)
4877 env->fpus |= 0x4000 /*Zero*/;
4878 else
4879 env->fpus |= 0x4400 /*Denormal*/;
4880 } else {
4881 env->fpus |= 0x400;
4882 }
4883}
4884
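/* FSTENV: build the two-bit tag word (valid/zero/special/empty) from the
   register contents and store the 16- or 32-bit FPU environment image;
   the instruction and operand pointers are stored as zero. */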
4885void helper_fstenv(target_ulong ptr, int data32)
4886{
4887 int fpus, fptag, exp, i;
4888 uint64_t mant;
4889 CPU86_LDoubleU tmp;
4890
4891 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
4892 fptag = 0;
4893 for (i=7; i>=0; i--) {
4894 fptag <<= 2;
4895 if (env->fptags[i]) {
4896 fptag |= 3;
4897 } else {
4898 tmp.d = env->fpregs[i].d;
4899 exp = EXPD(tmp);
4900 mant = MANTD(tmp);
4901 if (exp == 0 && mant == 0) {
4902 /* zero */
4903 fptag |= 1;
4904 } else if (exp == 0 || exp == MAXEXPD
4905#ifdef USE_X86LDOUBLE
4906 || (mant & (1LL << 63)) == 0
4907#endif
4908 ) {
4909 /* NaNs, infinity, denormal */
4910 fptag |= 2;
4911 }
4912 }
4913 }
4914 if (data32) {
4915 /* 32 bit */
4916 stl(ptr, env->fpuc);
4917 stl(ptr + 4, fpus);
4918 stl(ptr + 8, fptag);
4919 stl(ptr + 12, 0); /* fpip */
4920 stl(ptr + 16, 0); /* fpcs */
4921 stl(ptr + 20, 0); /* fpoo */
4922 stl(ptr + 24, 0); /* fpos */
4923 } else {
4924 /* 16 bit */
4925 stw(ptr, env->fpuc);
4926 stw(ptr + 2, fpus);
4927 stw(ptr + 4, fptag);
4928 stw(ptr + 6, 0);
4929 stw(ptr + 8, 0);
4930 stw(ptr + 10, 0);
4931 stw(ptr + 12, 0);
4932 }
4933}
4934
4935void helper_fldenv(target_ulong ptr, int data32)
4936{
4937 int i, fpus, fptag;
4938
4939 if (data32) {
4940 env->fpuc = lduw(ptr);
4941 fpus = lduw(ptr + 4);
4942 fptag = lduw(ptr + 8);
4943 }
4944 else {
4945 env->fpuc = lduw(ptr);
4946 fpus = lduw(ptr + 2);
4947 fptag = lduw(ptr + 4);
4948 }
4949 env->fpstt = (fpus >> 11) & 7;
4950 env->fpus = fpus & ~0x3800;
4951 for(i = 0;i < 8; i++) {
4952 env->fptags[i] = ((fptag & 3) == 3);
4953 fptag >>= 2;
4954 }
4955}
4956
4957void helper_fsave(target_ulong ptr, int data32)
4958{
4959 CPU86_LDouble tmp;
4960 int i;
4961
4962 helper_fstenv(ptr, data32);
4963
4964 ptr += (14 << data32);
4965 for(i = 0;i < 8; i++) {
4966 tmp = ST(i);
4967 helper_fstt(tmp, ptr);
4968 ptr += 10;
4969 }
4970
4971 /* fninit */
4972 env->fpus = 0;
4973 env->fpstt = 0;
4974 env->fpuc = 0x37f;
4975 env->fptags[0] = 1;
4976 env->fptags[1] = 1;
4977 env->fptags[2] = 1;
4978 env->fptags[3] = 1;
4979 env->fptags[4] = 1;
4980 env->fptags[5] = 1;
4981 env->fptags[6] = 1;
4982 env->fptags[7] = 1;
4983}
4984
4985void helper_frstor(target_ulong ptr, int data32)
4986{
4987 CPU86_LDouble tmp;
4988 int i;
4989
4990 helper_fldenv(ptr, data32);
4991 ptr += (14 << data32);
4992
4993 for(i = 0;i < 8; i++) {
4994 tmp = helper_fldt(ptr);
4995 ST(i) = tmp;
4996 ptr += 10;
4997 }
4998}
4999
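/* FXSAVE: store the x87 state into the 512-byte FXSAVE image; MXCSR and
   the XMM registers are written only when CR4.OSFXSR is set. */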
5000void helper_fxsave(target_ulong ptr, int data64)
5001{
5002 int fpus, fptag, i, nb_xmm_regs;
5003 CPU86_LDouble tmp;
5004 target_ulong addr;
5005
5006 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
5007 fptag = 0;
5008 for(i = 0; i < 8; i++) {
5009 fptag |= (env->fptags[i] << i);
5010 }
5011 stw(ptr, env->fpuc);
5012 stw(ptr + 2, fpus);
5013 stw(ptr + 4, fptag ^ 0xff);
5014#ifdef TARGET_X86_64
5015 if (data64) {
5016 stq(ptr + 0x08, 0); /* rip */
5017 stq(ptr + 0x10, 0); /* rdp */
5018 } else
5019#endif
5020 {
5021 stl(ptr + 0x08, 0); /* eip */
5022 stl(ptr + 0x0c, 0); /* sel */
5023 stl(ptr + 0x10, 0); /* dp */
5024 stl(ptr + 0x14, 0); /* sel */
5025 }
5026
5027 addr = ptr + 0x20;
5028 for(i = 0;i < 8; i++) {
5029 tmp = ST(i);
5030 helper_fstt(tmp, addr);
5031 addr += 16;
5032 }
5033
5034 if (env->cr[4] & CR4_OSFXSR_MASK) {
5035 /* XXX: finish it */
5036 stl(ptr + 0x18, env->mxcsr); /* mxcsr */
5037 stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
5038 if (env->hflags & HF_CS64_MASK)
5039 nb_xmm_regs = 16;
5040 else
5041 nb_xmm_regs = 8;
5042 addr = ptr + 0xa0;
5043 for(i = 0; i < nb_xmm_regs; i++) {
5044 stq(addr, env->xmm_regs[i].XMM_Q(0));
5045 stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
5046 addr += 16;
5047 }
5048 }
5049}
5050
5051void helper_fxrstor(target_ulong ptr, int data64)
5052{
5053 int i, fpus, fptag, nb_xmm_regs;
5054 CPU86_LDouble tmp;
5055 target_ulong addr;
5056
5057 env->fpuc = lduw(ptr);
5058 fpus = lduw(ptr + 2);
5059 fptag = lduw(ptr + 4);
5060 env->fpstt = (fpus >> 11) & 7;
5061 env->fpus = fpus & ~0x3800;
5062 fptag ^= 0xff;
5063 for(i = 0;i < 8; i++) {
5064 env->fptags[i] = ((fptag >> i) & 1);
5065 }
5066
5067 addr = ptr + 0x20;
5068 for(i = 0;i < 8; i++) {
5069 tmp = helper_fldt(addr);
5070 ST(i) = tmp;
5071 addr += 16;
5072 }
5073
5074 if (env->cr[4] & CR4_OSFXSR_MASK) {
5075 /* XXX: finish it */
5076 env->mxcsr = ldl(ptr + 0x18);
5077 //ldl(ptr + 0x1c);
5078 if (env->hflags & HF_CS64_MASK)
5079 nb_xmm_regs = 16;
5080 else
5081 nb_xmm_regs = 8;
5082 addr = ptr + 0xa0;
5083 for(i = 0; i < nb_xmm_regs; i++) {
5084#if !defined(VBOX) || __GNUC__ < 4
5085 env->xmm_regs[i].XMM_Q(0) = ldq(addr);
5086 env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
5087#else /* VBOX + __GNUC__ >= 4: gcc 4.x compiler bug - it runs out of registers for the 64-bit value. */
5088# if 1
5089 env->xmm_regs[i].XMM_L(0) = ldl(addr);
5090 env->xmm_regs[i].XMM_L(1) = ldl(addr + 4);
5091 env->xmm_regs[i].XMM_L(2) = ldl(addr + 8);
5092 env->xmm_regs[i].XMM_L(3) = ldl(addr + 12);
5093# else
5094 /* this works fine on Mac OS X, gcc 4.0.1 */
5095 uint64_t u64 = ldq(addr);
5096 env->xmm_regs[i].XMM_Q(0) = u64;
5097 u64 = ldq(addr + 8);
5098 env->xmm_regs[i].XMM_Q(1) = u64;
5099# endif
5100#endif
5101 addr += 16;
5102 }
5103 }
5104}
5105
5106#ifndef USE_X86LDOUBLE
5107
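/* Conversion between the 64-bit double used as CPU86_LDouble here and the
   80-bit register image (64-bit mantissa plus sign/exponent word). */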
5108void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
5109{
5110 CPU86_LDoubleU temp;
5111 int e;
5112
5113 temp.d = f;
5114 /* mantissa */
5115 *pmant = (MANTD(temp) << 11) | (1LL << 63);
5116 /* exponent + sign */
5117 e = EXPD(temp) - EXPBIAS + 16383;
5118 e |= SIGND(temp) >> 16;
5119 *pexp = e;
5120}
5121
5122CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
5123{
5124 CPU86_LDoubleU temp;
5125 int e;
5126 uint64_t ll;
5127
5128 /* XXX: handle overflow ? */
5129 e = (upper & 0x7fff) - 16383 + EXPBIAS; /* exponent */
5130 e |= (upper >> 4) & 0x800; /* sign */
5131 ll = (mant >> 11) & ((1LL << 52) - 1);
5132#ifdef __arm__
5133 temp.l.upper = (e << 20) | (ll >> 32);
5134 temp.l.lower = ll;
5135#else
5136 temp.ll = ll | ((uint64_t)e << 52);
5137#endif
5138 return temp.d;
5139}
5140
5141#else
5142
5143void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
5144{
5145 CPU86_LDoubleU temp;
5146
5147 temp.d = f;
5148 *pmant = temp.l.lower;
5149 *pexp = temp.l.upper;
5150}
5151
5152CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
5153{
5154 CPU86_LDoubleU temp;
5155
5156 temp.l.upper = upper;
5157 temp.l.lower = mant;
5158 return temp.d;
5159}
5160#endif
5161
5162#ifdef TARGET_X86_64
5163
5164//#define DEBUG_MULDIV
5165
5166static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
5167{
5168 *plow += a;
5169 /* carry test */
5170 if (*plow < a)
5171 (*phigh)++;
5172 *phigh += b;
5173}
5174
5175static void neg128(uint64_t *plow, uint64_t *phigh)
5176{
5177 *plow = ~ *plow;
5178 *phigh = ~ *phigh;
5179 add128(plow, phigh, 1, 0);
5180}
5181
5182/* return TRUE if overflow */
5183static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
5184{
5185 uint64_t q, r, a1, a0;
5186 int i, qb, ab;
5187
5188 a0 = *plow;
5189 a1 = *phigh;
5190 if (a1 == 0) {
5191 q = a0 / b;
5192 r = a0 % b;
5193 *plow = q;
5194 *phigh = r;
5195 } else {
5196 if (a1 >= b)
5197 return 1;
5198 /* XXX: use a better algorithm */
5199 for(i = 0; i < 64; i++) {
5200 ab = a1 >> 63;
5201 a1 = (a1 << 1) | (a0 >> 63);
5202 if (ab || a1 >= b) {
5203 a1 -= b;
5204 qb = 1;
5205 } else {
5206 qb = 0;
5207 }
5208 a0 = (a0 << 1) | qb;
5209 }
5210#if defined(DEBUG_MULDIV)
5211 printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64 ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n",
5212 *phigh, *plow, b, a0, a1);
5213#endif
5214 *plow = a0;
5215 *phigh = a1;
5216 }
5217 return 0;
5218}
5219
5220/* return TRUE if overflow */
5221static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
5222{
5223 int sa, sb;
5224 sa = ((int64_t)*phigh < 0);
5225 if (sa)
5226 neg128(plow, phigh);
5227 sb = (b < 0);
5228 if (sb)
5229 b = -b;
5230 if (div64(plow, phigh, b) != 0)
5231 return 1;
5232 if (sa ^ sb) {
5233 if (*plow > (1ULL << 63))
5234 return 1;
5235 *plow = - *plow;
5236 } else {
5237 if (*plow >= (1ULL << 63))
5238 return 1;
5239 }
5240 if (sa)
5241 *phigh = - *phigh;
5242 return 0;
5243}
5244
5245void helper_mulq_EAX_T0(target_ulong t0)
5246{
5247 uint64_t r0, r1;
5248
5249 mulu64(&r0, &r1, EAX, t0);
5250 EAX = r0;
5251 EDX = r1;
5252 CC_DST = r0;
5253 CC_SRC = r1;
5254}
5255
5256void helper_imulq_EAX_T0(target_ulong t0)
5257{
5258 uint64_t r0, r1;
5259
5260 muls64(&r0, &r1, EAX, t0);
5261 EAX = r0;
5262 EDX = r1;
5263 CC_DST = r0;
5264 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
5265}
5266
5267target_ulong helper_imulq_T0_T1(target_ulong t0, target_ulong t1)
5268{
5269 uint64_t r0, r1;
5270
5271 muls64(&r0, &r1, t0, t1);
5272 CC_DST = r0;
5273 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
5274 return r0;
5275}
5276
5277void helper_divq_EAX(target_ulong t0)
5278{
5279 uint64_t r0, r1;
5280 if (t0 == 0) {
5281 raise_exception(EXCP00_DIVZ);
5282 }
5283 r0 = EAX;
5284 r1 = EDX;
5285 if (div64(&r0, &r1, t0))
5286 raise_exception(EXCP00_DIVZ);
5287 EAX = r0;
5288 EDX = r1;
5289}
5290
5291void helper_idivq_EAX(target_ulong t0)
5292{
5293 uint64_t r0, r1;
5294 if (t0 == 0) {
5295 raise_exception(EXCP00_DIVZ);
5296 }
5297 r0 = EAX;
5298 r1 = EDX;
5299 if (idiv64(&r0, &r1, t0))
5300 raise_exception(EXCP00_DIVZ);
5301 EAX = r0;
5302 EDX = r1;
5303}
5304#endif
5305
5306static void do_hlt(void)
5307{
5308 env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
5309 env->halted = 1;
5310 env->exception_index = EXCP_HLT;
5311 cpu_loop_exit();
5312}
5313
5314void helper_hlt(int next_eip_addend)
5315{
5316 helper_svm_check_intercept_param(SVM_EXIT_HLT, 0);
5317 EIP += next_eip_addend;
5318
5319 do_hlt();
5320}
5321
5322void helper_monitor(target_ulong ptr)
5323{
5324 if ((uint32_t)ECX != 0)
5325 raise_exception(EXCP0D_GPF);
5326 /* XXX: store address ? */
5327 helper_svm_check_intercept_param(SVM_EXIT_MONITOR, 0);
5328}
5329
5330void helper_mwait(int next_eip_addend)
5331{
5332 if ((uint32_t)ECX != 0)
5333 raise_exception(EXCP0D_GPF);
5334#ifdef VBOX
5335 helper_hlt(next_eip_addend);
5336#else
5337 helper_svm_check_intercept_param(SVM_EXIT_MWAIT, 0);
5338 EIP += next_eip_addend;
5339
5340 /* XXX: not complete but not completely erroneous */
5341 if (env->cpu_index != 0 || env->next_cpu != NULL) {
5342 /* more than one CPU: do not sleep because another CPU may
5343 wake this one */
5344 } else {
5345 do_hlt();
5346 }
5347#endif
5348}
5349
5350void helper_debug(void)
5351{
5352 env->exception_index = EXCP_DEBUG;
5353 cpu_loop_exit();
5354}
5355
5356void helper_raise_interrupt(int intno, int next_eip_addend)
5357{
5358 raise_interrupt(intno, 1, 0, next_eip_addend);
5359}
5360
5361void helper_raise_exception(int exception_index)
5362{
5363 raise_exception(exception_index);
5364}
5365
5366void helper_cli(void)
5367{
5368 env->eflags &= ~IF_MASK;
5369}
5370
5371void helper_sti(void)
5372{
5373 env->eflags |= IF_MASK;
5374}
5375
5376#ifdef VBOX
5377void helper_cli_vme(void)
5378{
5379 env->eflags &= ~VIF_MASK;
5380}
5381
5382void helper_sti_vme(void)
5383{
5384 /* First check, then change eflags according to the AMD manual */
5385 if (env->eflags & VIP_MASK) {
5386 raise_exception(EXCP0D_GPF);
5387 }
5388 env->eflags |= VIF_MASK;
5389}
5390#endif
5391
5392#if 0
5393/* vm86plus instructions */
5394void helper_cli_vm(void)
5395{
5396 env->eflags &= ~VIF_MASK;
5397}
5398
5399void helper_sti_vm(void)
5400{
5401 env->eflags |= VIF_MASK;
5402 if (env->eflags & VIP_MASK) {
5403 raise_exception(EXCP0D_GPF);
5404 }
5405}
5406#endif
5407
5408void helper_set_inhibit_irq(void)
5409{
5410 env->hflags |= HF_INHIBIT_IRQ_MASK;
5411}
5412
5413void helper_reset_inhibit_irq(void)
5414{
5415 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
5416}
5417
5418void helper_boundw(target_ulong a0, int v)
5419{
5420 int low, high;
5421 low = ldsw(a0);
5422 high = ldsw(a0 + 2);
5423 v = (int16_t)v;
5424 if (v < low || v > high) {
5425 raise_exception(EXCP05_BOUND);
5426 }
5427 FORCE_RET();
5428}
5429
5430void helper_boundl(target_ulong a0, int v)
5431{
5432 int low, high;
5433 low = ldl(a0);
5434 high = ldl(a0 + 4);
5435 if (v < low || v > high) {
5436 raise_exception(EXCP05_BOUND);
5437 }
5438 FORCE_RET();
5439}
5440
5441static float approx_rsqrt(float a)
5442{
5443 return 1.0 / sqrt(a);
5444}
5445
5446static float approx_rcp(float a)
5447{
5448 return 1.0 / a;
5449}
5450
5451#if !defined(CONFIG_USER_ONLY)
5452
5453#define MMUSUFFIX _mmu
5454
5455#define SHIFT 0
5456#include "softmmu_template.h"
5457
5458#define SHIFT 1
5459#include "softmmu_template.h"
5460
5461#define SHIFT 2
5462#include "softmmu_template.h"
5463
5464#define SHIFT 3
5465#include "softmmu_template.h"
5466
5467#endif
5468
5469#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
5470/* This code assumes that real physical addresses always fit into a host CPU register,
5471 which is wrong in general, but true for our current use cases. */
5472RTCCUINTREG REGPARM __ldb_vbox_phys(RTCCUINTREG addr)
5473{
5474 return remR3PhysReadS8(addr);
5475}
5476RTCCUINTREG REGPARM __ldub_vbox_phys(RTCCUINTREG addr)
5477{
5478 return remR3PhysReadU8(addr);
5479}
5480void REGPARM __stb_vbox_phys(RTCCUINTREG addr, RTCCUINTREG val)
5481{
5482 remR3PhysWriteU8(addr, val);
5483}
5484RTCCUINTREG REGPARM __ldw_vbox_phys(RTCCUINTREG addr)
5485{
5486 return remR3PhysReadS16(addr);
5487}
5488RTCCUINTREG REGPARM __lduw_vbox_phys(RTCCUINTREG addr)
5489{
5490 return remR3PhysReadU16(addr);
5491}
5492void REGPARM __stw_vbox_phys(RTCCUINTREG addr, RTCCUINTREG val)
5493{
5494 remR3PhysWriteU16(addr, val);
5495}
5496RTCCUINTREG REGPARM __ldl_vbox_phys(RTCCUINTREG addr)
5497{
5498 return remR3PhysReadS32(addr);
5499}
5500RTCCUINTREG REGPARM __ldul_vbox_phys(RTCCUINTREG addr)
5501{
5502 return remR3PhysReadU32(addr);
5503}
5504void REGPARM __stl_vbox_phys(RTCCUINTREG addr, RTCCUINTREG val)
5505{
5506 remR3PhysWriteU32(addr, val);
5507}
5508uint64_t REGPARM __ldq_vbox_phys(RTCCUINTREG addr)
5509{
5510 return remR3PhysReadU64(addr);
5511}
5512void REGPARM __stq_vbox_phys(RTCCUINTREG addr, uint64_t val)
5513{
5514 remR3PhysWriteU64(addr, val);
5515}
5516#endif
5517
5518/* try to fill the TLB and return an exception if error. If retaddr is
5519 NULL, it means that the function was called in C code (i.e. not
5520 from generated code or from helper.c) */
5521/* XXX: fix it to restore all registers */
5522void tlb_fill(target_ulong addr, int is_write, int mmu_idx, void *retaddr)
5523{
5524 TranslationBlock *tb;
5525 int ret;
5526 unsigned long pc;
5527 CPUX86State *saved_env;
5528
5529 /* XXX: hack to restore env in all cases, even if not called from
5530 generated code */
5531 saved_env = env;
5532 env = cpu_single_env;
5533
5534 ret = cpu_x86_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
5535 if (ret) {
5536 if (retaddr) {
5537 /* now we have a real cpu fault */
5538 pc = (unsigned long)retaddr;
5539 tb = tb_find_pc(pc);
5540 if (tb) {
5541 /* the PC is inside the translated code. It means that we have
5542 a virtual CPU fault */
5543 cpu_restore_state(tb, env, pc, NULL);
5544 }
5545 }
5546 raise_exception_err(env->exception_index, env->error_code);
5547 }
5548 env = saved_env;
5549}
5550
5551#ifdef VBOX
5552
5553/**
5554 * Correctly computes the eflags.
5555 * @returns eflags.
5556 * @param env1 CPU environment.
5557 */
5558uint32_t raw_compute_eflags(CPUX86State *env1)
5559{
5560 CPUX86State *savedenv = env;
5561 uint32_t efl;
5562 env = env1;
5563 efl = compute_eflags();
5564 env = savedenv;
5565 return efl;
5566}
5567
5568/**
5569 * Reads a byte from a virtual address in the guest memory area.
5570 * XXX: is it working for any addresses? swapped out pages?
5571 * @returns The data byte read.
5572 * @param env1 CPU environment.
5573 * @param pvAddr GC Virtual address.
5574 */
5575uint8_t read_byte(CPUX86State *env1, target_ulong addr)
5576{
5577 CPUX86State *savedenv = env;
5578 uint8_t u8;
5579 env = env1;
5580 u8 = ldub_kernel(addr);
5581 env = savedenv;
5582 return u8;
5583}
5584
5585/**
5586 * Reads a 16-bit word from a virtual address in the guest memory area.
5587 * XXX: is it working for any addresses? swapped out pages?
5588 * @returns The data word read.
5589 * @param env1 CPU environment.
5590 * @param pvAddr GC Virtual address.
5591 */
5592uint16_t read_word(CPUX86State *env1, target_ulong addr)
5593{
5594 CPUX86State *savedenv = env;
5595 uint16_t u16;
5596 env = env1;
5597 u16 = lduw_kernel(addr);
5598 env = savedenv;
5599 return u16;
5600}
5601
5602/**
5603 * Reads a 32-bit dword from a virtual address in the guest memory area.
5604 * XXX: is it working for any addresses? swapped out pages?
5605 * @returns The data dword read.
5606 * @param env1 CPU environment.
5607 * @param pvAddr GC Virtual address.
5608 */
5609uint32_t read_dword(CPUX86State *env1, target_ulong addr)
5610{
5611 CPUX86State *savedenv = env;
5612 uint32_t u32;
5613 env = env1;
5614 u32 = ldl_kernel(addr);
5615 env = savedenv;
5616 return u32;
5617}
5618
5619/**
5620 * Writes a byte to a virtual address in the guest memory area.
5621 * XXX: is it working for any addresses? swapped out pages?
5623 * @param env1 CPU environment.
5624 * @param pvAddr GC Virtual address.
5625 * @param val byte value
5626 */
5627void write_byte(CPUX86State *env1, target_ulong addr, uint8_t val)
5628{
5629 CPUX86State *savedenv = env;
5630 env = env1;
5631 stb(addr, val);
5632 env = savedenv;
5633}
5634
5635void write_word(CPUX86State *env1, target_ulong addr, uint16_t val)
5636{
5637 CPUX86State *savedenv = env;
5638 env = env1;
5639 stw(addr, val);
5640 env = savedenv;
5641}
5642
5643void write_dword(CPUX86State *env1, target_ulong addr, uint32_t val)
5644{
5645 CPUX86State *savedenv = env;
5646 env = env1;
5647 stl(addr, val);
5648 env = savedenv;
5649}
5650
5651/**
5652 * Correctly loads a selector into a segment register, updating the internal
5653 * qemu data/caches.
5654 * @param env1 CPU environment.
5655 * @param seg_reg Segment register.
5656 * @param selector Selector to load.
5657 */
5658void sync_seg(CPUX86State *env1, int seg_reg, int selector)
5659{
5660 CPUX86State *savedenv = env;
5661 jmp_buf old_buf;
5662
5663 env = env1;
5664
5665 if ( env->eflags & X86_EFL_VM
5666 || !(env->cr[0] & X86_CR0_PE))
5667 {
5668 load_seg_vm(seg_reg, selector);
5669
5670 env = savedenv;
5671
5672 /* Successful sync. */
5673 env1->segs[seg_reg].newselector = 0;
5674 }
5675 else
5676 {
5677 /* For some reason it works even without saving/restoring the jump buffer, and since this
5678 code is time critical - let's not do that. */
5679#if 0
5680 memcpy(&old_buf, &env1->jmp_env, sizeof(old_buf));
5681#endif
5682 if (setjmp(env1->jmp_env) == 0)
5683 {
5684 if (seg_reg == R_CS)
5685 {
5686 uint32_t e1, e2;
5687 e1 = e2 = 0;
5688 load_segment(&e1, &e2, selector);
5689 cpu_x86_load_seg_cache(env, R_CS, selector,
5690 get_seg_base(e1, e2),
5691 get_seg_limit(e1, e2),
5692 e2);
5693 }
5694 else
5695 tss_load_seg(seg_reg, selector);
5696 env = savedenv;
5697
5698 /* Successful sync. */
5699 env1->segs[seg_reg].newselector = 0;
5700 }
5701 else
5702 {
5703 env = savedenv;
5704
5705 /* Postpone sync until the guest uses the selector. */
5706 env1->segs[seg_reg].selector = selector; /* hidden values are now incorrect, but will be resynced when this register is accessed. */
5707 env1->segs[seg_reg].newselector = selector;
5708 Log(("sync_seg: out of sync seg_reg=%d selector=%#x\n", seg_reg, selector));
5709 env1->exception_index = -1;
5710 env1->error_code = 0;
5711 env1->old_exception = -1;
5712 }
5713#if 0
5714 memcpy(&env1->jmp_env, &old_buf, sizeof(old_buf));
5715#endif
5716 }
5717
5718}
5719
5720DECLINLINE(void) tb_reset_jump(TranslationBlock *tb, int n)
5721{
5722 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
5723}
5724
5725
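/**
 * Translates exactly one guest instruction into a temporary TB and executes it,
 * repeating while EIP stays unchanged (e.g. for REP prefixes) unless an external
 * interrupt is pending; the TB is then invalidated and freed. If the executed
 * instruction inhibits interrupts (sti, mov ss), the following instruction is
 * emulated as well.
 *
 * @returns 0.
 * @param env1 CPU environment.
 */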
5726int emulate_single_instr(CPUX86State *env1)
5727{
5728 TranslationBlock *tb;
5729 TranslationBlock *current;
5730 int flags;
5731 uint8_t *tc_ptr;
5732 target_ulong old_eip;
5733
5734 /* ensures env is loaded! */
5735 CPUX86State *savedenv = env;
5736 env = env1;
5737
5738 RAWEx_ProfileStart(env, STATS_EMULATE_SINGLE_INSTR);
5739
5740 current = env->current_tb;
5741 env->current_tb = NULL;
5742 flags = env->hflags | (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
5743
5744 /*
5745 * Translate only one instruction.
5746 */
5747 ASMAtomicOrU32(&env->state, CPU_EMULATE_SINGLE_INSTR);
5748 tb = tb_gen_code(env, env->eip + env->segs[R_CS].base,
5749 env->segs[R_CS].base, flags, 0);
5750
5751 ASMAtomicAndU32(&env->state, ~CPU_EMULATE_SINGLE_INSTR);
5752
5753
5754 /* tb_link_phys: */
5755 tb->jmp_first = (TranslationBlock *)((intptr_t)tb | 2);
5756 tb->jmp_next[0] = NULL;
5757 tb->jmp_next[1] = NULL;
5758 Assert(tb->jmp_next[0] == NULL);
5759 Assert(tb->jmp_next[1] == NULL);
5760 if (tb->tb_next_offset[0] != 0xffff)
5761 tb_reset_jump(tb, 0);
5762 if (tb->tb_next_offset[1] != 0xffff)
5763 tb_reset_jump(tb, 1);
5764
5765 /*
5766 * Execute it using emulation
5767 */
5768 old_eip = env->eip;
5769 env->current_tb = tb;
5770
5771 /*
5772 * eip remains the same for repeated instructions; it is unclear why qemu doesn't do a jump inside the generated code,
5773 * so this is perhaps not a very safe hack
5774 */
5775 while(old_eip == env->eip)
5776 {
5777 tc_ptr = tb->tc_ptr;
5778
5779#if defined(VBOX) && defined(GCC_WITH_BUGGY_REGPARM)
5780 int fake_ret;
5781 tcg_qemu_tb_exec(tc_ptr, fake_ret);
5782#else
5783 tcg_qemu_tb_exec(tc_ptr);
5784#endif
5785 /*
5786 * Exit once we detect an external interrupt and interrupts are enabled
5787 */
5788 if( (env->interrupt_request & (CPU_INTERRUPT_EXTERNAL_EXIT|CPU_INTERRUPT_EXTERNAL_TIMER)) ||
5789 ( (env->eflags & IF_MASK) &&
5790 !(env->hflags & HF_INHIBIT_IRQ_MASK) &&
5791 (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD) ) )
5792 {
5793 break;
5794 }
5795 }
5796 env->current_tb = current;
5797
5798 tb_phys_invalidate(tb, -1);
5799 tb_free(tb);
5800/*
5801 Assert(tb->tb_next_offset[0] == 0xffff);
5802 Assert(tb->tb_next_offset[1] == 0xffff);
5803 Assert(tb->tb_next[0] == 0xffff);
5804 Assert(tb->tb_next[1] == 0xffff);
5805 Assert(tb->jmp_next[0] == NULL);
5806 Assert(tb->jmp_next[1] == NULL);
5807 Assert(tb->jmp_first == NULL); */
5808
5809 RAWEx_ProfileStop(env, STATS_EMULATE_SINGLE_INSTR);
5810
5811 /*
5812 * Execute the next instruction when we encounter instruction fusing.
5813 */
5814 if (env->hflags & HF_INHIBIT_IRQ_MASK)
5815 {
5816 Log(("REM: Emulating next instruction due to instruction fusing (HF_INHIBIT_IRQ_MASK) at %RGv\n", env->eip));
5817 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
5818 emulate_single_instr(env);
5819 }
5820
5821 env = savedenv;
5822 return 0;
5823}
5824
5825/**
5826 * Correctly loads a new ldtr selector.
5827 *
5828 * @param env1 CPU environment.
5829 * @param selector Selector to load.
5830 */
5831void sync_ldtr(CPUX86State *env1, int selector)
5832{
5833 CPUX86State *saved_env = env;
5834 if (setjmp(env1->jmp_env) == 0)
5835 {
5836 env = env1;
5837 helper_lldt(selector);
5838 env = saved_env;
5839 }
5840 else
5841 {
5842 env = saved_env;
5843#ifdef VBOX_STRICT
5844 cpu_abort(env1, "sync_ldtr: selector=%#x\n", selector);
5845#endif
5846 }
5847}
5848
5849/**
5850 * Correctly loads a new tr selector.
5851 *
5852 * @param env1 CPU environment.
5853 * @param selector Selector to load.
5854 */
5855int sync_tr(CPUX86State *env1, int selector)
5856{
5857 /* ARG! this was going to call helper_ltr_T0 but that won't work because of the busy flag. */
5858 SegmentCache *dt;
5859 uint32_t e1, e2;
5860 int index, type, entry_limit;
5861 target_ulong ptr;
5862 CPUX86State *saved_env = env;
5863 env = env1;
5864
5865 selector &= 0xffff;
5866 if ((selector & 0xfffc) == 0) {
5867 /* NULL selector case: invalid TR */
5868 env->tr.base = 0;
5869 env->tr.limit = 0;
5870 env->tr.flags = 0;
5871 } else {
5872 if (selector & 0x4)
5873 goto l_failure;
5874 dt = &env->gdt;
5875 index = selector & ~7;
5876#ifdef TARGET_X86_64
5877 if (env->hflags & HF_LMA_MASK)
5878 entry_limit = 15;
5879 else
5880#endif
5881 entry_limit = 7;
5882 if ((index + entry_limit) > dt->limit)
5883 goto l_failure;
5884 ptr = dt->base + index;
5885 e1 = ldl_kernel(ptr);
5886 e2 = ldl_kernel(ptr + 4);
5887 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
5888 if ((e2 & DESC_S_MASK) /*||
5889 (type != 1 && type != 9)*/)
5890 goto l_failure;
5891 if (!(e2 & DESC_P_MASK))
5892 goto l_failure;
5893#ifdef TARGET_X86_64
5894 if (env->hflags & HF_LMA_MASK) {
5895 uint32_t e3;
5896 e3 = ldl_kernel(ptr + 8);
5897 load_seg_cache_raw_dt(&env->tr, e1, e2);
5898 env->tr.base |= (target_ulong)e3 << 32;
5899 } else
5900#endif
5901 {
5902 load_seg_cache_raw_dt(&env->tr, e1, e2);
5903 }
5904 e2 |= DESC_TSS_BUSY_MASK;
5905 stl_kernel(ptr + 4, e2);
5906 }
5907 env->tr.selector = selector;
5908
5909 env = saved_env;
5910 return 0;
5911l_failure:
5912 AssertMsgFailed(("selector=%d\n", selector));
5913 return -1;
5914}
5915
5916
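/**
 * Reads the inner stack pointer (ss:esp) for the given privilege level from
 * the current TSS.
 *
 * @returns 1 on success, 0 if the entry lies outside the TSS limit.
 * @param env1 CPU environment.
 * @param ss_ptr Where to store the stack segment selector.
 * @param esp_ptr Where to store the stack pointer.
 * @param dpl Privilege level of the stack to fetch.
 */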
5917int get_ss_esp_from_tss_raw(CPUX86State *env1, uint32_t *ss_ptr,
5918 uint32_t *esp_ptr, int dpl)
5919{
5920 int type, index, shift;
5921
5922 CPUX86State *savedenv = env;
5923 env = env1;
5924
5925 if (!(env->tr.flags & DESC_P_MASK))
5926 cpu_abort(env, "invalid tss");
5927 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
5928 if ((type & 7) != 1)
5929 cpu_abort(env, "invalid tss type %d", type);
5930 shift = type >> 3;
5931 index = (dpl * 4 + 2) << shift;
5932 if (index + (4 << shift) - 1 > env->tr.limit)
5933 {
5934 env = savedenv;
5935 return 0;
5936 }
5937 //raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
5938
5939 if (shift == 0) {
5940 *esp_ptr = lduw_kernel(env->tr.base + index);
5941 *ss_ptr = lduw_kernel(env->tr.base + index + 2);
5942 } else {
5943 *esp_ptr = ldl_kernel(env->tr.base + index);
5944 *ss_ptr = lduw_kernel(env->tr.base + index + 4);
5945 }
5946
5947 env = savedenv;
5948 return 1;
5949}
5950
5951//*****************************************************************************
5952// Needs to be at the bottom of the file (overriding macros)
5953
5954#ifndef VBOX
5955static inline CPU86_LDouble helper_fldt_raw(uint8_t *ptr)
5956#else /* VBOX */
5957DECLINLINE(CPU86_LDouble) helper_fldt_raw(uint8_t *ptr)
5958#endif /* VBOX */
5959{
5960 return *(CPU86_LDouble *)ptr;
5961}
5962
5963#ifndef VBOX
5964static inline void helper_fstt_raw(CPU86_LDouble f, uint8_t *ptr)
5965#else /* VBOX */
5966DECLINLINE(void) helper_fstt_raw(CPU86_LDouble f, uint8_t *ptr)
5967#endif /* VBOX */
5968{
5969 *(CPU86_LDouble *)ptr = f;
5970}
5971
5972#undef stw
5973#undef stl
5974#undef stq
5975#define stw(a,b) *(uint16_t *)(a) = (uint16_t)(b)
5976#define stl(a,b) *(uint32_t *)(a) = (uint32_t)(b)
5977#define stq(a,b) *(uint64_t *)(a) = (uint64_t)(b)
5978#define data64 0
5979
5980//*****************************************************************************
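/* Write the FPU/SSE state of env into the memory at ptr using plain host
   accesses: FXSAVE layout when the guest CPUID advertises FXSR, legacy
   FSAVE layout otherwise. */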
5981void restore_raw_fp_state(CPUX86State *env, uint8_t *ptr)
5982{
5983 int fpus, fptag, i, nb_xmm_regs;
5984 CPU86_LDouble tmp;
5985 uint8_t *addr;
5986
5987 if (env->cpuid_features & CPUID_FXSR)
5988 {
5989 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
5990 fptag = 0;
5991 for(i = 0; i < 8; i++) {
5992 fptag |= (env->fptags[i] << i);
5993 }
5994 stw(ptr, env->fpuc);
5995 stw(ptr + 2, fpus);
5996 stw(ptr + 4, fptag ^ 0xff);
5997
5998 addr = ptr + 0x20;
5999 for(i = 0;i < 8; i++) {
6000 tmp = ST(i);
6001 helper_fstt_raw(tmp, addr);
6002 addr += 16;
6003 }
6004
6005 if (env->cr[4] & CR4_OSFXSR_MASK) {
6006 /* XXX: finish it */
6007 stl(ptr + 0x18, env->mxcsr); /* mxcsr */
6008 stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
6009 nb_xmm_regs = 8 << data64;
6010 addr = ptr + 0xa0;
6011 for(i = 0; i < nb_xmm_regs; i++) {
6012#if __GNUC__ < 4
6013 stq(addr, env->xmm_regs[i].XMM_Q(0));
6014 stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
6015#else /* VBOX + __GNUC__ >= 4: gcc 4.x compiler bug - it runs out of registers for the 64-bit value. */
6016 stl(addr, env->xmm_regs[i].XMM_L(0));
6017 stl(addr + 4, env->xmm_regs[i].XMM_L(1));
6018 stl(addr + 8, env->xmm_regs[i].XMM_L(2));
6019 stl(addr + 12, env->xmm_regs[i].XMM_L(3));
6020#endif
6021 addr += 16;
6022 }
6023 }
6024 }
6025 else
6026 {
6027 PX86FPUSTATE fp = (PX86FPUSTATE)ptr;
6028 int fptag;
6029
6030 fp->FCW = env->fpuc;
6031 fp->FSW = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
6032 fptag = 0;
6033 for (i=7; i>=0; i--) {
6034 fptag <<= 2;
6035 if (env->fptags[i]) {
6036 fptag |= 3;
6037 } else {
6038 /* the FPU automatically computes it */
6039 }
6040 }
6041 fp->FTW = fptag;
6042
6043 for(i = 0;i < 8; i++) {
6044 tmp = ST(i);
6045 helper_fstt_raw(tmp, &fp->regs[i].reg[0]);
6046 }
6047 }
6048}
6049
6050//*****************************************************************************
6051#undef lduw
6052#undef ldl
6053#undef ldq
6054#define lduw(a) *(uint16_t *)(a)
6055#define ldl(a) *(uint32_t *)(a)
6056#define ldq(a) *(uint64_t *)(a)
6057//*****************************************************************************
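/* Load the FPU/SSE state from the memory at ptr into env using plain host
   accesses: FXSAVE layout when the guest CPUID advertises FXSR, legacy
   FSAVE layout otherwise. */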
6058void save_raw_fp_state(CPUX86State *env, uint8_t *ptr)
6059{
6060 int i, fpus, fptag, nb_xmm_regs;
6061 CPU86_LDouble tmp;
6062 uint8_t *addr;
6063
6064 if (env->cpuid_features & CPUID_FXSR)
6065 {
6066 env->fpuc = lduw(ptr);
6067 fpus = lduw(ptr + 2);
6068 fptag = lduw(ptr + 4);
6069 env->fpstt = (fpus >> 11) & 7;
6070 env->fpus = fpus & ~0x3800;
6071 fptag ^= 0xff;
6072 for(i = 0;i < 8; i++) {
6073 env->fptags[i] = ((fptag >> i) & 1);
6074 }
6075
6076 addr = ptr + 0x20;
6077 for(i = 0;i < 8; i++) {
6078 tmp = helper_fldt_raw(addr);
6079 ST(i) = tmp;
6080 addr += 16;
6081 }
6082
6083 if (env->cr[4] & CR4_OSFXSR_MASK) {
6084 /* XXX: finish it, endianness */
6085 env->mxcsr = ldl(ptr + 0x18);
6086 //ldl(ptr + 0x1c);
6087 nb_xmm_regs = 8 << data64;
6088 addr = ptr + 0xa0;
6089 for(i = 0; i < nb_xmm_regs; i++) {
6090#if HC_ARCH_BITS == 32
6091 /* this is a workaround for http://gcc.gnu.org/bugzilla/show_bug.cgi?id=35135 */
6092 env->xmm_regs[i].XMM_L(0) = ldl(addr);
6093 env->xmm_regs[i].XMM_L(1) = ldl(addr + 4);
6094 env->xmm_regs[i].XMM_L(2) = ldl(addr + 8);
6095 env->xmm_regs[i].XMM_L(3) = ldl(addr + 12);
6096#else
6097 env->xmm_regs[i].XMM_Q(0) = ldq(addr);
6098 env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
6099#endif
6100 addr += 16;
6101 }
6102 }
6103 }
6104 else
6105 {
6106 PX86FPUSTATE fp = (PX86FPUSTATE)ptr;
6107 int fptag, j;
6108
6109 env->fpuc = fp->FCW;
6110 env->fpstt = (fp->FSW >> 11) & 7;
6111 env->fpus = fp->FSW & ~0x3800;
6112 fptag = fp->FTW;
6113 for(i = 0;i < 8; i++) {
6114 env->fptags[i] = ((fptag & 3) == 3);
6115 fptag >>= 2;
6116 }
6117 j = env->fpstt;
6118 for(i = 0;i < 8; i++) {
6119 tmp = helper_fldt_raw(&fp->regs[i].reg[0]);
6120 ST(i) = tmp;
6121 }
6122 }
6123}
6124//*****************************************************************************
6125//*****************************************************************************
6126
6127#endif /* VBOX */
6128
6129/* Secure Virtual Machine helpers */
6130
6131#if defined(CONFIG_USER_ONLY)
6132
6133void helper_vmrun(int aflag, int next_eip_addend)
6134{
6135}
6136void helper_vmmcall(void)
6137{
6138}
6139void helper_vmload(int aflag)
6140{
6141}
6142void helper_vmsave(int aflag)
6143{
6144}
6145void helper_stgi(void)
6146{
6147}
6148void helper_clgi(void)
6149{
6150}
6151void helper_skinit(void)
6152{
6153}
6154void helper_invlpga(int aflag)
6155{
6156}
6157void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
6158{
6159}
6160void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
6161{
6162}
6163
6164void helper_svm_check_io(uint32_t port, uint32_t param,
6165 uint32_t next_eip_addend)
6166{
6167}
6168#else
6169
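/* Store a segment cache entry into a VMCB segment descriptor, packing the
   hidden flags into the VMCB attribute format. */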
6170#ifndef VBOX
6171static inline void svm_save_seg(target_phys_addr_t addr,
6172#else /* VBOX */
6173DECLINLINE(void) svm_save_seg(target_phys_addr_t addr,
6174#endif /* VBOX */
6175 const SegmentCache *sc)
6176{
6177 stw_phys(addr + offsetof(struct vmcb_seg, selector),
6178 sc->selector);
6179 stq_phys(addr + offsetof(struct vmcb_seg, base),
6180 sc->base);
6181 stl_phys(addr + offsetof(struct vmcb_seg, limit),
6182 sc->limit);
6183 stw_phys(addr + offsetof(struct vmcb_seg, attrib),
6184 ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
6185}
6186
6187#ifndef VBOX
6188static inline void svm_load_seg(target_phys_addr_t addr, SegmentCache *sc)
6189#else /* VBOX */
6190DECLINLINE(void) svm_load_seg(target_phys_addr_t addr, SegmentCache *sc)
6191#endif /* VBOX */
6192{
6193 unsigned int flags;
6194
6195 sc->selector = lduw_phys(addr + offsetof(struct vmcb_seg, selector));
6196 sc->base = ldq_phys(addr + offsetof(struct vmcb_seg, base));
6197 sc->limit = ldl_phys(addr + offsetof(struct vmcb_seg, limit));
6198 flags = lduw_phys(addr + offsetof(struct vmcb_seg, attrib));
6199 sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
6200}
6201
6202#ifndef VBOX
6203static inline void svm_load_seg_cache(target_phys_addr_t addr,
6204#else /* VBOX */
6205DECLINLINE(void) svm_load_seg_cache(target_phys_addr_t addr,
6206#endif /* VBOX */
6207 CPUState *env, int seg_reg)
6208{
6209 SegmentCache sc1, *sc = &sc1;
6210 svm_load_seg(addr, sc);
6211 cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
6212 sc->base, sc->limit, sc->flags);
6213}
6214
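/* VMRUN: save the host state into the hsave area, load the guest state and
   intercept bitmaps from the VMCB, and inject the event described by
   EVENTINJ if it is valid. */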
6215void helper_vmrun(int aflag, int next_eip_addend)
6216{
6217 target_ulong addr;
6218 uint32_t event_inj;
6219 uint32_t int_ctl;
6220
6221 helper_svm_check_intercept_param(SVM_EXIT_VMRUN, 0);
6222
6223 if (aflag == 2)
6224 addr = EAX;
6225 else
6226 addr = (uint32_t)EAX;
6227
6228 if (loglevel & CPU_LOG_TB_IN_ASM)
6229 fprintf(logfile,"vmrun! " TARGET_FMT_lx "\n", addr);
6230
6231 env->vm_vmcb = addr;
6232
6233 /* save the current CPU state in the hsave page */
6234 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
6235 stl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
6236
6237 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base), env->idt.base);
6238 stl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
6239
6240 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
6241 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
6242 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
6243 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
6244 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
6245 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);
6246
6247 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
6248 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags), compute_eflags());
6249
6250 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.es),
6251 &env->segs[R_ES]);
6252 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.cs),
6253 &env->segs[R_CS]);
6254 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ss),
6255 &env->segs[R_SS]);
6256 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ds),
6257 &env->segs[R_DS]);
6258
6259 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip),
6260 EIP + next_eip_addend);
6261 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), ESP);
6262 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), EAX);
6263
6264 /* load the interception bitmaps so we do not need to access the
6265 vmcb in svm mode */
6266 env->intercept = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept));
6267 env->intercept_cr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_read));
6268 env->intercept_cr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_write));
6269 env->intercept_dr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_read));
6270 env->intercept_dr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_write));
6271 env->intercept_exceptions = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_exceptions));
6272
6273 /* enable intercepts */
6274 env->hflags |= HF_SVMI_MASK;
6275
6276 env->tsc_offset = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.tsc_offset));
6277
6278 env->gdt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base));
6279 env->gdt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit));
6280
6281 env->idt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base));
6282 env->idt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit));
6283
6284 /* clear exit_info_2 so we behave like the real hardware */
6285 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);
6286
6287 cpu_x86_update_cr0(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0)));
6288 cpu_x86_update_cr4(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4)));
6289 cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3)));
6290 env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2));
6291 int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
6292 env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
6293 if (int_ctl & V_INTR_MASKING_MASK) {
6294 env->v_tpr = int_ctl & V_TPR_MASK;
6295 env->hflags2 |= HF2_VINTR_MASK;
6296 if (env->eflags & IF_MASK)
6297 env->hflags2 |= HF2_HIF_MASK;
6298 }
6299
6300 cpu_load_efer(env,
6301 ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer)));
6302 env->eflags = 0;
6303 load_eflags(ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags)),
6304 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
6305 CC_OP = CC_OP_EFLAGS;
6306
6307 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.es),
6308 env, R_ES);
6309 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.cs),
6310 env, R_CS);
6311 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ss),
6312 env, R_SS);
6313 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ds),
6314 env, R_DS);
6315
6316 EIP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));
6317 env->eip = EIP;
6318 ESP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp));
6319 EAX = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax));
6320 env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7));
6321 env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6));
6322 cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl)));
6323
6324 /* FIXME: guest state consistency checks */
6325
6326 switch(ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
6327 case TLB_CONTROL_DO_NOTHING:
6328 break;
6329 case TLB_CONTROL_FLUSH_ALL_ASID:
6330 /* FIXME: this is not 100% correct but should work for now */
6331 tlb_flush(env, 1);
6332 break;
6333 }
6334
6335 env->hflags2 |= HF2_GIF_MASK;
6336
6337 if (int_ctl & V_IRQ_MASK) {
6338 env->interrupt_request |= CPU_INTERRUPT_VIRQ;
6339 }
6340
6341 /* maybe we need to inject an event */
6342 event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
6343 if (event_inj & SVM_EVTINJ_VALID) {
6344 uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
6345 uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
6346 uint32_t event_inj_err = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err));
6347 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj & ~SVM_EVTINJ_VALID);
6348
6349 if (loglevel & CPU_LOG_TB_IN_ASM)
6350 fprintf(logfile, "Injecting(%#hx): ", valid_err);
6351 /* FIXME: need to implement valid_err */
6352 switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
6353 case SVM_EVTINJ_TYPE_INTR:
6354 env->exception_index = vector;
6355 env->error_code = event_inj_err;
6356 env->exception_is_int = 0;
6357 env->exception_next_eip = -1;
6358 if (loglevel & CPU_LOG_TB_IN_ASM)
6359 fprintf(logfile, "INTR");
6360 /* XXX: is it always correct? */
6361 do_interrupt(vector, 0, 0, 0, 1);
6362 break;
6363 case SVM_EVTINJ_TYPE_NMI:
6364 env->exception_index = EXCP02_NMI;
6365 env->error_code = event_inj_err;
6366 env->exception_is_int = 0;
6367 env->exception_next_eip = EIP;
6368 if (loglevel & CPU_LOG_TB_IN_ASM)
6369 fprintf(logfile, "NMI");
6370 cpu_loop_exit();
6371 break;
6372 case SVM_EVTINJ_TYPE_EXEPT:
6373 env->exception_index = vector;
6374 env->error_code = event_inj_err;
6375 env->exception_is_int = 0;
6376 env->exception_next_eip = -1;
6377 if (loglevel & CPU_LOG_TB_IN_ASM)
6378 fprintf(logfile, "EXEPT");
6379 cpu_loop_exit();
6380 break;
6381 case SVM_EVTINJ_TYPE_SOFT:
6382 env->exception_index = vector;
6383 env->error_code = event_inj_err;
6384 env->exception_is_int = 1;
6385 env->exception_next_eip = EIP;
6386 if (loglevel & CPU_LOG_TB_IN_ASM)
6387 fprintf(logfile, "SOFT");
6388 cpu_loop_exit();
6389 break;
6390 }
6391 if (loglevel & CPU_LOG_TB_IN_ASM)
6392 fprintf(logfile, " %#x %#x\n", env->exception_index, env->error_code);
6393 }
6394}
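/* For reference, the EVENTINJ decoding above packs everything into one 32-bit
   control field: bits 7:0 hold the vector, bits 10:8 the type, bit 11 the
   "error code valid" flag and bit 31 the valid bit. For example, 0x80000B0E
   describes a valid exception (SVM_EVTINJ_TYPE_EXEPT) with vector 0x0E (#PF)
   and a pending error code. A guarded sketch (not compiled, hypothetical
   function name) restating the masks helper_vmrun() already uses: */
#if 0
static void svm_event_inj_example(uint32_t event_inj)
{
    uint8_t  vector    = event_inj & SVM_EVTINJ_VEC_MASK;         /* bits 7:0  */
    uint32_t type      = event_inj & SVM_EVTINJ_TYPE_MASK;        /* bits 10:8 */
    int      valid_err = (event_inj & SVM_EVTINJ_VALID_ERR) != 0; /* bit 11    */
    int      valid     = (event_inj & SVM_EVTINJ_VALID) != 0;     /* bit 31    */
    (void)vector; (void)type; (void)valid_err; (void)valid;
}
#endif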
6395
6396void helper_vmmcall(void)
6397{
6398 helper_svm_check_intercept_param(SVM_EXIT_VMMCALL, 0);
6399 raise_exception(EXCP06_ILLOP);
6400}
6401
6402void helper_vmload(int aflag)
6403{
6404 target_ulong addr;
6405 helper_svm_check_intercept_param(SVM_EXIT_VMLOAD, 0);
6406
6407 if (aflag == 2)
6408 addr = EAX;
6409 else
6410 addr = (uint32_t)EAX;
6411
6412 if (loglevel & CPU_LOG_TB_IN_ASM)
6413 fprintf(logfile,"vmload! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
6414 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
6415 env->segs[R_FS].base);
6416
6417 svm_load_seg_cache(addr + offsetof(struct vmcb, save.fs),
6418 env, R_FS);
6419 svm_load_seg_cache(addr + offsetof(struct vmcb, save.gs),
6420 env, R_GS);
6421 svm_load_seg(addr + offsetof(struct vmcb, save.tr),
6422 &env->tr);
6423 svm_load_seg(addr + offsetof(struct vmcb, save.ldtr),
6424 &env->ldt);
6425
6426#ifdef TARGET_X86_64
6427 env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base));
6428 env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar));
6429 env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar));
6430 env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask));
6431#endif
6432 env->star = ldq_phys(addr + offsetof(struct vmcb, save.star));
6433 env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs));
6434 env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_esp));
6435 env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_eip));
6436}
6437
6438void helper_vmsave(int aflag)
6439{
6440 target_ulong addr;
6441 helper_svm_check_intercept_param(SVM_EXIT_VMSAVE, 0);
6442
6443 if (aflag == 2)
6444 addr = EAX;
6445 else
6446 addr = (uint32_t)EAX;
6447
6448 if (loglevel & CPU_LOG_TB_IN_ASM)
6449 fprintf(logfile,"vmsave! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
6450 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
6451 env->segs[R_FS].base);
6452
6453 svm_save_seg(addr + offsetof(struct vmcb, save.fs),
6454 &env->segs[R_FS]);
6455 svm_save_seg(addr + offsetof(struct vmcb, save.gs),
6456 &env->segs[R_GS]);
6457 svm_save_seg(addr + offsetof(struct vmcb, save.tr),
6458 &env->tr);
6459 svm_save_seg(addr + offsetof(struct vmcb, save.ldtr),
6460 &env->ldt);
6461
6462#ifdef TARGET_X86_64
6463 stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base), env->kernelgsbase);
6464 stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
6465 stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
6466 stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
6467#endif
6468 stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
6469 stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
6470 stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp), env->sysenter_esp);
6471 stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip), env->sysenter_eip);
6472}
6473
6474void helper_stgi(void)
6475{
6476 helper_svm_check_intercept_param(SVM_EXIT_STGI, 0);
6477 env->hflags2 |= HF2_GIF_MASK;
6478}
6479
6480void helper_clgi(void)
6481{
6482 helper_svm_check_intercept_param(SVM_EXIT_CLGI, 0);
6483 env->hflags2 &= ~HF2_GIF_MASK;
6484}
6485
6486void helper_skinit(void)
6487{
6488 helper_svm_check_intercept_param(SVM_EXIT_SKINIT, 0);
6489 /* XXX: not implemented */
6490 raise_exception(EXCP06_ILLOP);
6491}
6492
6493void helper_invlpga(int aflag)
6494{
6495 target_ulong addr;
6496 helper_svm_check_intercept_param(SVM_EXIT_INVLPGA, 0);
6497
6498 if (aflag == 2)
6499 addr = EAX;
6500 else
6501 addr = (uint32_t)EAX;
6502
6503 /* XXX: could use the ASID to see whether the flush is actually
6504 needed */
6505 tlb_flush_page(env, addr);
6506}
6507
6508void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
6509{
6510 if (likely(!(env->hflags & HF_SVMI_MASK)))
6511 return;
6512#ifndef VBOX
6513 switch(type) {
6514#ifndef VBOX
6515 case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
6516#else
6517 case SVM_EXIT_READ_CR0: case SVM_EXIT_READ_CR0 + 1: case SVM_EXIT_READ_CR0 + 2:
6518 case SVM_EXIT_READ_CR0 + 3: case SVM_EXIT_READ_CR0 + 4: case SVM_EXIT_READ_CR0 + 5:
6519 case SVM_EXIT_READ_CR0 + 6: case SVM_EXIT_READ_CR0 + 7: case SVM_EXIT_READ_CR0 + 8:
6520#endif
6521 if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
6522 helper_vmexit(type, param);
6523 }
6524 break;
6525#ifndef VBOX
6526 case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
6527#else
6528 case SVM_EXIT_WRITE_CR0: case SVM_EXIT_WRITE_CR0 + 1: case SVM_EXIT_WRITE_CR0 + 2:
6529 case SVM_EXIT_WRITE_CR0 + 3: case SVM_EXIT_WRITE_CR0 + 4: case SVM_EXIT_WRITE_CR0 + 5:
6530 case SVM_EXIT_WRITE_CR0 + 6: case SVM_EXIT_WRITE_CR0 + 7: case SVM_EXIT_WRITE_CR0 + 8:
6531#endif
6532 if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
6533 helper_vmexit(type, param);
6534 }
6535 break;
6536 case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
6537 if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
6538 helper_vmexit(type, param);
6539 }
6540 break;
6541 case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
6542 if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
6543 helper_vmexit(type, param);
6544 }
6545 break;
6546 case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
6547 if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
6548 helper_vmexit(type, param);
6549 }
6550 break;
6551 case SVM_EXIT_MSR:
6552 if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
6553 /* FIXME: this should be read in at vmrun (faster this way?) */
6554 uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.msrpm_base_pa));
6555 uint32_t t0, t1;
6556 switch((uint32_t)ECX) {
6557 case 0 ... 0x1fff:
6558 t0 = (ECX * 2) % 8;
6559 t1 = (ECX * 2) / 8;
6560 break;
6561 case 0xc0000000 ... 0xc0001fff:
6562 t0 = (8192 + ECX - 0xc0000000) * 2;
6563 t1 = (t0 / 8);
6564 t0 %= 8;
6565 break;
6566 case 0xc0010000 ... 0xc0011fff:
6567 t0 = (16384 + ECX - 0xc0010000) * 2;
6568 t1 = (t0 / 8);
6569 t0 %= 8;
6570 break;
6571 default:
6572 helper_vmexit(type, param);
6573 t0 = 0;
6574 t1 = 0;
6575 break;
6576 }
6577 if (ldub_phys(addr + t1) & ((1 << param) << t0))
6578 helper_vmexit(type, param);
6579 }
6580 break;
6581 default:
6582 if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
6583 helper_vmexit(type, param);
6584 }
6585 break;
6586 }
6587#else
6588 AssertMsgFailed(("We shouldn't be here, HWACCM supported differently!"));
6589#endif
6590}
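/* For reference: the MSR permission map tested above holds two bits per MSR
   (even bit = read intercept, odd bit = write intercept) in three 2 KB
   regions covering MSRs 0x0-0x1FFF, 0xC0000000-0xC0001FFF and
   0xC0010000-0xC0011FFF. For example MSR 0xC0000080 (EFER) maps to bit
   2 * (8192 + 0x80) = 16640, i.e. byte offset 0x820, bit 0 for reads and
   bit 1 for writes, matching the t1/t0 pair the switch above computes.
   A guarded sketch (not compiled, hypothetical name, second region only): */
#if 0
static int svm_msrpm_bit_example(uint64_t msrpm_base, uint32_t msr, int is_write)
{
    uint32_t bit = (8192 + (msr - 0xc0000000)) * 2 + (is_write ? 1 : 0);
    return (ldub_phys(msrpm_base + bit / 8) >> (bit % 8)) & 1;
}
#endif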
6591
6592void helper_svm_check_io(uint32_t port, uint32_t param,
6593 uint32_t next_eip_addend)
6594{
6595 if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
6596 /* FIXME: this should be read in at vmrun (faster this way?) */
6597 uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.iopm_base_pa));
6598 uint16_t mask = (1 << ((param >> 4) & 7)) - 1;
6599 if(lduw_phys(addr + port / 8) & (mask << (port & 7))) {
6600 /* next EIP */
6601 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
6602 env->eip + next_eip_addend);
6603 helper_vmexit(SVM_EXIT_IOIO, param | (port << 16));
6604 }
6605 }
6606}
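/* For reference: the I/O permission map holds one bit per port, and the
   SZ8/SZ16/SZ32 bits of the IOIO exit information (bits 4-6 of param)
   determine how many consecutive port bits are tested, e.g. a 16-bit access
   to port 0x3FF checks bits 0x3FF and 0x400. That is why a 16-bit load is
   used above: the mask may straddle a byte boundary. A guarded sketch
   (not compiled, hypothetical name): */
#if 0
static int svm_iopm_intercepted_example(uint64_t iopm_base, uint32_t port, int width_bytes)
{
    uint16_t mask = (1 << width_bytes) - 1;  /* one permission bit per byte accessed */
    return (lduw_phys(iopm_base + port / 8) & (mask << (port & 7))) != 0;
}
#endif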
6607
6608/* Note: currently only 32 bits of exit_code are used */
6609void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
6610{
6611 uint32_t int_ctl;
6612
6613 if (loglevel & CPU_LOG_TB_IN_ASM)
6614 fprintf(logfile,"vmexit(%08x, %016" PRIx64 ", %016" PRIx64 ", " TARGET_FMT_lx ")!\n",
6615 exit_code, exit_info_1,
6616 ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2)),
6617 EIP);
6618
6619 if(env->hflags & HF_INHIBIT_IRQ_MASK) {
6620 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), SVM_INTERRUPT_SHADOW_MASK);
6621 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
6622 } else {
6623 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
6624 }
6625
6626 /* Save the VM state in the vmcb */
6627 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.es),
6628 &env->segs[R_ES]);
6629 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.cs),
6630 &env->segs[R_CS]);
6631 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ss),
6632 &env->segs[R_SS]);
6633 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ds),
6634 &env->segs[R_DS]);
6635
6636 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
6637 stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
6638
6639 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base), env->idt.base);
6640 stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
6641
6642 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
6643 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
6644 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
6645 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
6646 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);
6647
6648 int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
6649 int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
6650 int_ctl |= env->v_tpr & V_TPR_MASK;
6651 if (env->interrupt_request & CPU_INTERRUPT_VIRQ)
6652 int_ctl |= V_IRQ_MASK;
6653 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);
6654
6655 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags), compute_eflags());
6656 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip);
6657 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), ESP);
6658 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), EAX);
6659 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
6660 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
6661 stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl), env->hflags & HF_CPL_MASK);
6662
6663 /* Reload the host state from vm_hsave */
6664 env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
6665 env->hflags &= ~HF_SVMI_MASK;
6666 env->intercept = 0;
6667 env->intercept_exceptions = 0;
6668 env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
6669 env->tsc_offset = 0;
6670
6671 env->gdt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base));
6672 env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit));
6673
6674 env->idt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base));
6675 env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit));
6676
6677 cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0)) | CR0_PE_MASK);
6678 cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4)));
6679 cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3)));
6680 /* we need to set the efer after the crs so the hidden flags get
6681 set properly */
6682 cpu_load_efer(env,
6683 ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer)));
6684 env->eflags = 0;
6685 load_eflags(ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags)),
6686 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
6687 CC_OP = CC_OP_EFLAGS;
6688
6689 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.es),
6690 env, R_ES);
6691 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.cs),
6692 env, R_CS);
6693 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ss),
6694 env, R_SS);
6695 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ds),
6696 env, R_DS);
6697
6698 EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
6699 ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp));
6700 EAX = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax));
6701
6702 env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
6703 env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));
6704
6705 /* other setups */
6706 cpu_x86_set_cpl(env, 0);
6707 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code), exit_code);
6708 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1), exit_info_1);
6709
6710 env->hflags2 &= ~HF2_GIF_MASK;
6711 /* FIXME: Resets the current ASID register to zero (host ASID). */
6712
6713 /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */
6714
6715 /* Clears the TSC_OFFSET inside the processor. */
6716
6717 /* If the host is in PAE mode, the processor reloads the host's PDPEs
6718 from the page table indicated by the host's CR3. If the PDPEs contain
6719 illegal state, the processor causes a shutdown. */
6720
6721 /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
6722 env->cr[0] |= CR0_PE_MASK;
6723 env->eflags &= ~VM_MASK;
6724
6725 /* Disables all breakpoints in the host DR7 register. */
6726
6727 /* Checks the reloaded host state for consistency. */
6728
6729 /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
6730 host's code segment or non-canonical (in the case of long mode), a
6731 #GP fault is delivered inside the host. */
6732
6733 /* remove any pending exception */
6734 env->exception_index = -1;
6735 env->error_code = 0;
6736 env->old_exception = -1;
6737
6738 cpu_loop_exit();
6739}
6740
6741#endif
6742
6743/* MMX/SSE */
6744 /* XXX: optimize by storing fpstt and fptags in the static cpu state */
6745void helper_enter_mmx(void)
6746{
6747 env->fpstt = 0;
6748 *(uint32_t *)(env->fptags) = 0;
6749 *(uint32_t *)(env->fptags + 4) = 0;
6750}
6751
6752void helper_emms(void)
6753{
6754 /* set to empty state */
6755 *(uint32_t *)(env->fptags) = 0x01010101;
6756 *(uint32_t *)(env->fptags + 4) = 0x01010101;
6757}
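/* For reference: env->fptags[] holds eight one-byte tags (0 = valid,
   1 = empty), one per x87/MMX register, so the two 32-bit stores above update
   all eight tags at once. A guarded sketch (not compiled, hypothetical name)
   of the equivalent loop: */
#if 0
static void set_all_fptags_example(int empty)
{
    int i;
    for (i = 0; i < 8; i++)
        env->fptags[i] = empty;  /* 0x00000000 / 0x01010101 spell this out bytewise */
}
#endif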
6758
6759/* XXX: suppress */
6760void helper_movq(uint64_t *d, uint64_t *s)
6761{
6762 *d = *s;
6763}
6764
6765#define SHIFT 0
6766#include "ops_sse.h"
6767
6768#define SHIFT 1
6769#include "ops_sse.h"
6770
6771#define SHIFT 0
6772#include "helper_template.h"
6773#undef SHIFT
6774
6775#define SHIFT 1
6776#include "helper_template.h"
6777#undef SHIFT
6778
6779#define SHIFT 2
6780#include "helper_template.h"
6781#undef SHIFT
6782
6783#ifdef TARGET_X86_64
6784
6785#define SHIFT 3
6786#include "helper_template.h"
6787#undef SHIFT
6788
6789#endif
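/* For reference: ops_sse.h and helper_template.h are deliberately included
   several times with a different SHIFT each time. Each inclusion derives the
   operand width from SHIFT (roughly 8 << SHIFT bits for the integer
   templates, and MMX vs SSE register width for ops_sse.h) plus a b/w/l/q
   name suffix, so a single template generates every size variant of each
   helper. */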
6790
6791/* bit operations */
6792target_ulong helper_bsf(target_ulong t0)
6793{
6794 int count;
6795 target_ulong res;
6796
6797 res = t0;
6798 count = 0;
6799 while ((res & 1) == 0) {
6800 count++;
6801 res >>= 1;
6802 }
6803 return count;
6804}
6805
6806target_ulong helper_bsr(target_ulong t0)
6807{
6808 int count;
6809 target_ulong res, mask;
6810
6811 res = t0;
6812 count = TARGET_LONG_BITS - 1;
6813 mask = (target_ulong)1 << (TARGET_LONG_BITS - 1);
6814 while ((res & mask) == 0) {
6815 count--;
6816 res <<= 1;
6817 }
6818 return count;
6819}
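/* For reference: both loops above assume a non-zero operand (neither
   terminates when t0 == 0); the generated code is expected to test for zero
   and skip the helper call, matching the architectural behaviour of leaving
   the destination undefined and setting ZF. E.g. helper_bsf(0x08) == 3 and
   helper_bsr(0x08) == 3. A guarded sketch (not compiled, hypothetical names)
   using the host-utils.h bit helpers, assuming ctz64/clz64 are declared
   there: */
#if 0
static target_ulong bsf_example(target_ulong t0)
{
    return ctz64(t0);          /* index of the lowest set bit  */
}
static target_ulong bsr_example(target_ulong t0)
{
    return 63 - clz64(t0);     /* index of the highest set bit */
}
#endif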
6820
6821
6822static int compute_all_eflags(void)
6823{
6824 return CC_SRC;
6825}
6826
6827static int compute_c_eflags(void)
6828{
6829 return CC_SRC & CC_C;
6830}
6831
6832#ifndef VBOX
6833CCTable cc_table[CC_OP_NB] = {
6834 [CC_OP_DYNAMIC] = { /* should never happen */ },
6835
6836 [CC_OP_EFLAGS] = { compute_all_eflags, compute_c_eflags },
6837
6838 [CC_OP_MULB] = { compute_all_mulb, compute_c_mull },
6839 [CC_OP_MULW] = { compute_all_mulw, compute_c_mull },
6840 [CC_OP_MULL] = { compute_all_mull, compute_c_mull },
6841
6842 [CC_OP_ADDB] = { compute_all_addb, compute_c_addb },
6843 [CC_OP_ADDW] = { compute_all_addw, compute_c_addw },
6844 [CC_OP_ADDL] = { compute_all_addl, compute_c_addl },
6845
6846 [CC_OP_ADCB] = { compute_all_adcb, compute_c_adcb },
6847 [CC_OP_ADCW] = { compute_all_adcw, compute_c_adcw },
6848 [CC_OP_ADCL] = { compute_all_adcl, compute_c_adcl },
6849
6850 [CC_OP_SUBB] = { compute_all_subb, compute_c_subb },
6851 [CC_OP_SUBW] = { compute_all_subw, compute_c_subw },
6852 [CC_OP_SUBL] = { compute_all_subl, compute_c_subl },
6853
6854 [CC_OP_SBBB] = { compute_all_sbbb, compute_c_sbbb },
6855 [CC_OP_SBBW] = { compute_all_sbbw, compute_c_sbbw },
6856 [CC_OP_SBBL] = { compute_all_sbbl, compute_c_sbbl },
6857
6858 [CC_OP_LOGICB] = { compute_all_logicb, compute_c_logicb },
6859 [CC_OP_LOGICW] = { compute_all_logicw, compute_c_logicw },
6860 [CC_OP_LOGICL] = { compute_all_logicl, compute_c_logicl },
6861
6862 [CC_OP_INCB] = { compute_all_incb, compute_c_incl },
6863 [CC_OP_INCW] = { compute_all_incw, compute_c_incl },
6864 [CC_OP_INCL] = { compute_all_incl, compute_c_incl },
6865
6866 [CC_OP_DECB] = { compute_all_decb, compute_c_incl },
6867 [CC_OP_DECW] = { compute_all_decw, compute_c_incl },
6868 [CC_OP_DECL] = { compute_all_decl, compute_c_incl },
6869
6870 [CC_OP_SHLB] = { compute_all_shlb, compute_c_shlb },
6871 [CC_OP_SHLW] = { compute_all_shlw, compute_c_shlw },
6872 [CC_OP_SHLL] = { compute_all_shll, compute_c_shll },
6873
6874 [CC_OP_SARB] = { compute_all_sarb, compute_c_sarl },
6875 [CC_OP_SARW] = { compute_all_sarw, compute_c_sarl },
6876 [CC_OP_SARL] = { compute_all_sarl, compute_c_sarl },
6877
6878#ifdef TARGET_X86_64
6879 [CC_OP_MULQ] = { compute_all_mulq, compute_c_mull },
6880
6881 [CC_OP_ADDQ] = { compute_all_addq, compute_c_addq },
6882
6883 [CC_OP_ADCQ] = { compute_all_adcq, compute_c_adcq },
6884
6885 [CC_OP_SUBQ] = { compute_all_subq, compute_c_subq },
6886
6887 [CC_OP_SBBQ] = { compute_all_sbbq, compute_c_sbbq },
6888
6889 [CC_OP_LOGICQ] = { compute_all_logicq, compute_c_logicq },
6890
6891 [CC_OP_INCQ] = { compute_all_incq, compute_c_incl },
6892
6893 [CC_OP_DECQ] = { compute_all_decq, compute_c_incl },
6894
6895 [CC_OP_SHLQ] = { compute_all_shlq, compute_c_shlq },
6896
6897 [CC_OP_SARQ] = { compute_all_sarq, compute_c_sarl },
6898#endif
6899};
6900#else /* VBOX */
6901/* Sync carefully with cpu.h */
6902CCTable cc_table[CC_OP_NB] = {
6903 /* CC_OP_DYNAMIC */ { 0, 0 },
6904
6905 /* CC_OP_EFLAGS */ { compute_all_eflags, compute_c_eflags },
6906
6907 /* CC_OP_MULB */ { compute_all_mulb, compute_c_mull },
6908 /* CC_OP_MULW */ { compute_all_mulw, compute_c_mull },
6909 /* CC_OP_MULL */ { compute_all_mull, compute_c_mull },
6910#ifdef TARGET_X86_64
6911 /* CC_OP_MULQ */ { compute_all_mulq, compute_c_mull },
6912#else
6913 /* CC_OP_MULQ */ { 0, 0 },
6914#endif
6915
6916 /* CC_OP_ADDB */ { compute_all_addb, compute_c_addb },
6917 /* CC_OP_ADDW */ { compute_all_addw, compute_c_addw },
6918 /* CC_OP_ADDL */ { compute_all_addl, compute_c_addl },
6919#ifdef TARGET_X86_64
6920 /* CC_OP_ADDQ */ { compute_all_addq, compute_c_addq },
6921#else
6922 /* CC_OP_ADDQ */ { 0, 0 },
6923#endif
6924
6925 /* CC_OP_ADCB */ { compute_all_adcb, compute_c_adcb },
6926 /* CC_OP_ADCW */ { compute_all_adcw, compute_c_adcw },
6927 /* CC_OP_ADCL */ { compute_all_adcl, compute_c_adcl },
6928#ifdef TARGET_X86_64
6929 /* CC_OP_ADCQ */ { compute_all_adcq, compute_c_adcq },
6930#else
6931 /* CC_OP_ADCQ */ { 0, 0 },
6932#endif
6933
6934 /* CC_OP_SUBB */ { compute_all_subb, compute_c_subb },
6935 /* CC_OP_SUBW */ { compute_all_subw, compute_c_subw },
6936 /* CC_OP_SUBL */ { compute_all_subl, compute_c_subl },
6937#ifdef TARGET_X86_64
6938 /* CC_OP_SUBQ */ { compute_all_subq, compute_c_subq },
6939#else
6940 /* CC_OP_SUBQ */ { 0, 0 },
6941#endif
6942
6943 /* CC_OP_SBBB */ { compute_all_sbbb, compute_c_sbbb },
6944 /* CC_OP_SBBW */ { compute_all_sbbw, compute_c_sbbw },
6945 /* CC_OP_SBBL */ { compute_all_sbbl, compute_c_sbbl },
6946#ifdef TARGET_X86_64
6947 /* CC_OP_SBBQ */ { compute_all_sbbq, compute_c_sbbq },
6948#else
6949 /* CC_OP_SBBQ */ { 0, 0 },
6950#endif
6951
6952 /* CC_OP_LOGICB */ { compute_all_logicb, compute_c_logicb },
6953 /* CC_OP_LOGICW */ { compute_all_logicw, compute_c_logicw },
6954 /* CC_OP_LOGICL */ { compute_all_logicl, compute_c_logicl },
6955#ifdef TARGET_X86_64
6956 /* CC_OP_LOGICQ */ { compute_all_logicq, compute_c_logicq },
6957#else
6958 /* CC_OP_LOGICQ */ { 0, 0 },
6959#endif
6960
6961 /* CC_OP_INCB */ { compute_all_incb, compute_c_incl },
6962 /* CC_OP_INCW */ { compute_all_incw, compute_c_incl },
6963 /* CC_OP_INCL */ { compute_all_incl, compute_c_incl },
6964#ifdef TARGET_X86_64
6965 /* CC_OP_INCQ */ { compute_all_incq, compute_c_incl },
6966#else
6967 /* CC_OP_INCQ */ { 0, 0 },
6968#endif
6969
6970 /* CC_OP_DECB */ { compute_all_decb, compute_c_incl },
6971 /* CC_OP_DECW */ { compute_all_decw, compute_c_incl },
6972 /* CC_OP_DECL */ { compute_all_decl, compute_c_incl },
6973#ifdef TARGET_X86_64
6974 /* CC_OP_DECQ */ { compute_all_decq, compute_c_incl },
6975#else
6976 /* CC_OP_DECQ */ { 0, 0 },
6977#endif
6978
6979 /* CC_OP_SHLB */ { compute_all_shlb, compute_c_shlb },
6980 /* CC_OP_SHLW */ { compute_all_shlw, compute_c_shlw },
6981 /* CC_OP_SHLL */ { compute_all_shll, compute_c_shll },
6982#ifdef TARGET_X86_64
6983 /* CC_OP_SHLQ */ { compute_all_shlq, compute_c_shlq },
6984#else
6985 /* CC_OP_SHLQ */ { 0, 0 },
6986#endif
6987
6988 /* CC_OP_SARB */ { compute_all_sarb, compute_c_sarl },
6989 /* CC_OP_SARW */ { compute_all_sarw, compute_c_sarl },
6990 /* CC_OP_SARL */ { compute_all_sarl, compute_c_sarl },
6991#ifdef TARGET_X86_64
6992 /* CC_OP_SARQ */ { compute_all_sarq, compute_c_sarl},
6993#else
6994 /* CC_OP_SARQ */ { 0, 0 },
6995#endif
6996};
6997#endif /* VBOX */
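/* For reference: these tables implement the lazy EFLAGS scheme. CC_OP records
   which operation last produced flags and CC_SRC/CC_DST hold the data needed
   to recompute them, so OF/SF/ZF/AF/PF/CF are only materialised on demand.
   A guarded sketch (not compiled, hypothetical name) of how a consumer such
   as compute_eflags() merges the result, roughly: */
#if 0
static int lazy_eflags_example(void)
{
    int arith_flags = cc_table[CC_OP].compute_all();    /* recompute lazily      */
    return env->eflags | arith_flags | (DF & DF_MASK);  /* merge IF, DF, etc.    */
}
#endif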
6998