VirtualBox

source: vbox/trunk/src/recompiler/target-i386/op_helper.c @ 19484

Last change on this file since 19484 was 19287, checked in by vboxsync, 16 years ago

VBox/cdefs.h,REM,CFGM: VMMRZDECL + some cleanup.

  • Property svn:eol-style set to native
File size: 194.5 KB
1/*
2 * i386 helpers
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21/*
22 * Sun LGPL Disclaimer: For the avoidance of doubt, except that if any license choice
23 * other than GPL or LGPL is available it will apply instead, Sun elects to use only
24 * the Lesser General Public License version 2.1 (LGPLv2) at this time for any software where
25 * a choice of LGPL license versions is made available with the language indicating
26 * that LGPLv2 or any later version may be used, or where a choice of which version
27 * of the LGPL is applied is otherwise unspecified.
28 */
29#define CPU_NO_GLOBAL_REGS
30#include "exec.h"
31#include "host-utils.h"
32
33#ifdef VBOX
34# ifdef VBOX_WITH_VMI
35# include <VBox/parav.h>
36# endif
37#include "qemu-common.h"
38#include <math.h>
39#include "tcg.h"
40#endif
41//#define DEBUG_PCALL
42
43#if 0
44#define raise_exception_err(a, b)\
45do {\
46 if (logfile)\
47 fprintf(logfile, "raise_exception line=%d\n", __LINE__);\
48 (raise_exception_err)(a, b);\
49} while (0)
50#endif
51
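/* The parity_table below precomputes the x86 parity flag for every 8-bit
   value: PF is set when the low byte of a result contains an even number of
   1 bits, so the entry for value v is CC_P exactly when v has an even
   population count.  The lazy flag evaluation code indexes this table with
   the low byte of the last result instead of recounting bits. */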
52const uint8_t parity_table[256] = {
53 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
54 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
55 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
56 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
57 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
58 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
59 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
60 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
61 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
62 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
63 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
64 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
65 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
66 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
67 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
68 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
69 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
70 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
71 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
72 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
73 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
74 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
75 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
76 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
77 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
78 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
79 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
80 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
81 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
82 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
83 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
84 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
85};
86
87/* modulo 17 table */
88const uint8_t rclw_table[32] = {
89 0, 1, 2, 3, 4, 5, 6, 7,
90 8, 9,10,11,12,13,14,15,
91 16, 0, 1, 2, 3, 4, 5, 6,
92 7, 8, 9,10,11,12,13,14,
93};
94
95/* modulo 9 table */
96const uint8_t rclb_table[32] = {
97 0, 1, 2, 3, 4, 5, 6, 7,
98 8, 0, 1, 2, 3, 4, 5, 6,
99 7, 8, 0, 1, 2, 3, 4, 5,
100 6, 7, 8, 0, 1, 2, 3, 4,
101};
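/* The two tables above fold the rotate count for RCL/RCR through the carry
   flag: a 16-bit rotate-through-carry moves a 17-bit quantity and an 8-bit
   one a 9-bit quantity, so the effective count is the masked 5-bit count
   modulo 17 (rclw_table) or modulo 9 (rclb_table).  Indexing the table
   avoids a runtime modulo in the rotate helpers. */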
102
103const CPU86_LDouble f15rk[7] =
104{
105 0.00000000000000000000L,
106 1.00000000000000000000L,
107 3.14159265358979323851L, /*pi*/
108 0.30102999566398119523L, /*lg2*/
109 0.69314718055994530943L, /*ln2*/
110 1.44269504088896340739L, /*l2e*/
111 3.32192809488736234781L, /*l2t*/
112};
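/* The f15rk table above holds the constants pushed by the x87 load-constant
   instructions: index 0 is FLDZ, 1 FLD1, 2 FLDPI, 3 FLDLG2, 4 FLDLN2,
   5 FLDL2E and 6 FLDL2T.  The FPU helpers further down in the file copy
   these values onto the register stack. */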
113
114/* broken thread support */
115
116spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;
117
118void helper_lock(void)
119{
120 spin_lock(&global_cpu_lock);
121}
122
123void helper_unlock(void)
124{
125 spin_unlock(&global_cpu_lock);
126}
127
128void helper_write_eflags(target_ulong t0, uint32_t update_mask)
129{
130 load_eflags(t0, update_mask);
131}
132
133target_ulong helper_read_eflags(void)
134{
135 uint32_t eflags;
136 eflags = cc_table[CC_OP].compute_all();
137 eflags |= (DF & DF_MASK);
138 eflags |= env->eflags & ~(VM_MASK | RF_MASK);
139 return eflags;
140}
141
142#ifdef VBOX
143void helper_write_eflags_vme(target_ulong t0)
144{
145 unsigned int new_eflags = t0;
146
147 assert(env->eflags & (1<<VM_SHIFT));
148
149 /* if virtual interrupt pending and (virtual) interrupts will be enabled -> #GP */
150 /* if TF will be set -> #GP */
151 if ( ((new_eflags & IF_MASK) && (env->eflags & VIP_MASK))
152 || (new_eflags & TF_MASK)) {
153 raise_exception(EXCP0D_GPF);
154 } else {
155 load_eflags(new_eflags,
156 (TF_MASK | AC_MASK | ID_MASK | NT_MASK) & 0xffff);
157
158 if (new_eflags & IF_MASK) {
159 env->eflags |= VIF_MASK;
160 } else {
161 env->eflags &= ~VIF_MASK;
162 }
163 }
164}
165
166target_ulong helper_read_eflags_vme(void)
167{
168 uint32_t eflags;
169 eflags = cc_table[CC_OP].compute_all();
170 eflags |= (DF & DF_MASK);
171 eflags |= env->eflags & ~(VM_MASK | RF_MASK);
172 if (env->eflags & VIF_MASK)
173 eflags |= IF_MASK;
174 else
175 eflags &= ~IF_MASK;
176
 177 /* According to the AMD manual, EFLAGS should be read with IOPL == 3 */
178 eflags |= (3 << IOPL_SHIFT);
179
180 /* We only use helper_read_eflags_vme() in 16-bits mode */
181 return eflags & 0xffff;
182}
183
184void helper_dump_state()
185{
186 LogRel(("CS:EIP=%08x:%08x, FLAGS=%08x\n", env->segs[R_CS].base, env->eip, env->eflags));
187 LogRel(("EAX=%08x\tECX=%08x\tEDX=%08x\tEBX=%08x\n",
188 (uint32_t)env->regs[R_EAX], (uint32_t)env->regs[R_ECX],
189 (uint32_t)env->regs[R_EDX], (uint32_t)env->regs[R_EBX]));
190 LogRel(("ESP=%08x\tEBP=%08x\tESI=%08x\tEDI=%08x\n",
191 (uint32_t)env->regs[R_ESP], (uint32_t)env->regs[R_EBP],
192 (uint32_t)env->regs[R_ESI], (uint32_t)env->regs[R_EDI]));
193}
194#endif
195
 196/* return non-zero on error */
197#ifndef VBOX
198static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
199#else /* VBOX */
200DECLINLINE(int) load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
201#endif /* VBOX */
202 int selector)
203{
204 SegmentCache *dt;
205 int index;
206 target_ulong ptr;
207
208#ifdef VBOX
209 /* Trying to load a selector with CPL=1? */
210 if ((env->hflags & HF_CPL_MASK) == 0 && (selector & 3) == 1 && (env->state & CPU_RAW_RING0))
211 {
212 Log(("RPL 1 -> sel %04X -> %04X\n", selector, selector & 0xfffc));
213 selector = selector & 0xfffc;
214 }
215#endif
216
217 if (selector & 0x4)
218 dt = &env->ldt;
219 else
220 dt = &env->gdt;
221 index = selector & ~7;
222 if ((index + 7) > dt->limit)
223 return -1;
224 ptr = dt->base + index;
225 *e1_ptr = ldl_kernel(ptr);
226 *e2_ptr = ldl_kernel(ptr + 4);
227 return 0;
228}
229
230#ifndef VBOX
231static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
232#else /* VBOX */
233DECLINLINE(unsigned int) get_seg_limit(uint32_t e1, uint32_t e2)
234#endif /* VBOX */
235{
236 unsigned int limit;
237 limit = (e1 & 0xffff) | (e2 & 0x000f0000);
238 if (e2 & DESC_G_MASK)
239 limit = (limit << 12) | 0xfff;
240 return limit;
241}
242
243#ifndef VBOX
244static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
245#else /* VBOX */
246DECLINLINE(uint32_t) get_seg_base(uint32_t e1, uint32_t e2)
247#endif /* VBOX */
248{
249 return ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
250}
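/* A protected-mode segment descriptor is handled here as two 32-bit words:
   e1 is the low word (limit bits 15:0 and base bits 15:0) and e2 is the
   high word (base bits 23:16 and 31:24, limit bits 19:16 and the attribute
   bits).  get_seg_limit() scales the 20-bit limit by 4K when the granularity
   bit (DESC_G_MASK) is set and get_seg_base() stitches the three base
   fields back together. */
#if 0
/* Illustrative values (not from this file): a flat 4 GB code descriptor. */
uint32_t e1 = 0x0000ffff, e2 = 0x00cf9a00;
/* get_seg_base(e1, e2)  == 0x00000000 */
/* get_seg_limit(e1, e2) == 0xffffffff  (G set, so the limit is scaled by 4K) */
#endif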
251
252#ifndef VBOX
253static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
254#else /* VBOX */
255DECLINLINE(void) load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
256#endif /* VBOX */
257{
258 sc->base = get_seg_base(e1, e2);
259 sc->limit = get_seg_limit(e1, e2);
260 sc->flags = e2;
261}
262
263/* init the segment cache in vm86 mode. */
264#ifndef VBOX
265static inline void load_seg_vm(int seg, int selector)
266#else /* VBOX */
267DECLINLINE(void) load_seg_vm(int seg, int selector)
268#endif /* VBOX */
269{
270 selector &= 0xffff;
271#ifdef VBOX
272 unsigned flags = DESC_P_MASK | DESC_S_MASK | DESC_W_MASK;
273
274 if (seg == R_CS)
275 flags |= DESC_CS_MASK;
276
277 cpu_x86_load_seg_cache(env, seg, selector,
278 (selector << 4), 0xffff, flags);
279#else
280 cpu_x86_load_seg_cache(env, seg, selector,
281 (selector << 4), 0xffff, 0);
282#endif
283}
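/* In real and vm86 mode a selector is effectively a paragraph number: the
   segment base is selector * 16 and the limit is 0xffff, which is what
   load_seg_vm() above stuffs into the segment cache.  The VBox variant also
   sets present/writable (and, for CS, code) attribute bits, presumably so
   that later permission checks see a usable descriptor. */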
284
285#ifndef VBOX
286static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
287#else /* VBOX */
288DECLINLINE(void) get_ss_esp_from_tss(uint32_t *ss_ptr,
289#endif /* VBOX */
290 uint32_t *esp_ptr, int dpl)
291{
292#ifndef VBOX
293 int type, index, shift;
294#else
295 unsigned int type, index, shift;
296#endif
297
298#if 0
299 {
300 int i;
301 printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
302 for(i=0;i<env->tr.limit;i++) {
303 printf("%02x ", env->tr.base[i]);
304 if ((i & 7) == 7) printf("\n");
305 }
306 printf("\n");
307 }
308#endif
309
310 if (!(env->tr.flags & DESC_P_MASK))
311 cpu_abort(env, "invalid tss");
312 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
313 if ((type & 7) != 1)
314 cpu_abort(env, "invalid tss type");
315 shift = type >> 3;
316 index = (dpl * 4 + 2) << shift;
317 if (index + (4 << shift) - 1 > env->tr.limit)
318 raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
319 if (shift == 0) {
320 *esp_ptr = lduw_kernel(env->tr.base + index);
321 *ss_ptr = lduw_kernel(env->tr.base + index + 2);
322 } else {
323 *esp_ptr = ldl_kernel(env->tr.base + index);
324 *ss_ptr = lduw_kernel(env->tr.base + index + 4);
325 }
326}
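/* get_ss_esp_from_tss() above relies on the TSS stack-pointer slots: a
   32-bit TSS stores ESP0/SS0, ESP1/SS1 and ESP2/SS2 in 8-byte pairs starting
   at offset 4, while a 16-bit TSS stores SP0/SS0 etc. in 4-byte pairs
   starting at offset 2.  The expression (dpl * 4 + 2) << shift yields the
   byte offset of the ESPn/SPn field for privilege level dpl in either format
   (shift is 1 for a 32-bit TSS, 0 for a 16-bit one). */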
327
328/* XXX: merge with load_seg() */
329static void tss_load_seg(int seg_reg, int selector)
330{
331 uint32_t e1, e2;
332 int rpl, dpl, cpl;
333
334#ifdef VBOX
335 e1 = e2 = 0;
336 cpl = env->hflags & HF_CPL_MASK;
337 /* Trying to load a selector with CPL=1? */
338 if (cpl == 0 && (selector & 3) == 1 && (env->state & CPU_RAW_RING0))
339 {
340 Log(("RPL 1 -> sel %04X -> %04X\n", selector, selector & 0xfffc));
341 selector = selector & 0xfffc;
342 }
343#endif
344
345 if ((selector & 0xfffc) != 0) {
346 if (load_segment(&e1, &e2, selector) != 0)
347 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
348 if (!(e2 & DESC_S_MASK))
349 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
350 rpl = selector & 3;
351 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
352 cpl = env->hflags & HF_CPL_MASK;
353 if (seg_reg == R_CS) {
354 if (!(e2 & DESC_CS_MASK))
355 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
356 /* XXX: is it correct ? */
357 if (dpl != rpl)
358 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
359 if ((e2 & DESC_C_MASK) && dpl > rpl)
360 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
361 } else if (seg_reg == R_SS) {
362 /* SS must be writable data */
363 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
364 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
365 if (dpl != cpl || dpl != rpl)
366 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
367 } else {
368 /* not readable code */
369 if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
370 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
371 /* if data or non conforming code, checks the rights */
372 if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
373 if (dpl < cpl || dpl < rpl)
374 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
375 }
376 }
377 if (!(e2 & DESC_P_MASK))
378 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
379 cpu_x86_load_seg_cache(env, seg_reg, selector,
380 get_seg_base(e1, e2),
381 get_seg_limit(e1, e2),
382 e2);
383 } else {
384 if (seg_reg == R_SS || seg_reg == R_CS)
385 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
386#ifdef VBOX
387#if 0
388 /** @todo: now we ignore loading 0 selectors, need to check what is correct once */
389 cpu_x86_load_seg_cache(env, seg_reg, selector,
390 0, 0, 0);
391#endif
392#endif
393 }
394}
395
396#define SWITCH_TSS_JMP 0
397#define SWITCH_TSS_IRET 1
398#define SWITCH_TSS_CALL 2
399
400/* XXX: restore CPU state in registers (PowerPC case) */
401static void switch_tss(int tss_selector,
402 uint32_t e1, uint32_t e2, int source,
403 uint32_t next_eip)
404{
405 int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
406 target_ulong tss_base;
407 uint32_t new_regs[8], new_segs[6];
408 uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
409 uint32_t old_eflags, eflags_mask;
410 SegmentCache *dt;
411#ifndef VBOX
412 int index;
413#else
414 unsigned int index;
415#endif
416 target_ulong ptr;
417
418 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
419#ifdef DEBUG_PCALL
420 if (loglevel & CPU_LOG_PCALL)
421 fprintf(logfile, "switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);
422#endif
423
424#if defined(VBOX) && defined(DEBUG)
425 printf("switch_tss %x %x %x %d %08x\n", tss_selector, e1, e2, source, next_eip);
426#endif
427
428 /* if task gate, we read the TSS segment and we load it */
429 if (type == 5) {
430 if (!(e2 & DESC_P_MASK))
431 raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
432 tss_selector = e1 >> 16;
433 if (tss_selector & 4)
434 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
435 if (load_segment(&e1, &e2, tss_selector) != 0)
436 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
437 if (e2 & DESC_S_MASK)
438 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
439 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
440 if ((type & 7) != 1)
441 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
442 }
443
444 if (!(e2 & DESC_P_MASK))
445 raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
446
447 if (type & 8)
448 tss_limit_max = 103;
449 else
450 tss_limit_max = 43;
451 tss_limit = get_seg_limit(e1, e2);
452 tss_base = get_seg_base(e1, e2);
453 if ((tss_selector & 4) != 0 ||
454 tss_limit < tss_limit_max)
455 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
456 old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
457 if (old_type & 8)
458 old_tss_limit_max = 103;
459 else
460 old_tss_limit_max = 43;
461
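/* 32-bit TSS layout used by the loads below: CR3 at offset 0x1c, EIP at
   0x20, EFLAGS at 0x24, the eight general registers at 0x28..0x44, the six
   segment selectors at 0x48..0x5c, the LDT selector at 0x60 and the T
   (debug trap) flag at 0x64.  The 16-bit (286) TSS packs the corresponding
   fields into 2-byte slots at lower offsets, hence the different constants
   in the 16-bit branch. */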
462 /* read all the registers from the new TSS */
463 if (type & 8) {
464 /* 32 bit */
465 new_cr3 = ldl_kernel(tss_base + 0x1c);
466 new_eip = ldl_kernel(tss_base + 0x20);
467 new_eflags = ldl_kernel(tss_base + 0x24);
468 for(i = 0; i < 8; i++)
469 new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
470 for(i = 0; i < 6; i++)
471 new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
472 new_ldt = lduw_kernel(tss_base + 0x60);
473 new_trap = ldl_kernel(tss_base + 0x64);
474 } else {
475 /* 16 bit */
476 new_cr3 = 0;
477 new_eip = lduw_kernel(tss_base + 0x0e);
478 new_eflags = lduw_kernel(tss_base + 0x10);
479 for(i = 0; i < 8; i++)
480 new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
481 for(i = 0; i < 4; i++)
482 new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
483 new_ldt = lduw_kernel(tss_base + 0x2a);
484 new_segs[R_FS] = 0;
485 new_segs[R_GS] = 0;
486 new_trap = 0;
487 }
488
489 /* NOTE: we must avoid memory exceptions during the task switch,
 490 so we make dummy accesses beforehand */
491 /* XXX: it can still fail in some cases, so a bigger hack is
 492 necessary to validate the TLB after having done the accesses */
493
494 v1 = ldub_kernel(env->tr.base);
495 v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
496 stb_kernel(env->tr.base, v1);
497 stb_kernel(env->tr.base + old_tss_limit_max, v2);
498
499 /* clear busy bit (it is restartable) */
500 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
501 target_ulong ptr;
502 uint32_t e2;
503 ptr = env->gdt.base + (env->tr.selector & ~7);
504 e2 = ldl_kernel(ptr + 4);
505 e2 &= ~DESC_TSS_BUSY_MASK;
506 stl_kernel(ptr + 4, e2);
507 }
508 old_eflags = compute_eflags();
509 if (source == SWITCH_TSS_IRET)
510 old_eflags &= ~NT_MASK;
511
512 /* save the current state in the old TSS */
513 if (type & 8) {
514 /* 32 bit */
515 stl_kernel(env->tr.base + 0x20, next_eip);
516 stl_kernel(env->tr.base + 0x24, old_eflags);
517 stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
518 stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
519 stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
520 stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
521 stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
522 stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
523 stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
524 stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
525 for(i = 0; i < 6; i++)
526 stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
527#if defined(VBOX) && defined(DEBUG)
528 printf("TSS 32 bits switch\n");
529 printf("Saving CS=%08X\n", env->segs[R_CS].selector);
530#endif
531 } else {
532 /* 16 bit */
533 stw_kernel(env->tr.base + 0x0e, next_eip);
534 stw_kernel(env->tr.base + 0x10, old_eflags);
535 stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
536 stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
537 stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
538 stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
539 stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
540 stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
541 stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
542 stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
543 for(i = 0; i < 4; i++)
544 stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
545 }
546
 547 /* now if an exception occurs, it will occur in the next task
548 context */
549
550 if (source == SWITCH_TSS_CALL) {
551 stw_kernel(tss_base, env->tr.selector);
552 new_eflags |= NT_MASK;
553 }
554
555 /* set busy bit */
556 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
557 target_ulong ptr;
558 uint32_t e2;
559 ptr = env->gdt.base + (tss_selector & ~7);
560 e2 = ldl_kernel(ptr + 4);
561 e2 |= DESC_TSS_BUSY_MASK;
562 stl_kernel(ptr + 4, e2);
563 }
564
565 /* set the new CPU state */
566 /* from this point, any exception which occurs can give problems */
567 env->cr[0] |= CR0_TS_MASK;
568 env->hflags |= HF_TS_MASK;
569 env->tr.selector = tss_selector;
570 env->tr.base = tss_base;
571 env->tr.limit = tss_limit;
572 env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;
573
574 if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
575 cpu_x86_update_cr3(env, new_cr3);
576 }
577
 578 /* load all registers without raising an exception, then reload them,
 579 possibly raising exceptions */
580 env->eip = new_eip;
581 eflags_mask = TF_MASK | AC_MASK | ID_MASK |
582 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
583 if (!(type & 8))
584 eflags_mask &= 0xffff;
585 load_eflags(new_eflags, eflags_mask);
586 /* XXX: what to do in 16 bit case ? */
587 EAX = new_regs[0];
588 ECX = new_regs[1];
589 EDX = new_regs[2];
590 EBX = new_regs[3];
591 ESP = new_regs[4];
592 EBP = new_regs[5];
593 ESI = new_regs[6];
594 EDI = new_regs[7];
595 if (new_eflags & VM_MASK) {
596 for(i = 0; i < 6; i++)
597 load_seg_vm(i, new_segs[i]);
598 /* in vm86, CPL is always 3 */
599 cpu_x86_set_cpl(env, 3);
600 } else {
 601 /* CPL is set to the RPL of CS */
602 cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
603 /* first just selectors as the rest may trigger exceptions */
604 for(i = 0; i < 6; i++)
605 cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
606 }
607
608 env->ldt.selector = new_ldt & ~4;
609 env->ldt.base = 0;
610 env->ldt.limit = 0;
611 env->ldt.flags = 0;
612
613 /* load the LDT */
614 if (new_ldt & 4)
615 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
616
617 if ((new_ldt & 0xfffc) != 0) {
618 dt = &env->gdt;
619 index = new_ldt & ~7;
620 if ((index + 7) > dt->limit)
621 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
622 ptr = dt->base + index;
623 e1 = ldl_kernel(ptr);
624 e2 = ldl_kernel(ptr + 4);
625 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
626 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
627 if (!(e2 & DESC_P_MASK))
628 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
629 load_seg_cache_raw_dt(&env->ldt, e1, e2);
630 }
631
632 /* load the segments */
633 if (!(new_eflags & VM_MASK)) {
634 tss_load_seg(R_CS, new_segs[R_CS]);
635 tss_load_seg(R_SS, new_segs[R_SS]);
636 tss_load_seg(R_ES, new_segs[R_ES]);
637 tss_load_seg(R_DS, new_segs[R_DS]);
638 tss_load_seg(R_FS, new_segs[R_FS]);
639 tss_load_seg(R_GS, new_segs[R_GS]);
640 }
641
642 /* check that EIP is in the CS segment limits */
643 if (new_eip > env->segs[R_CS].limit) {
644 /* XXX: different exception if CALL ? */
645 raise_exception_err(EXCP0D_GPF, 0);
646 }
647}
648
649/* check if Port I/O is allowed in TSS */
650#ifndef VBOX
651static inline void check_io(int addr, int size)
652{
653 int io_offset, val, mask;
654
655#else /* VBOX */
656DECLINLINE(void) check_io(int addr, int size)
657{
658 int val, mask;
659 unsigned int io_offset;
660#endif /* VBOX */
661 /* TSS must be a valid 32 bit one */
662 if (!(env->tr.flags & DESC_P_MASK) ||
663 ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
664 env->tr.limit < 103)
665 goto fail;
666 io_offset = lduw_kernel(env->tr.base + 0x66);
667 io_offset += (addr >> 3);
668 /* Note: the check needs two bytes */
669 if ((io_offset + 1) > env->tr.limit)
670 goto fail;
671 val = lduw_kernel(env->tr.base + io_offset);
672 val >>= (addr & 7);
673 mask = (1 << size) - 1;
674 /* all bits must be zero to allow the I/O */
675 if ((val & mask) != 0) {
676 fail:
677 raise_exception_err(EXCP0D_GPF, 0);
678 }
679}
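/* check_io() above walks the TSS I/O permission bitmap: the 16-bit word at
   TSS offset 0x66 gives the bitmap's offset within the TSS and each bitmap
   bit guards one port; every bit covering ports [addr, addr + size) must be
   clear for the access to be allowed.  Two bytes are read because an access
   of up to four ports starting near a byte boundary can straddle two bitmap
   bytes - e.g. a 4-byte access at port 0x3fd needs bits 5..7 of one byte and
   bit 0 of the next (illustrative values, not from this file). */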
680
681#ifdef VBOX
682/* Keep in sync with gen_check_external_event() */
683void helper_check_external_event()
684{
685 if ( (env->interrupt_request & ( CPU_INTERRUPT_EXTERNAL_EXIT
686 | CPU_INTERRUPT_EXTERNAL_TIMER
687 | CPU_INTERRUPT_EXTERNAL_DMA))
688 || ( (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD)
689 && (env->eflags & IF_MASK)
690 && !(env->hflags & HF_INHIBIT_IRQ_MASK) ) )
691 {
692 helper_external_event();
693 }
694
695}
696
697void helper_sync_seg(uint32_t reg)
698{
699 if (env->segs[reg].newselector)
700 sync_seg(env, reg, env->segs[reg].newselector);
701}
702#endif
703
704void helper_check_iob(uint32_t t0)
705{
706 check_io(t0, 1);
707}
708
709void helper_check_iow(uint32_t t0)
710{
711 check_io(t0, 2);
712}
713
714void helper_check_iol(uint32_t t0)
715{
716 check_io(t0, 4);
717}
718
719void helper_outb(uint32_t port, uint32_t data)
720{
721 cpu_outb(env, port, data & 0xff);
722}
723
724target_ulong helper_inb(uint32_t port)
725{
726 return cpu_inb(env, port);
727}
728
729void helper_outw(uint32_t port, uint32_t data)
730{
731 cpu_outw(env, port, data & 0xffff);
732}
733
734target_ulong helper_inw(uint32_t port)
735{
736 return cpu_inw(env, port);
737}
738
739void helper_outl(uint32_t port, uint32_t data)
740{
741 cpu_outl(env, port, data);
742}
743
744target_ulong helper_inl(uint32_t port)
745{
746 return cpu_inl(env, port);
747}
748
749#ifndef VBOX
750static inline unsigned int get_sp_mask(unsigned int e2)
751#else /* VBOX */
752DECLINLINE(unsigned int) get_sp_mask(unsigned int e2)
753#endif /* VBOX */
754{
755 if (e2 & DESC_B_MASK)
756 return 0xffffffff;
757 else
758 return 0xffff;
759}
760
761#ifdef TARGET_X86_64
762#define SET_ESP(val, sp_mask)\
763do {\
764 if ((sp_mask) == 0xffff)\
765 ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
766 else if ((sp_mask) == 0xffffffffLL)\
767 ESP = (uint32_t)(val);\
768 else\
769 ESP = (val);\
770} while (0)
771#else
772#define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
773#endif
774
 775/* on 64-bit machines this can overflow, so this segment addition macro
 776 * can be used to trim the value to 32 bits whenever needed */
777#define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))
778
779/* XXX: add a is_user flag to have proper security support */
780#define PUSHW(ssp, sp, sp_mask, val)\
781{\
782 sp -= 2;\
783 stw_kernel((ssp) + (sp & (sp_mask)), (val));\
784}
785
786#define PUSHL(ssp, sp, sp_mask, val)\
787{\
788 sp -= 4;\
789 stl_kernel(SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val));\
790}
791
792#define POPW(ssp, sp, sp_mask, val)\
793{\
794 val = lduw_kernel((ssp) + (sp & (sp_mask)));\
795 sp += 2;\
796}
797
798#define POPL(ssp, sp, sp_mask, val)\
799{\
800 val = (uint32_t)ldl_kernel(SEG_ADDL(ssp, sp, sp_mask));\
801 sp += 4;\
802}
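/* The PUSHW/PUSHL/POPW/POPL macros above work on a local copy of the stack
   pointer: sp is masked with sp_mask (0xffff for a 16-bit stack segment,
   0xffffffff for a 32-bit one) whenever an address is formed, and callers
   such as do_interrupt_protected() commit the final value back to ESP with
   SET_ESP() only after the whole frame has been written, so a fault in the
   middle leaves ESP unchanged.  SEG_ADDL truncates base + offset to 32 bits
   so the linear address cannot overflow when target_ulong is 64 bits wide. */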
803
804/* protected mode interrupt */
805static void do_interrupt_protected(int intno, int is_int, int error_code,
806 unsigned int next_eip, int is_hw)
807{
808 SegmentCache *dt;
809 target_ulong ptr, ssp;
810 int type, dpl, selector, ss_dpl, cpl;
811 int has_error_code, new_stack, shift;
812 uint32_t e1, e2, offset, ss, esp, ss_e1, ss_e2;
813 uint32_t old_eip, sp_mask;
814
815#ifdef VBOX
816 ss = ss_e1 = ss_e2 = 0;
817# ifdef VBOX_WITH_VMI
818 if ( intno == 6
819 && PARAVIsBiosCall(env->pVM, (RTRCPTR)next_eip, env->regs[R_EAX]))
820 {
821 env->exception_index = EXCP_PARAV_CALL;
822 cpu_loop_exit();
823 }
824# endif
825 if (remR3NotifyTrap(env, intno, error_code, next_eip) != VINF_SUCCESS)
826 cpu_loop_exit();
827#endif
828
829 has_error_code = 0;
830 if (!is_int && !is_hw) {
831 switch(intno) {
832 case 8:
833 case 10:
834 case 11:
835 case 12:
836 case 13:
837 case 14:
838 case 17:
839 has_error_code = 1;
840 break;
841 }
842 }
843 if (is_int)
844 old_eip = next_eip;
845 else
846 old_eip = env->eip;
847
848 dt = &env->idt;
849#ifndef VBOX
850 if (intno * 8 + 7 > dt->limit)
851#else
852 if ((unsigned)intno * 8 + 7 > dt->limit)
853#endif
854 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
855 ptr = dt->base + intno * 8;
856 e1 = ldl_kernel(ptr);
857 e2 = ldl_kernel(ptr + 4);
858 /* check gate type */
859 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
860 switch(type) {
861 case 5: /* task gate */
862 /* must do that check here to return the correct error code */
863 if (!(e2 & DESC_P_MASK))
864 raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
865 switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
866 if (has_error_code) {
867 int type;
868 uint32_t mask;
869 /* push the error code */
870 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
871 shift = type >> 3;
872 if (env->segs[R_SS].flags & DESC_B_MASK)
873 mask = 0xffffffff;
874 else
875 mask = 0xffff;
876 esp = (ESP - (2 << shift)) & mask;
877 ssp = env->segs[R_SS].base + esp;
878 if (shift)
879 stl_kernel(ssp, error_code);
880 else
881 stw_kernel(ssp, error_code);
882 SET_ESP(esp, mask);
883 }
884 return;
885 case 6: /* 286 interrupt gate */
886 case 7: /* 286 trap gate */
887 case 14: /* 386 interrupt gate */
888 case 15: /* 386 trap gate */
889 break;
890 default:
891 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
892 break;
893 }
894 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
895 cpl = env->hflags & HF_CPL_MASK;
896 /* check privilege if software int */
897 if (is_int && dpl < cpl)
898 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
899 /* check valid bit */
900 if (!(e2 & DESC_P_MASK))
901 raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
902 selector = e1 >> 16;
903 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
904 if ((selector & 0xfffc) == 0)
905 raise_exception_err(EXCP0D_GPF, 0);
906
907 if (load_segment(&e1, &e2, selector) != 0)
908 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
909 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
910 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
911 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
912 if (dpl > cpl)
913 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
914 if (!(e2 & DESC_P_MASK))
915 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
916 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
917 /* to inner privilege */
918 get_ss_esp_from_tss(&ss, &esp, dpl);
919 if ((ss & 0xfffc) == 0)
920 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
921 if ((ss & 3) != dpl)
922 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
923 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
924 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
925 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
926 if (ss_dpl != dpl)
927 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
928 if (!(ss_e2 & DESC_S_MASK) ||
929 (ss_e2 & DESC_CS_MASK) ||
930 !(ss_e2 & DESC_W_MASK))
931 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
932 if (!(ss_e2 & DESC_P_MASK))
933#ifdef VBOX /* See page 3-477 of 253666.pdf */
934 raise_exception_err(EXCP0C_STACK, ss & 0xfffc);
935#else
936 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
937#endif
938 new_stack = 1;
939 sp_mask = get_sp_mask(ss_e2);
940 ssp = get_seg_base(ss_e1, ss_e2);
941#if defined(VBOX) && defined(DEBUG)
942 printf("new stack %04X:%08X gate dpl=%d\n", ss, esp, dpl);
943#endif
944 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
945 /* to same privilege */
946 if (env->eflags & VM_MASK)
947 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
948 new_stack = 0;
949 sp_mask = get_sp_mask(env->segs[R_SS].flags);
950 ssp = env->segs[R_SS].base;
951 esp = ESP;
952 dpl = cpl;
953 } else {
954 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
955 new_stack = 0; /* avoid warning */
956 sp_mask = 0; /* avoid warning */
957 ssp = 0; /* avoid warning */
958 esp = 0; /* avoid warning */
959 }
960
961 shift = type >> 3;
962
963#if 0
964 /* XXX: check that enough room is available */
965 push_size = 6 + (new_stack << 2) + (has_error_code << 1);
966 if (env->eflags & VM_MASK)
967 push_size += 8;
968 push_size <<= shift;
969#endif
970 if (shift == 1) {
971 if (new_stack) {
972 if (env->eflags & VM_MASK) {
973 PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
974 PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
975 PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
976 PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
977 }
978 PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
979 PUSHL(ssp, esp, sp_mask, ESP);
980 }
981 PUSHL(ssp, esp, sp_mask, compute_eflags());
982 PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
983 PUSHL(ssp, esp, sp_mask, old_eip);
984 if (has_error_code) {
985 PUSHL(ssp, esp, sp_mask, error_code);
986 }
987 } else {
988 if (new_stack) {
989 if (env->eflags & VM_MASK) {
990 PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
991 PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
992 PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
993 PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
994 }
995 PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
996 PUSHW(ssp, esp, sp_mask, ESP);
997 }
998 PUSHW(ssp, esp, sp_mask, compute_eflags());
999 PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
1000 PUSHW(ssp, esp, sp_mask, old_eip);
1001 if (has_error_code) {
1002 PUSHW(ssp, esp, sp_mask, error_code);
1003 }
1004 }
1005
1006 if (new_stack) {
1007 if (env->eflags & VM_MASK) {
1008 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
1009 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
1010 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
1011 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
1012 }
1013 ss = (ss & ~3) | dpl;
1014 cpu_x86_load_seg_cache(env, R_SS, ss,
1015 ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
1016 }
1017 SET_ESP(esp, sp_mask);
1018
1019 selector = (selector & ~3) | dpl;
1020 cpu_x86_load_seg_cache(env, R_CS, selector,
1021 get_seg_base(e1, e2),
1022 get_seg_limit(e1, e2),
1023 e2);
1024 cpu_x86_set_cpl(env, dpl);
1025 env->eip = offset;
1026
1027 /* interrupt gate clear IF mask */
1028 if ((type & 1) == 0) {
1029 env->eflags &= ~IF_MASK;
1030 }
1031#ifndef VBOX
1032 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
1033#else
1034 /*
1035 * We must clear VIP/VIF too on interrupt entry, as otherwise FreeBSD
 1036 * gets confused by seemingly changed EFLAGS. See #3491 and
1037 * public bug #2341.
1038 */
1039 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK | VIF_MASK | VIP_MASK);
1040#endif
1041}
1042#ifdef VBOX
1043
1044/* check if VME interrupt redirection is enabled in TSS */
1045DECLINLINE(bool) is_vme_irq_redirected(int intno)
1046{
1047 unsigned int io_offset, intredir_offset;
1048 unsigned char val, mask;
1049
1050 /* TSS must be a valid 32 bit one */
1051 if (!(env->tr.flags & DESC_P_MASK) ||
1052 ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
1053 env->tr.limit < 103)
1054 goto fail;
1055 io_offset = lduw_kernel(env->tr.base + 0x66);
1056 /* Make sure the io bitmap offset is valid; anything less than sizeof(VBOXTSS) means there's none. */
1057 if (io_offset < 0x68 + 0x20)
1058 io_offset = 0x68 + 0x20;
1059 /* the virtual interrupt redirection bitmap is located below the io bitmap */
1060 intredir_offset = io_offset - 0x20;
1061
1062 intredir_offset += (intno >> 3);
1063 if ((intredir_offset) > env->tr.limit)
1064 goto fail;
1065
1066 val = ldub_kernel(env->tr.base + intredir_offset);
1067 mask = 1 << (unsigned char)(intno & 7);
1068
1069 /* bit set means no redirection. */
1070 if ((val & mask) != 0) {
1071 return false;
1072 }
1073 return true;
1074
1075fail:
1076 raise_exception_err(EXCP0D_GPF, 0);
1077 return true;
1078}
1079
1080/* V86 mode software interrupt with CR4.VME=1 */
1081static void do_soft_interrupt_vme(int intno, int error_code, unsigned int next_eip)
1082{
1083 target_ulong ptr, ssp;
1084 int selector;
1085 uint32_t offset, esp;
1086 uint32_t old_cs, old_eflags;
1087 uint32_t iopl;
1088
1089 iopl = ((env->eflags >> IOPL_SHIFT) & 3);
1090
1091 if (!is_vme_irq_redirected(intno))
1092 {
1093 if (iopl == 3)
1094 {
1095 do_interrupt_protected(intno, 1, error_code, next_eip, 0);
1096 return;
1097 }
1098 else
1099 raise_exception_err(EXCP0D_GPF, 0);
1100 }
1101
1102 /* virtual mode idt is at linear address 0 */
1103 ptr = 0 + intno * 4;
1104 offset = lduw_kernel(ptr);
1105 selector = lduw_kernel(ptr + 2);
1106 esp = ESP;
1107 ssp = env->segs[R_SS].base;
1108 old_cs = env->segs[R_CS].selector;
1109
1110 old_eflags = compute_eflags();
1111 if (iopl < 3)
1112 {
1113 /* copy VIF into IF and set IOPL to 3 */
1114 if (env->eflags & VIF_MASK)
1115 old_eflags |= IF_MASK;
1116 else
1117 old_eflags &= ~IF_MASK;
1118
1119 old_eflags |= (3 << IOPL_SHIFT);
1120 }
1121
1122 /* XXX: use SS segment size ? */
1123 PUSHW(ssp, esp, 0xffff, old_eflags);
1124 PUSHW(ssp, esp, 0xffff, old_cs);
1125 PUSHW(ssp, esp, 0xffff, next_eip);
1126
1127 /* update processor state */
1128 ESP = (ESP & ~0xffff) | (esp & 0xffff);
1129 env->eip = offset;
1130 env->segs[R_CS].selector = selector;
1131 env->segs[R_CS].base = (selector << 4);
1132 env->eflags &= ~(TF_MASK | RF_MASK);
1133
1134 if (iopl < 3)
1135 env->eflags &= ~VIF_MASK;
1136 else
1137 env->eflags &= ~IF_MASK;
1138}
1139#endif /* VBOX */
1140
1141#ifdef TARGET_X86_64
1142
1143#define PUSHQ(sp, val)\
1144{\
1145 sp -= 8;\
1146 stq_kernel(sp, (val));\
1147}
1148
1149#define POPQ(sp, val)\
1150{\
1151 val = ldq_kernel(sp);\
1152 sp += 8;\
1153}
1154
1155#ifndef VBOX
1156static inline target_ulong get_rsp_from_tss(int level)
1157#else /* VBOX */
1158DECLINLINE(target_ulong) get_rsp_from_tss(int level)
1159#endif /* VBOX */
1160{
1161 int index;
1162
1163#if 0
1164 printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
1165 env->tr.base, env->tr.limit);
1166#endif
1167
1168 if (!(env->tr.flags & DESC_P_MASK))
1169 cpu_abort(env, "invalid tss");
1170 index = 8 * level + 4;
1171 if ((index + 7) > env->tr.limit)
1172 raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
1173 return ldq_kernel(env->tr.base + index);
1174}
1175
1176/* 64 bit interrupt */
1177static void do_interrupt64(int intno, int is_int, int error_code,
1178 target_ulong next_eip, int is_hw)
1179{
1180 SegmentCache *dt;
1181 target_ulong ptr;
1182 int type, dpl, selector, cpl, ist;
1183 int has_error_code, new_stack;
1184 uint32_t e1, e2, e3, ss;
1185 target_ulong old_eip, esp, offset;
1186
1187#ifdef VBOX
1188 if (remR3NotifyTrap(env, intno, error_code, next_eip) != VINF_SUCCESS)
1189 cpu_loop_exit();
1190#endif
1191
1192 has_error_code = 0;
1193 if (!is_int && !is_hw) {
1194 switch(intno) {
1195 case 8:
1196 case 10:
1197 case 11:
1198 case 12:
1199 case 13:
1200 case 14:
1201 case 17:
1202 has_error_code = 1;
1203 break;
1204 }
1205 }
1206 if (is_int)
1207 old_eip = next_eip;
1208 else
1209 old_eip = env->eip;
1210
1211 dt = &env->idt;
1212 if (intno * 16 + 15 > dt->limit)
1213 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
1214 ptr = dt->base + intno * 16;
1215 e1 = ldl_kernel(ptr);
1216 e2 = ldl_kernel(ptr + 4);
1217 e3 = ldl_kernel(ptr + 8);
1218 /* check gate type */
1219 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
1220 switch(type) {
1221 case 14: /* 386 interrupt gate */
1222 case 15: /* 386 trap gate */
1223 break;
1224 default:
1225 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
1226 break;
1227 }
1228 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1229 cpl = env->hflags & HF_CPL_MASK;
1230 /* check privilege if software int */
1231 if (is_int && dpl < cpl)
1232 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
1233 /* check valid bit */
1234 if (!(e2 & DESC_P_MASK))
1235 raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2);
1236 selector = e1 >> 16;
1237 offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
1238 ist = e2 & 7;
1239 if ((selector & 0xfffc) == 0)
1240 raise_exception_err(EXCP0D_GPF, 0);
1241
1242 if (load_segment(&e1, &e2, selector) != 0)
1243 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1244 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
1245 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1246 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1247 if (dpl > cpl)
1248 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1249 if (!(e2 & DESC_P_MASK))
1250 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
1251 if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))
1252 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1253 if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
1254 /* to inner privilege */
1255 if (ist != 0)
1256 esp = get_rsp_from_tss(ist + 3);
1257 else
1258 esp = get_rsp_from_tss(dpl);
1259 esp &= ~0xfLL; /* align stack */
1260 ss = 0;
1261 new_stack = 1;
1262 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
1263 /* to same privilege */
1264 if (env->eflags & VM_MASK)
1265 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1266 new_stack = 0;
1267 if (ist != 0)
1268 esp = get_rsp_from_tss(ist + 3);
1269 else
1270 esp = ESP;
1271 esp &= ~0xfLL; /* align stack */
1272 dpl = cpl;
1273 } else {
1274 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1275 new_stack = 0; /* avoid warning */
1276 esp = 0; /* avoid warning */
1277 }
1278
1279 PUSHQ(esp, env->segs[R_SS].selector);
1280 PUSHQ(esp, ESP);
1281 PUSHQ(esp, compute_eflags());
1282 PUSHQ(esp, env->segs[R_CS].selector);
1283 PUSHQ(esp, old_eip);
1284 if (has_error_code) {
1285 PUSHQ(esp, error_code);
1286 }
1287
1288 if (new_stack) {
1289 ss = 0 | dpl;
1290 cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
1291 }
1292 ESP = esp;
1293
1294 selector = (selector & ~3) | dpl;
1295 cpu_x86_load_seg_cache(env, R_CS, selector,
1296 get_seg_base(e1, e2),
1297 get_seg_limit(e1, e2),
1298 e2);
1299 cpu_x86_set_cpl(env, dpl);
1300 env->eip = offset;
1301
1302 /* interrupt gate clear IF mask */
1303 if ((type & 1) == 0) {
1304 env->eflags &= ~IF_MASK;
1305 }
1306
1307#ifndef VBOX
1308 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
1309#else
1310 /*
1311 * We must clear VIP/VIF too on interrupt entry, as otherwise FreeBSD
 1312 * gets confused by seemingly changed EFLAGS. See #3491 and
1313 * public bug #2341.
1314 */
1315 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK | VIF_MASK | VIP_MASK);
1316#endif
1317}
1318#endif
1319
1320#if defined(CONFIG_USER_ONLY)
1321void helper_syscall(int next_eip_addend)
1322{
1323 env->exception_index = EXCP_SYSCALL;
1324 env->exception_next_eip = env->eip + next_eip_addend;
1325 cpu_loop_exit();
1326}
1327#else
1328void helper_syscall(int next_eip_addend)
1329{
1330 int selector;
1331
1332 if (!(env->efer & MSR_EFER_SCE)) {
1333 raise_exception_err(EXCP06_ILLOP, 0);
1334 }
1335 selector = (env->star >> 32) & 0xffff;
1336#ifdef TARGET_X86_64
1337 if (env->hflags & HF_LMA_MASK) {
1338 int code64;
1339
1340 ECX = env->eip + next_eip_addend;
1341 env->regs[11] = compute_eflags();
1342
1343 code64 = env->hflags & HF_CS64_MASK;
1344
1345 cpu_x86_set_cpl(env, 0);
1346 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1347 0, 0xffffffff,
1348 DESC_G_MASK | DESC_P_MASK |
1349 DESC_S_MASK |
1350 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
1351 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1352 0, 0xffffffff,
1353 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1354 DESC_S_MASK |
1355 DESC_W_MASK | DESC_A_MASK);
1356 env->eflags &= ~env->fmask;
1357 load_eflags(env->eflags, 0);
1358 if (code64)
1359 env->eip = env->lstar;
1360 else
1361 env->eip = env->cstar;
1362 } else
1363#endif
1364 {
1365 ECX = (uint32_t)(env->eip + next_eip_addend);
1366
1367 cpu_x86_set_cpl(env, 0);
1368 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1369 0, 0xffffffff,
1370 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1371 DESC_S_MASK |
1372 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1373 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1374 0, 0xffffffff,
1375 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1376 DESC_S_MASK |
1377 DESC_W_MASK | DESC_A_MASK);
1378 env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
1379 env->eip = (uint32_t)env->star;
1380 }
1381}
1382#endif
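/* helper_syscall() above implements the SYSCALL convention: the kernel code
   selector comes from STAR[47:32] (env->star) and the stack selector is that
   value + 8.  In long mode the return RIP is saved in RCX, the old RFLAGS in
   R11, the entry point is LSTAR (or CSTAR for compatibility-mode callers)
   and the bits set in SFMASK (env->fmask) are cleared from RFLAGS.  In
   legacy mode ECX receives the return EIP, the entry point is STAR[31:0] and
   IF/VM/RF are cleared. */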
1383
1384void helper_sysret(int dflag)
1385{
1386 int cpl, selector;
1387
1388 if (!(env->efer & MSR_EFER_SCE)) {
1389 raise_exception_err(EXCP06_ILLOP, 0);
1390 }
1391 cpl = env->hflags & HF_CPL_MASK;
1392 if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
1393 raise_exception_err(EXCP0D_GPF, 0);
1394 }
1395 selector = (env->star >> 48) & 0xffff;
1396#ifdef TARGET_X86_64
1397 if (env->hflags & HF_LMA_MASK) {
1398 if (dflag == 2) {
1399 cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
1400 0, 0xffffffff,
1401 DESC_G_MASK | DESC_P_MASK |
1402 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1403 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
1404 DESC_L_MASK);
1405 env->eip = ECX;
1406 } else {
1407 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1408 0, 0xffffffff,
1409 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1410 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1411 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1412 env->eip = (uint32_t)ECX;
1413 }
1414 cpu_x86_load_seg_cache(env, R_SS, selector + 8,
1415 0, 0xffffffff,
1416 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1417 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1418 DESC_W_MASK | DESC_A_MASK);
1419 load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
1420 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
1421 cpu_x86_set_cpl(env, 3);
1422 } else
1423#endif
1424 {
1425 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1426 0, 0xffffffff,
1427 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1428 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1429 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1430 env->eip = (uint32_t)ECX;
1431 cpu_x86_load_seg_cache(env, R_SS, selector + 8,
1432 0, 0xffffffff,
1433 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1434 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1435 DESC_W_MASK | DESC_A_MASK);
1436 env->eflags |= IF_MASK;
1437 cpu_x86_set_cpl(env, 3);
1438 }
1439#ifdef USE_KQEMU
1440 if (kqemu_is_ok(env)) {
1441 if (env->hflags & HF_LMA_MASK)
1442 CC_OP = CC_OP_EFLAGS;
1443 env->exception_index = -1;
1444 cpu_loop_exit();
1445 }
1446#endif
1447}
1448
1449#ifdef VBOX
1450/**
1451 * Checks and processes external VMM events.
1452 * Called by op_check_external_event() when any of the flags is set and can be serviced.
1453 */
1454void helper_external_event(void)
1455{
1456#if defined(RT_OS_DARWIN) && defined(VBOX_STRICT)
1457 uintptr_t uSP;
1458# ifdef RT_ARCH_AMD64
1459 __asm__ __volatile__("movq %%rsp, %0" : "=r" (uSP));
1460# else
1461 __asm__ __volatile__("movl %%esp, %0" : "=r" (uSP));
1462# endif
1463 AssertMsg(!(uSP & 15), ("xSP=%#p\n", uSP));
1464#endif
1465 /* Keep in sync with flags checked by gen_check_external_event() */
1466 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD)
1467 {
1468 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1469 ~CPU_INTERRUPT_EXTERNAL_HARD);
1470 cpu_interrupt(env, CPU_INTERRUPT_HARD);
1471 }
1472 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_EXIT)
1473 {
1474 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1475 ~CPU_INTERRUPT_EXTERNAL_EXIT);
1476 cpu_interrupt(env, CPU_INTERRUPT_EXIT);
1477 }
1478 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_DMA)
1479 {
1480 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1481 ~CPU_INTERRUPT_EXTERNAL_DMA);
1482 remR3DmaRun(env);
1483 }
1484 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_TIMER)
1485 {
1486 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1487 ~CPU_INTERRUPT_EXTERNAL_TIMER);
1488 remR3TimersRun(env);
1489 }
1490}
1491/* helper for recording call instruction addresses for later scanning */
1492void helper_record_call()
1493{
1494 if ( !(env->state & CPU_RAW_RING0)
1495 && (env->cr[0] & CR0_PG_MASK)
1496 && !(env->eflags & X86_EFL_IF))
1497 remR3RecordCall(env);
1498}
1499#endif /* VBOX */
1500
1501/* real mode interrupt */
1502static void do_interrupt_real(int intno, int is_int, int error_code,
1503 unsigned int next_eip)
1504{
1505 SegmentCache *dt;
1506 target_ulong ptr, ssp;
1507 int selector;
1508 uint32_t offset, esp;
1509 uint32_t old_cs, old_eip;
1510
1511 /* real mode (simpler !) */
1512 dt = &env->idt;
1513#ifndef VBOX
1514 if (intno * 4 + 3 > dt->limit)
1515#else
1516 if ((unsigned)intno * 4 + 3 > dt->limit)
1517#endif
1518 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
1519 ptr = dt->base + intno * 4;
1520 offset = lduw_kernel(ptr);
1521 selector = lduw_kernel(ptr + 2);
1522 esp = ESP;
1523 ssp = env->segs[R_SS].base;
1524 if (is_int)
1525 old_eip = next_eip;
1526 else
1527 old_eip = env->eip;
1528 old_cs = env->segs[R_CS].selector;
1529 /* XXX: use SS segment size ? */
1530 PUSHW(ssp, esp, 0xffff, compute_eflags());
1531 PUSHW(ssp, esp, 0xffff, old_cs);
1532 PUSHW(ssp, esp, 0xffff, old_eip);
1533
1534 /* update processor state */
1535 ESP = (ESP & ~0xffff) | (esp & 0xffff);
1536 env->eip = offset;
1537 env->segs[R_CS].selector = selector;
1538 env->segs[R_CS].base = (selector << 4);
1539 env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
1540}
1541
1542/* fake user mode interrupt */
1543void do_interrupt_user(int intno, int is_int, int error_code,
1544 target_ulong next_eip)
1545{
1546 SegmentCache *dt;
1547 target_ulong ptr;
1548 int dpl, cpl, shift;
1549 uint32_t e2;
1550
1551 dt = &env->idt;
1552 if (env->hflags & HF_LMA_MASK) {
1553 shift = 4;
1554 } else {
1555 shift = 3;
1556 }
1557 ptr = dt->base + (intno << shift);
1558 e2 = ldl_kernel(ptr + 4);
1559
1560 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1561 cpl = env->hflags & HF_CPL_MASK;
1562 /* check privilege if software int */
1563 if (is_int && dpl < cpl)
1564 raise_exception_err(EXCP0D_GPF, (intno << shift) + 2);
1565
1566 /* Since we emulate only user space, we cannot do more than
 1567 exit the emulation with a suitable exception and error
1568 code */
1569 if (is_int)
1570 EIP = next_eip;
1571}
1572
1573/*
 1574 * Begin execution of an interrupt. is_int is TRUE if coming from
1575 * the int instruction. next_eip is the EIP value AFTER the interrupt
1576 * instruction. It is only relevant if is_int is TRUE.
1577 */
1578void do_interrupt(int intno, int is_int, int error_code,
1579 target_ulong next_eip, int is_hw)
1580{
1581 if (loglevel & CPU_LOG_INT) {
1582 if ((env->cr[0] & CR0_PE_MASK)) {
1583 static int count;
1584 fprintf(logfile, "%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
1585 count, intno, error_code, is_int,
1586 env->hflags & HF_CPL_MASK,
1587 env->segs[R_CS].selector, EIP,
1588 (int)env->segs[R_CS].base + EIP,
1589 env->segs[R_SS].selector, ESP);
1590 if (intno == 0x0e) {
1591 fprintf(logfile, " CR2=" TARGET_FMT_lx, env->cr[2]);
1592 } else {
1593 fprintf(logfile, " EAX=" TARGET_FMT_lx, EAX);
1594 }
1595 fprintf(logfile, "\n");
1596 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
1597#if 0
1598 {
1599 int i;
1600 uint8_t *ptr;
1601 fprintf(logfile, " code=");
1602 ptr = env->segs[R_CS].base + env->eip;
1603 for(i = 0; i < 16; i++) {
1604 fprintf(logfile, " %02x", ldub(ptr + i));
1605 }
1606 fprintf(logfile, "\n");
1607 }
1608#endif
1609 count++;
1610 }
1611 }
1612 if (env->cr[0] & CR0_PE_MASK) {
1613#ifdef TARGET_X86_64
1614 if (env->hflags & HF_LMA_MASK) {
1615 do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
1616 } else
1617#endif
1618 {
1619#ifdef VBOX
1620 /* int xx *, v86 code and VME enabled? */
1621 if ( (env->eflags & VM_MASK)
1622 && (env->cr[4] & CR4_VME_MASK)
1623 && is_int
1624 && !is_hw
1625 && env->eip + 1 != next_eip /* single byte int 3 goes straight to the protected mode handler */
1626 )
1627 do_soft_interrupt_vme(intno, error_code, next_eip);
1628 else
1629#endif /* VBOX */
1630 do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
1631 }
1632 } else {
1633 do_interrupt_real(intno, is_int, error_code, next_eip);
1634 }
1635}
1636
1637/*
1638 * Check nested exceptions and change to double or triple fault if
 1639 * needed. It should only be called if this is not an interrupt.
1640 * Returns the new exception number.
1641 */
1642static int check_exception(int intno, int *error_code)
1643{
1644 int first_contributory = env->old_exception == 0 ||
1645 (env->old_exception >= 10 &&
1646 env->old_exception <= 13);
1647 int second_contributory = intno == 0 ||
1648 (intno >= 10 && intno <= 13);
1649
1650 if (loglevel & CPU_LOG_INT)
1651 fprintf(logfile, "check_exception old: 0x%x new 0x%x\n",
1652 env->old_exception, intno);
1653
1654 if (env->old_exception == EXCP08_DBLE)
1655 cpu_abort(env, "triple fault");
1656
1657 if ((first_contributory && second_contributory)
1658 || (env->old_exception == EXCP0E_PAGE &&
1659 (second_contributory || (intno == EXCP0E_PAGE)))) {
1660 intno = EXCP08_DBLE;
1661 *error_code = 0;
1662 }
1663
1664 if (second_contributory || (intno == EXCP0E_PAGE) ||
1665 (intno == EXCP08_DBLE))
1666 env->old_exception = intno;
1667
1668 return intno;
1669}
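/* The "contributory" exceptions in the double-fault rules are #DE (0),
   #TS (10), #NP (11), #SS (12) and #GP (13), which is what the two range
   checks above encode.  A contributory fault raised while delivering another
   contributory fault, or a contributory or page fault raised while
   delivering a page fault, escalates to #DF; any further fault while #DF is
   pending aborts with a triple fault. */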
1670
1671/*
1672 * Signal an interruption. It is executed in the main CPU loop.
1673 * is_int is TRUE if coming from the int instruction. next_eip is the
1674 * EIP value AFTER the interrupt instruction. It is only relevant if
1675 * is_int is TRUE.
1676 */
1677void raise_interrupt(int intno, int is_int, int error_code,
1678 int next_eip_addend)
1679{
1680#if defined(VBOX) && defined(DEBUG)
1681 Log2(("raise_interrupt: %x %x %x %RGv\n", intno, is_int, error_code, env->eip + next_eip_addend));
1682#endif
1683 if (!is_int) {
1684 helper_svm_check_intercept_param(SVM_EXIT_EXCP_BASE + intno, error_code);
1685 intno = check_exception(intno, &error_code);
1686 } else {
1687 helper_svm_check_intercept_param(SVM_EXIT_SWINT, 0);
1688 }
1689
1690 env->exception_index = intno;
1691 env->error_code = error_code;
1692 env->exception_is_int = is_int;
1693 env->exception_next_eip = env->eip + next_eip_addend;
1694 cpu_loop_exit();
1695}
1696
1697/* shortcuts to generate exceptions */
1698
1699void (raise_exception_err)(int exception_index, int error_code)
1700{
1701 raise_interrupt(exception_index, 0, error_code, 0);
1702}
1703
1704void raise_exception(int exception_index)
1705{
1706 raise_interrupt(exception_index, 0, 0, 0);
1707}
1708
1709/* SMM support */
1710
1711#if defined(CONFIG_USER_ONLY)
1712
1713void do_smm_enter(void)
1714{
1715}
1716
1717void helper_rsm(void)
1718{
1719}
1720
1721#else
1722
1723#ifdef TARGET_X86_64
1724#define SMM_REVISION_ID 0x00020064
1725#else
1726#define SMM_REVISION_ID 0x00020000
1727#endif
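/* do_smm_enter() below saves the CPU state into the SMRAM state save area,
   which occupies the top of the 64 KB SMM segment ([smbase + 0x8000,
   smbase + 0xffff]) at the fixed offsets used in the stores.
   SMM_REVISION_ID is written into that area; the 0x00020064 value of the
   x86-64 build presumably matches the AMD64 save-map revision, and bit
   0x20000 is what helper_rsm() later checks before honouring a relocated
   SMBASE. */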
1728
1729void do_smm_enter(void)
1730{
1731 target_ulong sm_state;
1732 SegmentCache *dt;
1733 int i, offset;
1734
1735 if (loglevel & CPU_LOG_INT) {
1736 fprintf(logfile, "SMM: enter\n");
1737 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
1738 }
1739
1740 env->hflags |= HF_SMM_MASK;
1741 cpu_smm_update(env);
1742
1743 sm_state = env->smbase + 0x8000;
1744
1745#ifdef TARGET_X86_64
1746 for(i = 0; i < 6; i++) {
1747 dt = &env->segs[i];
1748 offset = 0x7e00 + i * 16;
1749 stw_phys(sm_state + offset, dt->selector);
1750 stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
1751 stl_phys(sm_state + offset + 4, dt->limit);
1752 stq_phys(sm_state + offset + 8, dt->base);
1753 }
1754
1755 stq_phys(sm_state + 0x7e68, env->gdt.base);
1756 stl_phys(sm_state + 0x7e64, env->gdt.limit);
1757
1758 stw_phys(sm_state + 0x7e70, env->ldt.selector);
1759 stq_phys(sm_state + 0x7e78, env->ldt.base);
1760 stl_phys(sm_state + 0x7e74, env->ldt.limit);
1761 stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);
1762
1763 stq_phys(sm_state + 0x7e88, env->idt.base);
1764 stl_phys(sm_state + 0x7e84, env->idt.limit);
1765
1766 stw_phys(sm_state + 0x7e90, env->tr.selector);
1767 stq_phys(sm_state + 0x7e98, env->tr.base);
1768 stl_phys(sm_state + 0x7e94, env->tr.limit);
1769 stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);
1770
1771 stq_phys(sm_state + 0x7ed0, env->efer);
1772
1773 stq_phys(sm_state + 0x7ff8, EAX);
1774 stq_phys(sm_state + 0x7ff0, ECX);
1775 stq_phys(sm_state + 0x7fe8, EDX);
1776 stq_phys(sm_state + 0x7fe0, EBX);
1777 stq_phys(sm_state + 0x7fd8, ESP);
1778 stq_phys(sm_state + 0x7fd0, EBP);
1779 stq_phys(sm_state + 0x7fc8, ESI);
1780 stq_phys(sm_state + 0x7fc0, EDI);
1781 for(i = 8; i < 16; i++)
1782 stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
1783 stq_phys(sm_state + 0x7f78, env->eip);
1784 stl_phys(sm_state + 0x7f70, compute_eflags());
1785 stl_phys(sm_state + 0x7f68, env->dr[6]);
1786 stl_phys(sm_state + 0x7f60, env->dr[7]);
1787
1788 stl_phys(sm_state + 0x7f48, env->cr[4]);
1789 stl_phys(sm_state + 0x7f50, env->cr[3]);
1790 stl_phys(sm_state + 0x7f58, env->cr[0]);
1791
1792 stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1793 stl_phys(sm_state + 0x7f00, env->smbase);
1794#else
1795 stl_phys(sm_state + 0x7ffc, env->cr[0]);
1796 stl_phys(sm_state + 0x7ff8, env->cr[3]);
1797 stl_phys(sm_state + 0x7ff4, compute_eflags());
1798 stl_phys(sm_state + 0x7ff0, env->eip);
1799 stl_phys(sm_state + 0x7fec, EDI);
1800 stl_phys(sm_state + 0x7fe8, ESI);
1801 stl_phys(sm_state + 0x7fe4, EBP);
1802 stl_phys(sm_state + 0x7fe0, ESP);
1803 stl_phys(sm_state + 0x7fdc, EBX);
1804 stl_phys(sm_state + 0x7fd8, EDX);
1805 stl_phys(sm_state + 0x7fd4, ECX);
1806 stl_phys(sm_state + 0x7fd0, EAX);
1807 stl_phys(sm_state + 0x7fcc, env->dr[6]);
1808 stl_phys(sm_state + 0x7fc8, env->dr[7]);
1809
1810 stl_phys(sm_state + 0x7fc4, env->tr.selector);
1811 stl_phys(sm_state + 0x7f64, env->tr.base);
1812 stl_phys(sm_state + 0x7f60, env->tr.limit);
1813 stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);
1814
1815 stl_phys(sm_state + 0x7fc0, env->ldt.selector);
1816 stl_phys(sm_state + 0x7f80, env->ldt.base);
1817 stl_phys(sm_state + 0x7f7c, env->ldt.limit);
1818 stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);
1819
1820 stl_phys(sm_state + 0x7f74, env->gdt.base);
1821 stl_phys(sm_state + 0x7f70, env->gdt.limit);
1822
1823 stl_phys(sm_state + 0x7f58, env->idt.base);
1824 stl_phys(sm_state + 0x7f54, env->idt.limit);
1825
1826 for(i = 0; i < 6; i++) {
1827 dt = &env->segs[i];
1828 if (i < 3)
1829 offset = 0x7f84 + i * 12;
1830 else
1831 offset = 0x7f2c + (i - 3) * 12;
1832 stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
1833 stl_phys(sm_state + offset + 8, dt->base);
1834 stl_phys(sm_state + offset + 4, dt->limit);
1835 stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
1836 }
1837 stl_phys(sm_state + 0x7f14, env->cr[4]);
1838
1839 stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1840 stl_phys(sm_state + 0x7ef8, env->smbase);
1841#endif
1842 /* init SMM cpu state */
1843
1844#ifdef TARGET_X86_64
1845 cpu_load_efer(env, 0);
1846#endif
1847 load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1848 env->eip = 0x00008000;
1849 cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
1850 0xffffffff, 0);
1851 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
1852 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
1853 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
1854 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
1855 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);
1856
1857 cpu_x86_update_cr0(env,
1858 env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK));
1859 cpu_x86_update_cr4(env, 0);
1860 env->dr[7] = 0x00000400;
1861 CC_OP = CC_OP_EFLAGS;
1862}
1863
1864void helper_rsm(void)
1865{
1866#ifdef VBOX
1867 cpu_abort(env, "helper_rsm");
1868#else /* !VBOX */
1871 target_ulong sm_state;
1872 int i, offset;
1873 uint32_t val;
1874
1875 sm_state = env->smbase + 0x8000;
1876#ifdef TARGET_X86_64
1877 cpu_load_efer(env, ldq_phys(sm_state + 0x7ed0));
1878
1879 for(i = 0; i < 6; i++) {
1880 offset = 0x7e00 + i * 16;
1881 cpu_x86_load_seg_cache(env, i,
1882 lduw_phys(sm_state + offset),
1883 ldq_phys(sm_state + offset + 8),
1884 ldl_phys(sm_state + offset + 4),
1885 (lduw_phys(sm_state + offset + 2) & 0xf0ff) << 8);
1886 }
1887
1888 env->gdt.base = ldq_phys(sm_state + 0x7e68);
1889 env->gdt.limit = ldl_phys(sm_state + 0x7e64);
1890
1891 env->ldt.selector = lduw_phys(sm_state + 0x7e70);
1892 env->ldt.base = ldq_phys(sm_state + 0x7e78);
1893 env->ldt.limit = ldl_phys(sm_state + 0x7e74);
1894 env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;
1895
1896 env->idt.base = ldq_phys(sm_state + 0x7e88);
1897 env->idt.limit = ldl_phys(sm_state + 0x7e84);
1898
1899 env->tr.selector = lduw_phys(sm_state + 0x7e90);
1900 env->tr.base = ldq_phys(sm_state + 0x7e98);
1901 env->tr.limit = ldl_phys(sm_state + 0x7e94);
1902 env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;
1903
1904 EAX = ldq_phys(sm_state + 0x7ff8);
1905 ECX = ldq_phys(sm_state + 0x7ff0);
1906 EDX = ldq_phys(sm_state + 0x7fe8);
1907 EBX = ldq_phys(sm_state + 0x7fe0);
1908 ESP = ldq_phys(sm_state + 0x7fd8);
1909 EBP = ldq_phys(sm_state + 0x7fd0);
1910 ESI = ldq_phys(sm_state + 0x7fc8);
1911 EDI = ldq_phys(sm_state + 0x7fc0);
1912 for(i = 8; i < 16; i++)
1913 env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
1914 env->eip = ldq_phys(sm_state + 0x7f78);
1915 load_eflags(ldl_phys(sm_state + 0x7f70),
1916 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1917 env->dr[6] = ldl_phys(sm_state + 0x7f68);
1918 env->dr[7] = ldl_phys(sm_state + 0x7f60);
1919
1920 cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
1921 cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
1922 cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));
1923
1924 val = ldl_phys(sm_state + 0x7efc); /* revision ID */
1925 if (val & 0x20000) {
1926 env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
1927 }
1928#else
1929 cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
1930 cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
1931 load_eflags(ldl_phys(sm_state + 0x7ff4),
1932 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1933 env->eip = ldl_phys(sm_state + 0x7ff0);
1934 EDI = ldl_phys(sm_state + 0x7fec);
1935 ESI = ldl_phys(sm_state + 0x7fe8);
1936 EBP = ldl_phys(sm_state + 0x7fe4);
1937 ESP = ldl_phys(sm_state + 0x7fe0);
1938 EBX = ldl_phys(sm_state + 0x7fdc);
1939 EDX = ldl_phys(sm_state + 0x7fd8);
1940 ECX = ldl_phys(sm_state + 0x7fd4);
1941 EAX = ldl_phys(sm_state + 0x7fd0);
1942 env->dr[6] = ldl_phys(sm_state + 0x7fcc);
1943 env->dr[7] = ldl_phys(sm_state + 0x7fc8);
1944
1945 env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
1946 env->tr.base = ldl_phys(sm_state + 0x7f64);
1947 env->tr.limit = ldl_phys(sm_state + 0x7f60);
1948 env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;
1949
1950 env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
1951 env->ldt.base = ldl_phys(sm_state + 0x7f80);
1952 env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
1953 env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;
1954
1955 env->gdt.base = ldl_phys(sm_state + 0x7f74);
1956 env->gdt.limit = ldl_phys(sm_state + 0x7f70);
1957
1958 env->idt.base = ldl_phys(sm_state + 0x7f58);
1959 env->idt.limit = ldl_phys(sm_state + 0x7f54);
1960
1961 for(i = 0; i < 6; i++) {
1962 if (i < 3)
1963 offset = 0x7f84 + i * 12;
1964 else
1965 offset = 0x7f2c + (i - 3) * 12;
1966 cpu_x86_load_seg_cache(env, i,
1967 ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
1968 ldl_phys(sm_state + offset + 8),
1969 ldl_phys(sm_state + offset + 4),
1970 (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
1971 }
1972 cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));
1973
1974 val = ldl_phys(sm_state + 0x7efc); /* revision ID */
1975 if (val & 0x20000) {
1976 env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
1977 }
1978#endif
1979 CC_OP = CC_OP_EFLAGS;
1980 env->hflags &= ~HF_SMM_MASK;
1981 cpu_smm_update(env);
1982
1983 if (loglevel & CPU_LOG_INT) {
1984 fprintf(logfile, "SMM: after RSM\n");
1985 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
1986 }
1987#endif /* !VBOX */
1988}
1989
1990#endif /* !CONFIG_USER_ONLY */
1991
1992
1993/* division, flags are undefined */
1994
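/*
 * For the divide helpers below the dividend is AX (byte form), DX:AX (word
 * form) or EDX:EAX (long form); the quotient goes to AL/AX/EAX and the
 * remainder to AH/DX/EDX.  A zero divisor or a quotient that does not fit
 * in the destination raises #DE (EXCP00_DIVZ).  Example: DIV r/m8 with
 * AX = 0x013A (314) and a divisor of 10 yields AL = 31, AH = 4.
 */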
1995void helper_divb_AL(target_ulong t0)
1996{
1997 unsigned int num, den, q, r;
1998
1999 num = (EAX & 0xffff);
2000 den = (t0 & 0xff);
2001 if (den == 0) {
2002 raise_exception(EXCP00_DIVZ);
2003 }
2004 q = (num / den);
2005 if (q > 0xff)
2006 raise_exception(EXCP00_DIVZ);
2007 q &= 0xff;
2008 r = (num % den) & 0xff;
2009 EAX = (EAX & ~0xffff) | (r << 8) | q;
2010}
2011
2012void helper_idivb_AL(target_ulong t0)
2013{
2014 int num, den, q, r;
2015
2016 num = (int16_t)EAX;
2017 den = (int8_t)t0;
2018 if (den == 0) {
2019 raise_exception(EXCP00_DIVZ);
2020 }
2021 q = (num / den);
2022 if (q != (int8_t)q)
2023 raise_exception(EXCP00_DIVZ);
2024 q &= 0xff;
2025 r = (num % den) & 0xff;
2026 EAX = (EAX & ~0xffff) | (r << 8) | q;
2027}
2028
2029void helper_divw_AX(target_ulong t0)
2030{
2031 unsigned int num, den, q, r;
2032
2033 num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
2034 den = (t0 & 0xffff);
2035 if (den == 0) {
2036 raise_exception(EXCP00_DIVZ);
2037 }
2038 q = (num / den);
2039 if (q > 0xffff)
2040 raise_exception(EXCP00_DIVZ);
2041 q &= 0xffff;
2042 r = (num % den) & 0xffff;
2043 EAX = (EAX & ~0xffff) | q;
2044 EDX = (EDX & ~0xffff) | r;
2045}
2046
2047void helper_idivw_AX(target_ulong t0)
2048{
2049 int num, den, q, r;
2050
2051 num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
2052 den = (int16_t)t0;
2053 if (den == 0) {
2054 raise_exception(EXCP00_DIVZ);
2055 }
2056 q = (num / den);
2057 if (q != (int16_t)q)
2058 raise_exception(EXCP00_DIVZ);
2059 q &= 0xffff;
2060 r = (num % den) & 0xffff;
2061 EAX = (EAX & ~0xffff) | q;
2062 EDX = (EDX & ~0xffff) | r;
2063}
2064
2065void helper_divl_EAX(target_ulong t0)
2066{
2067 unsigned int den, r;
2068 uint64_t num, q;
2069
2070 num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
2071 den = t0;
2072 if (den == 0) {
2073 raise_exception(EXCP00_DIVZ);
2074 }
2075 q = (num / den);
2076 r = (num % den);
2077 if (q > 0xffffffff)
2078 raise_exception(EXCP00_DIVZ);
2079 EAX = (uint32_t)q;
2080 EDX = (uint32_t)r;
2081}
2082
2083void helper_idivl_EAX(target_ulong t0)
2084{
2085 int den, r;
2086 int64_t num, q;
2087
2088 num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
2089 den = t0;
2090 if (den == 0) {
2091 raise_exception(EXCP00_DIVZ);
2092 }
2093 q = (num / den);
2094 r = (num % den);
2095 if (q != (int32_t)q)
2096 raise_exception(EXCP00_DIVZ);
2097 EAX = (uint32_t)q;
2098 EDX = (uint32_t)r;
2099}
2100
2101/* bcd */
2102
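/*
 * AAM and AAD adjust AL/AH around the immediate base (10 for the standard
 * encodings).  AAM splits: AH = AL / base, AL = AL % base; AAD recombines:
 * AL = (AH * base + AL) & 0xff, AH = 0.  Example with base 10 and
 * AL = 63 (0x3F): AAM leaves AH = 6, AL = 3.  The XXX below presumably
 * refers to the missing #DE check for a zero base.
 */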
2103/* XXX: exception */
2104void helper_aam(int base)
2105{
2106 int al, ah;
2107 al = EAX & 0xff;
2108 ah = al / base;
2109 al = al % base;
2110 EAX = (EAX & ~0xffff) | al | (ah << 8);
2111 CC_DST = al;
2112}
2113
2114void helper_aad(int base)
2115{
2116 int al, ah;
2117 al = EAX & 0xff;
2118 ah = (EAX >> 8) & 0xff;
2119 al = ((ah * base) + al) & 0xff;
2120 EAX = (EAX & ~0xffff) | al;
2121 CC_DST = al;
2122}
2123
2124void helper_aaa(void)
2125{
2126 int icarry;
2127 int al, ah, af;
2128 int eflags;
2129
2130 eflags = cc_table[CC_OP].compute_all();
2131 af = eflags & CC_A;
2132 al = EAX & 0xff;
2133 ah = (EAX >> 8) & 0xff;
2134
2135 icarry = (al > 0xf9);
2136 if (((al & 0x0f) > 9 ) || af) {
2137 al = (al + 6) & 0x0f;
2138 ah = (ah + 1 + icarry) & 0xff;
2139 eflags |= CC_C | CC_A;
2140 } else {
2141 eflags &= ~(CC_C | CC_A);
2142 al &= 0x0f;
2143 }
2144 EAX = (EAX & ~0xffff) | al | (ah << 8);
2145 CC_SRC = eflags;
2146 FORCE_RET();
2147}
2148
2149void helper_aas(void)
2150{
2151 int icarry;
2152 int al, ah, af;
2153 int eflags;
2154
2155 eflags = cc_table[CC_OP].compute_all();
2156 af = eflags & CC_A;
2157 al = EAX & 0xff;
2158 ah = (EAX >> 8) & 0xff;
2159
2160 icarry = (al < 6);
2161 if (((al & 0x0f) > 9 ) || af) {
2162 al = (al - 6) & 0x0f;
2163 ah = (ah - 1 - icarry) & 0xff;
2164 eflags |= CC_C | CC_A;
2165 } else {
2166 eflags &= ~(CC_C | CC_A);
2167 al &= 0x0f;
2168 }
2169 EAX = (EAX & ~0xffff) | al | (ah << 8);
2170 CC_SRC = eflags;
2171 FORCE_RET();
2172}
2173
2174void helper_daa(void)
2175{
2176 int al, af, cf;
2177 int eflags;
2178
2179 eflags = cc_table[CC_OP].compute_all();
2180 cf = eflags & CC_C;
2181 af = eflags & CC_A;
2182 al = EAX & 0xff;
2183
2184 eflags = 0;
2185 if (((al & 0x0f) > 9 ) || af) {
2186 al = (al + 6) & 0xff;
2187 eflags |= CC_A;
2188 }
2189 if ((al > 0x9f) || cf) {
2190 al = (al + 0x60) & 0xff;
2191 eflags |= CC_C;
2192 }
2193 EAX = (EAX & ~0xff) | al;
2194 /* well, speed is not an issue here, so we compute the flags by hand */
2195 eflags |= (al == 0) << 6; /* zf */
2196 eflags |= parity_table[al]; /* pf */
2197 eflags |= (al & 0x80); /* sf */
2198 CC_SRC = eflags;
2199 FORCE_RET();
2200}
2201
2202void helper_das(void)
2203{
2204 int al, al1, af, cf;
2205 int eflags;
2206
2207 eflags = cc_table[CC_OP].compute_all();
2208 cf = eflags & CC_C;
2209 af = eflags & CC_A;
2210 al = EAX & 0xff;
2211
2212 eflags = 0;
2213 al1 = al;
2214 if (((al & 0x0f) > 9 ) || af) {
2215 eflags |= CC_A;
2216 if (al < 6 || cf)
2217 eflags |= CC_C;
2218 al = (al - 6) & 0xff;
2219 }
2220 if ((al1 > 0x99) || cf) {
2221 al = (al - 0x60) & 0xff;
2222 eflags |= CC_C;
2223 }
2224 EAX = (EAX & ~0xff) | al;
2225 /* well, speed is not an issue here, so we compute the flags by hand */
2226 eflags |= (al == 0) << 6; /* zf */
2227 eflags |= parity_table[al]; /* pf */
2228 eflags |= (al & 0x80); /* sf */
2229 CC_SRC = eflags;
2230 FORCE_RET();
2231}
2232
2233void helper_into(int next_eip_addend)
2234{
2235 int eflags;
2236 eflags = cc_table[CC_OP].compute_all();
2237 if (eflags & CC_O) {
2238 raise_interrupt(EXCP04_INTO, 1, 0, next_eip_addend);
2239 }
2240}
2241
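/*
 * CMPXCHG8B compares the 64-bit memory operand with EDX:EAX; on a match it
 * stores ECX:EBX and sets ZF, otherwise it loads the old value into
 * EDX:EAX and clears ZF.  The store is performed in both cases, matching
 * real hardware, which writes the destination even when the comparison
 * fails.
 */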
2242void helper_cmpxchg8b(target_ulong a0)
2243{
2244 uint64_t d;
2245 int eflags;
2246
2247 eflags = cc_table[CC_OP].compute_all();
2248 d = ldq(a0);
2249 if (d == (((uint64_t)EDX << 32) | (uint32_t)EAX)) {
2250 stq(a0, ((uint64_t)ECX << 32) | (uint32_t)EBX);
2251 eflags |= CC_Z;
2252 } else {
2253 /* always do the store */
2254 stq(a0, d);
2255 EDX = (uint32_t)(d >> 32);
2256 EAX = (uint32_t)d;
2257 eflags &= ~CC_Z;
2258 }
2259 CC_SRC = eflags;
2260}
2261
2262#ifdef TARGET_X86_64
2263void helper_cmpxchg16b(target_ulong a0)
2264{
2265 uint64_t d0, d1;
2266 int eflags;
2267
2268 if ((a0 & 0xf) != 0)
2269 raise_exception(EXCP0D_GPF);
2270 eflags = cc_table[CC_OP].compute_all();
2271 d0 = ldq(a0);
2272 d1 = ldq(a0 + 8);
2273 if (d0 == EAX && d1 == EDX) {
2274 stq(a0, EBX);
2275 stq(a0 + 8, ECX);
2276 eflags |= CC_Z;
2277 } else {
2278 /* always do the store */
2279 stq(a0, d0);
2280 stq(a0 + 8, d1);
2281 EDX = d1;
2282 EAX = d0;
2283 eflags &= ~CC_Z;
2284 }
2285 CC_SRC = eflags;
2286}
2287#endif
2288
2289void helper_single_step(void)
2290{
2291 env->dr[6] |= 0x4000;
2292 raise_exception(EXCP01_SSTP);
2293}
2294
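/*
 * CPUID: the leaf is selected by EAX (with ECX as sub-leaf for leaf 4) and
 * the results are returned in EAX/EBX/ECX/EDX.  The table below is only
 * used by the plain QEMU build; the VBox build delegates to remR3CpuId, so
 * the reported values are supplied by the VMM's CPUID configuration.
 */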
2295void helper_cpuid(void)
2296{
2297#ifndef VBOX
2298 uint32_t index;
2299
2300 helper_svm_check_intercept_param(SVM_EXIT_CPUID, 0);
2301
2302 index = (uint32_t)EAX;
2303 /* test if maximum index reached */
2304 if (index & 0x80000000) {
2305 if (index > env->cpuid_xlevel)
2306 index = env->cpuid_level;
2307 } else {
2308 if (index > env->cpuid_level)
2309 index = env->cpuid_level;
2310 }
2311
2312 switch(index) {
2313 case 0:
2314 EAX = env->cpuid_level;
2315 EBX = env->cpuid_vendor1;
2316 EDX = env->cpuid_vendor2;
2317 ECX = env->cpuid_vendor3;
2318 break;
2319 case 1:
2320 EAX = env->cpuid_version;
2321 EBX = (env->cpuid_apic_id << 24) | 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
2322 ECX = env->cpuid_ext_features;
2323 EDX = env->cpuid_features;
2324 break;
2325 case 2:
2326 /* cache info: needed for Pentium Pro compatibility */
2327 EAX = 1;
2328 EBX = 0;
2329 ECX = 0;
2330 EDX = 0x2c307d;
2331 break;
2332 case 4:
2333 /* cache info: needed for Core compatibility */
2334 switch (ECX) {
2335 case 0: /* L1 dcache info */
2336 EAX = 0x0000121;
2337 EBX = 0x1c0003f;
2338 ECX = 0x000003f;
2339 EDX = 0x0000001;
2340 break;
2341 case 1: /* L1 icache info */
2342 EAX = 0x0000122;
2343 EBX = 0x1c0003f;
2344 ECX = 0x000003f;
2345 EDX = 0x0000001;
2346 break;
2347 case 2: /* L2 cache info */
2348 EAX = 0x0000143;
2349 EBX = 0x3c0003f;
2350 ECX = 0x0000fff;
2351 EDX = 0x0000001;
2352 break;
2353 default: /* end of info */
2354 EAX = 0;
2355 EBX = 0;
2356 ECX = 0;
2357 EDX = 0;
2358 break;
2359 }
2360
2361 break;
2362 case 5:
2363 /* mwait info: needed for Core compatibility */
2364 EAX = 0; /* Smallest monitor-line size in bytes */
2365 EBX = 0; /* Largest monitor-line size in bytes */
2366 ECX = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
2367 EDX = 0;
2368 break;
2369 case 6:
2370 /* Thermal and Power Leaf */
2371 EAX = 0;
2372 EBX = 0;
2373 ECX = 0;
2374 EDX = 0;
2375 break;
2376 case 9:
2377 /* Direct Cache Access Information Leaf */
2378 EAX = 0; /* Bits 0-31 in DCA_CAP MSR */
2379 EBX = 0;
2380 ECX = 0;
2381 EDX = 0;
2382 break;
2383 case 0xA:
2384 /* Architectural Performance Monitoring Leaf */
2385 EAX = 0;
2386 EBX = 0;
2387 ECX = 0;
2388 EDX = 0;
2389 break;
2390 case 0x80000000:
2391 EAX = env->cpuid_xlevel;
2392 EBX = env->cpuid_vendor1;
2393 EDX = env->cpuid_vendor2;
2394 ECX = env->cpuid_vendor3;
2395 break;
2396 case 0x80000001:
2397 EAX = env->cpuid_features;
2398 EBX = 0;
2399 ECX = env->cpuid_ext3_features;
2400 EDX = env->cpuid_ext2_features;
2401 break;
2402 case 0x80000002:
2403 case 0x80000003:
2404 case 0x80000004:
2405 EAX = env->cpuid_model[(index - 0x80000002) * 4 + 0];
2406 EBX = env->cpuid_model[(index - 0x80000002) * 4 + 1];
2407 ECX = env->cpuid_model[(index - 0x80000002) * 4 + 2];
2408 EDX = env->cpuid_model[(index - 0x80000002) * 4 + 3];
2409 break;
2410 case 0x80000005:
2411 /* cache info (L1 cache) */
2412 EAX = 0x01ff01ff;
2413 EBX = 0x01ff01ff;
2414 ECX = 0x40020140;
2415 EDX = 0x40020140;
2416 break;
2417 case 0x80000006:
2418 /* cache info (L2 cache) */
2419 EAX = 0;
2420 EBX = 0x42004200;
2421 ECX = 0x02008140;
2422 EDX = 0;
2423 break;
2424 case 0x80000008:
2425 /* virtual & phys address size in low 2 bytes. */
2426/* XXX: This value must match the one used in the MMU code. */
2427 if (env->cpuid_ext2_features & CPUID_EXT2_LM) {
2428 /* 64 bit processor */
2429#if defined(USE_KQEMU)
2430 EAX = 0x00003020; /* 48 bits virtual, 32 bits physical */
2431#else
2432/* XXX: The physical address space is limited to 42 bits in exec.c. */
2433 EAX = 0x00003028; /* 48 bits virtual, 40 bits physical */
2434#endif
2435 } else {
2436#if defined(USE_KQEMU)
2437 EAX = 0x00000020; /* 32 bits physical */
2438#else
2439 if (env->cpuid_features & CPUID_PSE36)
2440 EAX = 0x00000024; /* 36 bits physical */
2441 else
2442 EAX = 0x00000020; /* 32 bits physical */
2443#endif
2444 }
2445 EBX = 0;
2446 ECX = 0;
2447 EDX = 0;
2448 break;
2449 case 0x8000000A:
2450 EAX = 0x00000001;
2451 EBX = 0;
2452 ECX = 0;
2453 EDX = 0;
2454 break;
2455 default:
2456 /* reserved values: zero */
2457 EAX = 0;
2458 EBX = 0;
2459 ECX = 0;
2460 EDX = 0;
2461 break;
2462 }
2463#else /* VBOX */
2464 remR3CpuId(env, EAX, &EAX, &EBX, &ECX, &EDX);
2465#endif /* VBOX */
2466}
2467
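/*
 * helper_enter_level implements the nested part of ENTER: for a non-zero
 * nesting level it copies the enclosing frame pointers (the "display")
 * from the old frame addressed through EBP onto the new stack and finally
 * pushes t1, the new frame pointer.  The initial push of the old EBP and
 * the frame allocation are handled outside this helper.
 */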
2468void helper_enter_level(int level, int data32, target_ulong t1)
2469{
2470 target_ulong ssp;
2471 uint32_t esp_mask, esp, ebp;
2472
2473 esp_mask = get_sp_mask(env->segs[R_SS].flags);
2474 ssp = env->segs[R_SS].base;
2475 ebp = EBP;
2476 esp = ESP;
2477 if (data32) {
2478 /* 32 bit */
2479 esp -= 4;
2480 while (--level) {
2481 esp -= 4;
2482 ebp -= 4;
2483 stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
2484 }
2485 esp -= 4;
2486 stl(ssp + (esp & esp_mask), t1);
2487 } else {
2488 /* 16 bit */
2489 esp -= 2;
2490 while (--level) {
2491 esp -= 2;
2492 ebp -= 2;
2493 stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
2494 }
2495 esp -= 2;
2496 stw(ssp + (esp & esp_mask), t1);
2497 }
2498}
2499
2500#ifdef TARGET_X86_64
2501void helper_enter64_level(int level, int data64, target_ulong t1)
2502{
2503 target_ulong esp, ebp;
2504 ebp = EBP;
2505 esp = ESP;
2506
2507 if (data64) {
2508 /* 64 bit */
2509 esp -= 8;
2510 while (--level) {
2511 esp -= 8;
2512 ebp -= 8;
2513 stq(esp, ldq(ebp));
2514 }
2515 esp -= 8;
2516 stq(esp, t1);
2517 } else {
2518 /* 16 bit */
2519 esp -= 2;
2520 while (--level) {
2521 esp -= 2;
2522 ebp -= 2;
2523 stw(esp, lduw(ebp));
2524 }
2525 esp -= 2;
2526 stw(esp, t1);
2527 }
2528}
2529#endif
2530
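/*
 * LLDT: a null selector simply invalidates the LDT.  Otherwise the
 * selector must reference the GDT (TI bit clear) and point to a present
 * LDT system descriptor (type 2); in long mode the descriptor is 16 bytes
 * wide (hence entry_limit 15 instead of 7), with the third dword supplying
 * base bits 63:32.
 */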
2531void helper_lldt(int selector)
2532{
2533 SegmentCache *dt;
2534 uint32_t e1, e2;
2535#ifndef VBOX
2536 int index, entry_limit;
2537#else
2538 unsigned int index, entry_limit;
2539#endif
2540 target_ulong ptr;
2541
2542#ifdef VBOX
2543 Log(("helper_lldt_T0: old ldtr=%RTsel {.base=%RGv, .limit=%RGv} new=%RTsel\n",
2544 (RTSEL)env->ldt.selector, (RTGCPTR)env->ldt.base, (RTGCPTR)env->ldt.limit, (RTSEL)(selector & 0xffff)));
2545#endif
2546
2547 selector &= 0xffff;
2548 if ((selector & 0xfffc) == 0) {
2549 /* XXX: NULL selector case: invalid LDT */
2550 env->ldt.base = 0;
2551 env->ldt.limit = 0;
2552 } else {
2553 if (selector & 0x4)
2554 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2555 dt = &env->gdt;
2556 index = selector & ~7;
2557#ifdef TARGET_X86_64
2558 if (env->hflags & HF_LMA_MASK)
2559 entry_limit = 15;
2560 else
2561#endif
2562 entry_limit = 7;
2563 if ((index + entry_limit) > dt->limit)
2564 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2565 ptr = dt->base + index;
2566 e1 = ldl_kernel(ptr);
2567 e2 = ldl_kernel(ptr + 4);
2568 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
2569 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2570 if (!(e2 & DESC_P_MASK))
2571 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2572#ifdef TARGET_X86_64
2573 if (env->hflags & HF_LMA_MASK) {
2574 uint32_t e3;
2575 e3 = ldl_kernel(ptr + 8);
2576 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2577 env->ldt.base |= (target_ulong)e3 << 32;
2578 } else
2579#endif
2580 {
2581 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2582 }
2583 }
2584 env->ldt.selector = selector;
2585#ifdef VBOX
2586 Log(("helper_lldt_T0: new ldtr=%RTsel {.base=%RGv, .limit=%RGv}\n",
2587 (RTSEL)env->ldt.selector, (RTGCPTR)env->ldt.base, (RTGCPTR)env->ldt.limit));
2588#endif
2589}
2590
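/*
 * LTR performs the same descriptor fetch as LLDT but requires an available
 * 16-bit or 32-bit TSS descriptor (type 1 or 9), rejects a non-zero type
 * field in the upper half of a long mode descriptor, and finally sets the
 * busy bit of the descriptor back in the GDT.
 */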
2591void helper_ltr(int selector)
2592{
2593 SegmentCache *dt;
2594 uint32_t e1, e2;
2595#ifndef VBOX
2596 int index, type, entry_limit;
2597#else
2598 unsigned int index;
2599 int type, entry_limit;
2600#endif
2601 target_ulong ptr;
2602
2603#ifdef VBOX
2604 Log(("helper_ltr: old tr=%RTsel {.base=%RGv, .limit=%RGv, .flags=%RX32} new=%RTsel\n",
2605 (RTSEL)env->tr.selector, (RTGCPTR)env->tr.base, (RTGCPTR)env->tr.limit,
2606 env->tr.flags, (RTSEL)(selector & 0xffff)));
2607#endif
2608 selector &= 0xffff;
2609 if ((selector & 0xfffc) == 0) {
2610 /* NULL selector case: invalid TR */
2611 env->tr.base = 0;
2612 env->tr.limit = 0;
2613 env->tr.flags = 0;
2614 } else {
2615 if (selector & 0x4)
2616 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2617 dt = &env->gdt;
2618 index = selector & ~7;
2619#ifdef TARGET_X86_64
2620 if (env->hflags & HF_LMA_MASK)
2621 entry_limit = 15;
2622 else
2623#endif
2624 entry_limit = 7;
2625 if ((index + entry_limit) > dt->limit)
2626 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2627 ptr = dt->base + index;
2628 e1 = ldl_kernel(ptr);
2629 e2 = ldl_kernel(ptr + 4);
2630 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2631 if ((e2 & DESC_S_MASK) ||
2632 (type != 1 && type != 9))
2633 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2634 if (!(e2 & DESC_P_MASK))
2635 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2636#ifdef TARGET_X86_64
2637 if (env->hflags & HF_LMA_MASK) {
2638 uint32_t e3, e4;
2639 e3 = ldl_kernel(ptr + 8);
2640 e4 = ldl_kernel(ptr + 12);
2641 if ((e4 >> DESC_TYPE_SHIFT) & 0xf)
2642 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2643 load_seg_cache_raw_dt(&env->tr, e1, e2);
2644 env->tr.base |= (target_ulong)e3 << 32;
2645 } else
2646#endif
2647 {
2648 load_seg_cache_raw_dt(&env->tr, e1, e2);
2649 }
2650 e2 |= DESC_TSS_BUSY_MASK;
2651 stl_kernel(ptr + 4, e2);
2652 }
2653 env->tr.selector = selector;
2654#ifdef VBOX
2655 Log(("helper_ltr: new tr=%RTsel {.base=%RGv, .limit=%RGv, .flags=%RX32} new=%RTsel\n",
2656 (RTSEL)env->tr.selector, (RTGCPTR)env->tr.base, (RTGCPTR)env->tr.limit,
2657 env->tr.flags, (RTSEL)(selector & 0xffff)));
2658#endif
2659}
2660
2661/* only works if protected mode and not VM86. seg_reg must be != R_CS */
2662void helper_load_seg(int seg_reg, int selector)
2663{
2664 uint32_t e1, e2;
2665 int cpl, dpl, rpl;
2666 SegmentCache *dt;
2667#ifndef VBOX
2668 int index;
2669#else
2670 unsigned int index;
2671#endif
2672 target_ulong ptr;
2673
2674 selector &= 0xffff;
2675 cpl = env->hflags & HF_CPL_MASK;
2676
2677#ifdef VBOX
2678 /* Trying to load a selector with RPL=1? */
2679 if (cpl == 0 && (selector & 3) == 1 && (env->state & CPU_RAW_RING0))
2680 {
2681 Log(("RPL 1 -> sel %04X -> %04X\n", selector, selector & 0xfffc));
2682 selector = selector & 0xfffc;
2683 }
2684#endif
2685 if ((selector & 0xfffc) == 0) {
2686 /* null selector case */
2687 if (seg_reg == R_SS
2688#ifdef TARGET_X86_64
2689 && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
2690#endif
2691 )
2692 raise_exception_err(EXCP0D_GPF, 0);
2693 cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
2694 } else {
2695
2696 if (selector & 0x4)
2697 dt = &env->ldt;
2698 else
2699 dt = &env->gdt;
2700 index = selector & ~7;
2701 if ((index + 7) > dt->limit)
2702 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2703 ptr = dt->base + index;
2704 e1 = ldl_kernel(ptr);
2705 e2 = ldl_kernel(ptr + 4);
2706
2707 if (!(e2 & DESC_S_MASK))
2708 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2709 rpl = selector & 3;
2710 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2711 if (seg_reg == R_SS) {
2712 /* must be writable segment */
2713 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
2714 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2715 if (rpl != cpl || dpl != cpl)
2716 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2717 } else {
2718 /* must be readable segment */
2719 if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
2720 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2721
2722 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2723 /* if not conforming code, test rights */
2724 if (dpl < cpl || dpl < rpl)
2725 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2726 }
2727 }
2728
2729 if (!(e2 & DESC_P_MASK)) {
2730 if (seg_reg == R_SS)
2731 raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
2732 else
2733 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2734 }
2735
2736 /* set the access bit if not already set */
2737 if (!(e2 & DESC_A_MASK)) {
2738 e2 |= DESC_A_MASK;
2739 stl_kernel(ptr + 4, e2);
2740 }
2741
2742 cpu_x86_load_seg_cache(env, seg_reg, selector,
2743 get_seg_base(e1, e2),
2744 get_seg_limit(e1, e2),
2745 e2);
2746#if 0
2747 fprintf(logfile, "load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
2748 selector, (unsigned long)sc->base, sc->limit, sc->flags);
2749#endif
2750 }
2751}
2752
2753/* protected mode jump */
2754void helper_ljmp_protected(int new_cs, target_ulong new_eip,
2755 int next_eip_addend)
2756{
2757 int gate_cs, type;
2758 uint32_t e1, e2, cpl, dpl, rpl, limit;
2759 target_ulong next_eip;
2760
2761#ifdef VBOX
2762 e1 = e2 = 0;
2763#endif
2764 if ((new_cs & 0xfffc) == 0)
2765 raise_exception_err(EXCP0D_GPF, 0);
2766 if (load_segment(&e1, &e2, new_cs) != 0)
2767 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2768 cpl = env->hflags & HF_CPL_MASK;
2769 if (e2 & DESC_S_MASK) {
2770 if (!(e2 & DESC_CS_MASK))
2771 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2772 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2773 if (e2 & DESC_C_MASK) {
2774 /* conforming code segment */
2775 if (dpl > cpl)
2776 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2777 } else {
2778 /* non conforming code segment */
2779 rpl = new_cs & 3;
2780 if (rpl > cpl)
2781 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2782 if (dpl != cpl)
2783 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2784 }
2785 if (!(e2 & DESC_P_MASK))
2786 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2787 limit = get_seg_limit(e1, e2);
2788 if (new_eip > limit &&
2789 !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK))
2790 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2791 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2792 get_seg_base(e1, e2), limit, e2);
2793 EIP = new_eip;
2794 } else {
2795 /* jump to call or task gate */
2796 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2797 rpl = new_cs & 3;
2798 cpl = env->hflags & HF_CPL_MASK;
2799 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2800 switch(type) {
2801 case 1: /* 286 TSS */
2802 case 9: /* 386 TSS */
2803 case 5: /* task gate */
2804 if (dpl < cpl || dpl < rpl)
2805 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2806 next_eip = env->eip + next_eip_addend;
2807 switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
2808 CC_OP = CC_OP_EFLAGS;
2809 break;
2810 case 4: /* 286 call gate */
2811 case 12: /* 386 call gate */
2812 if ((dpl < cpl) || (dpl < rpl))
2813 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2814 if (!(e2 & DESC_P_MASK))
2815 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2816 gate_cs = e1 >> 16;
2817 new_eip = (e1 & 0xffff);
2818 if (type == 12)
2819 new_eip |= (e2 & 0xffff0000);
2820 if (load_segment(&e1, &e2, gate_cs) != 0)
2821 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2822 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2823 /* must be code segment */
2824 if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
2825 (DESC_S_MASK | DESC_CS_MASK)))
2826 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2827 if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
2828 (!(e2 & DESC_C_MASK) && (dpl != cpl)))
2829 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2830 if (!(e2 & DESC_P_MASK))
2831#ifdef VBOX /* See page 3-514 of 253666.pdf */
2832 raise_exception_err(EXCP0B_NOSEG, gate_cs & 0xfffc);
2833#else
2834 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2835#endif
2836 limit = get_seg_limit(e1, e2);
2837 if (new_eip > limit)
2838 raise_exception_err(EXCP0D_GPF, 0);
2839 cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
2840 get_seg_base(e1, e2), limit, e2);
2841 EIP = new_eip;
2842 break;
2843 default:
2844 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2845 break;
2846 }
2847 }
2848}
2849
2850/* real mode call */
2851void helper_lcall_real(int new_cs, target_ulong new_eip1,
2852 int shift, int next_eip)
2853{
2854 int new_eip;
2855 uint32_t esp, esp_mask;
2856 target_ulong ssp;
2857
2858 new_eip = new_eip1;
2859 esp = ESP;
2860 esp_mask = get_sp_mask(env->segs[R_SS].flags);
2861 ssp = env->segs[R_SS].base;
2862 if (shift) {
2863 PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
2864 PUSHL(ssp, esp, esp_mask, next_eip);
2865 } else {
2866 PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
2867 PUSHW(ssp, esp, esp_mask, next_eip);
2868 }
2869
2870 SET_ESP(esp, esp_mask);
2871 env->eip = new_eip;
2872 env->segs[R_CS].selector = new_cs;
2873 env->segs[R_CS].base = (new_cs << 4);
2874}
2875
2876/* protected mode call */
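/*
 * A far call through a call gate to a more privileged, non-conforming code
 * segment switches stacks: the new SS:ESP is read from the TSS for the
 * target privilege level, param_count words (or dwords) are copied over
 * from the caller's stack, and the old SS:ESP plus the return CS:EIP are
 * pushed on the new stack before CPL changes to the gate target's DPL.
 */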
2877void helper_lcall_protected(int new_cs, target_ulong new_eip,
2878 int shift, int next_eip_addend)
2879{
2880 int new_stack, i;
2881 uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
2882 uint32_t ss, ss_e1, ss_e2, sp, type, ss_dpl, sp_mask;
2883 uint32_t val, limit, old_sp_mask;
2884 target_ulong ssp, old_ssp, next_eip;
2885
2886#ifdef VBOX
2887 ss = ss_e1 = ss_e2 = e1 = e2 = 0;
2888#endif
2889 next_eip = env->eip + next_eip_addend;
2890#ifdef DEBUG_PCALL
2891 if (loglevel & CPU_LOG_PCALL) {
2892 fprintf(logfile, "lcall %04x:%08x s=%d\n",
2893 new_cs, (uint32_t)new_eip, shift);
2894 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
2895 }
2896#endif
2897 if ((new_cs & 0xfffc) == 0)
2898 raise_exception_err(EXCP0D_GPF, 0);
2899 if (load_segment(&e1, &e2, new_cs) != 0)
2900 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2901 cpl = env->hflags & HF_CPL_MASK;
2902#ifdef DEBUG_PCALL
2903 if (loglevel & CPU_LOG_PCALL) {
2904 fprintf(logfile, "desc=%08x:%08x\n", e1, e2);
2905 }
2906#endif
2907 if (e2 & DESC_S_MASK) {
2908 if (!(e2 & DESC_CS_MASK))
2909 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2910 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2911 if (e2 & DESC_C_MASK) {
2912 /* conforming code segment */
2913 if (dpl > cpl)
2914 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2915 } else {
2916 /* non conforming code segment */
2917 rpl = new_cs & 3;
2918 if (rpl > cpl)
2919 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2920 if (dpl != cpl)
2921 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2922 }
2923 if (!(e2 & DESC_P_MASK))
2924 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2925
2926#ifdef TARGET_X86_64
2927 /* XXX: check 16/32 bit cases in long mode */
2928 if (shift == 2) {
2929 target_ulong rsp;
2930 /* 64 bit case */
2931 rsp = ESP;
2932 PUSHQ(rsp, env->segs[R_CS].selector);
2933 PUSHQ(rsp, next_eip);
2934 /* from this point, not restartable */
2935 ESP = rsp;
2936 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2937 get_seg_base(e1, e2),
2938 get_seg_limit(e1, e2), e2);
2939 EIP = new_eip;
2940 } else
2941#endif
2942 {
2943 sp = ESP;
2944 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2945 ssp = env->segs[R_SS].base;
2946 if (shift) {
2947 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2948 PUSHL(ssp, sp, sp_mask, next_eip);
2949 } else {
2950 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2951 PUSHW(ssp, sp, sp_mask, next_eip);
2952 }
2953
2954 limit = get_seg_limit(e1, e2);
2955 if (new_eip > limit)
2956 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2957 /* from this point, not restartable */
2958 SET_ESP(sp, sp_mask);
2959 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2960 get_seg_base(e1, e2), limit, e2);
2961 EIP = new_eip;
2962 }
2963 } else {
2964 /* check gate type */
2965 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
2966 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2967 rpl = new_cs & 3;
2968 switch(type) {
2969 case 1: /* available 286 TSS */
2970 case 9: /* available 386 TSS */
2971 case 5: /* task gate */
2972 if (dpl < cpl || dpl < rpl)
2973 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2974 switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
2975 CC_OP = CC_OP_EFLAGS;
2976 return;
2977 case 4: /* 286 call gate */
2978 case 12: /* 386 call gate */
2979 break;
2980 default:
2981 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2982 break;
2983 }
2984 shift = type >> 3;
2985
2986 if (dpl < cpl || dpl < rpl)
2987 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2988 /* check valid bit */
2989 if (!(e2 & DESC_P_MASK))
2990 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2991 selector = e1 >> 16;
2992 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
2993 param_count = e2 & 0x1f;
2994 if ((selector & 0xfffc) == 0)
2995 raise_exception_err(EXCP0D_GPF, 0);
2996
2997 if (load_segment(&e1, &e2, selector) != 0)
2998 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2999 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
3000 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
3001 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3002 if (dpl > cpl)
3003 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
3004 if (!(e2 & DESC_P_MASK))
3005 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
3006
3007 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
3008 /* to inner privilege */
3009 get_ss_esp_from_tss(&ss, &sp, dpl);
3010#ifdef DEBUG_PCALL
3011 if (loglevel & CPU_LOG_PCALL)
3012 fprintf(logfile, "new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n",
3013 ss, sp, param_count, ESP);
3014#endif
3015 if ((ss & 0xfffc) == 0)
3016 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
3017 if ((ss & 3) != dpl)
3018 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
3019 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
3020 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
3021 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
3022 if (ss_dpl != dpl)
3023 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
3024 if (!(ss_e2 & DESC_S_MASK) ||
3025 (ss_e2 & DESC_CS_MASK) ||
3026 !(ss_e2 & DESC_W_MASK))
3027 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
3028 if (!(ss_e2 & DESC_P_MASK))
3029#ifdef VBOX /* See page 3-99 of 253666.pdf */
3030 raise_exception_err(EXCP0C_STACK, ss & 0xfffc);
3031#else
3032 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
3033#endif
3034
3035 // push_size = ((param_count * 2) + 8) << shift;
3036
3037 old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
3038 old_ssp = env->segs[R_SS].base;
3039
3040 sp_mask = get_sp_mask(ss_e2);
3041 ssp = get_seg_base(ss_e1, ss_e2);
3042 if (shift) {
3043 PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
3044 PUSHL(ssp, sp, sp_mask, ESP);
3045 for(i = param_count - 1; i >= 0; i--) {
3046 val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
3047 PUSHL(ssp, sp, sp_mask, val);
3048 }
3049 } else {
3050 PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
3051 PUSHW(ssp, sp, sp_mask, ESP);
3052 for(i = param_count - 1; i >= 0; i--) {
3053 val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
3054 PUSHW(ssp, sp, sp_mask, val);
3055 }
3056 }
3057 new_stack = 1;
3058 } else {
3059 /* to same privilege */
3060 sp = ESP;
3061 sp_mask = get_sp_mask(env->segs[R_SS].flags);
3062 ssp = env->segs[R_SS].base;
3063 // push_size = (4 << shift);
3064 new_stack = 0;
3065 }
3066
3067 if (shift) {
3068 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
3069 PUSHL(ssp, sp, sp_mask, next_eip);
3070 } else {
3071 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
3072 PUSHW(ssp, sp, sp_mask, next_eip);
3073 }
3074
3075 /* from this point, not restartable */
3076
3077 if (new_stack) {
3078 ss = (ss & ~3) | dpl;
3079 cpu_x86_load_seg_cache(env, R_SS, ss,
3080 ssp,
3081 get_seg_limit(ss_e1, ss_e2),
3082 ss_e2);
3083 }
3084
3085 selector = (selector & ~3) | dpl;
3086 cpu_x86_load_seg_cache(env, R_CS, selector,
3087 get_seg_base(e1, e2),
3088 get_seg_limit(e1, e2),
3089 e2);
3090 cpu_x86_set_cpl(env, dpl);
3091 SET_ESP(sp, sp_mask);
3092 EIP = offset;
3093 }
3094#ifdef USE_KQEMU
3095 if (kqemu_is_ok(env)) {
3096 env->exception_index = -1;
3097 cpu_loop_exit();
3098 }
3099#endif
3100}
3101
3102/* real and vm86 mode iret */
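/*
 * Pops EIP, CS and EFLAGS (32- or 16-bit wide, depending on 'shift').  The
 * VBox-specific block implements CR4.VME semantics for VM86 mode with
 * IOPL < 3: popping an image with IF set while VIP is pending, or with TF
 * set, raises #GP; otherwise the popped IF value is reflected into VIF
 * rather than IF.
 */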
3103void helper_iret_real(int shift)
3104{
3105 uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
3106 target_ulong ssp;
3107 int eflags_mask;
3108#ifdef VBOX
3109 bool fVME = false;
3110
3111 remR3TrapClear(env->pVM);
3112#endif /* VBOX */
3113
3114 sp_mask = 0xffff; /* XXXX: use SS segment size ? */
3115 sp = ESP;
3116 ssp = env->segs[R_SS].base;
3117 if (shift == 1) {
3118 /* 32 bits */
3119 POPL(ssp, sp, sp_mask, new_eip);
3120 POPL(ssp, sp, sp_mask, new_cs);
3121 new_cs &= 0xffff;
3122 POPL(ssp, sp, sp_mask, new_eflags);
3123 } else {
3124 /* 16 bits */
3125 POPW(ssp, sp, sp_mask, new_eip);
3126 POPW(ssp, sp, sp_mask, new_cs);
3127 POPW(ssp, sp, sp_mask, new_eflags);
3128 }
3129#ifdef VBOX
3130 if ( (env->eflags & VM_MASK)
3131 && ((env->eflags >> IOPL_SHIFT) & 3) != 3
3132 && (env->cr[4] & CR4_VME_MASK)) /* implied or else we would fault earlier */
3133 {
3134 fVME = true;
3135 /* if virtual interrupt pending and (virtual) interrupts will be enabled -> #GP */
3136 /* if TF will be set -> #GP */
3137 if ( ((new_eflags & IF_MASK) && (env->eflags & VIP_MASK))
3138 || (new_eflags & TF_MASK))
3139 raise_exception(EXCP0D_GPF);
3140 }
3141#endif /* VBOX */
3142 ESP = (ESP & ~sp_mask) | (sp & sp_mask);
3143 env->segs[R_CS].selector = new_cs;
3144 env->segs[R_CS].base = (new_cs << 4);
3145 env->eip = new_eip;
3146#ifdef VBOX
3147 if (fVME)
3148 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
3149 else
3150#endif
3151 if (env->eflags & VM_MASK)
3152 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
3153 else
3154 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
3155 if (shift == 0)
3156 eflags_mask &= 0xffff;
3157 load_eflags(new_eflags, eflags_mask);
3158 env->hflags2 &= ~HF2_NMI_MASK;
3159#ifdef VBOX
3160 if (fVME)
3161 {
3162 if (new_eflags & IF_MASK)
3163 env->eflags |= VIF_MASK;
3164 else
3165 env->eflags &= ~VIF_MASK;
3166 }
3167#endif /* VBOX */
3168}
3169
3170#ifndef VBOX
3171static inline void validate_seg(int seg_reg, int cpl)
3172#else /* VBOX */
3173DECLINLINE(void) validate_seg(int seg_reg, int cpl)
3174#endif /* VBOX */
3175{
3176 int dpl;
3177 uint32_t e2;
3178
3179 /* XXX: on x86_64, we do not want to nullify FS and GS because
3180 they may still contain a valid base. I would be interested to
3181 know how a real x86_64 CPU behaves */
3182 if ((seg_reg == R_FS || seg_reg == R_GS) &&
3183 (env->segs[seg_reg].selector & 0xfffc) == 0)
3184 return;
3185
3186 e2 = env->segs[seg_reg].flags;
3187 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3188 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
3189 /* data or non conforming code segment */
3190 if (dpl < cpl) {
3191 cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
3192 }
3193 }
3194}
3195
3196/* protected mode iret */
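/*
 * Shared far-return/IRET path: pops CS:EIP (plus EFLAGS when is_iret),
 * checks the code segment descriptor, and on a return to an outer
 * privilege level also pops SS:ESP, reloads SS and clears any data segment
 * register whose descriptor DPL is below the new privilege level.  The
 * 'addend' argument implements the immediate operand of RET n.
 */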
3197#ifndef VBOX
3198static inline void helper_ret_protected(int shift, int is_iret, int addend)
3199#else /* VBOX */
3200DECLINLINE(void) helper_ret_protected(int shift, int is_iret, int addend)
3201#endif /* VBOX */
3202{
3203 uint32_t new_cs, new_eflags, new_ss;
3204 uint32_t new_es, new_ds, new_fs, new_gs;
3205 uint32_t e1, e2, ss_e1, ss_e2;
3206 int cpl, dpl, rpl, eflags_mask, iopl;
3207 target_ulong ssp, sp, new_eip, new_esp, sp_mask;
3208
3209#ifdef VBOX
3210 ss_e1 = ss_e2 = e1 = e2 = 0;
3211#endif
3212
3213#ifdef TARGET_X86_64
3214 if (shift == 2)
3215 sp_mask = -1;
3216 else
3217#endif
3218 sp_mask = get_sp_mask(env->segs[R_SS].flags);
3219 sp = ESP;
3220 ssp = env->segs[R_SS].base;
3221 new_eflags = 0; /* avoid warning */
3222#ifdef TARGET_X86_64
3223 if (shift == 2) {
3224 POPQ(sp, new_eip);
3225 POPQ(sp, new_cs);
3226 new_cs &= 0xffff;
3227 if (is_iret) {
3228 POPQ(sp, new_eflags);
3229 }
3230 } else
3231#endif
3232 if (shift == 1) {
3233 /* 32 bits */
3234 POPL(ssp, sp, sp_mask, new_eip);
3235 POPL(ssp, sp, sp_mask, new_cs);
3236 new_cs &= 0xffff;
3237 if (is_iret) {
3238 POPL(ssp, sp, sp_mask, new_eflags);
3239#if defined(VBOX) && defined(DEBUG)
3240 printf("iret: new CS %04X\n", new_cs);
3241 printf("iret: new EIP %08X\n", (uint32_t)new_eip);
3242 printf("iret: new EFLAGS %08X\n", new_eflags);
3243 printf("iret: EAX=%08x\n", (uint32_t)EAX);
3244#endif
3245 if (new_eflags & VM_MASK)
3246 goto return_to_vm86;
3247 }
3248#ifdef VBOX
3249 if ((new_cs & 0x3) == 1 && (env->state & CPU_RAW_RING0))
3250 {
3251#ifdef DEBUG
3252 printf("RPL 1 -> new_cs %04X -> %04X\n", new_cs, new_cs & 0xfffc);
3253#endif
3254 new_cs = new_cs & 0xfffc;
3255 }
3256#endif
3257 } else {
3258 /* 16 bits */
3259 POPW(ssp, sp, sp_mask, new_eip);
3260 POPW(ssp, sp, sp_mask, new_cs);
3261 if (is_iret)
3262 POPW(ssp, sp, sp_mask, new_eflags);
3263 }
3264#ifdef DEBUG_PCALL
3265 if (loglevel & CPU_LOG_PCALL) {
3266 fprintf(logfile, "lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
3267 new_cs, new_eip, shift, addend);
3268 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
3269 }
3270#endif
3271 if ((new_cs & 0xfffc) == 0)
3272 {
3273#if defined(VBOX) && defined(DEBUG)
3274 printf("(new_cs & 0xfffc) == 0\n");
3275#endif
3276 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3277 }
3278 if (load_segment(&e1, &e2, new_cs) != 0)
3279 {
3280#if defined(VBOX) && defined(DEBUG)
3281 printf("load_segment failed\n");
3282#endif
3283 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3284 }
3285 if (!(e2 & DESC_S_MASK) ||
3286 !(e2 & DESC_CS_MASK))
3287 {
3288#if defined(VBOX) && defined(DEBUG)
3289 printf("e2 mask %08x\n", e2);
3290#endif
3291 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3292 }
3293 cpl = env->hflags & HF_CPL_MASK;
3294 rpl = new_cs & 3;
3295 if (rpl < cpl)
3296 {
3297#if defined(VBOX) && defined(DEBUG)
3298 printf("rpl < cpl (%d vs %d)\n", rpl, cpl);
3299#endif
3300 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3301 }
3302 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3303 if (e2 & DESC_C_MASK) {
3304 if (dpl > rpl)
3305 {
3306#if defined(VBOX) && defined(DEBUG)
3307 printf("dpl > rpl (%d vs %d)\n", dpl, rpl);
3308#endif
3309 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3310 }
3311 } else {
3312 if (dpl != rpl)
3313 {
3314#if defined(VBOX) && defined(DEBUG)
3315 printf("dpl != rpl (%d vs %d) e1=%x e2=%x\n", dpl, rpl, e1, e2);
3316#endif
3317 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3318 }
3319 }
3320 if (!(e2 & DESC_P_MASK))
3321 {
3322#if defined(VBOX) && defined(DEBUG)
3323 printf("DESC_P_MASK e2=%08x\n", e2);
3324#endif
3325 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
3326 }
3327
3328 sp += addend;
3329 if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
3330 ((env->hflags & HF_CS64_MASK) && !is_iret))) {
3331 /* return to same privilege level */
3332 cpu_x86_load_seg_cache(env, R_CS, new_cs,
3333 get_seg_base(e1, e2),
3334 get_seg_limit(e1, e2),
3335 e2);
3336 } else {
3337 /* return to different privilege level */
3338#ifdef TARGET_X86_64
3339 if (shift == 2) {
3340 POPQ(sp, new_esp);
3341 POPQ(sp, new_ss);
3342 new_ss &= 0xffff;
3343 } else
3344#endif
3345 if (shift == 1) {
3346 /* 32 bits */
3347 POPL(ssp, sp, sp_mask, new_esp);
3348 POPL(ssp, sp, sp_mask, new_ss);
3349 new_ss &= 0xffff;
3350 } else {
3351 /* 16 bits */
3352 POPW(ssp, sp, sp_mask, new_esp);
3353 POPW(ssp, sp, sp_mask, new_ss);
3354 }
3355#ifdef DEBUG_PCALL
3356 if (loglevel & CPU_LOG_PCALL) {
3357 fprintf(logfile, "new ss:esp=%04x:" TARGET_FMT_lx "\n",
3358 new_ss, new_esp);
3359 }
3360#endif
3361 if ((new_ss & 0xfffc) == 0) {
3362#ifdef TARGET_X86_64
3363 /* NULL ss is allowed in long mode if cpl != 3 */
3364 /* XXX: test CS64 ? */
3365 if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
3366 cpu_x86_load_seg_cache(env, R_SS, new_ss,
3367 0, 0xffffffff,
3368 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3369 DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
3370 DESC_W_MASK | DESC_A_MASK);
3371 ss_e2 = DESC_B_MASK; /* XXX: should not be needed ? */
3372 } else
3373#endif
3374 {
3375 raise_exception_err(EXCP0D_GPF, 0);
3376 }
3377 } else {
3378 if ((new_ss & 3) != rpl)
3379 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3380 if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
3381 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3382 if (!(ss_e2 & DESC_S_MASK) ||
3383 (ss_e2 & DESC_CS_MASK) ||
3384 !(ss_e2 & DESC_W_MASK))
3385 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3386 dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
3387 if (dpl != rpl)
3388 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3389 if (!(ss_e2 & DESC_P_MASK))
3390 raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
3391 cpu_x86_load_seg_cache(env, R_SS, new_ss,
3392 get_seg_base(ss_e1, ss_e2),
3393 get_seg_limit(ss_e1, ss_e2),
3394 ss_e2);
3395 }
3396
3397 cpu_x86_load_seg_cache(env, R_CS, new_cs,
3398 get_seg_base(e1, e2),
3399 get_seg_limit(e1, e2),
3400 e2);
3401 cpu_x86_set_cpl(env, rpl);
3402 sp = new_esp;
3403#ifdef TARGET_X86_64
3404 if (env->hflags & HF_CS64_MASK)
3405 sp_mask = -1;
3406 else
3407#endif
3408 sp_mask = get_sp_mask(ss_e2);
3409
3410 /* validate data segments */
3411 validate_seg(R_ES, rpl);
3412 validate_seg(R_DS, rpl);
3413 validate_seg(R_FS, rpl);
3414 validate_seg(R_GS, rpl);
3415
3416 sp += addend;
3417 }
3418 SET_ESP(sp, sp_mask);
3419 env->eip = new_eip;
3420 if (is_iret) {
3421 /* NOTE: 'cpl' is the _old_ CPL */
3422 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
3423 if (cpl == 0)
3424#ifdef VBOX
3425 eflags_mask |= IOPL_MASK | VIF_MASK | VIP_MASK;
3426#else
3427 eflags_mask |= IOPL_MASK;
3428#endif
3429 iopl = (env->eflags >> IOPL_SHIFT) & 3;
3430 if (cpl <= iopl)
3431 eflags_mask |= IF_MASK;
3432 if (shift == 0)
3433 eflags_mask &= 0xffff;
3434 load_eflags(new_eflags, eflags_mask);
3435 }
3436 return;
3437
3438 return_to_vm86:
3439 POPL(ssp, sp, sp_mask, new_esp);
3440 POPL(ssp, sp, sp_mask, new_ss);
3441 POPL(ssp, sp, sp_mask, new_es);
3442 POPL(ssp, sp, sp_mask, new_ds);
3443 POPL(ssp, sp, sp_mask, new_fs);
3444 POPL(ssp, sp, sp_mask, new_gs);
3445
3446 /* modify processor state */
3447 load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
3448 IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
3449 load_seg_vm(R_CS, new_cs & 0xffff);
3450 cpu_x86_set_cpl(env, 3);
3451 load_seg_vm(R_SS, new_ss & 0xffff);
3452 load_seg_vm(R_ES, new_es & 0xffff);
3453 load_seg_vm(R_DS, new_ds & 0xffff);
3454 load_seg_vm(R_FS, new_fs & 0xffff);
3455 load_seg_vm(R_GS, new_gs & 0xffff);
3456
3457 env->eip = new_eip & 0xffff;
3458 ESP = new_esp;
3459}
3460
3461void helper_iret_protected(int shift, int next_eip)
3462{
3463 int tss_selector, type;
3464 uint32_t e1, e2;
3465
3466#ifdef VBOX
3467 e1 = e2 = 0;
3468 remR3TrapClear(env->pVM);
3469#endif
3470
3471 /* specific case for TSS */
3472 if (env->eflags & NT_MASK) {
3473#ifdef TARGET_X86_64
3474 if (env->hflags & HF_LMA_MASK)
3475 raise_exception_err(EXCP0D_GPF, 0);
3476#endif
3477 tss_selector = lduw_kernel(env->tr.base + 0);
3478 if (tss_selector & 4)
3479 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
3480 if (load_segment(&e1, &e2, tss_selector) != 0)
3481 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
3482 type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
3483 /* NOTE: we check both segment and busy TSS */
3484 if (type != 3)
3485 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
3486 switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
3487 } else {
3488 helper_ret_protected(shift, 1, 0);
3489 }
3490 env->hflags2 &= ~HF2_NMI_MASK;
3491#ifdef USE_KQEMU
3492 if (kqemu_is_ok(env)) {
3493 CC_OP = CC_OP_EFLAGS;
3494 env->exception_index = -1;
3495 cpu_loop_exit();
3496 }
3497#endif
3498}
3499
3500void helper_lret_protected(int shift, int addend)
3501{
3502 helper_ret_protected(shift, 0, addend);
3503#ifdef USE_KQEMU
3504 if (kqemu_is_ok(env)) {
3505 env->exception_index = -1;
3506 cpu_loop_exit();
3507 }
3508#endif
3509}
3510
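/*
 * SYSENTER loads flat ring-0 code and stack segments derived from the
 * SYSENTER_CS MSR (CS = MSR value, SS = MSR value + 8), takes ESP and EIP
 * from SYSENTER_ESP/SYSENTER_EIP and clears VM, IF and RF.  A zero
 * SYSENTER_CS means the fast system call MSRs are not set up, hence #GP.
 */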
3511void helper_sysenter(void)
3512{
3513 if (env->sysenter_cs == 0) {
3514 raise_exception_err(EXCP0D_GPF, 0);
3515 }
3516 env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
3517 cpu_x86_set_cpl(env, 0);
3518
3519#ifdef TARGET_X86_64
3520 if (env->hflags & HF_LMA_MASK) {
3521 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
3522 0, 0xffffffff,
3523 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3524 DESC_S_MASK |
3525 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
3526 } else
3527#endif
3528 {
3529 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
3530 0, 0xffffffff,
3531 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3532 DESC_S_MASK |
3533 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
3534 }
3535 cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
3536 0, 0xffffffff,
3537 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3538 DESC_S_MASK |
3539 DESC_W_MASK | DESC_A_MASK);
3540 ESP = env->sysenter_esp;
3541 EIP = env->sysenter_eip;
3542}
3543
3544void helper_sysexit(int dflag)
3545{
3546 int cpl;
3547
3548 cpl = env->hflags & HF_CPL_MASK;
3549 if (env->sysenter_cs == 0 || cpl != 0) {
3550 raise_exception_err(EXCP0D_GPF, 0);
3551 }
3552 cpu_x86_set_cpl(env, 3);
3553#ifdef TARGET_X86_64
3554 if (dflag == 2) {
3555 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) | 3,
3556 0, 0xffffffff,
3557 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3558 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3559 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
3560 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) | 3,
3561 0, 0xffffffff,
3562 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3563 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3564 DESC_W_MASK | DESC_A_MASK);
3565 } else
3566#endif
3567 {
3568 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
3569 0, 0xffffffff,
3570 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3571 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3572 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
3573 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
3574 0, 0xffffffff,
3575 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3576 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3577 DESC_W_MASK | DESC_A_MASK);
3578 }
3579 ESP = ECX;
3580 EIP = EDX;
3581#ifdef USE_KQEMU
3582 if (kqemu_is_ok(env)) {
3583 env->exception_index = -1;
3584 cpu_loop_exit();
3585 }
3586#endif
3587}
3588
3589#if defined(CONFIG_USER_ONLY)
3590target_ulong helper_read_crN(int reg)
3591{
3592 return 0;
3593}
3594
3595void helper_write_crN(int reg, target_ulong t0)
3596{
3597}
3598#else
3599target_ulong helper_read_crN(int reg)
3600{
3601 target_ulong val;
3602
3603 helper_svm_check_intercept_param(SVM_EXIT_READ_CR0 + reg, 0);
3604 switch(reg) {
3605 default:
3606 val = env->cr[reg];
3607 break;
3608 case 8:
3609 if (!(env->hflags2 & HF2_VINTR_MASK)) {
3610 val = cpu_get_apic_tpr(env);
3611 } else {
3612 val = env->v_tpr;
3613 }
3614 break;
3615 }
3616 return val;
3617}
3618
3619void helper_write_crN(int reg, target_ulong t0)
3620{
3621 helper_svm_check_intercept_param(SVM_EXIT_WRITE_CR0 + reg, 0);
3622 switch(reg) {
3623 case 0:
3624 cpu_x86_update_cr0(env, t0);
3625 break;
3626 case 3:
3627 cpu_x86_update_cr3(env, t0);
3628 break;
3629 case 4:
3630 cpu_x86_update_cr4(env, t0);
3631 break;
3632 case 8:
3633 if (!(env->hflags2 & HF2_VINTR_MASK)) {
3634 cpu_set_apic_tpr(env, t0);
3635 }
3636 env->v_tpr = t0 & 0x0f;
3637 break;
3638 default:
3639 env->cr[reg] = t0;
3640 break;
3641 }
3642}
3643#endif
3644
3645void helper_lmsw(target_ulong t0)
3646{
3647 /* only 4 lower bits of CR0 are modified. PE cannot be set to zero
3648 if already set to one. */
3649 t0 = (env->cr[0] & ~0xe) | (t0 & 0xf);
3650 helper_write_crN(0, t0);
3651}
3652
3653void helper_clts(void)
3654{
3655 env->cr[0] &= ~CR0_TS_MASK;
3656 env->hflags &= ~HF_TS_MASK;
3657}
3658
3659/* XXX: do more */
3660void helper_movl_drN_T0(int reg, target_ulong t0)
3661{
3662 env->dr[reg] = t0;
3663}
3664
3665void helper_invlpg(target_ulong addr)
3666{
3667 helper_svm_check_intercept_param(SVM_EXIT_INVLPG, 0);
3668 tlb_flush_page(env, addr);
3669}
3670
3671void helper_rdtsc(void)
3672{
3673 uint64_t val;
3674
3675 if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3676 raise_exception(EXCP0D_GPF);
3677 }
3678 helper_svm_check_intercept_param(SVM_EXIT_RDTSC, 0);
3679
3680 val = cpu_get_tsc(env) + env->tsc_offset;
3681 EAX = (uint32_t)(val);
3682 EDX = (uint32_t)(val >> 32);
3683}
3684
3685#ifdef VBOX
3686void helper_rdtscp(void)
3687{
3688 uint64_t val;
3689 if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3690 raise_exception(EXCP0D_GPF);
3691 }
3692
3693 val = cpu_get_tsc(env);
3694 EAX = (uint32_t)(val);
3695 EDX = (uint32_t)(val >> 32);
3696 ECX = cpu_rdmsr(env, MSR_K8_TSC_AUX);
3697}
3698#endif
3699
3700void helper_rdpmc(void)
3701{
3702#ifdef VBOX
3703 /* If X86_CR4_PCE is *not* set, then CPL must be zero. */
3704 if (!(env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3705 raise_exception(EXCP0D_GPF);
3706 }
3707 /* Just return zero here; rather tricky to properly emulate this, especially as the specs are a mess. */
3708 EAX = 0;
3709 EDX = 0;
3710#else
3711 if ((env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3712 raise_exception(EXCP0D_GPF);
3713 }
3714 helper_svm_check_intercept_param(SVM_EXIT_RDPMC, 0);
3715
3716 /* currently unimplemented */
3717 raise_exception_err(EXCP06_ILLOP, 0);
3718#endif
3719}
3720
3721#if defined(CONFIG_USER_ONLY)
3722void helper_wrmsr(void)
3723{
3724}
3725
3726void helper_rdmsr(void)
3727{
3728}
3729#else
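/*
 * WRMSR and RDMSR use ECX as the MSR index and EDX:EAX as the 64-bit value
 * (EDX holds the high half).  Unrecognized MSRs are silently ignored or
 * read as zero instead of raising #GP; the VBox paths additionally forward
 * the x2APIC MSR range and TSC_AUX to the device emulation.
 */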
3730void helper_wrmsr(void)
3731{
3732 uint64_t val;
3733
3734 helper_svm_check_intercept_param(SVM_EXIT_MSR, 1);
3735
3736 val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
3737
3738 switch((uint32_t)ECX) {
3739 case MSR_IA32_SYSENTER_CS:
3740 env->sysenter_cs = val & 0xffff;
3741 break;
3742 case MSR_IA32_SYSENTER_ESP:
3743 env->sysenter_esp = val;
3744 break;
3745 case MSR_IA32_SYSENTER_EIP:
3746 env->sysenter_eip = val;
3747 break;
3748 case MSR_IA32_APICBASE:
3749 cpu_set_apic_base(env, val);
3750 break;
3751 case MSR_EFER:
3752 {
3753 uint64_t update_mask;
3754 update_mask = 0;
3755 if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL)
3756 update_mask |= MSR_EFER_SCE;
3757 if (env->cpuid_ext2_features & CPUID_EXT2_LM)
3758 update_mask |= MSR_EFER_LME;
3759 if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
3760 update_mask |= MSR_EFER_FFXSR;
3761 if (env->cpuid_ext2_features & CPUID_EXT2_NX)
3762 update_mask |= MSR_EFER_NXE;
3763 if (env->cpuid_ext3_features & CPUID_EXT3_SVM)
3764 update_mask |= MSR_EFER_SVME;
3765 cpu_load_efer(env, (env->efer & ~update_mask) |
3766 (val & update_mask));
3767 }
3768 break;
3769 case MSR_STAR:
3770 env->star = val;
3771 break;
3772 case MSR_PAT:
3773 env->pat = val;
3774 break;
3775 case MSR_VM_HSAVE_PA:
3776 env->vm_hsave = val;
3777 break;
3778#ifdef TARGET_X86_64
3779 case MSR_LSTAR:
3780 env->lstar = val;
3781 break;
3782 case MSR_CSTAR:
3783 env->cstar = val;
3784 break;
3785 case MSR_FMASK:
3786 env->fmask = val;
3787 break;
3788 case MSR_FSBASE:
3789 env->segs[R_FS].base = val;
3790 break;
3791 case MSR_GSBASE:
3792 env->segs[R_GS].base = val;
3793 break;
3794 case MSR_KERNELGSBASE:
3795 env->kernelgsbase = val;
3796 break;
3797#endif
3798 default:
3799#ifndef VBOX
3800 /* XXX: exception ? */
3801 break;
3802#else /* VBOX */
3803 {
3804 uint32_t ecx = (uint32_t)ECX;
3805 /* In the x2APIC specification this range is reserved for APIC control. */
3806 if (ecx >= MSR_APIC_RANGE_START && ecx < MSR_APIC_RANGE_END)
3807 cpu_apic_wrmsr(env, ecx, val);
3808 /** @todo else exception? */
3809 break;
3810 }
3811 case MSR_K8_TSC_AUX:
3812 cpu_wrmsr(env, MSR_K8_TSC_AUX, val);
3813 break;
3814#endif /* VBOX */
3815 }
3816}
3817
3818void helper_rdmsr(void)
3819{
3820 uint64_t val;
3821
3822 helper_svm_check_intercept_param(SVM_EXIT_MSR, 0);
3823
3824 switch((uint32_t)ECX) {
3825 case MSR_IA32_SYSENTER_CS:
3826 val = env->sysenter_cs;
3827 break;
3828 case MSR_IA32_SYSENTER_ESP:
3829 val = env->sysenter_esp;
3830 break;
3831 case MSR_IA32_SYSENTER_EIP:
3832 val = env->sysenter_eip;
3833 break;
3834 case MSR_IA32_APICBASE:
3835 val = cpu_get_apic_base(env);
3836 break;
3837 case MSR_EFER:
3838 val = env->efer;
3839 break;
3840 case MSR_STAR:
3841 val = env->star;
3842 break;
3843 case MSR_PAT:
3844 val = env->pat;
3845 break;
3846 case MSR_VM_HSAVE_PA:
3847 val = env->vm_hsave;
3848 break;
3849 case MSR_IA32_PERF_STATUS:
3850 /* tsc_increment_by_tick */
3851 val = 1000ULL;
3852 /* CPU multiplier */
3853 val |= (((uint64_t)4ULL) << 40);
3854 break;
3855#ifdef TARGET_X86_64
3856 case MSR_LSTAR:
3857 val = env->lstar;
3858 break;
3859 case MSR_CSTAR:
3860 val = env->cstar;
3861 break;
3862 case MSR_FMASK:
3863 val = env->fmask;
3864 break;
3865 case MSR_FSBASE:
3866 val = env->segs[R_FS].base;
3867 break;
3868 case MSR_GSBASE:
3869 val = env->segs[R_GS].base;
3870 break;
3871 case MSR_KERNELGSBASE:
3872 val = env->kernelgsbase;
3873 break;
3874#endif
3875#ifdef USE_KQEMU
3876 case MSR_QPI_COMMBASE:
3877 if (env->kqemu_enabled) {
3878 val = kqemu_comm_base;
3879 } else {
3880 val = 0;
3881 }
3882 break;
3883#endif
3884 default:
3885#ifndef VBOX
3886 /* XXX: exception ? */
3887 val = 0;
3888 break;
3889#else /* VBOX */
3890 {
3891 uint32_t ecx = (uint32_t)ECX;
3892 /* In the x2APIC specification this range is reserved for APIC control. */
3893 if (ecx >= MSR_APIC_RANGE_START && ecx < MSR_APIC_RANGE_END)
3894 val = cpu_apic_rdmsr(env, ecx);
3895 else
3896 val = 0; /** @todo else exception? */
3897 break;
3898 }
3899 case MSR_IA32_TSC:
3900 case MSR_K8_TSC_AUX:
3901 val = cpu_rdmsr(env, (uint32_t)ECX);
3902 break;
3903#endif /* VBOX */
3904 }
3905 EAX = (uint32_t)(val);
3906 EDX = (uint32_t)(val >> 32);
3907}
3908#endif
3909
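/*
 * LSL, LAR, VERR and VERW report their outcome through ZF instead of
 * faulting: ZF is set only when the selector passes the type and privilege
 * checks, in which case LSL yields the segment limit and LAR the access
 * rights bytes; on failure ZF is cleared and these helpers return 0.
 */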
3910target_ulong helper_lsl(target_ulong selector1)
3911{
3912 unsigned int limit;
3913 uint32_t e1, e2, eflags, selector;
3914 int rpl, dpl, cpl, type;
3915
3916 selector = selector1 & 0xffff;
3917 eflags = cc_table[CC_OP].compute_all();
3918 if (load_segment(&e1, &e2, selector) != 0)
3919 goto fail;
3920 rpl = selector & 3;
3921 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3922 cpl = env->hflags & HF_CPL_MASK;
3923 if (e2 & DESC_S_MASK) {
3924 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3925 /* conforming */
3926 } else {
3927 if (dpl < cpl || dpl < rpl)
3928 goto fail;
3929 }
3930 } else {
3931 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
3932 switch(type) {
3933 case 1:
3934 case 2:
3935 case 3:
3936 case 9:
3937 case 11:
3938 break;
3939 default:
3940 goto fail;
3941 }
3942 if (dpl < cpl || dpl < rpl) {
3943 fail:
3944 CC_SRC = eflags & ~CC_Z;
3945 return 0;
3946 }
3947 }
3948 limit = get_seg_limit(e1, e2);
3949 CC_SRC = eflags | CC_Z;
3950 return limit;
3951}
3952
3953target_ulong helper_lar(target_ulong selector1)
3954{
3955 uint32_t e1, e2, eflags, selector;
3956 int rpl, dpl, cpl, type;
3957
3958 selector = selector1 & 0xffff;
3959 eflags = cc_table[CC_OP].compute_all();
3960 if ((selector & 0xfffc) == 0)
3961 goto fail;
3962 if (load_segment(&e1, &e2, selector) != 0)
3963 goto fail;
3964 rpl = selector & 3;
3965 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3966 cpl = env->hflags & HF_CPL_MASK;
3967 if (e2 & DESC_S_MASK) {
3968 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3969 /* conforming */
3970 } else {
3971 if (dpl < cpl || dpl < rpl)
3972 goto fail;
3973 }
3974 } else {
3975 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
3976 switch(type) {
3977 case 1:
3978 case 2:
3979 case 3:
3980 case 4:
3981 case 5:
3982 case 9:
3983 case 11:
3984 case 12:
3985 break;
3986 default:
3987 goto fail;
3988 }
3989 if (dpl < cpl || dpl < rpl) {
3990 fail:
3991 CC_SRC = eflags & ~CC_Z;
3992 return 0;
3993 }
3994 }
3995 CC_SRC = eflags | CC_Z;
3996 return e2 & 0x00f0ff00;
3997}
3998
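/* VERR: set ZF if the segment designated by the selector is readable at the current CPL/RPL, clear it otherwise. */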
3999void helper_verr(target_ulong selector1)
4000{
4001 uint32_t e1, e2, eflags, selector;
4002 int rpl, dpl, cpl;
4003
4004 selector = selector1 & 0xffff;
4005 eflags = cc_table[CC_OP].compute_all();
4006 if ((selector & 0xfffc) == 0)
4007 goto fail;
4008 if (load_segment(&e1, &e2, selector) != 0)
4009 goto fail;
4010 if (!(e2 & DESC_S_MASK))
4011 goto fail;
4012 rpl = selector & 3;
4013 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
4014 cpl = env->hflags & HF_CPL_MASK;
4015 if (e2 & DESC_CS_MASK) {
4016 if (!(e2 & DESC_R_MASK))
4017 goto fail;
4018 if (!(e2 & DESC_C_MASK)) {
4019 if (dpl < cpl || dpl < rpl)
4020 goto fail;
4021 }
4022 } else {
4023 if (dpl < cpl || dpl < rpl) {
4024 fail:
4025 CC_SRC = eflags & ~CC_Z;
4026 return;
4027 }
4028 }
4029 CC_SRC = eflags | CC_Z;
4030}
4031
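/* VERW: same protocol as VERR above, but check that the segment is a writable data segment. */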
4032void helper_verw(target_ulong selector1)
4033{
4034 uint32_t e1, e2, eflags, selector;
4035 int rpl, dpl, cpl;
4036
4037 selector = selector1 & 0xffff;
4038 eflags = cc_table[CC_OP].compute_all();
4039 if ((selector & 0xfffc) == 0)
4040 goto fail;
4041 if (load_segment(&e1, &e2, selector) != 0)
4042 goto fail;
4043 if (!(e2 & DESC_S_MASK))
4044 goto fail;
4045 rpl = selector & 3;
4046 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
4047 cpl = env->hflags & HF_CPL_MASK;
4048 if (e2 & DESC_CS_MASK) {
4049 goto fail;
4050 } else {
4051 if (dpl < cpl || dpl < rpl)
4052 goto fail;
4053 if (!(e2 & DESC_W_MASK)) {
4054 fail:
4055 CC_SRC = eflags & ~CC_Z;
4056 return;
4057 }
4058 }
4059 CC_SRC = eflags | CC_Z;
4060}
4061
4062/* x87 FPU helpers */
4063
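/* Set x87 exception bits in the status word; if any pending exception is unmasked in the
   control word, also raise the error-summary and busy flags. */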
4064static void fpu_set_exception(int mask)
4065{
4066 env->fpus |= mask;
4067 if (env->fpus & (~env->fpuc & FPUC_EM))
4068 env->fpus |= FPUS_SE | FPUS_B;
4069}
4070
4071#ifndef VBOX
4072static inline CPU86_LDouble helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
4073#else /* VBOX */
4074DECLINLINE(CPU86_LDouble) helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
4075#endif /* VBOX */
4076{
4077 if (b == 0.0)
4078 fpu_set_exception(FPUS_ZE);
4079 return a / b;
4080}
4081
4082void fpu_raise_exception(void)
4083{
4084 if (env->cr[0] & CR0_NE_MASK) {
4085 raise_exception(EXCP10_COPR);
4086 }
4087#if !defined(CONFIG_USER_ONLY)
4088 else {
4089 cpu_set_ferr(env);
4090 }
4091#endif
4092}
4093
4094void helper_flds_FT0(uint32_t val)
4095{
4096 union {
4097 float32 f;
4098 uint32_t i;
4099 } u;
4100 u.i = val;
4101 FT0 = float32_to_floatx(u.f, &env->fp_status);
4102}
4103
4104void helper_fldl_FT0(uint64_t val)
4105{
4106 union {
4107 float64 f;
4108 uint64_t i;
4109 } u;
4110 u.i = val;
4111 FT0 = float64_to_floatx(u.f, &env->fp_status);
4112}
4113
4114void helper_fildl_FT0(int32_t val)
4115{
4116 FT0 = int32_to_floatx(val, &env->fp_status);
4117}
4118
4119void helper_flds_ST0(uint32_t val)
4120{
4121 int new_fpstt;
4122 union {
4123 float32 f;
4124 uint32_t i;
4125 } u;
4126 new_fpstt = (env->fpstt - 1) & 7;
4127 u.i = val;
4128 env->fpregs[new_fpstt].d = float32_to_floatx(u.f, &env->fp_status);
4129 env->fpstt = new_fpstt;
4130 env->fptags[new_fpstt] = 0; /* validate stack entry */
4131}
4132
4133void helper_fldl_ST0(uint64_t val)
4134{
4135 int new_fpstt;
4136 union {
4137 float64 f;
4138 uint64_t i;
4139 } u;
4140 new_fpstt = (env->fpstt - 1) & 7;
4141 u.i = val;
4142 env->fpregs[new_fpstt].d = float64_to_floatx(u.f, &env->fp_status);
4143 env->fpstt = new_fpstt;
4144 env->fptags[new_fpstt] = 0; /* validate stack entry */
4145}
4146
4147void helper_fildl_ST0(int32_t val)
4148{
4149 int new_fpstt;
4150 new_fpstt = (env->fpstt - 1) & 7;
4151 env->fpregs[new_fpstt].d = int32_to_floatx(val, &env->fp_status);
4152 env->fpstt = new_fpstt;
4153 env->fptags[new_fpstt] = 0; /* validate stack entry */
4154}
4155
4156void helper_fildll_ST0(int64_t val)
4157{
4158 int new_fpstt;
4159 new_fpstt = (env->fpstt - 1) & 7;
4160 env->fpregs[new_fpstt].d = int64_to_floatx(val, &env->fp_status);
4161 env->fpstt = new_fpstt;
4162 env->fptags[new_fpstt] = 0; /* validate stack entry */
4163}
4164
4165#ifndef VBOX
4166uint32_t helper_fsts_ST0(void)
4167#else
4168RTCCUINTREG helper_fsts_ST0(void)
4169#endif
4170{
4171 union {
4172 float32 f;
4173 uint32_t i;
4174 } u;
4175 u.f = floatx_to_float32(ST0, &env->fp_status);
4176 return u.i;
4177}
4178
4179uint64_t helper_fstl_ST0(void)
4180{
4181 union {
4182 float64 f;
4183 uint64_t i;
4184 } u;
4185 u.f = floatx_to_float64(ST0, &env->fp_status);
4186 return u.i;
4187}
4188#ifndef VBOX
4189int32_t helper_fist_ST0(void)
4190#else
4191RTCCINTREG helper_fist_ST0(void)
4192#endif
4193{
4194 int32_t val;
4195 val = floatx_to_int32(ST0, &env->fp_status);
4196 if (val != (int16_t)val)
4197 val = -32768;
4198 return val;
4199}
4200
4201#ifndef VBOX
4202int32_t helper_fistl_ST0(void)
4203#else
4204RTCCINTREG helper_fistl_ST0(void)
4205#endif
4206{
4207 int32_t val;
4208 val = floatx_to_int32(ST0, &env->fp_status);
4209 return val;
4210}
4211
4212int64_t helper_fistll_ST0(void)
4213{
4214 int64_t val;
4215 val = floatx_to_int64(ST0, &env->fp_status);
4216 return val;
4217}
4218
4219#ifndef VBOX
4220int32_t helper_fistt_ST0(void)
4221#else
4222RTCCINTREG helper_fistt_ST0(void)
4223#endif
4224{
4225 int32_t val;
4226 val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
4227 if (val != (int16_t)val)
4228 val = -32768;
4229 return val;
4230}
4231
4232#ifndef VBOX
4233int32_t helper_fisttl_ST0(void)
4234#else
4235RTCCINTREG helper_fisttl_ST0(void)
4236#endif
4237{
4238 int32_t val;
4239 val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
4240 return val;
4241}
4242
4243int64_t helper_fisttll_ST0(void)
4244{
4245 int64_t val;
4246 val = floatx_to_int64_round_to_zero(ST0, &env->fp_status);
4247 return val;
4248}
4249
4250void helper_fldt_ST0(target_ulong ptr)
4251{
4252 int new_fpstt;
4253 new_fpstt = (env->fpstt - 1) & 7;
4254 env->fpregs[new_fpstt].d = helper_fldt(ptr);
4255 env->fpstt = new_fpstt;
4256 env->fptags[new_fpstt] = 0; /* validate stack entry */
4257}
4258
4259void helper_fstt_ST0(target_ulong ptr)
4260{
4261 helper_fstt(ST0, ptr);
4262}
4263
4264void helper_fpush(void)
4265{
4266 fpush();
4267}
4268
4269void helper_fpop(void)
4270{
4271 fpop();
4272}
4273
4274void helper_fdecstp(void)
4275{
4276 env->fpstt = (env->fpstt - 1) & 7;
4277 env->fpus &= (~0x4700);
4278}
4279
4280void helper_fincstp(void)
4281{
4282 env->fpstt = (env->fpstt + 1) & 7;
4283 env->fpus &= (~0x4700);
4284}
4285
4286/* FPU move */
4287
4288void helper_ffree_STN(int st_index)
4289{
4290 env->fptags[(env->fpstt + st_index) & 7] = 1;
4291}
4292
4293void helper_fmov_ST0_FT0(void)
4294{
4295 ST0 = FT0;
4296}
4297
4298void helper_fmov_FT0_STN(int st_index)
4299{
4300 FT0 = ST(st_index);
4301}
4302
4303void helper_fmov_ST0_STN(int st_index)
4304{
4305 ST0 = ST(st_index);
4306}
4307
4308void helper_fmov_STN_ST0(int st_index)
4309{
4310 ST(st_index) = ST0;
4311}
4312
4313void helper_fxchg_ST0_STN(int st_index)
4314{
4315 CPU86_LDouble tmp;
4316 tmp = ST(st_index);
4317 ST(st_index) = ST0;
4318 ST0 = tmp;
4319}
4320
4321/* FPU operations */
4322
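/* FPU condition-code patterns (C0/C2/C3 in the status word) indexed by compare result + 1:
   less, equal, greater, unordered. */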
4323static const int fcom_ccval[4] = {0x0100, 0x4000, 0x0000, 0x4500};
4324
4325void helper_fcom_ST0_FT0(void)
4326{
4327 int ret;
4328
4329 ret = floatx_compare(ST0, FT0, &env->fp_status);
4330 env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
4331 FORCE_RET();
4332}
4333
4334void helper_fucom_ST0_FT0(void)
4335{
4336 int ret;
4337
4338 ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
4339 env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
4340 FORCE_RET();
4341}
4342
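/* EFLAGS patterns (CF/ZF/PF) for FCOMI/FUCOMI, indexed the same way as fcom_ccval above. */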
4343static const int fcomi_ccval[4] = {CC_C, CC_Z, 0, CC_Z | CC_P | CC_C};
4344
4345void helper_fcomi_ST0_FT0(void)
4346{
4347 int eflags;
4348 int ret;
4349
4350 ret = floatx_compare(ST0, FT0, &env->fp_status);
4351 eflags = cc_table[CC_OP].compute_all();
4352 eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
4353 CC_SRC = eflags;
4354 FORCE_RET();
4355}
4356
4357void helper_fucomi_ST0_FT0(void)
4358{
4359 int eflags;
4360 int ret;
4361
4362 ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
4363 eflags = cc_table[CC_OP].compute_all();
4364 eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
4365 CC_SRC = eflags;
4366 FORCE_RET();
4367}
4368
4369void helper_fadd_ST0_FT0(void)
4370{
4371 ST0 += FT0;
4372}
4373
4374void helper_fmul_ST0_FT0(void)
4375{
4376 ST0 *= FT0;
4377}
4378
4379void helper_fsub_ST0_FT0(void)
4380{
4381 ST0 -= FT0;
4382}
4383
4384void helper_fsubr_ST0_FT0(void)
4385{
4386 ST0 = FT0 - ST0;
4387}
4388
4389void helper_fdiv_ST0_FT0(void)
4390{
4391 ST0 = helper_fdiv(ST0, FT0);
4392}
4393
4394void helper_fdivr_ST0_FT0(void)
4395{
4396 ST0 = helper_fdiv(FT0, ST0);
4397}
4398
4399/* fp operations between STN and ST0 */
4400
4401void helper_fadd_STN_ST0(int st_index)
4402{
4403 ST(st_index) += ST0;
4404}
4405
4406void helper_fmul_STN_ST0(int st_index)
4407{
4408 ST(st_index) *= ST0;
4409}
4410
4411void helper_fsub_STN_ST0(int st_index)
4412{
4413 ST(st_index) -= ST0;
4414}
4415
4416void helper_fsubr_STN_ST0(int st_index)
4417{
4418 CPU86_LDouble *p;
4419 p = &ST(st_index);
4420 *p = ST0 - *p;
4421}
4422
4423void helper_fdiv_STN_ST0(int st_index)
4424{
4425 CPU86_LDouble *p;
4426 p = &ST(st_index);
4427 *p = helper_fdiv(*p, ST0);
4428}
4429
4430void helper_fdivr_STN_ST0(int st_index)
4431{
4432 CPU86_LDouble *p;
4433 p = &ST(st_index);
4434 *p = helper_fdiv(ST0, *p);
4435}
4436
4437/* misc FPU operations */
4438void helper_fchs_ST0(void)
4439{
4440 ST0 = floatx_chs(ST0);
4441}
4442
4443void helper_fabs_ST0(void)
4444{
4445 ST0 = floatx_abs(ST0);
4446}
4447
4448void helper_fld1_ST0(void)
4449{
4450 ST0 = f15rk[1];
4451}
4452
4453void helper_fldl2t_ST0(void)
4454{
4455 ST0 = f15rk[6];
4456}
4457
4458void helper_fldl2e_ST0(void)
4459{
4460 ST0 = f15rk[5];
4461}
4462
4463void helper_fldpi_ST0(void)
4464{
4465 ST0 = f15rk[2];
4466}
4467
4468void helper_fldlg2_ST0(void)
4469{
4470 ST0 = f15rk[3];
4471}
4472
4473void helper_fldln2_ST0(void)
4474{
4475 ST0 = f15rk[4];
4476}
4477
4478void helper_fldz_ST0(void)
4479{
4480 ST0 = f15rk[0];
4481}
4482
4483void helper_fldz_FT0(void)
4484{
4485 FT0 = f15rk[0];
4486}
4487
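/* FNSTSW: return the status word with the current top-of-stack index folded into bits 11-13. */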
4488#ifndef VBOX
4489uint32_t helper_fnstsw(void)
4490#else
4491RTCCUINTREG helper_fnstsw(void)
4492#endif
4493{
4494 return (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
4495}
4496
4497#ifndef VBOX
4498uint32_t helper_fnstcw(void)
4499#else
4500RTCCUINTREG helper_fnstcw(void)
4501#endif
4502{
4503 return env->fpuc;
4504}
4505
4506static void update_fp_status(void)
4507{
4508 int rnd_type;
4509
4510 /* set rounding mode */
4511 switch(env->fpuc & RC_MASK) {
4512 default:
4513 case RC_NEAR:
4514 rnd_type = float_round_nearest_even;
4515 break;
4516 case RC_DOWN:
4517 rnd_type = float_round_down;
4518 break;
4519 case RC_UP:
4520 rnd_type = float_round_up;
4521 break;
4522 case RC_CHOP:
4523 rnd_type = float_round_to_zero;
4524 break;
4525 }
4526 set_float_rounding_mode(rnd_type, &env->fp_status);
4527#ifdef FLOATX80
4528 switch((env->fpuc >> 8) & 3) {
4529 case 0:
4530 rnd_type = 32;
4531 break;
4532 case 2:
4533 rnd_type = 64;
4534 break;
4535 case 3:
4536 default:
4537 rnd_type = 80;
4538 break;
4539 }
4540 set_floatx80_rounding_precision(rnd_type, &env->fp_status);
4541#endif
4542}
4543
4544void helper_fldcw(uint32_t val)
4545{
4546 env->fpuc = val;
4547 update_fp_status();
4548}
4549
4550void helper_fclex(void)
4551{
4552 env->fpus &= 0x7f00;
4553}
4554
4555void helper_fwait(void)
4556{
4557 if (env->fpus & FPUS_SE)
4558 fpu_raise_exception();
4559 FORCE_RET();
4560}
4561
4562void helper_fninit(void)
4563{
4564 env->fpus = 0;
4565 env->fpstt = 0;
4566 env->fpuc = 0x37f;
4567 env->fptags[0] = 1;
4568 env->fptags[1] = 1;
4569 env->fptags[2] = 1;
4570 env->fptags[3] = 1;
4571 env->fptags[4] = 1;
4572 env->fptags[5] = 1;
4573 env->fptags[6] = 1;
4574 env->fptags[7] = 1;
4575}
4576
4577/* BCD ops */
4578
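/* FBLD: load an 18-digit packed BCD number (9 digit bytes, sign in bit 7 of the 10th byte)
   and push it onto the FPU stack. */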
4579void helper_fbld_ST0(target_ulong ptr)
4580{
4581 CPU86_LDouble tmp;
4582 uint64_t val;
4583 unsigned int v;
4584 int i;
4585
4586 val = 0;
4587 for(i = 8; i >= 0; i--) {
4588 v = ldub(ptr + i);
4589 val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
4590 }
4591 tmp = val;
4592 if (ldub(ptr + 9) & 0x80)
4593 tmp = -tmp;
4594 fpush();
4595 ST0 = tmp;
4596}
4597
4598void helper_fbst_ST0(target_ulong ptr)
4599{
4600 int v;
4601 target_ulong mem_ref, mem_end;
4602 int64_t val;
4603
4604 val = floatx_to_int64(ST0, &env->fp_status);
4605 mem_ref = ptr;
4606 mem_end = mem_ref + 9;
4607 if (val < 0) {
4608 stb(mem_end, 0x80);
4609 val = -val;
4610 } else {
4611 stb(mem_end, 0x00);
4612 }
4613 while (mem_ref < mem_end) {
4614 if (val == 0)
4615 break;
4616 v = val % 100;
4617 val = val / 100;
4618 v = ((v / 10) << 4) | (v % 10);
4619 stb(mem_ref++, v);
4620 }
4621 while (mem_ref < mem_end) {
4622 stb(mem_ref++, 0);
4623 }
4624}
4625
4626void helper_f2xm1(void)
4627{
4628 ST0 = pow(2.0,ST0) - 1.0;
4629}
4630
4631void helper_fyl2x(void)
4632{
4633 CPU86_LDouble fptemp;
4634
4635 fptemp = ST0;
4636 if (fptemp>0.0){
4637 fptemp = log(fptemp)/log(2.0); /* log2(ST) */
4638 ST1 *= fptemp;
4639 fpop();
4640 } else {
4641 env->fpus &= (~0x4700);
4642 env->fpus |= 0x400;
4643 }
4644}
4645
4646void helper_fptan(void)
4647{
4648 CPU86_LDouble fptemp;
4649
4650 fptemp = ST0;
4651 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4652 env->fpus |= 0x400;
4653 } else {
4654 ST0 = tan(fptemp);
4655 fpush();
4656 ST0 = 1.0;
4657 env->fpus &= (~0x400); /* C2 <-- 0 */
4658 /* the above code is for |arg| < 2**52 only */
4659 }
4660}
4661
4662void helper_fpatan(void)
4663{
4664 CPU86_LDouble fptemp, fpsrcop;
4665
4666 fpsrcop = ST1;
4667 fptemp = ST0;
4668 ST1 = atan2(fpsrcop,fptemp);
4669 fpop();
4670}
4671
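/* FXTRACT: replace ST0 with its unbiased exponent and push the significand (exponent reset to the bias). */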
4672void helper_fxtract(void)
4673{
4674 CPU86_LDoubleU temp;
4675 unsigned int expdif;
4676
4677 temp.d = ST0;
4678 expdif = EXPD(temp) - EXPBIAS;
4679 /*DP exponent bias*/
4680 ST0 = expdif;
4681 fpush();
4682 BIASEXPONENT(temp);
4683 ST0 = temp.d;
4684}
4685
4686#ifdef VBOX
4687#ifdef _MSC_VER
4688/* MSC cannot divide by zero */
4689extern double _Nan;
4690#define NaN _Nan
4691#else
4692#define NaN (0.0 / 0.0)
4693#endif
4694#endif /* VBOX */
4695
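/* FPREM1: IEEE partial remainder with the quotient rounded to nearest. C2 in the status word
   signals an incomplete reduction; C0/C3/C1 receive the low quotient bits. */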
4696void helper_fprem1(void)
4697{
4698 CPU86_LDouble dblq, fpsrcop, fptemp;
4699 CPU86_LDoubleU fpsrcop1, fptemp1;
4700 int expdif;
4701 signed long long int q;
4702
4703#ifndef VBOX /* Unfortunately, we cannot handle isinf/isnan easily in wrapper */
4704 if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
4705#else
4706 if ((ST0 != ST0) || (ST1 != ST1) || (ST1 == 0.0)) {
4707#endif
4708 ST0 = 0.0 / 0.0; /* NaN */
4709 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4710 return;
4711 }
4712
4713 fpsrcop = ST0;
4714 fptemp = ST1;
4715 fpsrcop1.d = fpsrcop;
4716 fptemp1.d = fptemp;
4717 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
4718
4719 if (expdif < 0) {
4720 /* optimisation? taken from the AMD docs */
4721 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4722 /* ST0 is unchanged */
4723 return;
4724 }
4725
4726 if (expdif < 53) {
4727 dblq = fpsrcop / fptemp;
4728 /* round dblq towards nearest integer */
4729 dblq = rint(dblq);
4730 ST0 = fpsrcop - fptemp * dblq;
4731
4732 /* convert dblq to q by truncating towards zero */
4733 if (dblq < 0.0)
4734 q = (signed long long int)(-dblq);
4735 else
4736 q = (signed long long int)dblq;
4737
4738 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4739 /* (C0,C3,C1) <-- (q2,q1,q0) */
4740 env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
4741 env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
4742 env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
4743 } else {
4744 env->fpus |= 0x400; /* C2 <-- 1 */
4745 fptemp = pow(2.0, expdif - 50);
4746 fpsrcop = (ST0 / ST1) / fptemp;
4747 /* fpsrcop = integer obtained by chopping */
4748 fpsrcop = (fpsrcop < 0.0) ?
4749 -(floor(fabs(fpsrcop))) : floor(fpsrcop);
4750 ST0 -= (ST1 * fpsrcop * fptemp);
4751 }
4752}
4753
4754void helper_fprem(void)
4755{
4756 CPU86_LDouble dblq, fpsrcop, fptemp;
4757 CPU86_LDoubleU fpsrcop1, fptemp1;
4758 int expdif;
4759 signed long long int q;
4760
4761#ifndef VBOX /* Unfortunately, we cannot easily handle isinf/isnan in wrapper */
4762 if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
4763#else
4764 if ((ST0 != ST0) || (ST1 != ST1) || (ST1 == 0.0)) {
4765#endif
4766 ST0 = 0.0 / 0.0; /* NaN */
4767 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4768 return;
4769 }
4770
4771 fpsrcop = (CPU86_LDouble)ST0;
4772 fptemp = (CPU86_LDouble)ST1;
4773 fpsrcop1.d = fpsrcop;
4774 fptemp1.d = fptemp;
4775 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
4776
4777 if (expdif < 0) {
4778 /* optimisation? taken from the AMD docs */
4779 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4780 /* ST0 is unchanged */
4781 return;
4782 }
4783
4784 if ( expdif < 53 ) {
4785 dblq = fpsrcop/*ST0*/ / fptemp/*ST1*/;
4786 /* round dblq towards zero */
4787 dblq = (dblq < 0.0) ? ceil(dblq) : floor(dblq);
4788 ST0 = fpsrcop/*ST0*/ - fptemp * dblq;
4789
4790 /* convert dblq to q by truncating towards zero */
4791 if (dblq < 0.0)
4792 q = (signed long long int)(-dblq);
4793 else
4794 q = (signed long long int)dblq;
4795
4796 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4797 /* (C0,C3,C1) <-- (q2,q1,q0) */
4798 env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
4799 env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
4800 env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
4801 } else {
4802 int N = 32 + (expdif % 32); /* as per AMD docs */
4803 env->fpus |= 0x400; /* C2 <-- 1 */
4804 fptemp = pow(2.0, (double)(expdif - N));
4805 fpsrcop = (ST0 / ST1) / fptemp;
4806 /* fpsrcop = integer obtained by chopping */
4807 fpsrcop = (fpsrcop < 0.0) ?
4808 -(floor(fabs(fpsrcop))) : floor(fpsrcop);
4809 ST0 -= (ST1 * fpsrcop * fptemp);
4810 }
4811}
4812
4813void helper_fyl2xp1(void)
4814{
4815 CPU86_LDouble fptemp;
4816
4817 fptemp = ST0;
4818 if ((fptemp+1.0)>0.0) {
4819 fptemp = log(fptemp+1.0) / log(2.0); /* log2(ST+1.0) */
4820 ST1 *= fptemp;
4821 fpop();
4822 } else {
4823 env->fpus &= (~0x4700);
4824 env->fpus |= 0x400;
4825 }
4826}
4827
4828void helper_fsqrt(void)
4829{
4830 CPU86_LDouble fptemp;
4831
4832 fptemp = ST0;
4833 if (fptemp<0.0) {
4834 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4835 env->fpus |= 0x400;
4836 }
4837 ST0 = sqrt(fptemp);
4838}
4839
4840void helper_fsincos(void)
4841{
4842 CPU86_LDouble fptemp;
4843
4844 fptemp = ST0;
4845 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4846 env->fpus |= 0x400;
4847 } else {
4848 ST0 = sin(fptemp);
4849 fpush();
4850 ST0 = cos(fptemp);
4851 env->fpus &= (~0x400); /* C2 <-- 0 */
4852 /* the above code is for |arg| < 2**63 only */
4853 }
4854}
4855
4856void helper_frndint(void)
4857{
4858 ST0 = floatx_round_to_int(ST0, &env->fp_status);
4859}
4860
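/* FSCALE: scale ST0 by 2 raised to ST1 truncated to an integer. */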
4861void helper_fscale(void)
4862{
4863 ST0 = ldexp (ST0, (int)(ST1));
4864}
4865
4866void helper_fsin(void)
4867{
4868 CPU86_LDouble fptemp;
4869
4870 fptemp = ST0;
4871 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4872 env->fpus |= 0x400;
4873 } else {
4874 ST0 = sin(fptemp);
4875 env->fpus &= (~0x400); /* C2 <-- 0 */
4876 /* the above code is for |arg| < 2**53 only */
4877 }
4878}
4879
4880void helper_fcos(void)
4881{
4882 CPU86_LDouble fptemp;
4883
4884 fptemp = ST0;
4885 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4886 env->fpus |= 0x400;
4887 } else {
4888 ST0 = cos(fptemp);
4889 env->fpus &= (~0x400); /* C2 <-- 0 */
4890 /* the above code is for |arg| < 2**63 only */
4891 }
4892}
4893
4894void helper_fxam_ST0(void)
4895{
4896 CPU86_LDoubleU temp;
4897 int expdif;
4898
4899 temp.d = ST0;
4900
4901 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4902 if (SIGND(temp))
4903 env->fpus |= 0x200; /* C1 <-- 1 */
4904
4905 /* XXX: test fptags too */
4906 expdif = EXPD(temp);
4907 if (expdif == MAXEXPD) {
4908#ifdef USE_X86LDOUBLE
4909 if (MANTD(temp) == 0x8000000000000000ULL)
4910#else
4911 if (MANTD(temp) == 0)
4912#endif
4913 env->fpus |= 0x500 /*Infinity*/;
4914 else
4915 env->fpus |= 0x100 /*NaN*/;
4916 } else if (expdif == 0) {
4917 if (MANTD(temp) == 0)
4918 env->fpus |= 0x4000 /*Zero*/;
4919 else
4920 env->fpus |= 0x4400 /*Denormal*/;
4921 } else {
4922 env->fpus |= 0x400;
4923 }
4924}
4925
4926void helper_fstenv(target_ulong ptr, int data32)
4927{
4928 int fpus, fptag, exp, i;
4929 uint64_t mant;
4930 CPU86_LDoubleU tmp;
4931
4932 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
4933 fptag = 0;
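/* Tag word: 2 bits per register - 00 valid, 01 zero, 10 special (NaN/infinity/denormal), 11 empty. */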
4934 for (i=7; i>=0; i--) {
4935 fptag <<= 2;
4936 if (env->fptags[i]) {
4937 fptag |= 3;
4938 } else {
4939 tmp.d = env->fpregs[i].d;
4940 exp = EXPD(tmp);
4941 mant = MANTD(tmp);
4942 if (exp == 0 && mant == 0) {
4943 /* zero */
4944 fptag |= 1;
4945 } else if (exp == 0 || exp == MAXEXPD
4946#ifdef USE_X86LDOUBLE
4947 || (mant & (1LL << 63)) == 0
4948#endif
4949 ) {
4950 /* NaNs, infinity, denormal */
4951 fptag |= 2;
4952 }
4953 }
4954 }
4955 if (data32) {
4956 /* 32 bit */
4957 stl(ptr, env->fpuc);
4958 stl(ptr + 4, fpus);
4959 stl(ptr + 8, fptag);
4960 stl(ptr + 12, 0); /* fpip */
4961 stl(ptr + 16, 0); /* fpcs */
4962 stl(ptr + 20, 0); /* fpoo */
4963 stl(ptr + 24, 0); /* fpos */
4964 } else {
4965 /* 16 bit */
4966 stw(ptr, env->fpuc);
4967 stw(ptr + 2, fpus);
4968 stw(ptr + 4, fptag);
4969 stw(ptr + 6, 0);
4970 stw(ptr + 8, 0);
4971 stw(ptr + 10, 0);
4972 stw(ptr + 12, 0);
4973 }
4974}
4975
4976void helper_fldenv(target_ulong ptr, int data32)
4977{
4978 int i, fpus, fptag;
4979
4980 if (data32) {
4981 env->fpuc = lduw(ptr);
4982 fpus = lduw(ptr + 4);
4983 fptag = lduw(ptr + 8);
4984 }
4985 else {
4986 env->fpuc = lduw(ptr);
4987 fpus = lduw(ptr + 2);
4988 fptag = lduw(ptr + 4);
4989 }
4990 env->fpstt = (fpus >> 11) & 7;
4991 env->fpus = fpus & ~0x3800;
4992 for(i = 0;i < 8; i++) {
4993 env->fptags[i] = ((fptag & 3) == 3);
4994 fptag >>= 2;
4995 }
4996}
4997
4998void helper_fsave(target_ulong ptr, int data32)
4999{
5000 CPU86_LDouble tmp;
5001 int i;
5002
5003 helper_fstenv(ptr, data32);
5004
5005 ptr += (14 << data32);
5006 for(i = 0;i < 8; i++) {
5007 tmp = ST(i);
5008 helper_fstt(tmp, ptr);
5009 ptr += 10;
5010 }
5011
5012 /* fninit */
5013 env->fpus = 0;
5014 env->fpstt = 0;
5015 env->fpuc = 0x37f;
5016 env->fptags[0] = 1;
5017 env->fptags[1] = 1;
5018 env->fptags[2] = 1;
5019 env->fptags[3] = 1;
5020 env->fptags[4] = 1;
5021 env->fptags[5] = 1;
5022 env->fptags[6] = 1;
5023 env->fptags[7] = 1;
5024}
5025
5026void helper_frstor(target_ulong ptr, int data32)
5027{
5028 CPU86_LDouble tmp;
5029 int i;
5030
5031 helper_fldenv(ptr, data32);
5032 ptr += (14 << data32);
5033
5034 for(i = 0;i < 8; i++) {
5035 tmp = helper_fldt(ptr);
5036 ST(i) = tmp;
5037 ptr += 10;
5038 }
5039}
5040
5041void helper_fxsave(target_ulong ptr, int data64)
5042{
5043 int fpus, fptag, i, nb_xmm_regs;
5044 CPU86_LDouble tmp;
5045 target_ulong addr;
5046
5047 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
5048 fptag = 0;
5049 for(i = 0; i < 8; i++) {
5050 fptag |= (env->fptags[i] << i);
5051 }
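/* FXSAVE stores an abridged tag word: one bit per register, 1 = valid (not empty). */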
5052 stw(ptr, env->fpuc);
5053 stw(ptr + 2, fpus);
5054 stw(ptr + 4, fptag ^ 0xff);
5055#ifdef TARGET_X86_64
5056 if (data64) {
5057 stq(ptr + 0x08, 0); /* rip */
5058 stq(ptr + 0x10, 0); /* rdp */
5059 } else
5060#endif
5061 {
5062 stl(ptr + 0x08, 0); /* eip */
5063 stl(ptr + 0x0c, 0); /* sel */
5064 stl(ptr + 0x10, 0); /* dp */
5065 stl(ptr + 0x14, 0); /* sel */
5066 }
5067
5068 addr = ptr + 0x20;
5069 for(i = 0;i < 8; i++) {
5070 tmp = ST(i);
5071 helper_fstt(tmp, addr);
5072 addr += 16;
5073 }
5074
5075 if (env->cr[4] & CR4_OSFXSR_MASK) {
5076 /* XXX: finish it */
5077 stl(ptr + 0x18, env->mxcsr); /* mxcsr */
5078 stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
5079 if (env->hflags & HF_CS64_MASK)
5080 nb_xmm_regs = 16;
5081 else
5082 nb_xmm_regs = 8;
5083 addr = ptr + 0xa0;
5084 for(i = 0; i < nb_xmm_regs; i++) {
5085 stq(addr, env->xmm_regs[i].XMM_Q(0));
5086 stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
5087 addr += 16;
5088 }
5089 }
5090}
5091
5092void helper_fxrstor(target_ulong ptr, int data64)
5093{
5094 int i, fpus, fptag, nb_xmm_regs;
5095 CPU86_LDouble tmp;
5096 target_ulong addr;
5097
5098 env->fpuc = lduw(ptr);
5099 fpus = lduw(ptr + 2);
5100 fptag = lduw(ptr + 4);
5101 env->fpstt = (fpus >> 11) & 7;
5102 env->fpus = fpus & ~0x3800;
5103 fptag ^= 0xff;
5104 for(i = 0;i < 8; i++) {
5105 env->fptags[i] = ((fptag >> i) & 1);
5106 }
5107
5108 addr = ptr + 0x20;
5109 for(i = 0;i < 8; i++) {
5110 tmp = helper_fldt(addr);
5111 ST(i) = tmp;
5112 addr += 16;
5113 }
5114
5115 if (env->cr[4] & CR4_OSFXSR_MASK) {
5116 /* XXX: finish it */
5117 env->mxcsr = ldl(ptr + 0x18);
5118 //ldl(ptr + 0x1c);
5119 if (env->hflags & HF_CS64_MASK)
5120 nb_xmm_regs = 16;
5121 else
5122 nb_xmm_regs = 8;
5123 addr = ptr + 0xa0;
5124 for(i = 0; i < nb_xmm_regs; i++) {
5125#if !defined(VBOX) || __GNUC__ < 4
5126 env->xmm_regs[i].XMM_Q(0) = ldq(addr);
5127 env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
5128#else /* VBOX + __GNUC__ >= 4: gcc 4.x compiler bug - it runs out of registers for the 64-bit value. */
5129# if 1
5130 env->xmm_regs[i].XMM_L(0) = ldl(addr);
5131 env->xmm_regs[i].XMM_L(1) = ldl(addr + 4);
5132 env->xmm_regs[i].XMM_L(2) = ldl(addr + 8);
5133 env->xmm_regs[i].XMM_L(3) = ldl(addr + 12);
5134# else
5135 /* this works fine on Mac OS X, gcc 4.0.1 */
5136 uint64_t u64 = ldq(addr);
5137 env->xmm_regs[i].XMM_Q(0) = u64;
5138 u64 = ldq(addr + 8);
5139 env->xmm_regs[i].XMM_Q(1) = u64;
5140# endif
5141#endif
5142 addr += 16;
5143 }
5144 }
5145}
5146
5147#ifndef USE_X86LDOUBLE
5148
5149void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
5150{
5151 CPU86_LDoubleU temp;
5152 int e;
5153
5154 temp.d = f;
5155 /* mantissa */
5156 *pmant = (MANTD(temp) << 11) | (1LL << 63);
5157 /* exponent + sign */
5158 e = EXPD(temp) - EXPBIAS + 16383;
5159 e |= SIGND(temp) >> 16;
5160 *pexp = e;
5161}
5162
5163CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
5164{
5165 CPU86_LDoubleU temp;
5166 int e;
5167 uint64_t ll;
5168
5169 /* XXX: handle overflow ? */
5170 e = (upper & 0x7fff) - 16383 + EXPBIAS; /* exponent */
5171 e |= (upper >> 4) & 0x800; /* sign */
5172 ll = (mant >> 11) & ((1LL << 52) - 1);
5173#ifdef __arm__
5174 temp.l.upper = (e << 20) | (ll >> 32);
5175 temp.l.lower = ll;
5176#else
5177 temp.ll = ll | ((uint64_t)e << 52);
5178#endif
5179 return temp.d;
5180}
5181
5182#else
5183
5184void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
5185{
5186 CPU86_LDoubleU temp;
5187
5188 temp.d = f;
5189 *pmant = temp.l.lower;
5190 *pexp = temp.l.upper;
5191}
5192
5193CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
5194{
5195 CPU86_LDoubleU temp;
5196
5197 temp.l.upper = upper;
5198 temp.l.lower = mant;
5199 return temp.d;
5200}
5201#endif
5202
5203#ifdef TARGET_X86_64
5204
5205//#define DEBUG_MULDIV
5206
5207static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
5208{
5209 *plow += a;
5210 /* carry test */
5211 if (*plow < a)
5212 (*phigh)++;
5213 *phigh += b;
5214}
5215
5216static void neg128(uint64_t *plow, uint64_t *phigh)
5217{
5218 *plow = ~ *plow;
5219 *phigh = ~ *phigh;
5220 add128(plow, phigh, 1, 0);
5221}
5222
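/* 128-by-64-bit unsigned division of {*phigh:*plow} by b; the quotient is left in *plow,
   the remainder in *phigh. */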
5223/* return TRUE if overflow */
5224static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
5225{
5226 uint64_t q, r, a1, a0;
5227 int i, qb, ab;
5228
5229 a0 = *plow;
5230 a1 = *phigh;
5231 if (a1 == 0) {
5232 q = a0 / b;
5233 r = a0 % b;
5234 *plow = q;
5235 *phigh = r;
5236 } else {
5237 if (a1 >= b)
5238 return 1;
5239 /* XXX: use a better algorithm */
5240 for(i = 0; i < 64; i++) {
5241 ab = a1 >> 63;
5242 a1 = (a1 << 1) | (a0 >> 63);
5243 if (ab || a1 >= b) {
5244 a1 -= b;
5245 qb = 1;
5246 } else {
5247 qb = 0;
5248 }
5249 a0 = (a0 << 1) | qb;
5250 }
5251#if defined(DEBUG_MULDIV)
5252 printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64 ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n",
5253 *phigh, *plow, b, a0, a1);
5254#endif
5255 *plow = a0;
5256 *phigh = a1;
5257 }
5258 return 0;
5259}
5260
5261/* return TRUE if overflow */
5262static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
5263{
5264 int sa, sb;
5265 sa = ((int64_t)*phigh < 0);
5266 if (sa)
5267 neg128(plow, phigh);
5268 sb = (b < 0);
5269 if (sb)
5270 b = -b;
5271 if (div64(plow, phigh, b) != 0)
5272 return 1;
5273 if (sa ^ sb) {
5274 if (*plow > (1ULL << 63))
5275 return 1;
5276 *plow = - *plow;
5277 } else {
5278 if (*plow >= (1ULL << 63))
5279 return 1;
5280 }
5281 if (sa)
5282 *phigh = - *phigh;
5283 return 0;
5284}
5285
5286void helper_mulq_EAX_T0(target_ulong t0)
5287{
5288 uint64_t r0, r1;
5289
5290 mulu64(&r0, &r1, EAX, t0);
5291 EAX = r0;
5292 EDX = r1;
5293 CC_DST = r0;
5294 CC_SRC = r1;
5295}
5296
5297void helper_imulq_EAX_T0(target_ulong t0)
5298{
5299 uint64_t r0, r1;
5300
5301 muls64(&r0, &r1, EAX, t0);
5302 EAX = r0;
5303 EDX = r1;
5304 CC_DST = r0;
5305 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
5306}
5307
5308target_ulong helper_imulq_T0_T1(target_ulong t0, target_ulong t1)
5309{
5310 uint64_t r0, r1;
5311
5312 muls64(&r0, &r1, t0, t1);
5313 CC_DST = r0;
5314 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
5315 return r0;
5316}
5317
5318void helper_divq_EAX(target_ulong t0)
5319{
5320 uint64_t r0, r1;
5321 if (t0 == 0) {
5322 raise_exception(EXCP00_DIVZ);
5323 }
5324 r0 = EAX;
5325 r1 = EDX;
5326 if (div64(&r0, &r1, t0))
5327 raise_exception(EXCP00_DIVZ);
5328 EAX = r0;
5329 EDX = r1;
5330}
5331
5332void helper_idivq_EAX(target_ulong t0)
5333{
5334 uint64_t r0, r1;
5335 if (t0 == 0) {
5336 raise_exception(EXCP00_DIVZ);
5337 }
5338 r0 = EAX;
5339 r1 = EDX;
5340 if (idiv64(&r0, &r1, t0))
5341 raise_exception(EXCP00_DIVZ);
5342 EAX = r0;
5343 EDX = r1;
5344}
5345#endif
5346
5347static void do_hlt(void)
5348{
5349 env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
5350 env->halted = 1;
5351 env->exception_index = EXCP_HLT;
5352 cpu_loop_exit();
5353}
5354
5355void helper_hlt(int next_eip_addend)
5356{
5357 helper_svm_check_intercept_param(SVM_EXIT_HLT, 0);
5358 EIP += next_eip_addend;
5359
5360 do_hlt();
5361}
5362
5363void helper_monitor(target_ulong ptr)
5364{
5365 if ((uint32_t)ECX != 0)
5366 raise_exception(EXCP0D_GPF);
5367 /* XXX: store address ? */
5368 helper_svm_check_intercept_param(SVM_EXIT_MONITOR, 0);
5369}
5370
5371void helper_mwait(int next_eip_addend)
5372{
5373 if ((uint32_t)ECX != 0)
5374 raise_exception(EXCP0D_GPF);
5375#ifdef VBOX
5376 helper_hlt(next_eip_addend);
5377#else
5378 helper_svm_check_intercept_param(SVM_EXIT_MWAIT, 0);
5379 EIP += next_eip_addend;
5380
5381 /* XXX: not complete but not completely erroneous */
5382 if (env->cpu_index != 0 || env->next_cpu != NULL) {
5383 /* more than one CPU: do not sleep because another CPU may
5384 wake this one */
5385 } else {
5386 do_hlt();
5387 }
5388#endif
5389}
5390
5391void helper_debug(void)
5392{
5393 env->exception_index = EXCP_DEBUG;
5394 cpu_loop_exit();
5395}
5396
5397void helper_raise_interrupt(int intno, int next_eip_addend)
5398{
5399 raise_interrupt(intno, 1, 0, next_eip_addend);
5400}
5401
5402void helper_raise_exception(int exception_index)
5403{
5404 raise_exception(exception_index);
5405}
5406
5407void helper_cli(void)
5408{
5409 env->eflags &= ~IF_MASK;
5410}
5411
5412void helper_sti(void)
5413{
5414 env->eflags |= IF_MASK;
5415}
5416
5417#ifdef VBOX
5418void helper_cli_vme(void)
5419{
5420 env->eflags &= ~VIF_MASK;
5421}
5422
5423void helper_sti_vme(void)
5424{
5425 /* First check, then change eflags according to the AMD manual */
5426 if (env->eflags & VIP_MASK) {
5427 raise_exception(EXCP0D_GPF);
5428 }
5429 env->eflags |= VIF_MASK;
5430}
5431#endif
5432
5433#if 0
5434/* vm86plus instructions */
5435void helper_cli_vm(void)
5436{
5437 env->eflags &= ~VIF_MASK;
5438}
5439
5440void helper_sti_vm(void)
5441{
5442 env->eflags |= VIF_MASK;
5443 if (env->eflags & VIP_MASK) {
5444 raise_exception(EXCP0D_GPF);
5445 }
5446}
5447#endif
5448
5449void helper_set_inhibit_irq(void)
5450{
5451 env->hflags |= HF_INHIBIT_IRQ_MASK;
5452}
5453
5454void helper_reset_inhibit_irq(void)
5455{
5456 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
5457}
5458
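/* BOUND: raise #BR (EXCP05_BOUND) if the signed index v lies outside the [low, high] pair stored at a0. */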
5459void helper_boundw(target_ulong a0, int v)
5460{
5461 int low, high;
5462 low = ldsw(a0);
5463 high = ldsw(a0 + 2);
5464 v = (int16_t)v;
5465 if (v < low || v > high) {
5466 raise_exception(EXCP05_BOUND);
5467 }
5468 FORCE_RET();
5469}
5470
5471void helper_boundl(target_ulong a0, int v)
5472{
5473 int low, high;
5474 low = ldl(a0);
5475 high = ldl(a0 + 4);
5476 if (v < low || v > high) {
5477 raise_exception(EXCP05_BOUND);
5478 }
5479 FORCE_RET();
5480}
5481
5482static float approx_rsqrt(float a)
5483{
5484 return 1.0 / sqrt(a);
5485}
5486
5487static float approx_rcp(float a)
5488{
5489 return 1.0 / a;
5490}
5491
5492#if !defined(CONFIG_USER_ONLY)
5493
5494#define MMUSUFFIX _mmu
5495
5496#define SHIFT 0
5497#include "softmmu_template.h"
5498
5499#define SHIFT 1
5500#include "softmmu_template.h"
5501
5502#define SHIFT 2
5503#include "softmmu_template.h"
5504
5505#define SHIFT 3
5506#include "softmmu_template.h"
5507
5508#endif
5509
5510#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
5511/* This code assumes that real physical addresses always fit into a host CPU register,
5512 which is wrong in general but true for our current use cases. */
5513RTCCUINTREG REGPARM __ldb_vbox_phys(RTCCUINTREG addr)
5514{
5515 return remR3PhysReadS8(addr);
5516}
5517RTCCUINTREG REGPARM __ldub_vbox_phys(RTCCUINTREG addr)
5518{
5519 return remR3PhysReadU8(addr);
5520}
5521void REGPARM __stb_vbox_phys(RTCCUINTREG addr, RTCCUINTREG val)
5522{
5523 remR3PhysWriteU8(addr, val);
5524}
5525RTCCUINTREG REGPARM __ldw_vbox_phys(RTCCUINTREG addr)
5526{
5527 return remR3PhysReadS16(addr);
5528}
5529RTCCUINTREG REGPARM __lduw_vbox_phys(RTCCUINTREG addr)
5530{
5531 return remR3PhysReadU16(addr);
5532}
5533void REGPARM __stw_vbox_phys(RTCCUINTREG addr, RTCCUINTREG val)
5534{
5535 remR3PhysWriteU16(addr, val);
5536}
5537RTCCUINTREG REGPARM __ldl_vbox_phys(RTCCUINTREG addr)
5538{
5539 return remR3PhysReadS32(addr);
5540}
5541RTCCUINTREG REGPARM __ldul_vbox_phys(RTCCUINTREG addr)
5542{
5543 return remR3PhysReadU32(addr);
5544}
5545void REGPARM __stl_vbox_phys(RTCCUINTREG addr, RTCCUINTREG val)
5546{
5547 remR3PhysWriteU32(addr, val);
5548}
5549uint64_t REGPARM __ldq_vbox_phys(RTCCUINTREG addr)
5550{
5551 return remR3PhysReadU64(addr);
5552}
5553void REGPARM __stq_vbox_phys(RTCCUINTREG addr, uint64_t val)
5554{
5555 remR3PhysWriteU64(addr, val);
5556}
5557#endif
5558
5559/* try to fill the TLB and raise an exception on error. If retaddr is
5560 NULL, it means that the function was called from C code (i.e. not
5561 from generated code or from helper.c) */
5562/* XXX: fix it to restore all registers */
5563void tlb_fill(target_ulong addr, int is_write, int mmu_idx, void *retaddr)
5564{
5565 TranslationBlock *tb;
5566 int ret;
5567 unsigned long pc;
5568 CPUX86State *saved_env;
5569
5570 /* XXX: hack to restore env in all cases, even if not called from
5571 generated code */
5572 saved_env = env;
5573 env = cpu_single_env;
5574
5575 ret = cpu_x86_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
5576 if (ret) {
5577 if (retaddr) {
5578 /* now we have a real cpu fault */
5579 pc = (unsigned long)retaddr;
5580 tb = tb_find_pc(pc);
5581 if (tb) {
5582 /* the PC is inside the translated code. It means that we have
5583 a virtual CPU fault */
5584 cpu_restore_state(tb, env, pc, NULL);
5585 }
5586 }
5587 raise_exception_err(env->exception_index, env->error_code);
5588 }
5589 env = saved_env;
5590}
5591
5592#ifdef VBOX
5593
5594/**
5595 * Correctly computes the eflags.
5596 * @returns eflags.
5597 * @param env1 CPU environment.
5598 */
5599uint32_t raw_compute_eflags(CPUX86State *env1)
5600{
5601 CPUX86State *savedenv = env;
5602 uint32_t efl;
5603 env = env1;
5604 efl = compute_eflags();
5605 env = savedenv;
5606 return efl;
5607}
5608
5609/**
5610 * Reads a byte from a virtual address in the guest memory area.
5611 * XXX: does this work for all addresses? swapped-out pages?
5612 * @returns the data byte read.
5613 * @param env1 CPU environment.
5614 * @param pvAddr GC Virtual address.
5615 */
5616uint8_t read_byte(CPUX86State *env1, target_ulong addr)
5617{
5618 CPUX86State *savedenv = env;
5619 uint8_t u8;
5620 env = env1;
5621 u8 = ldub_kernel(addr);
5622 env = savedenv;
5623 return u8;
5624}
5625
5626/**
5627 * Reads a word from a virtual address in the guest memory area.
5628 * XXX: does this work for all addresses? swapped-out pages?
5629 * @returns the data word read.
5630 * @param env1 CPU environment.
5631 * @param pvAddr GC Virtual address.
5632 */
5633uint16_t read_word(CPUX86State *env1, target_ulong addr)
5634{
5635 CPUX86State *savedenv = env;
5636 uint16_t u16;
5637 env = env1;
5638 u16 = lduw_kernel(addr);
5639 env = savedenv;
5640 return u16;
5641}
5642
5643/**
5644 * Reads a dword from a virtual address in the guest memory area.
5645 * XXX: does this work for all addresses? swapped-out pages?
5646 * @returns the dword read.
5647 * @param env1 CPU environment.
5648 * @param pvAddr GC Virtual address.
5649 */
5650uint32_t read_dword(CPUX86State *env1, target_ulong addr)
5651{
5652 CPUX86State *savedenv = env;
5653 uint32_t u32;
5654 env = env1;
5655 u32 = ldl_kernel(addr);
5656 env = savedenv;
5657 return u32;
5658}
5659
5660/**
5661 * Writes a byte to a virtual address in the guest memory area.
5662 * XXX: does this work for all addresses? swapped-out pages?
5664 * @param env1 CPU environment.
5665 * @param pvAddr GC Virtual address.
5666 * @param val byte value
5667 */
5668void write_byte(CPUX86State *env1, target_ulong addr, uint8_t val)
5669{
5670 CPUX86State *savedenv = env;
5671 env = env1;
5672 stb(addr, val);
5673 env = savedenv;
5674}
5675
5676void write_word(CPUX86State *env1, target_ulong addr, uint16_t val)
5677{
5678 CPUX86State *savedenv = env;
5679 env = env1;
5680 stw(addr, val);
5681 env = savedenv;
5682}
5683
5684void write_dword(CPUX86State *env1, target_ulong addr, uint32_t val)
5685{
5686 CPUX86State *savedenv = env;
5687 env = env1;
5688 stl(addr, val);
5689 env = savedenv;
5690}
5691
5692/**
5693 * Correctly loads selector into segment register with updating internal
5694 * qemu data/caches.
5695 * @param env1 CPU environment.
5696 * @param seg_reg Segment register.
5697 * @param selector Selector to load.
5698 */
5699void sync_seg(CPUX86State *env1, int seg_reg, int selector)
5700{
5701 CPUX86State *savedenv = env;
5702#ifdef FORCE_SEGMENT_SYNC
5703 jmp_buf old_buf;
5704#endif
5705
5706 env = env1;
5707
5708 if ( env->eflags & X86_EFL_VM
5709 || !(env->cr[0] & X86_CR0_PE))
5710 {
5711 load_seg_vm(seg_reg, selector);
5712
5713 env = savedenv;
5714
5715 /* Successful sync. */
5716 env1->segs[seg_reg].newselector = 0;
5717 }
5718 else
5719 {
5720 /* For some reason this works even without saving/restoring the jump buffer, and since the code
5721 is time critical, let's not do that. */
5722#ifdef FORCE_SEGMENT_SYNC
5723 memcpy(&old_buf, &env1->jmp_env, sizeof(old_buf));
5724#endif
5725 if (setjmp(env1->jmp_env) == 0)
5726 {
5727 if (seg_reg == R_CS)
5728 {
5729 uint32_t e1, e2;
5730 e1 = e2 = 0;
5731 load_segment(&e1, &e2, selector);
5732 cpu_x86_load_seg_cache(env, R_CS, selector,
5733 get_seg_base(e1, e2),
5734 get_seg_limit(e1, e2),
5735 e2);
5736 }
5737 else
5738 helper_load_seg(seg_reg, selector);
5739 /* We used to use tss_load_seg(seg_reg, selector); which, for some reason, ignored
5740 loading 0 selectors and in turn led to subtle problems like #3588. */
5741
5742 env = savedenv;
5743
5744 /* Successful sync. */
5745 env1->segs[seg_reg].newselector = 0;
5746 }
5747 else
5748 {
5749 env = savedenv;
5750
5751 /* Postpone sync until the guest uses the selector. */
5752 env1->segs[seg_reg].selector = selector; /* hidden values are now incorrect, but will be resynced when this register is accessed. */
5753 env1->segs[seg_reg].newselector = selector;
5754 Log(("sync_seg: out of sync seg_reg=%d selector=%#x\n", seg_reg, selector));
5755 env1->exception_index = -1;
5756 env1->error_code = 0;
5757 env1->old_exception = -1;
5758 }
5759#ifdef FORCE_SEGMENT_SYNC
5760 memcpy(&env1->jmp_env, &old_buf, sizeof(old_buf));
5761#endif
5762 }
5763
5764}
5765
5766DECLINLINE(void) tb_reset_jump(TranslationBlock *tb, int n)
5767{
5768 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
5769}
5770
5771
5772int emulate_single_instr(CPUX86State *env1)
5773{
5774 TranslationBlock *tb;
5775 TranslationBlock *current;
5776 int flags;
5777 uint8_t *tc_ptr;
5778 target_ulong old_eip;
5779
5780 /* ensures env is loaded! */
5781 CPUX86State *savedenv = env;
5782 env = env1;
5783
5784 RAWEx_ProfileStart(env, STATS_EMULATE_SINGLE_INSTR);
5785
5786 current = env->current_tb;
5787 env->current_tb = NULL;
5788 flags = env->hflags | (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
5789
5790 /*
5791 * Translate only one instruction.
5792 */
5793 ASMAtomicOrU32(&env->state, CPU_EMULATE_SINGLE_INSTR);
5794 tb = tb_gen_code(env, env->eip + env->segs[R_CS].base,
5795 env->segs[R_CS].base, flags, 0);
5796
5797 ASMAtomicAndU32(&env->state, ~CPU_EMULATE_SINGLE_INSTR);
5798
5799
5800 /* tb_link_phys: */
5801 tb->jmp_first = (TranslationBlock *)((intptr_t)tb | 2);
5802 tb->jmp_next[0] = NULL;
5803 tb->jmp_next[1] = NULL;
5804 Assert(tb->jmp_next[0] == NULL);
5805 Assert(tb->jmp_next[1] == NULL);
5806 if (tb->tb_next_offset[0] != 0xffff)
5807 tb_reset_jump(tb, 0);
5808 if (tb->tb_next_offset[1] != 0xffff)
5809 tb_reset_jump(tb, 1);
5810
5811 /*
5812 * Execute it using emulation
5813 */
5814 old_eip = env->eip;
5815 env->current_tb = tb;
5816
5817 /*
5818 * eip remains the same for repeated instructions; it is unclear why qemu doesn't do a jump inside the generated code.
5819 * This is perhaps not a very safe hack.
5820 */
5821 while(old_eip == env->eip)
5822 {
5823 tc_ptr = tb->tc_ptr;
5824
5825#if defined(VBOX) && defined(GCC_WITH_BUGGY_REGPARM)
5826 int fake_ret;
5827 tcg_qemu_tb_exec(tc_ptr, fake_ret);
5828#else
5829 tcg_qemu_tb_exec(tc_ptr);
5830#endif
5831 /*
5832 * Exit once we detect an external interrupt and interrupts are enabled
5833 */
5834 if( (env->interrupt_request & (CPU_INTERRUPT_EXTERNAL_EXIT|CPU_INTERRUPT_EXTERNAL_TIMER)) ||
5835 ( (env->eflags & IF_MASK) &&
5836 !(env->hflags & HF_INHIBIT_IRQ_MASK) &&
5837 (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD) ) )
5838 {
5839 break;
5840 }
5841 }
5842 env->current_tb = current;
5843
5844 tb_phys_invalidate(tb, -1);
5845 tb_free(tb);
5846/*
5847 Assert(tb->tb_next_offset[0] == 0xffff);
5848 Assert(tb->tb_next_offset[1] == 0xffff);
5849 Assert(tb->tb_next[0] == 0xffff);
5850 Assert(tb->tb_next[1] == 0xffff);
5851 Assert(tb->jmp_next[0] == NULL);
5852 Assert(tb->jmp_next[1] == NULL);
5853 Assert(tb->jmp_first == NULL); */
5854
5855 RAWEx_ProfileStop(env, STATS_EMULATE_SINGLE_INSTR);
5856
5857 /*
5858 * Execute the next instruction when we encounter instruction fusing.
5859 */
5860 if (env->hflags & HF_INHIBIT_IRQ_MASK)
5861 {
5862 Log(("REM: Emulating next instruction due to instruction fusing (HF_INHIBIT_IRQ_MASK) at %RGv\n", env->eip));
5863 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
5864 emulate_single_instr(env);
5865 }
5866
5867 env = savedenv;
5868 return 0;
5869}
5870
5871/**
5872 * Correctly loads a new ldtr selector.
5873 *
5874 * @param env1 CPU environment.
5875 * @param selector Selector to load.
5876 */
5877void sync_ldtr(CPUX86State *env1, int selector)
5878{
5879 CPUX86State *saved_env = env;
5880 if (setjmp(env1->jmp_env) == 0)
5881 {
5882 env = env1;
5883 helper_lldt(selector);
5884 env = saved_env;
5885 }
5886 else
5887 {
5888 env = saved_env;
5889#ifdef VBOX_STRICT
5890 cpu_abort(env1, "sync_ldtr: selector=%#x\n", selector);
5891#endif
5892 }
5893}
5894
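/* Fetch SS:ESP for the given privilege level from the current TSS (16- or 32-bit format).
   Returns 0 if the entry lies outside the TSS limit, 1 on success. */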
5895int get_ss_esp_from_tss_raw(CPUX86State *env1, uint32_t *ss_ptr,
5896 uint32_t *esp_ptr, int dpl)
5897{
5898 int type, index, shift;
5899
5900 CPUX86State *savedenv = env;
5901 env = env1;
5902
5903 if (!(env->tr.flags & DESC_P_MASK))
5904 cpu_abort(env, "invalid tss");
5905 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
5906 if ((type & 7) != 1)
5907 cpu_abort(env, "invalid tss type %d", type);
5908 shift = type >> 3;
5909 index = (dpl * 4 + 2) << shift;
5910 if (index + (4 << shift) - 1 > env->tr.limit)
5911 {
5912 env = savedenv;
5913 return 0;
5914 }
5915 //raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
5916
5917 if (shift == 0) {
5918 *esp_ptr = lduw_kernel(env->tr.base + index);
5919 *ss_ptr = lduw_kernel(env->tr.base + index + 2);
5920 } else {
5921 *esp_ptr = ldl_kernel(env->tr.base + index);
5922 *ss_ptr = lduw_kernel(env->tr.base + index + 4);
5923 }
5924
5925 env = savedenv;
5926 return 1;
5927}
5928
5929//*****************************************************************************
5930// Needs to be at the bottom of the file (overriding macros)
5931
5932#ifndef VBOX
5933static inline CPU86_LDouble helper_fldt_raw(uint8_t *ptr)
5934#else /* VBOX */
5935DECLINLINE(CPU86_LDouble) helper_fldt_raw(uint8_t *ptr)
5936#endif /* VBOX */
5937{
5938 return *(CPU86_LDouble *)ptr;
5939}
5940
5941#ifndef VBOX
5942static inline void helper_fstt_raw(CPU86_LDouble f, uint8_t *ptr)
5943#else /* VBOX */
5944DECLINLINE(void) helper_fstt_raw(CPU86_LDouble f, uint8_t *ptr)
5945#endif /* VBOX */
5946{
5947 *(CPU86_LDouble *)ptr = f;
5948}
5949
5950#undef stw
5951#undef stl
5952#undef stq
5953#define stw(a,b) *(uint16_t *)(a) = (uint16_t)(b)
5954#define stl(a,b) *(uint32_t *)(a) = (uint32_t)(b)
5955#define stq(a,b) *(uint64_t *)(a) = (uint64_t)(b)
5956
5957//*****************************************************************************
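/* Write env's FPU/SSE state out to the raw-mode context at ptr: FXSAVE-style layout when the
   CPU reports CPUID_FXSR, legacy FSAVE layout otherwise. */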
5958void restore_raw_fp_state(CPUX86State *env, uint8_t *ptr)
5959{
5960 int fpus, fptag, i, nb_xmm_regs;
5961 CPU86_LDouble tmp;
5962 uint8_t *addr;
5963 int data64 = !!(env->hflags & HF_LMA_MASK);
5964
5965 if (env->cpuid_features & CPUID_FXSR)
5966 {
5967 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
5968 fptag = 0;
5969 for(i = 0; i < 8; i++) {
5970 fptag |= (env->fptags[i] << i);
5971 }
5972 stw(ptr, env->fpuc);
5973 stw(ptr + 2, fpus);
5974 stw(ptr + 4, fptag ^ 0xff);
5975
5976 addr = ptr + 0x20;
5977 for(i = 0;i < 8; i++) {
5978 tmp = ST(i);
5979 helper_fstt_raw(tmp, addr);
5980 addr += 16;
5981 }
5982
5983 if (env->cr[4] & CR4_OSFXSR_MASK) {
5984 /* XXX: finish it */
5985 stl(ptr + 0x18, env->mxcsr); /* mxcsr */
5986 stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
5987 nb_xmm_regs = 8 << data64;
5988 addr = ptr + 0xa0;
5989 for(i = 0; i < nb_xmm_regs; i++) {
5990#if __GNUC__ < 4
5991 stq(addr, env->xmm_regs[i].XMM_Q(0));
5992 stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
5993#else /* VBOX + __GNUC__ >= 4: gcc 4.x compiler bug - it runs out of registers for the 64-bit value. */
5994 stl(addr, env->xmm_regs[i].XMM_L(0));
5995 stl(addr + 4, env->xmm_regs[i].XMM_L(1));
5996 stl(addr + 8, env->xmm_regs[i].XMM_L(2));
5997 stl(addr + 12, env->xmm_regs[i].XMM_L(3));
5998#endif
5999 addr += 16;
6000 }
6001 }
6002 }
6003 else
6004 {
6005 PX86FPUSTATE fp = (PX86FPUSTATE)ptr;
6006 int fptag;
6007
6008 fp->FCW = env->fpuc;
6009 fp->FSW = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
6010 fptag = 0;
6011 for (i=7; i>=0; i--) {
6012 fptag <<= 2;
6013 if (env->fptags[i]) {
6014 fptag |= 3;
6015 } else {
6016 /* the FPU automatically computes it */
6017 }
6018 }
6019 fp->FTW = fptag;
6020
6021 for(i = 0;i < 8; i++) {
6022 tmp = ST(i);
6023 helper_fstt_raw(tmp, &fp->regs[i].reg[0]);
6024 }
6025 }
6026}
6027
6028//*****************************************************************************
6029#undef lduw
6030#undef ldl
6031#undef ldq
6032#define lduw(a) *(uint16_t *)(a)
6033#define ldl(a) *(uint32_t *)(a)
6034#define ldq(a) *(uint64_t *)(a)
6035//*****************************************************************************
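/* Load the FPU/SSE state from the raw-mode context at ptr into env; counterpart of restore_raw_fp_state above. */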
6036void save_raw_fp_state(CPUX86State *env, uint8_t *ptr)
6037{
6038 int i, fpus, fptag, nb_xmm_regs;
6039 CPU86_LDouble tmp;
6040 uint8_t *addr;
6041 int data64 = !!(env->hflags & HF_LMA_MASK); /* don't use HF_CS64_MASK here as cs hasn't been synced when this function is called. */
6042
6043 if (env->cpuid_features & CPUID_FXSR)
6044 {
6045 env->fpuc = lduw(ptr);
6046 fpus = lduw(ptr + 2);
6047 fptag = lduw(ptr + 4);
6048 env->fpstt = (fpus >> 11) & 7;
6049 env->fpus = fpus & ~0x3800;
6050 fptag ^= 0xff;
6051 for(i = 0;i < 8; i++) {
6052 env->fptags[i] = ((fptag >> i) & 1);
6053 }
6054
6055 addr = ptr + 0x20;
6056 for(i = 0;i < 8; i++) {
6057 tmp = helper_fldt_raw(addr);
6058 ST(i) = tmp;
6059 addr += 16;
6060 }
6061
6062 if (env->cr[4] & CR4_OSFXSR_MASK) {
6063 /* XXX: finish it, endianness */
6064 env->mxcsr = ldl(ptr + 0x18);
6065 //ldl(ptr + 0x1c);
6066 nb_xmm_regs = 8 << data64;
6067 addr = ptr + 0xa0;
6068 for(i = 0; i < nb_xmm_regs; i++) {
6069#if HC_ARCH_BITS == 32
6070 /* this is a workaround for http://gcc.gnu.org/bugzilla/show_bug.cgi?id=35135 */
6071 env->xmm_regs[i].XMM_L(0) = ldl(addr);
6072 env->xmm_regs[i].XMM_L(1) = ldl(addr + 4);
6073 env->xmm_regs[i].XMM_L(2) = ldl(addr + 8);
6074 env->xmm_regs[i].XMM_L(3) = ldl(addr + 12);
6075#else
6076 env->xmm_regs[i].XMM_Q(0) = ldq(addr);
6077 env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
6078#endif
6079 addr += 16;
6080 }
6081 }
6082 }
6083 else
6084 {
6085 PX86FPUSTATE fp = (PX86FPUSTATE)ptr;
6086 int fptag, j;
6087
6088 env->fpuc = fp->FCW;
6089 env->fpstt = (fp->FSW >> 11) & 7;
6090 env->fpus = fp->FSW & ~0x3800;
6091 fptag = fp->FTW;
6092 for(i = 0;i < 8; i++) {
6093 env->fptags[i] = ((fptag & 3) == 3);
6094 fptag >>= 2;
6095 }
6096 j = env->fpstt;
6097 for(i = 0;i < 8; i++) {
6098 tmp = helper_fldt_raw(&fp->regs[i].reg[0]);
6099 ST(i) = tmp;
6100 }
6101 }
6102}
6103//*****************************************************************************
6104//*****************************************************************************
6105
6106#endif /* VBOX */
6107
6108/* Secure Virtual Machine helpers */
6109
6110#if defined(CONFIG_USER_ONLY)
6111
6112void helper_vmrun(int aflag, int next_eip_addend)
6113{
6114}
6115void helper_vmmcall(void)
6116{
6117}
6118void helper_vmload(int aflag)
6119{
6120}
6121void helper_vmsave(int aflag)
6122{
6123}
6124void helper_stgi(void)
6125{
6126}
6127void helper_clgi(void)
6128{
6129}
6130void helper_skinit(void)
6131{
6132}
6133void helper_invlpga(int aflag)
6134{
6135}
6136void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
6137{
6138}
6139void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
6140{
6141}
6142
6143void helper_svm_check_io(uint32_t port, uint32_t param,
6144 uint32_t next_eip_addend)
6145{
6146}
6147#else
6148
6149#ifndef VBOX
6150static inline void svm_save_seg(target_phys_addr_t addr,
6151#else /* VBOX */
6152DECLINLINE(void) svm_save_seg(target_phys_addr_t addr,
6153#endif /* VBOX */
6154 const SegmentCache *sc)
6155{
6156 stw_phys(addr + offsetof(struct vmcb_seg, selector),
6157 sc->selector);
6158 stq_phys(addr + offsetof(struct vmcb_seg, base),
6159 sc->base);
6160 stl_phys(addr + offsetof(struct vmcb_seg, limit),
6161 sc->limit);
6162 stw_phys(addr + offsetof(struct vmcb_seg, attrib),
6163 ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
6164}
6165
6166#ifndef VBOX
6167static inline void svm_load_seg(target_phys_addr_t addr, SegmentCache *sc)
6168#else /* VBOX */
6169DECLINLINE(void) svm_load_seg(target_phys_addr_t addr, SegmentCache *sc)
6170#endif /* VBOX */
6171{
6172 unsigned int flags;
6173
6174 sc->selector = lduw_phys(addr + offsetof(struct vmcb_seg, selector));
6175 sc->base = ldq_phys(addr + offsetof(struct vmcb_seg, base));
6176 sc->limit = ldl_phys(addr + offsetof(struct vmcb_seg, limit));
6177 flags = lduw_phys(addr + offsetof(struct vmcb_seg, attrib));
6178 sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
6179}
6180
6181#ifndef VBOX
6182static inline void svm_load_seg_cache(target_phys_addr_t addr,
6183#else /* VBOX */
6184DECLINLINE(void) svm_load_seg_cache(target_phys_addr_t addr,
6185#endif /* VBOX */
6186 CPUState *env, int seg_reg)
6187{
6188 SegmentCache sc1, *sc = &sc1;
6189 svm_load_seg(addr, sc);
6190 cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
6191 sc->base, sc->limit, sc->flags);
6192}
6193
6194void helper_vmrun(int aflag, int next_eip_addend)
6195{
6196 target_ulong addr;
6197 uint32_t event_inj;
6198 uint32_t int_ctl;
6199
6200 helper_svm_check_intercept_param(SVM_EXIT_VMRUN, 0);
6201
6202 if (aflag == 2)
6203 addr = EAX;
6204 else
6205 addr = (uint32_t)EAX;
6206
6207 if (loglevel & CPU_LOG_TB_IN_ASM)
6208 fprintf(logfile,"vmrun! " TARGET_FMT_lx "\n", addr);
6209
6210 env->vm_vmcb = addr;
6211
6212 /* save the current CPU state in the hsave page */
6213 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
6214 stl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
6215
6216 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base), env->idt.base);
6217 stl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
6218
6219 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
6220 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
6221 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
6222 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
6223 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
6224 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);
6225
6226 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
6227 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags), compute_eflags());
6228
6229 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.es),
6230 &env->segs[R_ES]);
6231 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.cs),
6232 &env->segs[R_CS]);
6233 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ss),
6234 &env->segs[R_SS]);
6235 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ds),
6236 &env->segs[R_DS]);
6237
6238 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip),
6239 EIP + next_eip_addend);
6240 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), ESP);
6241 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), EAX);
6242
6243 /* load the interception bitmaps so we do not need to access the
6244 vmcb in svm mode */
6245 env->intercept = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept));
6246 env->intercept_cr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_read));
6247 env->intercept_cr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_write));
6248 env->intercept_dr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_read));
6249 env->intercept_dr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_write));
6250 env->intercept_exceptions = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_exceptions));
6251
6252 /* enable intercepts */
6253 env->hflags |= HF_SVMI_MASK;
6254
6255 env->tsc_offset = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.tsc_offset));
6256
6257 env->gdt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base));
6258 env->gdt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit));
6259
6260 env->idt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base));
6261 env->idt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit));
6262
6263 /* clear exit_info_2 so we behave like the real hardware */
6264 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);
6265
6266 cpu_x86_update_cr0(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0)));
6267 cpu_x86_update_cr4(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4)));
6268 cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3)));
6269 env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2));
6270 int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
6271 env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
6272 if (int_ctl & V_INTR_MASKING_MASK) {
6273 env->v_tpr = int_ctl & V_TPR_MASK;
6274 env->hflags2 |= HF2_VINTR_MASK;
6275 if (env->eflags & IF_MASK)
6276 env->hflags2 |= HF2_HIF_MASK;
6277 }
6278
6279 cpu_load_efer(env,
6280 ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer)));
6281 env->eflags = 0;
6282 load_eflags(ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags)),
6283 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
6284 CC_OP = CC_OP_EFLAGS;
6285
6286 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.es),
6287 env, R_ES);
6288 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.cs),
6289 env, R_CS);
6290 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ss),
6291 env, R_SS);
6292 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ds),
6293 env, R_DS);
6294
6295 EIP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));
6296 env->eip = EIP;
6297 ESP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp));
6298 EAX = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax));
6299 env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7));
6300 env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6));
6301 cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl)));
6302
6303 /* FIXME: guest state consistency checks */
6304
6305 switch(ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
6306 case TLB_CONTROL_DO_NOTHING:
6307 break;
6308 case TLB_CONTROL_FLUSH_ALL_ASID:
6309 /* FIXME: this is not 100% correct but should work for now */
6310 tlb_flush(env, 1);
6311 break;
6312 }
6313
6314 env->hflags2 |= HF2_GIF_MASK;
6315
6316 if (int_ctl & V_IRQ_MASK) {
6317 env->interrupt_request |= CPU_INTERRUPT_VIRQ;
6318 }
6319
6320 /* maybe we need to inject an event */
6321 event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
6322 if (event_inj & SVM_EVTINJ_VALID) {
6323 uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
6324 uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
6325 uint32_t event_inj_err = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err));
6326 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj & ~SVM_EVTINJ_VALID);
6327
6328 if (loglevel & CPU_LOG_TB_IN_ASM)
6329 fprintf(logfile, "Injecting(%#hx): ", valid_err);
6330 /* FIXME: need to implement valid_err */
6331 switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
6332 case SVM_EVTINJ_TYPE_INTR:
6333 env->exception_index = vector;
6334 env->error_code = event_inj_err;
6335 env->exception_is_int = 0;
6336 env->exception_next_eip = -1;
6337 if (loglevel & CPU_LOG_TB_IN_ASM)
6338 fprintf(logfile, "INTR");
6339            /* XXX: is it always correct? */
6340 do_interrupt(vector, 0, 0, 0, 1);
6341 break;
6342 case SVM_EVTINJ_TYPE_NMI:
6343 env->exception_index = EXCP02_NMI;
6344 env->error_code = event_inj_err;
6345 env->exception_is_int = 0;
6346 env->exception_next_eip = EIP;
6347 if (loglevel & CPU_LOG_TB_IN_ASM)
6348 fprintf(logfile, "NMI");
6349 cpu_loop_exit();
6350 break;
6351 case SVM_EVTINJ_TYPE_EXEPT:
6352 env->exception_index = vector;
6353 env->error_code = event_inj_err;
6354 env->exception_is_int = 0;
6355 env->exception_next_eip = -1;
6356 if (loglevel & CPU_LOG_TB_IN_ASM)
6357 fprintf(logfile, "EXEPT");
6358 cpu_loop_exit();
6359 break;
6360 case SVM_EVTINJ_TYPE_SOFT:
6361 env->exception_index = vector;
6362 env->error_code = event_inj_err;
6363 env->exception_is_int = 1;
6364 env->exception_next_eip = EIP;
6365 if (loglevel & CPU_LOG_TB_IN_ASM)
6366 fprintf(logfile, "SOFT");
6367 cpu_loop_exit();
6368 break;
6369 }
6370 if (loglevel & CPU_LOG_TB_IN_ASM)
6371 fprintf(logfile, " %#x %#x\n", env->exception_index, env->error_code);
6372 }
6373}
6374
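/* VMMCALL: only the intercept check is performed; when not intercepted the
   instruction simply raises #UD, no hypervisor call is emulated here. */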
6375void helper_vmmcall(void)
6376{
6377 helper_svm_check_intercept_param(SVM_EXIT_VMMCALL, 0);
6378 raise_exception(EXCP06_ILLOP);
6379}
6380
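/* VMLOAD: reload FS, GS, TR and LDTR together with the KERNEL_GS_BASE,
   STAR/LSTAR/CSTAR/SFMASK and SYSENTER MSRs from the VMCB at the physical
   address in rAX (truncated to 32 bits outside 64-bit address mode). */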
6381void helper_vmload(int aflag)
6382{
6383 target_ulong addr;
6384 helper_svm_check_intercept_param(SVM_EXIT_VMLOAD, 0);
6385
6386 if (aflag == 2)
6387 addr = EAX;
6388 else
6389 addr = (uint32_t)EAX;
6390
6391 if (loglevel & CPU_LOG_TB_IN_ASM)
6392 fprintf(logfile,"vmload! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
6393 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
6394 env->segs[R_FS].base);
6395
6396 svm_load_seg_cache(addr + offsetof(struct vmcb, save.fs),
6397 env, R_FS);
6398 svm_load_seg_cache(addr + offsetof(struct vmcb, save.gs),
6399 env, R_GS);
6400 svm_load_seg(addr + offsetof(struct vmcb, save.tr),
6401 &env->tr);
6402 svm_load_seg(addr + offsetof(struct vmcb, save.ldtr),
6403 &env->ldt);
6404
6405#ifdef TARGET_X86_64
6406 env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base));
6407 env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar));
6408 env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar));
6409 env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask));
6410#endif
6411 env->star = ldq_phys(addr + offsetof(struct vmcb, save.star));
6412 env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs));
6413 env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_esp));
6414 env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_eip));
6415}
6416
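/* VMSAVE: the mirror of VMLOAD above; stores the same register set back
   into the VMCB at the physical address in rAX. */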
6417void helper_vmsave(int aflag)
6418{
6419 target_ulong addr;
6420 helper_svm_check_intercept_param(SVM_EXIT_VMSAVE, 0);
6421
6422 if (aflag == 2)
6423 addr = EAX;
6424 else
6425 addr = (uint32_t)EAX;
6426
6427 if (loglevel & CPU_LOG_TB_IN_ASM)
6428 fprintf(logfile,"vmsave! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
6429 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
6430 env->segs[R_FS].base);
6431
6432 svm_save_seg(addr + offsetof(struct vmcb, save.fs),
6433 &env->segs[R_FS]);
6434 svm_save_seg(addr + offsetof(struct vmcb, save.gs),
6435 &env->segs[R_GS]);
6436 svm_save_seg(addr + offsetof(struct vmcb, save.tr),
6437 &env->tr);
6438 svm_save_seg(addr + offsetof(struct vmcb, save.ldtr),
6439 &env->ldt);
6440
6441#ifdef TARGET_X86_64
6442 stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base), env->kernelgsbase);
6443 stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
6444 stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
6445 stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
6446#endif
6447 stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
6448 stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
6449 stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp), env->sysenter_esp);
6450 stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip), env->sysenter_eip);
6451}
6452
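/* STGI/CLGI set and clear the global interrupt flag (GIF), which is kept
   in hflags2. */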
6453void helper_stgi(void)
6454{
6455 helper_svm_check_intercept_param(SVM_EXIT_STGI, 0);
6456 env->hflags2 |= HF2_GIF_MASK;
6457}
6458
6459void helper_clgi(void)
6460{
6461 helper_svm_check_intercept_param(SVM_EXIT_CLGI, 0);
6462 env->hflags2 &= ~HF2_GIF_MASK;
6463}
6464
6465void helper_skinit(void)
6466{
6467 helper_svm_check_intercept_param(SVM_EXIT_SKINIT, 0);
6468 /* XXX: not implemented */
6469 raise_exception(EXCP06_ILLOP);
6470}
6471
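/* INVLPGA: flush the TLB entry for the virtual address in rAX; the ASID
   argument is currently ignored (see the note below). */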
6472void helper_invlpga(int aflag)
6473{
6474 target_ulong addr;
6475 helper_svm_check_intercept_param(SVM_EXIT_INVLPGA, 0);
6476
6477 if (aflag == 2)
6478 addr = EAX;
6479 else
6480 addr = (uint32_t)EAX;
6481
6482    /* XXX: could use the ASID to check whether the flush is actually
6483       needed */
6484 tlb_flush_page(env, addr);
6485}
6486
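/* Check whether the given intercept is enabled in the cached intercept
   masks and, if so, generate a #VMEXIT with the given exit code and
   exit_info_1 value. */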
6487void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
6488{
6489 if (likely(!(env->hflags & HF_SVMI_MASK)))
6490 return;
6491#ifndef VBOX
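/* Note: the whole switch below is compiled out for VBox builds (see the
   matching #else near the end of the function), so the inner #else
   branches that expand the gcc case ranges for VBox are effectively
   dead code. */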
6492 switch(type) {
6493#ifndef VBOX
6494 case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
6495#else
6496 case SVM_EXIT_READ_CR0: case SVM_EXIT_READ_CR0 + 1: case SVM_EXIT_READ_CR0 + 2:
6497 case SVM_EXIT_READ_CR0 + 3: case SVM_EXIT_READ_CR0 + 4: case SVM_EXIT_READ_CR0 + 5:
6498 case SVM_EXIT_READ_CR0 + 6: case SVM_EXIT_READ_CR0 + 7: case SVM_EXIT_READ_CR0 + 8:
6499#endif
6500 if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
6501 helper_vmexit(type, param);
6502 }
6503 break;
6504#ifndef VBOX
6505 case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
6506#else
6507 case SVM_EXIT_WRITE_CR0: case SVM_EXIT_WRITE_CR0 + 1: case SVM_EXIT_WRITE_CR0 + 2:
6508 case SVM_EXIT_WRITE_CR0 + 3: case SVM_EXIT_WRITE_CR0 + 4: case SVM_EXIT_WRITE_CR0 + 5:
6509 case SVM_EXIT_WRITE_CR0 + 6: case SVM_EXIT_WRITE_CR0 + 7: case SVM_EXIT_WRITE_CR0 + 8:
6510#endif
6511 if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
6512 helper_vmexit(type, param);
6513 }
6514 break;
6515 case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
6516 if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
6517 helper_vmexit(type, param);
6518 }
6519 break;
6520 case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
6521 if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
6522 helper_vmexit(type, param);
6523 }
6524 break;
6525 case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
6526 if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
6527 helper_vmexit(type, param);
6528 }
6529 break;
6530 case SVM_EXIT_MSR:
6531 if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
6532 /* FIXME: this should be read in at vmrun (faster this way?) */
6533 uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.msrpm_base_pa));
6534 uint32_t t0, t1;
6535 switch((uint32_t)ECX) {
6536 case 0 ... 0x1fff:
6537                t0 = (ECX * 2) % 8;
6538                t1 = (ECX * 2) / 8; /* 2 bits per MSR, so the byte offset is (ECX * 2) / 8 */
6539 break;
6540 case 0xc0000000 ... 0xc0001fff:
6541 t0 = (8192 + ECX - 0xc0000000) * 2;
6542 t1 = (t0 / 8);
6543 t0 %= 8;
6544 break;
6545 case 0xc0010000 ... 0xc0011fff:
6546 t0 = (16384 + ECX - 0xc0010000) * 2;
6547 t1 = (t0 / 8);
6548 t0 %= 8;
6549 break;
6550 default:
6551 helper_vmexit(type, param);
6552 t0 = 0;
6553 t1 = 0;
6554 break;
6555 }
6556 if (ldub_phys(addr + t1) & ((1 << param) << t0))
6557 helper_vmexit(type, param);
6558 }
6559 break;
6560 default:
6561 if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
6562 helper_vmexit(type, param);
6563 }
6564 break;
6565 }
6566#else
6567 AssertMsgFailed(("We shouldn't be here, HWACCM supported differently!"));
6568#endif
6569}
6570
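/* IOIO intercept: consult the IO permission bitmap; if any of the bits
   covering [port, port + size) is set, record the next EIP in exit_info_2
   and generate a #VMEXIT. */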
6571void helper_svm_check_io(uint32_t port, uint32_t param,
6572 uint32_t next_eip_addend)
6573{
6574 if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
6575 /* FIXME: this should be read in at vmrun (faster this way?) */
6576 uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.iopm_base_pa));
6577 uint16_t mask = (1 << ((param >> 4) & 7)) - 1;
6578 if(lduw_phys(addr + port / 8) & (mask << (port & 7))) {
6579 /* next EIP */
6580 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
6581 env->eip + next_eip_addend);
6582 helper_vmexit(SVM_EXIT_IOIO, param | (port << 16));
6583 }
6584 }
6585}
6586
6587/* Note: currently only 32 bits of exit_code are used */
6588void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
6589{
6590 uint32_t int_ctl;
6591
6592 if (loglevel & CPU_LOG_TB_IN_ASM)
6593 fprintf(logfile,"vmexit(%08x, %016" PRIx64 ", %016" PRIx64 ", " TARGET_FMT_lx ")!\n",
6594 exit_code, exit_info_1,
6595 ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2)),
6596 EIP);
6597
6598 if(env->hflags & HF_INHIBIT_IRQ_MASK) {
6599 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), SVM_INTERRUPT_SHADOW_MASK);
6600 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
6601 } else {
6602 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
6603 }
6604
6605 /* Save the VM state in the vmcb */
6606 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.es),
6607 &env->segs[R_ES]);
6608 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.cs),
6609 &env->segs[R_CS]);
6610 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ss),
6611 &env->segs[R_SS]);
6612 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ds),
6613 &env->segs[R_DS]);
6614
6615 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
6616 stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
6617
6618 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base), env->idt.base);
6619 stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
6620
6621 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
6622 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
6623 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
6624 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
6625 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);
6626
6627 int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
6628 int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
6629 int_ctl |= env->v_tpr & V_TPR_MASK;
6630 if (env->interrupt_request & CPU_INTERRUPT_VIRQ)
6631 int_ctl |= V_IRQ_MASK;
6632 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);
6633
6634 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags), compute_eflags());
6635 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip);
6636 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), ESP);
6637 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), EAX);
6638 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
6639 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
6640 stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl), env->hflags & HF_CPL_MASK);
6641
6642 /* Reload the host state from vm_hsave */
6643 env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
6644 env->hflags &= ~HF_SVMI_MASK;
6645 env->intercept = 0;
6646 env->intercept_exceptions = 0;
6647 env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
6648 env->tsc_offset = 0;
6649
6650 env->gdt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base));
6651 env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit));
6652
6653 env->idt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base));
6654 env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit));
6655
6656 cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0)) | CR0_PE_MASK);
6657 cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4)));
6658 cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3)));
6659 /* we need to set the efer after the crs so the hidden flags get
6660 set properly */
6661 cpu_load_efer(env,
6662 ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer)));
6663 env->eflags = 0;
6664 load_eflags(ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags)),
6665 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
6666 CC_OP = CC_OP_EFLAGS;
6667
6668 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.es),
6669 env, R_ES);
6670 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.cs),
6671 env, R_CS);
6672 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ss),
6673 env, R_SS);
6674 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ds),
6675 env, R_DS);
6676
6677 EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
6678 ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp));
6679 EAX = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax));
6680
6681 env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
6682 env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));
6683
6684 /* other setups */
6685 cpu_x86_set_cpl(env, 0);
6686 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code), exit_code);
6687 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1), exit_info_1);
6688
6689 env->hflags2 &= ~HF2_GIF_MASK;
6690 /* FIXME: Resets the current ASID register to zero (host ASID). */
6691
6692 /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */
6693
6694 /* Clears the TSC_OFFSET inside the processor. */
6695
6696    /* If the host is in PAE mode, the processor reloads the host's PDPEs
6697       from the page table indicated by the host's CR3. If the PDPEs contain
6698       illegal state, the processor causes a shutdown. */
6699
6700 /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
6701 env->cr[0] |= CR0_PE_MASK;
6702 env->eflags &= ~VM_MASK;
6703
6704 /* Disables all breakpoints in the host DR7 register. */
6705
6706 /* Checks the reloaded host state for consistency. */
6707
6708    /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
6709       host's code segment or non-canonical (in the case of long mode), a
6710       #GP fault is delivered inside the host. */
6711
6712 /* remove any pending exception */
6713 env->exception_index = -1;
6714 env->error_code = 0;
6715 env->old_exception = -1;
6716
6717 cpu_loop_exit();
6718}
6719
6720#endif
6721
6722/* MMX/SSE */
6723/* XXX: optimize by storing fptt and fptags in the static cpu state */
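/* fptags[] uses 0 for "valid" and 1 for "empty", so enter_mmx marks all
   eight FP registers as valid while emms marks them all as empty. */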
6724void helper_enter_mmx(void)
6725{
6726 env->fpstt = 0;
6727 *(uint32_t *)(env->fptags) = 0;
6728 *(uint32_t *)(env->fptags + 4) = 0;
6729}
6730
6731void helper_emms(void)
6732{
6733 /* set to empty state */
6734 *(uint32_t *)(env->fptags) = 0x01010101;
6735 *(uint32_t *)(env->fptags + 4) = 0x01010101;
6736}
6737
6738/* XXX: suppress */
6739void helper_movq(uint64_t *d, uint64_t *s)
6740{
6741 *d = *s;
6742}
6743
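/* ops_sse.h is instantiated twice: SHIFT 0 generates the 64-bit MMX
   variants and SHIFT 1 the 128-bit SSE variants. helper_template.h is
   instantiated once per operand size (SHIFT 0..3 -> 8/16/32/64 bit). */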
6744#define SHIFT 0
6745#include "ops_sse.h"
6746
6747#define SHIFT 1
6748#include "ops_sse.h"
6749
6750#define SHIFT 0
6751#include "helper_template.h"
6752#undef SHIFT
6753
6754#define SHIFT 1
6755#include "helper_template.h"
6756#undef SHIFT
6757
6758#define SHIFT 2
6759#include "helper_template.h"
6760#undef SHIFT
6761
6762#ifdef TARGET_X86_64
6763
6764#define SHIFT 3
6765#include "helper_template.h"
6766#undef SHIFT
6767
6768#endif
6769
6770/* bit operations */
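/* The bsf/bsr helpers assume a non-zero operand; with 0 the loops below
   would never terminate (the translator is expected to handle the zero
   case, which only affects ZF, before calling them). */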
6771target_ulong helper_bsf(target_ulong t0)
6772{
6773 int count;
6774 target_ulong res;
6775
6776 res = t0;
6777 count = 0;
6778 while ((res & 1) == 0) {
6779 count++;
6780 res >>= 1;
6781 }
6782 return count;
6783}
6784
6785target_ulong helper_bsr(target_ulong t0)
6786{
6787 int count;
6788 target_ulong res, mask;
6789
6790 res = t0;
6791 count = TARGET_LONG_BITS - 1;
6792 mask = (target_ulong)1 << (TARGET_LONG_BITS - 1);
6793 while ((res & mask) == 0) {
6794 count--;
6795 res <<= 1;
6796 }
6797 return count;
6798}
6799
6800
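/* With CC_OP_EFLAGS the flags are already materialized in CC_SRC, so the
   compute functions simply return it (masked to CF for the carry-only
   variant). */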
6801static int compute_all_eflags(void)
6802{
6803 return CC_SRC;
6804}
6805
6806static int compute_c_eflags(void)
6807{
6808 return CC_SRC & CC_C;
6809}
6810
6811#ifndef VBOX
6812CCTable cc_table[CC_OP_NB] = {
6813 [CC_OP_DYNAMIC] = { /* should never happen */ },
6814
6815 [CC_OP_EFLAGS] = { compute_all_eflags, compute_c_eflags },
6816
6817 [CC_OP_MULB] = { compute_all_mulb, compute_c_mull },
6818 [CC_OP_MULW] = { compute_all_mulw, compute_c_mull },
6819 [CC_OP_MULL] = { compute_all_mull, compute_c_mull },
6820
6821 [CC_OP_ADDB] = { compute_all_addb, compute_c_addb },
6822 [CC_OP_ADDW] = { compute_all_addw, compute_c_addw },
6823 [CC_OP_ADDL] = { compute_all_addl, compute_c_addl },
6824
6825 [CC_OP_ADCB] = { compute_all_adcb, compute_c_adcb },
6826 [CC_OP_ADCW] = { compute_all_adcw, compute_c_adcw },
6827 [CC_OP_ADCL] = { compute_all_adcl, compute_c_adcl },
6828
6829 [CC_OP_SUBB] = { compute_all_subb, compute_c_subb },
6830 [CC_OP_SUBW] = { compute_all_subw, compute_c_subw },
6831 [CC_OP_SUBL] = { compute_all_subl, compute_c_subl },
6832
6833 [CC_OP_SBBB] = { compute_all_sbbb, compute_c_sbbb },
6834 [CC_OP_SBBW] = { compute_all_sbbw, compute_c_sbbw },
6835 [CC_OP_SBBL] = { compute_all_sbbl, compute_c_sbbl },
6836
6837 [CC_OP_LOGICB] = { compute_all_logicb, compute_c_logicb },
6838 [CC_OP_LOGICW] = { compute_all_logicw, compute_c_logicw },
6839 [CC_OP_LOGICL] = { compute_all_logicl, compute_c_logicl },
6840
6841 [CC_OP_INCB] = { compute_all_incb, compute_c_incl },
6842 [CC_OP_INCW] = { compute_all_incw, compute_c_incl },
6843 [CC_OP_INCL] = { compute_all_incl, compute_c_incl },
6844
6845 [CC_OP_DECB] = { compute_all_decb, compute_c_incl },
6846 [CC_OP_DECW] = { compute_all_decw, compute_c_incl },
6847 [CC_OP_DECL] = { compute_all_decl, compute_c_incl },
6848
6849 [CC_OP_SHLB] = { compute_all_shlb, compute_c_shlb },
6850 [CC_OP_SHLW] = { compute_all_shlw, compute_c_shlw },
6851 [CC_OP_SHLL] = { compute_all_shll, compute_c_shll },
6852
6853 [CC_OP_SARB] = { compute_all_sarb, compute_c_sarl },
6854 [CC_OP_SARW] = { compute_all_sarw, compute_c_sarl },
6855 [CC_OP_SARL] = { compute_all_sarl, compute_c_sarl },
6856
6857#ifdef TARGET_X86_64
6858 [CC_OP_MULQ] = { compute_all_mulq, compute_c_mull },
6859
6860 [CC_OP_ADDQ] = { compute_all_addq, compute_c_addq },
6861
6862 [CC_OP_ADCQ] = { compute_all_adcq, compute_c_adcq },
6863
6864 [CC_OP_SUBQ] = { compute_all_subq, compute_c_subq },
6865
6866 [CC_OP_SBBQ] = { compute_all_sbbq, compute_c_sbbq },
6867
6868 [CC_OP_LOGICQ] = { compute_all_logicq, compute_c_logicq },
6869
6870 [CC_OP_INCQ] = { compute_all_incq, compute_c_incl },
6871
6872 [CC_OP_DECQ] = { compute_all_decq, compute_c_incl },
6873
6874 [CC_OP_SHLQ] = { compute_all_shlq, compute_c_shlq },
6875
6876 [CC_OP_SARQ] = { compute_all_sarq, compute_c_sarl },
6877#endif
6878};
6879#else /* VBOX */
6880/* Sync carefully with cpu.h */
6881CCTable cc_table[CC_OP_NB] = {
6882 /* CC_OP_DYNAMIC */ { 0, 0 },
6883
6884 /* CC_OP_EFLAGS */ { compute_all_eflags, compute_c_eflags },
6885
6886 /* CC_OP_MULB */ { compute_all_mulb, compute_c_mull },
6887 /* CC_OP_MULW */ { compute_all_mulw, compute_c_mull },
6888 /* CC_OP_MULL */ { compute_all_mull, compute_c_mull },
6889#ifdef TARGET_X86_64
6890 /* CC_OP_MULQ */ { compute_all_mulq, compute_c_mull },
6891#else
6892 /* CC_OP_MULQ */ { 0, 0 },
6893#endif
6894
6895 /* CC_OP_ADDB */ { compute_all_addb, compute_c_addb },
6896 /* CC_OP_ADDW */ { compute_all_addw, compute_c_addw },
6897 /* CC_OP_ADDL */ { compute_all_addl, compute_c_addl },
6898#ifdef TARGET_X86_64
6899 /* CC_OP_ADDQ */ { compute_all_addq, compute_c_addq },
6900#else
6901 /* CC_OP_ADDQ */ { 0, 0 },
6902#endif
6903
6904 /* CC_OP_ADCB */ { compute_all_adcb, compute_c_adcb },
6905 /* CC_OP_ADCW */ { compute_all_adcw, compute_c_adcw },
6906 /* CC_OP_ADCL */ { compute_all_adcl, compute_c_adcl },
6907#ifdef TARGET_X86_64
6908 /* CC_OP_ADCQ */ { compute_all_adcq, compute_c_adcq },
6909#else
6910 /* CC_OP_ADCQ */ { 0, 0 },
6911#endif
6912
6913 /* CC_OP_SUBB */ { compute_all_subb, compute_c_subb },
6914 /* CC_OP_SUBW */ { compute_all_subw, compute_c_subw },
6915 /* CC_OP_SUBL */ { compute_all_subl, compute_c_subl },
6916#ifdef TARGET_X86_64
6917 /* CC_OP_SUBQ */ { compute_all_subq, compute_c_subq },
6918#else
6919 /* CC_OP_SUBQ */ { 0, 0 },
6920#endif
6921
6922 /* CC_OP_SBBB */ { compute_all_sbbb, compute_c_sbbb },
6923 /* CC_OP_SBBW */ { compute_all_sbbw, compute_c_sbbw },
6924 /* CC_OP_SBBL */ { compute_all_sbbl, compute_c_sbbl },
6925#ifdef TARGET_X86_64
6926 /* CC_OP_SBBQ */ { compute_all_sbbq, compute_c_sbbq },
6927#else
6928 /* CC_OP_SBBQ */ { 0, 0 },
6929#endif
6930
6931 /* CC_OP_LOGICB */ { compute_all_logicb, compute_c_logicb },
6932 /* CC_OP_LOGICW */ { compute_all_logicw, compute_c_logicw },
6933 /* CC_OP_LOGICL */ { compute_all_logicl, compute_c_logicl },
6934#ifdef TARGET_X86_64
6935 /* CC_OP_LOGICQ */ { compute_all_logicq, compute_c_logicq },
6936#else
6937 /* CC_OP_LOGICQ */ { 0, 0 },
6938#endif
6939
6940 /* CC_OP_INCB */ { compute_all_incb, compute_c_incl },
6941 /* CC_OP_INCW */ { compute_all_incw, compute_c_incl },
6942 /* CC_OP_INCL */ { compute_all_incl, compute_c_incl },
6943#ifdef TARGET_X86_64
6944 /* CC_OP_INCQ */ { compute_all_incq, compute_c_incl },
6945#else
6946 /* CC_OP_INCQ */ { 0, 0 },
6947#endif
6948
6949 /* CC_OP_DECB */ { compute_all_decb, compute_c_incl },
6950 /* CC_OP_DECW */ { compute_all_decw, compute_c_incl },
6951 /* CC_OP_DECL */ { compute_all_decl, compute_c_incl },
6952#ifdef TARGET_X86_64
6953 /* CC_OP_DECQ */ { compute_all_decq, compute_c_incl },
6954#else
6955 /* CC_OP_DECQ */ { 0, 0 },
6956#endif
6957
6958 /* CC_OP_SHLB */ { compute_all_shlb, compute_c_shlb },
6959 /* CC_OP_SHLW */ { compute_all_shlw, compute_c_shlw },
6960 /* CC_OP_SHLL */ { compute_all_shll, compute_c_shll },
6961#ifdef TARGET_X86_64
6962 /* CC_OP_SHLQ */ { compute_all_shlq, compute_c_shlq },
6963#else
6964 /* CC_OP_SHLQ */ { 0, 0 },
6965#endif
6966
6967 /* CC_OP_SARB */ { compute_all_sarb, compute_c_sarl },
6968 /* CC_OP_SARW */ { compute_all_sarw, compute_c_sarl },
6969 /* CC_OP_SARL */ { compute_all_sarl, compute_c_sarl },
6970#ifdef TARGET_X86_64
6971 /* CC_OP_SARQ */ { compute_all_sarq, compute_c_sarl},
6972#else
6973 /* CC_OP_SARQ */ { 0, 0 },
6974#endif
6975};
6976#endif /* VBOX */