VirtualBox

source: vbox/trunk/src/recompiler/target-i386/op_helper.c@ 25465

Last change on this file since 25465 was 23640, checked in by vboxsync, 15 years ago

load_seg_vm: correct V86 segment default segment flags

  • Property svn:eol-style set to native
File size: 194.6 KB
1/*
2 * i386 helpers
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21/*
22 * Sun LGPL Disclaimer: For the avoidance of doubt, except that if any license choice
23 * other than GPL or LGPL is available it will apply instead, Sun elects to use only
24 * the Lesser General Public License version 2.1 (LGPLv2) at this time for any software where
25 * a choice of LGPL license versions is made available with the language indicating
26 * that LGPLv2 or any later version may be used, or where a choice of which version
27 * of the LGPL is applied is otherwise unspecified.
28 */
29#define CPU_NO_GLOBAL_REGS
30#include "exec.h"
31#include "host-utils.h"
32
33#ifdef VBOX
34# ifdef VBOX_WITH_VMI
35# include <VBox/parav.h>
36# endif
37#include "qemu-common.h"
38#include <math.h>
39#include "tcg.h"
40#endif
41//#define DEBUG_PCALL
42
43#if 0
44#define raise_exception_err(a, b)\
45do {\
46 if (logfile)\
47 fprintf(logfile, "raise_exception line=%d\n", __LINE__);\
48 (raise_exception_err)(a, b);\
49} while (0)
50#endif
51
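/* Parity lookup table: parity_table[v] is CC_P when the byte v contains an
   even number of set bits, matching the x86 PF (parity flag) definition. */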
52const uint8_t parity_table[256] = {
53 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
54 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
55 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
56 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
57 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
58 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
59 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
60 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
61 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
62 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
63 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
64 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
65 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
66 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
67 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
68 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
69 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
70 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
71 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
72 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
73 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
74 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
75 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
76 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
77 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
78 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
79 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
80 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
81 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
82 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
83 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
84 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
85};
86
87/* modulo 17 table */
88const uint8_t rclw_table[32] = {
89 0, 1, 2, 3, 4, 5, 6, 7,
90 8, 9,10,11,12,13,14,15,
91 16, 0, 1, 2, 3, 4, 5, 6,
92 7, 8, 9,10,11,12,13,14,
93};
94
95/* modulo 9 table */
96const uint8_t rclb_table[32] = {
97 0, 1, 2, 3, 4, 5, 6, 7,
98 8, 0, 1, 2, 3, 4, 5, 6,
99 7, 8, 0, 1, 2, 3, 4, 5,
100 6, 7, 8, 0, 1, 2, 3, 4,
101};
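/* RCL/RCR rotate through the carry flag, so an N-bit operand effectively has
   N+1 rotate positions; the count is therefore reduced modulo 17 (16-bit) or
   modulo 9 (8-bit) using the tables above. */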
102
103const CPU86_LDouble f15rk[7] =
104{
105 0.00000000000000000000L,
106 1.00000000000000000000L,
107 3.14159265358979323851L, /*pi*/
108 0.30102999566398119523L, /*lg2*/
109 0.69314718055994530943L, /*ln2*/
110 1.44269504088896340739L, /*l2e*/
111 3.32192809488736234781L, /*l2t*/
112};
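/* Values for the x87 load-constant instructions (FLDZ, FLD1, FLDPI, FLDLG2,
   FLDLN2, FLDL2E, FLDL2T); presumably indexed by the FPU constant-load
   helpers further down in this file. */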
113
114/* broken thread support */
115
116spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;
117
118void helper_lock(void)
119{
120 spin_lock(&global_cpu_lock);
121}
122
123void helper_unlock(void)
124{
125 spin_unlock(&global_cpu_lock);
126}
127
128void helper_write_eflags(target_ulong t0, uint32_t update_mask)
129{
130 load_eflags(t0, update_mask);
131}
132
133target_ulong helper_read_eflags(void)
134{
135 uint32_t eflags;
136 eflags = cc_table[CC_OP].compute_all();
137 eflags |= (DF & DF_MASK);
138 eflags |= env->eflags & ~(VM_MASK | RF_MASK);
139 return eflags;
140}
141
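/* CR4.VME support: with VME enabled in V86 mode and IOPL < 3, PUSHF/POPF
   operate on the virtual interrupt flag (VIF) instead of IF, which is what
   the two helpers below implement. */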
142#ifdef VBOX
143void helper_write_eflags_vme(target_ulong t0)
144{
145 unsigned int new_eflags = t0;
146
147 assert(env->eflags & (1<<VM_SHIFT));
148
149 /* if virtual interrupt pending and (virtual) interrupts will be enabled -> #GP */
150 /* if TF will be set -> #GP */
151 if ( ((new_eflags & IF_MASK) && (env->eflags & VIP_MASK))
152 || (new_eflags & TF_MASK)) {
153 raise_exception(EXCP0D_GPF);
154 } else {
155 load_eflags(new_eflags,
156 (TF_MASK | AC_MASK | ID_MASK | NT_MASK) & 0xffff);
157
158 if (new_eflags & IF_MASK) {
159 env->eflags |= VIF_MASK;
160 } else {
161 env->eflags &= ~VIF_MASK;
162 }
163 }
164}
165
166target_ulong helper_read_eflags_vme(void)
167{
168 uint32_t eflags;
169 eflags = cc_table[CC_OP].compute_all();
170 eflags |= (DF & DF_MASK);
171 eflags |= env->eflags & ~(VM_MASK | RF_MASK);
172 if (env->eflags & VIF_MASK)
173 eflags |= IF_MASK;
174 else
175 eflags &= ~IF_MASK;
176
177 /* According to the AMD manual, this should be read with IOPL == 3 */
178 eflags |= (3 << IOPL_SHIFT);
179
180 /* We only use helper_read_eflags_vme() in 16-bit mode */
181 return eflags & 0xffff;
182}
183
184void helper_dump_state()
185{
186 LogRel(("CS:EIP=%08x:%08x, FLAGS=%08x\n", env->segs[R_CS].base, env->eip, env->eflags));
187 LogRel(("EAX=%08x\tECX=%08x\tEDX=%08x\tEBX=%08x\n",
188 (uint32_t)env->regs[R_EAX], (uint32_t)env->regs[R_ECX],
189 (uint32_t)env->regs[R_EDX], (uint32_t)env->regs[R_EBX]));
190 LogRel(("ESP=%08x\tEBP=%08x\tESI=%08x\tEDI=%08x\n",
191 (uint32_t)env->regs[R_ESP], (uint32_t)env->regs[R_EBP],
192 (uint32_t)env->regs[R_ESI], (uint32_t)env->regs[R_EDI]));
193}
194#endif
195
196/* return non zero if error */
197#ifndef VBOX
198static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
199#else /* VBOX */
200DECLINLINE(int) load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
201#endif /* VBOX */
202 int selector)
203{
204 SegmentCache *dt;
205 int index;
206 target_ulong ptr;
207
208#ifdef VBOX
209 /* Trying to load a selector with RPL=1? */
210 if ((env->hflags & HF_CPL_MASK) == 0 && (selector & 3) == 1 && (env->state & CPU_RAW_RING0))
211 {
212 Log(("RPL 1 -> sel %04X -> %04X\n", selector, selector & 0xfffc));
213 selector = selector & 0xfffc;
214 }
215#endif
216
217 if (selector & 0x4)
218 dt = &env->ldt;
219 else
220 dt = &env->gdt;
221 index = selector & ~7;
222 if ((index + 7) > dt->limit)
223 return -1;
224 ptr = dt->base + index;
225 *e1_ptr = ldl_kernel(ptr);
226 *e2_ptr = ldl_kernel(ptr + 4);
227 return 0;
228}
229
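/* A descriptor is fetched as two 32-bit words: e1 (bytes 0-3) and e2
   (bytes 4-7). The limit is e1[15:0] combined with e2[19:16], scaled by
   4 KiB when the granularity (G) bit is set; the base is assembled from
   e1[31:16], e2[7:0] and e2[31:24], as the two helpers below do. */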
230#ifndef VBOX
231static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
232#else /* VBOX */
233DECLINLINE(unsigned int) get_seg_limit(uint32_t e1, uint32_t e2)
234#endif /* VBOX */
235{
236 unsigned int limit;
237 limit = (e1 & 0xffff) | (e2 & 0x000f0000);
238 if (e2 & DESC_G_MASK)
239 limit = (limit << 12) | 0xfff;
240 return limit;
241}
242
243#ifndef VBOX
244static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
245#else /* VBOX */
246DECLINLINE(uint32_t) get_seg_base(uint32_t e1, uint32_t e2)
247#endif /* VBOX */
248{
249 return ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
250}
251
252#ifndef VBOX
253static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
254#else /* VBOX */
255DECLINLINE(void) load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
256#endif /* VBOX */
257{
258 sc->base = get_seg_base(e1, e2);
259 sc->limit = get_seg_limit(e1, e2);
260 sc->flags = e2;
261}
262
263/* init the segment cache in vm86 mode. */
264#ifndef VBOX
265static inline void load_seg_vm(int seg, int selector)
266#else /* VBOX */
267DECLINLINE(void) load_seg_vm(int seg, int selector)
268#endif /* VBOX */
269{
270 selector &= 0xffff;
271#ifdef VBOX
272 /* flags must be 0xf3; expand-up read/write accessed data segment with DPL=3. (VT-x) */
273 unsigned flags = DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | DESC_A_MASK;
274 flags |= (3 << DESC_DPL_SHIFT);
275
276 cpu_x86_load_seg_cache(env, seg, selector,
277 (selector << 4), 0xffff, flags);
278#else
279 cpu_x86_load_seg_cache(env, seg, selector,
280 (selector << 4), 0xffff, 0);
281#endif
282}
283
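/* In a 32-bit TSS the {ESP, SS} pair for privilege level n lives at offset
   4 + 8*n; in a 16-bit TSS the {SP, SS} pair is at 2 + 4*n. The index
   computation below, (dpl * 4 + 2) << shift, reproduces exactly that. */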
284#ifndef VBOX
285static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
286#else /* VBOX */
287DECLINLINE(void) get_ss_esp_from_tss(uint32_t *ss_ptr,
288#endif /* VBOX */
289 uint32_t *esp_ptr, int dpl)
290{
291#ifndef VBOX
292 int type, index, shift;
293#else
294 unsigned int type, index, shift;
295#endif
296
297#if 0
298 {
299 int i;
300 printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
301 for(i=0;i<env->tr.limit;i++) {
302 printf("%02x ", env->tr.base[i]);
303 if ((i & 7) == 7) printf("\n");
304 }
305 printf("\n");
306 }
307#endif
308
309 if (!(env->tr.flags & DESC_P_MASK))
310 cpu_abort(env, "invalid tss");
311 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
312 if ((type & 7) != 1)
313 cpu_abort(env, "invalid tss type");
314 shift = type >> 3;
315 index = (dpl * 4 + 2) << shift;
316 if (index + (4 << shift) - 1 > env->tr.limit)
317 raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
318 if (shift == 0) {
319 *esp_ptr = lduw_kernel(env->tr.base + index);
320 *ss_ptr = lduw_kernel(env->tr.base + index + 2);
321 } else {
322 *esp_ptr = ldl_kernel(env->tr.base + index);
323 *ss_ptr = lduw_kernel(env->tr.base + index + 4);
324 }
325}
326
327/* XXX: merge with load_seg() */
328static void tss_load_seg(int seg_reg, int selector)
329{
330 uint32_t e1, e2;
331 int rpl, dpl, cpl;
332
333#ifdef VBOX
334 e1 = e2 = 0;
335 cpl = env->hflags & HF_CPL_MASK;
336 /* Trying to load a selector with RPL=1? */
337 if (cpl == 0 && (selector & 3) == 1 && (env->state & CPU_RAW_RING0))
338 {
339 Log(("RPL 1 -> sel %04X -> %04X\n", selector, selector & 0xfffc));
340 selector = selector & 0xfffc;
341 }
342#endif
343
344 if ((selector & 0xfffc) != 0) {
345 if (load_segment(&e1, &e2, selector) != 0)
346 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
347 if (!(e2 & DESC_S_MASK))
348 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
349 rpl = selector & 3;
350 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
351 cpl = env->hflags & HF_CPL_MASK;
352 if (seg_reg == R_CS) {
353 if (!(e2 & DESC_CS_MASK))
354 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
355 /* XXX: is it correct ? */
356 if (dpl != rpl)
357 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
358 if ((e2 & DESC_C_MASK) && dpl > rpl)
359 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
360 } else if (seg_reg == R_SS) {
361 /* SS must be writable data */
362 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
363 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
364 if (dpl != cpl || dpl != rpl)
365 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
366 } else {
367 /* not readable code */
368 if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
369 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
370 /* if data or non conforming code, checks the rights */
371 if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
372 if (dpl < cpl || dpl < rpl)
373 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
374 }
375 }
376 if (!(e2 & DESC_P_MASK))
377 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
378 cpu_x86_load_seg_cache(env, seg_reg, selector,
379 get_seg_base(e1, e2),
380 get_seg_limit(e1, e2),
381 e2);
382 } else {
383 if (seg_reg == R_SS || seg_reg == R_CS)
384 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
385#ifdef VBOX
386#if 0
387 /** @todo: for now we ignore loading null (0) selectors; need to check once what the correct behaviour is */
388 cpu_x86_load_seg_cache(env, seg_reg, selector,
389 0, 0, 0);
390#endif
391#endif
392 }
393}
394
395#define SWITCH_TSS_JMP 0
396#define SWITCH_TSS_IRET 1
397#define SWITCH_TSS_CALL 2
398
399/* XXX: restore CPU state in registers (PowerPC case) */
400static void switch_tss(int tss_selector,
401 uint32_t e1, uint32_t e2, int source,
402 uint32_t next_eip)
403{
404 int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
405 target_ulong tss_base;
406 uint32_t new_regs[8], new_segs[6];
407 uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
408 uint32_t old_eflags, eflags_mask;
409 SegmentCache *dt;
410#ifndef VBOX
411 int index;
412#else
413 unsigned int index;
414#endif
415 target_ulong ptr;
416
417 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
418#ifdef DEBUG_PCALL
419 if (loglevel & CPU_LOG_PCALL)
420 fprintf(logfile, "switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);
421#endif
422
423#if defined(VBOX) && defined(DEBUG)
424 printf("switch_tss %x %x %x %d %08x\n", tss_selector, e1, e2, source, next_eip);
425#endif
426
427 /* if task gate, we read the TSS segment and we load it */
428 if (type == 5) {
429 if (!(e2 & DESC_P_MASK))
430 raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
431 tss_selector = e1 >> 16;
432 if (tss_selector & 4)
433 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
434 if (load_segment(&e1, &e2, tss_selector) != 0)
435 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
436 if (e2 & DESC_S_MASK)
437 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
438 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
439 if ((type & 7) != 1)
440 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
441 }
442
443 if (!(e2 & DESC_P_MASK))
444 raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
445
446 if (type & 8)
447 tss_limit_max = 103;
448 else
449 tss_limit_max = 43;
450 tss_limit = get_seg_limit(e1, e2);
451 tss_base = get_seg_base(e1, e2);
452 if ((tss_selector & 4) != 0 ||
453 tss_limit < tss_limit_max)
454 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
455 old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
456 if (old_type & 8)
457 old_tss_limit_max = 103;
458 else
459 old_tss_limit_max = 43;
460
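    /* 32-bit TSS layout assumed below: CR3 at 0x1c, EIP at 0x20, EFLAGS at
       0x24, the eight general registers starting at 0x28, the six segment
       selectors starting at 0x48 and the LDT selector at 0x60, per the
       Intel-documented TSS format. */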
461 /* read all the registers from the new TSS */
462 if (type & 8) {
463 /* 32 bit */
464 new_cr3 = ldl_kernel(tss_base + 0x1c);
465 new_eip = ldl_kernel(tss_base + 0x20);
466 new_eflags = ldl_kernel(tss_base + 0x24);
467 for(i = 0; i < 8; i++)
468 new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
469 for(i = 0; i < 6; i++)
470 new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
471 new_ldt = lduw_kernel(tss_base + 0x60);
472 new_trap = ldl_kernel(tss_base + 0x64);
473 } else {
474 /* 16 bit */
475 new_cr3 = 0;
476 new_eip = lduw_kernel(tss_base + 0x0e);
477 new_eflags = lduw_kernel(tss_base + 0x10);
478 for(i = 0; i < 8; i++)
479 new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
480 for(i = 0; i < 4; i++)
481 new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
482 new_ldt = lduw_kernel(tss_base + 0x2a);
483 new_segs[R_FS] = 0;
484 new_segs[R_GS] = 0;
485 new_trap = 0;
486 }
487
488 /* NOTE: we must avoid memory exceptions during the task switch,
489 so we make dummy accesses before */
490 /* XXX: it can still fail in some cases, so a bigger hack is
491 necessary to validate the TLB after having done the accesses */
492
493 v1 = ldub_kernel(env->tr.base);
494 v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
495 stb_kernel(env->tr.base, v1);
496 stb_kernel(env->tr.base + old_tss_limit_max, v2);
497
498 /* clear busy bit (it is restartable) */
499 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
500 target_ulong ptr;
501 uint32_t e2;
502 ptr = env->gdt.base + (env->tr.selector & ~7);
503 e2 = ldl_kernel(ptr + 4);
504 e2 &= ~DESC_TSS_BUSY_MASK;
505 stl_kernel(ptr + 4, e2);
506 }
507 old_eflags = compute_eflags();
508 if (source == SWITCH_TSS_IRET)
509 old_eflags &= ~NT_MASK;
510
511 /* save the current state in the old TSS */
512 if (type & 8) {
513 /* 32 bit */
514 stl_kernel(env->tr.base + 0x20, next_eip);
515 stl_kernel(env->tr.base + 0x24, old_eflags);
516 stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
517 stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
518 stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
519 stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
520 stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
521 stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
522 stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
523 stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
524 for(i = 0; i < 6; i++)
525 stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
526#if defined(VBOX) && defined(DEBUG)
527 printf("TSS 32 bits switch\n");
528 printf("Saving CS=%08X\n", env->segs[R_CS].selector);
529#endif
530 } else {
531 /* 16 bit */
532 stw_kernel(env->tr.base + 0x0e, next_eip);
533 stw_kernel(env->tr.base + 0x10, old_eflags);
534 stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
535 stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
536 stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
537 stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
538 stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
539 stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
540 stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
541 stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
542 for(i = 0; i < 4; i++)
543 stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
544 }
545
546 /* now if an exception occurs, it will occur in the next task
547 context */
548
549 if (source == SWITCH_TSS_CALL) {
550 stw_kernel(tss_base, env->tr.selector);
551 new_eflags |= NT_MASK;
552 }
553
554 /* set busy bit */
555 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
556 target_ulong ptr;
557 uint32_t e2;
558 ptr = env->gdt.base + (tss_selector & ~7);
559 e2 = ldl_kernel(ptr + 4);
560 e2 |= DESC_TSS_BUSY_MASK;
561 stl_kernel(ptr + 4, e2);
562 }
563
564 /* set the new CPU state */
565 /* from this point, any exception which occurs can give problems */
566 env->cr[0] |= CR0_TS_MASK;
567 env->hflags |= HF_TS_MASK;
568 env->tr.selector = tss_selector;
569 env->tr.base = tss_base;
570 env->tr.limit = tss_limit;
571 env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;
572
573 if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
574 cpu_x86_update_cr3(env, new_cr3);
575 }
576
577 /* load all registers without an exception, then reload them with
578 possible exception */
579 env->eip = new_eip;
580 eflags_mask = TF_MASK | AC_MASK | ID_MASK |
581 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
582 if (!(type & 8))
583 eflags_mask &= 0xffff;
584 load_eflags(new_eflags, eflags_mask);
585 /* XXX: what to do in 16 bit case ? */
586 EAX = new_regs[0];
587 ECX = new_regs[1];
588 EDX = new_regs[2];
589 EBX = new_regs[3];
590 ESP = new_regs[4];
591 EBP = new_regs[5];
592 ESI = new_regs[6];
593 EDI = new_regs[7];
594 if (new_eflags & VM_MASK) {
595 for(i = 0; i < 6; i++)
596 load_seg_vm(i, new_segs[i]);
597 /* in vm86, CPL is always 3 */
598 cpu_x86_set_cpl(env, 3);
599 } else {
600 /* CPL is set to the RPL of CS */
601 cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
602 /* first just selectors as the rest may trigger exceptions */
603 for(i = 0; i < 6; i++)
604 cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
605 }
606
607 env->ldt.selector = new_ldt & ~4;
608 env->ldt.base = 0;
609 env->ldt.limit = 0;
610 env->ldt.flags = 0;
611
612 /* load the LDT */
613 if (new_ldt & 4)
614 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
615
616 if ((new_ldt & 0xfffc) != 0) {
617 dt = &env->gdt;
618 index = new_ldt & ~7;
619 if ((index + 7) > dt->limit)
620 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
621 ptr = dt->base + index;
622 e1 = ldl_kernel(ptr);
623 e2 = ldl_kernel(ptr + 4);
624 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
625 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
626 if (!(e2 & DESC_P_MASK))
627 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
628 load_seg_cache_raw_dt(&env->ldt, e1, e2);
629 }
630
631 /* load the segments */
632 if (!(new_eflags & VM_MASK)) {
633 tss_load_seg(R_CS, new_segs[R_CS]);
634 tss_load_seg(R_SS, new_segs[R_SS]);
635 tss_load_seg(R_ES, new_segs[R_ES]);
636 tss_load_seg(R_DS, new_segs[R_DS]);
637 tss_load_seg(R_FS, new_segs[R_FS]);
638 tss_load_seg(R_GS, new_segs[R_GS]);
639 }
640
641 /* check that EIP is in the CS segment limits */
642 if (new_eip > env->segs[R_CS].limit) {
643 /* XXX: different exception if CALL ? */
644 raise_exception_err(EXCP0D_GPF, 0);
645 }
646}
647
648/* check if Port I/O is allowed in TSS */
649#ifndef VBOX
650static inline void check_io(int addr, int size)
651{
652 int io_offset, val, mask;
653
654#else /* VBOX */
655DECLINLINE(void) check_io(int addr, int size)
656{
657 int val, mask;
658 unsigned int io_offset;
659#endif /* VBOX */
660 /* TSS must be a valid 32 bit one */
661 if (!(env->tr.flags & DESC_P_MASK) ||
662 ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
663 env->tr.limit < 103)
664 goto fail;
665 io_offset = lduw_kernel(env->tr.base + 0x66);
666 io_offset += (addr >> 3);
667 /* Note: the check needs two bytes */
668 if ((io_offset + 1) > env->tr.limit)
669 goto fail;
670 val = lduw_kernel(env->tr.base + io_offset);
671 val >>= (addr & 7);
672 mask = (1 << size) - 1;
673 /* all bits must be zero to allow the I/O */
674 if ((val & mask) != 0) {
675 fail:
676 raise_exception_err(EXCP0D_GPF, 0);
677 }
678}
679
680#ifdef VBOX
681/* Keep in sync with gen_check_external_event() */
682void helper_check_external_event()
683{
684 if ( (env->interrupt_request & ( CPU_INTERRUPT_EXTERNAL_EXIT
685 | CPU_INTERRUPT_EXTERNAL_TIMER
686 | CPU_INTERRUPT_EXTERNAL_DMA))
687 || ( (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD)
688 && (env->eflags & IF_MASK)
689 && !(env->hflags & HF_INHIBIT_IRQ_MASK) ) )
690 {
691 helper_external_event();
692 }
693
694}
695
696void helper_sync_seg(uint32_t reg)
697{
698 if (env->segs[reg].newselector)
699 sync_seg(env, reg, env->segs[reg].newselector);
700}
701#endif
702
703void helper_check_iob(uint32_t t0)
704{
705 check_io(t0, 1);
706}
707
708void helper_check_iow(uint32_t t0)
709{
710 check_io(t0, 2);
711}
712
713void helper_check_iol(uint32_t t0)
714{
715 check_io(t0, 4);
716}
717
718void helper_outb(uint32_t port, uint32_t data)
719{
720 cpu_outb(env, port, data & 0xff);
721}
722
723target_ulong helper_inb(uint32_t port)
724{
725 return cpu_inb(env, port);
726}
727
728void helper_outw(uint32_t port, uint32_t data)
729{
730 cpu_outw(env, port, data & 0xffff);
731}
732
733target_ulong helper_inw(uint32_t port)
734{
735 return cpu_inw(env, port);
736}
737
738void helper_outl(uint32_t port, uint32_t data)
739{
740 cpu_outl(env, port, data);
741}
742
743target_ulong helper_inl(uint32_t port)
744{
745 return cpu_inl(env, port);
746}
747
748#ifndef VBOX
749static inline unsigned int get_sp_mask(unsigned int e2)
750#else /* VBOX */
751DECLINLINE(unsigned int) get_sp_mask(unsigned int e2)
752#endif /* VBOX */
753{
754 if (e2 & DESC_B_MASK)
755 return 0xffffffff;
756 else
757 return 0xffff;
758}
759
760#ifdef TARGET_X86_64
761#define SET_ESP(val, sp_mask)\
762do {\
763 if ((sp_mask) == 0xffff)\
764 ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
765 else if ((sp_mask) == 0xffffffffLL)\
766 ESP = (uint32_t)(val);\
767 else\
768 ESP = (val);\
769} while (0)
770#else
771#define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
772#endif
773
774/* on 64-bit machines this can overflow, so this segment addition macro
775 * can be used to trim the value to 32 bits whenever needed */
776#define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))
777
778/* XXX: add a is_user flag to have proper security support */
779#define PUSHW(ssp, sp, sp_mask, val)\
780{\
781 sp -= 2;\
782 stw_kernel((ssp) + (sp & (sp_mask)), (val));\
783}
784
785#define PUSHL(ssp, sp, sp_mask, val)\
786{\
787 sp -= 4;\
788 stl_kernel(SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val));\
789}
790
791#define POPW(ssp, sp, sp_mask, val)\
792{\
793 val = lduw_kernel((ssp) + (sp & (sp_mask)));\
794 sp += 2;\
795}
796
797#define POPL(ssp, sp, sp_mask, val)\
798{\
799 val = (uint32_t)ldl_kernel(SEG_ADDL(ssp, sp, sp_mask));\
800 sp += 4;\
801}
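/* These macros push to / pop from the target stack using kernel-privilege
   accesses, masking the stack pointer with sp_mask so 16-bit stacks wrap at
   64K; SEG_ADDL additionally truncates the linear address to 32 bits so
   32-bit stack arithmetic does not spill into the upper half on x86_64. */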
802
803/* protected mode interrupt */
804static void do_interrupt_protected(int intno, int is_int, int error_code,
805 unsigned int next_eip, int is_hw)
806{
807 SegmentCache *dt;
808 target_ulong ptr, ssp;
809 int type, dpl, selector, ss_dpl, cpl;
810 int has_error_code, new_stack, shift;
811 uint32_t e1, e2, offset, ss, esp, ss_e1, ss_e2;
812 uint32_t old_eip, sp_mask;
813
814#ifdef VBOX
815 ss = ss_e1 = ss_e2 = 0;
816# ifdef VBOX_WITH_VMI
817 if ( intno == 6
818 && PARAVIsBiosCall(env->pVM, (RTRCPTR)next_eip, env->regs[R_EAX]))
819 {
820 env->exception_index = EXCP_PARAV_CALL;
821 cpu_loop_exit();
822 }
823# endif
824 if (remR3NotifyTrap(env, intno, error_code, next_eip) != VINF_SUCCESS)
825 cpu_loop_exit();
826#endif
827
828 has_error_code = 0;
829 if (!is_int && !is_hw) {
830 switch(intno) {
831 case 8:
832 case 10:
833 case 11:
834 case 12:
835 case 13:
836 case 14:
837 case 17:
838 has_error_code = 1;
839 break;
840 }
841 }
842 if (is_int)
843 old_eip = next_eip;
844 else
845 old_eip = env->eip;
846
847 dt = &env->idt;
848#ifndef VBOX
849 if (intno * 8 + 7 > dt->limit)
850#else
851 if ((unsigned)intno * 8 + 7 > dt->limit)
852#endif
853 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
854 ptr = dt->base + intno * 8;
855 e1 = ldl_kernel(ptr);
856 e2 = ldl_kernel(ptr + 4);
857 /* check gate type */
858 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
859 switch(type) {
860 case 5: /* task gate */
861 /* must do that check here to return the correct error code */
862 if (!(e2 & DESC_P_MASK))
863 raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
864 switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
865 if (has_error_code) {
866 int type;
867 uint32_t mask;
868 /* push the error code */
869 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
870 shift = type >> 3;
871 if (env->segs[R_SS].flags & DESC_B_MASK)
872 mask = 0xffffffff;
873 else
874 mask = 0xffff;
875 esp = (ESP - (2 << shift)) & mask;
876 ssp = env->segs[R_SS].base + esp;
877 if (shift)
878 stl_kernel(ssp, error_code);
879 else
880 stw_kernel(ssp, error_code);
881 SET_ESP(esp, mask);
882 }
883 return;
884 case 6: /* 286 interrupt gate */
885 case 7: /* 286 trap gate */
886 case 14: /* 386 interrupt gate */
887 case 15: /* 386 trap gate */
888 break;
889 default:
890 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
891 break;
892 }
893 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
894 cpl = env->hflags & HF_CPL_MASK;
895 /* check privilege if software int */
896 if (is_int && dpl < cpl)
897 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
898 /* check valid bit */
899 if (!(e2 & DESC_P_MASK))
900 raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
901 selector = e1 >> 16;
902 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
903 if ((selector & 0xfffc) == 0)
904 raise_exception_err(EXCP0D_GPF, 0);
905
906 if (load_segment(&e1, &e2, selector) != 0)
907 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
908 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
909 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
910 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
911 if (dpl > cpl)
912 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
913 if (!(e2 & DESC_P_MASK))
914 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
915 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
916 /* to inner privilege */
917 get_ss_esp_from_tss(&ss, &esp, dpl);
918 if ((ss & 0xfffc) == 0)
919 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
920 if ((ss & 3) != dpl)
921 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
922 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
923 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
924 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
925 if (ss_dpl != dpl)
926 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
927 if (!(ss_e2 & DESC_S_MASK) ||
928 (ss_e2 & DESC_CS_MASK) ||
929 !(ss_e2 & DESC_W_MASK))
930 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
931 if (!(ss_e2 & DESC_P_MASK))
932#ifdef VBOX /* See page 3-477 of 253666.pdf */
933 raise_exception_err(EXCP0C_STACK, ss & 0xfffc);
934#else
935 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
936#endif
937 new_stack = 1;
938 sp_mask = get_sp_mask(ss_e2);
939 ssp = get_seg_base(ss_e1, ss_e2);
940#if defined(VBOX) && defined(DEBUG)
941 printf("new stack %04X:%08X gate dpl=%d\n", ss, esp, dpl);
942#endif
943 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
944 /* to same privilege */
945 if (env->eflags & VM_MASK)
946 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
947 new_stack = 0;
948 sp_mask = get_sp_mask(env->segs[R_SS].flags);
949 ssp = env->segs[R_SS].base;
950 esp = ESP;
951 dpl = cpl;
952 } else {
953 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
954 new_stack = 0; /* avoid warning */
955 sp_mask = 0; /* avoid warning */
956 ssp = 0; /* avoid warning */
957 esp = 0; /* avoid warning */
958 }
959
960 shift = type >> 3;
961
962#if 0
963 /* XXX: check that enough room is available */
964 push_size = 6 + (new_stack << 2) + (has_error_code << 1);
965 if (env->eflags & VM_MASK)
966 push_size += 8;
967 push_size <<= shift;
968#endif
969 if (shift == 1) {
970 if (new_stack) {
971 if (env->eflags & VM_MASK) {
972 PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
973 PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
974 PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
975 PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
976 }
977 PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
978 PUSHL(ssp, esp, sp_mask, ESP);
979 }
980 PUSHL(ssp, esp, sp_mask, compute_eflags());
981 PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
982 PUSHL(ssp, esp, sp_mask, old_eip);
983 if (has_error_code) {
984 PUSHL(ssp, esp, sp_mask, error_code);
985 }
986 } else {
987 if (new_stack) {
988 if (env->eflags & VM_MASK) {
989 PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
990 PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
991 PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
992 PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
993 }
994 PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
995 PUSHW(ssp, esp, sp_mask, ESP);
996 }
997 PUSHW(ssp, esp, sp_mask, compute_eflags());
998 PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
999 PUSHW(ssp, esp, sp_mask, old_eip);
1000 if (has_error_code) {
1001 PUSHW(ssp, esp, sp_mask, error_code);
1002 }
1003 }
1004
1005 if (new_stack) {
1006 if (env->eflags & VM_MASK) {
1007 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
1008 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
1009 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
1010 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
1011 }
1012 ss = (ss & ~3) | dpl;
1013 cpu_x86_load_seg_cache(env, R_SS, ss,
1014 ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
1015 }
1016 SET_ESP(esp, sp_mask);
1017
1018 selector = (selector & ~3) | dpl;
1019 cpu_x86_load_seg_cache(env, R_CS, selector,
1020 get_seg_base(e1, e2),
1021 get_seg_limit(e1, e2),
1022 e2);
1023 cpu_x86_set_cpl(env, dpl);
1024 env->eip = offset;
1025
1026 /* interrupt gates clear the IF flag (trap gates leave it set) */
1027 if ((type & 1) == 0) {
1028 env->eflags &= ~IF_MASK;
1029 }
1030#ifndef VBOX
1031 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
1032#else
1033 /*
1034 * We must clear VIP/VIF too on interrupt entry, as otherwise FreeBSD
1035 * gets confused by seemingly changed EFLAGS. See #3491 and
1036 * public bug #2341.
1037 */
1038 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK | VIF_MASK | VIP_MASK);
1039#endif
1040}
1041#ifdef VBOX
1042
1043/* check if VME interrupt redirection is enabled in TSS */
1044DECLINLINE(bool) is_vme_irq_redirected(int intno)
1045{
1046 unsigned int io_offset, intredir_offset;
1047 unsigned char val, mask;
1048
1049 /* TSS must be a valid 32 bit one */
1050 if (!(env->tr.flags & DESC_P_MASK) ||
1051 ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
1052 env->tr.limit < 103)
1053 goto fail;
1054 io_offset = lduw_kernel(env->tr.base + 0x66);
1055 /* Make sure the io bitmap offset is valid; anything less than sizeof(VBOXTSS) means there's none. */
1056 if (io_offset < 0x68 + 0x20)
1057 io_offset = 0x68 + 0x20;
1058 /* the virtual interrupt redirection bitmap is located below the io bitmap */
1059 intredir_offset = io_offset - 0x20;
1060
1061 intredir_offset += (intno >> 3);
1062 if ((intredir_offset) > env->tr.limit)
1063 goto fail;
1064
1065 val = ldub_kernel(env->tr.base + intredir_offset);
1066 mask = 1 << (unsigned char)(intno & 7);
1067
1068 /* bit set means no redirection. */
1069 if ((val & mask) != 0) {
1070 return false;
1071 }
1072 return true;
1073
1074fail:
1075 raise_exception_err(EXCP0D_GPF, 0);
1076 return true;
1077}
1078
1079/* V86 mode software interrupt with CR4.VME=1 */
1080static void do_soft_interrupt_vme(int intno, int error_code, unsigned int next_eip)
1081{
1082 target_ulong ptr, ssp;
1083 int selector;
1084 uint32_t offset, esp;
1085 uint32_t old_cs, old_eflags;
1086 uint32_t iopl;
1087
1088 iopl = ((env->eflags >> IOPL_SHIFT) & 3);
1089
1090 if (!is_vme_irq_redirected(intno))
1091 {
1092 if (iopl == 3)
1093 {
1094 do_interrupt_protected(intno, 1, error_code, next_eip, 0);
1095 return;
1096 }
1097 else
1098 raise_exception_err(EXCP0D_GPF, 0);
1099 }
1100
1101 /* virtual mode idt is at linear address 0 */
1102 ptr = 0 + intno * 4;
1103 offset = lduw_kernel(ptr);
1104 selector = lduw_kernel(ptr + 2);
1105 esp = ESP;
1106 ssp = env->segs[R_SS].base;
1107 old_cs = env->segs[R_CS].selector;
1108
1109 old_eflags = compute_eflags();
1110 if (iopl < 3)
1111 {
1112 /* copy VIF into IF and set IOPL to 3 */
1113 if (env->eflags & VIF_MASK)
1114 old_eflags |= IF_MASK;
1115 else
1116 old_eflags &= ~IF_MASK;
1117
1118 old_eflags |= (3 << IOPL_SHIFT);
1119 }
1120
1121 /* XXX: use SS segment size ? */
1122 PUSHW(ssp, esp, 0xffff, old_eflags);
1123 PUSHW(ssp, esp, 0xffff, old_cs);
1124 PUSHW(ssp, esp, 0xffff, next_eip);
1125
1126 /* update processor state */
1127 ESP = (ESP & ~0xffff) | (esp & 0xffff);
1128 env->eip = offset;
1129 env->segs[R_CS].selector = selector;
1130 env->segs[R_CS].base = (selector << 4);
1131 env->eflags &= ~(TF_MASK | RF_MASK);
1132
1133 if (iopl < 3)
1134 env->eflags &= ~VIF_MASK;
1135 else
1136 env->eflags &= ~IF_MASK;
1137}
1138#endif /* VBOX */
1139
1140#ifdef TARGET_X86_64
1141
1142#define PUSHQ(sp, val)\
1143{\
1144 sp -= 8;\
1145 stq_kernel(sp, (val));\
1146}
1147
1148#define POPQ(sp, val)\
1149{\
1150 val = ldq_kernel(sp);\
1151 sp += 8;\
1152}
1153
1154#ifndef VBOX
1155static inline target_ulong get_rsp_from_tss(int level)
1156#else /* VBOX */
1157DECLINLINE(target_ulong) get_rsp_from_tss(int level)
1158#endif /* VBOX */
1159{
1160 int index;
1161
1162#if 0
1163 printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
1164 env->tr.base, env->tr.limit);
1165#endif
1166
1167 if (!(env->tr.flags & DESC_P_MASK))
1168 cpu_abort(env, "invalid tss");
1169 index = 8 * level + 4;
1170 if ((index + 7) > env->tr.limit)
1171 raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
1172 return ldq_kernel(env->tr.base + index);
1173}
1174
1175/* 64 bit interrupt */
1176static void do_interrupt64(int intno, int is_int, int error_code,
1177 target_ulong next_eip, int is_hw)
1178{
1179 SegmentCache *dt;
1180 target_ulong ptr;
1181 int type, dpl, selector, cpl, ist;
1182 int has_error_code, new_stack;
1183 uint32_t e1, e2, e3, ss;
1184 target_ulong old_eip, esp, offset;
1185
1186#ifdef VBOX
1187 if (remR3NotifyTrap(env, intno, error_code, next_eip) != VINF_SUCCESS)
1188 cpu_loop_exit();
1189#endif
1190
1191 has_error_code = 0;
1192 if (!is_int && !is_hw) {
1193 switch(intno) {
1194 case 8:
1195 case 10:
1196 case 11:
1197 case 12:
1198 case 13:
1199 case 14:
1200 case 17:
1201 has_error_code = 1;
1202 break;
1203 }
1204 }
1205 if (is_int)
1206 old_eip = next_eip;
1207 else
1208 old_eip = env->eip;
1209
1210 dt = &env->idt;
1211 if (intno * 16 + 15 > dt->limit)
1212 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
1213 ptr = dt->base + intno * 16;
1214 e1 = ldl_kernel(ptr);
1215 e2 = ldl_kernel(ptr + 4);
1216 e3 = ldl_kernel(ptr + 8);
1217 /* check gate type */
1218 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
1219 switch(type) {
1220 case 14: /* 386 interrupt gate */
1221 case 15: /* 386 trap gate */
1222 break;
1223 default:
1224 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
1225 break;
1226 }
1227 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1228 cpl = env->hflags & HF_CPL_MASK;
1229 /* check privilege if software int */
1230 if (is_int && dpl < cpl)
1231 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
1232 /* check valid bit */
1233 if (!(e2 & DESC_P_MASK))
1234 raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2);
1235 selector = e1 >> 16;
1236 offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
1237 ist = e2 & 7;
1238 if ((selector & 0xfffc) == 0)
1239 raise_exception_err(EXCP0D_GPF, 0);
1240
1241 if (load_segment(&e1, &e2, selector) != 0)
1242 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1243 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
1244 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1245 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1246 if (dpl > cpl)
1247 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1248 if (!(e2 & DESC_P_MASK))
1249 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
1250 if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))
1251 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1252 if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
1253 /* to inner privilege */
1254 if (ist != 0)
1255 esp = get_rsp_from_tss(ist + 3);
1256 else
1257 esp = get_rsp_from_tss(dpl);
1258 esp &= ~0xfLL; /* align stack */
1259 ss = 0;
1260 new_stack = 1;
1261 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
1262 /* to same privilege */
1263 if (env->eflags & VM_MASK)
1264 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1265 new_stack = 0;
1266 if (ist != 0)
1267 esp = get_rsp_from_tss(ist + 3);
1268 else
1269 esp = ESP;
1270 esp &= ~0xfLL; /* align stack */
1271 dpl = cpl;
1272 } else {
1273 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1274 new_stack = 0; /* avoid warning */
1275 esp = 0; /* avoid warning */
1276 }
1277
1278 PUSHQ(esp, env->segs[R_SS].selector);
1279 PUSHQ(esp, ESP);
1280 PUSHQ(esp, compute_eflags());
1281 PUSHQ(esp, env->segs[R_CS].selector);
1282 PUSHQ(esp, old_eip);
1283 if (has_error_code) {
1284 PUSHQ(esp, error_code);
1285 }
1286
1287 if (new_stack) {
1288 ss = 0 | dpl;
1289 cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
1290 }
1291 ESP = esp;
1292
1293 selector = (selector & ~3) | dpl;
1294 cpu_x86_load_seg_cache(env, R_CS, selector,
1295 get_seg_base(e1, e2),
1296 get_seg_limit(e1, e2),
1297 e2);
1298 cpu_x86_set_cpl(env, dpl);
1299 env->eip = offset;
1300
1301 /* interrupt gates clear the IF flag (trap gates leave it set) */
1302 if ((type & 1) == 0) {
1303 env->eflags &= ~IF_MASK;
1304 }
1305
1306#ifndef VBOX
1307 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
1308#else
1309 /*
1310 * We must clear VIP/VIF too on interrupt entry, as otherwise FreeBSD
1311 * gets confused by seemingly changed EFLAGS. See #3491 and
1312 * public bug #2341.
1313 */
1314 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK | VIF_MASK | VIP_MASK);
1315#endif
1316}
1317#endif
1318
1319#if defined(CONFIG_USER_ONLY)
1320void helper_syscall(int next_eip_addend)
1321{
1322 env->exception_index = EXCP_SYSCALL;
1323 env->exception_next_eip = env->eip + next_eip_addend;
1324 cpu_loop_exit();
1325}
1326#else
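/* SYSCALL: the new CS selector comes from STAR[47:32] and SS is CS + 8; in
   long mode the target RIP is LSTAR (64-bit callers) or CSTAR (compatibility
   mode) and the flags listed in SFMASK are cleared, while legacy mode jumps
   to the EIP held in the low 32 bits of STAR. */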
1327void helper_syscall(int next_eip_addend)
1328{
1329 int selector;
1330
1331 if (!(env->efer & MSR_EFER_SCE)) {
1332 raise_exception_err(EXCP06_ILLOP, 0);
1333 }
1334 selector = (env->star >> 32) & 0xffff;
1335#ifdef TARGET_X86_64
1336 if (env->hflags & HF_LMA_MASK) {
1337 int code64;
1338
1339 ECX = env->eip + next_eip_addend;
1340 env->regs[11] = compute_eflags();
1341
1342 code64 = env->hflags & HF_CS64_MASK;
1343
1344 cpu_x86_set_cpl(env, 0);
1345 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1346 0, 0xffffffff,
1347 DESC_G_MASK | DESC_P_MASK |
1348 DESC_S_MASK |
1349 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
1350 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1351 0, 0xffffffff,
1352 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1353 DESC_S_MASK |
1354 DESC_W_MASK | DESC_A_MASK);
1355 env->eflags &= ~env->fmask;
1356 load_eflags(env->eflags, 0);
1357 if (code64)
1358 env->eip = env->lstar;
1359 else
1360 env->eip = env->cstar;
1361 } else
1362#endif
1363 {
1364 ECX = (uint32_t)(env->eip + next_eip_addend);
1365
1366 cpu_x86_set_cpl(env, 0);
1367 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1368 0, 0xffffffff,
1369 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1370 DESC_S_MASK |
1371 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1372 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1373 0, 0xffffffff,
1374 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1375 DESC_S_MASK |
1376 DESC_W_MASK | DESC_A_MASK);
1377 env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
1378 env->eip = (uint32_t)env->star;
1379 }
1380}
1381#endif
1382
1383void helper_sysret(int dflag)
1384{
1385 int cpl, selector;
1386
1387 if (!(env->efer & MSR_EFER_SCE)) {
1388 raise_exception_err(EXCP06_ILLOP, 0);
1389 }
1390 cpl = env->hflags & HF_CPL_MASK;
1391 if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
1392 raise_exception_err(EXCP0D_GPF, 0);
1393 }
1394 selector = (env->star >> 48) & 0xffff;
1395#ifdef TARGET_X86_64
1396 if (env->hflags & HF_LMA_MASK) {
1397 if (dflag == 2) {
1398 cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
1399 0, 0xffffffff,
1400 DESC_G_MASK | DESC_P_MASK |
1401 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1402 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
1403 DESC_L_MASK);
1404 env->eip = ECX;
1405 } else {
1406 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1407 0, 0xffffffff,
1408 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1409 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1410 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1411 env->eip = (uint32_t)ECX;
1412 }
1413 cpu_x86_load_seg_cache(env, R_SS, selector + 8,
1414 0, 0xffffffff,
1415 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1416 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1417 DESC_W_MASK | DESC_A_MASK);
1418 load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
1419 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
1420 cpu_x86_set_cpl(env, 3);
1421 } else
1422#endif
1423 {
1424 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1425 0, 0xffffffff,
1426 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1427 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1428 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1429 env->eip = (uint32_t)ECX;
1430 cpu_x86_load_seg_cache(env, R_SS, selector + 8,
1431 0, 0xffffffff,
1432 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1433 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1434 DESC_W_MASK | DESC_A_MASK);
1435 env->eflags |= IF_MASK;
1436 cpu_x86_set_cpl(env, 3);
1437 }
1438#ifdef USE_KQEMU
1439 if (kqemu_is_ok(env)) {
1440 if (env->hflags & HF_LMA_MASK)
1441 CC_OP = CC_OP_EFLAGS;
1442 env->exception_index = -1;
1443 cpu_loop_exit();
1444 }
1445#endif
1446}
1447
1448#ifdef VBOX
1449/**
1450 * Checks and processes external VMM events.
1451 * Called by op_check_external_event() when any of the flags is set and can be serviced.
1452 */
1453void helper_external_event(void)
1454{
1455#if defined(RT_OS_DARWIN) && defined(VBOX_STRICT)
1456 uintptr_t uSP;
1457# ifdef RT_ARCH_AMD64
1458 __asm__ __volatile__("movq %%rsp, %0" : "=r" (uSP));
1459# else
1460 __asm__ __volatile__("movl %%esp, %0" : "=r" (uSP));
1461# endif
1462 AssertMsg(!(uSP & 15), ("xSP=%#p\n", uSP));
1463#endif
1464 /* Keep in sync with flags checked by gen_check_external_event() */
1465 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD)
1466 {
1467 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1468 ~CPU_INTERRUPT_EXTERNAL_HARD);
1469 cpu_interrupt(env, CPU_INTERRUPT_HARD);
1470 }
1471 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_EXIT)
1472 {
1473 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1474 ~CPU_INTERRUPT_EXTERNAL_EXIT);
1475 cpu_interrupt(env, CPU_INTERRUPT_EXIT);
1476 }
1477 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_DMA)
1478 {
1479 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1480 ~CPU_INTERRUPT_EXTERNAL_DMA);
1481 remR3DmaRun(env);
1482 }
1483 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_TIMER)
1484 {
1485 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1486 ~CPU_INTERRUPT_EXTERNAL_TIMER);
1487 remR3TimersRun(env);
1488 }
1489}
1490/* helper for recording call instruction addresses for later scanning */
1491void helper_record_call()
1492{
1493 if ( !(env->state & CPU_RAW_RING0)
1494 && (env->cr[0] & CR0_PG_MASK)
1495 && !(env->eflags & X86_EFL_IF))
1496 remR3RecordCall(env);
1497}
1498#endif /* VBOX */
1499
1500/* real mode interrupt */
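/* The real-mode IVT holds one 4-byte offset:segment pair per vector at
   linear address intno * 4; the handler is entered with FLAGS, CS and IP
   pushed on the caller's stack and IF, TF, AC and RF cleared. */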
1501static void do_interrupt_real(int intno, int is_int, int error_code,
1502 unsigned int next_eip)
1503{
1504 SegmentCache *dt;
1505 target_ulong ptr, ssp;
1506 int selector;
1507 uint32_t offset, esp;
1508 uint32_t old_cs, old_eip;
1509
1510 /* real mode (simpler !) */
1511 dt = &env->idt;
1512#ifndef VBOX
1513 if (intno * 4 + 3 > dt->limit)
1514#else
1515 if ((unsigned)intno * 4 + 3 > dt->limit)
1516#endif
1517 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
1518 ptr = dt->base + intno * 4;
1519 offset = lduw_kernel(ptr);
1520 selector = lduw_kernel(ptr + 2);
1521 esp = ESP;
1522 ssp = env->segs[R_SS].base;
1523 if (is_int)
1524 old_eip = next_eip;
1525 else
1526 old_eip = env->eip;
1527 old_cs = env->segs[R_CS].selector;
1528 /* XXX: use SS segment size ? */
1529 PUSHW(ssp, esp, 0xffff, compute_eflags());
1530 PUSHW(ssp, esp, 0xffff, old_cs);
1531 PUSHW(ssp, esp, 0xffff, old_eip);
1532
1533 /* update processor state */
1534 ESP = (ESP & ~0xffff) | (esp & 0xffff);
1535 env->eip = offset;
1536 env->segs[R_CS].selector = selector;
1537 env->segs[R_CS].base = (selector << 4);
1538 env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
1539}
1540
1541/* fake user mode interrupt */
1542void do_interrupt_user(int intno, int is_int, int error_code,
1543 target_ulong next_eip)
1544{
1545 SegmentCache *dt;
1546 target_ulong ptr;
1547 int dpl, cpl, shift;
1548 uint32_t e2;
1549
1550 dt = &env->idt;
1551 if (env->hflags & HF_LMA_MASK) {
1552 shift = 4;
1553 } else {
1554 shift = 3;
1555 }
1556 ptr = dt->base + (intno << shift);
1557 e2 = ldl_kernel(ptr + 4);
1558
1559 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1560 cpl = env->hflags & HF_CPL_MASK;
1561 /* check privilege if software int */
1562 if (is_int && dpl < cpl)
1563 raise_exception_err(EXCP0D_GPF, (intno << shift) + 2);
1564
1565 /* Since we only emulate user space, we cannot do more than exit
1566 the emulation with the appropriate exception and error
1567 code */
1568 if (is_int)
1569 EIP = next_eip;
1570}
1571
1572/*
1573 * Begin execution of an interrupt. is_int is TRUE if coming from
1574 * the int instruction. next_eip is the EIP value AFTER the interrupt
1575 * instruction. It is only relevant if is_int is TRUE.
1576 */
1577void do_interrupt(int intno, int is_int, int error_code,
1578 target_ulong next_eip, int is_hw)
1579{
1580 if (loglevel & CPU_LOG_INT) {
1581 if ((env->cr[0] & CR0_PE_MASK)) {
1582 static int count;
1583 fprintf(logfile, "%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
1584 count, intno, error_code, is_int,
1585 env->hflags & HF_CPL_MASK,
1586 env->segs[R_CS].selector, EIP,
1587 (int)env->segs[R_CS].base + EIP,
1588 env->segs[R_SS].selector, ESP);
1589 if (intno == 0x0e) {
1590 fprintf(logfile, " CR2=" TARGET_FMT_lx, env->cr[2]);
1591 } else {
1592 fprintf(logfile, " EAX=" TARGET_FMT_lx, EAX);
1593 }
1594 fprintf(logfile, "\n");
1595 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
1596#if 0
1597 {
1598 int i;
1599 uint8_t *ptr;
1600 fprintf(logfile, " code=");
1601 ptr = env->segs[R_CS].base + env->eip;
1602 for(i = 0; i < 16; i++) {
1603 fprintf(logfile, " %02x", ldub(ptr + i));
1604 }
1605 fprintf(logfile, "\n");
1606 }
1607#endif
1608 count++;
1609 }
1610 }
1611 if (env->cr[0] & CR0_PE_MASK) {
1612#ifdef TARGET_X86_64
1613 if (env->hflags & HF_LMA_MASK) {
1614 do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
1615 } else
1616#endif
1617 {
1618#ifdef VBOX
1619 /* int xx *, v86 code and VME enabled? */
1620 if ( (env->eflags & VM_MASK)
1621 && (env->cr[4] & CR4_VME_MASK)
1622 && is_int
1623 && !is_hw
1624 && env->eip + 1 != next_eip /* single byte int 3 goes straight to the protected mode handler */
1625 )
1626 do_soft_interrupt_vme(intno, error_code, next_eip);
1627 else
1628#endif /* VBOX */
1629 do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
1630 }
1631 } else {
1632 do_interrupt_real(intno, is_int, error_code, next_eip);
1633 }
1634}
1635
1636/*
1637 * Check nested exceptions and change to double or triple fault if
1638 * needed. It should only be called if this is not an interrupt.
1639 * Returns the new exception number.
1640 */
1641static int check_exception(int intno, int *error_code)
1642{
1643 int first_contributory = env->old_exception == 0 ||
1644 (env->old_exception >= 10 &&
1645 env->old_exception <= 13);
1646 int second_contributory = intno == 0 ||
1647 (intno >= 10 && intno <= 13);
1648
1649 if (loglevel & CPU_LOG_INT)
1650 fprintf(logfile, "check_exception old: 0x%x new 0x%x\n",
1651 env->old_exception, intno);
1652
1653 if (env->old_exception == EXCP08_DBLE)
1654 cpu_abort(env, "triple fault");
1655
1656 if ((first_contributory && second_contributory)
1657 || (env->old_exception == EXCP0E_PAGE &&
1658 (second_contributory || (intno == EXCP0E_PAGE)))) {
1659 intno = EXCP08_DBLE;
1660 *error_code = 0;
1661 }
1662
1663 if (second_contributory || (intno == EXCP0E_PAGE) ||
1664 (intno == EXCP08_DBLE))
1665 env->old_exception = intno;
1666
1667 return intno;
1668}
1669
1670/*
1671 * Signal an interruption. It is executed in the main CPU loop.
1672 * is_int is TRUE if coming from the int instruction. next_eip is the
1673 * EIP value AFTER the interrupt instruction. It is only relevant if
1674 * is_int is TRUE.
1675 */
1676void raise_interrupt(int intno, int is_int, int error_code,
1677 int next_eip_addend)
1678{
1679#if defined(VBOX) && defined(DEBUG)
1680 Log2(("raise_interrupt: %x %x %x %RGv\n", intno, is_int, error_code, env->eip + next_eip_addend));
1681#endif
1682 if (!is_int) {
1683 helper_svm_check_intercept_param(SVM_EXIT_EXCP_BASE + intno, error_code);
1684 intno = check_exception(intno, &error_code);
1685 } else {
1686 helper_svm_check_intercept_param(SVM_EXIT_SWINT, 0);
1687 }
1688
1689 env->exception_index = intno;
1690 env->error_code = error_code;
1691 env->exception_is_int = is_int;
1692 env->exception_next_eip = env->eip + next_eip_addend;
1693 cpu_loop_exit();
1694}
1695
1696/* shortcuts to generate exceptions */
1697
1698void (raise_exception_err)(int exception_index, int error_code)
1699{
1700 raise_interrupt(exception_index, 0, error_code, 0);
1701}
1702
1703void raise_exception(int exception_index)
1704{
1705 raise_interrupt(exception_index, 0, 0, 0);
1706}
1707
1708/* SMM support */
1709
1710#if defined(CONFIG_USER_ONLY)
1711
1712void do_smm_enter(void)
1713{
1714}
1715
1716void helper_rsm(void)
1717{
1718}
1719
1720#else
1721
1722#ifdef TARGET_X86_64
1723#define SMM_REVISION_ID 0x00020064
1724#else
1725#define SMM_REVISION_ID 0x00020000
1726#endif
1727
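/* On SMM entry the processor state is saved to the SMRAM state-save area at
   SMBASE + 0x8000 (the 0x7exx/0x7fxx offsets below are relative to that
   area), after which execution resumes at SMBASE + 0x8000 with flat,
   real-mode-like segments. */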
1728void do_smm_enter(void)
1729{
1730 target_ulong sm_state;
1731 SegmentCache *dt;
1732 int i, offset;
1733
1734 if (loglevel & CPU_LOG_INT) {
1735 fprintf(logfile, "SMM: enter\n");
1736 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
1737 }
1738
1739 env->hflags |= HF_SMM_MASK;
1740 cpu_smm_update(env);
1741
1742 sm_state = env->smbase + 0x8000;
1743
1744#ifdef TARGET_X86_64
1745 for(i = 0; i < 6; i++) {
1746 dt = &env->segs[i];
1747 offset = 0x7e00 + i * 16;
1748 stw_phys(sm_state + offset, dt->selector);
1749 stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
1750 stl_phys(sm_state + offset + 4, dt->limit);
1751 stq_phys(sm_state + offset + 8, dt->base);
1752 }
1753
1754 stq_phys(sm_state + 0x7e68, env->gdt.base);
1755 stl_phys(sm_state + 0x7e64, env->gdt.limit);
1756
1757 stw_phys(sm_state + 0x7e70, env->ldt.selector);
1758 stq_phys(sm_state + 0x7e78, env->ldt.base);
1759 stl_phys(sm_state + 0x7e74, env->ldt.limit);
1760 stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);
1761
1762 stq_phys(sm_state + 0x7e88, env->idt.base);
1763 stl_phys(sm_state + 0x7e84, env->idt.limit);
1764
1765 stw_phys(sm_state + 0x7e90, env->tr.selector);
1766 stq_phys(sm_state + 0x7e98, env->tr.base);
1767 stl_phys(sm_state + 0x7e94, env->tr.limit);
1768 stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);
1769
1770 stq_phys(sm_state + 0x7ed0, env->efer);
1771
1772 stq_phys(sm_state + 0x7ff8, EAX);
1773 stq_phys(sm_state + 0x7ff0, ECX);
1774 stq_phys(sm_state + 0x7fe8, EDX);
1775 stq_phys(sm_state + 0x7fe0, EBX);
1776 stq_phys(sm_state + 0x7fd8, ESP);
1777 stq_phys(sm_state + 0x7fd0, EBP);
1778 stq_phys(sm_state + 0x7fc8, ESI);
1779 stq_phys(sm_state + 0x7fc0, EDI);
1780 for(i = 8; i < 16; i++)
1781 stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
1782 stq_phys(sm_state + 0x7f78, env->eip);
1783 stl_phys(sm_state + 0x7f70, compute_eflags());
1784 stl_phys(sm_state + 0x7f68, env->dr[6]);
1785 stl_phys(sm_state + 0x7f60, env->dr[7]);
1786
1787 stl_phys(sm_state + 0x7f48, env->cr[4]);
1788 stl_phys(sm_state + 0x7f50, env->cr[3]);
1789 stl_phys(sm_state + 0x7f58, env->cr[0]);
1790
1791 stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1792 stl_phys(sm_state + 0x7f00, env->smbase);
1793#else
1794 stl_phys(sm_state + 0x7ffc, env->cr[0]);
1795 stl_phys(sm_state + 0x7ff8, env->cr[3]);
1796 stl_phys(sm_state + 0x7ff4, compute_eflags());
1797 stl_phys(sm_state + 0x7ff0, env->eip);
1798 stl_phys(sm_state + 0x7fec, EDI);
1799 stl_phys(sm_state + 0x7fe8, ESI);
1800 stl_phys(sm_state + 0x7fe4, EBP);
1801 stl_phys(sm_state + 0x7fe0, ESP);
1802 stl_phys(sm_state + 0x7fdc, EBX);
1803 stl_phys(sm_state + 0x7fd8, EDX);
1804 stl_phys(sm_state + 0x7fd4, ECX);
1805 stl_phys(sm_state + 0x7fd0, EAX);
1806 stl_phys(sm_state + 0x7fcc, env->dr[6]);
1807 stl_phys(sm_state + 0x7fc8, env->dr[7]);
1808
1809 stl_phys(sm_state + 0x7fc4, env->tr.selector);
1810 stl_phys(sm_state + 0x7f64, env->tr.base);
1811 stl_phys(sm_state + 0x7f60, env->tr.limit);
1812 stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);
1813
1814 stl_phys(sm_state + 0x7fc0, env->ldt.selector);
1815 stl_phys(sm_state + 0x7f80, env->ldt.base);
1816 stl_phys(sm_state + 0x7f7c, env->ldt.limit);
1817 stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);
1818
1819 stl_phys(sm_state + 0x7f74, env->gdt.base);
1820 stl_phys(sm_state + 0x7f70, env->gdt.limit);
1821
1822 stl_phys(sm_state + 0x7f58, env->idt.base);
1823 stl_phys(sm_state + 0x7f54, env->idt.limit);
1824
1825 for(i = 0; i < 6; i++) {
1826 dt = &env->segs[i];
1827 if (i < 3)
1828 offset = 0x7f84 + i * 12;
1829 else
1830 offset = 0x7f2c + (i - 3) * 12;
1831 stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
1832 stl_phys(sm_state + offset + 8, dt->base);
1833 stl_phys(sm_state + offset + 4, dt->limit);
1834 stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
1835 }
1836 stl_phys(sm_state + 0x7f14, env->cr[4]);
1837
1838 stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1839 stl_phys(sm_state + 0x7ef8, env->smbase);
1840#endif
1841 /* init SMM cpu state */
1842
1843#ifdef TARGET_X86_64
1844 cpu_load_efer(env, 0);
1845#endif
1846 load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1847 env->eip = 0x00008000;
1848 cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
1849 0xffffffff, 0);
1850 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
1851 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
1852 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
1853 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
1854 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);
1855
1856 cpu_x86_update_cr0(env,
1857 env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK));
1858 cpu_x86_update_cr4(env, 0);
1859 env->dr[7] = 0x00000400;
1860 CC_OP = CC_OP_EFLAGS;
1861}
1862
1863void helper_rsm(void)
1864{
1865#ifdef VBOX
1866 cpu_abort(env, "helper_rsm");
1867#else /* !VBOX */
1870 target_ulong sm_state;
1871 int i, offset;
1872 uint32_t val;
1873
1874 sm_state = env->smbase + 0x8000;
1875#ifdef TARGET_X86_64
1876 cpu_load_efer(env, ldq_phys(sm_state + 0x7ed0));
1877
1878 for(i = 0; i < 6; i++) {
1879 offset = 0x7e00 + i * 16;
1880 cpu_x86_load_seg_cache(env, i,
1881 lduw_phys(sm_state + offset),
1882 ldq_phys(sm_state + offset + 8),
1883 ldl_phys(sm_state + offset + 4),
1884 (lduw_phys(sm_state + offset + 2) & 0xf0ff) << 8);
1885 }
1886
1887 env->gdt.base = ldq_phys(sm_state + 0x7e68);
1888 env->gdt.limit = ldl_phys(sm_state + 0x7e64);
1889
1890 env->ldt.selector = lduw_phys(sm_state + 0x7e70);
1891 env->ldt.base = ldq_phys(sm_state + 0x7e78);
1892 env->ldt.limit = ldl_phys(sm_state + 0x7e74);
1893 env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;
1894
1895 env->idt.base = ldq_phys(sm_state + 0x7e88);
1896 env->idt.limit = ldl_phys(sm_state + 0x7e84);
1897
1898 env->tr.selector = lduw_phys(sm_state + 0x7e90);
1899 env->tr.base = ldq_phys(sm_state + 0x7e98);
1900 env->tr.limit = ldl_phys(sm_state + 0x7e94);
1901 env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;
1902
1903 EAX = ldq_phys(sm_state + 0x7ff8);
1904 ECX = ldq_phys(sm_state + 0x7ff0);
1905 EDX = ldq_phys(sm_state + 0x7fe8);
1906 EBX = ldq_phys(sm_state + 0x7fe0);
1907 ESP = ldq_phys(sm_state + 0x7fd8);
1908 EBP = ldq_phys(sm_state + 0x7fd0);
1909 ESI = ldq_phys(sm_state + 0x7fc8);
1910 EDI = ldq_phys(sm_state + 0x7fc0);
1911 for(i = 8; i < 16; i++)
1912 env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
1913 env->eip = ldq_phys(sm_state + 0x7f78);
1914 load_eflags(ldl_phys(sm_state + 0x7f70),
1915 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1916 env->dr[6] = ldl_phys(sm_state + 0x7f68);
1917 env->dr[7] = ldl_phys(sm_state + 0x7f60);
1918
1919 cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
1920 cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
1921 cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));
1922
1923 val = ldl_phys(sm_state + 0x7efc); /* revision ID */
1924 if (val & 0x20000) {
1925 env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
1926 }
1927#else
1928 cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
1929 cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
1930 load_eflags(ldl_phys(sm_state + 0x7ff4),
1931 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1932 env->eip = ldl_phys(sm_state + 0x7ff0);
1933 EDI = ldl_phys(sm_state + 0x7fec);
1934 ESI = ldl_phys(sm_state + 0x7fe8);
1935 EBP = ldl_phys(sm_state + 0x7fe4);
1936 ESP = ldl_phys(sm_state + 0x7fe0);
1937 EBX = ldl_phys(sm_state + 0x7fdc);
1938 EDX = ldl_phys(sm_state + 0x7fd8);
1939 ECX = ldl_phys(sm_state + 0x7fd4);
1940 EAX = ldl_phys(sm_state + 0x7fd0);
1941 env->dr[6] = ldl_phys(sm_state + 0x7fcc);
1942 env->dr[7] = ldl_phys(sm_state + 0x7fc8);
1943
1944 env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
1945 env->tr.base = ldl_phys(sm_state + 0x7f64);
1946 env->tr.limit = ldl_phys(sm_state + 0x7f60);
1947 env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;
1948
1949 env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
1950 env->ldt.base = ldl_phys(sm_state + 0x7f80);
1951 env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
1952 env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;
1953
1954 env->gdt.base = ldl_phys(sm_state + 0x7f74);
1955 env->gdt.limit = ldl_phys(sm_state + 0x7f70);
1956
1957 env->idt.base = ldl_phys(sm_state + 0x7f58);
1958 env->idt.limit = ldl_phys(sm_state + 0x7f54);
1959
1960 for(i = 0; i < 6; i++) {
1961 if (i < 3)
1962 offset = 0x7f84 + i * 12;
1963 else
1964 offset = 0x7f2c + (i - 3) * 12;
1965 cpu_x86_load_seg_cache(env, i,
1966 ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
1967 ldl_phys(sm_state + offset + 8),
1968 ldl_phys(sm_state + offset + 4),
1969 (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
1970 }
1971 cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));
1972
1973 val = ldl_phys(sm_state + 0x7efc); /* revision ID */
1974 if (val & 0x20000) {
1975 env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
1976 }
1977#endif
1978 CC_OP = CC_OP_EFLAGS;
1979 env->hflags &= ~HF_SMM_MASK;
1980 cpu_smm_update(env);
1981
1982 if (loglevel & CPU_LOG_INT) {
1983 fprintf(logfile, "SMM: after RSM\n");
1984 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
1985 }
1986#endif /* !VBOX */
1987}
1988
1989#endif /* !CONFIG_USER_ONLY */
1990
1991
1992/* division, flags are undefined */
1993
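/* Unsigned/signed division helpers. Following the hardware convention, the
   double-width dividend lives in the accumulator registers (AX, DX:AX or
   EDX:EAX), the quotient goes back into the low half and the remainder into
   the high half. A divisor of zero, or a quotient that does not fit the
   destination, raises #DE (EXCP00_DIVZ).
   Illustration for the 8-bit case: AX = 0x0123 (291), divisor 0x10 (16)
   gives quotient 0x12 (18) in AL and remainder 0x03 in AH, i.e. AX = 0x0312. */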
1994void helper_divb_AL(target_ulong t0)
1995{
1996 unsigned int num, den, q, r;
1997
1998 num = (EAX & 0xffff);
1999 den = (t0 & 0xff);
2000 if (den == 0) {
2001 raise_exception(EXCP00_DIVZ);
2002 }
2003 q = (num / den);
2004 if (q > 0xff)
2005 raise_exception(EXCP00_DIVZ);
2006 q &= 0xff;
2007 r = (num % den) & 0xff;
2008 EAX = (EAX & ~0xffff) | (r << 8) | q;
2009}
2010
2011void helper_idivb_AL(target_ulong t0)
2012{
2013 int num, den, q, r;
2014
2015 num = (int16_t)EAX;
2016 den = (int8_t)t0;
2017 if (den == 0) {
2018 raise_exception(EXCP00_DIVZ);
2019 }
2020 q = (num / den);
2021 if (q != (int8_t)q)
2022 raise_exception(EXCP00_DIVZ);
2023 q &= 0xff;
2024 r = (num % den) & 0xff;
2025 EAX = (EAX & ~0xffff) | (r << 8) | q;
2026}
2027
2028void helper_divw_AX(target_ulong t0)
2029{
2030 unsigned int num, den, q, r;
2031
2032 num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
2033 den = (t0 & 0xffff);
2034 if (den == 0) {
2035 raise_exception(EXCP00_DIVZ);
2036 }
2037 q = (num / den);
2038 if (q > 0xffff)
2039 raise_exception(EXCP00_DIVZ);
2040 q &= 0xffff;
2041 r = (num % den) & 0xffff;
2042 EAX = (EAX & ~0xffff) | q;
2043 EDX = (EDX & ~0xffff) | r;
2044}
2045
2046void helper_idivw_AX(target_ulong t0)
2047{
2048 int num, den, q, r;
2049
2050 num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
2051 den = (int16_t)t0;
2052 if (den == 0) {
2053 raise_exception(EXCP00_DIVZ);
2054 }
2055 q = (num / den);
2056 if (q != (int16_t)q)
2057 raise_exception(EXCP00_DIVZ);
2058 q &= 0xffff;
2059 r = (num % den) & 0xffff;
2060 EAX = (EAX & ~0xffff) | q;
2061 EDX = (EDX & ~0xffff) | r;
2062}
2063
2064void helper_divl_EAX(target_ulong t0)
2065{
2066 unsigned int den, r;
2067 uint64_t num, q;
2068
2069 num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
2070 den = t0;
2071 if (den == 0) {
2072 raise_exception(EXCP00_DIVZ);
2073 }
2074 q = (num / den);
2075 r = (num % den);
2076 if (q > 0xffffffff)
2077 raise_exception(EXCP00_DIVZ);
2078 EAX = (uint32_t)q;
2079 EDX = (uint32_t)r;
2080}
2081
2082void helper_idivl_EAX(target_ulong t0)
2083{
2084 int den, r;
2085 int64_t num, q;
2086
2087 num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
2088 den = t0;
2089 if (den == 0) {
2090 raise_exception(EXCP00_DIVZ);
2091 }
2092 q = (num / den);
2093 r = (num % den);
2094 if (q != (int32_t)q)
2095 raise_exception(EXCP00_DIVZ);
2096 EAX = (uint32_t)q;
2097 EDX = (uint32_t)r;
2098}
2099
2100/* bcd */
2101
2102/* XXX: exception */
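/* AAM/AAD adjust AL/AH around an arbitrary base (10 for the standard
   encoding): AAM splits AL into AH = AL / base, AL = AL % base; AAD folds
   the pair back with AL = AH * base + AL and AH cleared (the helper rewrites
   the whole low 16 bits, which has the same effect).
   Example with base 10: AL = 0x3C (60), AAM gives AH = 6, AL = 0. */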
2103void helper_aam(int base)
2104{
2105 int al, ah;
2106 al = EAX & 0xff;
2107 ah = al / base;
2108 al = al % base;
2109 EAX = (EAX & ~0xffff) | al | (ah << 8);
2110 CC_DST = al;
2111}
2112
2113void helper_aad(int base)
2114{
2115 int al, ah;
2116 al = EAX & 0xff;
2117 ah = (EAX >> 8) & 0xff;
2118 al = ((ah * base) + al) & 0xff;
2119 EAX = (EAX & ~0xffff) | al;
2120 CC_DST = al;
2121}
2122
2123void helper_aaa(void)
2124{
2125 int icarry;
2126 int al, ah, af;
2127 int eflags;
2128
2129 eflags = cc_table[CC_OP].compute_all();
2130 af = eflags & CC_A;
2131 al = EAX & 0xff;
2132 ah = (EAX >> 8) & 0xff;
2133
2134 icarry = (al > 0xf9);
2135 if (((al & 0x0f) > 9 ) || af) {
2136 al = (al + 6) & 0x0f;
2137 ah = (ah + 1 + icarry) & 0xff;
2138 eflags |= CC_C | CC_A;
2139 } else {
2140 eflags &= ~(CC_C | CC_A);
2141 al &= 0x0f;
2142 }
2143 EAX = (EAX & ~0xffff) | al | (ah << 8);
2144 CC_SRC = eflags;
2145 FORCE_RET();
2146}
2147
2148void helper_aas(void)
2149{
2150 int icarry;
2151 int al, ah, af;
2152 int eflags;
2153
2154 eflags = cc_table[CC_OP].compute_all();
2155 af = eflags & CC_A;
2156 al = EAX & 0xff;
2157 ah = (EAX >> 8) & 0xff;
2158
2159 icarry = (al < 6);
2160 if (((al & 0x0f) > 9 ) || af) {
2161 al = (al - 6) & 0x0f;
2162 ah = (ah - 1 - icarry) & 0xff;
2163 eflags |= CC_C | CC_A;
2164 } else {
2165 eflags &= ~(CC_C | CC_A);
2166 al &= 0x0f;
2167 }
2168 EAX = (EAX & ~0xffff) | al | (ah << 8);
2169 CC_SRC = eflags;
2170 FORCE_RET();
2171}
2172
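/* DAA/DAS adjust AL after a packed-BCD add/subtract: if the low nibble
   exceeds 9 (or AF is set) AL is corrected by +/-6, and if a decimal carry
   out of the high nibble is indicated (or CF is set) AL is corrected by
   +/-0x60. ZF, PF and SF are then recomputed by hand from the result.
   Example: 0x38 + 0x45 = 0x7D; DAA turns AL into 0x83, matching the decimal
   sum 38 + 45 = 83. */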
2173void helper_daa(void)
2174{
2175 int al, af, cf;
2176 int eflags;
2177
2178 eflags = cc_table[CC_OP].compute_all();
2179 cf = eflags & CC_C;
2180 af = eflags & CC_A;
2181 al = EAX & 0xff;
2182
2183 eflags = 0;
2184 if (((al & 0x0f) > 9 ) || af) {
2185 al = (al + 6) & 0xff;
2186 eflags |= CC_A;
2187 }
2188 if ((al > 0x9f) || cf) {
2189 al = (al + 0x60) & 0xff;
2190 eflags |= CC_C;
2191 }
2192 EAX = (EAX & ~0xff) | al;
2193 /* well, speed is not an issue here, so we compute the flags by hand */
2194 eflags |= (al == 0) << 6; /* zf */
2195 eflags |= parity_table[al]; /* pf */
2196 eflags |= (al & 0x80); /* sf */
2197 CC_SRC = eflags;
2198 FORCE_RET();
2199}
2200
2201void helper_das(void)
2202{
2203 int al, al1, af, cf;
2204 int eflags;
2205
2206 eflags = cc_table[CC_OP].compute_all();
2207 cf = eflags & CC_C;
2208 af = eflags & CC_A;
2209 al = EAX & 0xff;
2210
2211 eflags = 0;
2212 al1 = al;
2213 if (((al & 0x0f) > 9 ) || af) {
2214 eflags |= CC_A;
2215 if (al < 6 || cf)
2216 eflags |= CC_C;
2217 al = (al - 6) & 0xff;
2218 }
2219 if ((al1 > 0x99) || cf) {
2220 al = (al - 0x60) & 0xff;
2221 eflags |= CC_C;
2222 }
2223 EAX = (EAX & ~0xff) | al;
2224 /* well, speed is not an issue here, so we compute the flags by hand */
2225 eflags |= (al == 0) << 6; /* zf */
2226 eflags |= parity_table[al]; /* pf */
2227 eflags |= (al & 0x80); /* sf */
2228 CC_SRC = eflags;
2229 FORCE_RET();
2230}
2231
2232void helper_into(int next_eip_addend)
2233{
2234 int eflags;
2235 eflags = cc_table[CC_OP].compute_all();
2236 if (eflags & CC_O) {
2237 raise_interrupt(EXCP04_INTO, 1, 0, next_eip_addend);
2238 }
2239}
2240
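/* CMPXCHG8B: compare the 64-bit value at [a0] with EDX:EAX. On a match
   ECX:EBX is stored and ZF is set; otherwise the memory value is loaded
   into EDX:EAX and ZF is cleared. The store in the failure path is
   intentional and keeps the emulation close to the locked read-modify-write
   cycle the real instruction performs. */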
2241void helper_cmpxchg8b(target_ulong a0)
2242{
2243 uint64_t d;
2244 int eflags;
2245
2246 eflags = cc_table[CC_OP].compute_all();
2247 d = ldq(a0);
2248 if (d == (((uint64_t)EDX << 32) | (uint32_t)EAX)) {
2249 stq(a0, ((uint64_t)ECX << 32) | (uint32_t)EBX);
2250 eflags |= CC_Z;
2251 } else {
2252 /* always do the store */
2253 stq(a0, d);
2254 EDX = (uint32_t)(d >> 32);
2255 EAX = (uint32_t)d;
2256 eflags &= ~CC_Z;
2257 }
2258 CC_SRC = eflags;
2259}
2260
2261#ifdef TARGET_X86_64
2262void helper_cmpxchg16b(target_ulong a0)
2263{
2264 uint64_t d0, d1;
2265 int eflags;
2266
2267 if ((a0 & 0xf) != 0)
2268 raise_exception(EXCP0D_GPF);
2269 eflags = cc_table[CC_OP].compute_all();
2270 d0 = ldq(a0);
2271 d1 = ldq(a0 + 8);
2272 if (d0 == EAX && d1 == EDX) {
2273 stq(a0, EBX);
2274 stq(a0 + 8, ECX);
2275 eflags |= CC_Z;
2276 } else {
2277 /* always do the store */
2278 stq(a0, d0);
2279 stq(a0 + 8, d1);
2280 EDX = d1;
2281 EAX = d0;
2282 eflags &= ~CC_Z;
2283 }
2284 CC_SRC = eflags;
2285}
2286#endif
2287
2288void helper_single_step(void)
2289{
2290 env->dr[6] |= 0x4000;
2291 raise_exception(EXCP01_SSTP);
2292}
2293
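/* CPUID dispatch. In the stand-alone (non-VBOX) build the leaves are
   synthesized here from the cpuid_* fields of the CPU state; an index beyond
   the advertised maximum (cpuid_level / cpuid_xlevel) falls back to the
   highest basic leaf. Leaf 0x80000008 packs the address widths into EAX:
   bits 7-0 are the physical width and bits 15-8 the linear width, so 0x3028
   means 40-bit physical / 48-bit linear addressing. In the VBOX build the
   whole job is delegated to remR3CpuId(). */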
2294void helper_cpuid(void)
2295{
2296#ifndef VBOX
2297 uint32_t index;
2298
2299 helper_svm_check_intercept_param(SVM_EXIT_CPUID, 0);
2300
2301 index = (uint32_t)EAX;
2302 /* test if maximum index reached */
2303 if (index & 0x80000000) {
2304 if (index > env->cpuid_xlevel)
2305 index = env->cpuid_level;
2306 } else {
2307 if (index > env->cpuid_level)
2308 index = env->cpuid_level;
2309 }
2310
2311 switch(index) {
2312 case 0:
2313 EAX = env->cpuid_level;
2314 EBX = env->cpuid_vendor1;
2315 EDX = env->cpuid_vendor2;
2316 ECX = env->cpuid_vendor3;
2317 break;
2318 case 1:
2319 EAX = env->cpuid_version;
2320 EBX = (env->cpuid_apic_id << 24) | 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
2321 ECX = env->cpuid_ext_features;
2322 EDX = env->cpuid_features;
2323 break;
2324 case 2:
2325 /* cache info: needed for Pentium Pro compatibility */
2326 EAX = 1;
2327 EBX = 0;
2328 ECX = 0;
2329 EDX = 0x2c307d;
2330 break;
2331 case 4:
2332 /* cache info: needed for Core compatibility */
2333 switch (ECX) {
2334 case 0: /* L1 dcache info */
2335 EAX = 0x0000121;
2336 EBX = 0x1c0003f;
2337 ECX = 0x000003f;
2338 EDX = 0x0000001;
2339 break;
2340 case 1: /* L1 icache info */
2341 EAX = 0x0000122;
2342 EBX = 0x1c0003f;
2343 ECX = 0x000003f;
2344 EDX = 0x0000001;
2345 break;
2346 case 2: /* L2 cache info */
2347 EAX = 0x0000143;
2348 EBX = 0x3c0003f;
2349 ECX = 0x0000fff;
2350 EDX = 0x0000001;
2351 break;
2352 default: /* end of info */
2353 EAX = 0;
2354 EBX = 0;
2355 ECX = 0;
2356 EDX = 0;
2357 break;
2358 }
2359
2360 break;
2361 case 5:
2362 /* mwait info: needed for Core compatibility */
2363 EAX = 0; /* Smallest monitor-line size in bytes */
2364 EBX = 0; /* Largest monitor-line size in bytes */
2365 ECX = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
2366 EDX = 0;
2367 break;
2368 case 6:
2369 /* Thermal and Power Leaf */
2370 EAX = 0;
2371 EBX = 0;
2372 ECX = 0;
2373 EDX = 0;
2374 break;
2375 case 9:
2376 /* Direct Cache Access Information Leaf */
2377 EAX = 0; /* Bits 0-31 in DCA_CAP MSR */
2378 EBX = 0;
2379 ECX = 0;
2380 EDX = 0;
2381 break;
2382 case 0xA:
2383 /* Architectural Performance Monitoring Leaf */
2384 EAX = 0;
2385 EBX = 0;
2386 ECX = 0;
2387 EDX = 0;
2388 break;
2389 case 0x80000000:
2390 EAX = env->cpuid_xlevel;
2391 EBX = env->cpuid_vendor1;
2392 EDX = env->cpuid_vendor2;
2393 ECX = env->cpuid_vendor3;
2394 break;
2395 case 0x80000001:
2396 EAX = env->cpuid_features;
2397 EBX = 0;
2398 ECX = env->cpuid_ext3_features;
2399 EDX = env->cpuid_ext2_features;
2400 break;
2401 case 0x80000002:
2402 case 0x80000003:
2403 case 0x80000004:
2404 EAX = env->cpuid_model[(index - 0x80000002) * 4 + 0];
2405 EBX = env->cpuid_model[(index - 0x80000002) * 4 + 1];
2406 ECX = env->cpuid_model[(index - 0x80000002) * 4 + 2];
2407 EDX = env->cpuid_model[(index - 0x80000002) * 4 + 3];
2408 break;
2409 case 0x80000005:
2410 /* cache info (L1 cache) */
2411 EAX = 0x01ff01ff;
2412 EBX = 0x01ff01ff;
2413 ECX = 0x40020140;
2414 EDX = 0x40020140;
2415 break;
2416 case 0x80000006:
2417 /* cache info (L2 cache) */
2418 EAX = 0;
2419 EBX = 0x42004200;
2420 ECX = 0x02008140;
2421 EDX = 0;
2422 break;
2423 case 0x80000008:
2424 /* virtual & phys address size in low 2 bytes. */
2425/* XXX: This value must match the one used in the MMU code. */
2426 if (env->cpuid_ext2_features & CPUID_EXT2_LM) {
2427 /* 64 bit processor */
2428#if defined(USE_KQEMU)
2429 EAX = 0x00003020; /* 48 bits virtual, 32 bits physical */
2430#else
2431/* XXX: The physical address space is limited to 42 bits in exec.c. */
2432 EAX = 0x00003028; /* 48 bits virtual, 40 bits physical */
2433#endif
2434 } else {
2435#if defined(USE_KQEMU)
2436 EAX = 0x00000020; /* 32 bits physical */
2437#else
2438 if (env->cpuid_features & CPUID_PSE36)
2439 EAX = 0x00000024; /* 36 bits physical */
2440 else
2441 EAX = 0x00000020; /* 32 bits physical */
2442#endif
2443 }
2444 EBX = 0;
2445 ECX = 0;
2446 EDX = 0;
2447 break;
2448 case 0x8000000A:
2449 EAX = 0x00000001;
2450 EBX = 0;
2451 ECX = 0;
2452 EDX = 0;
2453 break;
2454 default:
2455 /* reserved values: zero */
2456 EAX = 0;
2457 EBX = 0;
2458 ECX = 0;
2459 EDX = 0;
2460 break;
2461 }
2462#else /* VBOX */
2463 remR3CpuId(env, EAX, &EAX, &EBX, &ECX, &EDX);
2464#endif /* VBOX */
2465}
2466
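/* Helper for ENTER with a non-zero nesting level: it copies level-1 outer
   frame pointers from the old frame (addressed through EBP and the SS base)
   onto the new stack and finally pushes t1, the value supplied by the
   translated ENTER code. All accesses are wrapped with the current
   stack-pointer mask, so 16-bit and 32-bit stacks are both handled; ESP
   itself is left for the translated code to update. */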
2467void helper_enter_level(int level, int data32, target_ulong t1)
2468{
2469 target_ulong ssp;
2470 uint32_t esp_mask, esp, ebp;
2471
2472 esp_mask = get_sp_mask(env->segs[R_SS].flags);
2473 ssp = env->segs[R_SS].base;
2474 ebp = EBP;
2475 esp = ESP;
2476 if (data32) {
2477 /* 32 bit */
2478 esp -= 4;
2479 while (--level) {
2480 esp -= 4;
2481 ebp -= 4;
2482 stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
2483 }
2484 esp -= 4;
2485 stl(ssp + (esp & esp_mask), t1);
2486 } else {
2487 /* 16 bit */
2488 esp -= 2;
2489 while (--level) {
2490 esp -= 2;
2491 ebp -= 2;
2492 stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
2493 }
2494 esp -= 2;
2495 stw(ssp + (esp & esp_mask), t1);
2496 }
2497}
2498
2499#ifdef TARGET_X86_64
2500void helper_enter64_level(int level, int data64, target_ulong t1)
2501{
2502 target_ulong esp, ebp;
2503 ebp = EBP;
2504 esp = ESP;
2505
2506 if (data64) {
2507 /* 64 bit */
2508 esp -= 8;
2509 while (--level) {
2510 esp -= 8;
2511 ebp -= 8;
2512 stq(esp, ldq(ebp));
2513 }
2514 esp -= 8;
2515 stq(esp, t1);
2516 } else {
2517 /* 16 bit */
2518 esp -= 2;
2519 while (--level) {
2520 esp -= 2;
2521 ebp -= 2;
2522 stw(esp, lduw(ebp));
2523 }
2524 esp -= 2;
2525 stw(esp, t1);
2526 }
2527}
2528#endif
2529
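/* LLDT: a non-null selector must point into the GDT (TI bit clear) at a
   present descriptor of type 2 (LDT); otherwise #GP or #NP is raised with
   the selector as error code. In long mode the descriptor is 16 bytes, so
   the entry-limit check widens and the third dword supplies bits 63-32 of
   the base. A null selector simply leaves the LDT unusable (base and limit
   cleared). */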
2530void helper_lldt(int selector)
2531{
2532 SegmentCache *dt;
2533 uint32_t e1, e2;
2534#ifndef VBOX
2535 int index, entry_limit;
2536#else
2537 unsigned int index, entry_limit;
2538#endif
2539 target_ulong ptr;
2540
2541#ifdef VBOX
2542 Log(("helper_lldt_T0: old ldtr=%RTsel {.base=%RGv, .limit=%RGv} new=%RTsel\n",
2543 (RTSEL)env->ldt.selector, (RTGCPTR)env->ldt.base, (RTGCPTR)env->ldt.limit, (RTSEL)(selector & 0xffff)));
2544#endif
2545
2546 selector &= 0xffff;
2547 if ((selector & 0xfffc) == 0) {
2548 /* XXX: NULL selector case: invalid LDT */
2549 env->ldt.base = 0;
2550 env->ldt.limit = 0;
2551 } else {
2552 if (selector & 0x4)
2553 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2554 dt = &env->gdt;
2555 index = selector & ~7;
2556#ifdef TARGET_X86_64
2557 if (env->hflags & HF_LMA_MASK)
2558 entry_limit = 15;
2559 else
2560#endif
2561 entry_limit = 7;
2562 if ((index + entry_limit) > dt->limit)
2563 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2564 ptr = dt->base + index;
2565 e1 = ldl_kernel(ptr);
2566 e2 = ldl_kernel(ptr + 4);
2567 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
2568 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2569 if (!(e2 & DESC_P_MASK))
2570 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2571#ifdef TARGET_X86_64
2572 if (env->hflags & HF_LMA_MASK) {
2573 uint32_t e3;
2574 e3 = ldl_kernel(ptr + 8);
2575 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2576 env->ldt.base |= (target_ulong)e3 << 32;
2577 } else
2578#endif
2579 {
2580 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2581 }
2582 }
2583 env->ldt.selector = selector;
2584#ifdef VBOX
2585 Log(("helper_lldt_T0: new ldtr=%RTsel {.base=%RGv, .limit=%RGv}\n",
2586 (RTSEL)env->ldt.selector, (RTGCPTR)env->ldt.base, (RTGCPTR)env->ldt.limit));
2587#endif
2588}
2589
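/* LTR is the same dance for the task register: the descriptor must be an
   available 286/386 TSS (type 1 or 9), and after loading it the GDT entry is
   rewritten with the busy bit set. */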
2590void helper_ltr(int selector)
2591{
2592 SegmentCache *dt;
2593 uint32_t e1, e2;
2594#ifndef VBOX
2595 int index, type, entry_limit;
2596#else
2597 unsigned int index;
2598 int type, entry_limit;
2599#endif
2600 target_ulong ptr;
2601
2602#ifdef VBOX
2603 Log(("helper_ltr: old tr=%RTsel {.base=%RGv, .limit=%RGv, .flags=%RX32} new=%RTsel\n",
2604 (RTSEL)env->tr.selector, (RTGCPTR)env->tr.base, (RTGCPTR)env->tr.limit,
2605 env->tr.flags, (RTSEL)(selector & 0xffff)));
2606#endif
2607 selector &= 0xffff;
2608 if ((selector & 0xfffc) == 0) {
2609 /* NULL selector case: invalid TR */
2610 env->tr.base = 0;
2611 env->tr.limit = 0;
2612 env->tr.flags = 0;
2613 } else {
2614 if (selector & 0x4)
2615 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2616 dt = &env->gdt;
2617 index = selector & ~7;
2618#ifdef TARGET_X86_64
2619 if (env->hflags & HF_LMA_MASK)
2620 entry_limit = 15;
2621 else
2622#endif
2623 entry_limit = 7;
2624 if ((index + entry_limit) > dt->limit)
2625 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2626 ptr = dt->base + index;
2627 e1 = ldl_kernel(ptr);
2628 e2 = ldl_kernel(ptr + 4);
2629 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2630 if ((e2 & DESC_S_MASK) ||
2631 (type != 1 && type != 9))
2632 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2633 if (!(e2 & DESC_P_MASK))
2634 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2635#ifdef TARGET_X86_64
2636 if (env->hflags & HF_LMA_MASK) {
2637 uint32_t e3, e4;
2638 e3 = ldl_kernel(ptr + 8);
2639 e4 = ldl_kernel(ptr + 12);
2640 if ((e4 >> DESC_TYPE_SHIFT) & 0xf)
2641 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2642 load_seg_cache_raw_dt(&env->tr, e1, e2);
2643 env->tr.base |= (target_ulong)e3 << 32;
2644 } else
2645#endif
2646 {
2647 load_seg_cache_raw_dt(&env->tr, e1, e2);
2648 }
2649 e2 |= DESC_TSS_BUSY_MASK;
2650 stl_kernel(ptr + 4, e2);
2651 }
2652 env->tr.selector = selector;
2653#ifdef VBOX
2654 Log(("helper_ltr: new tr=%RTsel {.base=%RGv, .limit=%RGv, .flags=%RX32} new=%RTsel\n",
2655 (RTSEL)env->tr.selector, (RTGCPTR)env->tr.base, (RTGCPTR)env->tr.limit,
2656 env->tr.flags, (RTSEL)(selector & 0xffff)));
2657#endif
2658}
2659
2660/* only works if protected mode and not VM86. seg_reg must be != R_CS */
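/* A null selector is accepted for the data segment registers (and for SS
   only in 64-bit mode outside CPL 3) and just clears the cache entry.
   Otherwise the usual protection checks apply: SS must be a writable data
   segment with RPL == DPL == CPL, the other registers must be readable and,
   for non-conforming segments, satisfy DPL >= max(CPL, RPL). The accessed
   bit of the descriptor is set on a successful load. */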
2661void helper_load_seg(int seg_reg, int selector)
2662{
2663 uint32_t e1, e2;
2664 int cpl, dpl, rpl;
2665 SegmentCache *dt;
2666#ifndef VBOX
2667 int index;
2668#else
2669 unsigned int index;
2670#endif
2671 target_ulong ptr;
2672
2673 selector &= 0xffff;
2674 cpl = env->hflags & HF_CPL_MASK;
2675
2676#ifdef VBOX
2677 /* Trying to load a selector with CPL=1? */
2678 if (cpl == 0 && (selector & 3) == 1 && (env->state & CPU_RAW_RING0))
2679 {
2680 Log(("RPL 1 -> sel %04X -> %04X\n", selector, selector & 0xfffc));
2681 selector = selector & 0xfffc;
2682 }
2683#endif
2684 if ((selector & 0xfffc) == 0) {
2685 /* null selector case */
2686 if (seg_reg == R_SS
2687#ifdef TARGET_X86_64
2688 && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
2689#endif
2690 )
2691 raise_exception_err(EXCP0D_GPF, 0);
2692 cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
2693 } else {
2694
2695 if (selector & 0x4)
2696 dt = &env->ldt;
2697 else
2698 dt = &env->gdt;
2699 index = selector & ~7;
2700 if ((index + 7) > dt->limit)
2701 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2702 ptr = dt->base + index;
2703 e1 = ldl_kernel(ptr);
2704 e2 = ldl_kernel(ptr + 4);
2705
2706 if (!(e2 & DESC_S_MASK))
2707 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2708 rpl = selector & 3;
2709 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2710 if (seg_reg == R_SS) {
2711 /* must be writable segment */
2712 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
2713 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2714 if (rpl != cpl || dpl != cpl)
2715 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2716 } else {
2717 /* must be readable segment */
2718 if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
2719 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2720
2721 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2722 /* if not conforming code, test rights */
2723 if (dpl < cpl || dpl < rpl)
2724 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2725 }
2726 }
2727
2728 if (!(e2 & DESC_P_MASK)) {
2729 if (seg_reg == R_SS)
2730 raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
2731 else
2732 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2733 }
2734
2735 /* set the access bit if not already set */
2736 if (!(e2 & DESC_A_MASK)) {
2737 e2 |= DESC_A_MASK;
2738 stl_kernel(ptr + 4, e2);
2739 }
2740
2741 cpu_x86_load_seg_cache(env, seg_reg, selector,
2742 get_seg_base(e1, e2),
2743 get_seg_limit(e1, e2),
2744 e2);
2745#if 0
2746 fprintf(logfile, "load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
2747 selector, (unsigned long)sc->base, sc->limit, sc->flags);
2748#endif
2749 }
2750}
2751
2752/* protected mode jump */
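/* Far JMP in protected mode: a code-segment target is checked directly
   (conforming: DPL <= CPL; non-conforming: RPL <= CPL and DPL == CPL) and
   CS:EIP is reloaded at the current privilege level. A system descriptor
   instead routes through a TSS or task gate (task switch) or a 286/386 call
   gate, in which case the real target CS:EIP comes out of the gate and is
   re-validated before the jump. */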
2753void helper_ljmp_protected(int new_cs, target_ulong new_eip,
2754 int next_eip_addend)
2755{
2756 int gate_cs, type;
2757 uint32_t e1, e2, cpl, dpl, rpl, limit;
2758 target_ulong next_eip;
2759
2760#ifdef VBOX
2761 e1 = e2 = 0;
2762#endif
2763 if ((new_cs & 0xfffc) == 0)
2764 raise_exception_err(EXCP0D_GPF, 0);
2765 if (load_segment(&e1, &e2, new_cs) != 0)
2766 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2767 cpl = env->hflags & HF_CPL_MASK;
2768 if (e2 & DESC_S_MASK) {
2769 if (!(e2 & DESC_CS_MASK))
2770 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2771 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2772 if (e2 & DESC_C_MASK) {
2773 /* conforming code segment */
2774 if (dpl > cpl)
2775 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2776 } else {
2777 /* non conforming code segment */
2778 rpl = new_cs & 3;
2779 if (rpl > cpl)
2780 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2781 if (dpl != cpl)
2782 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2783 }
2784 if (!(e2 & DESC_P_MASK))
2785 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2786 limit = get_seg_limit(e1, e2);
2787 if (new_eip > limit &&
2788 !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK))
2789 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2790 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2791 get_seg_base(e1, e2), limit, e2);
2792 EIP = new_eip;
2793 } else {
2794 /* jump to call or task gate */
2795 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2796 rpl = new_cs & 3;
2797 cpl = env->hflags & HF_CPL_MASK;
2798 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2799 switch(type) {
2800 case 1: /* 286 TSS */
2801 case 9: /* 386 TSS */
2802 case 5: /* task gate */
2803 if (dpl < cpl || dpl < rpl)
2804 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2805 next_eip = env->eip + next_eip_addend;
2806 switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
2807 CC_OP = CC_OP_EFLAGS;
2808 break;
2809 case 4: /* 286 call gate */
2810 case 12: /* 386 call gate */
2811 if ((dpl < cpl) || (dpl < rpl))
2812 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2813 if (!(e2 & DESC_P_MASK))
2814 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2815 gate_cs = e1 >> 16;
2816 new_eip = (e1 & 0xffff);
2817 if (type == 12)
2818 new_eip |= (e2 & 0xffff0000);
2819 if (load_segment(&e1, &e2, gate_cs) != 0)
2820 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2821 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2822 /* must be code segment */
2823 if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
2824 (DESC_S_MASK | DESC_CS_MASK)))
2825 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2826 if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
2827 (!(e2 & DESC_C_MASK) && (dpl != cpl)))
2828 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2829 if (!(e2 & DESC_P_MASK))
2830#ifdef VBOX /* See page 3-514 of 253666.pdf */
2831 raise_exception_err(EXCP0B_NOSEG, gate_cs & 0xfffc);
2832#else
2833 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2834#endif
2835 limit = get_seg_limit(e1, e2);
2836 if (new_eip > limit)
2837 raise_exception_err(EXCP0D_GPF, 0);
2838 cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
2839 get_seg_base(e1, e2), limit, e2);
2840 EIP = new_eip;
2841 break;
2842 default:
2843 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2844 break;
2845 }
2846 }
2847}
2848
2849/* real mode call */
2850void helper_lcall_real(int new_cs, target_ulong new_eip1,
2851 int shift, int next_eip)
2852{
2853 int new_eip;
2854 uint32_t esp, esp_mask;
2855 target_ulong ssp;
2856
2857 new_eip = new_eip1;
2858 esp = ESP;
2859 esp_mask = get_sp_mask(env->segs[R_SS].flags);
2860 ssp = env->segs[R_SS].base;
2861 if (shift) {
2862 PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
2863 PUSHL(ssp, esp, esp_mask, next_eip);
2864 } else {
2865 PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
2866 PUSHW(ssp, esp, esp_mask, next_eip);
2867 }
2868
2869 SET_ESP(esp, esp_mask);
2870 env->eip = new_eip;
2871 env->segs[R_CS].selector = new_cs;
2872 env->segs[R_CS].base = (new_cs << 4);
2873}
2874
2875/* protected mode call */
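/* Far CALL in protected mode. The interesting case is a call gate whose
   target code segment is more privileged (non-conforming, DPL < CPL): the
   helper fetches the inner SS:ESP for the target privilege level from the
   TSS, switches stacks, pushes the caller's SS:ESP on the new stack, copies
   param_count words or dwords of parameters across, and then pushes the
   return CS:EIP so the matching far RET can restore the caller. Direct calls
   to a code segment and same-privilege gate calls just push CS:EIP on the
   current stack. */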
2876void helper_lcall_protected(int new_cs, target_ulong new_eip,
2877 int shift, int next_eip_addend)
2878{
2879 int new_stack, i;
2880 uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
2881 uint32_t ss, ss_e1, ss_e2, sp, type, ss_dpl, sp_mask;
2882 uint32_t val, limit, old_sp_mask;
2883 target_ulong ssp, old_ssp, next_eip;
2884
2885#ifdef VBOX
2886 ss = ss_e1 = ss_e2 = e1 = e2 = 0;
2887#endif
2888 next_eip = env->eip + next_eip_addend;
2889#ifdef DEBUG_PCALL
2890 if (loglevel & CPU_LOG_PCALL) {
2891 fprintf(logfile, "lcall %04x:%08x s=%d\n",
2892 new_cs, (uint32_t)new_eip, shift);
2893 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
2894 }
2895#endif
2896 if ((new_cs & 0xfffc) == 0)
2897 raise_exception_err(EXCP0D_GPF, 0);
2898 if (load_segment(&e1, &e2, new_cs) != 0)
2899 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2900 cpl = env->hflags & HF_CPL_MASK;
2901#ifdef DEBUG_PCALL
2902 if (loglevel & CPU_LOG_PCALL) {
2903 fprintf(logfile, "desc=%08x:%08x\n", e1, e2);
2904 }
2905#endif
2906 if (e2 & DESC_S_MASK) {
2907 if (!(e2 & DESC_CS_MASK))
2908 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2909 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2910 if (e2 & DESC_C_MASK) {
2911 /* conforming code segment */
2912 if (dpl > cpl)
2913 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2914 } else {
2915 /* non conforming code segment */
2916 rpl = new_cs & 3;
2917 if (rpl > cpl)
2918 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2919 if (dpl != cpl)
2920 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2921 }
2922 if (!(e2 & DESC_P_MASK))
2923 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2924
2925#ifdef TARGET_X86_64
2926 /* XXX: check 16/32 bit cases in long mode */
2927 if (shift == 2) {
2928 target_ulong rsp;
2929 /* 64 bit case */
2930 rsp = ESP;
2931 PUSHQ(rsp, env->segs[R_CS].selector);
2932 PUSHQ(rsp, next_eip);
2933 /* from this point, not restartable */
2934 ESP = rsp;
2935 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2936 get_seg_base(e1, e2),
2937 get_seg_limit(e1, e2), e2);
2938 EIP = new_eip;
2939 } else
2940#endif
2941 {
2942 sp = ESP;
2943 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2944 ssp = env->segs[R_SS].base;
2945 if (shift) {
2946 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2947 PUSHL(ssp, sp, sp_mask, next_eip);
2948 } else {
2949 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2950 PUSHW(ssp, sp, sp_mask, next_eip);
2951 }
2952
2953 limit = get_seg_limit(e1, e2);
2954 if (new_eip > limit)
2955 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2956 /* from this point, not restartable */
2957 SET_ESP(sp, sp_mask);
2958 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2959 get_seg_base(e1, e2), limit, e2);
2960 EIP = new_eip;
2961 }
2962 } else {
2963 /* check gate type */
2964 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
2965 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2966 rpl = new_cs & 3;
2967 switch(type) {
2968 case 1: /* available 286 TSS */
2969 case 9: /* available 386 TSS */
2970 case 5: /* task gate */
2971 if (dpl < cpl || dpl < rpl)
2972 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2973 switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
2974 CC_OP = CC_OP_EFLAGS;
2975 return;
2976 case 4: /* 286 call gate */
2977 case 12: /* 386 call gate */
2978 break;
2979 default:
2980 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2981 break;
2982 }
2983 shift = type >> 3;
2984
2985 if (dpl < cpl || dpl < rpl)
2986 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2987 /* check valid bit */
2988 if (!(e2 & DESC_P_MASK))
2989 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2990 selector = e1 >> 16;
2991 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
2992 param_count = e2 & 0x1f;
2993 if ((selector & 0xfffc) == 0)
2994 raise_exception_err(EXCP0D_GPF, 0);
2995
2996 if (load_segment(&e1, &e2, selector) != 0)
2997 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2998 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
2999 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
3000 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3001 if (dpl > cpl)
3002 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
3003 if (!(e2 & DESC_P_MASK))
3004 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
3005
3006 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
3007 /* to inner privilege */
3008 get_ss_esp_from_tss(&ss, &sp, dpl);
3009#ifdef DEBUG_PCALL
3010 if (loglevel & CPU_LOG_PCALL)
3011 fprintf(logfile, "new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n",
3012 ss, sp, param_count, ESP);
3013#endif
3014 if ((ss & 0xfffc) == 0)
3015 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
3016 if ((ss & 3) != dpl)
3017 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
3018 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
3019 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
3020 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
3021 if (ss_dpl != dpl)
3022 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
3023 if (!(ss_e2 & DESC_S_MASK) ||
3024 (ss_e2 & DESC_CS_MASK) ||
3025 !(ss_e2 & DESC_W_MASK))
3026 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
3027 if (!(ss_e2 & DESC_P_MASK))
3028#ifdef VBOX /* See page 3-99 of 253666.pdf */
3029 raise_exception_err(EXCP0C_STACK, ss & 0xfffc);
3030#else
3031 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
3032#endif
3033
3034 // push_size = ((param_count * 2) + 8) << shift;
3035
3036 old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
3037 old_ssp = env->segs[R_SS].base;
3038
3039 sp_mask = get_sp_mask(ss_e2);
3040 ssp = get_seg_base(ss_e1, ss_e2);
3041 if (shift) {
3042 PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
3043 PUSHL(ssp, sp, sp_mask, ESP);
3044 for(i = param_count - 1; i >= 0; i--) {
3045 val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
3046 PUSHL(ssp, sp, sp_mask, val);
3047 }
3048 } else {
3049 PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
3050 PUSHW(ssp, sp, sp_mask, ESP);
3051 for(i = param_count - 1; i >= 0; i--) {
3052 val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
3053 PUSHW(ssp, sp, sp_mask, val);
3054 }
3055 }
3056 new_stack = 1;
3057 } else {
3058 /* to same privilege */
3059 sp = ESP;
3060 sp_mask = get_sp_mask(env->segs[R_SS].flags);
3061 ssp = env->segs[R_SS].base;
3062 // push_size = (4 << shift);
3063 new_stack = 0;
3064 }
3065
3066 if (shift) {
3067 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
3068 PUSHL(ssp, sp, sp_mask, next_eip);
3069 } else {
3070 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
3071 PUSHW(ssp, sp, sp_mask, next_eip);
3072 }
3073
3074 /* from this point, not restartable */
3075
3076 if (new_stack) {
3077 ss = (ss & ~3) | dpl;
3078 cpu_x86_load_seg_cache(env, R_SS, ss,
3079 ssp,
3080 get_seg_limit(ss_e1, ss_e2),
3081 ss_e2);
3082 }
3083
3084 selector = (selector & ~3) | dpl;
3085 cpu_x86_load_seg_cache(env, R_CS, selector,
3086 get_seg_base(e1, e2),
3087 get_seg_limit(e1, e2),
3088 e2);
3089 cpu_x86_set_cpl(env, dpl);
3090 SET_ESP(sp, sp_mask);
3091 EIP = offset;
3092 }
3093#ifdef USE_KQEMU
3094 if (kqemu_is_ok(env)) {
3095 env->exception_index = -1;
3096 cpu_loop_exit();
3097 }
3098#endif
3099}
3100
3101/* real and vm86 mode iret */
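/* Real-mode / VM86 IRET: pop EIP, CS and EFLAGS (16 or 32 bits wide
   depending on the operand size) from the SS:SP stack and reload them. Which
   EFLAGS bits may actually change depends on the mode: VM86 code cannot
   touch IOPL, and the VBOX-added VME path additionally redirects IF into VIF
   and faults if the popped flags would enable a pending virtual interrupt or
   set TF. */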
3102void helper_iret_real(int shift)
3103{
3104 uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
3105 target_ulong ssp;
3106 int eflags_mask;
3107#ifdef VBOX
3108 bool fVME = false;
3109
3110 remR3TrapClear(env->pVM);
3111#endif /* VBOX */
3112
3113 sp_mask = 0xffff; /* XXXX: use SS segment size ? */
3114 sp = ESP;
3115 ssp = env->segs[R_SS].base;
3116 if (shift == 1) {
3117 /* 32 bits */
3118 POPL(ssp, sp, sp_mask, new_eip);
3119 POPL(ssp, sp, sp_mask, new_cs);
3120 new_cs &= 0xffff;
3121 POPL(ssp, sp, sp_mask, new_eflags);
3122 } else {
3123 /* 16 bits */
3124 POPW(ssp, sp, sp_mask, new_eip);
3125 POPW(ssp, sp, sp_mask, new_cs);
3126 POPW(ssp, sp, sp_mask, new_eflags);
3127 }
3128#ifdef VBOX
3129 if ( (env->eflags & VM_MASK)
3130 && ((env->eflags >> IOPL_SHIFT) & 3) != 3
3131 && (env->cr[4] & CR4_VME_MASK)) /* implied or else we would fault earlier */
3132 {
3133 fVME = true;
3134 /* if virtual interrupt pending and (virtual) interrupts will be enabled -> #GP */
3135 /* if TF will be set -> #GP */
3136 if ( ((new_eflags & IF_MASK) && (env->eflags & VIP_MASK))
3137 || (new_eflags & TF_MASK))
3138 raise_exception(EXCP0D_GPF);
3139 }
3140#endif /* VBOX */
3141 ESP = (ESP & ~sp_mask) | (sp & sp_mask);
3142 env->segs[R_CS].selector = new_cs;
3143 env->segs[R_CS].base = (new_cs << 4);
3144 env->eip = new_eip;
3145#ifdef VBOX
3146 if (fVME)
3147 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
3148 else
3149#endif
3150 if (env->eflags & VM_MASK)
3151 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
3152 else
3153 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
3154 if (shift == 0)
3155 eflags_mask &= 0xffff;
3156 load_eflags(new_eflags, eflags_mask);
3157 env->hflags2 &= ~HF2_NMI_MASK;
3158#ifdef VBOX
3159 if (fVME)
3160 {
3161 if (new_eflags & IF_MASK)
3162 env->eflags |= VIF_MASK;
3163 else
3164 env->eflags &= ~VIF_MASK;
3165 }
3166#endif /* VBOX */
3167}
3168
3169#ifndef VBOX
3170static inline void validate_seg(int seg_reg, int cpl)
3171#else /* VBOX */
3172DECLINLINE(void) validate_seg(int seg_reg, int cpl)
3173#endif /* VBOX */
3174{
3175 int dpl;
3176 uint32_t e2;
3177
3178 /* XXX: on x86_64, we do not want to nullify FS and GS because
3179 they may still contain a valid base. I would be interested to
3180 know how a real x86_64 CPU behaves */
3181 if ((seg_reg == R_FS || seg_reg == R_GS) &&
3182 (env->segs[seg_reg].selector & 0xfffc) == 0)
3183 return;
3184
3185 e2 = env->segs[seg_reg].flags;
3186 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3187 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
3188 /* data or non conforming code segment */
3189 if (dpl < cpl) {
3190 cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
3191 }
3192 }
3193}
3194
3195/* protected mode iret */
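/* Common worker for protected-mode far RET and IRET. After popping (E/R)IP,
   CS and, for IRET, EFLAGS, three outcomes are possible: return to the same
   privilege level (just reload CS), return to an outer level (additionally
   pop SS:ESP, reload the stack segment and re-validate ES/DS/FS/GS so no
   segment usable only at the old CPL stays loaded), or, when the popped
   EFLAGS has VM set, a 32-bit IRET back to virtual-8086 mode, which pops the
   full ESP/SS/ES/DS/FS/GS frame and drops to CPL 3. */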
3196#ifndef VBOX
3197static inline void helper_ret_protected(int shift, int is_iret, int addend)
3198#else /* VBOX */
3199DECLINLINE(void) helper_ret_protected(int shift, int is_iret, int addend)
3200#endif /* VBOX */
3201{
3202 uint32_t new_cs, new_eflags, new_ss;
3203 uint32_t new_es, new_ds, new_fs, new_gs;
3204 uint32_t e1, e2, ss_e1, ss_e2;
3205 int cpl, dpl, rpl, eflags_mask, iopl;
3206 target_ulong ssp, sp, new_eip, new_esp, sp_mask;
3207
3208#ifdef VBOX
3209 ss_e1 = ss_e2 = e1 = e2 = 0;
3210#endif
3211
3212#ifdef TARGET_X86_64
3213 if (shift == 2)
3214 sp_mask = -1;
3215 else
3216#endif
3217 sp_mask = get_sp_mask(env->segs[R_SS].flags);
3218 sp = ESP;
3219 ssp = env->segs[R_SS].base;
3220 new_eflags = 0; /* avoid warning */
3221#ifdef TARGET_X86_64
3222 if (shift == 2) {
3223 POPQ(sp, new_eip);
3224 POPQ(sp, new_cs);
3225 new_cs &= 0xffff;
3226 if (is_iret) {
3227 POPQ(sp, new_eflags);
3228 }
3229 } else
3230#endif
3231 if (shift == 1) {
3232 /* 32 bits */
3233 POPL(ssp, sp, sp_mask, new_eip);
3234 POPL(ssp, sp, sp_mask, new_cs);
3235 new_cs &= 0xffff;
3236 if (is_iret) {
3237 POPL(ssp, sp, sp_mask, new_eflags);
3238#if defined(VBOX) && defined(DEBUG)
3239 printf("iret: new CS %04X\n", new_cs);
3240 printf("iret: new EIP %08X\n", (uint32_t)new_eip);
3241 printf("iret: new EFLAGS %08X\n", new_eflags);
3242 printf("iret: EAX=%08x\n", (uint32_t)EAX);
3243#endif
3244 if (new_eflags & VM_MASK)
3245 goto return_to_vm86;
3246 }
3247#ifdef VBOX
3248 if ((new_cs & 0x3) == 1 && (env->state & CPU_RAW_RING0))
3249 {
3250#ifdef DEBUG
3251 printf("RPL 1 -> new_cs %04X -> %04X\n", new_cs, new_cs & 0xfffc);
3252#endif
3253 new_cs = new_cs & 0xfffc;
3254 }
3255#endif
3256 } else {
3257 /* 16 bits */
3258 POPW(ssp, sp, sp_mask, new_eip);
3259 POPW(ssp, sp, sp_mask, new_cs);
3260 if (is_iret)
3261 POPW(ssp, sp, sp_mask, new_eflags);
3262 }
3263#ifdef DEBUG_PCALL
3264 if (loglevel & CPU_LOG_PCALL) {
3265 fprintf(logfile, "lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
3266 new_cs, new_eip, shift, addend);
3267 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
3268 }
3269#endif
3270 if ((new_cs & 0xfffc) == 0)
3271 {
3272#if defined(VBOX) && defined(DEBUG)
3273        printf("(new_cs & 0xfffc) == 0\n");
3274#endif
3275 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3276 }
3277 if (load_segment(&e1, &e2, new_cs) != 0)
3278 {
3279#if defined(VBOX) && defined(DEBUG)
3280 printf("load_segment failed\n");
3281#endif
3282 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3283 }
3284 if (!(e2 & DESC_S_MASK) ||
3285 !(e2 & DESC_CS_MASK))
3286 {
3287#if defined(VBOX) && defined(DEBUG)
3288 printf("e2 mask %08x\n", e2);
3289#endif
3290 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3291 }
3292 cpl = env->hflags & HF_CPL_MASK;
3293 rpl = new_cs & 3;
3294 if (rpl < cpl)
3295 {
3296#if defined(VBOX) && defined(DEBUG)
3297 printf("rpl < cpl (%d vs %d)\n", rpl, cpl);
3298#endif
3299 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3300 }
3301 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3302 if (e2 & DESC_C_MASK) {
3303 if (dpl > rpl)
3304 {
3305#if defined(VBOX) && defined(DEBUG)
3306 printf("dpl > rpl (%d vs %d)\n", dpl, rpl);
3307#endif
3308 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3309 }
3310 } else {
3311 if (dpl != rpl)
3312 {
3313#if defined(VBOX) && defined(DEBUG)
3314 printf("dpl != rpl (%d vs %d) e1=%x e2=%x\n", dpl, rpl, e1, e2);
3315#endif
3316 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3317 }
3318 }
3319 if (!(e2 & DESC_P_MASK))
3320 {
3321#if defined(VBOX) && defined(DEBUG)
3322 printf("DESC_P_MASK e2=%08x\n", e2);
3323#endif
3324 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
3325 }
3326
3327 sp += addend;
3328 if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
3329 ((env->hflags & HF_CS64_MASK) && !is_iret))) {
3330 /* return to same privilege level */
3331 cpu_x86_load_seg_cache(env, R_CS, new_cs,
3332 get_seg_base(e1, e2),
3333 get_seg_limit(e1, e2),
3334 e2);
3335 } else {
3336 /* return to different privilege level */
3337#ifdef TARGET_X86_64
3338 if (shift == 2) {
3339 POPQ(sp, new_esp);
3340 POPQ(sp, new_ss);
3341 new_ss &= 0xffff;
3342 } else
3343#endif
3344 if (shift == 1) {
3345 /* 32 bits */
3346 POPL(ssp, sp, sp_mask, new_esp);
3347 POPL(ssp, sp, sp_mask, new_ss);
3348 new_ss &= 0xffff;
3349 } else {
3350 /* 16 bits */
3351 POPW(ssp, sp, sp_mask, new_esp);
3352 POPW(ssp, sp, sp_mask, new_ss);
3353 }
3354#ifdef DEBUG_PCALL
3355 if (loglevel & CPU_LOG_PCALL) {
3356 fprintf(logfile, "new ss:esp=%04x:" TARGET_FMT_lx "\n",
3357 new_ss, new_esp);
3358 }
3359#endif
3360 if ((new_ss & 0xfffc) == 0) {
3361#ifdef TARGET_X86_64
3362            /* NULL ss is allowed in long mode if cpl != 3 */
3363 /* XXX: test CS64 ? */
3364 if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
3365 cpu_x86_load_seg_cache(env, R_SS, new_ss,
3366 0, 0xffffffff,
3367 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3368 DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
3369 DESC_W_MASK | DESC_A_MASK);
3370 ss_e2 = DESC_B_MASK; /* XXX: should not be needed ? */
3371 } else
3372#endif
3373 {
3374 raise_exception_err(EXCP0D_GPF, 0);
3375 }
3376 } else {
3377 if ((new_ss & 3) != rpl)
3378 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3379 if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
3380 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3381 if (!(ss_e2 & DESC_S_MASK) ||
3382 (ss_e2 & DESC_CS_MASK) ||
3383 !(ss_e2 & DESC_W_MASK))
3384 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3385 dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
3386 if (dpl != rpl)
3387 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3388 if (!(ss_e2 & DESC_P_MASK))
3389 raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
3390 cpu_x86_load_seg_cache(env, R_SS, new_ss,
3391 get_seg_base(ss_e1, ss_e2),
3392 get_seg_limit(ss_e1, ss_e2),
3393 ss_e2);
3394 }
3395
3396 cpu_x86_load_seg_cache(env, R_CS, new_cs,
3397 get_seg_base(e1, e2),
3398 get_seg_limit(e1, e2),
3399 e2);
3400 cpu_x86_set_cpl(env, rpl);
3401 sp = new_esp;
3402#ifdef TARGET_X86_64
3403 if (env->hflags & HF_CS64_MASK)
3404 sp_mask = -1;
3405 else
3406#endif
3407 sp_mask = get_sp_mask(ss_e2);
3408
3409 /* validate data segments */
3410 validate_seg(R_ES, rpl);
3411 validate_seg(R_DS, rpl);
3412 validate_seg(R_FS, rpl);
3413 validate_seg(R_GS, rpl);
3414
3415 sp += addend;
3416 }
3417 SET_ESP(sp, sp_mask);
3418 env->eip = new_eip;
3419 if (is_iret) {
3420 /* NOTE: 'cpl' is the _old_ CPL */
3421 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
3422 if (cpl == 0)
3423#ifdef VBOX
3424 eflags_mask |= IOPL_MASK | VIF_MASK | VIP_MASK;
3425#else
3426 eflags_mask |= IOPL_MASK;
3427#endif
3428 iopl = (env->eflags >> IOPL_SHIFT) & 3;
3429 if (cpl <= iopl)
3430 eflags_mask |= IF_MASK;
3431 if (shift == 0)
3432 eflags_mask &= 0xffff;
3433 load_eflags(new_eflags, eflags_mask);
3434 }
3435 return;
3436
3437 return_to_vm86:
3438 POPL(ssp, sp, sp_mask, new_esp);
3439 POPL(ssp, sp, sp_mask, new_ss);
3440 POPL(ssp, sp, sp_mask, new_es);
3441 POPL(ssp, sp, sp_mask, new_ds);
3442 POPL(ssp, sp, sp_mask, new_fs);
3443 POPL(ssp, sp, sp_mask, new_gs);
3444
3445 /* modify processor state */
3446 load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
3447 IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
3448 load_seg_vm(R_CS, new_cs & 0xffff);
3449 cpu_x86_set_cpl(env, 3);
3450 load_seg_vm(R_SS, new_ss & 0xffff);
3451 load_seg_vm(R_ES, new_es & 0xffff);
3452 load_seg_vm(R_DS, new_ds & 0xffff);
3453 load_seg_vm(R_FS, new_fs & 0xffff);
3454 load_seg_vm(R_GS, new_gs & 0xffff);
3455
3456 env->eip = new_eip & 0xffff;
3457 ESP = new_esp;
3458}
3459
3460void helper_iret_protected(int shift, int next_eip)
3461{
3462 int tss_selector, type;
3463 uint32_t e1, e2;
3464
3465#ifdef VBOX
3466 e1 = e2 = 0;
3467 remR3TrapClear(env->pVM);
3468#endif
3469
3470 /* specific case for TSS */
3471 if (env->eflags & NT_MASK) {
3472#ifdef TARGET_X86_64
3473 if (env->hflags & HF_LMA_MASK)
3474 raise_exception_err(EXCP0D_GPF, 0);
3475#endif
3476 tss_selector = lduw_kernel(env->tr.base + 0);
3477 if (tss_selector & 4)
3478 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
3479 if (load_segment(&e1, &e2, tss_selector) != 0)
3480 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
3481 type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
3482 /* NOTE: we check both segment and busy TSS */
3483 if (type != 3)
3484 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
3485 switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
3486 } else {
3487 helper_ret_protected(shift, 1, 0);
3488 }
3489 env->hflags2 &= ~HF2_NMI_MASK;
3490#ifdef USE_KQEMU
3491 if (kqemu_is_ok(env)) {
3492 CC_OP = CC_OP_EFLAGS;
3493 env->exception_index = -1;
3494 cpu_loop_exit();
3495 }
3496#endif
3497}
3498
3499void helper_lret_protected(int shift, int addend)
3500{
3501 helper_ret_protected(shift, 0, addend);
3502#ifdef USE_KQEMU
3503 if (kqemu_is_ok(env)) {
3504 env->exception_index = -1;
3505 cpu_loop_exit();
3506 }
3507#endif
3508}
3509
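/* SYSENTER/SYSEXIT use flat segments derived from MSR_IA32_SYSENTER_CS:
   SYSENTER loads CS from that selector and SS from selector+8, drops to
   CPL 0 and jumps to sysenter_eip/sysenter_esp; SYSEXIT returns to CPL 3
   with CS = selector+16 and SS = selector+24 (or +32/+40 for a 64-bit
   return) and takes the new EIP and ESP from EDX and ECX. A zero
   SYSENTER_CS makes both instructions fault with #GP(0), and SYSEXIT
   additionally requires CPL 0. */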
3510void helper_sysenter(void)
3511{
3512 if (env->sysenter_cs == 0) {
3513 raise_exception_err(EXCP0D_GPF, 0);
3514 }
3515 env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
3516 cpu_x86_set_cpl(env, 0);
3517
3518#ifdef TARGET_X86_64
3519 if (env->hflags & HF_LMA_MASK) {
3520 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
3521 0, 0xffffffff,
3522 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3523 DESC_S_MASK |
3524 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
3525 } else
3526#endif
3527 {
3528 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
3529 0, 0xffffffff,
3530 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3531 DESC_S_MASK |
3532 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
3533 }
3534 cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
3535 0, 0xffffffff,
3536 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3537 DESC_S_MASK |
3538 DESC_W_MASK | DESC_A_MASK);
3539 ESP = env->sysenter_esp;
3540 EIP = env->sysenter_eip;
3541}
3542
3543void helper_sysexit(int dflag)
3544{
3545 int cpl;
3546
3547 cpl = env->hflags & HF_CPL_MASK;
3548 if (env->sysenter_cs == 0 || cpl != 0) {
3549 raise_exception_err(EXCP0D_GPF, 0);
3550 }
3551 cpu_x86_set_cpl(env, 3);
3552#ifdef TARGET_X86_64
3553 if (dflag == 2) {
3554 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) | 3,
3555 0, 0xffffffff,
3556 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3557 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3558 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
3559 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) | 3,
3560 0, 0xffffffff,
3561 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3562 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3563 DESC_W_MASK | DESC_A_MASK);
3564 } else
3565#endif
3566 {
3567 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
3568 0, 0xffffffff,
3569 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3570 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3571 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
3572 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
3573 0, 0xffffffff,
3574 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3575 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3576 DESC_W_MASK | DESC_A_MASK);
3577 }
3578 ESP = ECX;
3579 EIP = EDX;
3580#ifdef USE_KQEMU
3581 if (kqemu_is_ok(env)) {
3582 env->exception_index = -1;
3583 cpu_loop_exit();
3584 }
3585#endif
3586}
3587
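/* Control register access. Outside CONFIG_USER_ONLY writes are funnelled
   through cpu_x86_update_cr0/cr3/cr4 so paging and flag state stay
   consistent, and CR8 reads and writes go to the local APIC TPR unless
   HF2_VINTR_MASK is set, in which case the shadow value env->v_tpr is used.
   The user-only build simply stubs these helpers out. */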
3588#if defined(CONFIG_USER_ONLY)
3589target_ulong helper_read_crN(int reg)
3590{
3591 return 0;
3592}
3593
3594void helper_write_crN(int reg, target_ulong t0)
3595{
3596}
3597#else
3598target_ulong helper_read_crN(int reg)
3599{
3600 target_ulong val;
3601
3602 helper_svm_check_intercept_param(SVM_EXIT_READ_CR0 + reg, 0);
3603 switch(reg) {
3604 default:
3605 val = env->cr[reg];
3606 break;
3607 case 8:
3608 if (!(env->hflags2 & HF2_VINTR_MASK)) {
3609 val = cpu_get_apic_tpr(env);
3610 } else {
3611 val = env->v_tpr;
3612 }
3613 break;
3614 }
3615 return val;
3616}
3617
3618void helper_write_crN(int reg, target_ulong t0)
3619{
3620 helper_svm_check_intercept_param(SVM_EXIT_WRITE_CR0 + reg, 0);
3621 switch(reg) {
3622 case 0:
3623 cpu_x86_update_cr0(env, t0);
3624 break;
3625 case 3:
3626 cpu_x86_update_cr3(env, t0);
3627 break;
3628 case 4:
3629 cpu_x86_update_cr4(env, t0);
3630 break;
3631 case 8:
3632 if (!(env->hflags2 & HF2_VINTR_MASK)) {
3633 cpu_set_apic_tpr(env, t0);
3634 }
3635 env->v_tpr = t0 & 0x0f;
3636 break;
3637 default:
3638 env->cr[reg] = t0;
3639 break;
3640 }
3641}
3642#endif
3643
3644void helper_lmsw(target_ulong t0)
3645{
3646 /* only 4 lower bits of CR0 are modified. PE cannot be set to zero
3647 if already set to one. */
3648 t0 = (env->cr[0] & ~0xe) | (t0 & 0xf);
3649 helper_write_crN(0, t0);
3650}
3651
3652void helper_clts(void)
3653{
3654 env->cr[0] &= ~CR0_TS_MASK;
3655 env->hflags &= ~HF_TS_MASK;
3656}
3657
3658/* XXX: do more */
3659void helper_movl_drN_T0(int reg, target_ulong t0)
3660{
3661 env->dr[reg] = t0;
3662}
3663
3664void helper_invlpg(target_ulong addr)
3665{
3666 helper_svm_check_intercept_param(SVM_EXIT_INVLPG, 0);
3667 tlb_flush_page(env, addr);
3668}
3669
3670void helper_rdtsc(void)
3671{
3672 uint64_t val;
3673
3674 if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3675 raise_exception(EXCP0D_GPF);
3676 }
3677 helper_svm_check_intercept_param(SVM_EXIT_RDTSC, 0);
3678
3679 val = cpu_get_tsc(env) + env->tsc_offset;
3680 EAX = (uint32_t)(val);
3681 EDX = (uint32_t)(val >> 32);
3682}
3683
3684#ifdef VBOX
3685void helper_rdtscp(void)
3686{
3687 uint64_t val;
3688 if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3689 raise_exception(EXCP0D_GPF);
3690 }
3691
3692 val = cpu_get_tsc(env);
3693 EAX = (uint32_t)(val);
3694 EDX = (uint32_t)(val >> 32);
3695 ECX = cpu_rdmsr(env, MSR_K8_TSC_AUX);
3696}
3697#endif
3698
3699void helper_rdpmc(void)
3700{
3701#ifdef VBOX
3702 /* If X86_CR4_PCE is *not* set, then CPL must be zero. */
3703 if (!(env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3704 raise_exception(EXCP0D_GPF);
3705 }
3706 /* Just return zero here; rather tricky to properly emulate this, especially as the specs are a mess. */
3707 EAX = 0;
3708 EDX = 0;
3709#else
3710 if ((env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3711 raise_exception(EXCP0D_GPF);
3712 }
3713 helper_svm_check_intercept_param(SVM_EXIT_RDPMC, 0);
3714
3715 /* currently unimplemented */
3716 raise_exception_err(EXCP06_ILLOP, 0);
3717#endif
3718}
3719
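/* WRMSR/RDMSR: ECX selects the MSR and the 64-bit value travels in EDX:EAX.
   Writes to MSR_EFER are filtered through an update mask built from the
   guest's CPUID feature bits, so bits like LME, NXE or SVME can only change
   if the corresponding feature is advertised. The VBOX build additionally
   forwards the X2APIC MSR range and TSC_AUX to the device emulation instead
   of silently ignoring unknown MSRs. */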
3720#if defined(CONFIG_USER_ONLY)
3721void helper_wrmsr(void)
3722{
3723}
3724
3725void helper_rdmsr(void)
3726{
3727}
3728#else
3729void helper_wrmsr(void)
3730{
3731 uint64_t val;
3732
3733 helper_svm_check_intercept_param(SVM_EXIT_MSR, 1);
3734
3735 val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
3736
3737 switch((uint32_t)ECX) {
3738 case MSR_IA32_SYSENTER_CS:
3739 env->sysenter_cs = val & 0xffff;
3740 break;
3741 case MSR_IA32_SYSENTER_ESP:
3742 env->sysenter_esp = val;
3743 break;
3744 case MSR_IA32_SYSENTER_EIP:
3745 env->sysenter_eip = val;
3746 break;
3747 case MSR_IA32_APICBASE:
3748 cpu_set_apic_base(env, val);
3749 break;
3750 case MSR_EFER:
3751 {
3752 uint64_t update_mask;
3753 update_mask = 0;
3754 if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL)
3755 update_mask |= MSR_EFER_SCE;
3756 if (env->cpuid_ext2_features & CPUID_EXT2_LM)
3757 update_mask |= MSR_EFER_LME;
3758 if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
3759 update_mask |= MSR_EFER_FFXSR;
3760 if (env->cpuid_ext2_features & CPUID_EXT2_NX)
3761 update_mask |= MSR_EFER_NXE;
3762 if (env->cpuid_ext3_features & CPUID_EXT3_SVM)
3763 update_mask |= MSR_EFER_SVME;
3764 cpu_load_efer(env, (env->efer & ~update_mask) |
3765 (val & update_mask));
3766 }
3767 break;
3768 case MSR_STAR:
3769 env->star = val;
3770 break;
3771 case MSR_PAT:
3772 env->pat = val;
3773 break;
3774 case MSR_VM_HSAVE_PA:
3775 env->vm_hsave = val;
3776 break;
3777#ifdef TARGET_X86_64
3778 case MSR_LSTAR:
3779 env->lstar = val;
3780 break;
3781 case MSR_CSTAR:
3782 env->cstar = val;
3783 break;
3784 case MSR_FMASK:
3785 env->fmask = val;
3786 break;
3787 case MSR_FSBASE:
3788 env->segs[R_FS].base = val;
3789 break;
3790 case MSR_GSBASE:
3791 env->segs[R_GS].base = val;
3792 break;
3793 case MSR_KERNELGSBASE:
3794 env->kernelgsbase = val;
3795 break;
3796#endif
3797 default:
3798#ifndef VBOX
3799 /* XXX: exception ? */
3800 break;
3801#else /* VBOX */
3802 {
3803 uint32_t ecx = (uint32_t)ECX;
3804 /* In X2APIC specification this range is reserved for APIC control. */
3805 if (ecx >= MSR_APIC_RANGE_START && ecx < MSR_APIC_RANGE_END)
3806 cpu_apic_wrmsr(env, ecx, val);
3807 /** @todo else exception? */
3808 break;
3809 }
3810 case MSR_K8_TSC_AUX:
3811 cpu_wrmsr(env, MSR_K8_TSC_AUX, val);
3812 break;
3813#endif /* VBOX */
3814 }
3815}
3816
3817void helper_rdmsr(void)
3818{
3819 uint64_t val;
3820
3821 helper_svm_check_intercept_param(SVM_EXIT_MSR, 0);
3822
3823 switch((uint32_t)ECX) {
3824 case MSR_IA32_SYSENTER_CS:
3825 val = env->sysenter_cs;
3826 break;
3827 case MSR_IA32_SYSENTER_ESP:
3828 val = env->sysenter_esp;
3829 break;
3830 case MSR_IA32_SYSENTER_EIP:
3831 val = env->sysenter_eip;
3832 break;
3833 case MSR_IA32_APICBASE:
3834 val = cpu_get_apic_base(env);
3835 break;
3836 case MSR_EFER:
3837 val = env->efer;
3838 break;
3839 case MSR_STAR:
3840 val = env->star;
3841 break;
3842 case MSR_PAT:
3843 val = env->pat;
3844 break;
3845 case MSR_VM_HSAVE_PA:
3846 val = env->vm_hsave;
3847 break;
3848 case MSR_IA32_PERF_STATUS:
3849 /* tsc_increment_by_tick */
3850 val = 1000ULL;
3851 /* CPU multiplier */
3852 val |= (((uint64_t)4ULL) << 40);
3853 break;
3854#ifdef TARGET_X86_64
3855 case MSR_LSTAR:
3856 val = env->lstar;
3857 break;
3858 case MSR_CSTAR:
3859 val = env->cstar;
3860 break;
3861 case MSR_FMASK:
3862 val = env->fmask;
3863 break;
3864 case MSR_FSBASE:
3865 val = env->segs[R_FS].base;
3866 break;
3867 case MSR_GSBASE:
3868 val = env->segs[R_GS].base;
3869 break;
3870 case MSR_KERNELGSBASE:
3871 val = env->kernelgsbase;
3872 break;
3873#endif
3874#ifdef USE_KQEMU
3875 case MSR_QPI_COMMBASE:
3876 if (env->kqemu_enabled) {
3877 val = kqemu_comm_base;
3878 } else {
3879 val = 0;
3880 }
3881 break;
3882#endif
3883 default:
3884#ifndef VBOX
3885 /* XXX: exception ? */
3886 val = 0;
3887 break;
3888#else /* VBOX */
3889 {
3890 uint32_t ecx = (uint32_t)ECX;
3891 /* In X2APIC specification this range is reserved for APIC control. */
3892 if (ecx >= MSR_APIC_RANGE_START && ecx < MSR_APIC_RANGE_END)
3893 val = cpu_apic_rdmsr(env, ecx);
3894 else
3895 val = 0; /** @todo else exception? */
3896 break;
3897 }
3898 case MSR_IA32_TSC:
3899 case MSR_K8_TSC_AUX:
3900 val = cpu_rdmsr(env, (uint32_t)ECX);
3901 break;
3902#endif /* VBOX */
3903 }
3904 EAX = (uint32_t)(val);
3905 EDX = (uint32_t)(val >> 32);
3906}
3907#endif
3908
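/* LSL/LAR/VERR/VERW report on a selector without loading it. They share the
   same pattern: fetch the descriptor, apply the DPL/RPL/CPL visibility rules
   (conforming code segments are exempt), and publish the outcome in ZF
   through CC_SRC: set on success, cleared on any failure. LSL returns the
   expanded segment limit, LAR the access-rights bytes (e2 & 0x00f0ff00), and
   VERR/VERW only test readability or writability. */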
3909target_ulong helper_lsl(target_ulong selector1)
3910{
3911 unsigned int limit;
3912 uint32_t e1, e2, eflags, selector;
3913 int rpl, dpl, cpl, type;
3914
3915 selector = selector1 & 0xffff;
3916 eflags = cc_table[CC_OP].compute_all();
3917 if (load_segment(&e1, &e2, selector) != 0)
3918 goto fail;
3919 rpl = selector & 3;
3920 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3921 cpl = env->hflags & HF_CPL_MASK;
3922 if (e2 & DESC_S_MASK) {
3923 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3924 /* conforming */
3925 } else {
3926 if (dpl < cpl || dpl < rpl)
3927 goto fail;
3928 }
3929 } else {
3930 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
3931 switch(type) {
3932 case 1:
3933 case 2:
3934 case 3:
3935 case 9:
3936 case 11:
3937 break;
3938 default:
3939 goto fail;
3940 }
3941 if (dpl < cpl || dpl < rpl) {
3942 fail:
3943 CC_SRC = eflags & ~CC_Z;
3944 return 0;
3945 }
3946 }
3947 limit = get_seg_limit(e1, e2);
3948 CC_SRC = eflags | CC_Z;
3949 return limit;
3950}
3951
3952target_ulong helper_lar(target_ulong selector1)
3953{
3954 uint32_t e1, e2, eflags, selector;
3955 int rpl, dpl, cpl, type;
3956
3957 selector = selector1 & 0xffff;
3958 eflags = cc_table[CC_OP].compute_all();
3959 if ((selector & 0xfffc) == 0)
3960 goto fail;
3961 if (load_segment(&e1, &e2, selector) != 0)
3962 goto fail;
3963 rpl = selector & 3;
3964 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3965 cpl = env->hflags & HF_CPL_MASK;
3966 if (e2 & DESC_S_MASK) {
3967 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3968 /* conforming */
3969 } else {
3970 if (dpl < cpl || dpl < rpl)
3971 goto fail;
3972 }
3973 } else {
3974 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
3975 switch(type) {
3976 case 1:
3977 case 2:
3978 case 3:
3979 case 4:
3980 case 5:
3981 case 9:
3982 case 11:
3983 case 12:
3984 break;
3985 default:
3986 goto fail;
3987 }
3988 if (dpl < cpl || dpl < rpl) {
3989 fail:
3990 CC_SRC = eflags & ~CC_Z;
3991 return 0;
3992 }
3993 }
3994 CC_SRC = eflags | CC_Z;
3995 return e2 & 0x00f0ff00;
3996}
3997
3998void helper_verr(target_ulong selector1)
3999{
4000 uint32_t e1, e2, eflags, selector;
4001 int rpl, dpl, cpl;
4002
4003 selector = selector1 & 0xffff;
4004 eflags = cc_table[CC_OP].compute_all();
4005 if ((selector & 0xfffc) == 0)
4006 goto fail;
4007 if (load_segment(&e1, &e2, selector) != 0)
4008 goto fail;
4009 if (!(e2 & DESC_S_MASK))
4010 goto fail;
4011 rpl = selector & 3;
4012 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
4013 cpl = env->hflags & HF_CPL_MASK;
4014 if (e2 & DESC_CS_MASK) {
4015 if (!(e2 & DESC_R_MASK))
4016 goto fail;
4017 if (!(e2 & DESC_C_MASK)) {
4018 if (dpl < cpl || dpl < rpl)
4019 goto fail;
4020 }
4021 } else {
4022 if (dpl < cpl || dpl < rpl) {
4023 fail:
4024 CC_SRC = eflags & ~CC_Z;
4025 return;
4026 }
4027 }
4028 CC_SRC = eflags | CC_Z;
4029}
4030
4031void helper_verw(target_ulong selector1)
4032{
4033 uint32_t e1, e2, eflags, selector;
4034 int rpl, dpl, cpl;
4035
4036 selector = selector1 & 0xffff;
4037 eflags = cc_table[CC_OP].compute_all();
4038 if ((selector & 0xfffc) == 0)
4039 goto fail;
4040 if (load_segment(&e1, &e2, selector) != 0)
4041 goto fail;
4042 if (!(e2 & DESC_S_MASK))
4043 goto fail;
4044 rpl = selector & 3;
4045 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
4046 cpl = env->hflags & HF_CPL_MASK;
4047 if (e2 & DESC_CS_MASK) {
4048 goto fail;
4049 } else {
4050 if (dpl < cpl || dpl < rpl)
4051 goto fail;
4052 if (!(e2 & DESC_W_MASK)) {
4053 fail:
4054 CC_SRC = eflags & ~CC_Z;
4055 return;
4056 }
4057 }
4058 CC_SRC = eflags | CC_Z;
4059}
4060
4061/* x87 FPU helpers */
4062
4063static void fpu_set_exception(int mask)
4064{
4065 env->fpus |= mask;
4066 if (env->fpus & (~env->fpuc & FPUC_EM))
4067 env->fpus |= FPUS_SE | FPUS_B;
4068}
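/* fpu_set_exception() only records the exception in the status word; if the
   corresponding mask bit in the control word is clear (exception unmasked),
   the summary and busy bits are set as well. Delivery of the fault is done
   separately by fpu_raise_exception(), which raises #MF when CR0.NE is set
   and signals FERR otherwise. */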
4069
4070#ifndef VBOX
4071static inline CPU86_LDouble helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
4072#else /* VBOX */
4073DECLINLINE(CPU86_LDouble) helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
4074#endif /* VBOX */
4075{
4076 if (b == 0.0)
4077 fpu_set_exception(FPUS_ZE);
4078 return a / b;
4079}
4080
4081void fpu_raise_exception(void)
4082{
4083 if (env->cr[0] & CR0_NE_MASK) {
4084 raise_exception(EXCP10_COPR);
4085 }
4086#if !defined(CONFIG_USER_ONLY)
4087 else {
4088 cpu_set_ferr(env);
4089 }
4090#endif
4091}
4092
4093void helper_flds_FT0(uint32_t val)
4094{
4095 union {
4096 float32 f;
4097 uint32_t i;
4098 } u;
4099 u.i = val;
4100 FT0 = float32_to_floatx(u.f, &env->fp_status);
4101}
4102
4103void helper_fldl_FT0(uint64_t val)
4104{
4105 union {
4106 float64 f;
4107 uint64_t i;
4108 } u;
4109 u.i = val;
4110 FT0 = float64_to_floatx(u.f, &env->fp_status);
4111}
4112
4113void helper_fildl_FT0(int32_t val)
4114{
4115 FT0 = int32_to_floatx(val, &env->fp_status);
4116}
4117
4118void helper_flds_ST0(uint32_t val)
4119{
4120 int new_fpstt;
4121 union {
4122 float32 f;
4123 uint32_t i;
4124 } u;
4125 new_fpstt = (env->fpstt - 1) & 7;
4126 u.i = val;
4127 env->fpregs[new_fpstt].d = float32_to_floatx(u.f, &env->fp_status);
4128 env->fpstt = new_fpstt;
4129 env->fptags[new_fpstt] = 0; /* validate stack entry */
4130}
4131
4132void helper_fldl_ST0(uint64_t val)
4133{
4134 int new_fpstt;
4135 union {
4136 float64 f;
4137 uint64_t i;
4138 } u;
4139 new_fpstt = (env->fpstt - 1) & 7;
4140 u.i = val;
4141 env->fpregs[new_fpstt].d = float64_to_floatx(u.f, &env->fp_status);
4142 env->fpstt = new_fpstt;
4143 env->fptags[new_fpstt] = 0; /* validate stack entry */
4144}
4145
4146void helper_fildl_ST0(int32_t val)
4147{
4148 int new_fpstt;
4149 new_fpstt = (env->fpstt - 1) & 7;
4150 env->fpregs[new_fpstt].d = int32_to_floatx(val, &env->fp_status);
4151 env->fpstt = new_fpstt;
4152 env->fptags[new_fpstt] = 0; /* validate stack entry */
4153}
4154
4155void helper_fildll_ST0(int64_t val)
4156{
4157 int new_fpstt;
4158 new_fpstt = (env->fpstt - 1) & 7;
4159 env->fpregs[new_fpstt].d = int64_to_floatx(val, &env->fp_status);
4160 env->fpstt = new_fpstt;
4161 env->fptags[new_fpstt] = 0; /* validate stack entry */
4162}
4163
4164#ifndef VBOX
4165uint32_t helper_fsts_ST0(void)
4166#else
4167RTCCUINTREG helper_fsts_ST0(void)
4168#endif
4169{
4170 union {
4171 float32 f;
4172 uint32_t i;
4173 } u;
4174 u.f = floatx_to_float32(ST0, &env->fp_status);
4175 return u.i;
4176}
4177
4178uint64_t helper_fstl_ST0(void)
4179{
4180 union {
4181 float64 f;
4182 uint64_t i;
4183 } u;
4184 u.f = floatx_to_float64(ST0, &env->fp_status);
4185 return u.i;
4186}
4187#ifndef VBOX
4188int32_t helper_fist_ST0(void)
4189#else
4190RTCCINTREG helper_fist_ST0(void)
4191#endif
4192{
4193 int32_t val;
4194 val = floatx_to_int32(ST0, &env->fp_status);
4195 if (val != (int16_t)val)
4196 val = -32768;
4197 return val;
4198}
4199
4200#ifndef VBOX
4201int32_t helper_fistl_ST0(void)
4202#else
4203RTCCINTREG helper_fistl_ST0(void)
4204#endif
4205{
4206 int32_t val;
4207 val = floatx_to_int32(ST0, &env->fp_status);
4208 return val;
4209}
4210
4211int64_t helper_fistll_ST0(void)
4212{
4213 int64_t val;
4214 val = floatx_to_int64(ST0, &env->fp_status);
4215 return val;
4216}
4217
4218#ifndef VBOX
4219int32_t helper_fistt_ST0(void)
4220#else
4221RTCCINTREG helper_fistt_ST0(void)
4222#endif
4223{
4224 int32_t val;
4225 val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
4226 if (val != (int16_t)val)
4227 val = -32768;
4228 return val;
4229}
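/* Note on helper_fist_ST0/helper_fistt_ST0 above: a result that does not fit
   in 16 bits is replaced by -32768 (0x8000), the x87 integer indefinite,
   which is what FIST/FISTT with a word operand store on overflow. */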
4230
4231#ifndef VBOX
4232int32_t helper_fisttl_ST0(void)
4233#else
4234RTCCINTREG helper_fisttl_ST0(void)
4235#endif
4236{
4237 int32_t val;
4238 val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
4239 return val;
4240}
4241
4242int64_t helper_fisttll_ST0(void)
4243{
4244 int64_t val;
4245 val = floatx_to_int64_round_to_zero(ST0, &env->fp_status);
4246 return val;
4247}
4248
4249void helper_fldt_ST0(target_ulong ptr)
4250{
4251 int new_fpstt;
4252 new_fpstt = (env->fpstt - 1) & 7;
4253 env->fpregs[new_fpstt].d = helper_fldt(ptr);
4254 env->fpstt = new_fpstt;
4255 env->fptags[new_fpstt] = 0; /* validate stack entry */
4256}
4257
4258void helper_fstt_ST0(target_ulong ptr)
4259{
4260 helper_fstt(ST0, ptr);
4261}
4262
4263void helper_fpush(void)
4264{
4265 fpush();
4266}
4267
4268void helper_fpop(void)
4269{
4270 fpop();
4271}
4272
4273void helper_fdecstp(void)
4274{
4275 env->fpstt = (env->fpstt - 1) & 7;
4276 env->fpus &= (~0x4700);
4277}
4278
4279void helper_fincstp(void)
4280{
4281 env->fpstt = (env->fpstt + 1) & 7;
4282 env->fpus &= (~0x4700);
4283}
4284
4285/* FPU move */
4286
4287void helper_ffree_STN(int st_index)
4288{
4289 env->fptags[(env->fpstt + st_index) & 7] = 1;
4290}
4291
4292void helper_fmov_ST0_FT0(void)
4293{
4294 ST0 = FT0;
4295}
4296
4297void helper_fmov_FT0_STN(int st_index)
4298{
4299 FT0 = ST(st_index);
4300}
4301
4302void helper_fmov_ST0_STN(int st_index)
4303{
4304 ST0 = ST(st_index);
4305}
4306
4307void helper_fmov_STN_ST0(int st_index)
4308{
4309 ST(st_index) = ST0;
4310}
4311
4312void helper_fxchg_ST0_STN(int st_index)
4313{
4314 CPU86_LDouble tmp;
4315 tmp = ST(st_index);
4316 ST(st_index) = ST0;
4317 ST0 = tmp;
4318}
4319
4320/* FPU operations */
4321
4322static const int fcom_ccval[4] = {0x0100, 0x4000, 0x0000, 0x4500};
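/* fcom_ccval is indexed by the floatx_compare() result + 1 (-1 = less,
   0 = equal, 1 = greater, 2 = unordered) and yields the C0/C2/C3 condition
   bits of the status word: C0 (0x0100) for less, C3 (0x4000) for equal,
   none for greater, C3|C2|C0 (0x4500) for unordered. */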
4323
4324void helper_fcom_ST0_FT0(void)
4325{
4326 int ret;
4327
4328 ret = floatx_compare(ST0, FT0, &env->fp_status);
4329 env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
4330 FORCE_RET();
4331}
4332
4333void helper_fucom_ST0_FT0(void)
4334{
4335 int ret;
4336
4337 ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
4338 env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret+ 1];
4339 FORCE_RET();
4340}
4341
4342static const int fcomi_ccval[4] = {CC_C, CC_Z, 0, CC_Z | CC_P | CC_C};
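/* fcomi_ccval maps the same compare result to the EFLAGS bits used by
   FCOMI/FUCOMI: CF for less, ZF for equal, none for greater and
   ZF|PF|CF for unordered. */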
4343
4344void helper_fcomi_ST0_FT0(void)
4345{
4346 int eflags;
4347 int ret;
4348
4349 ret = floatx_compare(ST0, FT0, &env->fp_status);
4350 eflags = cc_table[CC_OP].compute_all();
4351 eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
4352 CC_SRC = eflags;
4353 FORCE_RET();
4354}
4355
4356void helper_fucomi_ST0_FT0(void)
4357{
4358 int eflags;
4359 int ret;
4360
4361 ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
4362 eflags = cc_table[CC_OP].compute_all();
4363 eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
4364 CC_SRC = eflags;
4365 FORCE_RET();
4366}
4367
4368void helper_fadd_ST0_FT0(void)
4369{
4370 ST0 += FT0;
4371}
4372
4373void helper_fmul_ST0_FT0(void)
4374{
4375 ST0 *= FT0;
4376}
4377
4378void helper_fsub_ST0_FT0(void)
4379{
4380 ST0 -= FT0;
4381}
4382
4383void helper_fsubr_ST0_FT0(void)
4384{
4385 ST0 = FT0 - ST0;
4386}
4387
4388void helper_fdiv_ST0_FT0(void)
4389{
4390 ST0 = helper_fdiv(ST0, FT0);
4391}
4392
4393void helper_fdivr_ST0_FT0(void)
4394{
4395 ST0 = helper_fdiv(FT0, ST0);
4396}
4397
4398/* fp operations between STN and ST0 */
4399
4400void helper_fadd_STN_ST0(int st_index)
4401{
4402 ST(st_index) += ST0;
4403}
4404
4405void helper_fmul_STN_ST0(int st_index)
4406{
4407 ST(st_index) *= ST0;
4408}
4409
4410void helper_fsub_STN_ST0(int st_index)
4411{
4412 ST(st_index) -= ST0;
4413}
4414
4415void helper_fsubr_STN_ST0(int st_index)
4416{
4417 CPU86_LDouble *p;
4418 p = &ST(st_index);
4419 *p = ST0 - *p;
4420}
4421
4422void helper_fdiv_STN_ST0(int st_index)
4423{
4424 CPU86_LDouble *p;
4425 p = &ST(st_index);
4426 *p = helper_fdiv(*p, ST0);
4427}
4428
4429void helper_fdivr_STN_ST0(int st_index)
4430{
4431 CPU86_LDouble *p;
4432 p = &ST(st_index);
4433 *p = helper_fdiv(ST0, *p);
4434}
4435
4436/* misc FPU operations */
4437void helper_fchs_ST0(void)
4438{
4439 ST0 = floatx_chs(ST0);
4440}
4441
4442void helper_fabs_ST0(void)
4443{
4444 ST0 = floatx_abs(ST0);
4445}
4446
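/* The constant loads below index the f15rk[] table of x87 constants:
   [0] = +0.0, [1] = +1.0, [2] = pi, [3] = lg2 (log10(2)), [4] = ln2,
   [5] = l2e (log2(e)), [6] = l2t (log2(10)). */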
4447void helper_fld1_ST0(void)
4448{
4449 ST0 = f15rk[1];
4450}
4451
4452void helper_fldl2t_ST0(void)
4453{
4454 ST0 = f15rk[6];
4455}
4456
4457void helper_fldl2e_ST0(void)
4458{
4459 ST0 = f15rk[5];
4460}
4461
4462void helper_fldpi_ST0(void)
4463{
4464 ST0 = f15rk[2];
4465}
4466
4467void helper_fldlg2_ST0(void)
4468{
4469 ST0 = f15rk[3];
4470}
4471
4472void helper_fldln2_ST0(void)
4473{
4474 ST0 = f15rk[4];
4475}
4476
4477void helper_fldz_ST0(void)
4478{
4479 ST0 = f15rk[0];
4480}
4481
4482void helper_fldz_FT0(void)
4483{
4484 FT0 = f15rk[0];
4485}
4486
4487#ifndef VBOX
4488uint32_t helper_fnstsw(void)
4489#else
4490RTCCUINTREG helper_fnstsw(void)
4491#endif
4492{
4493 return (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
4494}
4495
4496#ifndef VBOX
4497uint32_t helper_fnstcw(void)
4498#else
4499RTCCUINTREG helper_fnstcw(void)
4500#endif
4501{
4502 return env->fpuc;
4503}
4504
4505static void update_fp_status(void)
4506{
4507 int rnd_type;
4508
4509 /* set rounding mode */
4510 switch(env->fpuc & RC_MASK) {
4511 default:
4512 case RC_NEAR:
4513 rnd_type = float_round_nearest_even;
4514 break;
4515 case RC_DOWN:
4516 rnd_type = float_round_down;
4517 break;
4518 case RC_UP:
4519 rnd_type = float_round_up;
4520 break;
4521 case RC_CHOP:
4522 rnd_type = float_round_to_zero;
4523 break;
4524 }
4525 set_float_rounding_mode(rnd_type, &env->fp_status);
4526#ifdef FLOATX80
4527 switch((env->fpuc >> 8) & 3) {
4528 case 0:
4529 rnd_type = 32;
4530 break;
4531 case 2:
4532 rnd_type = 64;
4533 break;
4534 case 3:
4535 default:
4536 rnd_type = 80;
4537 break;
4538 }
4539 set_floatx80_rounding_precision(rnd_type, &env->fp_status);
4540#endif
4541}
4542
4543void helper_fldcw(uint32_t val)
4544{
4545 env->fpuc = val;
4546 update_fp_status();
4547}
4548
4549void helper_fclex(void)
4550{
4551 env->fpus &= 0x7f00;
4552}
4553
4554void helper_fwait(void)
4555{
4556 if (env->fpus & FPUS_SE)
4557 fpu_raise_exception();
4558 FORCE_RET();
4559}
4560
4561void helper_fninit(void)
4562{
4563 env->fpus = 0;
4564 env->fpstt = 0;
4565 env->fpuc = 0x37f;
4566 env->fptags[0] = 1;
4567 env->fptags[1] = 1;
4568 env->fptags[2] = 1;
4569 env->fptags[3] = 1;
4570 env->fptags[4] = 1;
4571 env->fptags[5] = 1;
4572 env->fptags[6] = 1;
4573 env->fptags[7] = 1;
4574}
4575
4576/* BCD ops */
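/* The operand is a 10-byte packed BCD number: bytes 0..8 hold 18 decimal
   digits (two per byte, low nibble = lower digit), byte 9 bit 7 is the sign.
   For example, +123 is stored as byte0 = 0x23, byte1 = 0x01, remaining bytes
   zero. helper_fbld_ST0 converts such a value into ST0, helper_fbst_ST0
   stores ST0 back in that format. */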
4577
4578void helper_fbld_ST0(target_ulong ptr)
4579{
4580 CPU86_LDouble tmp;
4581 uint64_t val;
4582 unsigned int v;
4583 int i;
4584
4585 val = 0;
4586 for(i = 8; i >= 0; i--) {
4587 v = ldub(ptr + i);
4588 val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
4589 }
4590 tmp = val;
4591 if (ldub(ptr + 9) & 0x80)
4592 tmp = -tmp;
4593 fpush();
4594 ST0 = tmp;
4595}
4596
4597void helper_fbst_ST0(target_ulong ptr)
4598{
4599 int v;
4600 target_ulong mem_ref, mem_end;
4601 int64_t val;
4602
4603 val = floatx_to_int64(ST0, &env->fp_status);
4604 mem_ref = ptr;
4605 mem_end = mem_ref + 9;
4606 if (val < 0) {
4607 stb(mem_end, 0x80);
4608 val = -val;
4609 } else {
4610 stb(mem_end, 0x00);
4611 }
4612 while (mem_ref < mem_end) {
4613 if (val == 0)
4614 break;
4615 v = val % 100;
4616 val = val / 100;
4617 v = ((v / 10) << 4) | (v % 10);
4618 stb(mem_ref++, v);
4619 }
4620 while (mem_ref < mem_end) {
4621 stb(mem_ref++, 0);
4622 }
4623}
4624
4625void helper_f2xm1(void)
4626{
4627 ST0 = pow(2.0,ST0) - 1.0;
4628}
4629
4630void helper_fyl2x(void)
4631{
4632 CPU86_LDouble fptemp;
4633
4634 fptemp = ST0;
4635 if (fptemp>0.0){
4636 fptemp = log(fptemp)/log(2.0); /* log2(ST) */
4637 ST1 *= fptemp;
4638 fpop();
4639 } else {
4640 env->fpus &= (~0x4700);
4641 env->fpus |= 0x400;
4642 }
4643}
4644
4645void helper_fptan(void)
4646{
4647 CPU86_LDouble fptemp;
4648
4649 fptemp = ST0;
4650 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4651 env->fpus |= 0x400;
4652 } else {
4653 ST0 = tan(fptemp);
4654 fpush();
4655 ST0 = 1.0;
4656 env->fpus &= (~0x400); /* C2 <-- 0 */
4657 /* the above code is for |arg| < 2**52 only */
4658 }
4659}
4660
4661void helper_fpatan(void)
4662{
4663 CPU86_LDouble fptemp, fpsrcop;
4664
4665 fpsrcop = ST1;
4666 fptemp = ST0;
4667 ST1 = atan2(fpsrcop,fptemp);
4668 fpop();
4669}
4670
4671void helper_fxtract(void)
4672{
4673 CPU86_LDoubleU temp;
4674 unsigned int expdif;
4675
4676 temp.d = ST0;
4677 expdif = EXPD(temp) - EXPBIAS;
4678 /*DP exponent bias*/
4679 ST0 = expdif;
4680 fpush();
4681 BIASEXPONENT(temp);
4682 ST0 = temp.d;
4683}
4684
4685#ifdef VBOX
4686#ifdef _MSC_VER
4687/* MSC cannot divide by zero */
4688extern double _Nan;
4689#define NaN _Nan
4690#else
4691#define NaN (0.0 / 0.0)
4692#endif
4693#endif /* VBOX */
4694
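/* FPREM1/FPREM below compute the partial remainder ST0 mod ST1. When the
   exponent difference is small (< 53) the remainder is computed exactly, C2 is
   cleared and the three low quotient bits are reported in C0/C3/C1; otherwise
   only a partial reduction is done and C2 is set, telling the caller to issue
   the instruction again. FPREM1 rounds the quotient to nearest (IEEE
   remainder), FPREM truncates it towards zero. */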
4695void helper_fprem1(void)
4696{
4697 CPU86_LDouble dblq, fpsrcop, fptemp;
4698 CPU86_LDoubleU fpsrcop1, fptemp1;
4699 int expdif;
4700 signed long long int q;
4701
4702#ifndef VBOX /* Unfortunately, we cannot handle isinf/isnan easily in wrapper */
4703 if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
4704#else
4705 if ((ST0 != ST0) || (ST1 != ST1) || (ST1 == 0.0)) {
4706#endif
4707 ST0 = 0.0 / 0.0; /* NaN */
4708 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4709 return;
4710 }
4711
4712 fpsrcop = ST0;
4713 fptemp = ST1;
4714 fpsrcop1.d = fpsrcop;
4715 fptemp1.d = fptemp;
4716 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
4717
4718 if (expdif < 0) {
4719 /* optimisation? taken from the AMD docs */
4720 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4721 /* ST0 is unchanged */
4722 return;
4723 }
4724
4725 if (expdif < 53) {
4726 dblq = fpsrcop / fptemp;
4727 /* round dblq towards nearest integer */
4728 dblq = rint(dblq);
4729 ST0 = fpsrcop - fptemp * dblq;
4730
4731 /* convert dblq to q by truncating towards zero */
4732 if (dblq < 0.0)
4733 q = (signed long long int)(-dblq);
4734 else
4735 q = (signed long long int)dblq;
4736
4737 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4738 /* (C0,C3,C1) <-- (q2,q1,q0) */
4739 env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
4740 env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
4741 env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
4742 } else {
4743 env->fpus |= 0x400; /* C2 <-- 1 */
4744 fptemp = pow(2.0, expdif - 50);
4745 fpsrcop = (ST0 / ST1) / fptemp;
4746 /* fpsrcop = integer obtained by chopping */
4747 fpsrcop = (fpsrcop < 0.0) ?
4748 -(floor(fabs(fpsrcop))) : floor(fpsrcop);
4749 ST0 -= (ST1 * fpsrcop * fptemp);
4750 }
4751}
4752
4753void helper_fprem(void)
4754{
4755 CPU86_LDouble dblq, fpsrcop, fptemp;
4756 CPU86_LDoubleU fpsrcop1, fptemp1;
4757 int expdif;
4758 signed long long int q;
4759
4760#ifndef VBOX /* Unfortunately, we cannot easily handle isinf/isnan in wrapper */
4761 if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
4762#else
4763 if ((ST0 != ST0) || (ST1 != ST1) || (ST1 == 0.0)) {
4764#endif
4765 ST0 = 0.0 / 0.0; /* NaN */
4766 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4767 return;
4768 }
4769
4770 fpsrcop = (CPU86_LDouble)ST0;
4771 fptemp = (CPU86_LDouble)ST1;
4772 fpsrcop1.d = fpsrcop;
4773 fptemp1.d = fptemp;
4774 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
4775
4776 if (expdif < 0) {
4777 /* optimisation? taken from the AMD docs */
4778 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4779 /* ST0 is unchanged */
4780 return;
4781 }
4782
4783 if ( expdif < 53 ) {
4784 dblq = fpsrcop/*ST0*/ / fptemp/*ST1*/;
4785 /* round dblq towards zero */
4786 dblq = (dblq < 0.0) ? ceil(dblq) : floor(dblq);
4787 ST0 = fpsrcop/*ST0*/ - fptemp * dblq;
4788
4789 /* convert dblq to q by truncating towards zero */
4790 if (dblq < 0.0)
4791 q = (signed long long int)(-dblq);
4792 else
4793 q = (signed long long int)dblq;
4794
4795 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4796 /* (C0,C3,C1) <-- (q2,q1,q0) */
4797 env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
4798 env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
4799 env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
4800 } else {
4801 int N = 32 + (expdif % 32); /* as per AMD docs */
4802 env->fpus |= 0x400; /* C2 <-- 1 */
4803 fptemp = pow(2.0, (double)(expdif - N));
4804 fpsrcop = (ST0 / ST1) / fptemp;
4805 /* fpsrcop = integer obtained by chopping */
4806 fpsrcop = (fpsrcop < 0.0) ?
4807 -(floor(fabs(fpsrcop))) : floor(fpsrcop);
4808 ST0 -= (ST1 * fpsrcop * fptemp);
4809 }
4810}
4811
4812void helper_fyl2xp1(void)
4813{
4814 CPU86_LDouble fptemp;
4815
4816 fptemp = ST0;
4817 if ((fptemp+1.0)>0.0) {
4818 fptemp = log(fptemp+1.0) / log(2.0); /* log2(ST+1.0) */
4819 ST1 *= fptemp;
4820 fpop();
4821 } else {
4822 env->fpus &= (~0x4700);
4823 env->fpus |= 0x400;
4824 }
4825}
4826
4827void helper_fsqrt(void)
4828{
4829 CPU86_LDouble fptemp;
4830
4831 fptemp = ST0;
4832 if (fptemp<0.0) {
4833 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4834 env->fpus |= 0x400;
4835 }
4836 ST0 = sqrt(fptemp);
4837}
4838
4839void helper_fsincos(void)
4840{
4841 CPU86_LDouble fptemp;
4842
4843 fptemp = ST0;
4844 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4845 env->fpus |= 0x400;
4846 } else {
4847 ST0 = sin(fptemp);
4848 fpush();
4849 ST0 = cos(fptemp);
4850 env->fpus &= (~0x400); /* C2 <-- 0 */
4851 /* the above code is for |arg| < 2**63 only */
4852 }
4853}
4854
4855void helper_frndint(void)
4856{
4857 ST0 = floatx_round_to_int(ST0, &env->fp_status);
4858}
4859
4860void helper_fscale(void)
4861{
4862 ST0 = ldexp (ST0, (int)(ST1));
4863}
4864
4865void helper_fsin(void)
4866{
4867 CPU86_LDouble fptemp;
4868
4869 fptemp = ST0;
4870 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4871 env->fpus |= 0x400;
4872 } else {
4873 ST0 = sin(fptemp);
4874 env->fpus &= (~0x400); /* C2 <-- 0 */
4875 /* the above code is for |arg| < 2**53 only */
4876 }
4877}
4878
4879void helper_fcos(void)
4880{
4881 CPU86_LDouble fptemp;
4882
4883 fptemp = ST0;
4884 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4885 env->fpus |= 0x400;
4886 } else {
4887 ST0 = cos(fptemp);
4888 env->fpus &= (~0x400); /* C2 <-- 0 */
4889 /* the above code is for |arg| < 2**63 only */
4890 }
4891}
4892
4893void helper_fxam_ST0(void)
4894{
4895 CPU86_LDoubleU temp;
4896 int expdif;
4897
4898 temp.d = ST0;
4899
4900 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4901 if (SIGND(temp))
4902 env->fpus |= 0x200; /* C1 <-- 1 */
4903
4904 /* XXX: test fptags too */
4905 expdif = EXPD(temp);
4906 if (expdif == MAXEXPD) {
4907#ifdef USE_X86LDOUBLE
4908 if (MANTD(temp) == 0x8000000000000000ULL)
4909#else
4910 if (MANTD(temp) == 0)
4911#endif
4912 env->fpus |= 0x500 /*Infinity*/;
4913 else
4914 env->fpus |= 0x100 /*NaN*/;
4915 } else if (expdif == 0) {
4916 if (MANTD(temp) == 0)
4917 env->fpus |= 0x4000 /*Zero*/;
4918 else
4919 env->fpus |= 0x4400 /*Denormal*/;
4920 } else {
4921 env->fpus |= 0x400;
4922 }
4923}
4924
4925void helper_fstenv(target_ulong ptr, int data32)
4926{
4927 int fpus, fptag, exp, i;
4928 uint64_t mant;
4929 CPU86_LDoubleU tmp;
4930
4931 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
4932 fptag = 0;
4933 for (i=7; i>=0; i--) {
4934 fptag <<= 2;
4935 if (env->fptags[i]) {
4936 fptag |= 3;
4937 } else {
4938 tmp.d = env->fpregs[i].d;
4939 exp = EXPD(tmp);
4940 mant = MANTD(tmp);
4941 if (exp == 0 && mant == 0) {
4942 /* zero */
4943 fptag |= 1;
4944 } else if (exp == 0 || exp == MAXEXPD
4945#ifdef USE_X86LDOUBLE
4946 || (mant & (1LL << 63)) == 0
4947#endif
4948 ) {
4949 /* NaNs, infinity, denormal */
4950 fptag |= 2;
4951 }
4952 }
4953 }
4954 if (data32) {
4955 /* 32 bit */
4956 stl(ptr, env->fpuc);
4957 stl(ptr + 4, fpus);
4958 stl(ptr + 8, fptag);
4959 stl(ptr + 12, 0); /* fpip */
4960 stl(ptr + 16, 0); /* fpcs */
4961 stl(ptr + 20, 0); /* fpoo */
4962 stl(ptr + 24, 0); /* fpos */
4963 } else {
4964 /* 16 bit */
4965 stw(ptr, env->fpuc);
4966 stw(ptr + 2, fpus);
4967 stw(ptr + 4, fptag);
4968 stw(ptr + 6, 0);
4969 stw(ptr + 8, 0);
4970 stw(ptr + 10, 0);
4971 stw(ptr + 12, 0);
4972 }
4973}
4974
4975void helper_fldenv(target_ulong ptr, int data32)
4976{
4977 int i, fpus, fptag;
4978
4979 if (data32) {
4980 env->fpuc = lduw(ptr);
4981 fpus = lduw(ptr + 4);
4982 fptag = lduw(ptr + 8);
4983 }
4984 else {
4985 env->fpuc = lduw(ptr);
4986 fpus = lduw(ptr + 2);
4987 fptag = lduw(ptr + 4);
4988 }
4989 env->fpstt = (fpus >> 11) & 7;
4990 env->fpus = fpus & ~0x3800;
4991 for(i = 0;i < 8; i++) {
4992 env->fptags[i] = ((fptag & 3) == 3);
4993 fptag >>= 2;
4994 }
4995}
4996
4997void helper_fsave(target_ulong ptr, int data32)
4998{
4999 CPU86_LDouble tmp;
5000 int i;
5001
5002 helper_fstenv(ptr, data32);
5003
5004 ptr += (14 << data32);
5005 for(i = 0;i < 8; i++) {
5006 tmp = ST(i);
5007 helper_fstt(tmp, ptr);
5008 ptr += 10;
5009 }
5010
5011 /* fninit */
5012 env->fpus = 0;
5013 env->fpstt = 0;
5014 env->fpuc = 0x37f;
5015 env->fptags[0] = 1;
5016 env->fptags[1] = 1;
5017 env->fptags[2] = 1;
5018 env->fptags[3] = 1;
5019 env->fptags[4] = 1;
5020 env->fptags[5] = 1;
5021 env->fptags[6] = 1;
5022 env->fptags[7] = 1;
5023}
5024
5025void helper_frstor(target_ulong ptr, int data32)
5026{
5027 CPU86_LDouble tmp;
5028 int i;
5029
5030 helper_fldenv(ptr, data32);
5031 ptr += (14 << data32);
5032
5033 for(i = 0;i < 8; i++) {
5034 tmp = helper_fldt(ptr);
5035 ST(i) = tmp;
5036 ptr += 10;
5037 }
5038}
5039
5040void helper_fxsave(target_ulong ptr, int data64)
5041{
5042 int fpus, fptag, i, nb_xmm_regs;
5043 CPU86_LDouble tmp;
5044 target_ulong addr;
5045
5046 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
5047 fptag = 0;
5048 for(i = 0; i < 8; i++) {
5049 fptag |= (env->fptags[i] << i);
5050 }
5051 stw(ptr, env->fpuc);
5052 stw(ptr + 2, fpus);
5053 stw(ptr + 4, fptag ^ 0xff);
5054#ifdef TARGET_X86_64
5055 if (data64) {
5056 stq(ptr + 0x08, 0); /* rip */
5057 stq(ptr + 0x10, 0); /* rdp */
5058 } else
5059#endif
5060 {
5061 stl(ptr + 0x08, 0); /* eip */
5062 stl(ptr + 0x0c, 0); /* sel */
5063 stl(ptr + 0x10, 0); /* dp */
5064 stl(ptr + 0x14, 0); /* sel */
5065 }
5066
5067 addr = ptr + 0x20;
5068 for(i = 0;i < 8; i++) {
5069 tmp = ST(i);
5070 helper_fstt(tmp, addr);
5071 addr += 16;
5072 }
5073
5074 if (env->cr[4] & CR4_OSFXSR_MASK) {
5075 /* XXX: finish it */
5076 stl(ptr + 0x18, env->mxcsr); /* mxcsr */
5077 stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
5078 if (env->hflags & HF_CS64_MASK)
5079 nb_xmm_regs = 16;
5080 else
5081 nb_xmm_regs = 8;
5082 addr = ptr + 0xa0;
5083 for(i = 0; i < nb_xmm_regs; i++) {
5084 stq(addr, env->xmm_regs[i].XMM_Q(0));
5085 stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
5086 addr += 16;
5087 }
5088 }
5089}
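/* Layout of the 512-byte FXSAVE area used by helper_fxsave above and
   helper_fxrstor below: +0x00 FCW, +0x02 FSW, +0x04 abridged tag word,
   +0x08..+0x17 instruction/data pointers (rip/rdp in 64-bit mode),
   +0x18 MXCSR, +0x1c MXCSR_MASK, +0x20 eight 16-byte slots for ST0..ST7,
   +0xa0 the XMM registers (8 or 16 of them, 16 bytes each). */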
5090
5091void helper_fxrstor(target_ulong ptr, int data64)
5092{
5093 int i, fpus, fptag, nb_xmm_regs;
5094 CPU86_LDouble tmp;
5095 target_ulong addr;
5096
5097 env->fpuc = lduw(ptr);
5098 fpus = lduw(ptr + 2);
5099 fptag = lduw(ptr + 4);
5100 env->fpstt = (fpus >> 11) & 7;
5101 env->fpus = fpus & ~0x3800;
5102 fptag ^= 0xff;
5103 for(i = 0;i < 8; i++) {
5104 env->fptags[i] = ((fptag >> i) & 1);
5105 }
5106
5107 addr = ptr + 0x20;
5108 for(i = 0;i < 8; i++) {
5109 tmp = helper_fldt(addr);
5110 ST(i) = tmp;
5111 addr += 16;
5112 }
5113
5114 if (env->cr[4] & CR4_OSFXSR_MASK) {
5115 /* XXX: finish it */
5116 env->mxcsr = ldl(ptr + 0x18);
5117 //ldl(ptr + 0x1c);
5118 if (env->hflags & HF_CS64_MASK)
5119 nb_xmm_regs = 16;
5120 else
5121 nb_xmm_regs = 8;
5122 addr = ptr + 0xa0;
5123 for(i = 0; i < nb_xmm_regs; i++) {
5124#if !defined(VBOX) || __GNUC__ < 4
5125 env->xmm_regs[i].XMM_Q(0) = ldq(addr);
5126 env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
5127#else /* VBOX + __GNUC__ >= 4: gcc 4.x compiler bug - it runs out of registers for the 64-bit value. */
5128# if 1
5129 env->xmm_regs[i].XMM_L(0) = ldl(addr);
5130 env->xmm_regs[i].XMM_L(1) = ldl(addr + 4);
5131 env->xmm_regs[i].XMM_L(2) = ldl(addr + 8);
5132 env->xmm_regs[i].XMM_L(3) = ldl(addr + 12);
5133# else
5134 /* this works fine on Mac OS X, gcc 4.0.1 */
5135 uint64_t u64 = ldq(addr);
5136 env->xmm_regs[i].XMM_Q(0) = u64;
5137 u64 = ldq(addr + 8);
5138 env->xmm_regs[i].XMM_Q(1) = u64;
5139# endif
5140#endif
5141 addr += 16;
5142 }
5143 }
5144}
5145
5146#ifndef USE_X86LDOUBLE
5147
5148void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
5149{
5150 CPU86_LDoubleU temp;
5151 int e;
5152
5153 temp.d = f;
5154 /* mantissa */
5155 *pmant = (MANTD(temp) << 11) | (1LL << 63);
5156 /* exponent + sign */
5157 e = EXPD(temp) - EXPBIAS + 16383;
5158 e |= SIGND(temp) >> 16;
5159 *pexp = e;
5160}
5161
5162CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
5163{
5164 CPU86_LDoubleU temp;
5165 int e;
5166 uint64_t ll;
5167
5168 /* XXX: handle overflow ? */
5169 e = (upper & 0x7fff) - 16383 + EXPBIAS; /* exponent */
5170 e |= (upper >> 4) & 0x800; /* sign */
5171 ll = (mant >> 11) & ((1LL << 52) - 1);
5172#ifdef __arm__
5173 temp.l.upper = (e << 20) | (ll >> 32);
5174 temp.l.lower = ll;
5175#else
5176 temp.ll = ll | ((uint64_t)e << 52);
5177#endif
5178 return temp.d;
5179}
5180
5181#else
5182
5183void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
5184{
5185 CPU86_LDoubleU temp;
5186
5187 temp.d = f;
5188 *pmant = temp.l.lower;
5189 *pexp = temp.l.upper;
5190}
5191
5192CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
5193{
5194 CPU86_LDoubleU temp;
5195
5196 temp.l.upper = upper;
5197 temp.l.lower = mant;
5198 return temp.d;
5199}
5200#endif
5201
5202#ifdef TARGET_X86_64
5203
5204//#define DEBUG_MULDIV
5205
5206static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
5207{
5208 *plow += a;
5209 /* carry test */
5210 if (*plow < a)
5211 (*phigh)++;
5212 *phigh += b;
5213}
5214
5215static void neg128(uint64_t *plow, uint64_t *phigh)
5216{
5217 *plow = ~ *plow;
5218 *phigh = ~ *phigh;
5219 add128(plow, phigh, 1, 0);
5220}
5221
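/* div64: divide the 128-bit value *phigh:*plow by b. The fast path handles a
   64-bit dividend directly; otherwise a simple restoring shift-and-subtract
   loop produces one quotient bit per iteration. On return *plow holds the
   quotient and *phigh the remainder. Overflow (quotient does not fit in
   64 bits, i.e. a1 >= b) is reported by returning 1; helper_divq_EAX turns
   that into a divide error exception. */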
5222/* return TRUE if overflow */
5223static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
5224{
5225 uint64_t q, r, a1, a0;
5226 int i, qb, ab;
5227
5228 a0 = *plow;
5229 a1 = *phigh;
5230 if (a1 == 0) {
5231 q = a0 / b;
5232 r = a0 % b;
5233 *plow = q;
5234 *phigh = r;
5235 } else {
5236 if (a1 >= b)
5237 return 1;
5238 /* XXX: use a better algorithm */
5239 for(i = 0; i < 64; i++) {
5240 ab = a1 >> 63;
5241 a1 = (a1 << 1) | (a0 >> 63);
5242 if (ab || a1 >= b) {
5243 a1 -= b;
5244 qb = 1;
5245 } else {
5246 qb = 0;
5247 }
5248 a0 = (a0 << 1) | qb;
5249 }
5250#if defined(DEBUG_MULDIV)
5251 printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64 ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n",
5252 *phigh, *plow, b, a0, a1);
5253#endif
5254 *plow = a0;
5255 *phigh = a1;
5256 }
5257 return 0;
5258}
5259
5260/* return TRUE if overflow */
5261static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
5262{
5263 int sa, sb;
5264 sa = ((int64_t)*phigh < 0);
5265 if (sa)
5266 neg128(plow, phigh);
5267 sb = (b < 0);
5268 if (sb)
5269 b = -b;
5270 if (div64(plow, phigh, b) != 0)
5271 return 1;
5272 if (sa ^ sb) {
5273 if (*plow > (1ULL << 63))
5274 return 1;
5275 *plow = - *plow;
5276 } else {
5277 if (*plow >= (1ULL << 63))
5278 return 1;
5279 }
5280 if (sa)
5281 *phigh = - *phigh;
5282 return 0;
5283}
5284
5285void helper_mulq_EAX_T0(target_ulong t0)
5286{
5287 uint64_t r0, r1;
5288
5289 mulu64(&r0, &r1, EAX, t0);
5290 EAX = r0;
5291 EDX = r1;
5292 CC_DST = r0;
5293 CC_SRC = r1;
5294}
5295
5296void helper_imulq_EAX_T0(target_ulong t0)
5297{
5298 uint64_t r0, r1;
5299
5300 muls64(&r0, &r1, EAX, t0);
5301 EAX = r0;
5302 EDX = r1;
5303 CC_DST = r0;
5304 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
5305}
5306
5307target_ulong helper_imulq_T0_T1(target_ulong t0, target_ulong t1)
5308{
5309 uint64_t r0, r1;
5310
5311 muls64(&r0, &r1, t0, t1);
5312 CC_DST = r0;
5313 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
5314 return r0;
5315}
5316
5317void helper_divq_EAX(target_ulong t0)
5318{
5319 uint64_t r0, r1;
5320 if (t0 == 0) {
5321 raise_exception(EXCP00_DIVZ);
5322 }
5323 r0 = EAX;
5324 r1 = EDX;
5325 if (div64(&r0, &r1, t0))
5326 raise_exception(EXCP00_DIVZ);
5327 EAX = r0;
5328 EDX = r1;
5329}
5330
5331void helper_idivq_EAX(target_ulong t0)
5332{
5333 uint64_t r0, r1;
5334 if (t0 == 0) {
5335 raise_exception(EXCP00_DIVZ);
5336 }
5337 r0 = EAX;
5338 r1 = EDX;
5339 if (idiv64(&r0, &r1, t0))
5340 raise_exception(EXCP00_DIVZ);
5341 EAX = r0;
5342 EDX = r1;
5343}
5344#endif
5345
5346static void do_hlt(void)
5347{
5348 env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
5349 env->halted = 1;
5350 env->exception_index = EXCP_HLT;
5351 cpu_loop_exit();
5352}
5353
5354void helper_hlt(int next_eip_addend)
5355{
5356 helper_svm_check_intercept_param(SVM_EXIT_HLT, 0);
5357 EIP += next_eip_addend;
5358
5359 do_hlt();
5360}
5361
5362void helper_monitor(target_ulong ptr)
5363{
5364 if ((uint32_t)ECX != 0)
5365 raise_exception(EXCP0D_GPF);
5366 /* XXX: store address ? */
5367 helper_svm_check_intercept_param(SVM_EXIT_MONITOR, 0);
5368}
5369
5370void helper_mwait(int next_eip_addend)
5371{
5372 if ((uint32_t)ECX != 0)
5373 raise_exception(EXCP0D_GPF);
5374#ifdef VBOX
5375 helper_hlt(next_eip_addend);
5376#else
5377 helper_svm_check_intercept_param(SVM_EXIT_MWAIT, 0);
5378 EIP += next_eip_addend;
5379
5380 /* XXX: not complete but not completely erroneous */
5381 if (env->cpu_index != 0 || env->next_cpu != NULL) {
5382 /* more than one CPU: do not sleep because another CPU may
5383 wake this one */
5384 } else {
5385 do_hlt();
5386 }
5387#endif
5388}
5389
5390void helper_debug(void)
5391{
5392 env->exception_index = EXCP_DEBUG;
5393 cpu_loop_exit();
5394}
5395
5396void helper_raise_interrupt(int intno, int next_eip_addend)
5397{
5398 raise_interrupt(intno, 1, 0, next_eip_addend);
5399}
5400
5401void helper_raise_exception(int exception_index)
5402{
5403 raise_exception(exception_index);
5404}
5405
5406void helper_cli(void)
5407{
5408 env->eflags &= ~IF_MASK;
5409}
5410
5411void helper_sti(void)
5412{
5413 env->eflags |= IF_MASK;
5414}
5415
5416#ifdef VBOX
5417void helper_cli_vme(void)
5418{
5419 env->eflags &= ~VIF_MASK;
5420}
5421
5422void helper_sti_vme(void)
5423{
5424 /* First check, then change eflags according to the AMD manual */
5425 if (env->eflags & VIP_MASK) {
5426 raise_exception(EXCP0D_GPF);
5427 }
5428 env->eflags |= VIF_MASK;
5429}
5430#endif
5431
5432#if 0
5433/* vm86plus instructions */
5434void helper_cli_vm(void)
5435{
5436 env->eflags &= ~VIF_MASK;
5437}
5438
5439void helper_sti_vm(void)
5440{
5441 env->eflags |= VIF_MASK;
5442 if (env->eflags & VIP_MASK) {
5443 raise_exception(EXCP0D_GPF);
5444 }
5445}
5446#endif
5447
5448void helper_set_inhibit_irq(void)
5449{
5450 env->hflags |= HF_INHIBIT_IRQ_MASK;
5451}
5452
5453void helper_reset_inhibit_irq(void)
5454{
5455 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
5456}
5457
5458void helper_boundw(target_ulong a0, int v)
5459{
5460 int low, high;
5461 low = ldsw(a0);
5462 high = ldsw(a0 + 2);
5463 v = (int16_t)v;
5464 if (v < low || v > high) {
5465 raise_exception(EXCP05_BOUND);
5466 }
5467 FORCE_RET();
5468}
5469
5470void helper_boundl(target_ulong a0, int v)
5471{
5472 int low, high;
5473 low = ldl(a0);
5474 high = ldl(a0 + 4);
5475 if (v < low || v > high) {
5476 raise_exception(EXCP05_BOUND);
5477 }
5478 FORCE_RET();
5479}
5480
5481static float approx_rsqrt(float a)
5482{
5483 return 1.0 / sqrt(a);
5484}
5485
5486static float approx_rcp(float a)
5487{
5488 return 1.0 / a;
5489}
5490
5491#if !defined(CONFIG_USER_ONLY)
5492
5493#define MMUSUFFIX _mmu
5494
5495#define SHIFT 0
5496#include "softmmu_template.h"
5497
5498#define SHIFT 1
5499#include "softmmu_template.h"
5500
5501#define SHIFT 2
5502#include "softmmu_template.h"
5503
5504#define SHIFT 3
5505#include "softmmu_template.h"
5506
5507#endif
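/* The softmmu_template.h inclusions above generate the MMU-aware load/store
   helpers for each access size; SHIFT is log2 of the size in bytes
   (0 = byte, 1 = word, 2 = long, 3 = quad). */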
5508
5509#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
5510/* This code assumes that a real physical address always fits into a host CPU
5511 register, which is wrong in general but true for our current use cases. */
5512RTCCUINTREG REGPARM __ldb_vbox_phys(RTCCUINTREG addr)
5513{
5514 return remR3PhysReadS8(addr);
5515}
5516RTCCUINTREG REGPARM __ldub_vbox_phys(RTCCUINTREG addr)
5517{
5518 return remR3PhysReadU8(addr);
5519}
5520void REGPARM __stb_vbox_phys(RTCCUINTREG addr, RTCCUINTREG val)
5521{
5522 remR3PhysWriteU8(addr, val);
5523}
5524RTCCUINTREG REGPARM __ldw_vbox_phys(RTCCUINTREG addr)
5525{
5526 return remR3PhysReadS16(addr);
5527}
5528RTCCUINTREG REGPARM __lduw_vbox_phys(RTCCUINTREG addr)
5529{
5530 return remR3PhysReadU16(addr);
5531}
5532void REGPARM __stw_vbox_phys(RTCCUINTREG addr, RTCCUINTREG val)
5533{
5534 remR3PhysWriteU16(addr, val);
5535}
5536RTCCUINTREG REGPARM __ldl_vbox_phys(RTCCUINTREG addr)
5537{
5538 return remR3PhysReadS32(addr);
5539}
5540RTCCUINTREG REGPARM __ldul_vbox_phys(RTCCUINTREG addr)
5541{
5542 return remR3PhysReadU32(addr);
5543}
5544void REGPARM __stl_vbox_phys(RTCCUINTREG addr, RTCCUINTREG val)
5545{
5546 remR3PhysWriteU32(addr, val);
5547}
5548uint64_t REGPARM __ldq_vbox_phys(RTCCUINTREG addr)
5549{
5550 return remR3PhysReadU64(addr);
5551}
5552void REGPARM __stq_vbox_phys(RTCCUINTREG addr, uint64_t val)
5553{
5554 remR3PhysWriteU64(addr, val);
5555}
5556#endif
5557
5558/* Try to fill the TLB and raise an exception on error. If retaddr is
5559 NULL, it means that the function was called from C code (i.e. not
5560 from generated code or from helper.c). */
5561/* XXX: fix it to restore all registers */
5562void tlb_fill(target_ulong addr, int is_write, int mmu_idx, void *retaddr)
5563{
5564 TranslationBlock *tb;
5565 int ret;
5566 unsigned long pc;
5567 CPUX86State *saved_env;
5568
5569 /* XXX: hack to restore env in all cases, even if not called from
5570 generated code */
5571 saved_env = env;
5572 env = cpu_single_env;
5573
5574 ret = cpu_x86_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
5575 if (ret) {
5576 if (retaddr) {
5577 /* now we have a real cpu fault */
5578 pc = (unsigned long)retaddr;
5579 tb = tb_find_pc(pc);
5580 if (tb) {
5581 /* the PC is inside the translated code. It means that we have
5582 a virtual CPU fault */
5583 cpu_restore_state(tb, env, pc, NULL);
5584 }
5585 }
5586 raise_exception_err(env->exception_index, env->error_code);
5587 }
5588 env = saved_env;
5589}
5590
5591#ifdef VBOX
5592
5593/**
5594 * Correctly computes the eflags.
5595 * @returns eflags.
5596 * @param env1 CPU environment.
5597 */
5598uint32_t raw_compute_eflags(CPUX86State *env1)
5599{
5600 CPUX86State *savedenv = env;
5601 uint32_t efl;
5602 env = env1;
5603 efl = compute_eflags();
5604 env = savedenv;
5605 return efl;
5606}
5607
5608/**
5609 * Reads a byte from a virtual address in the guest memory area.
5610 * XXX: does it work for all addresses? swapped out pages?
5611 * @returns the byte read.
5612 * @param env1 CPU environment.
5613 * @param addr GC virtual address.
5614 */
5615uint8_t read_byte(CPUX86State *env1, target_ulong addr)
5616{
5617 CPUX86State *savedenv = env;
5618 uint8_t u8;
5619 env = env1;
5620 u8 = ldub_kernel(addr);
5621 env = savedenv;
5622 return u8;
5623}
5624
5625/**
5626 * Reads a 16-bit word from a virtual address in the guest memory area.
5627 * XXX: does it work for all addresses? swapped out pages?
5628 * @returns the word read.
5629 * @param env1 CPU environment.
5630 * @param addr GC virtual address.
5631 */
5632uint16_t read_word(CPUX86State *env1, target_ulong addr)
5633{
5634 CPUX86State *savedenv = env;
5635 uint16_t u16;
5636 env = env1;
5637 u16 = lduw_kernel(addr);
5638 env = savedenv;
5639 return u16;
5640}
5641
5642/**
5643 * Reads a 32-bit dword from a virtual address in the guest memory area.
5644 * XXX: does it work for all addresses? swapped out pages?
5645 * @returns the dword read.
5646 * @param env1 CPU environment.
5647 * @param addr GC virtual address.
5648 */
5649uint32_t read_dword(CPUX86State *env1, target_ulong addr)
5650{
5651 CPUX86State *savedenv = env;
5652 uint32_t u32;
5653 env = env1;
5654 u32 = ldl_kernel(addr);
5655 env = savedenv;
5656 return u32;
5657}
5658
5659/**
5660 * Writes a byte to a virtual address in the guest memory area.
5661 * XXX: does it work for all addresses? swapped out pages?
5662 *
5663 * @param env1 CPU environment.
5664 * @param addr GC virtual address.
5665 * @param val byte value to write.
5666 */
5667void write_byte(CPUX86State *env1, target_ulong addr, uint8_t val)
5668{
5669 CPUX86State *savedenv = env;
5670 env = env1;
5671 stb(addr, val);
5672 env = savedenv;
5673}
5674
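/**
 * Writes a 16-bit word to a virtual address in the guest memory area.
 * @param env1 CPU environment.
 * @param addr GC virtual address.
 * @param val word value to write.
 */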
5675void write_word(CPUX86State *env1, target_ulong addr, uint16_t val)
5676{
5677 CPUX86State *savedenv = env;
5678 env = env1;
5679 stw(addr, val);
5680 env = savedenv;
5681}
5682
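/**
 * Writes a 32-bit dword to a virtual address in the guest memory area.
 * @param env1 CPU environment.
 * @param addr GC virtual address.
 * @param val dword value to write.
 */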
5683void write_dword(CPUX86State *env1, target_ulong addr, uint32_t val)
5684{
5685 CPUX86State *savedenv = env;
5686 env = env1;
5687 stl(addr, val);
5688 env = savedenv;
5689}
5690
5691/**
5692 * Correctly loads a selector into a segment register, updating the internal
5693 * qemu data/caches.
5694 * @param env1 CPU environment.
5695 * @param seg_reg Segment register.
5696 * @param selector Selector to load.
5697 */
5698void sync_seg(CPUX86State *env1, int seg_reg, int selector)
5699{
5700 CPUX86State *savedenv = env;
5701#ifdef FORCE_SEGMENT_SYNC
5702 jmp_buf old_buf;
5703#endif
5704
5705 env = env1;
5706
5707 if ( env->eflags & X86_EFL_VM
5708 || !(env->cr[0] & X86_CR0_PE))
5709 {
5710 load_seg_vm(seg_reg, selector);
5711
5712 env = savedenv;
5713
5714 /* Successful sync. */
5715 env1->segs[seg_reg].newselector = 0;
5716 }
5717 else
5718 {
5719 /* For some reason it works even without saving/restoring the jump buffer, and since this
5720 code is time critical, let's not do that. */
5721#ifdef FORCE_SEGMENT_SYNC
5722 memcpy(&old_buf, &env1->jmp_env, sizeof(old_buf));
5723#endif
5724 if (setjmp(env1->jmp_env) == 0)
5725 {
5726 if (seg_reg == R_CS)
5727 {
5728 uint32_t e1, e2;
5729 e1 = e2 = 0;
5730 load_segment(&e1, &e2, selector);
5731 cpu_x86_load_seg_cache(env, R_CS, selector,
5732 get_seg_base(e1, e2),
5733 get_seg_limit(e1, e2),
5734 e2);
5735 }
5736 else
5737 helper_load_seg(seg_reg, selector);
5738 /* We used to use tss_load_seg(seg_reg, selector); which, for some reason, ignored
5739 loading of 0 selectors, which in turn led to subtle problems like #3588. */
5740
5741 env = savedenv;
5742
5743 /* Successful sync. */
5744 env1->segs[seg_reg].newselector = 0;
5745 }
5746 else
5747 {
5748 env = savedenv;
5749
5750 /* Postpone sync until the guest uses the selector. */
5751 env1->segs[seg_reg].selector = selector; /* hidden values are now incorrect, but will be resynced when this register is accessed. */
5752 env1->segs[seg_reg].newselector = selector;
5753 Log(("sync_seg: out of sync seg_reg=%d selector=%#x\n", seg_reg, selector));
5754 env1->exception_index = -1;
5755 env1->error_code = 0;
5756 env1->old_exception = -1;
5757 }
5758#ifdef FORCE_SEGMENT_SYNC
5759 memcpy(&env1->jmp_env, &old_buf, sizeof(old_buf));
5760#endif
5761 }
5762
5763}
5764
5765DECLINLINE(void) tb_reset_jump(TranslationBlock *tb, int n)
5766{
5767 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
5768}
5769
5770
5771int emulate_single_instr(CPUX86State *env1)
5772{
5773 TranslationBlock *tb;
5774 TranslationBlock *current;
5775 int flags;
5776 uint8_t *tc_ptr;
5777 target_ulong old_eip;
5778
5779 /* ensures env is loaded! */
5780 CPUX86State *savedenv = env;
5781 env = env1;
5782
5783 RAWEx_ProfileStart(env, STATS_EMULATE_SINGLE_INSTR);
5784
5785 current = env->current_tb;
5786 env->current_tb = NULL;
5787 flags = env->hflags | (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
5788
5789 /*
5790 * Translate only one instruction.
5791 */
5792 ASMAtomicOrU32(&env->state, CPU_EMULATE_SINGLE_INSTR);
5793 tb = tb_gen_code(env, env->eip + env->segs[R_CS].base,
5794 env->segs[R_CS].base, flags, 0);
5795
5796 ASMAtomicAndU32(&env->state, ~CPU_EMULATE_SINGLE_INSTR);
5797
5798
5799 /* tb_link_phys: */
5800 tb->jmp_first = (TranslationBlock *)((intptr_t)tb | 2);
5801 tb->jmp_next[0] = NULL;
5802 tb->jmp_next[1] = NULL;
5803 Assert(tb->jmp_next[0] == NULL);
5804 Assert(tb->jmp_next[1] == NULL);
5805 if (tb->tb_next_offset[0] != 0xffff)
5806 tb_reset_jump(tb, 0);
5807 if (tb->tb_next_offset[1] != 0xffff)
5808 tb_reset_jump(tb, 1);
5809
5810 /*
5811 * Execute it using emulation
5812 */
5813 old_eip = env->eip;
5814 env->current_tb = tb;
5815
5816 /*
5817 * eip remains the same for repeated instructions; it is unclear why qemu doesn't do a jump
5818 * inside the generated code. Perhaps not a very safe hack.
5819 */
5820 while(old_eip == env->eip)
5821 {
5822 tc_ptr = tb->tc_ptr;
5823
5824#if defined(VBOX) && defined(GCC_WITH_BUGGY_REGPARM)
5825 int fake_ret;
5826 tcg_qemu_tb_exec(tc_ptr, fake_ret);
5827#else
5828 tcg_qemu_tb_exec(tc_ptr);
5829#endif
5830 /*
5831 * Exit once we detect an external interrupt and interrupts are enabled
5832 */
5833 if( (env->interrupt_request & (CPU_INTERRUPT_EXTERNAL_EXIT|CPU_INTERRUPT_EXTERNAL_TIMER)) ||
5834 ( (env->eflags & IF_MASK) &&
5835 !(env->hflags & HF_INHIBIT_IRQ_MASK) &&
5836 (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD) ) )
5837 {
5838 break;
5839 }
5840 }
5841 env->current_tb = current;
5842
5843 tb_phys_invalidate(tb, -1);
5844 tb_free(tb);
5845/*
5846 Assert(tb->tb_next_offset[0] == 0xffff);
5847 Assert(tb->tb_next_offset[1] == 0xffff);
5848 Assert(tb->tb_next[0] == 0xffff);
5849 Assert(tb->tb_next[1] == 0xffff);
5850 Assert(tb->jmp_next[0] == NULL);
5851 Assert(tb->jmp_next[1] == NULL);
5852 Assert(tb->jmp_first == NULL); */
5853
5854 RAWEx_ProfileStop(env, STATS_EMULATE_SINGLE_INSTR);
5855
5856 /*
5857 * Execute the next instruction when we encounter instruction fusing.
5858 */
5859 if (env->hflags & HF_INHIBIT_IRQ_MASK)
5860 {
5861 Log(("REM: Emulating next instruction due to instruction fusing (HF_INHIBIT_IRQ_MASK) at %RGv\n", env->eip));
5862 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
5863 emulate_single_instr(env);
5864 }
5865
5866 env = savedenv;
5867 return 0;
5868}
5869
5870/**
5871 * Correctly loads a new ldtr selector.
5872 *
5873 * @param env1 CPU environment.
5874 * @param selector Selector to load.
5875 */
5876void sync_ldtr(CPUX86State *env1, int selector)
5877{
5878 CPUX86State *saved_env = env;
5879 if (setjmp(env1->jmp_env) == 0)
5880 {
5881 env = env1;
5882 helper_lldt(selector);
5883 env = saved_env;
5884 }
5885 else
5886 {
5887 env = saved_env;
5888#ifdef VBOX_STRICT
5889 cpu_abort(env1, "sync_ldtr: selector=%#x\n", selector);
5890#endif
5891 }
5892}
5893
5894int get_ss_esp_from_tss_raw(CPUX86State *env1, uint32_t *ss_ptr,
5895 uint32_t *esp_ptr, int dpl)
5896{
5897 int type, index, shift;
5898
5899 CPUX86State *savedenv = env;
5900 env = env1;
5901
5902 if (!(env->tr.flags & DESC_P_MASK))
5903 cpu_abort(env, "invalid tss");
5904 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
5905 if ((type & 7) != 1)
5906 cpu_abort(env, "invalid tss type %d", type);
5907 shift = type >> 3;
5908 index = (dpl * 4 + 2) << shift;
5909 if (index + (4 << shift) - 1 > env->tr.limit)
5910 {
5911 env = savedenv;
5912 return 0;
5913 }
5914 //raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
5915
5916 if (shift == 0) {
5917 *esp_ptr = lduw_kernel(env->tr.base + index);
5918 *ss_ptr = lduw_kernel(env->tr.base + index + 2);
5919 } else {
5920 *esp_ptr = ldl_kernel(env->tr.base + index);
5921 *ss_ptr = lduw_kernel(env->tr.base + index + 4);
5922 }
5923
5924 env = savedenv;
5925 return 1;
5926}
5927
5928//*****************************************************************************
5929// Needs to be at the bottom of the file (overriding macros)
5930
5931#ifndef VBOX
5932static inline CPU86_LDouble helper_fldt_raw(uint8_t *ptr)
5933#else /* VBOX */
5934DECLINLINE(CPU86_LDouble) helper_fldt_raw(uint8_t *ptr)
5935#endif /* VBOX */
5936{
5937 return *(CPU86_LDouble *)ptr;
5938}
5939
5940#ifndef VBOX
5941static inline void helper_fstt_raw(CPU86_LDouble f, uint8_t *ptr)
5942#else /* VBOX */
5943DECLINLINE(void) helper_fstt_raw(CPU86_LDouble f, uint8_t *ptr)
5944#endif /* VBOX */
5945{
5946 *(CPU86_LDouble *)ptr = f;
5947}
5948
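/* restore_raw_fp_state/save_raw_fp_state below operate on a raw host memory
   buffer (uint8_t *ptr), not on guest virtual addresses, so the guest-memory
   store accessors are replaced with plain host pointer stores here (and the
   load macros are replaced the same way further down). */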
5949#undef stw
5950#undef stl
5951#undef stq
5952#define stw(a,b) *(uint16_t *)(a) = (uint16_t)(b)
5953#define stl(a,b) *(uint32_t *)(a) = (uint32_t)(b)
5954#define stq(a,b) *(uint64_t *)(a) = (uint64_t)(b)
5955
5956//*****************************************************************************
5957void restore_raw_fp_state(CPUX86State *env, uint8_t *ptr)
5958{
5959 int fpus, fptag, i, nb_xmm_regs;
5960 CPU86_LDouble tmp;
5961 uint8_t *addr;
5962 int data64 = !!(env->hflags & HF_LMA_MASK);
5963
5964 if (env->cpuid_features & CPUID_FXSR)
5965 {
5966 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
5967 fptag = 0;
5968 for(i = 0; i < 8; i++) {
5969 fptag |= (env->fptags[i] << i);
5970 }
5971 stw(ptr, env->fpuc);
5972 stw(ptr + 2, fpus);
5973 stw(ptr + 4, fptag ^ 0xff);
5974
5975 addr = ptr + 0x20;
5976 for(i = 0;i < 8; i++) {
5977 tmp = ST(i);
5978 helper_fstt_raw(tmp, addr);
5979 addr += 16;
5980 }
5981
5982 if (env->cr[4] & CR4_OSFXSR_MASK) {
5983 /* XXX: finish it */
5984 stl(ptr + 0x18, env->mxcsr); /* mxcsr */
5985 stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
5986 nb_xmm_regs = 8 << data64;
5987 addr = ptr + 0xa0;
5988 for(i = 0; i < nb_xmm_regs; i++) {
5989#if __GNUC__ < 4
5990 stq(addr, env->xmm_regs[i].XMM_Q(0));
5991 stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
5992#else /* VBOX + __GNUC__ >= 4: gcc 4.x compiler bug - it runs out of registers for the 64-bit value. */
5993 stl(addr, env->xmm_regs[i].XMM_L(0));
5994 stl(addr + 4, env->xmm_regs[i].XMM_L(1));
5995 stl(addr + 8, env->xmm_regs[i].XMM_L(2));
5996 stl(addr + 12, env->xmm_regs[i].XMM_L(3));
5997#endif
5998 addr += 16;
5999 }
6000 }
6001 }
6002 else
6003 {
6004 PX86FPUSTATE fp = (PX86FPUSTATE)ptr;
6005 int fptag;
6006
6007 fp->FCW = env->fpuc;
6008 fp->FSW = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
6009 fptag = 0;
6010 for (i=7; i>=0; i--) {
6011 fptag <<= 2;
6012 if (env->fptags[i]) {
6013 fptag |= 3;
6014 } else {
6015 /* the FPU automatically computes it */
6016 }
6017 }
6018 fp->FTW = fptag;
6019
6020 for(i = 0;i < 8; i++) {
6021 tmp = ST(i);
6022 helper_fstt_raw(tmp, &fp->regs[i].reg[0]);
6023 }
6024 }
6025}
6026
6027//*****************************************************************************
6028#undef lduw
6029#undef ldl
6030#undef ldq
6031#define lduw(a) *(uint16_t *)(a)
6032#define ldl(a) *(uint32_t *)(a)
6033#define ldq(a) *(uint64_t *)(a)
6034//*****************************************************************************
6035void save_raw_fp_state(CPUX86State *env, uint8_t *ptr)
6036{
6037 int i, fpus, fptag, nb_xmm_regs;
6038 CPU86_LDouble tmp;
6039 uint8_t *addr;
6040 int data64 = !!(env->hflags & HF_LMA_MASK); /* don't use HF_CS64_MASK here as cs hasn't been synced when this function is called. */
6041
6042 if (env->cpuid_features & CPUID_FXSR)
6043 {
6044 env->fpuc = lduw(ptr);
6045 fpus = lduw(ptr + 2);
6046 fptag = lduw(ptr + 4);
6047 env->fpstt = (fpus >> 11) & 7;
6048 env->fpus = fpus & ~0x3800;
6049 fptag ^= 0xff;
6050 for(i = 0;i < 8; i++) {
6051 env->fptags[i] = ((fptag >> i) & 1);
6052 }
6053
6054 addr = ptr + 0x20;
6055 for(i = 0;i < 8; i++) {
6056 tmp = helper_fldt_raw(addr);
6057 ST(i) = tmp;
6058 addr += 16;
6059 }
6060
6061 if (env->cr[4] & CR4_OSFXSR_MASK) {
6062 /* XXX: finish it, endianness */
6063 env->mxcsr = ldl(ptr + 0x18);
6064 //ldl(ptr + 0x1c);
6065 nb_xmm_regs = 8 << data64;
6066 addr = ptr + 0xa0;
6067 for(i = 0; i < nb_xmm_regs; i++) {
6068#if HC_ARCH_BITS == 32
6069 /* this is a workaround for http://gcc.gnu.org/bugzilla/show_bug.cgi?id=35135 */
6070 env->xmm_regs[i].XMM_L(0) = ldl(addr);
6071 env->xmm_regs[i].XMM_L(1) = ldl(addr + 4);
6072 env->xmm_regs[i].XMM_L(2) = ldl(addr + 8);
6073 env->xmm_regs[i].XMM_L(3) = ldl(addr + 12);
6074#else
6075 env->xmm_regs[i].XMM_Q(0) = ldq(addr);
6076 env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
6077#endif
6078 addr += 16;
6079 }
6080 }
6081 }
6082 else
6083 {
6084 PX86FPUSTATE fp = (PX86FPUSTATE)ptr;
6085 int fptag, j;
6086
6087 env->fpuc = fp->FCW;
6088 env->fpstt = (fp->FSW >> 11) & 7;
6089 env->fpus = fp->FSW & ~0x3800;
6090 fptag = fp->FTW;
6091 for(i = 0;i < 8; i++) {
6092 env->fptags[i] = ((fptag & 3) == 3);
6093 fptag >>= 2;
6094 }
6095 j = env->fpstt;
6096 for(i = 0;i < 8; i++) {
6097 tmp = helper_fldt_raw(&fp->regs[i].reg[0]);
6098 ST(i) = tmp;
6099 }
6100 }
6101}
6102//*****************************************************************************
6103//*****************************************************************************
6104
6105#endif /* VBOX */
6106
6107/* Secure Virtual Machine helpers */
6108
6109#if defined(CONFIG_USER_ONLY)
6110
6111void helper_vmrun(int aflag, int next_eip_addend)
6112{
6113}
6114void helper_vmmcall(void)
6115{
6116}
6117void helper_vmload(int aflag)
6118{
6119}
6120void helper_vmsave(int aflag)
6121{
6122}
6123void helper_stgi(void)
6124{
6125}
6126void helper_clgi(void)
6127{
6128}
6129void helper_skinit(void)
6130{
6131}
6132void helper_invlpga(int aflag)
6133{
6134}
6135void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
6136{
6137}
6138void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
6139{
6140}
6141
6142void helper_svm_check_io(uint32_t port, uint32_t param,
6143 uint32_t next_eip_addend)
6144{
6145}
6146#else
6147
6148#ifndef VBOX
6149static inline void svm_save_seg(target_phys_addr_t addr,
6150#else /* VBOX */
6151DECLINLINE(void) svm_save_seg(target_phys_addr_t addr,
6152#endif /* VBOX */
6153 const SegmentCache *sc)
6154{
6155 stw_phys(addr + offsetof(struct vmcb_seg, selector),
6156 sc->selector);
6157 stq_phys(addr + offsetof(struct vmcb_seg, base),
6158 sc->base);
6159 stl_phys(addr + offsetof(struct vmcb_seg, limit),
6160 sc->limit);
6161 stw_phys(addr + offsetof(struct vmcb_seg, attrib),
6162 ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
6163}
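/* The VMCB keeps segment attributes in a compressed 12-bit form: attrib bits
   0-7 come from descriptor flag bits 8-15 and attrib bits 8-11 from flag bits
   20-23 (the limit bits in between are dropped). svm_save_seg packs them this
   way and svm_load_seg below expands them again. */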
6164
6165#ifndef VBOX
6166static inline void svm_load_seg(target_phys_addr_t addr, SegmentCache *sc)
6167#else /* VBOX */
6168DECLINLINE(void) svm_load_seg(target_phys_addr_t addr, SegmentCache *sc)
6169#endif /* VBOX */
6170{
6171 unsigned int flags;
6172
6173 sc->selector = lduw_phys(addr + offsetof(struct vmcb_seg, selector));
6174 sc->base = ldq_phys(addr + offsetof(struct vmcb_seg, base));
6175 sc->limit = ldl_phys(addr + offsetof(struct vmcb_seg, limit));
6176 flags = lduw_phys(addr + offsetof(struct vmcb_seg, attrib));
6177 sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
6178}
6179
6180#ifndef VBOX
6181static inline void svm_load_seg_cache(target_phys_addr_t addr,
6182#else /* VBOX */
6183DECLINLINE(void) svm_load_seg_cache(target_phys_addr_t addr,
6184#endif /* VBOX */
6185 CPUState *env, int seg_reg)
6186{
6187 SegmentCache sc1, *sc = &sc1;
6188 svm_load_seg(addr, sc);
6189 cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
6190 sc->base, sc->limit, sc->flags);
6191}
6192
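/* VMRUN: after the intercept check, the current (host) state is saved to the
   hsave page, the guest state is loaded from the VMCB pointed to by rAX, the
   intercept bitmaps are cached in env and HF_SVMI_MASK is set, GIF is set, a
   virtual interrupt is requested if V_IRQ is pending, and finally any event
   from control.event_inj is injected before execution resumes at the guest
   rip. */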
6193void helper_vmrun(int aflag, int next_eip_addend)
6194{
6195 target_ulong addr;
6196 uint32_t event_inj;
6197 uint32_t int_ctl;
6198
6199 helper_svm_check_intercept_param(SVM_EXIT_VMRUN, 0);
6200
6201 if (aflag == 2)
6202 addr = EAX;
6203 else
6204 addr = (uint32_t)EAX;
6205
6206 if (loglevel & CPU_LOG_TB_IN_ASM)
6207 fprintf(logfile,"vmrun! " TARGET_FMT_lx "\n", addr);
6208
6209 env->vm_vmcb = addr;
6210
6211 /* save the current CPU state in the hsave page */
6212 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
6213 stl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
6214
6215 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base), env->idt.base);
6216 stl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
6217
6218 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
6219 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
6220 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
6221 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
6222 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
6223 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);
6224
6225 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
6226 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags), compute_eflags());
6227
6228 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.es),
6229 &env->segs[R_ES]);
6230 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.cs),
6231 &env->segs[R_CS]);
6232 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ss),
6233 &env->segs[R_SS]);
6234 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ds),
6235 &env->segs[R_DS]);
6236
6237 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip),
6238 EIP + next_eip_addend);
6239 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), ESP);
6240 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), EAX);
6241
6242 /* load the interception bitmaps so we do not need to access the
6243 vmcb in svm mode */
6244 env->intercept = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept));
6245 env->intercept_cr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_read));
6246 env->intercept_cr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_write));
6247 env->intercept_dr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_read));
6248 env->intercept_dr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_write));
6249 env->intercept_exceptions = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_exceptions));
6250
6251 /* enable intercepts */
6252 env->hflags |= HF_SVMI_MASK;
6253
6254 env->tsc_offset = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.tsc_offset));
6255
6256 env->gdt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base));
6257 env->gdt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit));
6258
6259 env->idt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base));
6260 env->idt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit));
6261
6262 /* clear exit_info_2 so we behave like the real hardware */
6263 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);
6264
6265 cpu_x86_update_cr0(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0)));
6266 cpu_x86_update_cr4(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4)));
6267 cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3)));
6268 env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2));
6269 int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
6270 env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
6271 if (int_ctl & V_INTR_MASKING_MASK) {
6272 env->v_tpr = int_ctl & V_TPR_MASK;
6273 env->hflags2 |= HF2_VINTR_MASK;
6274 if (env->eflags & IF_MASK)
6275 env->hflags2 |= HF2_HIF_MASK;
6276 }
6277
6278 cpu_load_efer(env,
6279 ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer)));
6280 env->eflags = 0;
6281 load_eflags(ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags)),
6282 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
6283 CC_OP = CC_OP_EFLAGS;
6284
6285 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.es),
6286 env, R_ES);
6287 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.cs),
6288 env, R_CS);
6289 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ss),
6290 env, R_SS);
6291 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ds),
6292 env, R_DS);
6293
6294 EIP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));
6295 env->eip = EIP;
6296 ESP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp));
6297 EAX = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax));
6298 env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7));
6299 env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6));
6300 cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl)));
6301
6302 /* FIXME: guest state consistency checks */
6303
6304 switch(ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
6305 case TLB_CONTROL_DO_NOTHING:
6306 break;
6307 case TLB_CONTROL_FLUSH_ALL_ASID:
6308 /* FIXME: this is not 100% correct but should work for now */
6309 tlb_flush(env, 1);
6310 break;
6311 }
6312
6313 env->hflags2 |= HF2_GIF_MASK;
6314
6315 if (int_ctl & V_IRQ_MASK) {
6316 env->interrupt_request |= CPU_INTERRUPT_VIRQ;
6317 }
6318
6319 /* inject an event into the guest if the VMCB's EVENTINJ field requests one */
6320 event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
6321 if (event_inj & SVM_EVTINJ_VALID) {
6322 uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
6323 uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
6324 uint32_t event_inj_err = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err));
6325 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj & ~SVM_EVTINJ_VALID);
6326
6327 if (loglevel & CPU_LOG_TB_IN_ASM)
6328 fprintf(logfile, "Injecting(%#hx): ", valid_err);
6329 /* FIXME: need to implement valid_err */
6330 switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
6331 case SVM_EVTINJ_TYPE_INTR:
6332 env->exception_index = vector;
6333 env->error_code = event_inj_err;
6334 env->exception_is_int = 0;
6335 env->exception_next_eip = -1;
6336 if (loglevel & CPU_LOG_TB_IN_ASM)
6337 fprintf(logfile, "INTR");
6338 /* XXX: is it always correct ? */
6339 do_interrupt(vector, 0, 0, 0, 1);
6340 break;
6341 case SVM_EVTINJ_TYPE_NMI:
6342 env->exception_index = EXCP02_NMI;
6343 env->error_code = event_inj_err;
6344 env->exception_is_int = 0;
6345 env->exception_next_eip = EIP;
6346 if (loglevel & CPU_LOG_TB_IN_ASM)
6347 fprintf(logfile, "NMI");
6348 cpu_loop_exit();
6349 break;
6350 case SVM_EVTINJ_TYPE_EXEPT:
6351 env->exception_index = vector;
6352 env->error_code = event_inj_err;
6353 env->exception_is_int = 0;
6354 env->exception_next_eip = -1;
6355 if (loglevel & CPU_LOG_TB_IN_ASM)
6356 fprintf(logfile, "EXEPT");
6357 cpu_loop_exit();
6358 break;
6359 case SVM_EVTINJ_TYPE_SOFT:
6360 env->exception_index = vector;
6361 env->error_code = event_inj_err;
6362 env->exception_is_int = 1;
6363 env->exception_next_eip = EIP;
6364 if (loglevel & CPU_LOG_TB_IN_ASM)
6365 fprintf(logfile, "SOFT");
6366 cpu_loop_exit();
6367 break;
6368 }
6369 if (loglevel & CPU_LOG_TB_IN_ASM)
6370 fprintf(logfile, " %#x %#x\n", env->exception_index, env->error_code);
6371 }
6372}
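
/* A worked decode of the EVENTINJ handling above - a sketch assuming the
 usual AMD layout of the field (bits 7:0 vector, bits 10:8 type, bit 11
 error-code valid, bit 31 valid, with the error code held in the separate
 event_inj_err field):

 event_inj = 0x80000b0e
 vector = 0x0e (#PF)
 type = (event_inj >> 8) & 7 = 3 -> SVM_EVTINJ_TYPE_EXEPT
 bit 11 set -> event_inj_err carries the error code
 bit 31 set -> injection requested

 so the switch above takes the EXEPT branch and delivers a page fault to
 the guest as part of VMRUN. */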
6373
6374void helper_vmmcall(void)
6375{
6376 helper_svm_check_intercept_param(SVM_EXIT_VMMCALL, 0);
6377 raise_exception(EXCP06_ILLOP);
6378}
6379
6380void helper_vmload(int aflag)
6381{
6382 target_ulong addr;
6383 helper_svm_check_intercept_param(SVM_EXIT_VMLOAD, 0);
6384
6385 if (aflag == 2)
6386 addr = EAX;
6387 else
6388 addr = (uint32_t)EAX;
6389
6390 if (loglevel & CPU_LOG_TB_IN_ASM)
6391 fprintf(logfile,"vmload! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
6392 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
6393 env->segs[R_FS].base);
6394
6395 svm_load_seg_cache(addr + offsetof(struct vmcb, save.fs),
6396 env, R_FS);
6397 svm_load_seg_cache(addr + offsetof(struct vmcb, save.gs),
6398 env, R_GS);
6399 svm_load_seg(addr + offsetof(struct vmcb, save.tr),
6400 &env->tr);
6401 svm_load_seg(addr + offsetof(struct vmcb, save.ldtr),
6402 &env->ldt);
6403
6404#ifdef TARGET_X86_64
6405 env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base));
6406 env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar));
6407 env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar));
6408 env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask));
6409#endif
6410 env->star = ldq_phys(addr + offsetof(struct vmcb, save.star));
6411 env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs));
6412 env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_esp));
6413 env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_eip));
6414}
6415
6416void helper_vmsave(int aflag)
6417{
6418 target_ulong addr;
6419 helper_svm_check_intercept_param(SVM_EXIT_VMSAVE, 0);
6420
6421 if (aflag == 2)
6422 addr = EAX;
6423 else
6424 addr = (uint32_t)EAX;
6425
6426 if (loglevel & CPU_LOG_TB_IN_ASM)
6427 fprintf(logfile,"vmsave! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
6428 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
6429 env->segs[R_FS].base);
6430
6431 svm_save_seg(addr + offsetof(struct vmcb, save.fs),
6432 &env->segs[R_FS]);
6433 svm_save_seg(addr + offsetof(struct vmcb, save.gs),
6434 &env->segs[R_GS]);
6435 svm_save_seg(addr + offsetof(struct vmcb, save.tr),
6436 &env->tr);
6437 svm_save_seg(addr + offsetof(struct vmcb, save.ldtr),
6438 &env->ldt);
6439
6440#ifdef TARGET_X86_64
6441 stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base), env->kernelgsbase);
6442 stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
6443 stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
6444 stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
6445#endif
6446 stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
6447 stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
6448 stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp), env->sysenter_esp);
6449 stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip), env->sysenter_eip);
6450}
6451
6452void helper_stgi(void)
6453{
6454 helper_svm_check_intercept_param(SVM_EXIT_STGI, 0);
6455 env->hflags2 |= HF2_GIF_MASK;
6456}
6457
6458void helper_clgi(void)
6459{
6460 helper_svm_check_intercept_param(SVM_EXIT_CLGI, 0);
6461 env->hflags2 &= ~HF2_GIF_MASK;
6462}
6463
6464void helper_skinit(void)
6465{
6466 helper_svm_check_intercept_param(SVM_EXIT_SKINIT, 0);
6467 /* XXX: not implemented */
6468 raise_exception(EXCP06_ILLOP);
6469}
6470
6471void helper_invlpga(int aflag)
6472{
6473 target_ulong addr;
6474 helper_svm_check_intercept_param(SVM_EXIT_INVLPGA, 0);
6475
6476 if (aflag == 2)
6477 addr = EAX;
6478 else
6479 addr = (uint32_t)EAX;
6480
6481 /* XXX: could use the ASID to decide whether the flush is
6482 actually needed */
6483 tlb_flush_page(env, addr);
6484}
6485
6486void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
6487{
6488 if (likely(!(env->hflags & HF_SVMI_MASK)))
6489 return;
6490#ifndef VBOX
6491 switch(type) {
6492#ifndef VBOX
6493 case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
6494#else
6495 case SVM_EXIT_READ_CR0: case SVM_EXIT_READ_CR0 + 1: case SVM_EXIT_READ_CR0 + 2:
6496 case SVM_EXIT_READ_CR0 + 3: case SVM_EXIT_READ_CR0 + 4: case SVM_EXIT_READ_CR0 + 5:
6497 case SVM_EXIT_READ_CR0 + 6: case SVM_EXIT_READ_CR0 + 7: case SVM_EXIT_READ_CR0 + 8:
6498#endif
6499 if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
6500 helper_vmexit(type, param);
6501 }
6502 break;
6503#ifndef VBOX
6504 case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
6505#else
6506 case SVM_EXIT_WRITE_CR0: case SVM_EXIT_WRITE_CR0 + 1: case SVM_EXIT_WRITE_CR0 + 2:
6507 case SVM_EXIT_WRITE_CR0 + 3: case SVM_EXIT_WRITE_CR0 + 4: case SVM_EXIT_WRITE_CR0 + 5:
6508 case SVM_EXIT_WRITE_CR0 + 6: case SVM_EXIT_WRITE_CR0 + 7: case SVM_EXIT_WRITE_CR0 + 8:
6509#endif
6510 if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
6511 helper_vmexit(type, param);
6512 }
6513 break;
6514 case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
6515 if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
6516 helper_vmexit(type, param);
6517 }
6518 break;
6519 case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
6520 if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
6521 helper_vmexit(type, param);
6522 }
6523 break;
6524 case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
6525 if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
6526 helper_vmexit(type, param);
6527 }
6528 break;
6529 case SVM_EXIT_MSR:
6530 if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
6531 /* FIXME: this should be read in at vmrun (faster this way?) */
6532 uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.msrpm_base_pa));
6533 uint32_t t0, t1;
6534 switch((uint32_t)ECX) {
6535 case 0 ... 0x1fff:
6536 t0 = (ECX * 2) % 8;
6537 t1 = (ECX * 2) / 8; /* two permission bits per MSR */
6538 break;
6539 case 0xc0000000 ... 0xc0001fff:
6540 t0 = (8192 + ECX - 0xc0000000) * 2;
6541 t1 = (t0 / 8);
6542 t0 %= 8;
6543 break;
6544 case 0xc0010000 ... 0xc0011fff:
6545 t0 = (16384 + ECX - 0xc0010000) * 2;
6546 t1 = (t0 / 8);
6547 t0 %= 8;
6548 break;
6549 default:
6550 helper_vmexit(type, param);
6551 t0 = 0;
6552 t1 = 0;
6553 break;
6554 }
6555 if (ldub_phys(addr + t1) & ((1 << param) << t0))
6556 helper_vmexit(type, param);
6557 }
6558 break;
6559 default:
6560 if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
6561 helper_vmexit(type, param);
6562 }
6563 break;
6564 }
6565#else
6566 AssertMsgFailed(("We shouldn't be here; HWACCM handles SVM intercepts differently!"));
6567#endif
6568}
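
/* Worked examples for the MSR permission bitmap lookup above - a sketch
 assuming the AMD MSRPM layout of two bits (read, then write) per MSR,
 with the 0xc0000000 and 0xc0010000 ranges packed after the first 0x2000
 MSRs:

 MSR 0x10 (TSC): bit position = 0x10 * 2 = 32 -> t1 = 4, t0 = 0
 MSR 0xc0000080 (EFER): bit position = (8192 + 0x80) * 2 = 16640
 -> t1 = 2080, t0 = 0

 the final ldub_phys() test then checks bit t0 of byte t1 for reads
 (param == 0) and bit t0 + 1 for writes (param == 1). */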
6569
6570void helper_svm_check_io(uint32_t port, uint32_t param,
6571 uint32_t next_eip_addend)
6572{
6573 if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
6574 /* FIXME: this should be read in at vmrun (faster this way?) */
6575 uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.iopm_base_pa));
6576 uint16_t mask = (1 << ((param >> 4) & 7)) - 1;
6577 if(lduw_phys(addr + port / 8) & (mask << (port & 7))) {
6578 /* next EIP */
6579 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
6580 env->eip + next_eip_addend);
6581 helper_vmexit(SVM_EXIT_IOIO, param | (port << 16));
6582 }
6583 }
6584}
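
/* Worked example for the I/O permission bitmap test above - a sketch
 assuming the SVM IOIO exit-info encoding where bits 6:4 of param give
 the access size in bytes and the IOPM uses one bit per port:

 4-byte IN from port 0x3fd:
 size = (param >> 4) & 7 = 4 -> mask = 0xf
 byte = 0x3fd / 8 = 127
 shift = 0x3fd & 7 = 5

 the relevant bits (ports 0x3fd..0x400) are bits 5..8 counted from byte
 127, i.e. they straddle a byte boundary - which is why a 16-bit
 lduw_phys() read is used instead of a single-byte one. */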
6585
6586/* Note: currently only 32 bits of exit_code are used */
6587void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
6588{
6589 uint32_t int_ctl;
6590
6591 if (loglevel & CPU_LOG_TB_IN_ASM)
6592 fprintf(logfile,"vmexit(%08x, %016" PRIx64 ", %016" PRIx64 ", " TARGET_FMT_lx ")!\n",
6593 exit_code, exit_info_1,
6594 ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2)),
6595 EIP);
6596
6597 if(env->hflags & HF_INHIBIT_IRQ_MASK) {
6598 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), SVM_INTERRUPT_SHADOW_MASK);
6599 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
6600 } else {
6601 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
6602 }
6603
6604 /* Save the VM state in the vmcb */
6605 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.es),
6606 &env->segs[R_ES]);
6607 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.cs),
6608 &env->segs[R_CS]);
6609 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ss),
6610 &env->segs[R_SS]);
6611 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ds),
6612 &env->segs[R_DS]);
6613
6614 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
6615 stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
6616
6617 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base), env->idt.base);
6618 stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
6619
6620 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
6621 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
6622 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
6623 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
6624 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);
6625
6626 int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
6627 int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
6628 int_ctl |= env->v_tpr & V_TPR_MASK;
6629 if (env->interrupt_request & CPU_INTERRUPT_VIRQ)
6630 int_ctl |= V_IRQ_MASK;
6631 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);
6632
6633 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags), compute_eflags());
6634 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip);
6635 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), ESP);
6636 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), EAX);
6637 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
6638 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
6639 stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl), env->hflags & HF_CPL_MASK);
6640
6641 /* Reload the host state from vm_hsave */
6642 env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
6643 env->hflags &= ~HF_SVMI_MASK;
6644 env->intercept = 0;
6645 env->intercept_exceptions = 0;
6646 env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
6647 env->tsc_offset = 0;
6648
6649 env->gdt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base));
6650 env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit));
6651
6652 env->idt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base));
6653 env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit));
6654
6655 cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0)) | CR0_PE_MASK);
6656 cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4)));
6657 cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3)));
6658 /* we need to set EFER after the CRs so the hidden flags get
6659 set up properly */
6660 cpu_load_efer(env,
6661 ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer)));
6662 env->eflags = 0;
6663 load_eflags(ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags)),
6664 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
6665 CC_OP = CC_OP_EFLAGS;
6666
6667 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.es),
6668 env, R_ES);
6669 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.cs),
6670 env, R_CS);
6671 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ss),
6672 env, R_SS);
6673 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ds),
6674 env, R_DS);
6675
6676 EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
6677 ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp));
6678 EAX = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax));
6679
6680 env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
6681 env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));
6682
6683 /* other setups */
6684 cpu_x86_set_cpl(env, 0);
6685 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code), exit_code);
6686 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1), exit_info_1);
6687
6688 env->hflags2 &= ~HF2_GIF_MASK;
6689 /* FIXME: Resets the current ASID register to zero (host ASID). */
6690
6691 /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */
6692
6693 /* Clears the TSC_OFFSET inside the processor. */
6694
6695 /* If the host is in PAE mode, the processor reloads the host's PDPEs
6696 from the page table indicated by the host's CR3. If the PDPEs contain
6697 illegal state, the processor causes a shutdown. */
6698
6699 /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
6700 env->cr[0] |= CR0_PE_MASK;
6701 env->eflags &= ~VM_MASK;
6702
6703 /* Disables all breakpoints in the host DR7 register. */
6704
6705 /* Checks the reloaded host state for consistency. */
6706
6707 /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
6708 host's code segment or non-canonical (in the case of long mode), a
6709 #GP fault is delivered inside the host. */
6710
6711 /* remove any pending exception */
6712 env->exception_index = -1;
6713 env->error_code = 0;
6714 env->old_exception = -1;
6715
6716 cpu_loop_exit();
6717}
6718
6719#endif
6720
6721/* MMX/SSE */
6722/* XXX: optimize by storing fptt and fptags in the static cpu state */
6723void helper_enter_mmx(void)
6724{
6725 env->fpstt = 0;
6726 *(uint32_t *)(env->fptags) = 0;
6727 *(uint32_t *)(env->fptags + 4) = 0;
6728}
6729
6730void helper_emms(void)
6731{
6732 /* set to empty state */
6733 *(uint32_t *)(env->fptags) = 0x01010101;
6734 *(uint32_t *)(env->fptags + 4) = 0x01010101;
6735}
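
/* What the two helpers above do with the tag bytes, for reference:
 env->fptags[] holds one byte per x87 register (0 = valid, 1 = empty),
 so the paired 32-bit stores are just a fast equivalent of

 memset(env->fptags, 0, 8); /" enter_mmx: all registers valid, TOS = 0 "/
 memset(env->fptags, 1, 8); /" emms: all registers empty "/

 matching the architectural behaviour of MMX instructions and EMMS. */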
6736
6737/* XXX: suppress */
6738void helper_movq(uint64_t *d, uint64_t *s)
6739{
6740 *d = *s;
6741}
6742
6743#define SHIFT 0
6744#include "ops_sse.h"
6745
6746#define SHIFT 1
6747#include "ops_sse.h"
6748
6749#define SHIFT 0
6750#include "helper_template.h"
6751#undef SHIFT
6752
6753#define SHIFT 1
6754#include "helper_template.h"
6755#undef SHIFT
6756
6757#define SHIFT 2
6758#include "helper_template.h"
6759#undef SHIFT
6760
6761#ifdef TARGET_X86_64
6762
6763#define SHIFT 3
6764#include "helper_template.h"
6765#undef SHIFT
6766
6767#endif
6768
6769/* bit operations */
6770target_ulong helper_bsf(target_ulong t0)
6771{
6772 int count;
6773 target_ulong res;
6774
6775 res = t0;
6776 count = 0;
6777 while ((res & 1) == 0) {
6778 count++;
6779 res >>= 1;
6780 }
6781 return count;
6782}
6783
6784target_ulong helper_bsr(target_ulong t0)
6785{
6786 int count;
6787 target_ulong res, mask;
6788
6789 res = t0;
6790 count = TARGET_LONG_BITS - 1;
6791 mask = (target_ulong)1 << (TARGET_LONG_BITS - 1);
6792 while ((res & mask) == 0) {
6793 count--;
6794 res <<= 1;
6795 }
6796 return count;
6797}
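
/* Both scans above loop forever for t0 == 0, so they rely on callers
 passing a non-zero operand (the translator is expected to branch around
 the call for a zero source). A minimal usage sketch, disabled so it does
 not affect the build: */
#if 0
static void example_bit_scans(void)
{
 /* 0x12 == 0b10010: lowest set bit is bit 1, highest is bit 4 */
 target_ulong lo = helper_bsf(0x12); /* == 1 */
 target_ulong hi = helper_bsr(0x12); /* == 4 */
 (void)lo; (void)hi;
}
#endif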
6798
6799
6800static int compute_all_eflags(void)
6801{
6802 return CC_SRC;
6803}
6804
6805static int compute_c_eflags(void)
6806{
6807 return CC_SRC & CC_C;
6808}
6809
6810#ifndef VBOX
6811CCTable cc_table[CC_OP_NB] = {
6812 [CC_OP_DYNAMIC] = { /* should never happen */ },
6813
6814 [CC_OP_EFLAGS] = { compute_all_eflags, compute_c_eflags },
6815
6816 [CC_OP_MULB] = { compute_all_mulb, compute_c_mull },
6817 [CC_OP_MULW] = { compute_all_mulw, compute_c_mull },
6818 [CC_OP_MULL] = { compute_all_mull, compute_c_mull },
6819
6820 [CC_OP_ADDB] = { compute_all_addb, compute_c_addb },
6821 [CC_OP_ADDW] = { compute_all_addw, compute_c_addw },
6822 [CC_OP_ADDL] = { compute_all_addl, compute_c_addl },
6823
6824 [CC_OP_ADCB] = { compute_all_adcb, compute_c_adcb },
6825 [CC_OP_ADCW] = { compute_all_adcw, compute_c_adcw },
6826 [CC_OP_ADCL] = { compute_all_adcl, compute_c_adcl },
6827
6828 [CC_OP_SUBB] = { compute_all_subb, compute_c_subb },
6829 [CC_OP_SUBW] = { compute_all_subw, compute_c_subw },
6830 [CC_OP_SUBL] = { compute_all_subl, compute_c_subl },
6831
6832 [CC_OP_SBBB] = { compute_all_sbbb, compute_c_sbbb },
6833 [CC_OP_SBBW] = { compute_all_sbbw, compute_c_sbbw },
6834 [CC_OP_SBBL] = { compute_all_sbbl, compute_c_sbbl },
6835
6836 [CC_OP_LOGICB] = { compute_all_logicb, compute_c_logicb },
6837 [CC_OP_LOGICW] = { compute_all_logicw, compute_c_logicw },
6838 [CC_OP_LOGICL] = { compute_all_logicl, compute_c_logicl },
6839
6840 [CC_OP_INCB] = { compute_all_incb, compute_c_incl },
6841 [CC_OP_INCW] = { compute_all_incw, compute_c_incl },
6842 [CC_OP_INCL] = { compute_all_incl, compute_c_incl },
6843
6844 [CC_OP_DECB] = { compute_all_decb, compute_c_incl },
6845 [CC_OP_DECW] = { compute_all_decw, compute_c_incl },
6846 [CC_OP_DECL] = { compute_all_decl, compute_c_incl },
6847
6848 [CC_OP_SHLB] = { compute_all_shlb, compute_c_shlb },
6849 [CC_OP_SHLW] = { compute_all_shlw, compute_c_shlw },
6850 [CC_OP_SHLL] = { compute_all_shll, compute_c_shll },
6851
6852 [CC_OP_SARB] = { compute_all_sarb, compute_c_sarl },
6853 [CC_OP_SARW] = { compute_all_sarw, compute_c_sarl },
6854 [CC_OP_SARL] = { compute_all_sarl, compute_c_sarl },
6855
6856#ifdef TARGET_X86_64
6857 [CC_OP_MULQ] = { compute_all_mulq, compute_c_mull },
6858
6859 [CC_OP_ADDQ] = { compute_all_addq, compute_c_addq },
6860
6861 [CC_OP_ADCQ] = { compute_all_adcq, compute_c_adcq },
6862
6863 [CC_OP_SUBQ] = { compute_all_subq, compute_c_subq },
6864
6865 [CC_OP_SBBQ] = { compute_all_sbbq, compute_c_sbbq },
6866
6867 [CC_OP_LOGICQ] = { compute_all_logicq, compute_c_logicq },
6868
6869 [CC_OP_INCQ] = { compute_all_incq, compute_c_incl },
6870
6871 [CC_OP_DECQ] = { compute_all_decq, compute_c_incl },
6872
6873 [CC_OP_SHLQ] = { compute_all_shlq, compute_c_shlq },
6874
6875 [CC_OP_SARQ] = { compute_all_sarq, compute_c_sarl },
6876#endif
6877};
6878#else /* VBOX */
6879/* Sync carefully with cpu.h */
6880CCTable cc_table[CC_OP_NB] = {
6881 /* CC_OP_DYNAMIC */ { 0, 0 },
6882
6883 /* CC_OP_EFLAGS */ { compute_all_eflags, compute_c_eflags },
6884
6885 /* CC_OP_MULB */ { compute_all_mulb, compute_c_mull },
6886 /* CC_OP_MULW */ { compute_all_mulw, compute_c_mull },
6887 /* CC_OP_MULL */ { compute_all_mull, compute_c_mull },
6888#ifdef TARGET_X86_64
6889 /* CC_OP_MULQ */ { compute_all_mulq, compute_c_mull },
6890#else
6891 /* CC_OP_MULQ */ { 0, 0 },
6892#endif
6893
6894 /* CC_OP_ADDB */ { compute_all_addb, compute_c_addb },
6895 /* CC_OP_ADDW */ { compute_all_addw, compute_c_addw },
6896 /* CC_OP_ADDL */ { compute_all_addl, compute_c_addl },
6897#ifdef TARGET_X86_64
6898 /* CC_OP_ADDQ */ { compute_all_addq, compute_c_addq },
6899#else
6900 /* CC_OP_ADDQ */ { 0, 0 },
6901#endif
6902
6903 /* CC_OP_ADCB */ { compute_all_adcb, compute_c_adcb },
6904 /* CC_OP_ADCW */ { compute_all_adcw, compute_c_adcw },
6905 /* CC_OP_ADCL */ { compute_all_adcl, compute_c_adcl },
6906#ifdef TARGET_X86_64
6907 /* CC_OP_ADCQ */ { compute_all_adcq, compute_c_adcq },
6908#else
6909 /* CC_OP_ADCQ */ { 0, 0 },
6910#endif
6911
6912 /* CC_OP_SUBB */ { compute_all_subb, compute_c_subb },
6913 /* CC_OP_SUBW */ { compute_all_subw, compute_c_subw },
6914 /* CC_OP_SUBL */ { compute_all_subl, compute_c_subl },
6915#ifdef TARGET_X86_64
6916 /* CC_OP_SUBQ */ { compute_all_subq, compute_c_subq },
6917#else
6918 /* CC_OP_SUBQ */ { 0, 0 },
6919#endif
6920
6921 /* CC_OP_SBBB */ { compute_all_sbbb, compute_c_sbbb },
6922 /* CC_OP_SBBW */ { compute_all_sbbw, compute_c_sbbw },
6923 /* CC_OP_SBBL */ { compute_all_sbbl, compute_c_sbbl },
6924#ifdef TARGET_X86_64
6925 /* CC_OP_SBBQ */ { compute_all_sbbq, compute_c_sbbq },
6926#else
6927 /* CC_OP_SBBQ */ { 0, 0 },
6928#endif
6929
6930 /* CC_OP_LOGICB */ { compute_all_logicb, compute_c_logicb },
6931 /* CC_OP_LOGICW */ { compute_all_logicw, compute_c_logicw },
6932 /* CC_OP_LOGICL */ { compute_all_logicl, compute_c_logicl },
6933#ifdef TARGET_X86_64
6934 /* CC_OP_LOGICQ */ { compute_all_logicq, compute_c_logicq },
6935#else
6936 /* CC_OP_LOGICQ */ { 0, 0 },
6937#endif
6938
6939 /* CC_OP_INCB */ { compute_all_incb, compute_c_incl },
6940 /* CC_OP_INCW */ { compute_all_incw, compute_c_incl },
6941 /* CC_OP_INCL */ { compute_all_incl, compute_c_incl },
6942#ifdef TARGET_X86_64
6943 /* CC_OP_INCQ */ { compute_all_incq, compute_c_incl },
6944#else
6945 /* CC_OP_INCQ */ { 0, 0 },
6946#endif
6947
6948 /* CC_OP_DECB */ { compute_all_decb, compute_c_incl },
6949 /* CC_OP_DECW */ { compute_all_decw, compute_c_incl },
6950 /* CC_OP_DECL */ { compute_all_decl, compute_c_incl },
6951#ifdef TARGET_X86_64
6952 /* CC_OP_DECQ */ { compute_all_decq, compute_c_incl },
6953#else
6954 /* CC_OP_DECQ */ { 0, 0 },
6955#endif
6956
6957 /* CC_OP_SHLB */ { compute_all_shlb, compute_c_shlb },
6958 /* CC_OP_SHLW */ { compute_all_shlw, compute_c_shlw },
6959 /* CC_OP_SHLL */ { compute_all_shll, compute_c_shll },
6960#ifdef TARGET_X86_64
6961 /* CC_OP_SHLQ */ { compute_all_shlq, compute_c_shlq },
6962#else
6963 /* CC_OP_SHLQ */ { 0, 0 },
6964#endif
6965
6966 /* CC_OP_SARB */ { compute_all_sarb, compute_c_sarl },
6967 /* CC_OP_SARW */ { compute_all_sarw, compute_c_sarl },
6968 /* CC_OP_SARL */ { compute_all_sarl, compute_c_sarl },
6969#ifdef TARGET_X86_64
6970 /* CC_OP_SARQ */ { compute_all_sarq, compute_c_sarl},
6971#else
6972 /* CC_OP_SARQ */ { 0, 0 },
6973#endif
6974};
6975#endif /* VBOX */
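
/* A minimal sketch of how the lazy-flags table above is consumed - the
 example_* names are hypothetical and the block is disabled; the real
 entry points live elsewhere in the recompiler. The idea: CC_OP records
 which operation last set the flags, and the matching pair of functions
 rebuilds either the full flag set or just the carry flag on demand. */
#if 0
static int example_materialize_eflags(void)
{
 /* e.g. after an 8-bit ADD, CC_OP == CC_OP_ADDB and CC_SRC/CC_DST hold
 the source operand and the result, so this returns the full
 OF/SF/ZF/AF/PF/CF bit pattern */
 return cc_table[CC_OP].compute_all();
}

static int example_carry_only(void)
{
 /* cheaper path when only CF is needed (ADC, SBB, RCL, ...) */
 return cc_table[CC_OP].compute_c();
}
#endif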