VirtualBox

source: vbox/trunk/src/recompiler/target-i386/op_helper.c@30361

Last change on this file since 30361 was 29333, checked in by vboxsync, 14 years ago

removed VBOX_WITH_VMI

1/*
2 * i386 helpers
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21/*
22 * Sun LGPL Disclaimer: For the avoidance of doubt, except that if any license choice
23 * other than GPL or LGPL is available it will apply instead, Sun elects to use only
24 * the Lesser General Public License version 2.1 (LGPLv2) at this time for any software where
25 * a choice of LGPL license versions is made available with the language indicating
26 * that LGPLv2 or any later version may be used, or where a choice of which version
27 * of the LGPL is applied is otherwise unspecified.
28 */
29#define CPU_NO_GLOBAL_REGS
30#include "exec.h"
31#include "host-utils.h"
32
33#ifdef VBOX
34#include "qemu-common.h"
35#include <math.h>
36#include "tcg.h"
37#endif
38//#define DEBUG_PCALL
39
40#if 0
41#define raise_exception_err(a, b)\
42do {\
43 if (logfile)\
44 fprintf(logfile, "raise_exception line=%d\n", __LINE__);\
45 (raise_exception_err)(a, b);\
46} while (0)
47#endif
48
49const uint8_t parity_table[256] = {
50 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
51 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
52 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
53 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
54 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
55 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
56 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
57 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
58 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
59 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
60 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
61 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
62 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
63 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
64 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
65 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
66 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
67 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
68 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
69 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
70 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
71 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
72 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
73 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
74 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
75 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
76 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
77 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
78 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
79 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
80 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
81 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
82};
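The 256-entry table above marks every byte value that has an even number of set bits with CC_P (the parity-flag bit, assumed here to be 0x0004 as in cpu.h). A minimal standalone sketch that regenerates the table contents under that assumption:

#include <stdio.h>

#define CC_P 0x0004   /* PF bit value assumed from cpu.h */

int main(void)
{
    int i, j, bits;
    for (i = 0; i < 256; i++) {
        for (bits = 0, j = 0; j < 8; j++)
            bits += (i >> j) & 1;
        /* even number of set bits -> parity flag set */
        printf("%s%s", (bits & 1) ? "0," : "CC_P,", (i & 7) == 7 ? "\n" : " ");
    }
    return 0;
}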
83
84/* modulo 17 table */
85const uint8_t rclw_table[32] = {
86 0, 1, 2, 3, 4, 5, 6, 7,
87 8, 9,10,11,12,13,14,15,
88 16, 0, 1, 2, 3, 4, 5, 6,
89 7, 8, 9,10,11,12,13,14,
90};
91
92/* modulo 9 table */
93const uint8_t rclb_table[32] = {
94 0, 1, 2, 3, 4, 5, 6, 7,
95 8, 0, 1, 2, 3, 4, 5, 6,
96 7, 8, 0, 1, 2, 3, 4, 5,
97 6, 7, 8, 0, 1, 2, 3, 4,
98};
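RCL/RCR rotate through the carry flag, so a 16-bit rotate operates on a 17-bit quantity and an 8-bit rotate on a 9-bit one; the two tables reduce a raw count of 0..31 modulo 17 and modulo 9 respectively. The following sketch of a 16-bit RCL shows how the reduced count is meant to be used (rcl16_sketch is an illustrative name, not a helper from this file):

/* Illustrative only: a 16-bit RCL through CF using rclw_table above.
   The 17-bit quantity is CF:val; rotating it left by 'count' leaves the
   new CF in bit 16 and the new value in bits 0..15. */
static unsigned rcl16_sketch(unsigned val, unsigned cf, int count, unsigned *cf_out)
{
    count = rclw_table[count & 0x1f];                 /* count mod 17 */
    if (count) {
        unsigned wide = (cf << 16) | (val & 0xffff);  /* 17-bit CF:val */
        wide = ((wide << count) | (wide >> (17 - count))) & 0x1ffff;
        *cf_out = wide >> 16;
        return wide & 0xffff;
    }
    *cf_out = cf;
    return val & 0xffff;
}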
99
100const CPU86_LDouble f15rk[7] =
101{
102 0.00000000000000000000L,
103 1.00000000000000000000L,
104 3.14159265358979323851L, /*pi*/
105 0.30102999566398119523L, /*lg2*/
106 0.69314718055994530943L, /*ln2*/
107 1.44269504088896340739L, /*l2e*/
108 3.32192809488736234781L, /*l2t*/
109};
110
111/* broken thread support */
112
113spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;
114
115void helper_lock(void)
116{
117 spin_lock(&global_cpu_lock);
118}
119
120void helper_unlock(void)
121{
122 spin_unlock(&global_cpu_lock);
123}
124
125void helper_write_eflags(target_ulong t0, uint32_t update_mask)
126{
127 load_eflags(t0, update_mask);
128}
129
130target_ulong helper_read_eflags(void)
131{
132 uint32_t eflags;
133 eflags = cc_table[CC_OP].compute_all();
134 eflags |= (DF & DF_MASK);
135 eflags |= env->eflags & ~(VM_MASK | RF_MASK);
136 return eflags;
137}
138
139#ifdef VBOX
140void helper_write_eflags_vme(target_ulong t0)
141{
142 unsigned int new_eflags = t0;
143
144 assert(env->eflags & (1<<VM_SHIFT));
145
146 /* if virtual interrupt pending and (virtual) interrupts will be enabled -> #GP */
147 /* if TF will be set -> #GP */
148 if ( ((new_eflags & IF_MASK) && (env->eflags & VIP_MASK))
149 || (new_eflags & TF_MASK)) {
150 raise_exception(EXCP0D_GPF);
151 } else {
152 load_eflags(new_eflags,
153 (TF_MASK | AC_MASK | ID_MASK | NT_MASK) & 0xffff);
154
155 if (new_eflags & IF_MASK) {
156 env->eflags |= VIF_MASK;
157 } else {
158 env->eflags &= ~VIF_MASK;
159 }
160 }
161}
162
163target_ulong helper_read_eflags_vme(void)
164{
165 uint32_t eflags;
166 eflags = cc_table[CC_OP].compute_all();
167 eflags |= (DF & DF_MASK);
168 eflags |= env->eflags & ~(VM_MASK | RF_MASK);
169 if (env->eflags & VIF_MASK)
170 eflags |= IF_MASK;
171 else
172 eflags &= ~IF_MASK;
173
174 /* According to AMD manual, should be read with IOPL == 3 */
175 eflags |= (3 << IOPL_SHIFT);
176
177 /* We only use helper_read_eflags_vme() in 16-bit mode */
178 return eflags & 0xffff;
179}
180
181void helper_dump_state()
182{
183 LogRel(("CS:EIP=%08x:%08x, FLAGS=%08x\n", env->segs[R_CS].base, env->eip, env->eflags));
184 LogRel(("EAX=%08x\tECX=%08x\tEDX=%08x\tEBX=%08x\n",
185 (uint32_t)env->regs[R_EAX], (uint32_t)env->regs[R_ECX],
186 (uint32_t)env->regs[R_EDX], (uint32_t)env->regs[R_EBX]));
187 LogRel(("ESP=%08x\tEBP=%08x\tESI=%08x\tEDI=%08x\n",
188 (uint32_t)env->regs[R_ESP], (uint32_t)env->regs[R_EBP],
189 (uint32_t)env->regs[R_ESI], (uint32_t)env->regs[R_EDI]));
190}
191#endif
192
193/* return non zero if error */
194#ifndef VBOX
195static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
196#else /* VBOX */
197DECLINLINE(int) load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
198#endif /* VBOX */
199 int selector)
200{
201 SegmentCache *dt;
202 int index;
203 target_ulong ptr;
204
205#ifdef VBOX
206 /* Trying to load a selector with CPL=1? */
207 if ((env->hflags & HF_CPL_MASK) == 0 && (selector & 3) == 1 && (env->state & CPU_RAW_RING0))
208 {
209 Log(("RPL 1 -> sel %04X -> %04X\n", selector, selector & 0xfffc));
210 selector = selector & 0xfffc;
211 }
212#endif
213
214 if (selector & 0x4)
215 dt = &env->ldt;
216 else
217 dt = &env->gdt;
218 index = selector & ~7;
219 if ((index + 7) > dt->limit)
220 return -1;
221 ptr = dt->base + index;
222 *e1_ptr = ldl_kernel(ptr);
223 *e2_ptr = ldl_kernel(ptr + 4);
224 return 0;
225}
226
227#ifndef VBOX
228static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
229#else /* VBOX */
230DECLINLINE(unsigned int) get_seg_limit(uint32_t e1, uint32_t e2)
231#endif /* VBOX */
232{
233 unsigned int limit;
234 limit = (e1 & 0xffff) | (e2 & 0x000f0000);
235 if (e2 & DESC_G_MASK)
236 limit = (limit << 12) | 0xfff;
237 return limit;
238}
239
240#ifndef VBOX
241static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
242#else /* VBOX */
243DECLINLINE(uint32_t) get_seg_base(uint32_t e1, uint32_t e2)
244#endif /* VBOX */
245{
246 return ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
247}
248
249#ifndef VBOX
250static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
251#else /* VBOX */
252DECLINLINE(void) load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
253#endif /* VBOX */
254{
255 sc->base = get_seg_base(e1, e2);
256 sc->limit = get_seg_limit(e1, e2);
257 sc->flags = e2;
258}
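get_seg_base() and get_seg_limit() reassemble fields that a segment descriptor scatters across its two 32-bit halves (e1 = bytes 0-3, e2 = bytes 4-7), expanding the limit according to the granularity bit. As a worked example, the common flat 4 GiB code descriptor 0x00cf9a000000ffff decodes to base 0 and limit 0xffffffff; a standalone sketch repeating the same bit operations:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint64_t desc = 0x00cf9a000000ffffULL;     /* flat 32-bit code segment */
    uint32_t e1 = (uint32_t)desc;              /* bytes 0-3 */
    uint32_t e2 = (uint32_t)(desc >> 32);      /* bytes 4-7 */

    uint32_t base  = (e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000);
    uint32_t limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & (1 << 23))                        /* G bit: 4 KiB granularity */
        limit = (limit << 12) | 0xfff;

    printf("base=%08x limit=%08x\n", base, limit);   /* base=00000000 limit=ffffffff */
    return 0;
}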
259
260/* init the segment cache in vm86 mode. */
261#ifndef VBOX
262static inline void load_seg_vm(int seg, int selector)
263#else /* VBOX */
264DECLINLINE(void) load_seg_vm(int seg, int selector)
265#endif /* VBOX */
266{
267 selector &= 0xffff;
268#ifdef VBOX
269 /* flags must be 0xf3; expand-up read/write accessed data segment with DPL=3. (VT-x) */
270 unsigned flags = DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | DESC_A_MASK;
271 flags |= (3 << DESC_DPL_SHIFT);
272
273 cpu_x86_load_seg_cache(env, seg, selector,
274 (selector << 4), 0xffff, flags);
275#else
276 cpu_x86_load_seg_cache(env, seg, selector,
277 (selector << 4), 0xffff, 0);
278#endif
279}
280
281#ifndef VBOX
282static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
283#else /* VBOX */
284DECLINLINE(void) get_ss_esp_from_tss(uint32_t *ss_ptr,
285#endif /* VBOX */
286 uint32_t *esp_ptr, int dpl)
287{
288#ifndef VBOX
289 int type, index, shift;
290#else
291 unsigned int type, index, shift;
292#endif
293
294#if 0
295 {
296 int i;
297 printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
298 for(i=0;i<env->tr.limit;i++) {
299 printf("%02x ", env->tr.base[i]);
300 if ((i & 7) == 7) printf("\n");
301 }
302 printf("\n");
303 }
304#endif
305
306 if (!(env->tr.flags & DESC_P_MASK))
307 cpu_abort(env, "invalid tss");
308 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
309 if ((type & 7) != 1)
310 cpu_abort(env, "invalid tss type");
311 shift = type >> 3;
312 index = (dpl * 4 + 2) << shift;
313 if (index + (4 << shift) - 1 > env->tr.limit)
314 raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
315 if (shift == 0) {
316 *esp_ptr = lduw_kernel(env->tr.base + index);
317 *ss_ptr = lduw_kernel(env->tr.base + index + 2);
318 } else {
319 *esp_ptr = ldl_kernel(env->tr.base + index);
320 *ss_ptr = lduw_kernel(env->tr.base + index + 4);
321 }
322}
323
324/* XXX: merge with load_seg() */
325static void tss_load_seg(int seg_reg, int selector)
326{
327 uint32_t e1, e2;
328 int rpl, dpl, cpl;
329
330#ifdef VBOX
331 e1 = e2 = 0;
332 cpl = env->hflags & HF_CPL_MASK;
333 /* Trying to load a selector with CPL=1? */
334 if (cpl == 0 && (selector & 3) == 1 && (env->state & CPU_RAW_RING0))
335 {
336 Log(("RPL 1 -> sel %04X -> %04X\n", selector, selector & 0xfffc));
337 selector = selector & 0xfffc;
338 }
339#endif
340
341 if ((selector & 0xfffc) != 0) {
342 if (load_segment(&e1, &e2, selector) != 0)
343 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
344 if (!(e2 & DESC_S_MASK))
345 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
346 rpl = selector & 3;
347 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
348 cpl = env->hflags & HF_CPL_MASK;
349 if (seg_reg == R_CS) {
350 if (!(e2 & DESC_CS_MASK))
351 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
352 /* XXX: is it correct ? */
353 if (dpl != rpl)
354 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
355 if ((e2 & DESC_C_MASK) && dpl > rpl)
356 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
357 } else if (seg_reg == R_SS) {
358 /* SS must be writable data */
359 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
360 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
361 if (dpl != cpl || dpl != rpl)
362 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
363 } else {
364 /* not readable code */
365 if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
366 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
367 /* if data or non-conforming code, check the rights */
368 if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
369 if (dpl < cpl || dpl < rpl)
370 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
371 }
372 }
373 if (!(e2 & DESC_P_MASK))
374 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
375 cpu_x86_load_seg_cache(env, seg_reg, selector,
376 get_seg_base(e1, e2),
377 get_seg_limit(e1, e2),
378 e2);
379 } else {
380 if (seg_reg == R_SS || seg_reg == R_CS)
381 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
382#ifdef VBOX
383#if 0
384 /** @todo: for now we ignore loading of null (0) selectors; need to check once what the correct behaviour is */
385 cpu_x86_load_seg_cache(env, seg_reg, selector,
386 0, 0, 0);
387#endif
388#endif
389 }
390}
391
392#define SWITCH_TSS_JMP 0
393#define SWITCH_TSS_IRET 1
394#define SWITCH_TSS_CALL 2
395
396/* XXX: restore CPU state in registers (PowerPC case) */
397static void switch_tss(int tss_selector,
398 uint32_t e1, uint32_t e2, int source,
399 uint32_t next_eip)
400{
401 int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
402 target_ulong tss_base;
403 uint32_t new_regs[8], new_segs[6];
404 uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
405 uint32_t old_eflags, eflags_mask;
406 SegmentCache *dt;
407#ifndef VBOX
408 int index;
409#else
410 unsigned int index;
411#endif
412 target_ulong ptr;
413
414 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
415#ifdef DEBUG_PCALL
416 if (loglevel & CPU_LOG_PCALL)
417 fprintf(logfile, "switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);
418#endif
419
420#if defined(VBOX) && defined(DEBUG)
421 printf("switch_tss %x %x %x %d %08x\n", tss_selector, e1, e2, source, next_eip);
422#endif
423
424 /* if task gate, we read the TSS segment and we load it */
425 if (type == 5) {
426 if (!(e2 & DESC_P_MASK))
427 raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
428 tss_selector = e1 >> 16;
429 if (tss_selector & 4)
430 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
431 if (load_segment(&e1, &e2, tss_selector) != 0)
432 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
433 if (e2 & DESC_S_MASK)
434 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
435 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
436 if ((type & 7) != 1)
437 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
438 }
439
440 if (!(e2 & DESC_P_MASK))
441 raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
442
443 if (type & 8)
444 tss_limit_max = 103;
445 else
446 tss_limit_max = 43;
447 tss_limit = get_seg_limit(e1, e2);
448 tss_base = get_seg_base(e1, e2);
449 if ((tss_selector & 4) != 0 ||
450 tss_limit < tss_limit_max)
451 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
452 old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
453 if (old_type & 8)
454 old_tss_limit_max = 103;
455 else
456 old_tss_limit_max = 43;
457
458 /* read all the registers from the new TSS */
459 if (type & 8) {
460 /* 32 bit */
461 new_cr3 = ldl_kernel(tss_base + 0x1c);
462 new_eip = ldl_kernel(tss_base + 0x20);
463 new_eflags = ldl_kernel(tss_base + 0x24);
464 for(i = 0; i < 8; i++)
465 new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
466 for(i = 0; i < 6; i++)
467 new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
468 new_ldt = lduw_kernel(tss_base + 0x60);
469 new_trap = ldl_kernel(tss_base + 0x64);
470 } else {
471 /* 16 bit */
472 new_cr3 = 0;
473 new_eip = lduw_kernel(tss_base + 0x0e);
474 new_eflags = lduw_kernel(tss_base + 0x10);
475 for(i = 0; i < 8; i++)
476 new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
477 for(i = 0; i < 4; i++)
478 new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
479 new_ldt = lduw_kernel(tss_base + 0x2a);
480 new_segs[R_FS] = 0;
481 new_segs[R_GS] = 0;
482 new_trap = 0;
483 }
484
485 /* NOTE: we must avoid memory exceptions during the task switch,
486 so we make dummy accesses before */
487 /* XXX: it can still fail in some cases, so a bigger hack is
488 necessary to validate the TLB after having done the accesses */
489
490 v1 = ldub_kernel(env->tr.base);
491 v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
492 stb_kernel(env->tr.base, v1);
493 stb_kernel(env->tr.base + old_tss_limit_max, v2);
494
495 /* clear busy bit (it is restartable) */
496 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
497 target_ulong ptr;
498 uint32_t e2;
499 ptr = env->gdt.base + (env->tr.selector & ~7);
500 e2 = ldl_kernel(ptr + 4);
501 e2 &= ~DESC_TSS_BUSY_MASK;
502 stl_kernel(ptr + 4, e2);
503 }
504 old_eflags = compute_eflags();
505 if (source == SWITCH_TSS_IRET)
506 old_eflags &= ~NT_MASK;
507
508 /* save the current state in the old TSS */
509 if (type & 8) {
510 /* 32 bit */
511 stl_kernel(env->tr.base + 0x20, next_eip);
512 stl_kernel(env->tr.base + 0x24, old_eflags);
513 stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
514 stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
515 stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
516 stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
517 stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
518 stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
519 stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
520 stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
521 for(i = 0; i < 6; i++)
522 stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
523#ifdef VBOX
524 /* Must store the ldt as it gets reloaded and might have been changed. */
525 stw_kernel(env->tr.base + 0x60, env->ldt.selector);
526#endif
527#if defined(VBOX) && defined(DEBUG)
528 printf("TSS 32 bits switch\n");
529 printf("Saving CS=%08X\n", env->segs[R_CS].selector);
530#endif
531 } else {
532 /* 16 bit */
533 stw_kernel(env->tr.base + 0x0e, next_eip);
534 stw_kernel(env->tr.base + 0x10, old_eflags);
535 stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
536 stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
537 stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
538 stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
539 stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
540 stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
541 stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
542 stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
543 for(i = 0; i < 4; i++)
544 stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
545#ifdef VBOX
546 /* Must store the ldt as it gets reloaded and might have been changed. */
547 stw_kernel(env->tr.base + 0x2a, env->ldt.selector);
548#endif
549 }
550
551 /* now if an exception occurs, it will occur in the next task
552 context */
553
554 if (source == SWITCH_TSS_CALL) {
555 stw_kernel(tss_base, env->tr.selector);
556 new_eflags |= NT_MASK;
557 }
558
559 /* set busy bit */
560 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
561 target_ulong ptr;
562 uint32_t e2;
563 ptr = env->gdt.base + (tss_selector & ~7);
564 e2 = ldl_kernel(ptr + 4);
565 e2 |= DESC_TSS_BUSY_MASK;
566 stl_kernel(ptr + 4, e2);
567 }
568
569 /* set the new CPU state */
570 /* from this point, any exception which occurs can give problems */
571 env->cr[0] |= CR0_TS_MASK;
572 env->hflags |= HF_TS_MASK;
573 env->tr.selector = tss_selector;
574 env->tr.base = tss_base;
575 env->tr.limit = tss_limit;
576 env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;
577
578 if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
579 cpu_x86_update_cr3(env, new_cr3);
580 }
581
582 /* load all registers without an exception, then reload them with
583 possible exceptions */
584 env->eip = new_eip;
585 eflags_mask = TF_MASK | AC_MASK | ID_MASK |
586 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
587 if (!(type & 8))
588 eflags_mask &= 0xffff;
589 load_eflags(new_eflags, eflags_mask);
590 /* XXX: what to do in 16 bit case ? */
591 EAX = new_regs[0];
592 ECX = new_regs[1];
593 EDX = new_regs[2];
594 EBX = new_regs[3];
595 ESP = new_regs[4];
596 EBP = new_regs[5];
597 ESI = new_regs[6];
598 EDI = new_regs[7];
599 if (new_eflags & VM_MASK) {
600 for(i = 0; i < 6; i++)
601 load_seg_vm(i, new_segs[i]);
602 /* in vm86, CPL is always 3 */
603 cpu_x86_set_cpl(env, 3);
604 } else {
605 /* CPL is set to the RPL of CS */
606 cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
607 /* first just selectors as the rest may trigger exceptions */
608 for(i = 0; i < 6; i++)
609 cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
610 }
611
612 env->ldt.selector = new_ldt & ~4;
613 env->ldt.base = 0;
614 env->ldt.limit = 0;
615 env->ldt.flags = 0;
616
617 /* load the LDT */
618 if (new_ldt & 4)
619 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
620
621 if ((new_ldt & 0xfffc) != 0) {
622 dt = &env->gdt;
623 index = new_ldt & ~7;
624 if ((index + 7) > dt->limit)
625 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
626 ptr = dt->base + index;
627 e1 = ldl_kernel(ptr);
628 e2 = ldl_kernel(ptr + 4);
629 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
630 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
631 if (!(e2 & DESC_P_MASK))
632 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
633 load_seg_cache_raw_dt(&env->ldt, e1, e2);
634 }
635
636 /* load the segments */
637 if (!(new_eflags & VM_MASK)) {
638 tss_load_seg(R_CS, new_segs[R_CS]);
639 tss_load_seg(R_SS, new_segs[R_SS]);
640 tss_load_seg(R_ES, new_segs[R_ES]);
641 tss_load_seg(R_DS, new_segs[R_DS]);
642 tss_load_seg(R_FS, new_segs[R_FS]);
643 tss_load_seg(R_GS, new_segs[R_GS]);
644 }
645
646 /* check that EIP is in the CS segment limits */
647 if (new_eip > env->segs[R_CS].limit) {
648 /* XXX: different exception if CALL ? */
649 raise_exception_err(EXCP0D_GPF, 0);
650 }
651}
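switch_tss() and get_ss_esp_from_tss() rely on the fixed 32-bit TSS layout: CR3 at 0x1c, EIP at 0x20, EFLAGS at 0x24, the eight general registers at 0x28, the six segment selectors at 0x48, the LDT selector at 0x60 and the trap flag / I/O map base at 0x64/0x66; the minimum limit of 103 checked above is the size of that structure minus one. A reference sketch of the layout (field names are descriptive, not taken from this file):

#include <stdint.h>

/* Offsets in the comments match the ldl_kernel/stl_kernel accesses above. */
struct tss32_sketch {
    uint16_t prev_task_link, res0;       /* 0x00 */
    uint32_t esp0; uint16_t ss0, res1;   /* 0x04, 0x08 */
    uint32_t esp1; uint16_t ss1, res2;   /* 0x0c, 0x10 */
    uint32_t esp2; uint16_t ss2, res3;   /* 0x14, 0x18 */
    uint32_t cr3;                        /* 0x1c */
    uint32_t eip;                        /* 0x20 */
    uint32_t eflags;                     /* 0x24 */
    uint32_t regs[8];                    /* 0x28: EAX,ECX,EDX,EBX,ESP,EBP,ESI,EDI */
    uint16_t segs[6][2];                 /* 0x48: ES,CS,SS,DS,FS,GS selector + pad */
    uint16_t ldt, res4;                  /* 0x60 */
    uint16_t trap;                       /* 0x64: bit 0 = debug trap on task switch */
    uint16_t iomap_base;                 /* 0x66 */
};                                       /* sizeof == 104, hence tss_limit_max 103 */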
652
653/* check if Port I/O is allowed in TSS */
654#ifndef VBOX
655static inline void check_io(int addr, int size)
656{
657 int io_offset, val, mask;
658
659#else /* VBOX */
660DECLINLINE(void) check_io(int addr, int size)
661{
662 int val, mask;
663 unsigned int io_offset;
664#endif /* VBOX */
665 /* TSS must be a valid 32 bit one */
666 if (!(env->tr.flags & DESC_P_MASK) ||
667 ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
668 env->tr.limit < 103)
669 goto fail;
670 io_offset = lduw_kernel(env->tr.base + 0x66);
671 io_offset += (addr >> 3);
672 /* Note: the check needs two bytes */
673 if ((io_offset + 1) > env->tr.limit)
674 goto fail;
675 val = lduw_kernel(env->tr.base + io_offset);
676 val >>= (addr & 7);
677 mask = (1 << size) - 1;
678 /* all bits must be zero to allow the I/O */
679 if ((val & mask) != 0) {
680 fail:
681 raise_exception_err(EXCP0D_GPF, 0);
682 }
683}
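check_io() walks the I/O permission bitmap whose 16-bit offset is stored at TSS offset 0x66: one bit per port, two bytes are read because an access may straddle a byte boundary, and every bit covering the access must be clear. For example, a 2-byte access to port 0x3F9 tests bits 1-2 of the bitmap byte at io_base + 0x7f. A standalone sketch of the same test against a plain byte array (io_denied is an illustrative name):

#include <stdint.h>

/* Returns non-zero if the 'size'-byte access at 'port' is denied. */
static int io_denied(const uint8_t *bitmap, unsigned bitmap_len,
                     unsigned port, unsigned size)
{
    unsigned byte = port >> 3;
    if (byte + 1 >= bitmap_len)                  /* the check reads two bytes */
        return 1;
    uint16_t val = (uint16_t)(bitmap[byte] | (bitmap[byte + 1] << 8));
    val >>= (port & 7);
    return (val & ((1u << size) - 1)) != 0;      /* any set bit denies access */
}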
684
685#ifdef VBOX
686/* Keep in sync with gen_check_external_event() */
687void helper_check_external_event()
688{
689 if ( (env->interrupt_request & ( CPU_INTERRUPT_EXTERNAL_EXIT
690 | CPU_INTERRUPT_EXTERNAL_TIMER
691 | CPU_INTERRUPT_EXTERNAL_DMA))
692 || ( (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD)
693 && (env->eflags & IF_MASK)
694 && !(env->hflags & HF_INHIBIT_IRQ_MASK) ) )
695 {
696 helper_external_event();
697 }
698
699}
700
701void helper_sync_seg(uint32_t reg)
702{
703 if (env->segs[reg].newselector)
704 sync_seg(env, reg, env->segs[reg].newselector);
705}
706#endif
707
708void helper_check_iob(uint32_t t0)
709{
710 check_io(t0, 1);
711}
712
713void helper_check_iow(uint32_t t0)
714{
715 check_io(t0, 2);
716}
717
718void helper_check_iol(uint32_t t0)
719{
720 check_io(t0, 4);
721}
722
723void helper_outb(uint32_t port, uint32_t data)
724{
725 cpu_outb(env, port, data & 0xff);
726}
727
728target_ulong helper_inb(uint32_t port)
729{
730 return cpu_inb(env, port);
731}
732
733void helper_outw(uint32_t port, uint32_t data)
734{
735 cpu_outw(env, port, data & 0xffff);
736}
737
738target_ulong helper_inw(uint32_t port)
739{
740 return cpu_inw(env, port);
741}
742
743void helper_outl(uint32_t port, uint32_t data)
744{
745 cpu_outl(env, port, data);
746}
747
748target_ulong helper_inl(uint32_t port)
749{
750 return cpu_inl(env, port);
751}
752
753#ifndef VBOX
754static inline unsigned int get_sp_mask(unsigned int e2)
755#else /* VBOX */
756DECLINLINE(unsigned int) get_sp_mask(unsigned int e2)
757#endif /* VBOX */
758{
759 if (e2 & DESC_B_MASK)
760 return 0xffffffff;
761 else
762 return 0xffff;
763}
764
765#ifdef TARGET_X86_64
766#define SET_ESP(val, sp_mask)\
767do {\
768 if ((sp_mask) == 0xffff)\
769 ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
770 else if ((sp_mask) == 0xffffffffLL)\
771 ESP = (uint32_t)(val);\
772 else\
773 ESP = (val);\
774} while (0)
775#else
776#define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
777#endif
778
779/* in 64-bit machines, this can overflow. So this segment addition macro
780 * can be used to trim the value to 32-bit whenever needed */
781#define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))
782
783/* XXX: add a is_user flag to have proper security support */
784#define PUSHW(ssp, sp, sp_mask, val)\
785{\
786 sp -= 2;\
787 stw_kernel((ssp) + (sp & (sp_mask)), (val));\
788}
789
790#define PUSHL(ssp, sp, sp_mask, val)\
791{\
792 sp -= 4;\
793 stl_kernel(SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val));\
794}
795
796#define POPW(ssp, sp, sp_mask, val)\
797{\
798 val = lduw_kernel((ssp) + (sp & (sp_mask)));\
799 sp += 2;\
800}
801
802#define POPL(ssp, sp, sp_mask, val)\
803{\
804 val = (uint32_t)ldl_kernel(SEG_ADDL(ssp, sp, sp_mask));\
805 sp += 4;\
806}
807
808/* protected mode interrupt */
809static void do_interrupt_protected(int intno, int is_int, int error_code,
810 unsigned int next_eip, int is_hw)
811{
812 SegmentCache *dt;
813 target_ulong ptr, ssp;
814 int type, dpl, selector, ss_dpl, cpl;
815 int has_error_code, new_stack, shift;
816 uint32_t e1, e2, offset, ss, esp, ss_e1, ss_e2;
817 uint32_t old_eip, sp_mask;
818
819#ifdef VBOX
820 ss = ss_e1 = ss_e2 = 0;
821 if (remR3NotifyTrap(env, intno, error_code, next_eip) != VINF_SUCCESS)
822 cpu_loop_exit();
823#endif
824
825 has_error_code = 0;
826 if (!is_int && !is_hw) {
827 switch(intno) {
828 case 8:
829 case 10:
830 case 11:
831 case 12:
832 case 13:
833 case 14:
834 case 17:
835 has_error_code = 1;
836 break;
837 }
838 }
839 if (is_int)
840 old_eip = next_eip;
841 else
842 old_eip = env->eip;
843
844 dt = &env->idt;
845#ifndef VBOX
846 if (intno * 8 + 7 > dt->limit)
847#else
848 if ((unsigned)intno * 8 + 7 > dt->limit)
849#endif
850 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
851 ptr = dt->base + intno * 8;
852 e1 = ldl_kernel(ptr);
853 e2 = ldl_kernel(ptr + 4);
854 /* check gate type */
855 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
856 switch(type) {
857 case 5: /* task gate */
858 /* must do that check here to return the correct error code */
859 if (!(e2 & DESC_P_MASK))
860 raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
861 switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
862 if (has_error_code) {
863 int type;
864 uint32_t mask;
865 /* push the error code */
866 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
867 shift = type >> 3;
868 if (env->segs[R_SS].flags & DESC_B_MASK)
869 mask = 0xffffffff;
870 else
871 mask = 0xffff;
872 esp = (ESP - (2 << shift)) & mask;
873 ssp = env->segs[R_SS].base + esp;
874 if (shift)
875 stl_kernel(ssp, error_code);
876 else
877 stw_kernel(ssp, error_code);
878 SET_ESP(esp, mask);
879 }
880 return;
881 case 6: /* 286 interrupt gate */
882 case 7: /* 286 trap gate */
883 case 14: /* 386 interrupt gate */
884 case 15: /* 386 trap gate */
885 break;
886 default:
887 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
888 break;
889 }
890 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
891 cpl = env->hflags & HF_CPL_MASK;
892 /* check privilege if software int */
893 if (is_int && dpl < cpl)
894 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
895 /* check valid bit */
896 if (!(e2 & DESC_P_MASK))
897 raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
898 selector = e1 >> 16;
899 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
900 if ((selector & 0xfffc) == 0)
901 raise_exception_err(EXCP0D_GPF, 0);
902
903 if (load_segment(&e1, &e2, selector) != 0)
904 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
905 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
906 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
907 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
908 if (dpl > cpl)
909 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
910 if (!(e2 & DESC_P_MASK))
911 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
912 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
913 /* to inner privilege */
914 get_ss_esp_from_tss(&ss, &esp, dpl);
915 if ((ss & 0xfffc) == 0)
916 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
917 if ((ss & 3) != dpl)
918 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
919 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
920 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
921 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
922 if (ss_dpl != dpl)
923 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
924 if (!(ss_e2 & DESC_S_MASK) ||
925 (ss_e2 & DESC_CS_MASK) ||
926 !(ss_e2 & DESC_W_MASK))
927 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
928 if (!(ss_e2 & DESC_P_MASK))
929#ifdef VBOX /* See page 3-477 of 253666.pdf */
930 raise_exception_err(EXCP0C_STACK, ss & 0xfffc);
931#else
932 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
933#endif
934 new_stack = 1;
935 sp_mask = get_sp_mask(ss_e2);
936 ssp = get_seg_base(ss_e1, ss_e2);
937#if defined(VBOX) && defined(DEBUG)
938 printf("new stack %04X:%08X gate dpl=%d\n", ss, esp, dpl);
939#endif
940 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
941 /* to same privilege */
942 if (env->eflags & VM_MASK)
943 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
944 new_stack = 0;
945 sp_mask = get_sp_mask(env->segs[R_SS].flags);
946 ssp = env->segs[R_SS].base;
947 esp = ESP;
948 dpl = cpl;
949 } else {
950 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
951 new_stack = 0; /* avoid warning */
952 sp_mask = 0; /* avoid warning */
953 ssp = 0; /* avoid warning */
954 esp = 0; /* avoid warning */
955 }
956
957 shift = type >> 3;
958
959#if 0
960 /* XXX: check that enough room is available */
961 push_size = 6 + (new_stack << 2) + (has_error_code << 1);
962 if (env->eflags & VM_MASK)
963 push_size += 8;
964 push_size <<= shift;
965#endif
966 if (shift == 1) {
967 if (new_stack) {
968 if (env->eflags & VM_MASK) {
969 PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
970 PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
971 PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
972 PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
973 }
974 PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
975 PUSHL(ssp, esp, sp_mask, ESP);
976 }
977 PUSHL(ssp, esp, sp_mask, compute_eflags());
978 PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
979 PUSHL(ssp, esp, sp_mask, old_eip);
980 if (has_error_code) {
981 PUSHL(ssp, esp, sp_mask, error_code);
982 }
983 } else {
984 if (new_stack) {
985 if (env->eflags & VM_MASK) {
986 PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
987 PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
988 PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
989 PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
990 }
991 PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
992 PUSHW(ssp, esp, sp_mask, ESP);
993 }
994 PUSHW(ssp, esp, sp_mask, compute_eflags());
995 PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
996 PUSHW(ssp, esp, sp_mask, old_eip);
997 if (has_error_code) {
998 PUSHW(ssp, esp, sp_mask, error_code);
999 }
1000 }
1001
1002 if (new_stack) {
1003 if (env->eflags & VM_MASK) {
1004 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
1005 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
1006 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
1007 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
1008 }
1009 ss = (ss & ~3) | dpl;
1010 cpu_x86_load_seg_cache(env, R_SS, ss,
1011 ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
1012 }
1013 SET_ESP(esp, sp_mask);
1014
1015 selector = (selector & ~3) | dpl;
1016 cpu_x86_load_seg_cache(env, R_CS, selector,
1017 get_seg_base(e1, e2),
1018 get_seg_limit(e1, e2),
1019 e2);
1020 cpu_x86_set_cpl(env, dpl);
1021 env->eip = offset;
1022
1023 /* interrupt gate clear IF mask */
1024 if ((type & 1) == 0) {
1025 env->eflags &= ~IF_MASK;
1026 }
1027#ifndef VBOX
1028 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
1029#else
1030 /*
1031 * We must clear VIP/VIF too on interrupt entry, as otherwise FreeBSD
1032 * gets confused by seemingly changed EFLAGS. See #3491 and
1033 * public bug #2341.
1034 */
1035 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK | VIF_MASK | VIP_MASK);
1036#endif
1037}
1038#ifdef VBOX
1039
1040/* check if VME interrupt redirection is enabled in TSS */
1041DECLINLINE(bool) is_vme_irq_redirected(int intno)
1042{
1043 unsigned int io_offset, intredir_offset;
1044 unsigned char val, mask;
1045
1046 /* TSS must be a valid 32 bit one */
1047 if (!(env->tr.flags & DESC_P_MASK) ||
1048 ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
1049 env->tr.limit < 103)
1050 goto fail;
1051 io_offset = lduw_kernel(env->tr.base + 0x66);
1052 /* Make sure the io bitmap offset is valid; anything less than sizeof(VBOXTSS) means there's none. */
1053 if (io_offset < 0x68 + 0x20)
1054 io_offset = 0x68 + 0x20;
1055 /* the virtual interrupt redirection bitmap is located below the io bitmap */
1056 intredir_offset = io_offset - 0x20;
1057
1058 intredir_offset += (intno >> 3);
1059 if ((intredir_offset) > env->tr.limit)
1060 goto fail;
1061
1062 val = ldub_kernel(env->tr.base + intredir_offset);
1063 mask = 1 << (unsigned char)(intno & 7);
1064
1065 /* bit set means no redirection. */
1066 if ((val & mask) != 0) {
1067 return false;
1068 }
1069 return true;
1070
1071fail:
1072 raise_exception_err(EXCP0D_GPF, 0);
1073 return true;
1074}
1075
1076/* V86 mode software interrupt with CR4.VME=1 */
1077static void do_soft_interrupt_vme(int intno, int error_code, unsigned int next_eip)
1078{
1079 target_ulong ptr, ssp;
1080 int selector;
1081 uint32_t offset, esp;
1082 uint32_t old_cs, old_eflags;
1083 uint32_t iopl;
1084
1085 iopl = ((env->eflags >> IOPL_SHIFT) & 3);
1086
1087 if (!is_vme_irq_redirected(intno))
1088 {
1089 if (iopl == 3)
1090 {
1091 do_interrupt_protected(intno, 1, error_code, next_eip, 0);
1092 return;
1093 }
1094 else
1095 raise_exception_err(EXCP0D_GPF, 0);
1096 }
1097
1098 /* virtual mode idt is at linear address 0 */
1099 ptr = 0 + intno * 4;
1100 offset = lduw_kernel(ptr);
1101 selector = lduw_kernel(ptr + 2);
1102 esp = ESP;
1103 ssp = env->segs[R_SS].base;
1104 old_cs = env->segs[R_CS].selector;
1105
1106 old_eflags = compute_eflags();
1107 if (iopl < 3)
1108 {
1109 /* copy VIF into IF and set IOPL to 3 */
1110 if (env->eflags & VIF_MASK)
1111 old_eflags |= IF_MASK;
1112 else
1113 old_eflags &= ~IF_MASK;
1114
1115 old_eflags |= (3 << IOPL_SHIFT);
1116 }
1117
1118 /* XXX: use SS segment size ? */
1119 PUSHW(ssp, esp, 0xffff, old_eflags);
1120 PUSHW(ssp, esp, 0xffff, old_cs);
1121 PUSHW(ssp, esp, 0xffff, next_eip);
1122
1123 /* update processor state */
1124 ESP = (ESP & ~0xffff) | (esp & 0xffff);
1125 env->eip = offset;
1126 env->segs[R_CS].selector = selector;
1127 env->segs[R_CS].base = (selector << 4);
1128 env->eflags &= ~(TF_MASK | RF_MASK);
1129
1130 if (iopl < 3)
1131 env->eflags &= ~VIF_MASK;
1132 else
1133 env->eflags &= ~IF_MASK;
1134}
1135#endif /* VBOX */
1136
1137#ifdef TARGET_X86_64
1138
1139#define PUSHQ(sp, val)\
1140{\
1141 sp -= 8;\
1142 stq_kernel(sp, (val));\
1143}
1144
1145#define POPQ(sp, val)\
1146{\
1147 val = ldq_kernel(sp);\
1148 sp += 8;\
1149}
1150
1151#ifndef VBOX
1152static inline target_ulong get_rsp_from_tss(int level)
1153#else /* VBOX */
1154DECLINLINE(target_ulong) get_rsp_from_tss(int level)
1155#endif /* VBOX */
1156{
1157 int index;
1158
1159#if 0
1160 printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
1161 env->tr.base, env->tr.limit);
1162#endif
1163
1164 if (!(env->tr.flags & DESC_P_MASK))
1165 cpu_abort(env, "invalid tss");
1166 index = 8 * level + 4;
1167 if ((index + 7) > env->tr.limit)
1168 raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
1169 return ldq_kernel(env->tr.base + index);
1170}
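The index 8 * level + 4 follows from the 64-bit TSS layout: RSP0-RSP2 are 8-byte fields starting at offset 4, and IST1-IST7 start at offset 0x24, which is why IST n is passed in as level n + 3. A packed reference sketch of that layout (names are descriptive only, not from this file):

#include <stdint.h>

#pragma pack(push, 1)
struct tss64_sketch {
    uint32_t reserved0;      /* 0x00 */
    uint64_t rsp[3];         /* 0x04: RSP0..RSP2 -> level 0..2 */
    uint64_t reserved1;      /* 0x1c */
    uint64_t ist[7];         /* 0x24: IST1..IST7 -> level 4..10 */
    uint64_t reserved2;      /* 0x5c */
    uint16_t reserved3;      /* 0x64 */
    uint16_t iomap_base;     /* 0x66 */
};
#pragma pack(pop)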
1171
1172/* 64 bit interrupt */
1173static void do_interrupt64(int intno, int is_int, int error_code,
1174 target_ulong next_eip, int is_hw)
1175{
1176 SegmentCache *dt;
1177 target_ulong ptr;
1178 int type, dpl, selector, cpl, ist;
1179 int has_error_code, new_stack;
1180 uint32_t e1, e2, e3, ss;
1181 target_ulong old_eip, esp, offset;
1182
1183#ifdef VBOX
1184 if (remR3NotifyTrap(env, intno, error_code, next_eip) != VINF_SUCCESS)
1185 cpu_loop_exit();
1186#endif
1187
1188 has_error_code = 0;
1189 if (!is_int && !is_hw) {
1190 switch(intno) {
1191 case 8:
1192 case 10:
1193 case 11:
1194 case 12:
1195 case 13:
1196 case 14:
1197 case 17:
1198 has_error_code = 1;
1199 break;
1200 }
1201 }
1202 if (is_int)
1203 old_eip = next_eip;
1204 else
1205 old_eip = env->eip;
1206
1207 dt = &env->idt;
1208 if (intno * 16 + 15 > dt->limit)
1209 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
1210 ptr = dt->base + intno * 16;
1211 e1 = ldl_kernel(ptr);
1212 e2 = ldl_kernel(ptr + 4);
1213 e3 = ldl_kernel(ptr + 8);
1214 /* check gate type */
1215 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
1216 switch(type) {
1217 case 14: /* 386 interrupt gate */
1218 case 15: /* 386 trap gate */
1219 break;
1220 default:
1221 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
1222 break;
1223 }
1224 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1225 cpl = env->hflags & HF_CPL_MASK;
1226 /* check privilege if software int */
1227 if (is_int && dpl < cpl)
1228 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
1229 /* check valid bit */
1230 if (!(e2 & DESC_P_MASK))
1231 raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2);
1232 selector = e1 >> 16;
1233 offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
1234 ist = e2 & 7;
1235 if ((selector & 0xfffc) == 0)
1236 raise_exception_err(EXCP0D_GPF, 0);
1237
1238 if (load_segment(&e1, &e2, selector) != 0)
1239 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1240 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
1241 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1242 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1243 if (dpl > cpl)
1244 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1245 if (!(e2 & DESC_P_MASK))
1246 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
1247 if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))
1248 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1249 if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
1250 /* to inner privilege */
1251 if (ist != 0)
1252 esp = get_rsp_from_tss(ist + 3);
1253 else
1254 esp = get_rsp_from_tss(dpl);
1255 esp &= ~0xfLL; /* align stack */
1256 ss = 0;
1257 new_stack = 1;
1258 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
1259 /* to same privilege */
1260 if (env->eflags & VM_MASK)
1261 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1262 new_stack = 0;
1263 if (ist != 0)
1264 esp = get_rsp_from_tss(ist + 3);
1265 else
1266 esp = ESP;
1267 esp &= ~0xfLL; /* align stack */
1268 dpl = cpl;
1269 } else {
1270 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1271 new_stack = 0; /* avoid warning */
1272 esp = 0; /* avoid warning */
1273 }
1274
1275 PUSHQ(esp, env->segs[R_SS].selector);
1276 PUSHQ(esp, ESP);
1277 PUSHQ(esp, compute_eflags());
1278 PUSHQ(esp, env->segs[R_CS].selector);
1279 PUSHQ(esp, old_eip);
1280 if (has_error_code) {
1281 PUSHQ(esp, error_code);
1282 }
1283
1284 if (new_stack) {
1285 ss = 0 | dpl;
1286 cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
1287 }
1288 ESP = esp;
1289
1290 selector = (selector & ~3) | dpl;
1291 cpu_x86_load_seg_cache(env, R_CS, selector,
1292 get_seg_base(e1, e2),
1293 get_seg_limit(e1, e2),
1294 e2);
1295 cpu_x86_set_cpl(env, dpl);
1296 env->eip = offset;
1297
1298 /* interrupt gate clear IF mask */
1299 if ((type & 1) == 0) {
1300 env->eflags &= ~IF_MASK;
1301 }
1302
1303#ifndef VBOX
1304 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
1305#else
1306 /*
1307 * We must clear VIP/VIF too on interrupt entry, as otherwise FreeBSD
1308 * gets confused by seemingly changed EFLAGS. See #3491 and
1309 * public bug #2341.
1310 */
1311 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK | VIF_MASK | VIP_MASK);
1312#endif
1313}
1314#endif
1315
1316#if defined(CONFIG_USER_ONLY)
1317void helper_syscall(int next_eip_addend)
1318{
1319 env->exception_index = EXCP_SYSCALL;
1320 env->exception_next_eip = env->eip + next_eip_addend;
1321 cpu_loop_exit();
1322}
1323#else
1324void helper_syscall(int next_eip_addend)
1325{
1326 int selector;
1327
1328 if (!(env->efer & MSR_EFER_SCE)) {
1329 raise_exception_err(EXCP06_ILLOP, 0);
1330 }
1331 selector = (env->star >> 32) & 0xffff;
1332#ifdef TARGET_X86_64
1333 if (env->hflags & HF_LMA_MASK) {
1334 int code64;
1335
1336 ECX = env->eip + next_eip_addend;
1337 env->regs[11] = compute_eflags();
1338
1339 code64 = env->hflags & HF_CS64_MASK;
1340
1341 cpu_x86_set_cpl(env, 0);
1342 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1343 0, 0xffffffff,
1344 DESC_G_MASK | DESC_P_MASK |
1345 DESC_S_MASK |
1346 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
1347 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1348 0, 0xffffffff,
1349 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1350 DESC_S_MASK |
1351 DESC_W_MASK | DESC_A_MASK);
1352 env->eflags &= ~env->fmask;
1353 load_eflags(env->eflags, 0);
1354 if (code64)
1355 env->eip = env->lstar;
1356 else
1357 env->eip = env->cstar;
1358 } else
1359#endif
1360 {
1361 ECX = (uint32_t)(env->eip + next_eip_addend);
1362
1363 cpu_x86_set_cpl(env, 0);
1364 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1365 0, 0xffffffff,
1366 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1367 DESC_S_MASK |
1368 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1369 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1370 0, 0xffffffff,
1371 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1372 DESC_S_MASK |
1373 DESC_W_MASK | DESC_A_MASK);
1374 env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
1375 env->eip = (uint32_t)env->star;
1376 }
1377}
1378#endif
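helper_syscall() takes all of its targets from the SYSCALL MSRs: STAR bits 47:32 give the kernel CS selector (SS is implicitly that value + 8), LSTAR and CSTAR hold the 64-bit and compatibility-mode entry points, SFMASK lists the RFLAGS bits to clear, and in legacy mode the low 32 bits of STAR hold the EIP. A condensed, illustrative sketch of that selection (mirroring the branches above, not a replacement for them):

/* Illustrative only: how the SYSCALL target is selected from the MSRs. */
static void syscall_target_sketch(void)
{
    int selector = (env->star >> 32) & 0xffff;       /* kernel CS; SS = CS + 8 */
#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        env->eflags &= ~env->fmask;                  /* SFMASK MSR */
        env->eip = (env->hflags & HF_CS64_MASK) ? env->lstar   /* 64-bit caller */
                                                : env->cstar;  /* compat caller */
        return;
    }
#endif
    env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);   /* legacy mode */
    env->eip = (uint32_t)env->star;                  /* EIP in STAR[31:0] */
    (void)selector;
}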
1379
1380void helper_sysret(int dflag)
1381{
1382 int cpl, selector;
1383
1384 if (!(env->efer & MSR_EFER_SCE)) {
1385 raise_exception_err(EXCP06_ILLOP, 0);
1386 }
1387 cpl = env->hflags & HF_CPL_MASK;
1388 if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
1389 raise_exception_err(EXCP0D_GPF, 0);
1390 }
1391 selector = (env->star >> 48) & 0xffff;
1392#ifdef TARGET_X86_64
1393 if (env->hflags & HF_LMA_MASK) {
1394 if (dflag == 2) {
1395 cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
1396 0, 0xffffffff,
1397 DESC_G_MASK | DESC_P_MASK |
1398 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1399 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
1400 DESC_L_MASK);
1401 env->eip = ECX;
1402 } else {
1403 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1404 0, 0xffffffff,
1405 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1406 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1407 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1408 env->eip = (uint32_t)ECX;
1409 }
1410 cpu_x86_load_seg_cache(env, R_SS, selector + 8,
1411 0, 0xffffffff,
1412 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1413 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1414 DESC_W_MASK | DESC_A_MASK);
1415 load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
1416 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
1417 cpu_x86_set_cpl(env, 3);
1418 } else
1419#endif
1420 {
1421 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1422 0, 0xffffffff,
1423 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1424 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1425 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1426 env->eip = (uint32_t)ECX;
1427 cpu_x86_load_seg_cache(env, R_SS, selector + 8,
1428 0, 0xffffffff,
1429 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1430 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1431 DESC_W_MASK | DESC_A_MASK);
1432 env->eflags |= IF_MASK;
1433 cpu_x86_set_cpl(env, 3);
1434 }
1435#ifdef USE_KQEMU
1436 if (kqemu_is_ok(env)) {
1437 if (env->hflags & HF_LMA_MASK)
1438 CC_OP = CC_OP_EFLAGS;
1439 env->exception_index = -1;
1440 cpu_loop_exit();
1441 }
1442#endif
1443}
1444
1445#ifdef VBOX
1446/**
1447 * Checks and processes external VMM events.
1448 * Called by op_check_external_event() when any of the flags is set and can be serviced.
1449 */
1450void helper_external_event(void)
1451{
1452#if defined(RT_OS_DARWIN) && defined(VBOX_STRICT)
1453 uintptr_t uSP;
1454# ifdef RT_ARCH_AMD64
1455 __asm__ __volatile__("movq %%rsp, %0" : "=r" (uSP));
1456# else
1457 __asm__ __volatile__("movl %%esp, %0" : "=r" (uSP));
1458# endif
1459 AssertMsg(!(uSP & 15), ("xSP=%#p\n", uSP));
1460#endif
1461 /* Keep in sync with flags checked by gen_check_external_event() */
1462 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD)
1463 {
1464 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1465 ~CPU_INTERRUPT_EXTERNAL_HARD);
1466 cpu_interrupt(env, CPU_INTERRUPT_HARD);
1467 }
1468 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_EXIT)
1469 {
1470 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1471 ~CPU_INTERRUPT_EXTERNAL_EXIT);
1472 cpu_interrupt(env, CPU_INTERRUPT_EXIT);
1473 }
1474 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_DMA)
1475 {
1476 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1477 ~CPU_INTERRUPT_EXTERNAL_DMA);
1478 remR3DmaRun(env);
1479 }
1480 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_TIMER)
1481 {
1482 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1483 ~CPU_INTERRUPT_EXTERNAL_TIMER);
1484 remR3TimersRun(env);
1485 }
1486}
1487/* helper for recording call instruction addresses for later scanning */
1488void helper_record_call()
1489{
1490 if ( !(env->state & CPU_RAW_RING0)
1491 && (env->cr[0] & CR0_PG_MASK)
1492 && !(env->eflags & X86_EFL_IF))
1493 remR3RecordCall(env);
1494}
1495#endif /* VBOX */
1496
1497/* real mode interrupt */
1498static void do_interrupt_real(int intno, int is_int, int error_code,
1499 unsigned int next_eip)
1500{
1501 SegmentCache *dt;
1502 target_ulong ptr, ssp;
1503 int selector;
1504 uint32_t offset, esp;
1505 uint32_t old_cs, old_eip;
1506
1507 /* real mode (simpler !) */
1508 dt = &env->idt;
1509#ifndef VBOX
1510 if (intno * 4 + 3 > dt->limit)
1511#else
1512 if ((unsigned)intno * 4 + 3 > dt->limit)
1513#endif
1514 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
1515 ptr = dt->base + intno * 4;
1516 offset = lduw_kernel(ptr);
1517 selector = lduw_kernel(ptr + 2);
1518 esp = ESP;
1519 ssp = env->segs[R_SS].base;
1520 if (is_int)
1521 old_eip = next_eip;
1522 else
1523 old_eip = env->eip;
1524 old_cs = env->segs[R_CS].selector;
1525 /* XXX: use SS segment size ? */
1526 PUSHW(ssp, esp, 0xffff, compute_eflags());
1527 PUSHW(ssp, esp, 0xffff, old_cs);
1528 PUSHW(ssp, esp, 0xffff, old_eip);
1529
1530 /* update processor state */
1531 ESP = (ESP & ~0xffff) | (esp & 0xffff);
1532 env->eip = offset;
1533 env->segs[R_CS].selector = selector;
1534 env->segs[R_CS].base = (selector << 4);
1535 env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
1536}
1537
1538/* fake user mode interrupt */
1539void do_interrupt_user(int intno, int is_int, int error_code,
1540 target_ulong next_eip)
1541{
1542 SegmentCache *dt;
1543 target_ulong ptr;
1544 int dpl, cpl, shift;
1545 uint32_t e2;
1546
1547 dt = &env->idt;
1548 if (env->hflags & HF_LMA_MASK) {
1549 shift = 4;
1550 } else {
1551 shift = 3;
1552 }
1553 ptr = dt->base + (intno << shift);
1554 e2 = ldl_kernel(ptr + 4);
1555
1556 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1557 cpl = env->hflags & HF_CPL_MASK;
1558 /* check privilege if software int */
1559 if (is_int && dpl < cpl)
1560 raise_exception_err(EXCP0D_GPF, (intno << shift) + 2);
1561
1562 /* Since we emulate only user space, we cannot do more than
1563 exiting the emulation with the suitable exception and error
1564 code */
1565 if (is_int)
1566 EIP = next_eip;
1567}
1568
1569/*
1570 * Begin execution of an interrupt. is_int is TRUE if coming from
1571 * the int instruction. next_eip is the EIP value AFTER the interrupt
1572 * instruction. It is only relevant if is_int is TRUE.
1573 */
1574void do_interrupt(int intno, int is_int, int error_code,
1575 target_ulong next_eip, int is_hw)
1576{
1577 if (loglevel & CPU_LOG_INT) {
1578 if ((env->cr[0] & CR0_PE_MASK)) {
1579 static int count;
1580 fprintf(logfile, "%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
1581 count, intno, error_code, is_int,
1582 env->hflags & HF_CPL_MASK,
1583 env->segs[R_CS].selector, EIP,
1584 (int)env->segs[R_CS].base + EIP,
1585 env->segs[R_SS].selector, ESP);
1586 if (intno == 0x0e) {
1587 fprintf(logfile, " CR2=" TARGET_FMT_lx, env->cr[2]);
1588 } else {
1589 fprintf(logfile, " EAX=" TARGET_FMT_lx, EAX);
1590 }
1591 fprintf(logfile, "\n");
1592 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
1593#if 0
1594 {
1595 int i;
1596 uint8_t *ptr;
1597 fprintf(logfile, " code=");
1598 ptr = env->segs[R_CS].base + env->eip;
1599 for(i = 0; i < 16; i++) {
1600 fprintf(logfile, " %02x", ldub(ptr + i));
1601 }
1602 fprintf(logfile, "\n");
1603 }
1604#endif
1605 count++;
1606 }
1607 }
1608 if (env->cr[0] & CR0_PE_MASK) {
1609#ifdef TARGET_X86_64
1610 if (env->hflags & HF_LMA_MASK) {
1611 do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
1612 } else
1613#endif
1614 {
1615#ifdef VBOX
1616 /* int xx *, v86 code and VME enabled? */
1617 if ( (env->eflags & VM_MASK)
1618 && (env->cr[4] & CR4_VME_MASK)
1619 && is_int
1620 && !is_hw
1621 && env->eip + 1 != next_eip /* single byte int 3 goes straight to the protected mode handler */
1622 )
1623 do_soft_interrupt_vme(intno, error_code, next_eip);
1624 else
1625#endif /* VBOX */
1626 do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
1627 }
1628 } else {
1629 do_interrupt_real(intno, is_int, error_code, next_eip);
1630 }
1631}
1632
1633/*
1634 * Check nested exceptions and change to double or triple fault if
1635 * needed. It should only be called if this is not an interrupt.
1636 * Returns the new exception number.
1637 */
1638static int check_exception(int intno, int *error_code)
1639{
1640 int first_contributory = env->old_exception == 0 ||
1641 (env->old_exception >= 10 &&
1642 env->old_exception <= 13);
1643 int second_contributory = intno == 0 ||
1644 (intno >= 10 && intno <= 13);
1645
1646 if (loglevel & CPU_LOG_INT)
1647 fprintf(logfile, "check_exception old: 0x%x new 0x%x\n",
1648 env->old_exception, intno);
1649
1650 if (env->old_exception == EXCP08_DBLE)
1651 cpu_abort(env, "triple fault");
1652
1653 if ((first_contributory && second_contributory)
1654 || (env->old_exception == EXCP0E_PAGE &&
1655 (second_contributory || (intno == EXCP0E_PAGE)))) {
1656 intno = EXCP08_DBLE;
1657 *error_code = 0;
1658 }
1659
1660 if (second_contributory || (intno == EXCP0E_PAGE) ||
1661 (intno == EXCP08_DBLE))
1662 env->old_exception = intno;
1663
1664 return intno;
1665}
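A few worked cases of the rule above: a contributory fault (#GP) raised while delivering a page fault is promoted to #DF, whereas a page fault raised while delivering a contributory fault is delivered normally, and any fault during #DF delivery ends in cpu_abort("triple fault"). Summarised for reference:

/* Illustrative outcomes for env->old_exception and the new intno:
 *   old = 13 (#GP), intno = 14 (#PF)  -> 14   page fault delivered normally
 *   old = 14 (#PF), intno = 13 (#GP)  -> 8    promoted to #DF
 *   old = 13 (#GP), intno = 13 (#GP)  -> 8    two contributory faults -> #DF
 *   old = 8  (#DF), intno = anything  -> cpu_abort(env, "triple fault")
 */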
1666
1667/*
1668 * Signal an interrupt. It is executed in the main CPU loop.
1669 * is_int is TRUE if coming from the int instruction. next_eip is the
1670 * EIP value AFTER the interrupt instruction. It is only relevant if
1671 * is_int is TRUE.
1672 */
1673void raise_interrupt(int intno, int is_int, int error_code,
1674 int next_eip_addend)
1675{
1676#if defined(VBOX) && defined(DEBUG)
1677 Log2(("raise_interrupt: %x %x %x %RGv\n", intno, is_int, error_code, env->eip + next_eip_addend));
1678#endif
1679 if (!is_int) {
1680 helper_svm_check_intercept_param(SVM_EXIT_EXCP_BASE + intno, error_code);
1681 intno = check_exception(intno, &error_code);
1682 } else {
1683 helper_svm_check_intercept_param(SVM_EXIT_SWINT, 0);
1684 }
1685
1686 env->exception_index = intno;
1687 env->error_code = error_code;
1688 env->exception_is_int = is_int;
1689 env->exception_next_eip = env->eip + next_eip_addend;
1690 cpu_loop_exit();
1691}
1692
1693/* shortcuts to generate exceptions */
1694
1695void (raise_exception_err)(int exception_index, int error_code)
1696{
1697 raise_interrupt(exception_index, 0, error_code, 0);
1698}
1699
1700void raise_exception(int exception_index)
1701{
1702 raise_interrupt(exception_index, 0, 0, 0);
1703}
1704
1705/* SMM support */
1706
1707#if defined(CONFIG_USER_ONLY)
1708
1709void do_smm_enter(void)
1710{
1711}
1712
1713void helper_rsm(void)
1714{
1715}
1716
1717#else
1718
1719#ifdef TARGET_X86_64
1720#define SMM_REVISION_ID 0x00020064
1721#else
1722#define SMM_REVISION_ID 0x00020000
1723#endif
1724
1725void do_smm_enter(void)
1726{
1727 target_ulong sm_state;
1728 SegmentCache *dt;
1729 int i, offset;
1730
1731 if (loglevel & CPU_LOG_INT) {
1732 fprintf(logfile, "SMM: enter\n");
1733 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
1734 }
1735
1736 env->hflags |= HF_SMM_MASK;
1737 cpu_smm_update(env);
1738
1739 sm_state = env->smbase + 0x8000;
1740
1741#ifdef TARGET_X86_64
1742 for(i = 0; i < 6; i++) {
1743 dt = &env->segs[i];
1744 offset = 0x7e00 + i * 16;
1745 stw_phys(sm_state + offset, dt->selector);
1746 stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
1747 stl_phys(sm_state + offset + 4, dt->limit);
1748 stq_phys(sm_state + offset + 8, dt->base);
1749 }
1750
1751 stq_phys(sm_state + 0x7e68, env->gdt.base);
1752 stl_phys(sm_state + 0x7e64, env->gdt.limit);
1753
1754 stw_phys(sm_state + 0x7e70, env->ldt.selector);
1755 stq_phys(sm_state + 0x7e78, env->ldt.base);
1756 stl_phys(sm_state + 0x7e74, env->ldt.limit);
1757 stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);
1758
1759 stq_phys(sm_state + 0x7e88, env->idt.base);
1760 stl_phys(sm_state + 0x7e84, env->idt.limit);
1761
1762 stw_phys(sm_state + 0x7e90, env->tr.selector);
1763 stq_phys(sm_state + 0x7e98, env->tr.base);
1764 stl_phys(sm_state + 0x7e94, env->tr.limit);
1765 stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);
1766
1767 stq_phys(sm_state + 0x7ed0, env->efer);
1768
1769 stq_phys(sm_state + 0x7ff8, EAX);
1770 stq_phys(sm_state + 0x7ff0, ECX);
1771 stq_phys(sm_state + 0x7fe8, EDX);
1772 stq_phys(sm_state + 0x7fe0, EBX);
1773 stq_phys(sm_state + 0x7fd8, ESP);
1774 stq_phys(sm_state + 0x7fd0, EBP);
1775 stq_phys(sm_state + 0x7fc8, ESI);
1776 stq_phys(sm_state + 0x7fc0, EDI);
1777 for(i = 8; i < 16; i++)
1778 stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
1779 stq_phys(sm_state + 0x7f78, env->eip);
1780 stl_phys(sm_state + 0x7f70, compute_eflags());
1781 stl_phys(sm_state + 0x7f68, env->dr[6]);
1782 stl_phys(sm_state + 0x7f60, env->dr[7]);
1783
1784 stl_phys(sm_state + 0x7f48, env->cr[4]);
1785 stl_phys(sm_state + 0x7f50, env->cr[3]);
1786 stl_phys(sm_state + 0x7f58, env->cr[0]);
1787
1788 stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1789 stl_phys(sm_state + 0x7f00, env->smbase);
1790#else
1791 stl_phys(sm_state + 0x7ffc, env->cr[0]);
1792 stl_phys(sm_state + 0x7ff8, env->cr[3]);
1793 stl_phys(sm_state + 0x7ff4, compute_eflags());
1794 stl_phys(sm_state + 0x7ff0, env->eip);
1795 stl_phys(sm_state + 0x7fec, EDI);
1796 stl_phys(sm_state + 0x7fe8, ESI);
1797 stl_phys(sm_state + 0x7fe4, EBP);
1798 stl_phys(sm_state + 0x7fe0, ESP);
1799 stl_phys(sm_state + 0x7fdc, EBX);
1800 stl_phys(sm_state + 0x7fd8, EDX);
1801 stl_phys(sm_state + 0x7fd4, ECX);
1802 stl_phys(sm_state + 0x7fd0, EAX);
1803 stl_phys(sm_state + 0x7fcc, env->dr[6]);
1804 stl_phys(sm_state + 0x7fc8, env->dr[7]);
1805
1806 stl_phys(sm_state + 0x7fc4, env->tr.selector);
1807 stl_phys(sm_state + 0x7f64, env->tr.base);
1808 stl_phys(sm_state + 0x7f60, env->tr.limit);
1809 stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);
1810
1811 stl_phys(sm_state + 0x7fc0, env->ldt.selector);
1812 stl_phys(sm_state + 0x7f80, env->ldt.base);
1813 stl_phys(sm_state + 0x7f7c, env->ldt.limit);
1814 stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);
1815
1816 stl_phys(sm_state + 0x7f74, env->gdt.base);
1817 stl_phys(sm_state + 0x7f70, env->gdt.limit);
1818
1819 stl_phys(sm_state + 0x7f58, env->idt.base);
1820 stl_phys(sm_state + 0x7f54, env->idt.limit);
1821
1822 for(i = 0; i < 6; i++) {
1823 dt = &env->segs[i];
1824 if (i < 3)
1825 offset = 0x7f84 + i * 12;
1826 else
1827 offset = 0x7f2c + (i - 3) * 12;
1828 stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
1829 stl_phys(sm_state + offset + 8, dt->base);
1830 stl_phys(sm_state + offset + 4, dt->limit);
1831 stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
1832 }
1833 stl_phys(sm_state + 0x7f14, env->cr[4]);
1834
1835 stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1836 stl_phys(sm_state + 0x7ef8, env->smbase);
1837#endif
1838 /* init SMM cpu state */
1839
1840#ifdef TARGET_X86_64
1841 cpu_load_efer(env, 0);
1842#endif
1843 load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1844 env->eip = 0x00008000;
1845 cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
1846 0xffffffff, 0);
1847 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
1848 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
1849 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
1850 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
1851 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);
1852
1853 cpu_x86_update_cr0(env,
1854 env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK));
1855 cpu_x86_update_cr4(env, 0);
1856 env->dr[7] = 0x00000400;
1857 CC_OP = CC_OP_EFLAGS;
1858}
1859
1860void helper_rsm(void)
1861{
1862#ifdef VBOX
1863 cpu_abort(env, "helper_rsm");
1864#else /* !VBOX */
1865 target_ulong sm_state;
1868 int i, offset;
1869 uint32_t val;
1870
1871 sm_state = env->smbase + 0x8000;
1872#ifdef TARGET_X86_64
1873 cpu_load_efer(env, ldq_phys(sm_state + 0x7ed0));
1874
1875 for(i = 0; i < 6; i++) {
1876 offset = 0x7e00 + i * 16;
1877 cpu_x86_load_seg_cache(env, i,
1878 lduw_phys(sm_state + offset),
1879 ldq_phys(sm_state + offset + 8),
1880 ldl_phys(sm_state + offset + 4),
1881 (lduw_phys(sm_state + offset + 2) & 0xf0ff) << 8);
1882 }
1883
1884 env->gdt.base = ldq_phys(sm_state + 0x7e68);
1885 env->gdt.limit = ldl_phys(sm_state + 0x7e64);
1886
1887 env->ldt.selector = lduw_phys(sm_state + 0x7e70);
1888 env->ldt.base = ldq_phys(sm_state + 0x7e78);
1889 env->ldt.limit = ldl_phys(sm_state + 0x7e74);
1890 env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;
1891
1892 env->idt.base = ldq_phys(sm_state + 0x7e88);
1893 env->idt.limit = ldl_phys(sm_state + 0x7e84);
1894
1895 env->tr.selector = lduw_phys(sm_state + 0x7e90);
1896 env->tr.base = ldq_phys(sm_state + 0x7e98);
1897 env->tr.limit = ldl_phys(sm_state + 0x7e94);
1898 env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;
1899
1900 EAX = ldq_phys(sm_state + 0x7ff8);
1901 ECX = ldq_phys(sm_state + 0x7ff0);
1902 EDX = ldq_phys(sm_state + 0x7fe8);
1903 EBX = ldq_phys(sm_state + 0x7fe0);
1904 ESP = ldq_phys(sm_state + 0x7fd8);
1905 EBP = ldq_phys(sm_state + 0x7fd0);
1906 ESI = ldq_phys(sm_state + 0x7fc8);
1907 EDI = ldq_phys(sm_state + 0x7fc0);
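    /* R8..R15 continue the same layout: each GPR is restored from 0x7ff8 - 8 * reg_index */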
1908 for(i = 8; i < 16; i++)
1909 env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
1910 env->eip = ldq_phys(sm_state + 0x7f78);
1911 load_eflags(ldl_phys(sm_state + 0x7f70),
1912 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1913 env->dr[6] = ldl_phys(sm_state + 0x7f68);
1914 env->dr[7] = ldl_phys(sm_state + 0x7f60);
1915
1916 cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
1917 cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
1918 cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));
1919
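    /* bit 17 of the SMM revision ID indicates SMBASE relocation support */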
1920 val = ldl_phys(sm_state + 0x7efc); /* revision ID */
1921 if (val & 0x20000) {
1922 env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
1923 }
1924#else
1925 cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
1926 cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
1927 load_eflags(ldl_phys(sm_state + 0x7ff4),
1928 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1929 env->eip = ldl_phys(sm_state + 0x7ff0);
1930 EDI = ldl_phys(sm_state + 0x7fec);
1931 ESI = ldl_phys(sm_state + 0x7fe8);
1932 EBP = ldl_phys(sm_state + 0x7fe4);
1933 ESP = ldl_phys(sm_state + 0x7fe0);
1934 EBX = ldl_phys(sm_state + 0x7fdc);
1935 EDX = ldl_phys(sm_state + 0x7fd8);
1936 ECX = ldl_phys(sm_state + 0x7fd4);
1937 EAX = ldl_phys(sm_state + 0x7fd0);
1938 env->dr[6] = ldl_phys(sm_state + 0x7fcc);
1939 env->dr[7] = ldl_phys(sm_state + 0x7fc8);
1940
1941 env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
1942 env->tr.base = ldl_phys(sm_state + 0x7f64);
1943 env->tr.limit = ldl_phys(sm_state + 0x7f60);
1944 env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;
1945
1946 env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
1947 env->ldt.base = ldl_phys(sm_state + 0x7f80);
1948 env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
1949 env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;
1950
1951 env->gdt.base = ldl_phys(sm_state + 0x7f74);
1952 env->gdt.limit = ldl_phys(sm_state + 0x7f70);
1953
1954 env->idt.base = ldl_phys(sm_state + 0x7f58);
1955 env->idt.limit = ldl_phys(sm_state + 0x7f54);
1956
1957 for(i = 0; i < 6; i++) {
1958 if (i < 3)
1959 offset = 0x7f84 + i * 12;
1960 else
1961 offset = 0x7f2c + (i - 3) * 12;
1962 cpu_x86_load_seg_cache(env, i,
1963 ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
1964 ldl_phys(sm_state + offset + 8),
1965 ldl_phys(sm_state + offset + 4),
1966 (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
1967 }
1968 cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));
1969
1970 val = ldl_phys(sm_state + 0x7efc); /* revision ID */
1971 if (val & 0x20000) {
1972 env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
1973 }
1974#endif
1975 CC_OP = CC_OP_EFLAGS;
1976 env->hflags &= ~HF_SMM_MASK;
1977 cpu_smm_update(env);
1978
1979 if (loglevel & CPU_LOG_INT) {
1980 fprintf(logfile, "SMM: after RSM\n");
1981 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
1982 }
1983#endif /* !VBOX */
1984}
1985
1986#endif /* !CONFIG_USER_ONLY */
1987
1988
1989/* division, flags are undefined */
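/* a zero divisor and a quotient that overflows the destination both raise #DE (EXCP00_DIVZ) */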
1990
1991void helper_divb_AL(target_ulong t0)
1992{
1993 unsigned int num, den, q, r;
1994
1995 num = (EAX & 0xffff);
1996 den = (t0 & 0xff);
1997 if (den == 0) {
1998 raise_exception(EXCP00_DIVZ);
1999 }
2000 q = (num / den);
2001 if (q > 0xff)
2002 raise_exception(EXCP00_DIVZ);
2003 q &= 0xff;
2004 r = (num % den) & 0xff;
2005 EAX = (EAX & ~0xffff) | (r << 8) | q;
2006}
2007
2008void helper_idivb_AL(target_ulong t0)
2009{
2010 int num, den, q, r;
2011
2012 num = (int16_t)EAX;
2013 den = (int8_t)t0;
2014 if (den == 0) {
2015 raise_exception(EXCP00_DIVZ);
2016 }
2017 q = (num / den);
2018 if (q != (int8_t)q)
2019 raise_exception(EXCP00_DIVZ);
2020 q &= 0xff;
2021 r = (num % den) & 0xff;
2022 EAX = (EAX & ~0xffff) | (r << 8) | q;
2023}
2024
2025void helper_divw_AX(target_ulong t0)
2026{
2027 unsigned int num, den, q, r;
2028
2029 num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
2030 den = (t0 & 0xffff);
2031 if (den == 0) {
2032 raise_exception(EXCP00_DIVZ);
2033 }
2034 q = (num / den);
2035 if (q > 0xffff)
2036 raise_exception(EXCP00_DIVZ);
2037 q &= 0xffff;
2038 r = (num % den) & 0xffff;
2039 EAX = (EAX & ~0xffff) | q;
2040 EDX = (EDX & ~0xffff) | r;
2041}
2042
2043void helper_idivw_AX(target_ulong t0)
2044{
2045 int num, den, q, r;
2046
2047 num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
2048 den = (int16_t)t0;
2049 if (den == 0) {
2050 raise_exception(EXCP00_DIVZ);
2051 }
2052 q = (num / den);
2053 if (q != (int16_t)q)
2054 raise_exception(EXCP00_DIVZ);
2055 q &= 0xffff;
2056 r = (num % den) & 0xffff;
2057 EAX = (EAX & ~0xffff) | q;
2058 EDX = (EDX & ~0xffff) | r;
2059}
2060
2061void helper_divl_EAX(target_ulong t0)
2062{
2063 unsigned int den, r;
2064 uint64_t num, q;
2065
2066 num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
2067 den = t0;
2068 if (den == 0) {
2069 raise_exception(EXCP00_DIVZ);
2070 }
2071 q = (num / den);
2072 r = (num % den);
2073 if (q > 0xffffffff)
2074 raise_exception(EXCP00_DIVZ);
2075 EAX = (uint32_t)q;
2076 EDX = (uint32_t)r;
2077}
2078
2079void helper_idivl_EAX(target_ulong t0)
2080{
2081 int den, r;
2082 int64_t num, q;
2083
2084 num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
2085 den = t0;
2086 if (den == 0) {
2087 raise_exception(EXCP00_DIVZ);
2088 }
2089 q = (num / den);
2090 r = (num % den);
2091 if (q != (int32_t)q)
2092 raise_exception(EXCP00_DIVZ);
2093 EAX = (uint32_t)q;
2094 EDX = (uint32_t)r;
2095}
2096
2097/* bcd */
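/* ASCII/decimal adjust helpers: they operate on AL/AH, e.g. AAM with base 10
   and AL=53 yields AH=5, AL=3. */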
2098
2099/* XXX: exception */
2100void helper_aam(int base)
2101{
2102 int al, ah;
2103 al = EAX & 0xff;
2104 ah = al / base;
2105 al = al % base;
2106 EAX = (EAX & ~0xffff) | al | (ah << 8);
2107 CC_DST = al;
2108}
2109
2110void helper_aad(int base)
2111{
2112 int al, ah;
2113 al = EAX & 0xff;
2114 ah = (EAX >> 8) & 0xff;
2115 al = ((ah * base) + al) & 0xff;
2116 EAX = (EAX & ~0xffff) | al;
2117 CC_DST = al;
2118}
2119
2120void helper_aaa(void)
2121{
2122 int icarry;
2123 int al, ah, af;
2124 int eflags;
2125
2126 eflags = cc_table[CC_OP].compute_all();
2127 af = eflags & CC_A;
2128 al = EAX & 0xff;
2129 ah = (EAX >> 8) & 0xff;
2130
2131 icarry = (al > 0xf9);
2132 if (((al & 0x0f) > 9 ) || af) {
2133 al = (al + 6) & 0x0f;
2134 ah = (ah + 1 + icarry) & 0xff;
2135 eflags |= CC_C | CC_A;
2136 } else {
2137 eflags &= ~(CC_C | CC_A);
2138 al &= 0x0f;
2139 }
2140 EAX = (EAX & ~0xffff) | al | (ah << 8);
2141 CC_SRC = eflags;
2142 FORCE_RET();
2143}
2144
2145void helper_aas(void)
2146{
2147 int icarry;
2148 int al, ah, af;
2149 int eflags;
2150
2151 eflags = cc_table[CC_OP].compute_all();
2152 af = eflags & CC_A;
2153 al = EAX & 0xff;
2154 ah = (EAX >> 8) & 0xff;
2155
2156 icarry = (al < 6);
2157 if (((al & 0x0f) > 9 ) || af) {
2158 al = (al - 6) & 0x0f;
2159 ah = (ah - 1 - icarry) & 0xff;
2160 eflags |= CC_C | CC_A;
2161 } else {
2162 eflags &= ~(CC_C | CC_A);
2163 al &= 0x0f;
2164 }
2165 EAX = (EAX & ~0xffff) | al | (ah << 8);
2166 CC_SRC = eflags;
2167 FORCE_RET();
2168}
2169
2170void helper_daa(void)
2171{
2172 int al, af, cf;
2173 int eflags;
2174
2175 eflags = cc_table[CC_OP].compute_all();
2176 cf = eflags & CC_C;
2177 af = eflags & CC_A;
2178 al = EAX & 0xff;
2179
2180 eflags = 0;
2181 if (((al & 0x0f) > 9 ) || af) {
2182 al = (al + 6) & 0xff;
2183 eflags |= CC_A;
2184 }
2185 if ((al > 0x9f) || cf) {
2186 al = (al + 0x60) & 0xff;
2187 eflags |= CC_C;
2188 }
2189 EAX = (EAX & ~0xff) | al;
2190 /* well, speed is not an issue here, so we compute the flags by hand */
2191 eflags |= (al == 0) << 6; /* zf */
2192 eflags |= parity_table[al]; /* pf */
2193 eflags |= (al & 0x80); /* sf */
2194 CC_SRC = eflags;
2195 FORCE_RET();
2196}
2197
2198void helper_das(void)
2199{
2200 int al, al1, af, cf;
2201 int eflags;
2202
2203 eflags = cc_table[CC_OP].compute_all();
2204 cf = eflags & CC_C;
2205 af = eflags & CC_A;
2206 al = EAX & 0xff;
2207
2208 eflags = 0;
2209 al1 = al;
2210 if (((al & 0x0f) > 9 ) || af) {
2211 eflags |= CC_A;
2212 if (al < 6 || cf)
2213 eflags |= CC_C;
2214 al = (al - 6) & 0xff;
2215 }
2216 if ((al1 > 0x99) || cf) {
2217 al = (al - 0x60) & 0xff;
2218 eflags |= CC_C;
2219 }
2220 EAX = (EAX & ~0xff) | al;
2221 /* well, speed is not an issue here, so we compute the flags by hand */
2222 eflags |= (al == 0) << 6; /* zf */
2223 eflags |= parity_table[al]; /* pf */
2224 eflags |= (al & 0x80); /* sf */
2225 CC_SRC = eflags;
2226 FORCE_RET();
2227}
2228
2229void helper_into(int next_eip_addend)
2230{
2231 int eflags;
2232 eflags = cc_table[CC_OP].compute_all();
2233 if (eflags & CC_O) {
2234 raise_interrupt(EXCP04_INTO, 1, 0, next_eip_addend);
2235 }
2236}
2237
2238void helper_cmpxchg8b(target_ulong a0)
2239{
2240 uint64_t d;
2241 int eflags;
2242
2243 eflags = cc_table[CC_OP].compute_all();
2244 d = ldq(a0);
2245 if (d == (((uint64_t)EDX << 32) | (uint32_t)EAX)) {
2246 stq(a0, ((uint64_t)ECX << 32) | (uint32_t)EBX);
2247 eflags |= CC_Z;
2248 } else {
2249 /* always do the store */
2250 stq(a0, d);
2251 EDX = (uint32_t)(d >> 32);
2252 EAX = (uint32_t)d;
2253 eflags &= ~CC_Z;
2254 }
2255 CC_SRC = eflags;
2256}
2257
2258#ifdef TARGET_X86_64
2259void helper_cmpxchg16b(target_ulong a0)
2260{
2261 uint64_t d0, d1;
2262 int eflags;
2263
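    /* the memory operand of CMPXCHG16B must be 16-byte aligned */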
2264 if ((a0 & 0xf) != 0)
2265 raise_exception(EXCP0D_GPF);
2266 eflags = cc_table[CC_OP].compute_all();
2267 d0 = ldq(a0);
2268 d1 = ldq(a0 + 8);
2269 if (d0 == EAX && d1 == EDX) {
2270 stq(a0, EBX);
2271 stq(a0 + 8, ECX);
2272 eflags |= CC_Z;
2273 } else {
2274 /* always do the store */
2275 stq(a0, d0);
2276 stq(a0 + 8, d1);
2277 EDX = d1;
2278 EAX = d0;
2279 eflags &= ~CC_Z;
2280 }
2281 CC_SRC = eflags;
2282}
2283#endif
2284
2285void helper_single_step(void)
2286{
2287 env->dr[6] |= 0x4000;
2288 raise_exception(EXCP01_SSTP);
2289}
2290
2291void helper_cpuid(void)
2292{
2293#ifndef VBOX
2294 uint32_t index;
2295
2296 helper_svm_check_intercept_param(SVM_EXIT_CPUID, 0);
2297
2298 index = (uint32_t)EAX;
2299 /* test if maximum index reached */
2300 if (index & 0x80000000) {
2301 if (index > env->cpuid_xlevel)
2302 index = env->cpuid_level;
2303 } else {
2304 if (index > env->cpuid_level)
2305 index = env->cpuid_level;
2306 }
2307
2308 switch(index) {
2309 case 0:
2310 EAX = env->cpuid_level;
2311 EBX = env->cpuid_vendor1;
2312 EDX = env->cpuid_vendor2;
2313 ECX = env->cpuid_vendor3;
2314 break;
2315 case 1:
2316 EAX = env->cpuid_version;
2317 EBX = (env->cpuid_apic_id << 24) | 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
2318 ECX = env->cpuid_ext_features;
2319 EDX = env->cpuid_features;
2320 break;
2321 case 2:
2322 /* cache info: needed for Pentium Pro compatibility */
2323 EAX = 1;
2324 EBX = 0;
2325 ECX = 0;
2326 EDX = 0x2c307d;
2327 break;
2328 case 4:
2329 /* cache info: needed for Core compatibility */
2330 switch (ECX) {
2331 case 0: /* L1 dcache info */
2332 EAX = 0x0000121;
2333 EBX = 0x1c0003f;
2334 ECX = 0x000003f;
2335 EDX = 0x0000001;
2336 break;
2337 case 1: /* L1 icache info */
2338 EAX = 0x0000122;
2339 EBX = 0x1c0003f;
2340 ECX = 0x000003f;
2341 EDX = 0x0000001;
2342 break;
2343 case 2: /* L2 cache info */
2344 EAX = 0x0000143;
2345 EBX = 0x3c0003f;
2346 ECX = 0x0000fff;
2347 EDX = 0x0000001;
2348 break;
2349 default: /* end of info */
2350 EAX = 0;
2351 EBX = 0;
2352 ECX = 0;
2353 EDX = 0;
2354 break;
2355 }
2356
2357 break;
2358 case 5:
2359 /* mwait info: needed for Core compatibility */
2360 EAX = 0; /* Smallest monitor-line size in bytes */
2361 EBX = 0; /* Largest monitor-line size in bytes */
2362 ECX = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
2363 EDX = 0;
2364 break;
2365 case 6:
2366 /* Thermal and Power Leaf */
2367 EAX = 0;
2368 EBX = 0;
2369 ECX = 0;
2370 EDX = 0;
2371 break;
2372 case 9:
2373 /* Direct Cache Access Information Leaf */
2374 EAX = 0; /* Bits 0-31 in DCA_CAP MSR */
2375 EBX = 0;
2376 ECX = 0;
2377 EDX = 0;
2378 break;
2379 case 0xA:
2380 /* Architectural Performance Monitoring Leaf */
2381 EAX = 0;
2382 EBX = 0;
2383 ECX = 0;
2384 EDX = 0;
2385 break;
2386 case 0x80000000:
2387 EAX = env->cpuid_xlevel;
2388 EBX = env->cpuid_vendor1;
2389 EDX = env->cpuid_vendor2;
2390 ECX = env->cpuid_vendor3;
2391 break;
2392 case 0x80000001:
2393 EAX = env->cpuid_features;
2394 EBX = 0;
2395 ECX = env->cpuid_ext3_features;
2396 EDX = env->cpuid_ext2_features;
2397 break;
2398 case 0x80000002:
2399 case 0x80000003:
2400 case 0x80000004:
2401 EAX = env->cpuid_model[(index - 0x80000002) * 4 + 0];
2402 EBX = env->cpuid_model[(index - 0x80000002) * 4 + 1];
2403 ECX = env->cpuid_model[(index - 0x80000002) * 4 + 2];
2404 EDX = env->cpuid_model[(index - 0x80000002) * 4 + 3];
2405 break;
2406 case 0x80000005:
2407 /* cache info (L1 cache) */
2408 EAX = 0x01ff01ff;
2409 EBX = 0x01ff01ff;
2410 ECX = 0x40020140;
2411 EDX = 0x40020140;
2412 break;
2413 case 0x80000006:
2414 /* cache info (L2 cache) */
2415 EAX = 0;
2416 EBX = 0x42004200;
2417 ECX = 0x02008140;
2418 EDX = 0;
2419 break;
2420 case 0x80000008:
2421 /* virtual & phys address size in low 2 bytes. */
2422/* XXX: This value must match the one used in the MMU code. */
2423 if (env->cpuid_ext2_features & CPUID_EXT2_LM) {
2424 /* 64 bit processor */
2425#if defined(USE_KQEMU)
2426 EAX = 0x00003020; /* 48 bits virtual, 32 bits physical */
2427#else
2428/* XXX: The physical address space is limited to 42 bits in exec.c. */
2429 EAX = 0x00003028; /* 48 bits virtual, 40 bits physical */
2430#endif
2431 } else {
2432#if defined(USE_KQEMU)
2433 EAX = 0x00000020; /* 32 bits physical */
2434#else
2435 if (env->cpuid_features & CPUID_PSE36)
2436 EAX = 0x00000024; /* 36 bits physical */
2437 else
2438 EAX = 0x00000020; /* 32 bits physical */
2439#endif
2440 }
2441 EBX = 0;
2442 ECX = 0;
2443 EDX = 0;
2444 break;
2445 case 0x8000000A:
2446 EAX = 0x00000001;
2447 EBX = 0;
2448 ECX = 0;
2449 EDX = 0;
2450 break;
2451 default:
2452 /* reserved values: zero */
2453 EAX = 0;
2454 EBX = 0;
2455 ECX = 0;
2456 EDX = 0;
2457 break;
2458 }
2459#else /* VBOX */
2460 remR3CpuId(env, EAX, &EAX, &EBX, &ECX, &EDX);
2461#endif /* VBOX */
2462}
2463
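/* ENTER helper: for nesting levels > 1 the previously saved frame pointers are
   copied from the old frame, and the new frame pointer (t1) is pushed last. */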
2464void helper_enter_level(int level, int data32, target_ulong t1)
2465{
2466 target_ulong ssp;
2467 uint32_t esp_mask, esp, ebp;
2468
2469 esp_mask = get_sp_mask(env->segs[R_SS].flags);
2470 ssp = env->segs[R_SS].base;
2471 ebp = EBP;
2472 esp = ESP;
2473 if (data32) {
2474 /* 32 bit */
2475 esp -= 4;
2476 while (--level) {
2477 esp -= 4;
2478 ebp -= 4;
2479 stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
2480 }
2481 esp -= 4;
2482 stl(ssp + (esp & esp_mask), t1);
2483 } else {
2484 /* 16 bit */
2485 esp -= 2;
2486 while (--level) {
2487 esp -= 2;
2488 ebp -= 2;
2489 stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
2490 }
2491 esp -= 2;
2492 stw(ssp + (esp & esp_mask), t1);
2493 }
2494}
2495
2496#ifdef TARGET_X86_64
2497void helper_enter64_level(int level, int data64, target_ulong t1)
2498{
2499 target_ulong esp, ebp;
2500 ebp = EBP;
2501 esp = ESP;
2502
2503 if (data64) {
2504 /* 64 bit */
2505 esp -= 8;
2506 while (--level) {
2507 esp -= 8;
2508 ebp -= 8;
2509 stq(esp, ldq(ebp));
2510 }
2511 esp -= 8;
2512 stq(esp, t1);
2513 } else {
2514 /* 16 bit */
2515 esp -= 2;
2516 while (--level) {
2517 esp -= 2;
2518 ebp -= 2;
2519 stw(esp, lduw(ebp));
2520 }
2521 esp -= 2;
2522 stw(esp, t1);
2523 }
2524}
2525#endif
2526
2527void helper_lldt(int selector)
2528{
2529 SegmentCache *dt;
2530 uint32_t e1, e2;
2531#ifndef VBOX
2532 int index, entry_limit;
2533#else
2534 unsigned int index, entry_limit;
2535#endif
2536 target_ulong ptr;
2537
2538#ifdef VBOX
2539 Log(("helper_lldt_T0: old ldtr=%RTsel {.base=%RGv, .limit=%RGv} new=%RTsel\n",
2540 (RTSEL)env->ldt.selector, (RTGCPTR)env->ldt.base, (RTGCPTR)env->ldt.limit, (RTSEL)(selector & 0xffff)));
2541#endif
2542
2543 selector &= 0xffff;
2544 if ((selector & 0xfffc) == 0) {
2545 /* XXX: NULL selector case: invalid LDT */
2546 env->ldt.base = 0;
2547 env->ldt.limit = 0;
2548 } else {
2549 if (selector & 0x4)
2550 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2551 dt = &env->gdt;
2552 index = selector & ~7;
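        /* LDT descriptors are 16 bytes in long mode, 8 bytes otherwise */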
2553#ifdef TARGET_X86_64
2554 if (env->hflags & HF_LMA_MASK)
2555 entry_limit = 15;
2556 else
2557#endif
2558 entry_limit = 7;
2559 if ((index + entry_limit) > dt->limit)
2560 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2561 ptr = dt->base + index;
2562 e1 = ldl_kernel(ptr);
2563 e2 = ldl_kernel(ptr + 4);
2564 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
2565 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2566 if (!(e2 & DESC_P_MASK))
2567 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2568#ifdef TARGET_X86_64
2569 if (env->hflags & HF_LMA_MASK) {
2570 uint32_t e3;
2571 e3 = ldl_kernel(ptr + 8);
2572 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2573 env->ldt.base |= (target_ulong)e3 << 32;
2574 } else
2575#endif
2576 {
2577 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2578 }
2579 }
2580 env->ldt.selector = selector;
2581#ifdef VBOX
2582 Log(("helper_lldt_T0: new ldtr=%RTsel {.base=%RGv, .limit=%RGv}\n",
2583 (RTSEL)env->ldt.selector, (RTGCPTR)env->ldt.base, (RTGCPTR)env->ldt.limit));
2584#endif
2585}
2586
2587void helper_ltr(int selector)
2588{
2589 SegmentCache *dt;
2590 uint32_t e1, e2;
2591#ifndef VBOX
2592 int index, type, entry_limit;
2593#else
2594 unsigned int index;
2595 int type, entry_limit;
2596#endif
2597 target_ulong ptr;
2598
2599#ifdef VBOX
2600 Log(("helper_ltr: old tr=%RTsel {.base=%RGv, .limit=%RGv, .flags=%RX32} new=%RTsel\n",
2601 (RTSEL)env->tr.selector, (RTGCPTR)env->tr.base, (RTGCPTR)env->tr.limit,
2602 env->tr.flags, (RTSEL)(selector & 0xffff)));
2603#endif
2604 selector &= 0xffff;
2605 if ((selector & 0xfffc) == 0) {
2606 /* NULL selector case: invalid TR */
2607 env->tr.base = 0;
2608 env->tr.limit = 0;
2609 env->tr.flags = 0;
2610 } else {
2611 if (selector & 0x4)
2612 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2613 dt = &env->gdt;
2614 index = selector & ~7;
2615#ifdef TARGET_X86_64
2616 if (env->hflags & HF_LMA_MASK)
2617 entry_limit = 15;
2618 else
2619#endif
2620 entry_limit = 7;
2621 if ((index + entry_limit) > dt->limit)
2622 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2623 ptr = dt->base + index;
2624 e1 = ldl_kernel(ptr);
2625 e2 = ldl_kernel(ptr + 4);
2626 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2627 if ((e2 & DESC_S_MASK) ||
2628 (type != 1 && type != 9))
2629 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2630 if (!(e2 & DESC_P_MASK))
2631 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2632#ifdef TARGET_X86_64
2633 if (env->hflags & HF_LMA_MASK) {
2634 uint32_t e3, e4;
2635 e3 = ldl_kernel(ptr + 8);
2636 e4 = ldl_kernel(ptr + 12);
2637 if ((e4 >> DESC_TYPE_SHIFT) & 0xf)
2638 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2639 load_seg_cache_raw_dt(&env->tr, e1, e2);
2640 env->tr.base |= (target_ulong)e3 << 32;
2641 } else
2642#endif
2643 {
2644 load_seg_cache_raw_dt(&env->tr, e1, e2);
2645 }
2646 e2 |= DESC_TSS_BUSY_MASK;
2647 stl_kernel(ptr + 4, e2);
2648 }
2649 env->tr.selector = selector;
2650#ifdef VBOX
2651 Log(("helper_ltr: new tr=%RTsel {.base=%RGv, .limit=%RGv, .flags=%RX32} new=%RTsel\n",
2652 (RTSEL)env->tr.selector, (RTGCPTR)env->tr.base, (RTGCPTR)env->tr.limit,
2653 env->tr.flags, (RTSEL)(selector & 0xffff)));
2654#endif
2655}
2656
2657/* only works if protected mode and not VM86. seg_reg must be != R_CS */
2658void helper_load_seg(int seg_reg, int selector)
2659{
2660 uint32_t e1, e2;
2661 int cpl, dpl, rpl;
2662 SegmentCache *dt;
2663#ifndef VBOX
2664 int index;
2665#else
2666 unsigned int index;
2667#endif
2668 target_ulong ptr;
2669
2670 selector &= 0xffff;
2671 cpl = env->hflags & HF_CPL_MASK;
2672
2673#ifdef VBOX
2674 /* Trying to load a selector with CPL=1? */
2675 if (cpl == 0 && (selector & 3) == 1 && (env->state & CPU_RAW_RING0))
2676 {
2677 Log(("RPL 1 -> sel %04X -> %04X\n", selector, selector & 0xfffc));
2678 selector = selector & 0xfffc;
2679 }
2680#endif
2681 if ((selector & 0xfffc) == 0) {
2682 /* null selector case */
2683 if (seg_reg == R_SS
2684#ifdef TARGET_X86_64
2685 && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
2686#endif
2687 )
2688 raise_exception_err(EXCP0D_GPF, 0);
2689 cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
2690 } else {
2691
2692 if (selector & 0x4)
2693 dt = &env->ldt;
2694 else
2695 dt = &env->gdt;
2696 index = selector & ~7;
2697 if ((index + 7) > dt->limit)
2698 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2699 ptr = dt->base + index;
2700 e1 = ldl_kernel(ptr);
2701 e2 = ldl_kernel(ptr + 4);
2702
2703 if (!(e2 & DESC_S_MASK))
2704 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2705 rpl = selector & 3;
2706 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2707 if (seg_reg == R_SS) {
2708 /* must be writable segment */
2709 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
2710 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2711 if (rpl != cpl || dpl != cpl)
2712 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2713 } else {
2714 /* must be readable segment */
2715 if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
2716 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2717
2718 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2719 /* if not conforming code, test rights */
2720 if (dpl < cpl || dpl < rpl)
2721 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2722 }
2723 }
2724
2725 if (!(e2 & DESC_P_MASK)) {
2726 if (seg_reg == R_SS)
2727 raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
2728 else
2729 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2730 }
2731
2732 /* set the access bit if not already set */
2733 if (!(e2 & DESC_A_MASK)) {
2734 e2 |= DESC_A_MASK;
2735 stl_kernel(ptr + 4, e2);
2736 }
2737
2738 cpu_x86_load_seg_cache(env, seg_reg, selector,
2739 get_seg_base(e1, e2),
2740 get_seg_limit(e1, e2),
2741 e2);
2742#if 0
2743 fprintf(logfile, "load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
2744 selector, (unsigned long)sc->base, sc->limit, sc->flags);
2745#endif
2746 }
2747}
2748
2749/* protected mode jump */
2750void helper_ljmp_protected(int new_cs, target_ulong new_eip,
2751 int next_eip_addend)
2752{
2753 int gate_cs, type;
2754 uint32_t e1, e2, cpl, dpl, rpl, limit;
2755 target_ulong next_eip;
2756
2757#ifdef VBOX
2758 e1 = e2 = 0;
2759#endif
2760 if ((new_cs & 0xfffc) == 0)
2761 raise_exception_err(EXCP0D_GPF, 0);
2762 if (load_segment(&e1, &e2, new_cs) != 0)
2763 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2764 cpl = env->hflags & HF_CPL_MASK;
2765 if (e2 & DESC_S_MASK) {
2766 if (!(e2 & DESC_CS_MASK))
2767 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2768 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2769 if (e2 & DESC_C_MASK) {
2770 /* conforming code segment */
2771 if (dpl > cpl)
2772 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2773 } else {
2774 /* non conforming code segment */
2775 rpl = new_cs & 3;
2776 if (rpl > cpl)
2777 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2778 if (dpl != cpl)
2779 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2780 }
2781 if (!(e2 & DESC_P_MASK))
2782 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2783 limit = get_seg_limit(e1, e2);
2784 if (new_eip > limit &&
2785 !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK))
2786 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2787 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2788 get_seg_base(e1, e2), limit, e2);
2789 EIP = new_eip;
2790 } else {
2791 /* jump to call or task gate */
2792 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2793 rpl = new_cs & 3;
2794 cpl = env->hflags & HF_CPL_MASK;
2795 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2796 switch(type) {
2797 case 1: /* 286 TSS */
2798 case 9: /* 386 TSS */
2799 case 5: /* task gate */
2800 if (dpl < cpl || dpl < rpl)
2801 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2802 next_eip = env->eip + next_eip_addend;
2803 switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
2804 CC_OP = CC_OP_EFLAGS;
2805 break;
2806 case 4: /* 286 call gate */
2807 case 12: /* 386 call gate */
2808 if ((dpl < cpl) || (dpl < rpl))
2809 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2810 if (!(e2 & DESC_P_MASK))
2811 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2812 gate_cs = e1 >> 16;
2813 new_eip = (e1 & 0xffff);
2814 if (type == 12)
2815 new_eip |= (e2 & 0xffff0000);
2816 if (load_segment(&e1, &e2, gate_cs) != 0)
2817 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2818 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2819 /* must be code segment */
2820 if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
2821 (DESC_S_MASK | DESC_CS_MASK)))
2822 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2823 if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
2824 (!(e2 & DESC_C_MASK) && (dpl != cpl)))
2825 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2826 if (!(e2 & DESC_P_MASK))
2827#ifdef VBOX /* See page 3-514 of 253666.pdf */
2828 raise_exception_err(EXCP0B_NOSEG, gate_cs & 0xfffc);
2829#else
2830 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2831#endif
2832 limit = get_seg_limit(e1, e2);
2833 if (new_eip > limit)
2834 raise_exception_err(EXCP0D_GPF, 0);
2835 cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
2836 get_seg_base(e1, e2), limit, e2);
2837 EIP = new_eip;
2838 break;
2839 default:
2840 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2841 break;
2842 }
2843 }
2844}
2845
2846/* real mode call */
2847void helper_lcall_real(int new_cs, target_ulong new_eip1,
2848 int shift, int next_eip)
2849{
2850 int new_eip;
2851 uint32_t esp, esp_mask;
2852 target_ulong ssp;
2853
2854 new_eip = new_eip1;
2855 esp = ESP;
2856 esp_mask = get_sp_mask(env->segs[R_SS].flags);
2857 ssp = env->segs[R_SS].base;
2858 if (shift) {
2859 PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
2860 PUSHL(ssp, esp, esp_mask, next_eip);
2861 } else {
2862 PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
2863 PUSHW(ssp, esp, esp_mask, next_eip);
2864 }
2865
2866 SET_ESP(esp, esp_mask);
2867 env->eip = new_eip;
2868 env->segs[R_CS].selector = new_cs;
2869 env->segs[R_CS].base = (new_cs << 4);
2870}
2871
2872/* protected mode call */
2873void helper_lcall_protected(int new_cs, target_ulong new_eip,
2874 int shift, int next_eip_addend)
2875{
2876 int new_stack, i;
2877 uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
2878 uint32_t ss, ss_e1, ss_e2, sp, type, ss_dpl, sp_mask;
2879 uint32_t val, limit, old_sp_mask;
2880 target_ulong ssp, old_ssp, next_eip;
2881
2882#ifdef VBOX
2883 ss = ss_e1 = ss_e2 = e1 = e2 = 0;
2884#endif
2885 next_eip = env->eip + next_eip_addend;
2886#ifdef DEBUG_PCALL
2887 if (loglevel & CPU_LOG_PCALL) {
2888 fprintf(logfile, "lcall %04x:%08x s=%d\n",
2889 new_cs, (uint32_t)new_eip, shift);
2890 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
2891 }
2892#endif
2893 if ((new_cs & 0xfffc) == 0)
2894 raise_exception_err(EXCP0D_GPF, 0);
2895 if (load_segment(&e1, &e2, new_cs) != 0)
2896 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2897 cpl = env->hflags & HF_CPL_MASK;
2898#ifdef DEBUG_PCALL
2899 if (loglevel & CPU_LOG_PCALL) {
2900 fprintf(logfile, "desc=%08x:%08x\n", e1, e2);
2901 }
2902#endif
2903 if (e2 & DESC_S_MASK) {
2904 if (!(e2 & DESC_CS_MASK))
2905 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2906 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2907 if (e2 & DESC_C_MASK) {
2908 /* conforming code segment */
2909 if (dpl > cpl)
2910 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2911 } else {
2912 /* non conforming code segment */
2913 rpl = new_cs & 3;
2914 if (rpl > cpl)
2915 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2916 if (dpl != cpl)
2917 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2918 }
2919 if (!(e2 & DESC_P_MASK))
2920 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2921
2922#ifdef TARGET_X86_64
2923 /* XXX: check 16/32 bit cases in long mode */
2924 if (shift == 2) {
2925 target_ulong rsp;
2926 /* 64 bit case */
2927 rsp = ESP;
2928 PUSHQ(rsp, env->segs[R_CS].selector);
2929 PUSHQ(rsp, next_eip);
2930 /* from this point, not restartable */
2931 ESP = rsp;
2932 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2933 get_seg_base(e1, e2),
2934 get_seg_limit(e1, e2), e2);
2935 EIP = new_eip;
2936 } else
2937#endif
2938 {
2939 sp = ESP;
2940 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2941 ssp = env->segs[R_SS].base;
2942 if (shift) {
2943 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2944 PUSHL(ssp, sp, sp_mask, next_eip);
2945 } else {
2946 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2947 PUSHW(ssp, sp, sp_mask, next_eip);
2948 }
2949
2950 limit = get_seg_limit(e1, e2);
2951 if (new_eip > limit)
2952 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2953 /* from this point, not restartable */
2954 SET_ESP(sp, sp_mask);
2955 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2956 get_seg_base(e1, e2), limit, e2);
2957 EIP = new_eip;
2958 }
2959 } else {
2960 /* check gate type */
2961 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
2962 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2963 rpl = new_cs & 3;
2964 switch(type) {
2965 case 1: /* available 286 TSS */
2966 case 9: /* available 386 TSS */
2967 case 5: /* task gate */
2968 if (dpl < cpl || dpl < rpl)
2969 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2970 switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
2971 CC_OP = CC_OP_EFLAGS;
2972 return;
2973 case 4: /* 286 call gate */
2974 case 12: /* 386 call gate */
2975 break;
2976 default:
2977 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2978 break;
2979 }
2980 shift = type >> 3;
2981
2982 if (dpl < cpl || dpl < rpl)
2983 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2984 /* check valid bit */
2985 if (!(e2 & DESC_P_MASK))
2986 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2987 selector = e1 >> 16;
2988 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
2989 param_count = e2 & 0x1f;
2990 if ((selector & 0xfffc) == 0)
2991 raise_exception_err(EXCP0D_GPF, 0);
2992
2993 if (load_segment(&e1, &e2, selector) != 0)
2994 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2995 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
2996 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2997 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2998 if (dpl > cpl)
2999 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
3000 if (!(e2 & DESC_P_MASK))
3001 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
3002
3003 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
3004 /* to inner privilege */
3005 get_ss_esp_from_tss(&ss, &sp, dpl);
3006#ifdef DEBUG_PCALL
3007 if (loglevel & CPU_LOG_PCALL)
3008 fprintf(logfile, "new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n",
3009 ss, sp, param_count, ESP);
3010#endif
3011 if ((ss & 0xfffc) == 0)
3012 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
3013 if ((ss & 3) != dpl)
3014 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
3015 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
3016 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
3017 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
3018 if (ss_dpl != dpl)
3019 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
3020 if (!(ss_e2 & DESC_S_MASK) ||
3021 (ss_e2 & DESC_CS_MASK) ||
3022 !(ss_e2 & DESC_W_MASK))
3023 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
3024 if (!(ss_e2 & DESC_P_MASK))
3025#ifdef VBOX /* See page 3-99 of 253666.pdf */
3026 raise_exception_err(EXCP0C_STACK, ss & 0xfffc);
3027#else
3028 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
3029#endif
3030
3031 // push_size = ((param_count * 2) + 8) << shift;
3032
3033 old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
3034 old_ssp = env->segs[R_SS].base;
3035
3036 sp_mask = get_sp_mask(ss_e2);
3037 ssp = get_seg_base(ss_e1, ss_e2);
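            /* push the caller's SS:ESP on the inner stack, then copy
               param_count parameters over from the old stack */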
3038 if (shift) {
3039 PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
3040 PUSHL(ssp, sp, sp_mask, ESP);
3041 for(i = param_count - 1; i >= 0; i--) {
3042 val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
3043 PUSHL(ssp, sp, sp_mask, val);
3044 }
3045 } else {
3046 PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
3047 PUSHW(ssp, sp, sp_mask, ESP);
3048 for(i = param_count - 1; i >= 0; i--) {
3049 val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
3050 PUSHW(ssp, sp, sp_mask, val);
3051 }
3052 }
3053 new_stack = 1;
3054 } else {
3055 /* to same privilege */
3056 sp = ESP;
3057 sp_mask = get_sp_mask(env->segs[R_SS].flags);
3058 ssp = env->segs[R_SS].base;
3059 // push_size = (4 << shift);
3060 new_stack = 0;
3061 }
3062
3063 if (shift) {
3064 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
3065 PUSHL(ssp, sp, sp_mask, next_eip);
3066 } else {
3067 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
3068 PUSHW(ssp, sp, sp_mask, next_eip);
3069 }
3070
3071 /* from this point, not restartable */
3072
3073 if (new_stack) {
3074 ss = (ss & ~3) | dpl;
3075 cpu_x86_load_seg_cache(env, R_SS, ss,
3076 ssp,
3077 get_seg_limit(ss_e1, ss_e2),
3078 ss_e2);
3079 }
3080
3081 selector = (selector & ~3) | dpl;
3082 cpu_x86_load_seg_cache(env, R_CS, selector,
3083 get_seg_base(e1, e2),
3084 get_seg_limit(e1, e2),
3085 e2);
3086 cpu_x86_set_cpl(env, dpl);
3087 SET_ESP(sp, sp_mask);
3088 EIP = offset;
3089 }
3090#ifdef USE_KQEMU
3091 if (kqemu_is_ok(env)) {
3092 env->exception_index = -1;
3093 cpu_loop_exit();
3094 }
3095#endif
3096}
3097
3098/* real and vm86 mode iret */
3099void helper_iret_real(int shift)
3100{
3101 uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
3102 target_ulong ssp;
3103 int eflags_mask;
3104#ifdef VBOX
3105 bool fVME = false;
3106
3107 remR3TrapClear(env->pVM);
3108#endif /* VBOX */
3109
3110 sp_mask = 0xffff; /* XXXX: use SS segment size ? */
3111 sp = ESP;
3112 ssp = env->segs[R_SS].base;
3113 if (shift == 1) {
3114 /* 32 bits */
3115 POPL(ssp, sp, sp_mask, new_eip);
3116 POPL(ssp, sp, sp_mask, new_cs);
3117 new_cs &= 0xffff;
3118 POPL(ssp, sp, sp_mask, new_eflags);
3119 } else {
3120 /* 16 bits */
3121 POPW(ssp, sp, sp_mask, new_eip);
3122 POPW(ssp, sp, sp_mask, new_cs);
3123 POPW(ssp, sp, sp_mask, new_eflags);
3124 }
3125#ifdef VBOX
3126 if ( (env->eflags & VM_MASK)
3127 && ((env->eflags >> IOPL_SHIFT) & 3) != 3
3128 && (env->cr[4] & CR4_VME_MASK)) /* implied or else we would fault earlier */
3129 {
3130 fVME = true;
3131 /* if virtual interrupt pending and (virtual) interrupts will be enabled -> #GP */
3132 /* if TF will be set -> #GP */
3133 if ( ((new_eflags & IF_MASK) && (env->eflags & VIP_MASK))
3134 || (new_eflags & TF_MASK))
3135 raise_exception(EXCP0D_GPF);
3136 }
3137#endif /* VBOX */
3138 ESP = (ESP & ~sp_mask) | (sp & sp_mask);
3139 env->segs[R_CS].selector = new_cs;
3140 env->segs[R_CS].base = (new_cs << 4);
3141 env->eip = new_eip;
3142#ifdef VBOX
3143 if (fVME)
3144 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
3145 else
3146#endif
3147 if (env->eflags & VM_MASK)
3148 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
3149 else
3150 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
3151 if (shift == 0)
3152 eflags_mask &= 0xffff;
3153 load_eflags(new_eflags, eflags_mask);
3154 env->hflags2 &= ~HF2_NMI_MASK;
3155#ifdef VBOX
3156 if (fVME)
3157 {
3158 if (new_eflags & IF_MASK)
3159 env->eflags |= VIF_MASK;
3160 else
3161 env->eflags &= ~VIF_MASK;
3162 }
3163#endif /* VBOX */
3164}
3165
3166#ifndef VBOX
3167static inline void validate_seg(int seg_reg, int cpl)
3168#else /* VBOX */
3169DECLINLINE(void) validate_seg(int seg_reg, int cpl)
3170#endif /* VBOX */
3171{
3172 int dpl;
3173 uint32_t e2;
3174
3175 /* XXX: on x86_64, we do not want to nullify FS and GS because
3176 they may still contain a valid base. I would be interested to
3177 know how a real x86_64 CPU behaves */
3178 if ((seg_reg == R_FS || seg_reg == R_GS) &&
3179 (env->segs[seg_reg].selector & 0xfffc) == 0)
3180 return;
3181
3182 e2 = env->segs[seg_reg].flags;
3183 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3184 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
3185 /* data or non conforming code segment */
3186 if (dpl < cpl) {
3187 cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
3188 }
3189 }
3190}
3191
3192/* protected mode iret */
3193#ifndef VBOX
3194static inline void helper_ret_protected(int shift, int is_iret, int addend)
3195#else /* VBOX */
3196DECLINLINE(void) helper_ret_protected(int shift, int is_iret, int addend)
3197#endif /* VBOX */
3198{
3199 uint32_t new_cs, new_eflags, new_ss;
3200 uint32_t new_es, new_ds, new_fs, new_gs;
3201 uint32_t e1, e2, ss_e1, ss_e2;
3202 int cpl, dpl, rpl, eflags_mask, iopl;
3203 target_ulong ssp, sp, new_eip, new_esp, sp_mask;
3204
3205#ifdef VBOX
3206 ss_e1 = ss_e2 = e1 = e2 = 0;
3207#endif
3208
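    /* shift selects the operand size of the return: 0 = 16-bit, 1 = 32-bit, 2 = 64-bit pops */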
3209#ifdef TARGET_X86_64
3210 if (shift == 2)
3211 sp_mask = -1;
3212 else
3213#endif
3214 sp_mask = get_sp_mask(env->segs[R_SS].flags);
3215 sp = ESP;
3216 ssp = env->segs[R_SS].base;
3217 new_eflags = 0; /* avoid warning */
3218#ifdef TARGET_X86_64
3219 if (shift == 2) {
3220 POPQ(sp, new_eip);
3221 POPQ(sp, new_cs);
3222 new_cs &= 0xffff;
3223 if (is_iret) {
3224 POPQ(sp, new_eflags);
3225 }
3226 } else
3227#endif
3228 if (shift == 1) {
3229 /* 32 bits */
3230 POPL(ssp, sp, sp_mask, new_eip);
3231 POPL(ssp, sp, sp_mask, new_cs);
3232 new_cs &= 0xffff;
3233 if (is_iret) {
3234 POPL(ssp, sp, sp_mask, new_eflags);
3235#if defined(VBOX) && defined(DEBUG)
3236 printf("iret: new CS %04X\n", new_cs);
3237 printf("iret: new EIP %08X\n", (uint32_t)new_eip);
3238 printf("iret: new EFLAGS %08X\n", new_eflags);
3239 printf("iret: EAX=%08x\n", (uint32_t)EAX);
3240#endif
3241 if (new_eflags & VM_MASK)
3242 goto return_to_vm86;
3243 }
3244#ifdef VBOX
3245 if ((new_cs & 0x3) == 1 && (env->state & CPU_RAW_RING0))
3246 {
3247#ifdef DEBUG
3248 printf("RPL 1 -> new_cs %04X -> %04X\n", new_cs, new_cs & 0xfffc);
3249#endif
3250 new_cs = new_cs & 0xfffc;
3251 }
3252#endif
3253 } else {
3254 /* 16 bits */
3255 POPW(ssp, sp, sp_mask, new_eip);
3256 POPW(ssp, sp, sp_mask, new_cs);
3257 if (is_iret)
3258 POPW(ssp, sp, sp_mask, new_eflags);
3259 }
3260#ifdef DEBUG_PCALL
3261 if (loglevel & CPU_LOG_PCALL) {
3262 fprintf(logfile, "lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
3263 new_cs, new_eip, shift, addend);
3264 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
3265 }
3266#endif
3267 if ((new_cs & 0xfffc) == 0)
3268 {
3269#if defined(VBOX) && defined(DEBUG)
3270 printf("(new_cs & 0xfffc) == 0\n");
3271#endif
3272 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3273 }
3274 if (load_segment(&e1, &e2, new_cs) != 0)
3275 {
3276#if defined(VBOX) && defined(DEBUG)
3277 printf("load_segment failed\n");
3278#endif
3279 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3280 }
3281 if (!(e2 & DESC_S_MASK) ||
3282 !(e2 & DESC_CS_MASK))
3283 {
3284#if defined(VBOX) && defined(DEBUG)
3285 printf("e2 mask %08x\n", e2);
3286#endif
3287 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3288 }
3289 cpl = env->hflags & HF_CPL_MASK;
3290 rpl = new_cs & 3;
3291 if (rpl < cpl)
3292 {
3293#if defined(VBOX) && defined(DEBUG)
3294 printf("rpl < cpl (%d vs %d)\n", rpl, cpl);
3295#endif
3296 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3297 }
3298 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3299 if (e2 & DESC_C_MASK) {
3300 if (dpl > rpl)
3301 {
3302#if defined(VBOX) && defined(DEBUG)
3303 printf("dpl > rpl (%d vs %d)\n", dpl, rpl);
3304#endif
3305 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3306 }
3307 } else {
3308 if (dpl != rpl)
3309 {
3310#if defined(VBOX) && defined(DEBUG)
3311 printf("dpl != rpl (%d vs %d) e1=%x e2=%x\n", dpl, rpl, e1, e2);
3312#endif
3313 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3314 }
3315 }
3316 if (!(e2 & DESC_P_MASK))
3317 {
3318#if defined(VBOX) && defined(DEBUG)
3319 printf("DESC_P_MASK e2=%08x\n", e2);
3320#endif
3321 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
3322 }
3323
3324 sp += addend;
3325 if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
3326 ((env->hflags & HF_CS64_MASK) && !is_iret))) {
3327 /* return to same privilege level */
3328 cpu_x86_load_seg_cache(env, R_CS, new_cs,
3329 get_seg_base(e1, e2),
3330 get_seg_limit(e1, e2),
3331 e2);
3332 } else {
3333 /* return to different privilege level */
3334#ifdef TARGET_X86_64
3335 if (shift == 2) {
3336 POPQ(sp, new_esp);
3337 POPQ(sp, new_ss);
3338 new_ss &= 0xffff;
3339 } else
3340#endif
3341 if (shift == 1) {
3342 /* 32 bits */
3343 POPL(ssp, sp, sp_mask, new_esp);
3344 POPL(ssp, sp, sp_mask, new_ss);
3345 new_ss &= 0xffff;
3346 } else {
3347 /* 16 bits */
3348 POPW(ssp, sp, sp_mask, new_esp);
3349 POPW(ssp, sp, sp_mask, new_ss);
3350 }
3351#ifdef DEBUG_PCALL
3352 if (loglevel & CPU_LOG_PCALL) {
3353 fprintf(logfile, "new ss:esp=%04x:" TARGET_FMT_lx "\n",
3354 new_ss, new_esp);
3355 }
3356#endif
3357 if ((new_ss & 0xfffc) == 0) {
3358#ifdef TARGET_X86_64
3359 /* NULL ss is allowed in long mode if cpl != 3 */
3360 /* XXX: test CS64 ? */
3361 if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
3362 cpu_x86_load_seg_cache(env, R_SS, new_ss,
3363 0, 0xffffffff,
3364 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3365 DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
3366 DESC_W_MASK | DESC_A_MASK);
3367 ss_e2 = DESC_B_MASK; /* XXX: should not be needed ? */
3368 } else
3369#endif
3370 {
3371 raise_exception_err(EXCP0D_GPF, 0);
3372 }
3373 } else {
3374 if ((new_ss & 3) != rpl)
3375 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3376 if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
3377 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3378 if (!(ss_e2 & DESC_S_MASK) ||
3379 (ss_e2 & DESC_CS_MASK) ||
3380 !(ss_e2 & DESC_W_MASK))
3381 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3382 dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
3383 if (dpl != rpl)
3384 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3385 if (!(ss_e2 & DESC_P_MASK))
3386 raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
3387 cpu_x86_load_seg_cache(env, R_SS, new_ss,
3388 get_seg_base(ss_e1, ss_e2),
3389 get_seg_limit(ss_e1, ss_e2),
3390 ss_e2);
3391 }
3392
3393 cpu_x86_load_seg_cache(env, R_CS, new_cs,
3394 get_seg_base(e1, e2),
3395 get_seg_limit(e1, e2),
3396 e2);
3397 cpu_x86_set_cpl(env, rpl);
3398 sp = new_esp;
3399#ifdef TARGET_X86_64
3400 if (env->hflags & HF_CS64_MASK)
3401 sp_mask = -1;
3402 else
3403#endif
3404 sp_mask = get_sp_mask(ss_e2);
3405
3406 /* validate data segments */
3407 validate_seg(R_ES, rpl);
3408 validate_seg(R_DS, rpl);
3409 validate_seg(R_FS, rpl);
3410 validate_seg(R_GS, rpl);
3411
3412 sp += addend;
3413 }
3414 SET_ESP(sp, sp_mask);
3415 env->eip = new_eip;
3416 if (is_iret) {
3417 /* NOTE: 'cpl' is the _old_ CPL */
3418 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
3419 if (cpl == 0)
3420#ifdef VBOX
3421 eflags_mask |= IOPL_MASK | VIF_MASK | VIP_MASK;
3422#else
3423 eflags_mask |= IOPL_MASK;
3424#endif
3425 iopl = (env->eflags >> IOPL_SHIFT) & 3;
3426 if (cpl <= iopl)
3427 eflags_mask |= IF_MASK;
3428 if (shift == 0)
3429 eflags_mask &= 0xffff;
3430 load_eflags(new_eflags, eflags_mask);
3431 }
3432 return;
3433
3434 return_to_vm86:
3435 POPL(ssp, sp, sp_mask, new_esp);
3436 POPL(ssp, sp, sp_mask, new_ss);
3437 POPL(ssp, sp, sp_mask, new_es);
3438 POPL(ssp, sp, sp_mask, new_ds);
3439 POPL(ssp, sp, sp_mask, new_fs);
3440 POPL(ssp, sp, sp_mask, new_gs);
3441
3442 /* modify processor state */
3443 load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
3444 IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
3445 load_seg_vm(R_CS, new_cs & 0xffff);
3446 cpu_x86_set_cpl(env, 3);
3447 load_seg_vm(R_SS, new_ss & 0xffff);
3448 load_seg_vm(R_ES, new_es & 0xffff);
3449 load_seg_vm(R_DS, new_ds & 0xffff);
3450 load_seg_vm(R_FS, new_fs & 0xffff);
3451 load_seg_vm(R_GS, new_gs & 0xffff);
3452
3453 env->eip = new_eip & 0xffff;
3454 ESP = new_esp;
3455}
3456
3457void helper_iret_protected(int shift, int next_eip)
3458{
3459 int tss_selector, type;
3460 uint32_t e1, e2;
3461
3462#ifdef VBOX
3463 e1 = e2 = 0;
3464 remR3TrapClear(env->pVM);
3465#endif
3466
3467 /* specific case for TSS */
3468 if (env->eflags & NT_MASK) {
3469#ifdef TARGET_X86_64
3470 if (env->hflags & HF_LMA_MASK)
3471 raise_exception_err(EXCP0D_GPF, 0);
3472#endif
3473 tss_selector = lduw_kernel(env->tr.base + 0);
3474 if (tss_selector & 4)
3475 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
3476 if (load_segment(&e1, &e2, tss_selector) != 0)
3477 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
3478 type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
3479 /* NOTE: we check both segment and busy TSS */
3480 if (type != 3)
3481 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
3482 switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
3483 } else {
3484 helper_ret_protected(shift, 1, 0);
3485 }
3486 env->hflags2 &= ~HF2_NMI_MASK;
3487#ifdef USE_KQEMU
3488 if (kqemu_is_ok(env)) {
3489 CC_OP = CC_OP_EFLAGS;
3490 env->exception_index = -1;
3491 cpu_loop_exit();
3492 }
3493#endif
3494}
3495
3496void helper_lret_protected(int shift, int addend)
3497{
3498 helper_ret_protected(shift, 0, addend);
3499#ifdef USE_KQEMU
3500 if (kqemu_is_ok(env)) {
3501 env->exception_index = -1;
3502 cpu_loop_exit();
3503 }
3504#endif
3505}
3506
3507void helper_sysenter(void)
3508{
3509 if (env->sysenter_cs == 0) {
3510 raise_exception_err(EXCP0D_GPF, 0);
3511 }
3512 env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
3513 cpu_x86_set_cpl(env, 0);
3514
3515#ifdef TARGET_X86_64
3516 if (env->hflags & HF_LMA_MASK) {
3517 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
3518 0, 0xffffffff,
3519 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3520 DESC_S_MASK |
3521 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
3522 } else
3523#endif
3524 {
3525 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
3526 0, 0xffffffff,
3527 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3528 DESC_S_MASK |
3529 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
3530 }
3531 cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
3532 0, 0xffffffff,
3533 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3534 DESC_S_MASK |
3535 DESC_W_MASK | DESC_A_MASK);
3536 ESP = env->sysenter_esp;
3537 EIP = env->sysenter_eip;
3538}
3539
3540void helper_sysexit(int dflag)
3541{
3542 int cpl;
3543
3544 cpl = env->hflags & HF_CPL_MASK;
3545 if (env->sysenter_cs == 0 || cpl != 0) {
3546 raise_exception_err(EXCP0D_GPF, 0);
3547 }
3548 cpu_x86_set_cpl(env, 3);
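    /* the return CS/SS selectors are fixed offsets from IA32_SYSENTER_CS:
       +16/+24 for a 32-bit exit, +32/+40 for a 64-bit one, with RPL forced to 3 */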
3549#ifdef TARGET_X86_64
3550 if (dflag == 2) {
3551 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) | 3,
3552 0, 0xffffffff,
3553 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3554 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3555 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
3556 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) | 3,
3557 0, 0xffffffff,
3558 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3559 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3560 DESC_W_MASK | DESC_A_MASK);
3561 } else
3562#endif
3563 {
3564 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
3565 0, 0xffffffff,
3566 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3567 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3568 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
3569 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
3570 0, 0xffffffff,
3571 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3572 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3573 DESC_W_MASK | DESC_A_MASK);
3574 }
3575 ESP = ECX;
3576 EIP = EDX;
3577#ifdef USE_KQEMU
3578 if (kqemu_is_ok(env)) {
3579 env->exception_index = -1;
3580 cpu_loop_exit();
3581 }
3582#endif
3583}
3584
3585#if defined(CONFIG_USER_ONLY)
3586target_ulong helper_read_crN(int reg)
3587{
3588 return 0;
3589}
3590
3591void helper_write_crN(int reg, target_ulong t0)
3592{
3593}
3594#else
3595target_ulong helper_read_crN(int reg)
3596{
3597 target_ulong val;
3598
3599 helper_svm_check_intercept_param(SVM_EXIT_READ_CR0 + reg, 0);
3600 switch(reg) {
3601 default:
3602 val = env->cr[reg];
3603 break;
3604 case 8:
3605 if (!(env->hflags2 & HF2_VINTR_MASK)) {
3606 val = cpu_get_apic_tpr(env);
3607 } else {
3608 val = env->v_tpr;
3609 }
3610 break;
3611 }
3612 return val;
3613}
3614
3615void helper_write_crN(int reg, target_ulong t0)
3616{
3617 helper_svm_check_intercept_param(SVM_EXIT_WRITE_CR0 + reg, 0);
3618 switch(reg) {
3619 case 0:
3620 cpu_x86_update_cr0(env, t0);
3621 break;
3622 case 3:
3623 cpu_x86_update_cr3(env, t0);
3624 break;
3625 case 4:
3626 cpu_x86_update_cr4(env, t0);
3627 break;
3628 case 8:
3629 if (!(env->hflags2 & HF2_VINTR_MASK)) {
3630 cpu_set_apic_tpr(env, t0);
3631 }
3632 env->v_tpr = t0 & 0x0f;
3633 break;
3634 default:
3635 env->cr[reg] = t0;
3636 break;
3637 }
3638}
3639#endif
3640
3641void helper_lmsw(target_ulong t0)
3642{
3643 /* only the 4 lower bits of CR0 are modified; LMSW cannot clear PE
3644 once it has been set. */
3645 t0 = (env->cr[0] & ~0xe) | (t0 & 0xf);
3646 helper_write_crN(0, t0);
3647}
3648
3649void helper_clts(void)
3650{
3651 env->cr[0] &= ~CR0_TS_MASK;
3652 env->hflags &= ~HF_TS_MASK;
3653}
3654
3655/* XXX: do more */
3656void helper_movl_drN_T0(int reg, target_ulong t0)
3657{
3658 env->dr[reg] = t0;
3659}
3660
3661void helper_invlpg(target_ulong addr)
3662{
3663 helper_svm_check_intercept_param(SVM_EXIT_INVLPG, 0);
3664 tlb_flush_page(env, addr);
3665}
3666
3667void helper_rdtsc(void)
3668{
3669 uint64_t val;
3670
3671 if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3672 raise_exception(EXCP0D_GPF);
3673 }
3674 helper_svm_check_intercept_param(SVM_EXIT_RDTSC, 0);
3675
3676 val = cpu_get_tsc(env) + env->tsc_offset;
3677 EAX = (uint32_t)(val);
3678 EDX = (uint32_t)(val >> 32);
3679}
3680
3681#ifdef VBOX
3682void helper_rdtscp(void)
3683{
3684 uint64_t val;
3685 if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3686 raise_exception(EXCP0D_GPF);
3687 }
3688
3689 val = cpu_get_tsc(env);
3690 EAX = (uint32_t)(val);
3691 EDX = (uint32_t)(val >> 32);
3692 ECX = cpu_rdmsr(env, MSR_K8_TSC_AUX);
3693}
3694#endif
3695
3696void helper_rdpmc(void)
3697{
3698#ifdef VBOX
3699 /* If X86_CR4_PCE is *not* set, then CPL must be zero. */
3700 if (!(env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3701 raise_exception(EXCP0D_GPF);
3702 }
3703 /* Just return zero here; rather tricky to properly emulate this, especially as the specs are a mess. */
3704 EAX = 0;
3705 EDX = 0;
3706#else
3707 if (!(env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3708 raise_exception(EXCP0D_GPF);
3709 }
3710 helper_svm_check_intercept_param(SVM_EXIT_RDPMC, 0);
3711
3712 /* currently unimplemented */
3713 raise_exception_err(EXCP06_ILLOP, 0);
3714#endif
3715}
3716
3717#if defined(CONFIG_USER_ONLY)
3718void helper_wrmsr(void)
3719{
3720}
3721
3722void helper_rdmsr(void)
3723{
3724}
3725#else
3726void helper_wrmsr(void)
3727{
3728 uint64_t val;
3729
3730 helper_svm_check_intercept_param(SVM_EXIT_MSR, 1);
3731
3732 val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
3733
3734 switch((uint32_t)ECX) {
3735 case MSR_IA32_SYSENTER_CS:
3736 env->sysenter_cs = val & 0xffff;
3737 break;
3738 case MSR_IA32_SYSENTER_ESP:
3739 env->sysenter_esp = val;
3740 break;
3741 case MSR_IA32_SYSENTER_EIP:
3742 env->sysenter_eip = val;
3743 break;
3744 case MSR_IA32_APICBASE:
3745 cpu_set_apic_base(env, val);
3746 break;
3747 case MSR_EFER:
3748 {
3749 uint64_t update_mask;
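            /* only EFER bits backed by the reported CPUID features may be changed by the guest */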
3750 update_mask = 0;
3751 if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL)
3752 update_mask |= MSR_EFER_SCE;
3753 if (env->cpuid_ext2_features & CPUID_EXT2_LM)
3754 update_mask |= MSR_EFER_LME;
3755 if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
3756 update_mask |= MSR_EFER_FFXSR;
3757 if (env->cpuid_ext2_features & CPUID_EXT2_NX)
3758 update_mask |= MSR_EFER_NXE;
3759 if (env->cpuid_ext3_features & CPUID_EXT3_SVM)
3760 update_mask |= MSR_EFER_SVME;
3761 cpu_load_efer(env, (env->efer & ~update_mask) |
3762 (val & update_mask));
3763 }
3764 break;
3765 case MSR_STAR:
3766 env->star = val;
3767 break;
3768 case MSR_PAT:
3769 env->pat = val;
3770 break;
3771 case MSR_VM_HSAVE_PA:
3772 env->vm_hsave = val;
3773 break;
3774#ifdef TARGET_X86_64
3775 case MSR_LSTAR:
3776 env->lstar = val;
3777 break;
3778 case MSR_CSTAR:
3779 env->cstar = val;
3780 break;
3781 case MSR_FMASK:
3782 env->fmask = val;
3783 break;
3784 case MSR_FSBASE:
3785 env->segs[R_FS].base = val;
3786 break;
3787 case MSR_GSBASE:
3788 env->segs[R_GS].base = val;
3789 break;
3790 case MSR_KERNELGSBASE:
3791 env->kernelgsbase = val;
3792 break;
3793#endif
3794 default:
3795#ifndef VBOX
3796 /* XXX: exception ? */
3797 break;
3798#else /* VBOX */
3799 {
3800 uint32_t ecx = (uint32_t)ECX;
3801 /* In X2APIC specification this range is reserved for APIC control. */
3802 if (ecx >= MSR_APIC_RANGE_START && ecx < MSR_APIC_RANGE_END)
3803 cpu_apic_wrmsr(env, ecx, val);
3804 /** @todo else exception? */
3805 break;
3806 }
3807 case MSR_K8_TSC_AUX:
3808 cpu_wrmsr(env, MSR_K8_TSC_AUX, val);
3809 break;
3810#endif /* VBOX */
3811 }
3812}
3813
3814void helper_rdmsr(void)
3815{
3816 uint64_t val;
3817 helper_svm_check_intercept_param(SVM_EXIT_MSR, 0);
3818
3819 switch((uint32_t)ECX) {
3820 case MSR_IA32_SYSENTER_CS:
3821 val = env->sysenter_cs;
3822 break;
3823 case MSR_IA32_SYSENTER_ESP:
3824 val = env->sysenter_esp;
3825 break;
3826 case MSR_IA32_SYSENTER_EIP:
3827 val = env->sysenter_eip;
3828 break;
3829 case MSR_IA32_APICBASE:
3830 val = cpu_get_apic_base(env);
3831 break;
3832 case MSR_EFER:
3833 val = env->efer;
3834 break;
3835 case MSR_STAR:
3836 val = env->star;
3837 break;
3838 case MSR_PAT:
3839 val = env->pat;
3840 break;
3841 case MSR_VM_HSAVE_PA:
3842 val = env->vm_hsave;
3843 break;
3844#ifdef VBOX
3845 case MSR_IA32_PERF_STATUS:
3846 case MSR_IA32_PLATFORM_INFO:
3847 case MSR_IA32_FSB_CLOCK_STS:
3848 case MSR_IA32_THERM_STATUS:
3849 val = CPUMGetGuestMsr(env->pVCpu, (uint32_t)ECX);
3850 break;
3851#else
3852 case MSR_IA32_PERF_STATUS:
3853 /* tsc_increment_by_tick */
3854 val = 1000ULL;
3855 /* CPU multiplier */
3856 val |= ((uint64_t)4ULL << 40);
3857 break;
3858#endif
3859#ifdef TARGET_X86_64
3860 case MSR_LSTAR:
3861 val = env->lstar;
3862 break;
3863 case MSR_CSTAR:
3864 val = env->cstar;
3865 break;
3866 case MSR_FMASK:
3867 val = env->fmask;
3868 break;
3869 case MSR_FSBASE:
3870 val = env->segs[R_FS].base;
3871 break;
3872 case MSR_GSBASE:
3873 val = env->segs[R_GS].base;
3874 break;
3875 case MSR_KERNELGSBASE:
3876 val = env->kernelgsbase;
3877 break;
3878#endif
3879#ifdef USE_KQEMU
3880 case MSR_QPI_COMMBASE:
3881 if (env->kqemu_enabled) {
3882 val = kqemu_comm_base;
3883 } else {
3884 val = 0;
3885 }
3886 break;
3887#endif
3888 default:
3889#ifndef VBOX
3890 /* XXX: exception ? */
3891 val = 0;
3892 break;
3893#else /* VBOX */
3894 {
3895 uint32_t ecx = (uint32_t)ECX;
3896 /* In X2APIC specification this range is reserved for APIC control. */
3897 if (ecx >= MSR_APIC_RANGE_START && ecx < MSR_APIC_RANGE_END)
3898 val = cpu_apic_rdmsr(env, ecx);
3899 else
3900 val = 0; /** @todo else exception? */
3901 break;
3902 }
3903 case MSR_IA32_TSC:
3904 case MSR_K8_TSC_AUX:
3905 val = cpu_rdmsr(env, (uint32_t)ECX);
3906 break;
3907#endif /* VBOX */
3908 }
3909 EAX = (uint32_t)(val);
3910 EDX = (uint32_t)(val >> 32);
3911}
3912#endif
3913
3914target_ulong helper_lsl(target_ulong selector1)
3915{
3916 unsigned int limit;
3917 uint32_t e1, e2, eflags, selector;
3918 int rpl, dpl, cpl, type;
3919
3920 selector = selector1 & 0xffff;
3921 eflags = cc_table[CC_OP].compute_all();
3922 if (load_segment(&e1, &e2, selector) != 0)
3923 goto fail;
3924 rpl = selector & 3;
3925 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3926 cpl = env->hflags & HF_CPL_MASK;
3927 if (e2 & DESC_S_MASK) {
3928 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3929 /* conforming */
3930 } else {
3931 if (dpl < cpl || dpl < rpl)
3932 goto fail;
3933 }
3934 } else {
3935 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
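        /* among system descriptors, only TSS (types 1, 3, 9, 11) and LDT (type 2)
           have a limit that LSL may report */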
3936 switch(type) {
3937 case 1:
3938 case 2:
3939 case 3:
3940 case 9:
3941 case 11:
3942 break;
3943 default:
3944 goto fail;
3945 }
3946 if (dpl < cpl || dpl < rpl) {
3947 fail:
3948 CC_SRC = eflags & ~CC_Z;
3949 return 0;
3950 }
3951 }
3952 limit = get_seg_limit(e1, e2);
3953 CC_SRC = eflags | CC_Z;
3954 return limit;
3955}
3956
3957target_ulong helper_lar(target_ulong selector1)
3958{
3959 uint32_t e1, e2, eflags, selector;
3960 int rpl, dpl, cpl, type;
3961
3962 selector = selector1 & 0xffff;
3963 eflags = cc_table[CC_OP].compute_all();
3964 if ((selector & 0xfffc) == 0)
3965 goto fail;
3966 if (load_segment(&e1, &e2, selector) != 0)
3967 goto fail;
3968 rpl = selector & 3;
3969 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3970 cpl = env->hflags & HF_CPL_MASK;
3971 if (e2 & DESC_S_MASK) {
3972 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3973 /* conforming */
3974 } else {
3975 if (dpl < cpl || dpl < rpl)
3976 goto fail;
3977 }
3978 } else {
3979 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
3980 switch(type) {
3981 case 1:
3982 case 2:
3983 case 3:
3984 case 4:
3985 case 5:
3986 case 9:
3987 case 11:
3988 case 12:
3989 break;
3990 default:
3991 goto fail;
3992 }
3993 if (dpl < cpl || dpl < rpl) {
3994 fail:
3995 CC_SRC = eflags & ~CC_Z;
3996 return 0;
3997 }
3998 }
3999 CC_SRC = eflags | CC_Z;
4000 return e2 & 0x00f0ff00;
4001}
4002
4003void helper_verr(target_ulong selector1)
4004{
4005 uint32_t e1, e2, eflags, selector;
4006 int rpl, dpl, cpl;
4007
4008 selector = selector1 & 0xffff;
4009 eflags = cc_table[CC_OP].compute_all();
4010 if ((selector & 0xfffc) == 0)
4011 goto fail;
4012 if (load_segment(&e1, &e2, selector) != 0)
4013 goto fail;
4014 if (!(e2 & DESC_S_MASK))
4015 goto fail;
4016 rpl = selector & 3;
4017 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
4018 cpl = env->hflags & HF_CPL_MASK;
4019 if (e2 & DESC_CS_MASK) {
4020 if (!(e2 & DESC_R_MASK))
4021 goto fail;
4022 if (!(e2 & DESC_C_MASK)) {
4023 if (dpl < cpl || dpl < rpl)
4024 goto fail;
4025 }
4026 } else {
4027 if (dpl < cpl || dpl < rpl) {
4028 fail:
4029 CC_SRC = eflags & ~CC_Z;
4030 return;
4031 }
4032 }
4033 CC_SRC = eflags | CC_Z;
4034}
4035
4036void helper_verw(target_ulong selector1)
4037{
4038 uint32_t e1, e2, eflags, selector;
4039 int rpl, dpl, cpl;
4040
4041 selector = selector1 & 0xffff;
4042 eflags = cc_table[CC_OP].compute_all();
4043 if ((selector & 0xfffc) == 0)
4044 goto fail;
4045 if (load_segment(&e1, &e2, selector) != 0)
4046 goto fail;
4047 if (!(e2 & DESC_S_MASK))
4048 goto fail;
4049 rpl = selector & 3;
4050 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
4051 cpl = env->hflags & HF_CPL_MASK;
4052 if (e2 & DESC_CS_MASK) {
4053 goto fail;
4054 } else {
4055 if (dpl < cpl || dpl < rpl)
4056 goto fail;
4057 if (!(e2 & DESC_W_MASK)) {
4058 fail:
4059 CC_SRC = eflags & ~CC_Z;
4060 return;
4061 }
4062 }
4063 CC_SRC = eflags | CC_Z;
4064}
4065
4066/* x87 FPU helpers */
4067
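/* Records a pending x87 exception in the status word; if the exception is
   unmasked in the control word, the error-summary and busy bits are set as
   well so that a later FWAIT can deliver it. */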
4068static void fpu_set_exception(int mask)
4069{
4070 env->fpus |= mask;
4071 if (env->fpus & (~env->fpuc & FPUC_EM))
4072 env->fpus |= FPUS_SE | FPUS_B;
4073}
4074
4075#ifndef VBOX
4076static inline CPU86_LDouble helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
4077#else /* VBOX */
4078DECLINLINE(CPU86_LDouble) helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
4079#endif /* VBOX */
4080{
4081 if (b == 0.0)
4082 fpu_set_exception(FPUS_ZE);
4083 return a / b;
4084}
4085
4086void fpu_raise_exception(void)
4087{
4088 if (env->cr[0] & CR0_NE_MASK) {
4089 raise_exception(EXCP10_COPR);
4090 }
4091#if !defined(CONFIG_USER_ONLY)
4092 else {
4093 cpu_set_ferr(env);
4094 }
4095#endif
4096}
4097
4098void helper_flds_FT0(uint32_t val)
4099{
4100 union {
4101 float32 f;
4102 uint32_t i;
4103 } u;
4104 u.i = val;
4105 FT0 = float32_to_floatx(u.f, &env->fp_status);
4106}
4107
4108void helper_fldl_FT0(uint64_t val)
4109{
4110 union {
4111 float64 f;
4112 uint64_t i;
4113 } u;
4114 u.i = val;
4115 FT0 = float64_to_floatx(u.f, &env->fp_status);
4116}
4117
4118void helper_fildl_FT0(int32_t val)
4119{
4120 FT0 = int32_to_floatx(val, &env->fp_status);
4121}
4122
4123void helper_flds_ST0(uint32_t val)
4124{
4125 int new_fpstt;
4126 union {
4127 float32 f;
4128 uint32_t i;
4129 } u;
4130 new_fpstt = (env->fpstt - 1) & 7;
4131 u.i = val;
4132 env->fpregs[new_fpstt].d = float32_to_floatx(u.f, &env->fp_status);
4133 env->fpstt = new_fpstt;
4134 env->fptags[new_fpstt] = 0; /* validate stack entry */
4135}
4136
4137void helper_fldl_ST0(uint64_t val)
4138{
4139 int new_fpstt;
4140 union {
4141 float64 f;
4142 uint64_t i;
4143 } u;
4144 new_fpstt = (env->fpstt - 1) & 7;
4145 u.i = val;
4146 env->fpregs[new_fpstt].d = float64_to_floatx(u.f, &env->fp_status);
4147 env->fpstt = new_fpstt;
4148 env->fptags[new_fpstt] = 0; /* validate stack entry */
4149}
4150
4151void helper_fildl_ST0(int32_t val)
4152{
4153 int new_fpstt;
4154 new_fpstt = (env->fpstt - 1) & 7;
4155 env->fpregs[new_fpstt].d = int32_to_floatx(val, &env->fp_status);
4156 env->fpstt = new_fpstt;
4157 env->fptags[new_fpstt] = 0; /* validate stack entry */
4158}
4159
4160void helper_fildll_ST0(int64_t val)
4161{
4162 int new_fpstt;
4163 new_fpstt = (env->fpstt - 1) & 7;
4164 env->fpregs[new_fpstt].d = int64_to_floatx(val, &env->fp_status);
4165 env->fpstt = new_fpstt;
4166 env->fptags[new_fpstt] = 0; /* validate stack entry */
4167}
4168
4169#ifndef VBOX
4170uint32_t helper_fsts_ST0(void)
4171#else
4172RTCCUINTREG helper_fsts_ST0(void)
4173#endif
4174{
4175 union {
4176 float32 f;
4177 uint32_t i;
4178 } u;
4179 u.f = floatx_to_float32(ST0, &env->fp_status);
4180 return u.i;
4181}
4182
4183uint64_t helper_fstl_ST0(void)
4184{
4185 union {
4186 float64 f;
4187 uint64_t i;
4188 } u;
4189 u.f = floatx_to_float64(ST0, &env->fp_status);
4190 return u.i;
4191}
4192#ifndef VBOX
4193int32_t helper_fist_ST0(void)
4194#else
4195RTCCINTREG helper_fist_ST0(void)
4196#endif
4197{
4198 int32_t val;
4199 val = floatx_to_int32(ST0, &env->fp_status);
4200 if (val != (int16_t)val)
4201 val = -32768;
4202 return val;
4203}
4204
4205#ifndef VBOX
4206int32_t helper_fistl_ST0(void)
4207#else
4208RTCCINTREG helper_fistl_ST0(void)
4209#endif
4210{
4211 int32_t val;
4212 val = floatx_to_int32(ST0, &env->fp_status);
4213 return val;
4214}
4215
4216int64_t helper_fistll_ST0(void)
4217{
4218 int64_t val;
4219 val = floatx_to_int64(ST0, &env->fp_status);
4220 return val;
4221}
4222
4223#ifndef VBOX
4224int32_t helper_fistt_ST0(void)
4225#else
4226RTCCINTREG helper_fistt_ST0(void)
4227#endif
4228{
4229 int32_t val;
4230 val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
4231 if (val != (int16_t)val)
4232 val = -32768;
4233 return val;
4234}
4235
4236#ifndef VBOX
4237int32_t helper_fisttl_ST0(void)
4238#else
4239RTCCINTREG helper_fisttl_ST0(void)
4240#endif
4241{
4242 int32_t val;
4243 val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
4244 return val;
4245}
4246
4247int64_t helper_fisttll_ST0(void)
4248{
4249 int64_t val;
4250 val = floatx_to_int64_round_to_zero(ST0, &env->fp_status);
4251 return val;
4252}
4253
4254void helper_fldt_ST0(target_ulong ptr)
4255{
4256 int new_fpstt;
4257 new_fpstt = (env->fpstt - 1) & 7;
4258 env->fpregs[new_fpstt].d = helper_fldt(ptr);
4259 env->fpstt = new_fpstt;
4260 env->fptags[new_fpstt] = 0; /* validate stack entry */
4261}
4262
4263void helper_fstt_ST0(target_ulong ptr)
4264{
4265 helper_fstt(ST0, ptr);
4266}
4267
4268void helper_fpush(void)
4269{
4270 fpush();
4271}
4272
4273void helper_fpop(void)
4274{
4275 fpop();
4276}
4277
4278void helper_fdecstp(void)
4279{
4280 env->fpstt = (env->fpstt - 1) & 7;
4281 env->fpus &= (~0x4700);
4282}
4283
4284void helper_fincstp(void)
4285{
4286 env->fpstt = (env->fpstt + 1) & 7;
4287 env->fpus &= (~0x4700);
4288}
4289
4290/* FPU move */
4291
4292void helper_ffree_STN(int st_index)
4293{
4294 env->fptags[(env->fpstt + st_index) & 7] = 1;
4295}
4296
4297void helper_fmov_ST0_FT0(void)
4298{
4299 ST0 = FT0;
4300}
4301
4302void helper_fmov_FT0_STN(int st_index)
4303{
4304 FT0 = ST(st_index);
4305}
4306
4307void helper_fmov_ST0_STN(int st_index)
4308{
4309 ST0 = ST(st_index);
4310}
4311
4312void helper_fmov_STN_ST0(int st_index)
4313{
4314 ST(st_index) = ST0;
4315}
4316
4317void helper_fxchg_ST0_STN(int st_index)
4318{
4319 CPU86_LDouble tmp;
4320 tmp = ST(st_index);
4321 ST(st_index) = ST0;
4322 ST0 = tmp;
4323}
4324
4325/* FPU operations */
4326
4327static const int fcom_ccval[4] = {0x0100, 0x4000, 0x0000, 0x4500};
4328
4329void helper_fcom_ST0_FT0(void)
4330{
4331 int ret;
4332
4333 ret = floatx_compare(ST0, FT0, &env->fp_status);
4334 env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
4335 FORCE_RET();
4336}
4337
4338void helper_fucom_ST0_FT0(void)
4339{
4340 int ret;
4341
4342 ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
4343 env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret+ 1];
4344 FORCE_RET();
4345}
4346
4347static const int fcomi_ccval[4] = {CC_C, CC_Z, 0, CC_Z | CC_P | CC_C};
4348
4349void helper_fcomi_ST0_FT0(void)
4350{
4351 int eflags;
4352 int ret;
4353
4354 ret = floatx_compare(ST0, FT0, &env->fp_status);
4355 eflags = cc_table[CC_OP].compute_all();
4356 eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
4357 CC_SRC = eflags;
4358 FORCE_RET();
4359}
4360
4361void helper_fucomi_ST0_FT0(void)
4362{
4363 int eflags;
4364 int ret;
4365
4366 ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
4367 eflags = cc_table[CC_OP].compute_all();
4368 eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
4369 CC_SRC = eflags;
4370 FORCE_RET();
4371}
4372
4373void helper_fadd_ST0_FT0(void)
4374{
4375 ST0 += FT0;
4376}
4377
4378void helper_fmul_ST0_FT0(void)
4379{
4380 ST0 *= FT0;
4381}
4382
4383void helper_fsub_ST0_FT0(void)
4384{
4385 ST0 -= FT0;
4386}
4387
4388void helper_fsubr_ST0_FT0(void)
4389{
4390 ST0 = FT0 - ST0;
4391}
4392
4393void helper_fdiv_ST0_FT0(void)
4394{
4395 ST0 = helper_fdiv(ST0, FT0);
4396}
4397
4398void helper_fdivr_ST0_FT0(void)
4399{
4400 ST0 = helper_fdiv(FT0, ST0);
4401}
4402
4403/* fp operations between STN and ST0 */
4404
4405void helper_fadd_STN_ST0(int st_index)
4406{
4407 ST(st_index) += ST0;
4408}
4409
4410void helper_fmul_STN_ST0(int st_index)
4411{
4412 ST(st_index) *= ST0;
4413}
4414
4415void helper_fsub_STN_ST0(int st_index)
4416{
4417 ST(st_index) -= ST0;
4418}
4419
4420void helper_fsubr_STN_ST0(int st_index)
4421{
4422 CPU86_LDouble *p;
4423 p = &ST(st_index);
4424 *p = ST0 - *p;
4425}
4426
4427void helper_fdiv_STN_ST0(int st_index)
4428{
4429 CPU86_LDouble *p;
4430 p = &ST(st_index);
4431 *p = helper_fdiv(*p, ST0);
4432}
4433
4434void helper_fdivr_STN_ST0(int st_index)
4435{
4436 CPU86_LDouble *p;
4437 p = &ST(st_index);
4438 *p = helper_fdiv(ST0, *p);
4439}
4440
4441/* misc FPU operations */
4442void helper_fchs_ST0(void)
4443{
4444 ST0 = floatx_chs(ST0);
4445}
4446
4447void helper_fabs_ST0(void)
4448{
4449 ST0 = floatx_abs(ST0);
4450}
4451
4452void helper_fld1_ST0(void)
4453{
4454 ST0 = f15rk[1];
4455}
4456
4457void helper_fldl2t_ST0(void)
4458{
4459 ST0 = f15rk[6];
4460}
4461
4462void helper_fldl2e_ST0(void)
4463{
4464 ST0 = f15rk[5];
4465}
4466
4467void helper_fldpi_ST0(void)
4468{
4469 ST0 = f15rk[2];
4470}
4471
4472void helper_fldlg2_ST0(void)
4473{
4474 ST0 = f15rk[3];
4475}
4476
4477void helper_fldln2_ST0(void)
4478{
4479 ST0 = f15rk[4];
4480}
4481
4482void helper_fldz_ST0(void)
4483{
4484 ST0 = f15rk[0];
4485}
4486
4487void helper_fldz_FT0(void)
4488{
4489 FT0 = f15rk[0];
4490}
4491
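/* FNSTSW: the top-of-stack pointer occupies bits 11-13 of the status word,
   so fpstt is merged into the cached fpus value before it is returned. */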
4492#ifndef VBOX
4493uint32_t helper_fnstsw(void)
4494#else
4495RTCCUINTREG helper_fnstsw(void)
4496#endif
4497{
4498 return (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
4499}
4500
4501#ifndef VBOX
4502uint32_t helper_fnstcw(void)
4503#else
4504RTCCUINTREG helper_fnstcw(void)
4505#endif
4506{
4507 return env->fpuc;
4508}
4509
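/* Propagates the guest FPU control word into softfloat: the RC field selects
   the rounding mode and, when 80-bit doubles are available, the PC field
   selects 32/64/80-bit rounding precision. */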
4510static void update_fp_status(void)
4511{
4512 int rnd_type;
4513
4514 /* set rounding mode */
4515 switch(env->fpuc & RC_MASK) {
4516 default:
4517 case RC_NEAR:
4518 rnd_type = float_round_nearest_even;
4519 break;
4520 case RC_DOWN:
4521 rnd_type = float_round_down;
4522 break;
4523 case RC_UP:
4524 rnd_type = float_round_up;
4525 break;
4526 case RC_CHOP:
4527 rnd_type = float_round_to_zero;
4528 break;
4529 }
4530 set_float_rounding_mode(rnd_type, &env->fp_status);
4531#ifdef FLOATX80
4532 switch((env->fpuc >> 8) & 3) {
4533 case 0:
4534 rnd_type = 32;
4535 break;
4536 case 2:
4537 rnd_type = 64;
4538 break;
4539 case 3:
4540 default:
4541 rnd_type = 80;
4542 break;
4543 }
4544 set_floatx80_rounding_precision(rnd_type, &env->fp_status);
4545#endif
4546}
4547
4548void helper_fldcw(uint32_t val)
4549{
4550 env->fpuc = val;
4551 update_fp_status();
4552}
4553
4554void helper_fclex(void)
4555{
4556 env->fpus &= 0x7f00;
4557}
4558
4559void helper_fwait(void)
4560{
4561 if (env->fpus & FPUS_SE)
4562 fpu_raise_exception();
4563 FORCE_RET();
4564}
4565
4566void helper_fninit(void)
4567{
4568 env->fpus = 0;
4569 env->fpstt = 0;
4570 env->fpuc = 0x37f;
4571 env->fptags[0] = 1;
4572 env->fptags[1] = 1;
4573 env->fptags[2] = 1;
4574 env->fptags[3] = 1;
4575 env->fptags[4] = 1;
4576 env->fptags[5] = 1;
4577 env->fptags[6] = 1;
4578 env->fptags[7] = 1;
4579}
4580
4581/* BCD ops */
4582
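/* FBLD/FBST use the 80-bit packed BCD format: nine bytes of two decimal
   digits each, least significant byte first, plus a sign byte at offset 9
   (0x80 = negative). */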
4583void helper_fbld_ST0(target_ulong ptr)
4584{
4585 CPU86_LDouble tmp;
4586 uint64_t val;
4587 unsigned int v;
4588 int i;
4589
4590 val = 0;
4591 for(i = 8; i >= 0; i--) {
4592 v = ldub(ptr + i);
4593 val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
4594 }
4595 tmp = val;
4596 if (ldub(ptr + 9) & 0x80)
4597 tmp = -tmp;
4598 fpush();
4599 ST0 = tmp;
4600}
4601
4602void helper_fbst_ST0(target_ulong ptr)
4603{
4604 int v;
4605 target_ulong mem_ref, mem_end;
4606 int64_t val;
4607
4608 val = floatx_to_int64(ST0, &env->fp_status);
4609 mem_ref = ptr;
4610 mem_end = mem_ref + 9;
4611 if (val < 0) {
4612 stb(mem_end, 0x80);
4613 val = -val;
4614 } else {
4615 stb(mem_end, 0x00);
4616 }
4617 while (mem_ref < mem_end) {
4618 if (val == 0)
4619 break;
4620 v = val % 100;
4621 val = val / 100;
4622 v = ((v / 10) << 4) | (v % 10);
4623 stb(mem_ref++, v);
4624 }
4625 while (mem_ref < mem_end) {
4626 stb(mem_ref++, 0);
4627 }
4628}
4629
4630void helper_f2xm1(void)
4631{
4632 ST0 = pow(2.0,ST0) - 1.0;
4633}
4634
4635void helper_fyl2x(void)
4636{
4637 CPU86_LDouble fptemp;
4638
4639 fptemp = ST0;
4640 if (fptemp>0.0){
4641 fptemp = log(fptemp)/log(2.0); /* log2(ST) */
4642 ST1 *= fptemp;
4643 fpop();
4644 } else {
4645 env->fpus &= (~0x4700);
4646 env->fpus |= 0x400;
4647 }
4648}
4649
4650void helper_fptan(void)
4651{
4652 CPU86_LDouble fptemp;
4653
4654 fptemp = ST0;
4655 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4656 env->fpus |= 0x400;
4657 } else {
4658 ST0 = tan(fptemp);
4659 fpush();
4660 ST0 = 1.0;
4661 env->fpus &= (~0x400); /* C2 <-- 0 */
4662 /* the above code is for |arg| < 2**52 only */
4663 }
4664}
4665
4666void helper_fpatan(void)
4667{
4668 CPU86_LDouble fptemp, fpsrcop;
4669
4670 fpsrcop = ST1;
4671 fptemp = ST0;
4672 ST1 = atan2(fpsrcop,fptemp);
4673 fpop();
4674}
4675
4676void helper_fxtract(void)
4677{
4678 CPU86_LDoubleU temp;
4679 unsigned int expdif;
4680
4681 temp.d = ST0;
4682 expdif = EXPD(temp) - EXPBIAS;
4683 /* DP exponent bias */
4684 ST0 = expdif;
4685 fpush();
4686 BIASEXPONENT(temp);
4687 ST0 = temp.d;
4688}
4689
4690#ifdef VBOX
4691#ifdef _MSC_VER
4692/* MSC cannot divide by zero */
4693extern double _Nan;
4694#define NaN _Nan
4695#else
4696#define NaN (0.0 / 0.0)
4697#endif
4698#endif /* VBOX */
4699
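/* FPREM/FPREM1: C2 set in the status word means the reduction is incomplete
   and the instruction must be executed again; otherwise the three low bits
   of the quotient are reported in C0, C3 and C1. */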
4700void helper_fprem1(void)
4701{
4702 CPU86_LDouble dblq, fpsrcop, fptemp;
4703 CPU86_LDoubleU fpsrcop1, fptemp1;
4704 int expdif;
4705 signed long long int q;
4706
4707#ifndef VBOX /* Unfortunately, we cannot handle isinf/isnan easily in wrapper */
4708 if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
4709#else
4710 if ((ST0 != ST0) || (ST1 != ST1) || (ST1 == 0.0)) {
4711#endif
4712 ST0 = 0.0 / 0.0; /* NaN */
4713 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4714 return;
4715 }
4716
4717 fpsrcop = ST0;
4718 fptemp = ST1;
4719 fpsrcop1.d = fpsrcop;
4720 fptemp1.d = fptemp;
4721 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
4722
4723 if (expdif < 0) {
4724 /* optimisation? taken from the AMD docs */
4725 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4726 /* ST0 is unchanged */
4727 return;
4728 }
4729
4730 if (expdif < 53) {
4731 dblq = fpsrcop / fptemp;
4732 /* round dblq towards nearest integer */
4733 dblq = rint(dblq);
4734 ST0 = fpsrcop - fptemp * dblq;
4735
4736 /* convert dblq to q by truncating towards zero */
4737 if (dblq < 0.0)
4738 q = (signed long long int)(-dblq);
4739 else
4740 q = (signed long long int)dblq;
4741
4742 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4743 /* (C0,C3,C1) <-- (q2,q1,q0) */
4744 env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
4745 env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
4746 env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
4747 } else {
4748 env->fpus |= 0x400; /* C2 <-- 1 */
4749 fptemp = pow(2.0, expdif - 50);
4750 fpsrcop = (ST0 / ST1) / fptemp;
4751 /* fpsrcop = integer obtained by chopping */
4752 fpsrcop = (fpsrcop < 0.0) ?
4753 -(floor(fabs(fpsrcop))) : floor(fpsrcop);
4754 ST0 -= (ST1 * fpsrcop * fptemp);
4755 }
4756}
4757
4758void helper_fprem(void)
4759{
4760 CPU86_LDouble dblq, fpsrcop, fptemp;
4761 CPU86_LDoubleU fpsrcop1, fptemp1;
4762 int expdif;
4763 signed long long int q;
4764
4765#ifndef VBOX /* Unfortunately, we cannot easily handle isinf/isnan in wrapper */
4766 if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
4767#else
4768 if ((ST0 != ST0) || (ST1 != ST1) || (ST1 == 0.0)) {
4769#endif
4770 ST0 = 0.0 / 0.0; /* NaN */
4771 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4772 return;
4773 }
4774
4775 fpsrcop = (CPU86_LDouble)ST0;
4776 fptemp = (CPU86_LDouble)ST1;
4777 fpsrcop1.d = fpsrcop;
4778 fptemp1.d = fptemp;
4779 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
4780
4781 if (expdif < 0) {
4782 /* optimisation? taken from the AMD docs */
4783 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4784 /* ST0 is unchanged */
4785 return;
4786 }
4787
4788 if ( expdif < 53 ) {
4789 dblq = fpsrcop/*ST0*/ / fptemp/*ST1*/;
4790 /* round dblq towards zero */
4791 dblq = (dblq < 0.0) ? ceil(dblq) : floor(dblq);
4792 ST0 = fpsrcop/*ST0*/ - fptemp * dblq;
4793
4794 /* convert dblq to q by truncating towards zero */
4795 if (dblq < 0.0)
4796 q = (signed long long int)(-dblq);
4797 else
4798 q = (signed long long int)dblq;
4799
4800 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4801 /* (C0,C3,C1) <-- (q2,q1,q0) */
4802 env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
4803 env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
4804 env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
4805 } else {
4806 int N = 32 + (expdif % 32); /* as per AMD docs */
4807 env->fpus |= 0x400; /* C2 <-- 1 */
4808 fptemp = pow(2.0, (double)(expdif - N));
4809 fpsrcop = (ST0 / ST1) / fptemp;
4810 /* fpsrcop = integer obtained by chopping */
4811 fpsrcop = (fpsrcop < 0.0) ?
4812 -(floor(fabs(fpsrcop))) : floor(fpsrcop);
4813 ST0 -= (ST1 * fpsrcop * fptemp);
4814 }
4815}
4816
4817void helper_fyl2xp1(void)
4818{
4819 CPU86_LDouble fptemp;
4820
4821 fptemp = ST0;
4822 if ((fptemp+1.0)>0.0) {
4823 fptemp = log(fptemp+1.0) / log(2.0); /* log2(ST+1.0) */
4824 ST1 *= fptemp;
4825 fpop();
4826 } else {
4827 env->fpus &= (~0x4700);
4828 env->fpus |= 0x400;
4829 }
4830}
4831
4832void helper_fsqrt(void)
4833{
4834 CPU86_LDouble fptemp;
4835
4836 fptemp = ST0;
4837 if (fptemp<0.0) {
4838 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4839 env->fpus |= 0x400;
4840 }
4841 ST0 = sqrt(fptemp);
4842}
4843
4844void helper_fsincos(void)
4845{
4846 CPU86_LDouble fptemp;
4847
4848 fptemp = ST0;
4849 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4850 env->fpus |= 0x400;
4851 } else {
4852 ST0 = sin(fptemp);
4853 fpush();
4854 ST0 = cos(fptemp);
4855 env->fpus &= (~0x400); /* C2 <-- 0 */
4856 /* the above code is for |arg| < 2**63 only */
4857 }
4858}
4859
4860void helper_frndint(void)
4861{
4862 ST0 = floatx_round_to_int(ST0, &env->fp_status);
4863}
4864
4865void helper_fscale(void)
4866{
4867 ST0 = ldexp (ST0, (int)(ST1));
4868}
4869
4870void helper_fsin(void)
4871{
4872 CPU86_LDouble fptemp;
4873
4874 fptemp = ST0;
4875 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4876 env->fpus |= 0x400;
4877 } else {
4878 ST0 = sin(fptemp);
4879 env->fpus &= (~0x400); /* C2 <-- 0 */
4880 /* the above code is for |arg| < 2**53 only */
4881 }
4882}
4883
4884void helper_fcos(void)
4885{
4886 CPU86_LDouble fptemp;
4887
4888 fptemp = ST0;
4889 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4890 env->fpus |= 0x400;
4891 } else {
4892 ST0 = cos(fptemp);
4893 env->fpus &= (~0x400); /* C2 <-- 0 */
4894 /* the above code is for |arg| < 2**63 only */
4895 }
4896}
4897
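/* FXAM classifies ST0 via C3/C2/C0: 0x100 NaN, 0x400 normal, 0x500 infinity,
   0x4000 zero, 0x4400 denormal; C1 mirrors the sign bit. */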
4898void helper_fxam_ST0(void)
4899{
4900 CPU86_LDoubleU temp;
4901 int expdif;
4902
4903 temp.d = ST0;
4904
4905 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4906 if (SIGND(temp))
4907 env->fpus |= 0x200; /* C1 <-- 1 */
4908
4909 /* XXX: test fptags too */
4910 expdif = EXPD(temp);
4911 if (expdif == MAXEXPD) {
4912#ifdef USE_X86LDOUBLE
4913 if (MANTD(temp) == 0x8000000000000000ULL)
4914#else
4915 if (MANTD(temp) == 0)
4916#endif
4917 env->fpus |= 0x500 /*Infinity*/;
4918 else
4919 env->fpus |= 0x100 /*NaN*/;
4920 } else if (expdif == 0) {
4921 if (MANTD(temp) == 0)
4922 env->fpus |= 0x4000 /*Zero*/;
4923 else
4924 env->fpus |= 0x4400 /*Denormal*/;
4925 } else {
4926 env->fpus |= 0x400;
4927 }
4928}
4929
4930void helper_fstenv(target_ulong ptr, int data32)
4931{
4932 int fpus, fptag, exp, i;
4933 uint64_t mant;
4934 CPU86_LDoubleU tmp;
4935
4936 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
4937 fptag = 0;
4938 for (i=7; i>=0; i--) {
4939 fptag <<= 2;
4940 if (env->fptags[i]) {
4941 fptag |= 3;
4942 } else {
4943 tmp.d = env->fpregs[i].d;
4944 exp = EXPD(tmp);
4945 mant = MANTD(tmp);
4946 if (exp == 0 && mant == 0) {
4947 /* zero */
4948 fptag |= 1;
4949 } else if (exp == 0 || exp == MAXEXPD
4950#ifdef USE_X86LDOUBLE
4951 || (mant & (1LL << 63)) == 0
4952#endif
4953 ) {
4954 /* NaNs, infinity, denormal */
4955 fptag |= 2;
4956 }
4957 }
4958 }
4959 if (data32) {
4960 /* 32 bit */
4961 stl(ptr, env->fpuc);
4962 stl(ptr + 4, fpus);
4963 stl(ptr + 8, fptag);
4964 stl(ptr + 12, 0); /* fpip */
4965 stl(ptr + 16, 0); /* fpcs */
4966 stl(ptr + 20, 0); /* fpoo */
4967 stl(ptr + 24, 0); /* fpos */
4968 } else {
4969 /* 16 bit */
4970 stw(ptr, env->fpuc);
4971 stw(ptr + 2, fpus);
4972 stw(ptr + 4, fptag);
4973 stw(ptr + 6, 0);
4974 stw(ptr + 8, 0);
4975 stw(ptr + 10, 0);
4976 stw(ptr + 12, 0);
4977 }
4978}
4979
4980void helper_fldenv(target_ulong ptr, int data32)
4981{
4982 int i, fpus, fptag;
4983
4984 if (data32) {
4985 env->fpuc = lduw(ptr);
4986 fpus = lduw(ptr + 4);
4987 fptag = lduw(ptr + 8);
4988 }
4989 else {
4990 env->fpuc = lduw(ptr);
4991 fpus = lduw(ptr + 2);
4992 fptag = lduw(ptr + 4);
4993 }
4994 env->fpstt = (fpus >> 11) & 7;
4995 env->fpus = fpus & ~0x3800;
4996 for(i = 0;i < 8; i++) {
4997 env->fptags[i] = ((fptag & 3) == 3);
4998 fptag >>= 2;
4999 }
5000}
5001
5002void helper_fsave(target_ulong ptr, int data32)
5003{
5004 CPU86_LDouble tmp;
5005 int i;
5006
5007 helper_fstenv(ptr, data32);
5008
5009 ptr += (14 << data32);
5010 for(i = 0;i < 8; i++) {
5011 tmp = ST(i);
5012 helper_fstt(tmp, ptr);
5013 ptr += 10;
5014 }
5015
5016 /* fninit */
5017 env->fpus = 0;
5018 env->fpstt = 0;
5019 env->fpuc = 0x37f;
5020 env->fptags[0] = 1;
5021 env->fptags[1] = 1;
5022 env->fptags[2] = 1;
5023 env->fptags[3] = 1;
5024 env->fptags[4] = 1;
5025 env->fptags[5] = 1;
5026 env->fptags[6] = 1;
5027 env->fptags[7] = 1;
5028}
5029
5030void helper_frstor(target_ulong ptr, int data32)
5031{
5032 CPU86_LDouble tmp;
5033 int i;
5034
5035 helper_fldenv(ptr, data32);
5036 ptr += (14 << data32);
5037
5038 for(i = 0;i < 8; i++) {
5039 tmp = helper_fldt(ptr);
5040 ST(i) = tmp;
5041 ptr += 10;
5042 }
5043}
5044
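/* FXSAVE image layout used below: FCW at +0x00, FSW at +0x02, abridged tag
   word at +0x04, MXCSR at +0x18, ST0-ST7 in 16-byte slots starting at +0x20
   and the XMM registers starting at +0xa0. */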
5045void helper_fxsave(target_ulong ptr, int data64)
5046{
5047 int fpus, fptag, i, nb_xmm_regs;
5048 CPU86_LDouble tmp;
5049 target_ulong addr;
5050
5051 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
5052 fptag = 0;
5053 for(i = 0; i < 8; i++) {
5054 fptag |= (env->fptags[i] << i);
5055 }
5056 stw(ptr, env->fpuc);
5057 stw(ptr + 2, fpus);
5058 stw(ptr + 4, fptag ^ 0xff);
5059#ifdef TARGET_X86_64
5060 if (data64) {
5061 stq(ptr + 0x08, 0); /* rip */
5062 stq(ptr + 0x10, 0); /* rdp */
5063 } else
5064#endif
5065 {
5066 stl(ptr + 0x08, 0); /* eip */
5067 stl(ptr + 0x0c, 0); /* sel */
5068 stl(ptr + 0x10, 0); /* dp */
5069 stl(ptr + 0x14, 0); /* sel */
5070 }
5071
5072 addr = ptr + 0x20;
5073 for(i = 0;i < 8; i++) {
5074 tmp = ST(i);
5075 helper_fstt(tmp, addr);
5076 addr += 16;
5077 }
5078
5079 if (env->cr[4] & CR4_OSFXSR_MASK) {
5080 /* XXX: finish it */
5081 stl(ptr + 0x18, env->mxcsr); /* mxcsr */
5082 stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
5083 if (env->hflags & HF_CS64_MASK)
5084 nb_xmm_regs = 16;
5085 else
5086 nb_xmm_regs = 8;
5087 addr = ptr + 0xa0;
5088 for(i = 0; i < nb_xmm_regs; i++) {
5089 stq(addr, env->xmm_regs[i].XMM_Q(0));
5090 stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
5091 addr += 16;
5092 }
5093 }
5094}
5095
5096void helper_fxrstor(target_ulong ptr, int data64)
5097{
5098 int i, fpus, fptag, nb_xmm_regs;
5099 CPU86_LDouble tmp;
5100 target_ulong addr;
5101
5102 env->fpuc = lduw(ptr);
5103 fpus = lduw(ptr + 2);
5104 fptag = lduw(ptr + 4);
5105 env->fpstt = (fpus >> 11) & 7;
5106 env->fpus = fpus & ~0x3800;
5107 fptag ^= 0xff;
5108 for(i = 0;i < 8; i++) {
5109 env->fptags[i] = ((fptag >> i) & 1);
5110 }
5111
5112 addr = ptr + 0x20;
5113 for(i = 0;i < 8; i++) {
5114 tmp = helper_fldt(addr);
5115 ST(i) = tmp;
5116 addr += 16;
5117 }
5118
5119 if (env->cr[4] & CR4_OSFXSR_MASK) {
5120 /* XXX: finish it */
5121 env->mxcsr = ldl(ptr + 0x18);
5122 //ldl(ptr + 0x1c);
5123 if (env->hflags & HF_CS64_MASK)
5124 nb_xmm_regs = 16;
5125 else
5126 nb_xmm_regs = 8;
5127 addr = ptr + 0xa0;
5128 for(i = 0; i < nb_xmm_regs; i++) {
5129#if !defined(VBOX) || __GNUC__ < 4
5130 env->xmm_regs[i].XMM_Q(0) = ldq(addr);
5131 env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
5132#else /* VBOX + __GNUC__ >= 4: gcc 4.x compiler bug - it runs out of registers for the 64-bit value. */
5133# if 1
5134 env->xmm_regs[i].XMM_L(0) = ldl(addr);
5135 env->xmm_regs[i].XMM_L(1) = ldl(addr + 4);
5136 env->xmm_regs[i].XMM_L(2) = ldl(addr + 8);
5137 env->xmm_regs[i].XMM_L(3) = ldl(addr + 12);
5138# else
5139 /* this works fine on Mac OS X, gcc 4.0.1 */
5140 uint64_t u64 = ldq(addr);
5141 env->xmm_regs[i].XMM_Q(0) = u64;
5142 u64 = ldq(addr + 8);
5143 env->xmm_regs[i].XMM_Q(1) = u64;
5144# endif
5145#endif
5146 addr += 16;
5147 }
5148 }
5149}
5150
5151#ifndef USE_X86LDOUBLE
5152
5153void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
5154{
5155 CPU86_LDoubleU temp;
5156 int e;
5157
5158 temp.d = f;
5159 /* mantissa */
5160 *pmant = (MANTD(temp) << 11) | (1LL << 63);
5161 /* exponent + sign */
5162 e = EXPD(temp) - EXPBIAS + 16383;
5163 e |= SIGND(temp) >> 16;
5164 *pexp = e;
5165}
5166
5167CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
5168{
5169 CPU86_LDoubleU temp;
5170 int e;
5171 uint64_t ll;
5172
5173 /* XXX: handle overflow ? */
5174 e = (upper & 0x7fff) - 16383 + EXPBIAS; /* exponent */
5175 e |= (upper >> 4) & 0x800; /* sign */
5176 ll = (mant >> 11) & ((1LL << 52) - 1);
5177#ifdef __arm__
5178 temp.l.upper = (e << 20) | (ll >> 32);
5179 temp.l.lower = ll;
5180#else
5181 temp.ll = ll | ((uint64_t)e << 52);
5182#endif
5183 return temp.d;
5184}
5185
5186#else
5187
5188void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
5189{
5190 CPU86_LDoubleU temp;
5191
5192 temp.d = f;
5193 *pmant = temp.l.lower;
5194 *pexp = temp.l.upper;
5195}
5196
5197CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
5198{
5199 CPU86_LDoubleU temp;
5200
5201 temp.l.upper = upper;
5202 temp.l.lower = mant;
5203 return temp.d;
5204}
5205#endif
5206
5207#ifdef TARGET_X86_64
5208
5209//#define DEBUG_MULDIV
5210
5211static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
5212{
5213 *plow += a;
5214 /* carry test */
5215 if (*plow < a)
5216 (*phigh)++;
5217 *phigh += b;
5218}
5219
5220static void neg128(uint64_t *plow, uint64_t *phigh)
5221{
5222 *plow = ~ *plow;
5223 *phigh = ~ *phigh;
5224 add128(plow, phigh, 1, 0);
5225}
5226
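/* 128-by-64-bit unsigned division using a simple shift-and-subtract loop;
   the quotient is returned in *plow and the remainder in *phigh. */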
5227/* return TRUE if overflow */
5228static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
5229{
5230 uint64_t q, r, a1, a0;
5231 int i, qb, ab;
5232
5233 a0 = *plow;
5234 a1 = *phigh;
5235 if (a1 == 0) {
5236 q = a0 / b;
5237 r = a0 % b;
5238 *plow = q;
5239 *phigh = r;
5240 } else {
5241 if (a1 >= b)
5242 return 1;
5243 /* XXX: use a better algorithm */
5244 for(i = 0; i < 64; i++) {
5245 ab = a1 >> 63;
5246 a1 = (a1 << 1) | (a0 >> 63);
5247 if (ab || a1 >= b) {
5248 a1 -= b;
5249 qb = 1;
5250 } else {
5251 qb = 0;
5252 }
5253 a0 = (a0 << 1) | qb;
5254 }
5255#if defined(DEBUG_MULDIV)
5256 printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64 ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n",
5257 *phigh, *plow, b, a0, a1);
5258#endif
5259 *plow = a0;
5260 *phigh = a1;
5261 }
5262 return 0;
5263}
5264
5265/* return TRUE if overflow */
5266static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
5267{
5268 int sa, sb;
5269 sa = ((int64_t)*phigh < 0);
5270 if (sa)
5271 neg128(plow, phigh);
5272 sb = (b < 0);
5273 if (sb)
5274 b = -b;
5275 if (div64(plow, phigh, b) != 0)
5276 return 1;
5277 if (sa ^ sb) {
5278 if (*plow > (1ULL << 63))
5279 return 1;
5280 *plow = - *plow;
5281 } else {
5282 if (*plow >= (1ULL << 63))
5283 return 1;
5284 }
5285 if (sa)
5286 *phigh = - *phigh;
5287 return 0;
5288}
5289
5290void helper_mulq_EAX_T0(target_ulong t0)
5291{
5292 uint64_t r0, r1;
5293
5294 mulu64(&r0, &r1, EAX, t0);
5295 EAX = r0;
5296 EDX = r1;
5297 CC_DST = r0;
5298 CC_SRC = r1;
5299}
5300
5301void helper_imulq_EAX_T0(target_ulong t0)
5302{
5303 uint64_t r0, r1;
5304
5305 muls64(&r0, &r1, EAX, t0);
5306 EAX = r0;
5307 EDX = r1;
5308 CC_DST = r0;
5309 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
5310}
5311
5312target_ulong helper_imulq_T0_T1(target_ulong t0, target_ulong t1)
5313{
5314 uint64_t r0, r1;
5315
5316 muls64(&r0, &r1, t0, t1);
5317 CC_DST = r0;
5318 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
5319 return r0;
5320}
5321
5322void helper_divq_EAX(target_ulong t0)
5323{
5324 uint64_t r0, r1;
5325 if (t0 == 0) {
5326 raise_exception(EXCP00_DIVZ);
5327 }
5328 r0 = EAX;
5329 r1 = EDX;
5330 if (div64(&r0, &r1, t0))
5331 raise_exception(EXCP00_DIVZ);
5332 EAX = r0;
5333 EDX = r1;
5334}
5335
5336void helper_idivq_EAX(target_ulong t0)
5337{
5338 uint64_t r0, r1;
5339 if (t0 == 0) {
5340 raise_exception(EXCP00_DIVZ);
5341 }
5342 r0 = EAX;
5343 r1 = EDX;
5344 if (idiv64(&r0, &r1, t0))
5345 raise_exception(EXCP00_DIVZ);
5346 EAX = r0;
5347 EDX = r1;
5348}
5349#endif
5350
5351static void do_hlt(void)
5352{
5353 env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
5354 env->halted = 1;
5355 env->exception_index = EXCP_HLT;
5356 cpu_loop_exit();
5357}
5358
5359void helper_hlt(int next_eip_addend)
5360{
5361 helper_svm_check_intercept_param(SVM_EXIT_HLT, 0);
5362 EIP += next_eip_addend;
5363
5364 do_hlt();
5365}
5366
5367void helper_monitor(target_ulong ptr)
5368{
5369#ifdef VBOX
5370 if ((uint32_t)ECX > 1)
5371 raise_exception(EXCP0D_GPF);
5372#else
5373 if ((uint32_t)ECX != 0)
5374 raise_exception(EXCP0D_GPF);
5375#endif
5376 /* XXX: store address ? */
5377 helper_svm_check_intercept_param(SVM_EXIT_MONITOR, 0);
5378}
5379
5380void helper_mwait(int next_eip_addend)
5381{
5382 if ((uint32_t)ECX != 0)
5383 raise_exception(EXCP0D_GPF);
5384#ifdef VBOX
5385 helper_hlt(next_eip_addend);
5386#else
5387 helper_svm_check_intercept_param(SVM_EXIT_MWAIT, 0);
5388 EIP += next_eip_addend;
5389
5390 /* XXX: not complete but not completely erroneous */
5391 if (env->cpu_index != 0 || env->next_cpu != NULL) {
5392 /* more than one CPU: do not sleep because another CPU may
5393 wake this one */
5394 } else {
5395 do_hlt();
5396 }
5397#endif
5398}
5399
5400void helper_debug(void)
5401{
5402 env->exception_index = EXCP_DEBUG;
5403 cpu_loop_exit();
5404}
5405
5406void helper_raise_interrupt(int intno, int next_eip_addend)
5407{
5408 raise_interrupt(intno, 1, 0, next_eip_addend);
5409}
5410
5411void helper_raise_exception(int exception_index)
5412{
5413 raise_exception(exception_index);
5414}
5415
5416void helper_cli(void)
5417{
5418 env->eflags &= ~IF_MASK;
5419}
5420
5421void helper_sti(void)
5422{
5423 env->eflags |= IF_MASK;
5424}
5425
5426#ifdef VBOX
5427void helper_cli_vme(void)
5428{
5429 env->eflags &= ~VIF_MASK;
5430}
5431
5432void helper_sti_vme(void)
5433{
5434 /* First check, then change eflags according to the AMD manual */
5435 if (env->eflags & VIP_MASK) {
5436 raise_exception(EXCP0D_GPF);
5437 }
5438 env->eflags |= VIF_MASK;
5439}
5440#endif
5441
5442#if 0
5443/* vm86plus instructions */
5444void helper_cli_vm(void)
5445{
5446 env->eflags &= ~VIF_MASK;
5447}
5448
5449void helper_sti_vm(void)
5450{
5451 env->eflags |= VIF_MASK;
5452 if (env->eflags & VIP_MASK) {
5453 raise_exception(EXCP0D_GPF);
5454 }
5455}
5456#endif
5457
5458void helper_set_inhibit_irq(void)
5459{
5460 env->hflags |= HF_INHIBIT_IRQ_MASK;
5461}
5462
5463void helper_reset_inhibit_irq(void)
5464{
5465 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
5466}
5467
5468void helper_boundw(target_ulong a0, int v)
5469{
5470 int low, high;
5471 low = ldsw(a0);
5472 high = ldsw(a0 + 2);
5473 v = (int16_t)v;
5474 if (v < low || v > high) {
5475 raise_exception(EXCP05_BOUND);
5476 }
5477 FORCE_RET();
5478}
5479
5480void helper_boundl(target_ulong a0, int v)
5481{
5482 int low, high;
5483 low = ldl(a0);
5484 high = ldl(a0 + 4);
5485 if (v < low || v > high) {
5486 raise_exception(EXCP05_BOUND);
5487 }
5488 FORCE_RET();
5489}
5490
5491static float approx_rsqrt(float a)
5492{
5493 return 1.0 / sqrt(a);
5494}
5495
5496static float approx_rcp(float a)
5497{
5498 return 1.0 / a;
5499}
5500
5501#if !defined(CONFIG_USER_ONLY)
5502
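/* Each inclusion of softmmu_template.h instantiates the MMU load/store
   helpers for one access size: SHIFT 0/1/2/3 = 8/16/32/64 bits. */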
5503#define MMUSUFFIX _mmu
5504
5505#define SHIFT 0
5506#include "softmmu_template.h"
5507
5508#define SHIFT 1
5509#include "softmmu_template.h"
5510
5511#define SHIFT 2
5512#include "softmmu_template.h"
5513
5514#define SHIFT 3
5515#include "softmmu_template.h"
5516
5517#endif
5518
5519#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
5520/* This code assumes that a real physical address always fits into a host CPU
5521 register, which is wrong in general but true for our current use cases. */
5522RTCCUINTREG REGPARM __ldb_vbox_phys(RTCCUINTREG addr)
5523{
5524 return remR3PhysReadS8(addr);
5525}
5526RTCCUINTREG REGPARM __ldub_vbox_phys(RTCCUINTREG addr)
5527{
5528 return remR3PhysReadU8(addr);
5529}
5530void REGPARM __stb_vbox_phys(RTCCUINTREG addr, RTCCUINTREG val)
5531{
5532 remR3PhysWriteU8(addr, val);
5533}
5534RTCCUINTREG REGPARM __ldw_vbox_phys(RTCCUINTREG addr)
5535{
5536 return remR3PhysReadS16(addr);
5537}
5538RTCCUINTREG REGPARM __lduw_vbox_phys(RTCCUINTREG addr)
5539{
5540 return remR3PhysReadU16(addr);
5541}
5542void REGPARM __stw_vbox_phys(RTCCUINTREG addr, RTCCUINTREG val)
5543{
5544 remR3PhysWriteU16(addr, val);
5545}
5546RTCCUINTREG REGPARM __ldl_vbox_phys(RTCCUINTREG addr)
5547{
5548 return remR3PhysReadS32(addr);
5549}
5550RTCCUINTREG REGPARM __ldul_vbox_phys(RTCCUINTREG addr)
5551{
5552 return remR3PhysReadU32(addr);
5553}
5554void REGPARM __stl_vbox_phys(RTCCUINTREG addr, RTCCUINTREG val)
5555{
5556 remR3PhysWriteU32(addr, val);
5557}
5558uint64_t REGPARM __ldq_vbox_phys(RTCCUINTREG addr)
5559{
5560 return remR3PhysReadU64(addr);
5561}
5562void REGPARM __stq_vbox_phys(RTCCUINTREG addr, uint64_t val)
5563{
5564 remR3PhysWriteU64(addr, val);
5565}
5566#endif
5567
5568/* try to fill the TLB and return an exception if error. If retaddr is
5569 NULL, it means that the function was called in C code (i.e. not
5570 from generated code or from helper.c) */
5571/* XXX: fix it to restore all registers */
5572void tlb_fill(target_ulong addr, int is_write, int mmu_idx, void *retaddr)
5573{
5574 TranslationBlock *tb;
5575 int ret;
5576 unsigned long pc;
5577 CPUX86State *saved_env;
5578
5579 /* XXX: hack to restore env in all cases, even if not called from
5580 generated code */
5581 saved_env = env;
5582 env = cpu_single_env;
5583
5584 ret = cpu_x86_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
5585 if (ret) {
5586 if (retaddr) {
5587 /* now we have a real cpu fault */
5588 pc = (unsigned long)retaddr;
5589 tb = tb_find_pc(pc);
5590 if (tb) {
5591 /* the PC is inside the translated code. It means that we have
5592 a virtual CPU fault */
5593 cpu_restore_state(tb, env, pc, NULL);
5594 }
5595 }
5596 raise_exception_err(env->exception_index, env->error_code);
5597 }
5598 env = saved_env;
5599}
5600
5601#ifdef VBOX
5602
5603/**
5604 * Correctly computes the eflags.
5605 * @returns eflags.
5606 * @param env1 CPU environment.
5607 */
5608uint32_t raw_compute_eflags(CPUX86State *env1)
5609{
5610 CPUX86State *savedenv = env;
5611 uint32_t efl;
5612 env = env1;
5613 efl = compute_eflags();
5614 env = savedenv;
5615 return efl;
5616}
5617
5618/**
5619 * Reads a byte from a virtual address in the guest memory area.
5620 * XXX: does this work for all addresses? swapped-out pages?
5621 * @returns the data byte that was read.
5622 * @param env1 CPU environment.
5623 * @param pvAddr GC Virtual address.
5624 */
5625uint8_t read_byte(CPUX86State *env1, target_ulong addr)
5626{
5627 CPUX86State *savedenv = env;
5628 uint8_t u8;
5629 env = env1;
5630 u8 = ldub_kernel(addr);
5631 env = savedenv;
5632 return u8;
5633}
5634
5635/**
5636 * Reads a word (16 bits) from a virtual address in the guest memory area.
5637 * XXX: does this work for all addresses? swapped-out pages?
5638 * @returns the data word that was read.
5639 * @param env1 CPU environment.
5640 * @param pvAddr GC Virtual address.
5641 */
5642uint16_t read_word(CPUX86State *env1, target_ulong addr)
5643{
5644 CPUX86State *savedenv = env;
5645 uint16_t u16;
5646 env = env1;
5647 u16 = lduw_kernel(addr);
5648 env = savedenv;
5649 return u16;
5650}
5651
5652/**
5653 * Reads a dword (32 bits) from a virtual address in the guest memory area.
5654 * XXX: does this work for all addresses? swapped-out pages?
5655 * @returns the data dword that was read.
5656 * @param env1 CPU environment.
5657 * @param pvAddr GC Virtual address.
5658 */
5659uint32_t read_dword(CPUX86State *env1, target_ulong addr)
5660{
5661 CPUX86State *savedenv = env;
5662 uint32_t u32;
5663 env = env1;
5664 u32 = ldl_kernel(addr);
5665 env = savedenv;
5666 return u32;
5667}
5668
5669/**
5670 * Writes a byte to a virtual address in the guest memory area.
5671 * XXX: does this work for all addresses? swapped-out pages?
5673 * @param env1 CPU environment.
5674 * @param pvAddr GC Virtual address.
5675 * @param val byte value
5676 */
5677void write_byte(CPUX86State *env1, target_ulong addr, uint8_t val)
5678{
5679 CPUX86State *savedenv = env;
5680 env = env1;
5681 stb(addr, val);
5682 env = savedenv;
5683}
5684
5685void write_word(CPUX86State *env1, target_ulong addr, uint16_t val)
5686{
5687 CPUX86State *savedenv = env;
5688 env = env1;
5689 stw(addr, val);
5690 env = savedenv;
5691}
5692
5693void write_dword(CPUX86State *env1, target_ulong addr, uint32_t val)
5694{
5695 CPUX86State *savedenv = env;
5696 env = env1;
5697 stl(addr, val);
5698 env = savedenv;
5699}
5700
5701/**
5702 * Correctly loads a selector into a segment register, updating the internal
5703 * qemu data/caches.
5704 * @param env1 CPU environment.
5705 * @param seg_reg Segment register.
5706 * @param selector Selector to load.
5707 */
5708void sync_seg(CPUX86State *env1, int seg_reg, int selector)
5709{
5710 CPUX86State *savedenv = env;
5711#ifdef FORCE_SEGMENT_SYNC
5712 jmp_buf old_buf;
5713#endif
5714
5715 env = env1;
5716
5717 if ( env->eflags & X86_EFL_VM
5718 || !(env->cr[0] & X86_CR0_PE))
5719 {
5720 load_seg_vm(seg_reg, selector);
5721
5722 env = savedenv;
5723
5724 /* Successful sync. */
5725 env1->segs[seg_reg].newselector = 0;
5726 }
5727 else
5728 {
5729 /* For some reason this works even without saving/restoring the jump buffer, and
5730 since this code is time critical we don't do that. */
5731#ifdef FORCE_SEGMENT_SYNC
5732 memcpy(&old_buf, &env1->jmp_env, sizeof(old_buf));
5733#endif
5734 if (setjmp(env1->jmp_env) == 0)
5735 {
5736 if (seg_reg == R_CS)
5737 {
5738 uint32_t e1, e2;
5739 e1 = e2 = 0;
5740 load_segment(&e1, &e2, selector);
5741 cpu_x86_load_seg_cache(env, R_CS, selector,
5742 get_seg_base(e1, e2),
5743 get_seg_limit(e1, e2),
5744 e2);
5745 }
5746 else
5747 helper_load_seg(seg_reg, selector);
5748 /* We used to use tss_load_seg(seg_reg, selector); which, for some reason, ignored
5749 loading null (0) selectors, which in turn led to subtle problems like #3588. */
5750
5751 env = savedenv;
5752
5753 /* Successful sync. */
5754 env1->segs[seg_reg].newselector = 0;
5755 }
5756 else
5757 {
5758 env = savedenv;
5759
5760 /* Postpone sync until the guest uses the selector. */
5761 env1->segs[seg_reg].selector = selector; /* hidden values are now incorrect, but will be resynced when this register is accessed. */
5762 env1->segs[seg_reg].newselector = selector;
5763 Log(("sync_seg: out of sync seg_reg=%d selector=%#x\n", seg_reg, selector));
5764 env1->exception_index = -1;
5765 env1->error_code = 0;
5766 env1->old_exception = -1;
5767 }
5768#ifdef FORCE_SEGMENT_SYNC
5769 memcpy(&env1->jmp_env, &old_buf, sizeof(old_buf));
5770#endif
5771 }
5772
5773}
5774
5775DECLINLINE(void) tb_reset_jump(TranslationBlock *tb, int n)
5776{
5777 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
5778}
5779
5780
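/**
 * Translates and executes exactly one guest instruction through the
 * recompiler: a single-instruction TB is generated, run until EIP changes or
 * an external interrupt is pending, and then invalidated and freed. If
 * interrupts are inhibited afterwards (instruction fusing), the next
 * instruction is emulated as well.
 *
 * @returns 0.
 * @param env1 CPU environment.
 */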
5781int emulate_single_instr(CPUX86State *env1)
5782{
5783 TranslationBlock *tb;
5784 TranslationBlock *current;
5785 int flags;
5786 uint8_t *tc_ptr;
5787 target_ulong old_eip;
5788
5789 /* ensures env is loaded! */
5790 CPUX86State *savedenv = env;
5791 env = env1;
5792
5793 RAWEx_ProfileStart(env, STATS_EMULATE_SINGLE_INSTR);
5794
5795 current = env->current_tb;
5796 env->current_tb = NULL;
5797 flags = env->hflags | (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
5798
5799 /*
5800 * Translate only one instruction.
5801 */
5802 ASMAtomicOrU32(&env->state, CPU_EMULATE_SINGLE_INSTR);
5803 tb = tb_gen_code(env, env->eip + env->segs[R_CS].base,
5804 env->segs[R_CS].base, flags, 0);
5805
5806 ASMAtomicAndU32(&env->state, ~CPU_EMULATE_SINGLE_INSTR);
5807
5808
5809 /* tb_link_phys: */
5810 tb->jmp_first = (TranslationBlock *)((intptr_t)tb | 2);
5811 tb->jmp_next[0] = NULL;
5812 tb->jmp_next[1] = NULL;
5813 Assert(tb->jmp_next[0] == NULL);
5814 Assert(tb->jmp_next[1] == NULL);
5815 if (tb->tb_next_offset[0] != 0xffff)
5816 tb_reset_jump(tb, 0);
5817 if (tb->tb_next_offset[1] != 0xffff)
5818 tb_reset_jump(tb, 1);
5819
5820 /*
5821 * Execute it using emulation
5822 */
5823 old_eip = env->eip;
5824 env->current_tb = tb;
5825
5826 /*
5827 * EIP remains the same while a repeated instruction is still executing; it is unclear why qemu doesn't do a jump inside the generated code.
5828 * This is perhaps not a very safe hack.
5829 */
5830 while(old_eip == env->eip)
5831 {
5832 tc_ptr = tb->tc_ptr;
5833
5834#if defined(VBOX) && defined(GCC_WITH_BUGGY_REGPARM)
5835 int fake_ret;
5836 tcg_qemu_tb_exec(tc_ptr, fake_ret);
5837#else
5838 tcg_qemu_tb_exec(tc_ptr);
5839#endif
5840 /*
5841 * Exit once we detect an external interrupt and interrupts are enabled
5842 */
5843 if( (env->interrupt_request & (CPU_INTERRUPT_EXTERNAL_EXIT|CPU_INTERRUPT_EXTERNAL_TIMER)) ||
5844 ( (env->eflags & IF_MASK) &&
5845 !(env->hflags & HF_INHIBIT_IRQ_MASK) &&
5846 (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD) ) )
5847 {
5848 break;
5849 }
5850 }
5851 env->current_tb = current;
5852
5853 tb_phys_invalidate(tb, -1);
5854 tb_free(tb);
5855/*
5856 Assert(tb->tb_next_offset[0] == 0xffff);
5857 Assert(tb->tb_next_offset[1] == 0xffff);
5858 Assert(tb->tb_next[0] == 0xffff);
5859 Assert(tb->tb_next[1] == 0xffff);
5860 Assert(tb->jmp_next[0] == NULL);
5861 Assert(tb->jmp_next[1] == NULL);
5862 Assert(tb->jmp_first == NULL); */
5863
5864 RAWEx_ProfileStop(env, STATS_EMULATE_SINGLE_INSTR);
5865
5866 /*
5867 * Execute the next instruction when we encounter instruction fusing.
5868 */
5869 if (env->hflags & HF_INHIBIT_IRQ_MASK)
5870 {
5871 Log(("REM: Emulating next instruction due to instruction fusing (HF_INHIBIT_IRQ_MASK) at %RGv\n", env->eip));
5872 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
5873 emulate_single_instr(env);
5874 }
5875
5876 env = savedenv;
5877 return 0;
5878}
5879
5880/**
5881 * Correctly loads a new ldtr selector.
5882 *
5883 * @param env1 CPU environment.
5884 * @param selector Selector to load.
5885 */
5886void sync_ldtr(CPUX86State *env1, int selector)
5887{
5888 CPUX86State *saved_env = env;
5889 if (setjmp(env1->jmp_env) == 0)
5890 {
5891 env = env1;
5892 helper_lldt(selector);
5893 env = saved_env;
5894 }
5895 else
5896 {
5897 env = saved_env;
5898#ifdef VBOX_STRICT
5899 cpu_abort(env1, "sync_ldtr: selector=%#x\n", selector);
5900#endif
5901 }
5902}
5903
5904int get_ss_esp_from_tss_raw(CPUX86State *env1, uint32_t *ss_ptr,
5905 uint32_t *esp_ptr, int dpl)
5906{
5907 int type, index, shift;
5908
5909 CPUX86State *savedenv = env;
5910 env = env1;
5911
5912 if (!(env->tr.flags & DESC_P_MASK))
5913 cpu_abort(env, "invalid tss");
5914 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
5915 if ((type & 7) != 1)
5916 cpu_abort(env, "invalid tss type %d", type);
5917 shift = type >> 3;
5918 index = (dpl * 4 + 2) << shift;
5919 if (index + (4 << shift) - 1 > env->tr.limit)
5920 {
5921 env = savedenv;
5922 return 0;
5923 }
5924 //raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
5925
5926 if (shift == 0) {
5927 *esp_ptr = lduw_kernel(env->tr.base + index);
5928 *ss_ptr = lduw_kernel(env->tr.base + index + 2);
5929 } else {
5930 *esp_ptr = ldl_kernel(env->tr.base + index);
5931 *ss_ptr = lduw_kernel(env->tr.base + index + 4);
5932 }
5933
5934 env = savedenv;
5935 return 1;
5936}
5937
5938//*****************************************************************************
5939// Needs to be at the bottom of the file (overriding macros)
5940
5941#ifndef VBOX
5942static inline CPU86_LDouble helper_fldt_raw(uint8_t *ptr)
5943#else /* VBOX */
5944DECLINLINE(CPU86_LDouble) helper_fldt_raw(uint8_t *ptr)
5945#endif /* VBOX */
5946{
5947 return *(CPU86_LDouble *)ptr;
5948}
5949
5950#ifndef VBOX
5951static inline void helper_fstt_raw(CPU86_LDouble f, uint8_t *ptr)
5952#else /* VBOX */
5953DECLINLINE(void) helper_fstt_raw(CPU86_LDouble f, uint8_t *ptr)
5954#endif /* VBOX */
5955{
5956 *(CPU86_LDouble *)ptr = f;
5957}
5958
5959#undef stw
5960#undef stl
5961#undef stq
5962#define stw(a,b) *(uint16_t *)(a) = (uint16_t)(b)
5963#define stl(a,b) *(uint32_t *)(a) = (uint32_t)(b)
5964#define stq(a,b) *(uint64_t *)(a) = (uint64_t)(b)
5965
5966//*****************************************************************************
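/* Writes the FPU/SSE state of *env out to the raw guest image at ptr, using
   the FXSAVE layout when the guest CPUID exposes FXSR and the legacy FNSAVE
   layout otherwise. */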
5967void restore_raw_fp_state(CPUX86State *env, uint8_t *ptr)
5968{
5969 int fpus, fptag, i, nb_xmm_regs;
5970 CPU86_LDouble tmp;
5971 uint8_t *addr;
5972 int data64 = !!(env->hflags & HF_LMA_MASK);
5973
5974 if (env->cpuid_features & CPUID_FXSR)
5975 {
5976 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
5977 fptag = 0;
5978 for(i = 0; i < 8; i++) {
5979 fptag |= (env->fptags[i] << i);
5980 }
5981 stw(ptr, env->fpuc);
5982 stw(ptr + 2, fpus);
5983 stw(ptr + 4, fptag ^ 0xff);
5984
5985 addr = ptr + 0x20;
5986 for(i = 0;i < 8; i++) {
5987 tmp = ST(i);
5988 helper_fstt_raw(tmp, addr);
5989 addr += 16;
5990 }
5991
5992 if (env->cr[4] & CR4_OSFXSR_MASK) {
5993 /* XXX: finish it */
5994 stl(ptr + 0x18, env->mxcsr); /* mxcsr */
5995 stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
5996 nb_xmm_regs = 8 << data64;
5997 addr = ptr + 0xa0;
5998 for(i = 0; i < nb_xmm_regs; i++) {
5999#if __GNUC__ < 4
6000 stq(addr, env->xmm_regs[i].XMM_Q(0));
6001 stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
6002#else /* VBOX + __GNUC__ >= 4: gcc 4.x compiler bug - it runs out of registers for the 64-bit value. */
6003 stl(addr, env->xmm_regs[i].XMM_L(0));
6004 stl(addr + 4, env->xmm_regs[i].XMM_L(1));
6005 stl(addr + 8, env->xmm_regs[i].XMM_L(2));
6006 stl(addr + 12, env->xmm_regs[i].XMM_L(3));
6007#endif
6008 addr += 16;
6009 }
6010 }
6011 }
6012 else
6013 {
6014 PX86FPUSTATE fp = (PX86FPUSTATE)ptr;
6015 int fptag;
6016
6017 fp->FCW = env->fpuc;
6018 fp->FSW = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
6019 fptag = 0;
6020 for (i=7; i>=0; i--) {
6021 fptag <<= 2;
6022 if (env->fptags[i]) {
6023 fptag |= 3;
6024 } else {
6025 /* the FPU automatically computes it */
6026 }
6027 }
6028 fp->FTW = fptag;
6029
6030 for(i = 0;i < 8; i++) {
6031 tmp = ST(i);
6032 helper_fstt_raw(tmp, &fp->regs[i].reg[0]);
6033 }
6034 }
6035}
6036
6037//*****************************************************************************
6038#undef lduw
6039#undef ldl
6040#undef ldq
6041#define lduw(a) *(uint16_t *)(a)
6042#define ldl(a) *(uint32_t *)(a)
6043#define ldq(a) *(uint64_t *)(a)
6044//*****************************************************************************
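/* Loads the FPU/SSE state from the raw guest image at ptr into *env,
   accepting either the FXSAVE layout or the legacy FNSAVE layout. */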
6045void save_raw_fp_state(CPUX86State *env, uint8_t *ptr)
6046{
6047 int i, fpus, fptag, nb_xmm_regs;
6048 CPU86_LDouble tmp;
6049 uint8_t *addr;
6050 int data64 = !!(env->hflags & HF_LMA_MASK); /* don't use HF_CS64_MASK here as cs hasn't been synced when this function is called. */
6051
6052 if (env->cpuid_features & CPUID_FXSR)
6053 {
6054 env->fpuc = lduw(ptr);
6055 fpus = lduw(ptr + 2);
6056 fptag = lduw(ptr + 4);
6057 env->fpstt = (fpus >> 11) & 7;
6058 env->fpus = fpus & ~0x3800;
6059 fptag ^= 0xff;
6060 for(i = 0;i < 8; i++) {
6061 env->fptags[i] = ((fptag >> i) & 1);
6062 }
6063
6064 addr = ptr + 0x20;
6065 for(i = 0;i < 8; i++) {
6066 tmp = helper_fldt_raw(addr);
6067 ST(i) = tmp;
6068 addr += 16;
6069 }
6070
6071 if (env->cr[4] & CR4_OSFXSR_MASK) {
6072 /* XXX: finish it, endianness */
6073 env->mxcsr = ldl(ptr + 0x18);
6074 //ldl(ptr + 0x1c);
6075 nb_xmm_regs = 8 << data64;
6076 addr = ptr + 0xa0;
6077 for(i = 0; i < nb_xmm_regs; i++) {
6078#if HC_ARCH_BITS == 32
6079 /* this is a workaround for http://gcc.gnu.org/bugzilla/show_bug.cgi?id=35135 */
6080 env->xmm_regs[i].XMM_L(0) = ldl(addr);
6081 env->xmm_regs[i].XMM_L(1) = ldl(addr + 4);
6082 env->xmm_regs[i].XMM_L(2) = ldl(addr + 8);
6083 env->xmm_regs[i].XMM_L(3) = ldl(addr + 12);
6084#else
6085 env->xmm_regs[i].XMM_Q(0) = ldq(addr);
6086 env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
6087#endif
6088 addr += 16;
6089 }
6090 }
6091 }
6092 else
6093 {
6094 PX86FPUSTATE fp = (PX86FPUSTATE)ptr;
6095 int fptag, j;
6096
6097 env->fpuc = fp->FCW;
6098 env->fpstt = (fp->FSW >> 11) & 7;
6099 env->fpus = fp->FSW & ~0x3800;
6100 fptag = fp->FTW;
6101 for(i = 0;i < 8; i++) {
6102 env->fptags[i] = ((fptag & 3) == 3);
6103 fptag >>= 2;
6104 }
6105 j = env->fpstt;
6106 for(i = 0;i < 8; i++) {
6107 tmp = helper_fldt_raw(&fp->regs[i].reg[0]);
6108 ST(i) = tmp;
6109 }
6110 }
6111}
6112//*****************************************************************************
6113//*****************************************************************************
6114
6115#endif /* VBOX */
6116
6117/* Secure Virtual Machine helpers */
6118
6119#if defined(CONFIG_USER_ONLY)
6120
6121void helper_vmrun(int aflag, int next_eip_addend)
6122{
6123}
6124void helper_vmmcall(void)
6125{
6126}
6127void helper_vmload(int aflag)
6128{
6129}
6130void helper_vmsave(int aflag)
6131{
6132}
6133void helper_stgi(void)
6134{
6135}
6136void helper_clgi(void)
6137{
6138}
6139void helper_skinit(void)
6140{
6141}
6142void helper_invlpga(int aflag)
6143{
6144}
6145void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
6146{
6147}
6148void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
6149{
6150}
6151
6152void helper_svm_check_io(uint32_t port, uint32_t param,
6153 uint32_t next_eip_addend)
6154{
6155}
6156#else
6157
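/* The VMCB keeps segment attributes in the packed 12-bit SVM format; the
   helpers below convert to and from the flat flags word in SegmentCache
   (attrib bits 0-7 come from flags bits 8-15, attrib bits 8-11 from flags
   bits 20-23). */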
6158#ifndef VBOX
6159static inline void svm_save_seg(target_phys_addr_t addr,
6160#else /* VBOX */
6161DECLINLINE(void) svm_save_seg(target_phys_addr_t addr,
6162#endif /* VBOX */
6163 const SegmentCache *sc)
6164{
6165 stw_phys(addr + offsetof(struct vmcb_seg, selector),
6166 sc->selector);
6167 stq_phys(addr + offsetof(struct vmcb_seg, base),
6168 sc->base);
6169 stl_phys(addr + offsetof(struct vmcb_seg, limit),
6170 sc->limit);
6171 stw_phys(addr + offsetof(struct vmcb_seg, attrib),
6172 ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
6173}
6174
6175#ifndef VBOX
6176static inline void svm_load_seg(target_phys_addr_t addr, SegmentCache *sc)
6177#else /* VBOX */
6178DECLINLINE(void) svm_load_seg(target_phys_addr_t addr, SegmentCache *sc)
6179#endif /* VBOX */
6180{
6181 unsigned int flags;
6182
6183 sc->selector = lduw_phys(addr + offsetof(struct vmcb_seg, selector));
6184 sc->base = ldq_phys(addr + offsetof(struct vmcb_seg, base));
6185 sc->limit = ldl_phys(addr + offsetof(struct vmcb_seg, limit));
6186 flags = lduw_phys(addr + offsetof(struct vmcb_seg, attrib));
6187 sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
6188}
6189
6190#ifndef VBOX
6191static inline void svm_load_seg_cache(target_phys_addr_t addr,
6192#else /* VBOX */
6193DECLINLINE(void) svm_load_seg_cache(target_phys_addr_t addr,
6194#endif /* VBOX */
6195 CPUState *env, int seg_reg)
6196{
6197 SegmentCache sc1, *sc = &sc1;
6198 svm_load_seg(addr, sc);
6199 cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
6200 sc->base, sc->limit, sc->flags);
6201}
6202
6203void helper_vmrun(int aflag, int next_eip_addend)
6204{
6205 target_ulong addr;
6206 uint32_t event_inj;
6207 uint32_t int_ctl;
6208
6209 helper_svm_check_intercept_param(SVM_EXIT_VMRUN, 0);
6210
6211 if (aflag == 2)
6212 addr = EAX;
6213 else
6214 addr = (uint32_t)EAX;
6215
6216 if (loglevel & CPU_LOG_TB_IN_ASM)
6217 fprintf(logfile,"vmrun! " TARGET_FMT_lx "\n", addr);
6218
6219 env->vm_vmcb = addr;
6220
6221 /* save the current CPU state in the hsave page */
6222 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
6223 stl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
6224
6225 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base), env->idt.base);
6226 stl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
6227
6228 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
6229 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
6230 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
6231 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
6232 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
6233 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);
6234
6235 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
6236 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags), compute_eflags());
6237
6238 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.es),
6239 &env->segs[R_ES]);
6240 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.cs),
6241 &env->segs[R_CS]);
6242 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ss),
6243 &env->segs[R_SS]);
6244 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ds),
6245 &env->segs[R_DS]);
6246
6247 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip),
6248 EIP + next_eip_addend);
6249 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), ESP);
6250 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), EAX);
6251
6252 /* load the interception bitmaps so we do not need to access the
6253 vmcb in svm mode */
6254 env->intercept = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept));
6255 env->intercept_cr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_read));
6256 env->intercept_cr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_write));
6257 env->intercept_dr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_read));
6258 env->intercept_dr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_write));
6259 env->intercept_exceptions = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_exceptions));
6260
6261 /* enable intercepts */
6262 env->hflags |= HF_SVMI_MASK;
6263
6264 env->tsc_offset = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.tsc_offset));
6265
6266 env->gdt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base));
6267 env->gdt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit));
6268
6269 env->idt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base));
6270 env->idt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit));
6271
6272 /* clear exit_info_2 so we behave like the real hardware */
6273 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);
6274
6275 cpu_x86_update_cr0(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0)));
6276 cpu_x86_update_cr4(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4)));
6277 cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3)));
6278 env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2));
6279 int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
6280 env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
6281 if (int_ctl & V_INTR_MASKING_MASK) {
6282 env->v_tpr = int_ctl & V_TPR_MASK;
6283 env->hflags2 |= HF2_VINTR_MASK;
6284 if (env->eflags & IF_MASK)
6285 env->hflags2 |= HF2_HIF_MASK;
6286 }
6287
6288 cpu_load_efer(env,
6289 ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer)));
6290 env->eflags = 0;
6291 load_eflags(ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags)),
6292 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
6293 CC_OP = CC_OP_EFLAGS;
6294
6295 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.es),
6296 env, R_ES);
6297 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.cs),
6298 env, R_CS);
6299 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ss),
6300 env, R_SS);
6301 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ds),
6302 env, R_DS);
6303
6304 EIP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));
6305 env->eip = EIP;
6306 ESP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp));
6307 EAX = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax));
6308 env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7));
6309 env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6));
6310 cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl)));
6311
6312 /* FIXME: guest state consistency checks */
6313
6314 switch(ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
6315 case TLB_CONTROL_DO_NOTHING:
6316 break;
6317 case TLB_CONTROL_FLUSH_ALL_ASID:
6318 /* FIXME: this is not 100% correct but should work for now */
6319 tlb_flush(env, 1);
6320 break;
6321 }
6322
6323 env->hflags2 |= HF2_GIF_MASK;
6324
6325 if (int_ctl & V_IRQ_MASK) {
6326 env->interrupt_request |= CPU_INTERRUPT_VIRQ;
6327 }
6328
6329 /* maybe we need to inject an event */
6330 event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
6331 if (event_inj & SVM_EVTINJ_VALID) {
6332 uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
6333 uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
6334 uint32_t event_inj_err = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err));
6335 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj & ~SVM_EVTINJ_VALID);
6336
6337 if (loglevel & CPU_LOG_TB_IN_ASM)
6338 fprintf(logfile, "Injecting(%#hx): ", valid_err);
6339 /* FIXME: need to implement valid_err */
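/* Delivery depends on the event type: external interrupts are dispatched
   immediately through do_interrupt(), while NMIs, exceptions and software
   interrupts are recorded as a pending exception and raised when the CPU
   loop is re-entered via cpu_loop_exit(). */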
6340 switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
6341 case SVM_EVTINJ_TYPE_INTR:
6342 env->exception_index = vector;
6343 env->error_code = event_inj_err;
6344 env->exception_is_int = 0;
6345 env->exception_next_eip = -1;
6346 if (loglevel & CPU_LOG_TB_IN_ASM)
6347 fprintf(logfile, "INTR");
6348 /* XXX: is it always correct? */
6349 do_interrupt(vector, 0, 0, 0, 1);
6350 break;
6351 case SVM_EVTINJ_TYPE_NMI:
6352 env->exception_index = EXCP02_NMI;
6353 env->error_code = event_inj_err;
6354 env->exception_is_int = 0;
6355 env->exception_next_eip = EIP;
6356 if (loglevel & CPU_LOG_TB_IN_ASM)
6357 fprintf(logfile, "NMI");
6358 cpu_loop_exit();
6359 break;
6360 case SVM_EVTINJ_TYPE_EXEPT:
6361 env->exception_index = vector;
6362 env->error_code = event_inj_err;
6363 env->exception_is_int = 0;
6364 env->exception_next_eip = -1;
6365 if (loglevel & CPU_LOG_TB_IN_ASM)
6366 fprintf(logfile, "EXEPT");
6367 cpu_loop_exit();
6368 break;
6369 case SVM_EVTINJ_TYPE_SOFT:
6370 env->exception_index = vector;
6371 env->error_code = event_inj_err;
6372 env->exception_is_int = 1;
6373 env->exception_next_eip = EIP;
6374 if (loglevel & CPU_LOG_TB_IN_ASM)
6375 fprintf(logfile, "SOFT");
6376 cpu_loop_exit();
6377 break;
6378 }
6379 if (loglevel & CPU_LOG_TB_IN_ASM)
6380 fprintf(logfile, " %#x %#x\n", env->exception_index, env->error_code);
6381 }
6382}
6383
6384void helper_vmmcall(void)
6385{
6386 helper_svm_check_intercept_param(SVM_EXIT_VMMCALL, 0);
6387 raise_exception(EXCP06_ILLOP);
6388}
6389
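/* VMLOAD and VMSAVE transfer the guest state that VMRUN/#VMEXIT leave alone:
   FS, GS, TR and LDTR plus KernelGSBase, STAR/LSTAR/CSTAR/SFMASK and the
   SYSENTER MSRs. */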
6390void helper_vmload(int aflag)
6391{
6392 target_ulong addr;
6393 helper_svm_check_intercept_param(SVM_EXIT_VMLOAD, 0);
6394
6395 if (aflag == 2)
6396 addr = EAX;
6397 else
6398 addr = (uint32_t)EAX;
6399
6400 if (loglevel & CPU_LOG_TB_IN_ASM)
6401 fprintf(logfile,"vmload! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
6402 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
6403 env->segs[R_FS].base);
6404
6405 svm_load_seg_cache(addr + offsetof(struct vmcb, save.fs),
6406 env, R_FS);
6407 svm_load_seg_cache(addr + offsetof(struct vmcb, save.gs),
6408 env, R_GS);
6409 svm_load_seg(addr + offsetof(struct vmcb, save.tr),
6410 &env->tr);
6411 svm_load_seg(addr + offsetof(struct vmcb, save.ldtr),
6412 &env->ldt);
6413
6414#ifdef TARGET_X86_64
6415 env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base));
6416 env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar));
6417 env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar));
6418 env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask));
6419#endif
6420 env->star = ldq_phys(addr + offsetof(struct vmcb, save.star));
6421 env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs));
6422 env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_esp));
6423 env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_eip));
6424}
6425
6426void helper_vmsave(int aflag)
6427{
6428 target_ulong addr;
6429 helper_svm_check_intercept_param(SVM_EXIT_VMSAVE, 0);
6430
6431 if (aflag == 2)
6432 addr = EAX;
6433 else
6434 addr = (uint32_t)EAX;
6435
6436 if (loglevel & CPU_LOG_TB_IN_ASM)
6437 fprintf(logfile,"vmsave! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
6438 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
6439 env->segs[R_FS].base);
6440
6441 svm_save_seg(addr + offsetof(struct vmcb, save.fs),
6442 &env->segs[R_FS]);
6443 svm_save_seg(addr + offsetof(struct vmcb, save.gs),
6444 &env->segs[R_GS]);
6445 svm_save_seg(addr + offsetof(struct vmcb, save.tr),
6446 &env->tr);
6447 svm_save_seg(addr + offsetof(struct vmcb, save.ldtr),
6448 &env->ldt);
6449
6450#ifdef TARGET_X86_64
6451 stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base), env->kernelgsbase);
6452 stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
6453 stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
6454 stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
6455#endif
6456 stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
6457 stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
6458 stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp), env->sysenter_esp);
6459 stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip), env->sysenter_eip);
6460}
6461
6462void helper_stgi(void)
6463{
6464 helper_svm_check_intercept_param(SVM_EXIT_STGI, 0);
6465 env->hflags2 |= HF2_GIF_MASK;
6466}
6467
6468void helper_clgi(void)
6469{
6470 helper_svm_check_intercept_param(SVM_EXIT_CLGI, 0);
6471 env->hflags2 &= ~HF2_GIF_MASK;
6472}
6473
6474void helper_skinit(void)
6475{
6476 helper_svm_check_intercept_param(SVM_EXIT_SKINIT, 0);
6477 /* XXX: not implemented */
6478 raise_exception(EXCP06_ILLOP);
6479}
6480
6481void helper_invlpga(int aflag)
6482{
6483 target_ulong addr;
6484 helper_svm_check_intercept_param(SVM_EXIT_INVLPGA, 0);
6485
6486 if (aflag == 2)
6487 addr = EAX;
6488 else
6489 addr = (uint32_t)EAX;
6490
6491 /* XXX: could use the ASID to see whether the flush is actually
6492 needed */
6493 tlb_flush_page(env, addr);
6494}
6495
6496void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
6497{
6498 if (likely(!(env->hflags & HF_SVMI_MASK)))
6499 return;
6500#ifndef VBOX
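/* Note: the whole switch below is compiled out for VBOX builds (see the #else
   branch at the end of this function), so the inner #ifndef VBOX blocks that
   spell out the gcc case ranges are effectively dead code here. */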
6501 switch(type) {
6502#ifndef VBOX
6503 case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
6504#else
6505 case SVM_EXIT_READ_CR0: case SVM_EXIT_READ_CR0 + 1: case SVM_EXIT_READ_CR0 + 2:
6506 case SVM_EXIT_READ_CR0 + 3: case SVM_EXIT_READ_CR0 + 4: case SVM_EXIT_READ_CR0 + 5:
6507 case SVM_EXIT_READ_CR0 + 6: case SVM_EXIT_READ_CR0 + 7: case SVM_EXIT_READ_CR0 + 8:
6508#endif
6509 if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
6510 helper_vmexit(type, param);
6511 }
6512 break;
6513#ifndef VBOX
6514 case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
6515#else
6516 case SVM_EXIT_WRITE_CR0: case SVM_EXIT_WRITE_CR0 + 1: case SVM_EXIT_WRITE_CR0 + 2:
6517 case SVM_EXIT_WRITE_CR0 + 3: case SVM_EXIT_WRITE_CR0 + 4: case SVM_EXIT_WRITE_CR0 + 5:
6518 case SVM_EXIT_WRITE_CR0 + 6: case SVM_EXIT_WRITE_CR0 + 7: case SVM_EXIT_WRITE_CR0 + 8:
6519#endif
6520 if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
6521 helper_vmexit(type, param);
6522 }
6523 break;
6524 case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
6525 if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
6526 helper_vmexit(type, param);
6527 }
6528 break;
6529 case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
6530 if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
6531 helper_vmexit(type, param);
6532 }
6533 break;
6534 case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
6535 if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
6536 helper_vmexit(type, param);
6537 }
6538 break;
6539 case SVM_EXIT_MSR:
6540 if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
6541 /* FIXME: this should be read in at vmrun (faster this way?) */
6542 uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.msrpm_base_pa));
6543 uint32_t t0, t1;
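/* The MSR permission map holds two bits (read, write) per MSR; the three
   architectural MSR ranges are packed into consecutive 2K regions of the
   bitmap, so t1 becomes the byte offset and t0 the bit offset within it. */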
6544 switch((uint32_t)ECX) {
6545 case 0 ... 0x1fff:
6546 t0 = (ECX * 2) % 8;
6547 t1 = (ECX * 2) / 8;
6548 break;
6549 case 0xc0000000 ... 0xc0001fff:
6550 t0 = (8192 + ECX - 0xc0000000) * 2;
6551 t1 = (t0 / 8);
6552 t0 %= 8;
6553 break;
6554 case 0xc0010000 ... 0xc0011fff:
6555 t0 = (16384 + ECX - 0xc0010000) * 2;
6556 t1 = (t0 / 8);
6557 t0 %= 8;
6558 break;
6559 default:
6560 helper_vmexit(type, param);
6561 t0 = 0;
6562 t1 = 0;
6563 break;
6564 }
6565 if (ldub_phys(addr + t1) & ((1 << param) << t0))
6566 helper_vmexit(type, param);
6567 }
6568 break;
6569 default:
6570 if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
6571 helper_vmexit(type, param);
6572 }
6573 break;
6574 }
6575#else
6576 AssertMsgFailed(("We shouldn't be here, HWACCM supported differently!"));
6577#endif
6578}
6579
6580void helper_svm_check_io(uint32_t port, uint32_t param,
6581 uint32_t next_eip_addend)
6582{
6583 if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
6584 /* FIXME: this should be read in at vmrun (faster this way?) */
6585 uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.iopm_base_pa));
6586 uint16_t mask = (1 << ((param >> 4) & 7)) - 1;
6587 if (lduw_phys(addr + port / 8) & (mask << (port & 7))) {
6588 /* next EIP */
6589 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
6590 env->eip + next_eip_addend);
6591 helper_vmexit(SVM_EXIT_IOIO, param | (port << 16));
6592 }
6593 }
6594}
6595
6596/* Note: currently only 32 bits of exit_code are used */
6597void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
6598{
6599 uint32_t int_ctl;
6600
6601 if (loglevel & CPU_LOG_TB_IN_ASM)
6602 fprintf(logfile,"vmexit(%08x, %016" PRIx64 ", %016" PRIx64 ", " TARGET_FMT_lx ")!\n",
6603 exit_code, exit_info_1,
6604 ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2)),
6605 EIP);
6606
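/* Propagate a pending interrupt shadow (STI/MOV SS) into the VMCB interrupt
   state and drop it from the emulated CPU. */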
6607 if (env->hflags & HF_INHIBIT_IRQ_MASK) {
6608 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), SVM_INTERRUPT_SHADOW_MASK);
6609 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
6610 } else {
6611 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
6612 }
6613
6614 /* Save the VM state in the vmcb */
6615 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.es),
6616 &env->segs[R_ES]);
6617 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.cs),
6618 &env->segs[R_CS]);
6619 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ss),
6620 &env->segs[R_SS]);
6621 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ds),
6622 &env->segs[R_DS]);
6623
6624 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
6625 stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
6626
6627 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base), env->idt.base);
6628 stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
6629
6630 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
6631 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
6632 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
6633 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
6634 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);
6635
6636 int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
6637 int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
6638 int_ctl |= env->v_tpr & V_TPR_MASK;
6639 if (env->interrupt_request & CPU_INTERRUPT_VIRQ)
6640 int_ctl |= V_IRQ_MASK;
6641 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);
6642
6643 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags), compute_eflags());
6644 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip);
6645 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), ESP);
6646 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), EAX);
6647 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
6648 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
6649 stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl), env->hflags & HF_CPL_MASK);
6650
6651 /* Reload the host state from vm_hsave */
6652 env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
6653 env->hflags &= ~HF_SVMI_MASK;
6654 env->intercept = 0;
6655 env->intercept_exceptions = 0;
6656 env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
6657 env->tsc_offset = 0;
6658
6659 env->gdt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base));
6660 env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit));
6661
6662 env->idt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base));
6663 env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit));
6664
6665 cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0)) | CR0_PE_MASK);
6666 cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4)));
6667 cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3)));
6668 /* we need to set the efer after the crs so the hidden flags get
6669 set properly */
6670 cpu_load_efer(env,
6671 ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer)));
6672 env->eflags = 0;
6673 load_eflags(ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags)),
6674 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
6675 CC_OP = CC_OP_EFLAGS;
6676
6677 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.es),
6678 env, R_ES);
6679 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.cs),
6680 env, R_CS);
6681 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ss),
6682 env, R_SS);
6683 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ds),
6684 env, R_DS);
6685
6686 EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
6687 ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp));
6688 EAX = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax));
6689
6690 env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
6691 env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));
6692
6693 /* other setups */
6694 cpu_x86_set_cpl(env, 0);
6695 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code), exit_code);
6696 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1), exit_info_1);
6697
6698 env->hflags2 &= ~HF2_GIF_MASK;
6699 /* FIXME: Resets the current ASID register to zero (host ASID). */
6700
6701 /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */
6702
6703 /* Clears the TSC_OFFSET inside the processor. */
6704
6705 /* If the host is in PAE mode, the processor reloads the host's PDPEs
6706 from the page table indicated by the host's CR3. If the PDPEs contain
6707 illegal state, the processor causes a shutdown. */
6708
6709 /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
6710 env->cr[0] |= CR0_PE_MASK;
6711 env->eflags &= ~VM_MASK;
6712
6713 /* Disables all breakpoints in the host DR7 register. */
6714
6715 /* Checks the reloaded host state for consistency. */
6716
6717 /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
6718 host's code segment or non-canonical (in the case of long mode), a
6719 #GP fault is delivered inside the host. */
6720
6721 /* remove any pending exception */
6722 env->exception_index = -1;
6723 env->error_code = 0;
6724 env->old_exception = -1;
6725
6726 cpu_loop_exit();
6727}
6728
6729#endif
6730
6731/* MMX/SSE */
6732/* XXX: optimize by storing fptt and fptags in the static cpu state */
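/* A tag byte of 0 marks a register as valid and 1 as empty, so clearing all
   eight tag bytes and the top-of-stack pointer makes every MMX register
   usable. */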
6733void helper_enter_mmx(void)
6734{
6735 env->fpstt = 0;
6736 *(uint32_t *)(env->fptags) = 0;
6737 *(uint32_t *)(env->fptags + 4) = 0;
6738}
6739
6740void helper_emms(void)
6741{
6742 /* set to empty state */
6743 *(uint32_t *)(env->fptags) = 0x01010101;
6744 *(uint32_t *)(env->fptags + 4) = 0x01010101;
6745}
6746
6747/* XXX: suppress */
6748void helper_movq(uint64_t *d, uint64_t *s)
6749{
6750 *d = *s;
6751}
6752
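/* Instantiate the MMX (SHIFT 0, 64-bit) and SSE (SHIFT 1, 128-bit) operation
   templates, followed by the byte/word/long/quad ALU helper templates. */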
6753#define SHIFT 0
6754#include "ops_sse.h"
6755
6756#define SHIFT 1
6757#include "ops_sse.h"
6758
6759#define SHIFT 0
6760#include "helper_template.h"
6761#undef SHIFT
6762
6763#define SHIFT 1
6764#include "helper_template.h"
6765#undef SHIFT
6766
6767#define SHIFT 2
6768#include "helper_template.h"
6769#undef SHIFT
6770
6771#ifdef TARGET_X86_64
6772
6773#define SHIFT 3
6774#include "helper_template.h"
6775#undef SHIFT
6776
6777#endif
6778
6779/* bit operations */
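/* Note: both helpers loop forever for a zero operand; the translated code is
   expected to call them only with a non-zero value (BSF/BSR check for a zero
   source before invoking the helper). */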
6780target_ulong helper_bsf(target_ulong t0)
6781{
6782 int count;
6783 target_ulong res;
6784
6785 res = t0;
6786 count = 0;
6787 while ((res & 1) == 0) {
6788 count++;
6789 res >>= 1;
6790 }
6791 return count;
6792}
6793
6794target_ulong helper_bsr(target_ulong t0)
6795{
6796 int count;
6797 target_ulong res, mask;
6798
6799 res = t0;
6800 count = TARGET_LONG_BITS - 1;
6801 mask = (target_ulong)1 << (TARGET_LONG_BITS - 1);
6802 while ((res & mask) == 0) {
6803 count--;
6804 res <<= 1;
6805 }
6806 return count;
6807}
6808
6809
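/* With CC_OP_EFLAGS the flags have already been materialized into CC_SRC, so
   these helpers simply return it (masked to CF for the carry-only variant). */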
6810static int compute_all_eflags(void)
6811{
6812 return CC_SRC;
6813}
6814
6815static int compute_c_eflags(void)
6816{
6817 return CC_SRC & CC_C;
6818}
6819
6820#ifndef VBOX
6821CCTable cc_table[CC_OP_NB] = {
6822 [CC_OP_DYNAMIC] = { /* should never happen */ },
6823
6824 [CC_OP_EFLAGS] = { compute_all_eflags, compute_c_eflags },
6825
6826 [CC_OP_MULB] = { compute_all_mulb, compute_c_mull },
6827 [CC_OP_MULW] = { compute_all_mulw, compute_c_mull },
6828 [CC_OP_MULL] = { compute_all_mull, compute_c_mull },
6829
6830 [CC_OP_ADDB] = { compute_all_addb, compute_c_addb },
6831 [CC_OP_ADDW] = { compute_all_addw, compute_c_addw },
6832 [CC_OP_ADDL] = { compute_all_addl, compute_c_addl },
6833
6834 [CC_OP_ADCB] = { compute_all_adcb, compute_c_adcb },
6835 [CC_OP_ADCW] = { compute_all_adcw, compute_c_adcw },
6836 [CC_OP_ADCL] = { compute_all_adcl, compute_c_adcl },
6837
6838 [CC_OP_SUBB] = { compute_all_subb, compute_c_subb },
6839 [CC_OP_SUBW] = { compute_all_subw, compute_c_subw },
6840 [CC_OP_SUBL] = { compute_all_subl, compute_c_subl },
6841
6842 [CC_OP_SBBB] = { compute_all_sbbb, compute_c_sbbb },
6843 [CC_OP_SBBW] = { compute_all_sbbw, compute_c_sbbw },
6844 [CC_OP_SBBL] = { compute_all_sbbl, compute_c_sbbl },
6845
6846 [CC_OP_LOGICB] = { compute_all_logicb, compute_c_logicb },
6847 [CC_OP_LOGICW] = { compute_all_logicw, compute_c_logicw },
6848 [CC_OP_LOGICL] = { compute_all_logicl, compute_c_logicl },
6849
6850 [CC_OP_INCB] = { compute_all_incb, compute_c_incl },
6851 [CC_OP_INCW] = { compute_all_incw, compute_c_incl },
6852 [CC_OP_INCL] = { compute_all_incl, compute_c_incl },
6853
6854 [CC_OP_DECB] = { compute_all_decb, compute_c_incl },
6855 [CC_OP_DECW] = { compute_all_decw, compute_c_incl },
6856 [CC_OP_DECL] = { compute_all_decl, compute_c_incl },
6857
6858 [CC_OP_SHLB] = { compute_all_shlb, compute_c_shlb },
6859 [CC_OP_SHLW] = { compute_all_shlw, compute_c_shlw },
6860 [CC_OP_SHLL] = { compute_all_shll, compute_c_shll },
6861
6862 [CC_OP_SARB] = { compute_all_sarb, compute_c_sarl },
6863 [CC_OP_SARW] = { compute_all_sarw, compute_c_sarl },
6864 [CC_OP_SARL] = { compute_all_sarl, compute_c_sarl },
6865
6866#ifdef TARGET_X86_64
6867 [CC_OP_MULQ] = { compute_all_mulq, compute_c_mull },
6868
6869 [CC_OP_ADDQ] = { compute_all_addq, compute_c_addq },
6870
6871 [CC_OP_ADCQ] = { compute_all_adcq, compute_c_adcq },
6872
6873 [CC_OP_SUBQ] = { compute_all_subq, compute_c_subq },
6874
6875 [CC_OP_SBBQ] = { compute_all_sbbq, compute_c_sbbq },
6876
6877 [CC_OP_LOGICQ] = { compute_all_logicq, compute_c_logicq },
6878
6879 [CC_OP_INCQ] = { compute_all_incq, compute_c_incl },
6880
6881 [CC_OP_DECQ] = { compute_all_decq, compute_c_incl },
6882
6883 [CC_OP_SHLQ] = { compute_all_shlq, compute_c_shlq },
6884
6885 [CC_OP_SARQ] = { compute_all_sarq, compute_c_sarl },
6886#endif
6887};
6888#else /* VBOX */
6889/* Sync carefully with cpu.h */
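/* The entries are positional here (presumably because this table must also
   build with compilers that lack C99 designated initializers), so their order
   has to match the CC_OP_* enumeration exactly. */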
6890CCTable cc_table[CC_OP_NB] = {
6891 /* CC_OP_DYNAMIC */ { 0, 0 },
6892
6893 /* CC_OP_EFLAGS */ { compute_all_eflags, compute_c_eflags },
6894
6895 /* CC_OP_MULB */ { compute_all_mulb, compute_c_mull },
6896 /* CC_OP_MULW */ { compute_all_mulw, compute_c_mull },
6897 /* CC_OP_MULL */ { compute_all_mull, compute_c_mull },
6898#ifdef TARGET_X86_64
6899 /* CC_OP_MULQ */ { compute_all_mulq, compute_c_mull },
6900#else
6901 /* CC_OP_MULQ */ { 0, 0 },
6902#endif
6903
6904 /* CC_OP_ADDB */ { compute_all_addb, compute_c_addb },
6905 /* CC_OP_ADDW */ { compute_all_addw, compute_c_addw },
6906 /* CC_OP_ADDL */ { compute_all_addl, compute_c_addl },
6907#ifdef TARGET_X86_64
6908 /* CC_OP_ADDQ */ { compute_all_addq, compute_c_addq },
6909#else
6910 /* CC_OP_ADDQ */ { 0, 0 },
6911#endif
6912
6913 /* CC_OP_ADCB */ { compute_all_adcb, compute_c_adcb },
6914 /* CC_OP_ADCW */ { compute_all_adcw, compute_c_adcw },
6915 /* CC_OP_ADCL */ { compute_all_adcl, compute_c_adcl },
6916#ifdef TARGET_X86_64
6917 /* CC_OP_ADCQ */ { compute_all_adcq, compute_c_adcq },
6918#else
6919 /* CC_OP_ADCQ */ { 0, 0 },
6920#endif
6921
6922 /* CC_OP_SUBB */ { compute_all_subb, compute_c_subb },
6923 /* CC_OP_SUBW */ { compute_all_subw, compute_c_subw },
6924 /* CC_OP_SUBL */ { compute_all_subl, compute_c_subl },
6925#ifdef TARGET_X86_64
6926 /* CC_OP_SUBQ */ { compute_all_subq, compute_c_subq },
6927#else
6928 /* CC_OP_SUBQ */ { 0, 0 },
6929#endif
6930
6931 /* CC_OP_SBBB */ { compute_all_sbbb, compute_c_sbbb },
6932 /* CC_OP_SBBW */ { compute_all_sbbw, compute_c_sbbw },
6933 /* CC_OP_SBBL */ { compute_all_sbbl, compute_c_sbbl },
6934#ifdef TARGET_X86_64
6935 /* CC_OP_SBBQ */ { compute_all_sbbq, compute_c_sbbq },
6936#else
6937 /* CC_OP_SBBQ */ { 0, 0 },
6938#endif
6939
6940 /* CC_OP_LOGICB */ { compute_all_logicb, compute_c_logicb },
6941 /* CC_OP_LOGICW */ { compute_all_logicw, compute_c_logicw },
6942 /* CC_OP_LOGICL */ { compute_all_logicl, compute_c_logicl },
6943#ifdef TARGET_X86_64
6944 /* CC_OP_LOGICQ */ { compute_all_logicq, compute_c_logicq },
6945#else
6946 /* CC_OP_LOGICQ */ { 0, 0 },
6947#endif
6948
6949 /* CC_OP_INCB */ { compute_all_incb, compute_c_incl },
6950 /* CC_OP_INCW */ { compute_all_incw, compute_c_incl },
6951 /* CC_OP_INCL */ { compute_all_incl, compute_c_incl },
6952#ifdef TARGET_X86_64
6953 /* CC_OP_INCQ */ { compute_all_incq, compute_c_incl },
6954#else
6955 /* CC_OP_INCQ */ { 0, 0 },
6956#endif
6957
6958 /* CC_OP_DECB */ { compute_all_decb, compute_c_incl },
6959 /* CC_OP_DECW */ { compute_all_decw, compute_c_incl },
6960 /* CC_OP_DECL */ { compute_all_decl, compute_c_incl },
6961#ifdef TARGET_X86_64
6962 /* CC_OP_DECQ */ { compute_all_decq, compute_c_incl },
6963#else
6964 /* CC_OP_DECQ */ { 0, 0 },
6965#endif
6966
6967 /* CC_OP_SHLB */ { compute_all_shlb, compute_c_shlb },
6968 /* CC_OP_SHLW */ { compute_all_shlw, compute_c_shlw },
6969 /* CC_OP_SHLL */ { compute_all_shll, compute_c_shll },
6970#ifdef TARGET_X86_64
6971 /* CC_OP_SHLQ */ { compute_all_shlq, compute_c_shlq },
6972#else
6973 /* CC_OP_SHLQ */ { 0, 0 },
6974#endif
6975
6976 /* CC_OP_SARB */ { compute_all_sarb, compute_c_sarl },
6977 /* CC_OP_SARW */ { compute_all_sarw, compute_c_sarl },
6978 /* CC_OP_SARL */ { compute_all_sarl, compute_c_sarl },
6979#ifdef TARGET_X86_64
6980 /* CC_OP_SARQ */ { compute_all_sarq, compute_c_sarl},
6981#else
6982 /* CC_OP_SARQ */ { 0, 0 },
6983#endif
6984};
6985#endif /* VBOX */