VirtualBox

source: vbox/trunk/src/recompiler/target-i386/helper.c@9486

Last change on this file since 9486 was 9212, checked in by vboxsync, 16 years ago

Major changes for sizeof(RTGCPTR) == sizeof(uint64_t).
Introduced RCPTRTYPE for pointers valid in raw mode only (RTGCPTR32).

Disabled by default. Enable by adding VBOX_WITH_64_BITS_GUESTS to your LocalConfig.kmk.

  • Property svn:eol-style set to native
File size: 134.3 KB
/*
 * i386 helpers
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#ifdef VBOX
# include <VBox/err.h>
#endif
#include "exec.h"

//#define DEBUG_PCALL

#if 0
#define raise_exception_err(a, b)\
do {\
    if (logfile)\
        fprintf(logfile, "raise_exception line=%d\n", __LINE__);\
    (raise_exception_err)(a, b);\
} while (0)
#endif

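/* PF lookup table: entry i is CC_P when byte value i has an even number
   of set bits, matching the x86 parity flag, which is computed over the
   low 8 bits of the result. */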
const uint8_t parity_table[256] = {
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
};

/* modulo 17 table */
const uint8_t rclw_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 9, 10, 11, 12, 13, 14, 15,
    16, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 9, 10, 11, 12, 13, 14,
};

/* modulo 9 table */
const uint8_t rclb_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 0, 1, 2, 3, 4, 5,
    6, 7, 8, 0, 1, 2, 3, 4,
};

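/* extended precision constants for the x87 load-constant instructions
   (FLDZ, FLD1, FLDPI, FLDLG2, FLDLN2, FLDL2E, FLDL2T) */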
const CPU86_LDouble f15rk[7] =
{
    0.00000000000000000000L,
    1.00000000000000000000L,
    3.14159265358979323851L,  /*pi*/
    0.30102999566398119523L,  /*lg2*/
    0.69314718055994530943L,  /*ln2*/
    1.44269504088896340739L,  /*l2e*/
    3.32192809488736234781L,  /*l2t*/
};

/* thread support */

spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;

void cpu_lock(void)
{
    spin_lock(&global_cpu_lock);
}

void cpu_unlock(void)
{
    spin_unlock(&global_cpu_lock);
}

void cpu_loop_exit(void)
{
    /* NOTE: the registers at this point must be saved by hand because
       longjmp restores them */
    regs_to_env();
    longjmp(env->jmp_env, 1);
}

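/* Fetch the two 32-bit halves of a descriptor: bit 2 of the selector
   picks the LDT or GDT, and the remaining bits (selector & ~7) give the
   byte offset of the 8-byte entry. e1 is the low dword, e2 the high. */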
/* return non-zero on error */
static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
                               int selector)
{
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    if (selector & 0x4)
        dt = &env->ldt;
    else
        dt = &env->gdt;
    index = selector & ~7;
    if ((index + 7) > dt->limit)
        return -1;
    ptr = dt->base + index;
    *e1_ptr = ldl_kernel(ptr);
    *e2_ptr = ldl_kernel(ptr + 4);
    return 0;
}

static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
{
    unsigned int limit;
    limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK)
        limit = (limit << 12) | 0xfff;
    return limit;
}

static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
{
    return ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
}

static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
{
    sc->base = get_seg_base(e1, e2);
    sc->limit = get_seg_limit(e1, e2);
    sc->flags = e2;
}

/* init the segment cache in vm86 mode. */
static inline void load_seg_vm(int seg, int selector)
{
    selector &= 0xffff;
    cpu_x86_load_seg_cache(env, seg, selector,
                           (selector << 4), 0xffff, 0);
}

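/* Read the SS:ESP pair for privilege level 'dpl' from the current TSS:
   at offset 8 * dpl + 4 in a 32-bit TSS (shift == 1), or at offset
   4 * dpl + 2 in a 16-bit TSS (shift == 0). */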
static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
                                       uint32_t *esp_ptr, int dpl)
{
    int type, index, shift;

#if 0
    {
        int i;
        printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
        for(i=0;i<env->tr.limit;i++) {
            printf("%02x ", env->tr.base[i]);
            if ((i & 7) == 7) printf("\n");
        }
        printf("\n");
    }
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if ((type & 7) != 1)
        cpu_abort(env, "invalid tss type %d", type);
    shift = type >> 3;
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    if (shift == 0) {
        *esp_ptr = lduw_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 2);
    } else {
        *esp_ptr = ldl_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 4);
    }
}

/* XXX: merge with load_seg() */
static void tss_load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int rpl, dpl, cpl;

    if ((selector & 0xfffc) != 0) {
        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        cpl = env->hflags & HF_CPL_MASK;
        if (seg_reg == R_CS) {
            if (!(e2 & DESC_CS_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* XXX: is it correct ? */
            if (dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if ((e2 & DESC_C_MASK) && dpl > rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else if (seg_reg == R_SS) {
            /* SS must be writable data */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if (dpl != cpl || dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else {
            /* not readable code */
            if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* if data or non-conforming code, check the rights */
            if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            }
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
    } else {
        if (seg_reg == R_SS || seg_reg == R_CS)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
    }
}

#define SWITCH_TSS_JMP  0
#define SWITCH_TSS_IRET 1
#define SWITCH_TSS_CALL 2

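/* Hardware task switch: resolve a possible task gate, validate the new
   TSS descriptor, save the outgoing context into the old TSS, adjust the
   busy bits according to the switch source (cleared for JMP/IRET, set for
   JMP/CALL), then load CR3, EFLAGS and the registers from the new TSS and
   finally reload the segment registers with full checks. */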
/* XXX: restore CPU state in registers (PowerPC case) */
static void switch_tss(int tss_selector,
                       uint32_t e1, uint32_t e2, int source,
                       uint32_t next_eip)
{
    int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
    target_ulong tss_base;
    uint32_t new_regs[8], new_segs[6];
    uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
    uint32_t old_eflags, eflags_mask;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
#ifdef DEBUG_PCALL
    if (loglevel & CPU_LOG_PCALL)
        fprintf(logfile, "switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);
#endif

#if defined(VBOX) && defined(DEBUG)
    printf("switch_tss %x %x %x %d %08x\n", tss_selector, e1, e2, source, next_eip);
#endif

    /* if task gate, we read the TSS segment and we load it */
    if (type == 5) {
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
        tss_selector = e1 >> 16;
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        if (e2 & DESC_S_MASK)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((type & 7) != 1)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
    }

    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);

    if (type & 8)
        tss_limit_max = 103;
    else
        tss_limit_max = 43;
    tss_limit = get_seg_limit(e1, e2);
    tss_base = get_seg_base(e1, e2);
    if ((tss_selector & 4) != 0 ||
        tss_limit < tss_limit_max)
        raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
    old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if (old_type & 8)
        old_tss_limit_max = 103;
    else
        old_tss_limit_max = 43;

    /* read all the registers from the new TSS */
    if (type & 8) {
        /* 32 bit */
        new_cr3 = ldl_kernel(tss_base + 0x1c);
        new_eip = ldl_kernel(tss_base + 0x20);
        new_eflags = ldl_kernel(tss_base + 0x24);
        for(i = 0; i < 8; i++)
            new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
        for(i = 0; i < 6; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x60);
        new_trap = ldl_kernel(tss_base + 0x64);
    } else {
        /* 16 bit */
        new_cr3 = 0;
        new_eip = lduw_kernel(tss_base + 0x0e);
        new_eflags = lduw_kernel(tss_base + 0x10);
        for(i = 0; i < 8; i++)
            new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
        for(i = 0; i < 4; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x2a);
        new_segs[R_FS] = 0;
        new_segs[R_GS] = 0;
        new_trap = 0;
    }

    /* NOTE: we must avoid memory exceptions during the task switch,
       so we make dummy accesses beforehand */
    /* XXX: it can still fail in some cases, so a bigger hack is
       necessary to validate the TLB after having done the accesses */

    v1 = ldub_kernel(env->tr.base);
    v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
    stb_kernel(env->tr.base, v1);
    stb_kernel(env->tr.base + old_tss_limit_max, v2);

    /* clear busy bit (it is restartable) */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
        target_ulong ptr;
        uint32_t e2;
        ptr = env->gdt.base + (env->tr.selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 &= ~DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    old_eflags = compute_eflags();
    if (source == SWITCH_TSS_IRET)
        old_eflags &= ~NT_MASK;

    /* save the current state in the old TSS */
    if (type & 8) {
        /* 32 bit */
        stl_kernel(env->tr.base + 0x20, next_eip);
        stl_kernel(env->tr.base + 0x24, old_eflags);
        stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
        stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
        stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
        stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
        stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
        stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
        stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
        stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
        for(i = 0; i < 6; i++)
            stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
#if defined(VBOX) && defined(DEBUG)
        printf("TSS 32 bits switch\n");
        printf("Saving CS=%08X\n", env->segs[R_CS].selector);
#endif
    } else {
        /* 16 bit */
        stw_kernel(env->tr.base + 0x0e, next_eip);
        stw_kernel(env->tr.base + 0x10, old_eflags);
        stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
        stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
        stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
        stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
        stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
        stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
        stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
        stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
        for(i = 0; i < 4; i++)
            stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
    }

    /* from now on, if an exception occurs, it will occur in the next
       task's context */

    if (source == SWITCH_TSS_CALL) {
        stw_kernel(tss_base, env->tr.selector);
        new_eflags |= NT_MASK;
    }

    /* set busy bit */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
        target_ulong ptr;
        uint32_t e2;
        ptr = env->gdt.base + (tss_selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }

    /* set the new CPU state */
    /* from this point, any exception which occurs can give problems */
    env->cr[0] |= CR0_TS_MASK;
    env->hflags |= HF_TS_MASK;
    env->tr.selector = tss_selector;
    env->tr.base = tss_base;
    env->tr.limit = tss_limit;
    env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;

    if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
        cpu_x86_update_cr3(env, new_cr3);
    }

    /* load all registers without an exception, then reload them with
       possible exceptions */
    env->eip = new_eip;
    eflags_mask = TF_MASK | AC_MASK | ID_MASK |
        IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
    if (!(type & 8))
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
    /* XXX: what to do in 16 bit case ? */
    EAX = new_regs[0];
    ECX = new_regs[1];
    EDX = new_regs[2];
    EBX = new_regs[3];
    ESP = new_regs[4];
    EBP = new_regs[5];
    ESI = new_regs[6];
    EDI = new_regs[7];
    if (new_eflags & VM_MASK) {
        for(i = 0; i < 6; i++)
            load_seg_vm(i, new_segs[i]);
        /* in vm86, CPL is always 3 */
        cpu_x86_set_cpl(env, 3);
    } else {
        /* CPL is set to the RPL of CS */
        cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
        /* load just the selectors first, as the rest may trigger exceptions */
        for(i = 0; i < 6; i++)
            cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
    }

    env->ldt.selector = new_ldt & ~4;
    env->ldt.base = 0;
    env->ldt.limit = 0;
    env->ldt.flags = 0;

    /* load the LDT */
    if (new_ldt & 4)
        raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);

    if ((new_ldt & 0xfffc) != 0) {
        dt = &env->gdt;
        index = new_ldt & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
    }

    /* load the segments */
    if (!(new_eflags & VM_MASK)) {
        tss_load_seg(R_CS, new_segs[R_CS]);
        tss_load_seg(R_SS, new_segs[R_SS]);
        tss_load_seg(R_ES, new_segs[R_ES]);
        tss_load_seg(R_DS, new_segs[R_DS]);
        tss_load_seg(R_FS, new_segs[R_FS]);
        tss_load_seg(R_GS, new_segs[R_GS]);
    }

    /* check that EIP is in the CS segment limits */
    if (new_eip > env->segs[R_CS].limit) {
        /* XXX: different exception if CALL ? */
        raise_exception_err(EXCP0D_GPF, 0);
    }
}

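/* I/O permission check: the 16-bit field at TSS offset 0x66 holds the
   start of the I/O bitmap; an access of 'size' bytes at port 'addr' is
   allowed only if every corresponding bitmap bit is clear. */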
/* check if Port I/O is allowed in TSS */
static inline void check_io(int addr, int size)
{
    int io_offset, val, mask;

    /* TSS must be a valid 32 bit one */
    if (!(env->tr.flags & DESC_P_MASK) ||
        ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
        env->tr.limit < 103)
        goto fail;
    io_offset = lduw_kernel(env->tr.base + 0x66);
    io_offset += (addr >> 3);
    /* Note: the check needs two bytes */
    if ((io_offset + 1) > env->tr.limit)
        goto fail;
    val = lduw_kernel(env->tr.base + io_offset);
    val >>= (addr & 7);
    mask = (1 << size) - 1;
    /* all bits must be zero to allow the I/O */
    if ((val & mask) != 0) {
    fail:
        raise_exception_err(EXCP0D_GPF, 0);
    }
}

void check_iob_T0(void)
{
    check_io(T0, 1);
}

void check_iow_T0(void)
{
    check_io(T0, 2);
}

void check_iol_T0(void)
{
    check_io(T0, 4);
}

void check_iob_DX(void)
{
    check_io(EDX & 0xffff, 1);
}

void check_iow_DX(void)
{
    check_io(EDX & 0xffff, 2);
}

void check_iol_DX(void)
{
    check_io(EDX & 0xffff, 4);
}

static inline unsigned int get_sp_mask(unsigned int e2)
{
    if (e2 & DESC_B_MASK)
        return 0xffffffff;
    else
        return 0xffff;
}

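/* Update ESP without touching bits outside the stack size: a 16-bit
   stack only replaces SP, a 32-bit stack zero-extends into ESP, and on
   x86-64 any other mask stores the full value into RSP. */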
#ifdef TARGET_X86_64
#define SET_ESP(val, sp_mask)\
do {\
    if ((sp_mask) == 0xffff)\
        ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
    else if ((sp_mask) == 0xffffffffLL)\
        ESP = (uint32_t)(val);\
    else\
        ESP = (val);\
} while (0)
#else
#define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
#endif

/* XXX: add an is_user flag to have proper security support */
#define PUSHW(ssp, sp, sp_mask, val)\
{\
    sp -= 2;\
    stw_kernel((ssp) + (sp & (sp_mask)), (val));\
}

#define PUSHL(ssp, sp, sp_mask, val)\
{\
    sp -= 4;\
    stl_kernel((ssp) + (sp & (sp_mask)), (val));\
}

#define POPW(ssp, sp, sp_mask, val)\
{\
    val = lduw_kernel((ssp) + (sp & (sp_mask)));\
    sp += 2;\
}

#define POPL(ssp, sp, sp_mask, val)\
{\
    val = (uint32_t)ldl_kernel((ssp) + (sp & (sp_mask)));\
    sp += 4;\
}

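/* Protected-mode interrupt/exception delivery: look up the IDT entry,
   hand task gates to switch_tss(), validate the target code segment,
   switch to the inner stack from the TSS on a privilege change, push
   the return frame (and error code, if any) on the destination stack
   and load CS:EIP from the gate. */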
/* protected mode interrupt */
static void do_interrupt_protected(int intno, int is_int, int error_code,
                                   unsigned int next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int type, dpl, selector, ss_dpl, cpl;
    int has_error_code, new_stack, shift;
    uint32_t e1, e2, offset, ss, esp, ss_e1, ss_e2;
    uint32_t old_eip, sp_mask;

#ifdef VBOX
    if (remR3NotifyTrap(env, intno, error_code, next_eip) != VINF_SUCCESS)
        cpu_loop_exit();
#endif

    has_error_code = 0;
    if (!is_int && !is_hw) {
        switch(intno) {
        case 8:
        case 10:
        case 11:
        case 12:
        case 13:
        case 14:
        case 17:
            has_error_code = 1;
            break;
        }
    }
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    dt = &env->idt;
    if (intno * 8 + 7 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 8;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 5: /* task gate */
        /* must do that check here to return the correct error code */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
        switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
        if (has_error_code) {
            int type;
            uint32_t mask;
            /* push the error code */
            type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
            shift = type >> 3;
            if (env->segs[R_SS].flags & DESC_B_MASK)
                mask = 0xffffffff;
            else
                mask = 0xffff;
            esp = (ESP - (2 << shift)) & mask;
            ssp = env->segs[R_SS].base + esp;
            if (shift)
                stl_kernel(ssp, error_code);
            else
                stw_kernel(ssp, error_code);
            SET_ESP(esp, mask);
        }
        return;
    case 6: /* 286 interrupt gate */
    case 7: /* 286 trap gate */
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
    selector = e1 >> 16;
    offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_C_MASK) && dpl < cpl) {
        /* to inner privilege */
        get_ss_esp_from_tss(&ss, &esp, dpl);
        if ((ss & 0xfffc) == 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if ((ss & 3) != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (load_segment(&ss_e1, &ss_e2, ss) != 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
        if (ss_dpl != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_P_MASK))
#ifdef VBOX /* See page 3-477 of 253666.pdf */
            raise_exception_err(EXCP0C_STACK, ss & 0xfffc);
#else
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
#endif
        new_stack = 1;
        sp_mask = get_sp_mask(ss_e2);
        ssp = get_seg_base(ss_e1, ss_e2);
#if defined(VBOX) && defined(DEBUG)
        printf("new stack %04X:%08X gate dpl=%d\n", ss, esp, dpl);
#endif
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
        ssp = env->segs[R_SS].base;
        esp = ESP;
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        sp_mask = 0; /* avoid warning */
        ssp = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    shift = type >> 3;

#if 0
    /* XXX: check that enough room is available */
    push_size = 6 + (new_stack << 2) + (has_error_code << 1);
    if (env->eflags & VM_MASK)
        push_size += 8;
    push_size <<= shift;
#endif
    if (shift == 1) {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHL(ssp, esp, sp_mask, ESP);
        }
        PUSHL(ssp, esp, sp_mask, compute_eflags());
        PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHL(ssp, esp, sp_mask, error_code);
        }
    } else {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHW(ssp, esp, sp_mask, ESP);
        }
        PUSHW(ssp, esp, sp_mask, compute_eflags());
        PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHW(ssp, esp, sp_mask, error_code);
        }
    }

    if (new_stack) {
        if (env->eflags & VM_MASK) {
            cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
        }
        ss = (ss & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss,
                               ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
    }
    SET_ESP(esp, sp_mask);

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                           get_seg_base(e1, e2),
                           get_seg_limit(e1, e2),
                           e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gates clear the IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}

#ifdef VBOX

/* check if VME interrupt redirection is enabled in TSS */
static inline bool is_vme_irq_redirected(int intno)
{
    int io_offset, intredir_offset;
    unsigned char val, mask;

    /* TSS must be a valid 32 bit one */
    if (!(env->tr.flags & DESC_P_MASK) ||
        ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
        env->tr.limit < 103)
        goto fail;
    io_offset = lduw_kernel(env->tr.base + 0x66);
    /* the virtual interrupt redirection bitmap is located below the io bitmap */
    intredir_offset = io_offset - 0x20;

    intredir_offset += (intno >> 3);
    if ((intredir_offset) > env->tr.limit)
        goto fail;

    val = ldub_kernel(env->tr.base + intredir_offset);
    mask = 1 << (unsigned char)(intno & 7);

    /* bit set means no redirection. */
    if ((val & mask) != 0) {
        return false;
    }
    return true;

fail:
    raise_exception_err(EXCP0D_GPF, 0);
    return true;
}

/* V86 mode software interrupt with CR4.VME=1 */
static void do_soft_interrupt_vme(int intno, int error_code, unsigned int next_eip)
{
    target_ulong ptr, ssp;
    int selector;
    uint32_t offset, esp;
    uint32_t old_cs, old_eflags;
    uint32_t iopl;

    iopl = ((env->eflags >> IOPL_SHIFT) & 3);

    if (!is_vme_irq_redirected(intno))
    {
        if (iopl == 3)
            /* normal protected mode handler call */
            return do_interrupt_protected(intno, 1, error_code, next_eip, 0);
        else
            raise_exception_err(EXCP0D_GPF, 0);
    }

    /* virtual mode idt is at linear address 0 */
    ptr = 0 + intno * 4;
    offset = lduw_kernel(ptr);
    selector = lduw_kernel(ptr + 2);
    esp = ESP;
    ssp = env->segs[R_SS].base;
    old_cs = env->segs[R_CS].selector;

    old_eflags = compute_eflags();
    if (iopl < 3)
    {
        /* copy VIF into IF and set IOPL to 3 */
        if (env->eflags & VIF_MASK)
            old_eflags |= IF_MASK;
        else
            old_eflags &= ~IF_MASK;

        old_eflags |= (3 << IOPL_SHIFT);
    }

    /* XXX: use SS segment size ? */
    PUSHW(ssp, esp, 0xffff, old_eflags);
    PUSHW(ssp, esp, 0xffff, old_cs);
    PUSHW(ssp, esp, 0xffff, next_eip);

    /* update processor state */
    ESP = (ESP & ~0xffff) | (esp & 0xffff);
    env->eip = offset;
    env->segs[R_CS].selector = selector;
    env->segs[R_CS].base = (selector << 4);
    env->eflags &= ~(TF_MASK | RF_MASK);

    if (iopl < 3)
        env->eflags &= ~VIF_MASK;
    else
        env->eflags &= ~IF_MASK;
}
#endif /* VBOX */

#ifdef TARGET_X86_64

#define PUSHQ(sp, val)\
{\
    sp -= 8;\
    stq_kernel(sp, (val));\
}

#define POPQ(sp, val)\
{\
    val = ldq_kernel(sp);\
    sp += 8;\
}

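/* In the 64-bit TSS, RSPn is stored at offset 4 + 8 * n and IST n at
   offset 0x24 + 8 * (n - 1), so callers pass level = dpl for an RSP
   entry and level = ist + 3 for an IST entry. */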
static inline target_ulong get_rsp_from_tss(int level)
{
    int index;

#if 0
    printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
           env->tr.base, env->tr.limit);
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    index = 8 * level + 4;
    if ((index + 7) > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    return ldq_kernel(env->tr.base + index);
}

/* 64 bit interrupt */
static void do_interrupt64(int intno, int is_int, int error_code,
                           target_ulong next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr;
    int type, dpl, selector, cpl, ist;
    int has_error_code, new_stack;
    uint32_t e1, e2, e3, ss;
    target_ulong old_eip, esp, offset;

    has_error_code = 0;
    if (!is_int && !is_hw) {
        switch(intno) {
        case 8:
        case 10:
        case 11:
        case 12:
        case 13:
        case 14:
        case 17:
            has_error_code = 1;
            break;
        }
    }
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    dt = &env->idt;
    if (intno * 16 + 15 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    ptr = dt->base + intno * 16;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    e3 = ldl_kernel(ptr + 8);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2);
    selector = e1 >> 16;
    offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    ist = e2 & 7;
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
        /* to inner privilege */
        if (ist != 0)
            esp = get_rsp_from_tss(ist + 3);
        else
            esp = get_rsp_from_tss(dpl);
        esp &= ~0xfLL; /* align stack */
        ss = 0;
        new_stack = 1;
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        if (ist != 0)
            esp = get_rsp_from_tss(ist + 3);
        else
            esp = ESP;
        esp &= ~0xfLL; /* align stack */
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    PUSHQ(esp, env->segs[R_SS].selector);
    PUSHQ(esp, ESP);
    PUSHQ(esp, compute_eflags());
    PUSHQ(esp, env->segs[R_CS].selector);
    PUSHQ(esp, old_eip);
    if (has_error_code) {
        PUSHQ(esp, error_code);
    }

    if (new_stack) {
        ss = 0 | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
    }
    ESP = esp;

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                           get_seg_base(e1, e2),
                           get_seg_limit(e1, e2),
                           e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gates clear the IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}
#endif

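/* SYSCALL: the target CS selector comes from STAR[47:32] (SS is that
   selector + 8). In long mode the return RIP is saved in RCX and RFLAGS
   in R11, and RIP is loaded from LSTAR (64-bit caller) or CSTAR
   (compatibility mode); in legacy mode EIP comes from STAR[31:0]. */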
void helper_syscall(int next_eip_addend)
{
    int selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    selector = (env->star >> 32) & 0xffff;
#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        int code64;

        ECX = env->eip + next_eip_addend;
        env->regs[11] = compute_eflags();

        code64 = env->hflags & HF_CS64_MASK;

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~env->fmask;
        if (code64)
            env->eip = env->lstar;
        else
            env->eip = env->cstar;
    } else
#endif
    {
        ECX = (uint32_t)(env->eip + next_eip_addend);

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
        env->eip = (uint32_t)env->star;
    }
}

void helper_sysret(int dflag)
{
    int cpl, selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    cpl = env->hflags & HF_CPL_MASK;
    if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    selector = (env->star >> 48) & 0xffff;
#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        if (dflag == 2) {
            cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                                   DESC_L_MASK);
            env->eip = ECX;
        } else {
            cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
            env->eip = (uint32_t)ECX;
        }
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
                    IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
        cpu_x86_set_cpl(env, 3);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        env->eip = (uint32_t)ECX;
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags |= IF_MASK;
        cpu_x86_set_cpl(env, 3);
    }
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        if (env->hflags & HF_LMA_MASK)
            CC_OP = CC_OP_EFLAGS;
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}

#ifdef VBOX
/**
 * Checks and processes external VMM events.
 * Called by op_check_external_event() when any of the flags is set and can be serviced.
 */
void helper_external_event(void)
{
#if defined(RT_OS_DARWIN) && defined(VBOX_STRICT)
    uintptr_t uESP;
    __asm__ __volatile__("movl %%esp, %0" : "=r" (uESP));
    AssertMsg(!(uESP & 15), ("esp=%#p\n", uESP));
#endif
    if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD)
    {
        ASMAtomicAndS32(&env->interrupt_request, ~CPU_INTERRUPT_EXTERNAL_HARD);
        cpu_interrupt(env, CPU_INTERRUPT_HARD);
    }
    if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_EXIT)
    {
        ASMAtomicAndS32(&env->interrupt_request, ~CPU_INTERRUPT_EXTERNAL_EXIT);
        cpu_interrupt(env, CPU_INTERRUPT_EXIT);
    }
    if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_DMA)
    {
        ASMAtomicAndS32(&env->interrupt_request, ~CPU_INTERRUPT_EXTERNAL_DMA);
        remR3DmaRun(env);
    }
    if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_TIMER)
    {
        ASMAtomicAndS32(&env->interrupt_request, ~CPU_INTERRUPT_EXTERNAL_TIMER);
        remR3TimersRun(env);
    }
}

/* helper for recording call instruction addresses for later scanning */
void helper_record_call()
{
    if (    !(env->state & CPU_RAW_RING0)
        &&  (env->cr[0] & CR0_PG_MASK)
        &&  !(env->eflags & X86_EFL_IF))
        remR3RecordCall(env);
}
#endif /* VBOX */

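/* Real-mode delivery uses the 8086 scheme: each IVT entry is a 4-byte
   IP:CS pair at idt.base + intno * 4, and only FLAGS, CS and IP are
   pushed on the 16-bit stack. */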
/* real mode interrupt */
static void do_interrupt_real(int intno, int is_int, int error_code,
                              unsigned int next_eip)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int selector;
    uint32_t offset, esp;
    uint32_t old_cs, old_eip;

    /* real mode (simpler!) */
    dt = &env->idt;
    if (intno * 4 + 3 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 4;
    offset = lduw_kernel(ptr);
    selector = lduw_kernel(ptr + 2);
    esp = ESP;
    ssp = env->segs[R_SS].base;
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;
    old_cs = env->segs[R_CS].selector;
    /* XXX: use SS segment size ? */
    PUSHW(ssp, esp, 0xffff, compute_eflags());
    PUSHW(ssp, esp, 0xffff, old_cs);
    PUSHW(ssp, esp, 0xffff, old_eip);

    /* update processor state */
    ESP = (ESP & ~0xffff) | (esp & 0xffff);
    env->eip = offset;
    env->segs[R_CS].selector = selector;
    env->segs[R_CS].base = (selector << 4);
    env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
}

/* fake user mode interrupt */
void do_interrupt_user(int intno, int is_int, int error_code,
                       target_ulong next_eip)
{
    SegmentCache *dt;
    target_ulong ptr;
    int dpl, cpl;
    uint32_t e2;

    dt = &env->idt;
    ptr = dt->base + (intno * 8);
    e2 = ldl_kernel(ptr + 4);

    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);

    /* Since we emulate only user space, we cannot do more than exit the
       emulation with the suitable exception and error code */
    if (is_int)
        EIP = next_eip;
}

/*
 * Begin execution of an interrupt. is_int is TRUE if coming from
 * the int instruction. next_eip is the EIP value AFTER the interrupt
 * instruction. It is only relevant if is_int is TRUE.
 */
void do_interrupt(int intno, int is_int, int error_code,
                  target_ulong next_eip, int is_hw)
{
    if (loglevel & CPU_LOG_INT) {
        if ((env->cr[0] & CR0_PE_MASK)) {
            static int count;
            fprintf(logfile, "%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
                    count, intno, error_code, is_int,
                    env->hflags & HF_CPL_MASK,
                    env->segs[R_CS].selector, EIP,
                    (int)env->segs[R_CS].base + EIP,
                    env->segs[R_SS].selector, ESP);
            if (intno == 0x0e) {
                fprintf(logfile, " CR2=" TARGET_FMT_lx, env->cr[2]);
            } else {
                fprintf(logfile, " EAX=" TARGET_FMT_lx, EAX);
            }
            fprintf(logfile, "\n");
            cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
#if 0
            {
                int i;
                uint8_t *ptr;
                fprintf(logfile, " code=");
                ptr = env->segs[R_CS].base + env->eip;
                for(i = 0; i < 16; i++) {
                    fprintf(logfile, " %02x", ldub(ptr + i));
                }
                fprintf(logfile, "\n");
            }
#endif
            count++;
        }
    }
    if (env->cr[0] & CR0_PE_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
        } else
#endif
        {
#ifdef VBOX
            /* int xx *, v86 code and VME enabled? */
            if (    (env->eflags & VM_MASK)
                &&  (env->cr[4] & CR4_VME_MASK)
                &&  is_int
                &&  !is_hw
                &&  env->eip + 1 != next_eip /* single byte int 3 goes straight to the protected mode handler */
               )
                do_soft_interrupt_vme(intno, error_code, next_eip);
            else
#endif /* VBOX */
                do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
        }
    } else {
        do_interrupt_real(intno, is_int, error_code, next_eip);
    }
}

/*
 * Signal an interrupt. It is executed in the main CPU loop.
 * is_int is TRUE if coming from the int instruction. next_eip is the
 * EIP value AFTER the interrupt instruction. It is only relevant if
 * is_int is TRUE.
 */
void raise_interrupt(int intno, int is_int, int error_code,
                     int next_eip_addend)
{
#if defined(VBOX) && defined(DEBUG)
    NOT_DMIK(Log2(("raise_interrupt: %x %x %x %08x\n", intno, is_int, error_code, env->eip + next_eip_addend)));
#endif
    env->exception_index = intno;
    env->error_code = error_code;
    env->exception_is_int = is_int;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit();
}

/* same as raise_exception_err, but do not restore global registers */
static void raise_exception_err_norestore(int exception_index, int error_code)
{
    env->exception_index = exception_index;
    env->error_code = error_code;
    env->exception_is_int = 0;
    env->exception_next_eip = 0;
    longjmp(env->jmp_env, 1);
}

/* shortcuts to generate exceptions */

void (raise_exception_err)(int exception_index, int error_code)
{
    raise_interrupt(exception_index, 0, error_code, 0);
}

void raise_exception(int exception_index)
{
    raise_interrupt(exception_index, 0, 0, 0);
}

/* SMM support */

#if defined(CONFIG_USER_ONLY)

void do_smm_enter(void)
{
}

void helper_rsm(void)
{
}

#else

#ifdef TARGET_X86_64
#define SMM_REVISION_ID 0x00020064
#else
#define SMM_REVISION_ID 0x00020000
#endif

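/* Entering SMM saves the CPU state into the SMRAM state save area at
   smbase + 0x8000 (the 0x7exx/0x7fxx offsets below) and restarts
   execution at smbase + 0x8000 in a flat, real-mode-like environment;
   RSM restores the saved state. Under VBOX both paths abort, as SMM is
   not emulated here. */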
void do_smm_enter(void)
{
#ifdef VBOX
    cpu_abort(env, "do_smm_enter");
#else /* !VBOX */
    target_ulong sm_state;
    SegmentCache *dt;
    int i, offset;

    if (loglevel & CPU_LOG_INT) {
        fprintf(logfile, "SMM: enter\n");
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
    }

    env->hflags |= HF_SMM_MASK;
    cpu_smm_update(env);

    sm_state = env->smbase + 0x8000;

#ifdef TARGET_X86_64
    for(i = 0; i < 6; i++) {
        dt = &env->segs[i];
        offset = 0x7e00 + i * 16;
        stw_phys(sm_state + offset, dt->selector);
        stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
        stl_phys(sm_state + offset + 4, dt->limit);
        stq_phys(sm_state + offset + 8, dt->base);
    }

    stq_phys(sm_state + 0x7e68, env->gdt.base);
    stl_phys(sm_state + 0x7e64, env->gdt.limit);

    stw_phys(sm_state + 0x7e70, env->ldt.selector);
    stq_phys(sm_state + 0x7e78, env->ldt.base);
    stl_phys(sm_state + 0x7e74, env->ldt.limit);
    stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7e88, env->idt.base);
    stl_phys(sm_state + 0x7e84, env->idt.limit);

    stw_phys(sm_state + 0x7e90, env->tr.selector);
    stq_phys(sm_state + 0x7e98, env->tr.base);
    stl_phys(sm_state + 0x7e94, env->tr.limit);
    stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7ed0, env->efer);

    stq_phys(sm_state + 0x7ff8, EAX);
    stq_phys(sm_state + 0x7ff0, ECX);
    stq_phys(sm_state + 0x7fe8, EDX);
    stq_phys(sm_state + 0x7fe0, EBX);
    stq_phys(sm_state + 0x7fd8, ESP);
    stq_phys(sm_state + 0x7fd0, EBP);
    stq_phys(sm_state + 0x7fc8, ESI);
    stq_phys(sm_state + 0x7fc0, EDI);
    for(i = 8; i < 16; i++)
        stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
    stq_phys(sm_state + 0x7f78, env->eip);
    stl_phys(sm_state + 0x7f70, compute_eflags());
    stl_phys(sm_state + 0x7f68, env->dr[6]);
    stl_phys(sm_state + 0x7f60, env->dr[7]);

    stl_phys(sm_state + 0x7f48, env->cr[4]);
    stl_phys(sm_state + 0x7f50, env->cr[3]);
    stl_phys(sm_state + 0x7f58, env->cr[0]);

    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7f00, env->smbase);
#else
    stl_phys(sm_state + 0x7ffc, env->cr[0]);
    stl_phys(sm_state + 0x7ff8, env->cr[3]);
    stl_phys(sm_state + 0x7ff4, compute_eflags());
    stl_phys(sm_state + 0x7ff0, env->eip);
    stl_phys(sm_state + 0x7fec, EDI);
    stl_phys(sm_state + 0x7fe8, ESI);
    stl_phys(sm_state + 0x7fe4, EBP);
    stl_phys(sm_state + 0x7fe0, ESP);
    stl_phys(sm_state + 0x7fdc, EBX);
    stl_phys(sm_state + 0x7fd8, EDX);
    stl_phys(sm_state + 0x7fd4, ECX);
    stl_phys(sm_state + 0x7fd0, EAX);
    stl_phys(sm_state + 0x7fcc, env->dr[6]);
    stl_phys(sm_state + 0x7fc8, env->dr[7]);

    stl_phys(sm_state + 0x7fc4, env->tr.selector);
    stl_phys(sm_state + 0x7f64, env->tr.base);
    stl_phys(sm_state + 0x7f60, env->tr.limit);
    stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7fc0, env->ldt.selector);
    stl_phys(sm_state + 0x7f80, env->ldt.base);
    stl_phys(sm_state + 0x7f7c, env->ldt.limit);
    stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7f74, env->gdt.base);
    stl_phys(sm_state + 0x7f70, env->gdt.limit);

    stl_phys(sm_state + 0x7f58, env->idt.base);
    stl_phys(sm_state + 0x7f54, env->idt.limit);

    for(i = 0; i < 6; i++) {
        dt = &env->segs[i];
        if (i < 3)
            offset = 0x7f84 + i * 12;
        else
            offset = 0x7f2c + (i - 3) * 12;
        stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
        stl_phys(sm_state + offset + 8, dt->base);
        stl_phys(sm_state + offset + 4, dt->limit);
        stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
    }
    stl_phys(sm_state + 0x7f14, env->cr[4]);

    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7ef8, env->smbase);
#endif
    /* init SMM cpu state */

#ifdef TARGET_X86_64
    env->efer = 0;
    env->hflags &= ~HF_LMA_MASK;
#endif
    load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = 0x00008000;
    cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
                           0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);

    cpu_x86_update_cr0(env,
                       env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK));
    cpu_x86_update_cr4(env, 0);
    env->dr[7] = 0x00000400;
    CC_OP = CC_OP_EFLAGS;
#endif /* VBOX */
}

void helper_rsm(void)
{
#ifdef VBOX
    cpu_abort(env, "helper_rsm");
#else /* !VBOX */
    target_ulong sm_state;
    int i, offset;
    uint32_t val;

    sm_state = env->smbase + 0x8000;
#ifdef TARGET_X86_64
    env->efer = ldq_phys(sm_state + 0x7ed0);
    if (env->efer & MSR_EFER_LMA)
        env->hflags |= HF_LMA_MASK;
    else
        env->hflags &= ~HF_LMA_MASK;

    for(i = 0; i < 6; i++) {
        offset = 0x7e00 + i * 16;
        cpu_x86_load_seg_cache(env, i,
                               lduw_phys(sm_state + offset),
                               ldq_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (lduw_phys(sm_state + offset + 2) & 0xf0ff) << 8);
    }

    env->gdt.base = ldq_phys(sm_state + 0x7e68);
    env->gdt.limit = ldl_phys(sm_state + 0x7e64);

    env->ldt.selector = lduw_phys(sm_state + 0x7e70);
    env->ldt.base = ldq_phys(sm_state + 0x7e78);
    env->ldt.limit = ldl_phys(sm_state + 0x7e74);
    env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;

    env->idt.base = ldq_phys(sm_state + 0x7e88);
    env->idt.limit = ldl_phys(sm_state + 0x7e84);

    env->tr.selector = lduw_phys(sm_state + 0x7e90);
    env->tr.base = ldq_phys(sm_state + 0x7e98);
    env->tr.limit = ldl_phys(sm_state + 0x7e94);
    env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;

    EAX = ldq_phys(sm_state + 0x7ff8);
    ECX = ldq_phys(sm_state + 0x7ff0);
    EDX = ldq_phys(sm_state + 0x7fe8);
    EBX = ldq_phys(sm_state + 0x7fe0);
    ESP = ldq_phys(sm_state + 0x7fd8);
    EBP = ldq_phys(sm_state + 0x7fd0);
    ESI = ldq_phys(sm_state + 0x7fc8);
    EDI = ldq_phys(sm_state + 0x7fc0);
    for(i = 8; i < 16; i++)
        env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
    env->eip = ldq_phys(sm_state + 0x7f78);
    load_eflags(ldl_phys(sm_state + 0x7f70),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->dr[6] = ldl_phys(sm_state + 0x7f68);
    env->dr[7] = ldl_phys(sm_state + 0x7f60);

    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
    }
#else
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
    load_eflags(ldl_phys(sm_state + 0x7ff4),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = ldl_phys(sm_state + 0x7ff0);
    EDI = ldl_phys(sm_state + 0x7fec);
    ESI = ldl_phys(sm_state + 0x7fe8);
    EBP = ldl_phys(sm_state + 0x7fe4);
    ESP = ldl_phys(sm_state + 0x7fe0);
    EBX = ldl_phys(sm_state + 0x7fdc);
    EDX = ldl_phys(sm_state + 0x7fd8);
    ECX = ldl_phys(sm_state + 0x7fd4);
    EAX = ldl_phys(sm_state + 0x7fd0);
    env->dr[6] = ldl_phys(sm_state + 0x7fcc);
    env->dr[7] = ldl_phys(sm_state + 0x7fc8);

    env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
    env->tr.base = ldl_phys(sm_state + 0x7f64);
    env->tr.limit = ldl_phys(sm_state + 0x7f60);
    env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;

    env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
    env->ldt.base = ldl_phys(sm_state + 0x7f80);
    env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
    env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;

    env->gdt.base = ldl_phys(sm_state + 0x7f74);
    env->gdt.limit = ldl_phys(sm_state + 0x7f70);

    env->idt.base = ldl_phys(sm_state + 0x7f58);
    env->idt.limit = ldl_phys(sm_state + 0x7f54);

    for(i = 0; i < 6; i++) {
        if (i < 3)
            offset = 0x7f84 + i * 12;
        else
            offset = 0x7f2c + (i - 3) * 12;
        cpu_x86_load_seg_cache(env, i,
                               ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
                               ldl_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
    }
    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
    }
#endif
    CC_OP = CC_OP_EFLAGS;
    env->hflags &= ~HF_SMM_MASK;
    cpu_smm_update(env);

    if (loglevel & CPU_LOG_INT) {
        fprintf(logfile, "SMM: after RSM\n");
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
    }
#endif /* !VBOX */
}

#endif /* !CONFIG_USER_ONLY */

#ifdef BUGGY_GCC_DIV64
/* gcc 2.95.4 on PowerPC does not seem to like using __udivdi3, so we
   call it from another function */
uint32_t div32(uint64_t *q_ptr, uint64_t num, uint32_t den)
{
    *q_ptr = num / den;
    return num % den;
}

int32_t idiv32(int64_t *q_ptr, int64_t num, int32_t den)
{
    *q_ptr = num / den;
    return num % den;
}
#endif

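/* DIV/IDIV with 32-bit operands: divide the 64-bit value in EDX:EAX by
   T0; #DE (EXCP00_DIVZ) is raised both for division by zero and for a
   quotient that does not fit in 32 bits. */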
void helper_divl_EAX_T0(void)
{
    unsigned int den, r;
    uint64_t num, q;

    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
    den = T0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
#ifdef BUGGY_GCC_DIV64
    r = div32(&q, num, den);
#else
    q = (num / den);
    r = (num % den);
#endif
    if (q > 0xffffffff)
        raise_exception(EXCP00_DIVZ);
    EAX = (uint32_t)q;
    EDX = (uint32_t)r;
}

void helper_idivl_EAX_T0(void)
{
    int den, r;
    int64_t num, q;

    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
    den = T0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
#ifdef BUGGY_GCC_DIV64
    r = idiv32(&q, num, den);
#else
    q = (num / den);
    r = (num % den);
#endif
    if (q != (int32_t)q)
        raise_exception(EXCP00_DIVZ);
    EAX = (uint32_t)q;
    EDX = (uint32_t)r;
}

void helper_cmpxchg8b(void)
{
    uint64_t d;
    int eflags;

    eflags = cc_table[CC_OP].compute_all();
    d = ldq(A0);
    if (d == (((uint64_t)EDX << 32) | EAX)) {
        stq(A0, ((uint64_t)ECX << 32) | EBX);
        eflags |= CC_Z;
    } else {
        EDX = d >> 32;
        EAX = d;
        eflags &= ~CC_Z;
    }
    CC_SRC = eflags;
}

void helper_single_step()
{
    env->dr[6] |= 0x4000;
    raise_exception(EXCP01_SSTP);
}

void helper_cpuid(void)
{
#ifndef VBOX
    uint32_t index;
    index = (uint32_t)EAX;

    /* test if maximum index reached */
    if (index & 0x80000000) {
        if (index > env->cpuid_xlevel)
            index = env->cpuid_level;
    } else {
        if (index > env->cpuid_level)
            index = env->cpuid_level;
    }

    switch(index) {
    case 0:
        EAX = env->cpuid_level;
        EBX = env->cpuid_vendor1;
        EDX = env->cpuid_vendor2;
        ECX = env->cpuid_vendor3;
        break;
    case 1:
        EAX = env->cpuid_version;
        EBX = 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
        ECX = env->cpuid_ext_features;
        EDX = env->cpuid_features;
        break;
    case 2:
        /* cache info: needed for Pentium Pro compatibility */
        EAX = 0x410601;
        EBX = 0;
        ECX = 0;
        EDX = 0;
        break;
    case 0x80000000:
        EAX = env->cpuid_xlevel;
        EBX = env->cpuid_vendor1;
        EDX = env->cpuid_vendor2;
        ECX = env->cpuid_vendor3;
        break;
    case 0x80000001:
        EAX = env->cpuid_features;
        EBX = 0;
        ECX = 0;
        EDX = env->cpuid_ext2_features;
        break;
    case 0x80000002:
    case 0x80000003:
    case 0x80000004:
        EAX = env->cpuid_model[(index - 0x80000002) * 4 + 0];
        EBX = env->cpuid_model[(index - 0x80000002) * 4 + 1];
        ECX = env->cpuid_model[(index - 0x80000002) * 4 + 2];
        EDX = env->cpuid_model[(index - 0x80000002) * 4 + 3];
        break;
    case 0x80000005:
        /* cache info (L1 cache) */
        EAX = 0x01ff01ff;
        EBX = 0x01ff01ff;
        ECX = 0x40020140;
        EDX = 0x40020140;
        break;
    case 0x80000006:
        /* cache info (L2 cache) */
        EAX = 0;
        EBX = 0x42004200;
        ECX = 0x02008140;
        EDX = 0;
        break;
    case 0x80000008:
        /* virtual & phys address size in low 2 bytes. */
        EAX = 0x00003028;
        EBX = 0;
        ECX = 0;
        EDX = 0;
        break;
    default:
        /* reserved values: zero */
        EAX = 0;
        EBX = 0;
        ECX = 0;
        EDX = 0;
        break;
    }
#else /* VBOX */
    remR3CpuId(env, EAX, &EAX, &EBX, &ECX, &EDX);
#endif /* VBOX */
}

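/* Slow path for ENTER with a non-zero nesting level: copy level - 1
   frame pointers of the enclosing display from the old frame, then push
   T1, which presumably holds the new frame pointer set up by the
   translated ENTER sequence. */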
1871void helper_enter_level(int level, int data32)
1872{
1873 target_ulong ssp;
1874 uint32_t esp_mask, esp, ebp;
1875
1876 esp_mask = get_sp_mask(env->segs[R_SS].flags);
1877 ssp = env->segs[R_SS].base;
1878 ebp = EBP;
1879 esp = ESP;
1880 if (data32) {
1881 /* 32 bit */
1882 esp -= 4;
1883 while (--level) {
1884 esp -= 4;
1885 ebp -= 4;
1886 stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
1887 }
1888 esp -= 4;
1889 stl(ssp + (esp & esp_mask), T1);
1890 } else {
1891 /* 16 bit */
1892 esp -= 2;
1893 while (--level) {
1894 esp -= 2;
1895 ebp -= 2;
1896 stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
1897 }
1898 esp -= 2;
1899 stw(ssp + (esp & esp_mask), T1);
1900 }
1901}
1902
1903#ifdef TARGET_X86_64
1904void helper_enter64_level(int level, int data64)
1905{
1906 target_ulong esp, ebp;
1907 ebp = EBP;
1908 esp = ESP;
1909
1910 if (data64) {
1911 /* 64 bit */
1912 esp -= 8;
1913 while (--level) {
1914 esp -= 8;
1915 ebp -= 8;
1916 stq(esp, ldq(ebp));
1917 }
1918 esp -= 8;
1919 stq(esp, T1);
1920 } else {
1921 /* 16 bit */
1922 esp -= 2;
1923 while (--level) {
1924 esp -= 2;
1925 ebp -= 2;
1926 stw(esp, lduw(ebp));
1927 }
1928 esp -= 2;
1929 stw(esp, T1);
1930 }
1931}
1932#endif
1933
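/*
 * LLDT: the new selector must reference the GDT (TI clear), and the
 * descriptor must be a present LDT system descriptor (S clear,
 * type 2). In long mode the descriptor is 16 bytes, hence
 * entry_limit 15 instead of 7 and the extra dword holding the upper
 * half of the base.
 */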
1934void helper_lldt_T0(void)
1935{
1936 int selector;
1937 SegmentCache *dt;
1938 uint32_t e1, e2;
1939 int index, entry_limit;
1940 target_ulong ptr;
1941#ifdef VBOX
1942 Log(("helper_lldt_T0: old ldtr=%RTsel {.base=%VGv, .limit=%VGv} new=%RTsel\n",
1943 (RTSEL)env->ldt.selector, (RTGCPTR)env->ldt.base, (RTGCPTR)env->ldt.limit, (RTSEL)(T0 & 0xffff)));
1944#endif
1945
1946 selector = T0 & 0xffff;
1947 if ((selector & 0xfffc) == 0) {
1948 /* XXX: NULL selector case: invalid LDT */
1949 env->ldt.base = 0;
1950 env->ldt.limit = 0;
1951 } else {
1952 if (selector & 0x4)
1953 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1954 dt = &env->gdt;
1955 index = selector & ~7;
1956#ifdef TARGET_X86_64
1957 if (env->hflags & HF_LMA_MASK)
1958 entry_limit = 15;
1959 else
1960#endif
1961 entry_limit = 7;
1962 if ((index + entry_limit) > dt->limit)
1963 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1964 ptr = dt->base + index;
1965 e1 = ldl_kernel(ptr);
1966 e2 = ldl_kernel(ptr + 4);
1967 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
1968 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1969 if (!(e2 & DESC_P_MASK))
1970 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
1971#ifdef TARGET_X86_64
1972 if (env->hflags & HF_LMA_MASK) {
1973 uint32_t e3;
1974 e3 = ldl_kernel(ptr + 8);
1975 load_seg_cache_raw_dt(&env->ldt, e1, e2);
1976 env->ldt.base |= (target_ulong)e3 << 32;
1977 } else
1978#endif
1979 {
1980 load_seg_cache_raw_dt(&env->ldt, e1, e2);
1981 }
1982 }
1983 env->ldt.selector = selector;
1984#ifdef VBOX
1985 Log(("helper_lldt_T0: new ldtr=%RTsel {.base=%VGv, .limit=%VGv}\n",
1986 (RTSEL)env->ldt.selector, (RTGCPTR)env->ldt.base, (RTGCPTR)env->ldt.limit));
1987#endif
1988}
1989
1990void helper_ltr_T0(void)
1991{
1992 int selector;
1993 SegmentCache *dt;
1994 uint32_t e1, e2;
1995 int index, type, entry_limit;
1996 target_ulong ptr;
1997
1998#ifdef VBOX
1999 Log(("helper_ltr_T0: old tr=%RTsel {.base=%VGv, .limit=%VGv, .flags=%RX32} new=%RTsel\n",
2000 (RTSEL)env->tr.selector, (RTGCPTR)env->tr.base, (RTGCPTR)env->tr.limit,
2001 env->tr.flags, (RTSEL)(T0 & 0xffff)));
2002#endif
2003
2004 selector = T0 & 0xffff;
2005 if ((selector & 0xfffc) == 0) {
2006 /* NULL selector case: invalid TR */
2007 env->tr.base = 0;
2008 env->tr.limit = 0;
2009 env->tr.flags = 0;
2010 } else {
2011 if (selector & 0x4)
2012 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2013 dt = &env->gdt;
2014 index = selector & ~7;
2015#ifdef TARGET_X86_64
2016 if (env->hflags & HF_LMA_MASK)
2017 entry_limit = 15;
2018 else
2019#endif
2020 entry_limit = 7;
2021 if ((index + entry_limit) > dt->limit)
2022 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2023 ptr = dt->base + index;
2024 e1 = ldl_kernel(ptr);
2025 e2 = ldl_kernel(ptr + 4);
2026 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2027 if ((e2 & DESC_S_MASK) ||
2028 (type != 1 && type != 9))
2029 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2030 if (!(e2 & DESC_P_MASK))
2031 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2032#ifdef TARGET_X86_64
2033 if (env->hflags & HF_LMA_MASK) {
2034 uint32_t e3;
2035 e3 = ldl_kernel(ptr + 8);
2036 load_seg_cache_raw_dt(&env->tr, e1, e2);
2037 env->tr.base |= (target_ulong)e3 << 32;
2038 } else
2039#endif
2040 {
2041 load_seg_cache_raw_dt(&env->tr, e1, e2);
2042 }
2043 e2 |= DESC_TSS_BUSY_MASK;
2044 stl_kernel(ptr + 4, e2);
2045 }
2046 env->tr.selector = selector;
2047#ifdef VBOX
2048 Log(("helper_ltr_T0: new tr=%RTsel {.base=%VGv, .limit=%VGv, .flags=%RX32} new=%RTsel\n",
2049 (RTSEL)env->tr.selector, (RTGCPTR)env->tr.base, (RTGCPTR)env->tr.limit,
2050 env->tr.flags, (RTSEL)(T0 & 0xffff)));
2051#endif
2052}
2053
2054/* works only in protected mode and not in VM86. seg_reg must be != R_CS */
2055void load_seg(int seg_reg, int selector)
2056{
2057 uint32_t e1, e2;
2058 int cpl, dpl, rpl;
2059 SegmentCache *dt;
2060 int index;
2061 target_ulong ptr;
2062
2063 selector &= 0xffff;
2064 cpl = env->hflags & HF_CPL_MASK;
2065
2066#ifdef VBOX
2067 /* Trying to load a selector with CPL=1? */
2068 if (cpl == 0 && (selector & 3) == 1 && (env->state & CPU_RAW_RING0))
2069 {
2070 Log(("RPL 1 -> sel %04X -> %04X\n", selector, selector & 0xfffc));
2071 selector = selector & 0xfffc;
2072 }
2073#endif
2074
2075 if ((selector & 0xfffc) == 0) {
2076 /* null selector case */
2077 if (seg_reg == R_SS
2078#ifdef TARGET_X86_64
2079 && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
2080#endif
2081 )
2082 raise_exception_err(EXCP0D_GPF, 0);
2083 cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
2084 } else {
2085
2086 if (selector & 0x4)
2087 dt = &env->ldt;
2088 else
2089 dt = &env->gdt;
2090 index = selector & ~7;
2091 if ((index + 7) > dt->limit)
2092 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2093 ptr = dt->base + index;
2094 e1 = ldl_kernel(ptr);
2095 e2 = ldl_kernel(ptr + 4);
2096
2097 if (!(e2 & DESC_S_MASK))
2098 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2099 rpl = selector & 3;
2100 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2101 if (seg_reg == R_SS) {
2102 /* must be writable segment */
2103 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
2104 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2105 if (rpl != cpl || dpl != cpl)
2106 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2107 } else {
2108 /* must be readable segment */
2109 if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
2110 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2111
2112 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2113 /* if not conforming code, test rights */
2114 if (dpl < cpl || dpl < rpl)
2115 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2116 }
2117 }
2118
2119 if (!(e2 & DESC_P_MASK)) {
2120 if (seg_reg == R_SS)
2121 raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
2122 else
2123 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2124 }
2125
2126 /* set the access bit if not already set */
2127 if (!(e2 & DESC_A_MASK)) {
2128 e2 |= DESC_A_MASK;
2129 stl_kernel(ptr + 4, e2);
2130 }
2131
2132 cpu_x86_load_seg_cache(env, seg_reg, selector,
2133 get_seg_base(e1, e2),
2134 get_seg_limit(e1, e2),
2135 e2);
2136#if 0
2137 fprintf(logfile, "load_seg: sel=0x%04x base=0x%08lx limit=0x%08x flags=%08x\n",
2138 selector, (unsigned long)env->segs[seg_reg].base, env->segs[seg_reg].limit, env->segs[seg_reg].flags);
2139#endif
2140 }
2141}
2142
2143/* protected mode jump */
2144void helper_ljmp_protected_T0_T1(int next_eip_addend)
2145{
2146 int new_cs, gate_cs, type;
2147 uint32_t e1, e2, cpl, dpl, rpl, limit;
2148 target_ulong new_eip, next_eip;
2149
2150 new_cs = T0;
2151 new_eip = T1;
2152 if ((new_cs & 0xfffc) == 0)
2153 raise_exception_err(EXCP0D_GPF, 0);
2154 if (load_segment(&e1, &e2, new_cs) != 0)
2155 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2156 cpl = env->hflags & HF_CPL_MASK;
2157 if (e2 & DESC_S_MASK) {
2158 if (!(e2 & DESC_CS_MASK))
2159 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2160 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2161 if (e2 & DESC_C_MASK) {
2162 /* conforming code segment */
2163 if (dpl > cpl)
2164 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2165 } else {
2166 /* non conforming code segment */
2167 rpl = new_cs & 3;
2168 if (rpl > cpl)
2169 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2170 if (dpl != cpl)
2171 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2172 }
2173 if (!(e2 & DESC_P_MASK))
2174 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2175 limit = get_seg_limit(e1, e2);
2176 if (new_eip > limit &&
2177 !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK))
2178 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2179 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2180 get_seg_base(e1, e2), limit, e2);
2181 EIP = new_eip;
2182 } else {
2183 /* jump to call or task gate */
2184 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2185 rpl = new_cs & 3;
2186 cpl = env->hflags & HF_CPL_MASK;
2187 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2188 switch(type) {
2189 case 1: /* 286 TSS */
2190 case 9: /* 386 TSS */
2191 case 5: /* task gate */
2192 if (dpl < cpl || dpl < rpl)
2193 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2194 next_eip = env->eip + next_eip_addend;
2195 switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
2196 CC_OP = CC_OP_EFLAGS;
2197 break;
2198 case 4: /* 286 call gate */
2199 case 12: /* 386 call gate */
2200 if ((dpl < cpl) || (dpl < rpl))
2201 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2202 if (!(e2 & DESC_P_MASK))
2203 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2204 gate_cs = e1 >> 16;
2205 new_eip = (e1 & 0xffff);
2206 if (type == 12)
2207 new_eip |= (e2 & 0xffff0000);
2208 if (load_segment(&e1, &e2, gate_cs) != 0)
2209 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2210 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2211 /* must be code segment */
2212 if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
2213 (DESC_S_MASK | DESC_CS_MASK)))
2214 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2215 if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
2216 (!(e2 & DESC_C_MASK) && (dpl != cpl)))
2217 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2218 if (!(e2 & DESC_P_MASK))
2219#ifdef VBOX /* See page 3-514 of 253666.pdf */
2220 raise_exception_err(EXCP0B_NOSEG, gate_cs & 0xfffc);
2221#else
2222 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2223#endif
2224 limit = get_seg_limit(e1, e2);
2225 if (new_eip > limit)
2226 raise_exception_err(EXCP0D_GPF, 0);
2227 cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
2228 get_seg_base(e1, e2), limit, e2);
2229 EIP = new_eip;
2230 break;
2231 default:
2232 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2233 break;
2234 }
2235 }
2236}
2237
2238/* real mode call */
2239void helper_lcall_real_T0_T1(int shift, int next_eip)
2240{
2241 int new_cs, new_eip;
2242 uint32_t esp, esp_mask;
2243 target_ulong ssp;
2244
2245 new_cs = T0;
2246 new_eip = T1;
2247 esp = ESP;
2248 esp_mask = get_sp_mask(env->segs[R_SS].flags);
2249 ssp = env->segs[R_SS].base;
2250 if (shift) {
2251 PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
2252 PUSHL(ssp, esp, esp_mask, next_eip);
2253 } else {
2254 PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
2255 PUSHW(ssp, esp, esp_mask, next_eip);
2256 }
2257
2258 SET_ESP(esp, esp_mask);
2259 env->eip = new_eip;
2260 env->segs[R_CS].selector = new_cs;
2261 env->segs[R_CS].base = (new_cs << 4);
2262}
2263
2264/* protected mode call */
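/*
 * A far CALL through a call gate to a more privileged non-conforming
 * code segment switches to the inner stack taken from the TSS and
 * pushes the old SS:ESP, a copy of param_count parameter words from
 * the old stack and finally CS:EIP there; a same-privilege gate call
 * only pushes CS:EIP. The gate size (286 vs 386) selects 16- or
 * 32-bit pushes via "shift".
 */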
2265void helper_lcall_protected_T0_T1(int shift, int next_eip_addend)
2266{
2267 int new_cs, new_stack, i;
2268 uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
2269 uint32_t ss, ss_e1, ss_e2, sp, type, ss_dpl, sp_mask;
2270 uint32_t val, limit, old_sp_mask;
2271 target_ulong ssp, old_ssp, next_eip, new_eip;
2272
2273 new_cs = T0;
2274 new_eip = T1;
2275 next_eip = env->eip + next_eip_addend;
2276#ifdef DEBUG_PCALL
2277 if (loglevel & CPU_LOG_PCALL) {
2278 fprintf(logfile, "lcall %04x:%08x s=%d\n",
2279 new_cs, (uint32_t)new_eip, shift);
2280 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
2281 }
2282#endif
2283 if ((new_cs & 0xfffc) == 0)
2284 raise_exception_err(EXCP0D_GPF, 0);
2285 if (load_segment(&e1, &e2, new_cs) != 0)
2286 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2287 cpl = env->hflags & HF_CPL_MASK;
2288#ifdef DEBUG_PCALL
2289 if (loglevel & CPU_LOG_PCALL) {
2290 fprintf(logfile, "desc=%08x:%08x\n", e1, e2);
2291 }
2292#endif
2293 if (e2 & DESC_S_MASK) {
2294 if (!(e2 & DESC_CS_MASK))
2295 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2296 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2297 if (e2 & DESC_C_MASK) {
2298 /* conforming code segment */
2299 if (dpl > cpl)
2300 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2301 } else {
2302 /* non conforming code segment */
2303 rpl = new_cs & 3;
2304 if (rpl > cpl)
2305 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2306 if (dpl != cpl)
2307 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2308 }
2309 if (!(e2 & DESC_P_MASK))
2310 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2311
2312#ifdef TARGET_X86_64
2313 /* XXX: check 16/32 bit cases in long mode */
2314 if (shift == 2) {
2315 target_ulong rsp;
2316 /* 64 bit case */
2317 rsp = ESP;
2318 PUSHQ(rsp, env->segs[R_CS].selector);
2319 PUSHQ(rsp, next_eip);
2320 /* from this point, not restartable */
2321 ESP = rsp;
2322 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2323 get_seg_base(e1, e2),
2324 get_seg_limit(e1, e2), e2);
2325 EIP = new_eip;
2326 } else
2327#endif
2328 {
2329 sp = ESP;
2330 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2331 ssp = env->segs[R_SS].base;
2332 if (shift) {
2333 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2334 PUSHL(ssp, sp, sp_mask, next_eip);
2335 } else {
2336 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2337 PUSHW(ssp, sp, sp_mask, next_eip);
2338 }
2339
2340 limit = get_seg_limit(e1, e2);
2341 if (new_eip > limit)
2342 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2343 /* from this point, not restartable */
2344 SET_ESP(sp, sp_mask);
2345 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2346 get_seg_base(e1, e2), limit, e2);
2347 EIP = new_eip;
2348 }
2349 } else {
2350 /* check gate type */
2351 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
2352 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2353 rpl = new_cs & 3;
2354 switch(type) {
2355 case 1: /* available 286 TSS */
2356 case 9: /* available 386 TSS */
2357 case 5: /* task gate */
2358 if (dpl < cpl || dpl < rpl)
2359 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2360 switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
2361 CC_OP = CC_OP_EFLAGS;
2362 return;
2363 case 4: /* 286 call gate */
2364 case 12: /* 386 call gate */
2365 break;
2366 default:
2367 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2368 break;
2369 }
2370 shift = type >> 3;
2371
2372 if (dpl < cpl || dpl < rpl)
2373 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2374 /* check valid bit */
2375 if (!(e2 & DESC_P_MASK))
2376 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2377 selector = e1 >> 16;
2378 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
2379 param_count = e2 & 0x1f;
2380 if ((selector & 0xfffc) == 0)
2381 raise_exception_err(EXCP0D_GPF, 0);
2382
2383 if (load_segment(&e1, &e2, selector) != 0)
2384 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2385 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
2386 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2387 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2388 if (dpl > cpl)
2389 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2390 if (!(e2 & DESC_P_MASK))
2391 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2392
2393 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
2394 /* to inner privilege */
2395 get_ss_esp_from_tss(&ss, &sp, dpl);
2396#ifdef DEBUG_PCALL
2397 if (loglevel & CPU_LOG_PCALL)
2398 fprintf(logfile, "new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n",
2399 ss, sp, param_count, ESP);
2400#endif
2401 if ((ss & 0xfffc) == 0)
2402 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2403 if ((ss & 3) != dpl)
2404 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2405 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
2406 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2407 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2408 if (ss_dpl != dpl)
2409 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2410 if (!(ss_e2 & DESC_S_MASK) ||
2411 (ss_e2 & DESC_CS_MASK) ||
2412 !(ss_e2 & DESC_W_MASK))
2413 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2414 if (!(ss_e2 & DESC_P_MASK))
2415#ifdef VBOX /* See page 3-99 of 253666.pdf */
2416 raise_exception_err(EXCP0C_STACK, ss & 0xfffc);
2417#else
2418 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2419#endif
2420
2421 // push_size = ((param_count * 2) + 8) << shift;
2422
2423 old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
2424 old_ssp = env->segs[R_SS].base;
2425
2426 sp_mask = get_sp_mask(ss_e2);
2427 ssp = get_seg_base(ss_e1, ss_e2);
2428 if (shift) {
2429 PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
2430 PUSHL(ssp, sp, sp_mask, ESP);
2431 for(i = param_count - 1; i >= 0; i--) {
2432 val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
2433 PUSHL(ssp, sp, sp_mask, val);
2434 }
2435 } else {
2436 PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
2437 PUSHW(ssp, sp, sp_mask, ESP);
2438 for(i = param_count - 1; i >= 0; i--) {
2439 val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
2440 PUSHW(ssp, sp, sp_mask, val);
2441 }
2442 }
2443 new_stack = 1;
2444 } else {
2445 /* to same privilege */
2446 sp = ESP;
2447 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2448 ssp = env->segs[R_SS].base;
2449 // push_size = (4 << shift);
2450 new_stack = 0;
2451 }
2452
2453 if (shift) {
2454 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2455 PUSHL(ssp, sp, sp_mask, next_eip);
2456 } else {
2457 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2458 PUSHW(ssp, sp, sp_mask, next_eip);
2459 }
2460
2461 /* from this point, not restartable */
2462
2463 if (new_stack) {
2464 ss = (ss & ~3) | dpl;
2465 cpu_x86_load_seg_cache(env, R_SS, ss,
2466 ssp,
2467 get_seg_limit(ss_e1, ss_e2),
2468 ss_e2);
2469 }
2470
2471 selector = (selector & ~3) | dpl;
2472 cpu_x86_load_seg_cache(env, R_CS, selector,
2473 get_seg_base(e1, e2),
2474 get_seg_limit(e1, e2),
2475 e2);
2476 cpu_x86_set_cpl(env, dpl);
2477 SET_ESP(sp, sp_mask);
2478 EIP = offset;
2479 }
2480#ifdef USE_KQEMU
2481 if (kqemu_is_ok(env)) {
2482 env->exception_index = -1;
2483 cpu_loop_exit();
2484 }
2485#endif
2486}
2487
2488/* real and vm86 mode iret */
2489void helper_iret_real(int shift)
2490{
2491 uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
2492 target_ulong ssp;
2493 int eflags_mask;
2494#ifdef VBOX
2495 bool fVME = false;
2496
2497 remR3TrapClear(env->pVM);
2498#endif /* VBOX */
2499
2500 sp_mask = 0xffff; /* XXXX: use SS segment size ? */
2501 sp = ESP;
2502 ssp = env->segs[R_SS].base;
2503 if (shift == 1) {
2504 /* 32 bits */
2505 POPL(ssp, sp, sp_mask, new_eip);
2506 POPL(ssp, sp, sp_mask, new_cs);
2507 new_cs &= 0xffff;
2508 POPL(ssp, sp, sp_mask, new_eflags);
2509 } else {
2510 /* 16 bits */
2511 POPW(ssp, sp, sp_mask, new_eip);
2512 POPW(ssp, sp, sp_mask, new_cs);
2513 POPW(ssp, sp, sp_mask, new_eflags);
2514 }
2515#ifdef VBOX
2516 if ( (env->eflags & VM_MASK)
2517 && ((env->eflags >> IOPL_SHIFT) & 3) != 3
2518 && (env->cr[4] & CR4_VME_MASK)) /* implied or else we would fault earlier */
2519 {
2520 fVME = true;
2521 /* if virtual interrupt pending and (virtual) interrupts will be enabled -> #GP */
2522 /* if TF will be set -> #GP */
2523 if ( ((new_eflags & IF_MASK) && (env->eflags & VIP_MASK))
2524 || (new_eflags & TF_MASK))
2525 raise_exception(EXCP0D_GPF);
2526 }
2527#endif /* VBOX */
2528
2529 ESP = (ESP & ~sp_mask) | (sp & sp_mask);
2530 load_seg_vm(R_CS, new_cs);
2531 env->eip = new_eip;
2532#ifdef VBOX
2533 if (fVME)
2534 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
2535 else
2536#endif
2537 if (env->eflags & VM_MASK)
2538 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
2539 else
2540 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
2541 if (shift == 0)
2542 eflags_mask &= 0xffff;
2543 load_eflags(new_eflags, eflags_mask);
2544
2545#ifdef VBOX
2546 if (fVME)
2547 {
2548 if (new_eflags & IF_MASK)
2549 env->eflags |= VIF_MASK;
2550 else
2551 env->eflags &= ~VIF_MASK;
2552 }
2553#endif /* VBOX */
2554}
2555
2556static inline void validate_seg(int seg_reg, int cpl)
2557{
2558 int dpl;
2559 uint32_t e2;
2560
2561 /* XXX: on x86_64, we do not want to nullify FS and GS because
2562 they may still contain a valid base. I would be interested to
2563 know how a real x86_64 CPU behaves */
2564 if ((seg_reg == R_FS || seg_reg == R_GS) &&
2565 (env->segs[seg_reg].selector & 0xfffc) == 0)
2566 return;
2567
2568 e2 = env->segs[seg_reg].flags;
2569 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2570 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2571 /* data or non conforming code segment */
2572 if (dpl < cpl) {
2573 cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
2574 }
2575 }
2576}
2577
2578/* protected mode iret */
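/*
 * Common tail for far RET and IRET in protected mode: pops CS:EIP
 * (plus EFLAGS for IRET), applies the RPL/DPL checks and, when
 * returning to an outer privilege level, also pops SS:ESP, switches
 * stacks and nulls any data segment that is not accessible at the new
 * CPL (validate_seg above). For IRET the set of EFLAGS bits that may
 * change depends on the old CPL and IOPL.
 */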
2579static inline void helper_ret_protected(int shift, int is_iret, int addend)
2580{
2581 uint32_t new_cs, new_eflags, new_ss;
2582 uint32_t new_es, new_ds, new_fs, new_gs;
2583 uint32_t e1, e2, ss_e1, ss_e2;
2584 int cpl, dpl, rpl, eflags_mask, iopl;
2585 target_ulong ssp, sp, new_eip, new_esp, sp_mask;
2586
2587#ifdef TARGET_X86_64
2588 if (shift == 2)
2589 sp_mask = -1;
2590 else
2591#endif
2592 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2593 sp = ESP;
2594 ssp = env->segs[R_SS].base;
2595 new_eflags = 0; /* avoid warning */
2596#ifdef TARGET_X86_64
2597 if (shift == 2) {
2598 POPQ(sp, new_eip);
2599 POPQ(sp, new_cs);
2600 new_cs &= 0xffff;
2601 if (is_iret) {
2602 POPQ(sp, new_eflags);
2603 }
2604 } else
2605#endif
2606 if (shift == 1) {
2607 /* 32 bits */
2608 POPL(ssp, sp, sp_mask, new_eip);
2609 POPL(ssp, sp, sp_mask, new_cs);
2610 new_cs &= 0xffff;
2611 if (is_iret) {
2612 POPL(ssp, sp, sp_mask, new_eflags);
2613#if defined(VBOX) && defined(DEBUG)
2614 printf("iret: new CS %04X\n", new_cs);
2615 printf("iret: new EIP %08X\n", new_eip);
2616 printf("iret: new EFLAGS %08X\n", new_eflags);
2617 printf("iret: EAX=%08x\n", EAX);
2618#endif
2619
2620 if (new_eflags & VM_MASK)
2621 goto return_to_vm86;
2622 }
2623#ifdef VBOX
2624 if ((new_cs & 0x3) == 1 && (env->state & CPU_RAW_RING0))
2625 {
2626#ifdef DEBUG
2627 printf("RPL 1 -> new_cs %04X -> %04X\n", new_cs, new_cs & 0xfffc);
2628#endif
2629 new_cs = new_cs & 0xfffc;
2630 }
2631#endif
2632 } else {
2633 /* 16 bits */
2634 POPW(ssp, sp, sp_mask, new_eip);
2635 POPW(ssp, sp, sp_mask, new_cs);
2636 if (is_iret)
2637 POPW(ssp, sp, sp_mask, new_eflags);
2638 }
2639#ifdef DEBUG_PCALL
2640 if (loglevel & CPU_LOG_PCALL) {
2641 fprintf(logfile, "lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
2642 new_cs, new_eip, shift, addend);
2643 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
2644 }
2645#endif
2646 if ((new_cs & 0xfffc) == 0)
2647 {
2648#if defined(VBOX) && defined(DEBUG)
2649 printf("(new_cs & 0xfffc) == 0\n");
2650#endif
2651 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2652 }
2653 if (load_segment(&e1, &e2, new_cs) != 0)
2654 {
2655#if defined(VBOX) && defined(DEBUG)
2656 printf("load_segment failed\n");
2657#endif
2658 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2659 }
2660 if (!(e2 & DESC_S_MASK) ||
2661 !(e2 & DESC_CS_MASK))
2662 {
2663#if defined(VBOX) && defined(DEBUG)
2664 printf("e2 mask %08x\n", e2);
2665#endif
2666 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2667 }
2668 cpl = env->hflags & HF_CPL_MASK;
2669 rpl = new_cs & 3;
2670 if (rpl < cpl)
2671 {
2672#if defined(VBOX) && defined(DEBUG)
2673 printf("rpl < cpl (%d vs %d)\n", rpl, cpl);
2674#endif
2675 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2676 }
2677 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2678 if (e2 & DESC_C_MASK) {
2679 if (dpl > rpl)
2680 {
2681#if defined(VBOX) && defined(DEBUG)
2682 printf("dpl > rpl (%d vs %d)\n", dpl, rpl);
2683#endif
2684 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2685 }
2686 } else {
2687 if (dpl != rpl)
2688 {
2689#if defined(VBOX) && defined(DEBUG)
2690 printf("dpl != rpl (%d vs %d) e1=%x e2=%x\n", dpl, rpl, e1, e2);
2691#endif
2692 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2693 }
2694 }
2695 if (!(e2 & DESC_P_MASK))
2696 {
2697#if defined(VBOX) && defined(DEBUG)
2698 printf("DESC_P_MASK e2=%08x\n", e2);
2699#endif
2700 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2701 }
2702 sp += addend;
2703 if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
2704 ((env->hflags & HF_CS64_MASK) && !is_iret))) {
2705 /* return to same privilege level */
2706 cpu_x86_load_seg_cache(env, R_CS, new_cs,
2707 get_seg_base(e1, e2),
2708 get_seg_limit(e1, e2),
2709 e2);
2710 } else {
2711 /* return to different privilege level */
2712#ifdef TARGET_X86_64
2713 if (shift == 2) {
2714 POPQ(sp, new_esp);
2715 POPQ(sp, new_ss);
2716 new_ss &= 0xffff;
2717 } else
2718#endif
2719 if (shift == 1) {
2720 /* 32 bits */
2721 POPL(ssp, sp, sp_mask, new_esp);
2722 POPL(ssp, sp, sp_mask, new_ss);
2723 new_ss &= 0xffff;
2724 } else {
2725 /* 16 bits */
2726 POPW(ssp, sp, sp_mask, new_esp);
2727 POPW(ssp, sp, sp_mask, new_ss);
2728 }
2729#ifdef DEBUG_PCALL
2730 if (loglevel & CPU_LOG_PCALL) {
2731 fprintf(logfile, "new ss:esp=%04x:" TARGET_FMT_lx "\n",
2732 new_ss, new_esp);
2733 }
2734#endif
2735 if ((new_ss & 0xfffc) == 0) {
2736#ifdef TARGET_X86_64
2737 /* NULL ss is allowed in long mode if cpl != 3 */
2738 /* XXX: test CS64 ? */
2739 if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
2740 cpu_x86_load_seg_cache(env, R_SS, new_ss,
2741 0, 0xffffffff,
2742 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2743 DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
2744 DESC_W_MASK | DESC_A_MASK);
2745 ss_e2 = DESC_B_MASK; /* XXX: should not be needed ? */
2746 } else
2747#endif
2748 {
2749 raise_exception_err(EXCP0D_GPF, 0);
2750 }
2751 } else {
2752 if ((new_ss & 3) != rpl)
2753 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2754 if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
2755 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2756 if (!(ss_e2 & DESC_S_MASK) ||
2757 (ss_e2 & DESC_CS_MASK) ||
2758 !(ss_e2 & DESC_W_MASK))
2759 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2760 dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2761 if (dpl != rpl)
2762 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2763 if (!(ss_e2 & DESC_P_MASK))
2764 raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
2765 cpu_x86_load_seg_cache(env, R_SS, new_ss,
2766 get_seg_base(ss_e1, ss_e2),
2767 get_seg_limit(ss_e1, ss_e2),
2768 ss_e2);
2769 }
2770
2771 cpu_x86_load_seg_cache(env, R_CS, new_cs,
2772 get_seg_base(e1, e2),
2773 get_seg_limit(e1, e2),
2774 e2);
2775 cpu_x86_set_cpl(env, rpl);
2776 sp = new_esp;
2777#ifdef TARGET_X86_64
2778 if (env->hflags & HF_CS64_MASK)
2779 sp_mask = -1;
2780 else
2781#endif
2782 sp_mask = get_sp_mask(ss_e2);
2783
2784 /* validate data segments */
2785 validate_seg(R_ES, rpl);
2786 validate_seg(R_DS, rpl);
2787 validate_seg(R_FS, rpl);
2788 validate_seg(R_GS, rpl);
2789
2790 sp += addend;
2791 }
2792 SET_ESP(sp, sp_mask);
2793 env->eip = new_eip;
2794 if (is_iret) {
2795 /* NOTE: 'cpl' is the _old_ CPL */
2796 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
2797 if (cpl == 0)
2798#ifdef VBOX
2799 eflags_mask |= IOPL_MASK | VIF_MASK | VIP_MASK;
2800#else
2801 eflags_mask |= IOPL_MASK;
2802#endif
2803 iopl = (env->eflags >> IOPL_SHIFT) & 3;
2804 if (cpl <= iopl)
2805 eflags_mask |= IF_MASK;
2806 if (shift == 0)
2807 eflags_mask &= 0xffff;
2808 load_eflags(new_eflags, eflags_mask);
2809 }
2810 return;
2811
2812 return_to_vm86:
2813
2814#if 0 // defined(VBOX) && defined(DEBUG)
2815 printf("V86: new CS %04X\n", new_cs);
2816 printf("V86: Descriptor %08X:%08X\n", e2, e1);
2817 printf("V86: new EIP %08X\n", new_eip);
2818 printf("V86: new EFLAGS %08X\n", new_eflags);
2819#endif
2820
2821 POPL(ssp, sp, sp_mask, new_esp);
2822 POPL(ssp, sp, sp_mask, new_ss);
2823 POPL(ssp, sp, sp_mask, new_es);
2824 POPL(ssp, sp, sp_mask, new_ds);
2825 POPL(ssp, sp, sp_mask, new_fs);
2826 POPL(ssp, sp, sp_mask, new_gs);
2827
2828 /* modify processor state */
2829 load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
2830 IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
2831 load_seg_vm(R_CS, new_cs & 0xffff);
2832 cpu_x86_set_cpl(env, 3);
2833 load_seg_vm(R_SS, new_ss & 0xffff);
2834 load_seg_vm(R_ES, new_es & 0xffff);
2835 load_seg_vm(R_DS, new_ds & 0xffff);
2836 load_seg_vm(R_FS, new_fs & 0xffff);
2837 load_seg_vm(R_GS, new_gs & 0xffff);
2838
2839 env->eip = new_eip & 0xffff;
2840 ESP = new_esp;
2841}
2842
2843void helper_iret_protected(int shift, int next_eip)
2844{
2845 int tss_selector, type;
2846 uint32_t e1, e2;
2847
2848#ifdef VBOX
2849 remR3TrapClear(env->pVM);
2850#endif
2851
2852 /* specific case for TSS */
2853 if (env->eflags & NT_MASK) {
2854#ifdef TARGET_X86_64
2855 if (env->hflags & HF_LMA_MASK)
2856 raise_exception_err(EXCP0D_GPF, 0);
2857#endif
2858 tss_selector = lduw_kernel(env->tr.base + 0);
2859 if (tss_selector & 4)
2860 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
2861 if (load_segment(&e1, &e2, tss_selector) != 0)
2862 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
2863 type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
2864 /* NOTE: we check both segment and busy TSS */
2865 if (type != 3)
2866 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
2867 switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
2868 } else {
2869 helper_ret_protected(shift, 1, 0);
2870 }
2871#ifdef USE_KQEMU
2872 if (kqemu_is_ok(env)) {
2873 CC_OP = CC_OP_EFLAGS;
2874 env->exception_index = -1;
2875 cpu_loop_exit();
2876 }
2877#endif
2878}
2879
2880void helper_lret_protected(int shift, int addend)
2881{
2882 helper_ret_protected(shift, 0, addend);
2883#ifdef USE_KQEMU
2884 if (kqemu_is_ok(env)) {
2885 env->exception_index = -1;
2886 cpu_loop_exit();
2887 }
2888#endif
2889}
2890
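/*
 * SYSENTER/SYSEXIT build flat segments from the SYSENTER_CS MSR:
 * CS = cs and SS = cs + 8 on entry, CS = cs + 16 and SS = cs + 24
 * (both with RPL 3) on exit. A zero SYSENTER_CS makes either
 * instruction raise #GP(0).
 */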
2891void helper_sysenter(void)
2892{
2893 if (env->sysenter_cs == 0) {
2894 raise_exception_err(EXCP0D_GPF, 0);
2895 }
2896 env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
2897 cpu_x86_set_cpl(env, 0);
2898 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
2899 0, 0xffffffff,
2900 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2901 DESC_S_MASK |
2902 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2903 cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
2904 0, 0xffffffff,
2905 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2906 DESC_S_MASK |
2907 DESC_W_MASK | DESC_A_MASK);
2908 ESP = env->sysenter_esp;
2909 EIP = env->sysenter_eip;
2910}
2911
2912void helper_sysexit(void)
2913{
2914 int cpl;
2915
2916 cpl = env->hflags & HF_CPL_MASK;
2917 if (env->sysenter_cs == 0 || cpl != 0) {
2918 raise_exception_err(EXCP0D_GPF, 0);
2919 }
2920 cpu_x86_set_cpl(env, 3);
2921 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
2922 0, 0xffffffff,
2923 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2924 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2925 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2926 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
2927 0, 0xffffffff,
2928 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2929 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2930 DESC_W_MASK | DESC_A_MASK);
2931 ESP = ECX;
2932 EIP = EDX;
2933#ifdef USE_KQEMU
2934 if (kqemu_is_ok(env)) {
2935 env->exception_index = -1;
2936 cpu_loop_exit();
2937 }
2938#endif
2939}
2940
2941void helper_movl_crN_T0(int reg)
2942{
2943#if !defined(CONFIG_USER_ONLY)
2944 switch(reg) {
2945 case 0:
2946 cpu_x86_update_cr0(env, T0);
2947 break;
2948 case 3:
2949 cpu_x86_update_cr3(env, T0);
2950 break;
2951 case 4:
2952 cpu_x86_update_cr4(env, T0);
2953 break;
2954 case 8:
2955 cpu_set_apic_tpr(env, T0);
2956 break;
2957 default:
2958 env->cr[reg] = T0;
2959 break;
2960 }
2961#endif
2962}
2963
2964/* XXX: do more */
2965void helper_movl_drN_T0(int reg)
2966{
2967 env->dr[reg] = T0;
2968}
2969
2970void helper_invlpg(target_ulong addr)
2971{
2972 cpu_x86_flush_tlb(env, addr);
2973}
2974
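/*
 * RDTSC: when CR4.TSD is set the instruction is privileged and any
 * use outside ring 0 raises #GP; otherwise the 64-bit time stamp
 * counter is returned split across EDX:EAX.
 */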
2975void helper_rdtsc(void)
2976{
2977 uint64_t val;
2978
2979 if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
2980 raise_exception(EXCP0D_GPF);
2981 }
2982 val = cpu_get_tsc(env);
2983 EAX = (uint32_t)(val);
2984 EDX = (uint32_t)(val >> 32);
2985}
2986
2987#if defined(CONFIG_USER_ONLY)
2988void helper_wrmsr(void)
2989{
2990}
2991
2992void helper_rdmsr(void)
2993{
2994}
2995#else
2996void helper_wrmsr(void)
2997{
2998 uint64_t val;
2999
3000 val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
3001
3002 switch((uint32_t)ECX) {
3003 case MSR_IA32_SYSENTER_CS:
3004 env->sysenter_cs = val & 0xffff;
3005 break;
3006 case MSR_IA32_SYSENTER_ESP:
3007 env->sysenter_esp = val;
3008 break;
3009 case MSR_IA32_SYSENTER_EIP:
3010 env->sysenter_eip = val;
3011 break;
3012 case MSR_IA32_APICBASE:
3013 cpu_set_apic_base(env, val);
3014 break;
3015 case MSR_EFER:
3016 {
3017 uint64_t update_mask;
3018 update_mask = 0;
3019 if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL)
3020 update_mask |= MSR_EFER_SCE;
3021 if (env->cpuid_ext2_features & CPUID_EXT2_LM)
3022 update_mask |= MSR_EFER_LME;
3023 if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
3024 update_mask |= MSR_EFER_FFXSR;
3025 if (env->cpuid_ext2_features & CPUID_EXT2_NX)
3026 update_mask |= MSR_EFER_NXE;
3027 env->efer = (env->efer & ~update_mask) |
3028 (val & update_mask);
3029 }
3030 break;
3031 case MSR_STAR:
3032 env->star = val;
3033 break;
3034 case MSR_PAT:
3035 env->pat = val;
3036 break;
3037#ifdef TARGET_X86_64
3038 case MSR_LSTAR:
3039 env->lstar = val;
3040 break;
3041 case MSR_CSTAR:
3042 env->cstar = val;
3043 break;
3044 case MSR_FMASK:
3045 env->fmask = val;
3046 break;
3047 case MSR_FSBASE:
3048 env->segs[R_FS].base = val;
3049 break;
3050 case MSR_GSBASE:
3051 env->segs[R_GS].base = val;
3052 break;
3053 case MSR_KERNELGSBASE:
3054 env->kernelgsbase = val;
3055 break;
3056#endif
3057 default:
3058 /* XXX: exception ? */
3059 break;
3060 }
3061}
3062
3063void helper_rdmsr(void)
3064{
3065 uint64_t val;
3066 switch((uint32_t)ECX) {
3067 case MSR_IA32_SYSENTER_CS:
3068 val = env->sysenter_cs;
3069 break;
3070 case MSR_IA32_SYSENTER_ESP:
3071 val = env->sysenter_esp;
3072 break;
3073 case MSR_IA32_SYSENTER_EIP:
3074 val = env->sysenter_eip;
3075 break;
3076 case MSR_IA32_APICBASE:
3077 val = cpu_get_apic_base(env);
3078 break;
3079 case MSR_EFER:
3080 val = env->efer;
3081 break;
3082 case MSR_STAR:
3083 val = env->star;
3084 break;
3085 case MSR_PAT:
3086 val = env->pat;
3087 break;
3088#ifdef TARGET_X86_64
3089 case MSR_LSTAR:
3090 val = env->lstar;
3091 break;
3092 case MSR_CSTAR:
3093 val = env->cstar;
3094 break;
3095 case MSR_FMASK:
3096 val = env->fmask;
3097 break;
3098 case MSR_FSBASE:
3099 val = env->segs[R_FS].base;
3100 break;
3101 case MSR_GSBASE:
3102 val = env->segs[R_GS].base;
3103 break;
3104 case MSR_KERNELGSBASE:
3105 val = env->kernelgsbase;
3106 break;
3107#endif
3108 default:
3109 /* XXX: exception ? */
3110 val = 0;
3111 break;
3112 }
3113 EAX = (uint32_t)(val);
3114 EDX = (uint32_t)(val >> 32);
3115}
3116#endif
3117
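/*
 * LSL/LAR/VERR/VERW share one visibility rule: the descriptor must be
 * loadable and, unless it is a conforming code segment, its DPL must
 * be >= both CPL and the selector's RPL. On success ZF is set and
 * LSL/LAR return the limit resp. the masked access-rights dword in
 * T1; any failure only clears ZF, no fault is raised.
 */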
3118void helper_lsl(void)
3119{
3120 unsigned int selector, limit;
3121 uint32_t e1, e2, eflags;
3122 int rpl, dpl, cpl, type;
3123
3124 eflags = cc_table[CC_OP].compute_all();
3125 selector = T0 & 0xffff;
3126 if (load_segment(&e1, &e2, selector) != 0)
3127 goto fail;
3128 rpl = selector & 3;
3129 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3130 cpl = env->hflags & HF_CPL_MASK;
3131 if (e2 & DESC_S_MASK) {
3132 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3133 /* conforming */
3134 } else {
3135 if (dpl < cpl || dpl < rpl)
3136 goto fail;
3137 }
3138 } else {
3139 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
3140 switch(type) {
3141 case 1:
3142 case 2:
3143 case 3:
3144 case 9:
3145 case 11:
3146 break;
3147 default:
3148 goto fail;
3149 }
3150 if (dpl < cpl || dpl < rpl) {
3151 fail:
3152 CC_SRC = eflags & ~CC_Z;
3153 return;
3154 }
3155 }
3156 limit = get_seg_limit(e1, e2);
3157 T1 = limit;
3158 CC_SRC = eflags | CC_Z;
3159}
3160
3161void helper_lar(void)
3162{
3163 unsigned int selector;
3164 uint32_t e1, e2, eflags;
3165 int rpl, dpl, cpl, type;
3166
3167 eflags = cc_table[CC_OP].compute_all();
3168 selector = T0 & 0xffff;
3169 if ((selector & 0xfffc) == 0)
3170 goto fail;
3171 if (load_segment(&e1, &e2, selector) != 0)
3172 goto fail;
3173 rpl = selector & 3;
3174 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3175 cpl = env->hflags & HF_CPL_MASK;
3176 if (e2 & DESC_S_MASK) {
3177 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3178 /* conforming */
3179 } else {
3180 if (dpl < cpl || dpl < rpl)
3181 goto fail;
3182 }
3183 } else {
3184 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
3185 switch(type) {
3186 case 1:
3187 case 2:
3188 case 3:
3189 case 4:
3190 case 5:
3191 case 9:
3192 case 11:
3193 case 12:
3194 break;
3195 default:
3196 goto fail;
3197 }
3198 if (dpl < cpl || dpl < rpl) {
3199 fail:
3200 CC_SRC = eflags & ~CC_Z;
3201 return;
3202 }
3203 }
3204 T1 = e2 & 0x00f0ff00;
3205 CC_SRC = eflags | CC_Z;
3206}
3207
3208void helper_verr(void)
3209{
3210 unsigned int selector;
3211 uint32_t e1, e2, eflags;
3212 int rpl, dpl, cpl;
3213
3214 eflags = cc_table[CC_OP].compute_all();
3215 selector = T0 & 0xffff;
3216 if ((selector & 0xfffc) == 0)
3217 goto fail;
3218 if (load_segment(&e1, &e2, selector) != 0)
3219 goto fail;
3220 if (!(e2 & DESC_S_MASK))
3221 goto fail;
3222 rpl = selector & 3;
3223 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3224 cpl = env->hflags & HF_CPL_MASK;
3225 if (e2 & DESC_CS_MASK) {
3226 if (!(e2 & DESC_R_MASK))
3227 goto fail;
3228 if (!(e2 & DESC_C_MASK)) {
3229 if (dpl < cpl || dpl < rpl)
3230 goto fail;
3231 }
3232 } else {
3233 if (dpl < cpl || dpl < rpl) {
3234 fail:
3235 CC_SRC = eflags & ~CC_Z;
3236 return;
3237 }
3238 }
3239 CC_SRC = eflags | CC_Z;
3240}
3241
3242void helper_verw(void)
3243{
3244 unsigned int selector;
3245 uint32_t e1, e2, eflags;
3246 int rpl, dpl, cpl;
3247
3248 eflags = cc_table[CC_OP].compute_all();
3249 selector = T0 & 0xffff;
3250 if ((selector & 0xfffc) == 0)
3251 goto fail;
3252 if (load_segment(&e1, &e2, selector) != 0)
3253 goto fail;
3254 if (!(e2 & DESC_S_MASK))
3255 goto fail;
3256 rpl = selector & 3;
3257 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3258 cpl = env->hflags & HF_CPL_MASK;
3259 if (e2 & DESC_CS_MASK) {
3260 goto fail;
3261 } else {
3262 if (dpl < cpl || dpl < rpl)
3263 goto fail;
3264 if (!(e2 & DESC_W_MASK)) {
3265 fail:
3266 CC_SRC = eflags & ~CC_Z;
3267 return;
3268 }
3269 }
3270 CC_SRC = eflags | CC_Z;
3271}
3272
3273/* FPU helpers */
3274
3275void helper_fldt_ST0_A0(void)
3276{
3277 int new_fpstt;
3278 new_fpstt = (env->fpstt - 1) & 7;
3279 env->fpregs[new_fpstt].d = helper_fldt(A0);
3280 env->fpstt = new_fpstt;
3281 env->fptags[new_fpstt] = 0; /* validate stack entry */
3282}
3283
3284void helper_fstt_ST0_A0(void)
3285{
3286 helper_fstt(ST0, A0);
3287}
3288
3289void fpu_set_exception(int mask)
3290{
3291 env->fpus |= mask;
3292 if (env->fpus & (~env->fpuc & FPUC_EM))
3293 env->fpus |= FPUS_SE | FPUS_B;
3294}
3295
3296CPU86_LDouble helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
3297{
3298 if (b == 0.0)
3299 fpu_set_exception(FPUS_ZE);
3300 return a / b;
3301}
3302
3303void fpu_raise_exception(void)
3304{
3305 if (env->cr[0] & CR0_NE_MASK) {
3306 raise_exception(EXCP10_COPR);
3307 }
3308#if !defined(CONFIG_USER_ONLY)
3309 else {
3310 cpu_set_ferr(env);
3311 }
3312#endif
3313}
3314
3315/* BCD ops */
3316
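/*
 * FBLD/FBST operand: 9 bytes holding 18 packed BCD digits (two per
 * byte, least significant byte first) followed by a sign byte whose
 * bit 7 carries the sign. E.g. the bytes 25 01 00 ... 00 encode the
 * value 125.
 */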
3317void helper_fbld_ST0_A0(void)
3318{
3319 CPU86_LDouble tmp;
3320 uint64_t val;
3321 unsigned int v;
3322 int i;
3323
3324 val = 0;
3325 for(i = 8; i >= 0; i--) {
3326 v = ldub(A0 + i);
3327 val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
3328 }
3329 tmp = val;
3330 if (ldub(A0 + 9) & 0x80)
3331 tmp = -tmp;
3332 fpush();
3333 ST0 = tmp;
3334}
3335
3336void helper_fbst_ST0_A0(void)
3337{
3338 int v;
3339 target_ulong mem_ref, mem_end;
3340 int64_t val;
3341
3342 val = floatx_to_int64(ST0, &env->fp_status);
3343 mem_ref = A0;
3344 mem_end = mem_ref + 9;
3345 if (val < 0) {
3346 stb(mem_end, 0x80);
3347 val = -val;
3348 } else {
3349 stb(mem_end, 0x00);
3350 }
3351 while (mem_ref < mem_end) {
3352 if (val == 0)
3353 break;
3354 v = val % 100;
3355 val = val / 100;
3356 v = ((v / 10) << 4) | (v % 10);
3357 stb(mem_ref++, v);
3358 }
3359 while (mem_ref < mem_end) {
3360 stb(mem_ref++, 0);
3361 }
3362}
3363
3364void helper_f2xm1(void)
3365{
3366 ST0 = pow(2.0,ST0) - 1.0;
3367}
3368
3369void helper_fyl2x(void)
3370{
3371 CPU86_LDouble fptemp;
3372
3373 fptemp = ST0;
3374 if (fptemp>0.0){
3375 fptemp = log(fptemp)/log(2.0); /* log2(ST) */
3376 ST1 *= fptemp;
3377 fpop();
3378 } else {
3379 env->fpus &= (~0x4700);
3380 env->fpus |= 0x400;
3381 }
3382}
3383
3384void helper_fptan(void)
3385{
3386 CPU86_LDouble fptemp;
3387
3388 fptemp = ST0;
3389 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
3390 env->fpus |= 0x400;
3391 } else {
3392 ST0 = tan(fptemp);
3393 fpush();
3394 ST0 = 1.0;
3395 env->fpus &= (~0x400); /* C2 <-- 0 */
3396 /* the above code is for |arg| < 2**52 only */
3397 }
3398}
3399
3400void helper_fpatan(void)
3401{
3402 CPU86_LDouble fptemp, fpsrcop;
3403
3404 fpsrcop = ST1;
3405 fptemp = ST0;
3406 ST1 = atan2(fpsrcop,fptemp);
3407 fpop();
3408}
3409
3410void helper_fxtract(void)
3411{
3412 CPU86_LDoubleU temp;
3413 unsigned int expdif;
3414
3415 temp.d = ST0;
3416 expdif = EXPD(temp) - EXPBIAS;
3417 /*DP exponent bias*/
3418 ST0 = expdif;
3419 fpush();
3420 BIASEXPONENT(temp);
3421 ST0 = temp.d;
3422}
3423
3424void helper_fprem1(void)
3425{
3426 CPU86_LDouble dblq, fpsrcop, fptemp;
3427 CPU86_LDoubleU fpsrcop1, fptemp1;
3428 int expdif;
3429 int q;
3430
3431 fpsrcop = ST0;
3432 fptemp = ST1;
3433 fpsrcop1.d = fpsrcop;
3434 fptemp1.d = fptemp;
3435 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
3436 if (expdif < 53) {
3437 dblq = fpsrcop / fptemp;
3438 dblq = (dblq < 0.0)? ceil(dblq): floor(dblq);
3439 ST0 = fpsrcop - fptemp*dblq;
3440 q = (int)dblq; /* cutting off top bits is assumed here */
3441 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3442 /* (C0,C1,C3) <-- (q2,q1,q0) */
3443 env->fpus |= (q&0x4) << 6; /* (C0) <-- q2 */
3444 env->fpus |= (q&0x2) << 8; /* (C1) <-- q1 */
3445 env->fpus |= (q&0x1) << 14; /* (C3) <-- q0 */
3446 } else {
3447 env->fpus |= 0x400; /* C2 <-- 1 */
3448 fptemp = pow(2.0, expdif-50);
3449 fpsrcop = (ST0 / ST1) / fptemp;
3450 /* fpsrcop = integer obtained by rounding to the nearest */
3451 fpsrcop = (fpsrcop-floor(fpsrcop) < ceil(fpsrcop)-fpsrcop)?
3452 floor(fpsrcop): ceil(fpsrcop);
3453 ST0 -= (ST1 * fpsrcop * fptemp);
3454 }
3455}
3456
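/*
 * FPREM differs from FPREM1 above only in how the quotient is
 * rounded: truncation toward zero here versus round-to-nearest there.
 * For an exponent difference >= 53 only a partial reduction (scaled
 * by 2^(expdif-50)) is done and C2 signals the incomplete remainder;
 * otherwise C0/C1/C3 receive the low three quotient bits.
 */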
3457void helper_fprem(void)
3458{
3459 CPU86_LDouble dblq, fpsrcop, fptemp;
3460 CPU86_LDoubleU fpsrcop1, fptemp1;
3461 int expdif;
3462 int q;
3463
3464 fpsrcop = ST0;
3465 fptemp = ST1;
3466 fpsrcop1.d = fpsrcop;
3467 fptemp1.d = fptemp;
3468 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
3469 if ( expdif < 53 ) {
3470 dblq = fpsrcop / fptemp;
3471 dblq = (dblq < 0.0)? ceil(dblq): floor(dblq);
3472 ST0 = fpsrcop - fptemp*dblq;
3473 q = (int)dblq; /* cutting off top bits is assumed here */
3474 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3475 /* (C0,C1,C3) <-- (q2,q1,q0) */
3476 env->fpus |= (q&0x4) << 6; /* (C0) <-- q2 */
3477 env->fpus |= (q&0x2) << 8; /* (C1) <-- q1 */
3478 env->fpus |= (q&0x1) << 14; /* (C3) <-- q0 */
3479 } else {
3480 env->fpus |= 0x400; /* C2 <-- 1 */
3481 fptemp = pow(2.0, expdif-50);
3482 fpsrcop = (ST0 / ST1) / fptemp;
3483 /* fpsrcop = integer obtained by chopping */
3484 fpsrcop = (fpsrcop < 0.0)?
3485 -(floor(fabs(fpsrcop))): floor(fpsrcop);
3486 ST0 -= (ST1 * fpsrcop * fptemp);
3487 }
3488}
3489
3490void helper_fyl2xp1(void)
3491{
3492 CPU86_LDouble fptemp;
3493
3494 fptemp = ST0;
3495 if ((fptemp+1.0)>0.0) {
3496 fptemp = log(fptemp+1.0) / log(2.0); /* log2(ST+1.0) */
3497 ST1 *= fptemp;
3498 fpop();
3499 } else {
3500 env->fpus &= (~0x4700);
3501 env->fpus |= 0x400;
3502 }
3503}
3504
3505void helper_fsqrt(void)
3506{
3507 CPU86_LDouble fptemp;
3508
3509 fptemp = ST0;
3510 if (fptemp<0.0) {
3511 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3512 env->fpus |= 0x400;
3513 }
3514 ST0 = sqrt(fptemp);
3515}
3516
3517void helper_fsincos(void)
3518{
3519 CPU86_LDouble fptemp;
3520
3521 fptemp = ST0;
3522 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
3523 env->fpus |= 0x400;
3524 } else {
3525 ST0 = sin(fptemp);
3526 fpush();
3527 ST0 = cos(fptemp);
3528 env->fpus &= (~0x400); /* C2 <-- 0 */
3529 /* the above code is for |arg| < 2**63 only */
3530 }
3531}
3532
3533void helper_frndint(void)
3534{
3535 ST0 = floatx_round_to_int(ST0, &env->fp_status);
3536}
3537
3538void helper_fscale(void)
3539{
3540 ST0 = ldexp (ST0, (int)(ST1));
3541}
3542
3543void helper_fsin(void)
3544{
3545 CPU86_LDouble fptemp;
3546
3547 fptemp = ST0;
3548 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
3549 env->fpus |= 0x400;
3550 } else {
3551 ST0 = sin(fptemp);
3552 env->fpus &= (~0x400); /* C2 <-- 0 */
3553 /* the above code is for |arg| < 2**53 only */
3554 }
3555}
3556
3557void helper_fcos(void)
3558{
3559 CPU86_LDouble fptemp;
3560
3561 fptemp = ST0;
3562 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
3563 env->fpus |= 0x400;
3564 } else {
3565 ST0 = cos(fptemp);
3566 env->fpus &= (~0x400); /* C2 <-- 0 */
3567 /* the above code is for |arg| < 2**63 only */
3568 }
3569}
3570
3571void helper_fxam_ST0(void)
3572{
3573 CPU86_LDoubleU temp;
3574 int expdif;
3575
3576 temp.d = ST0;
3577
3578 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3579 if (SIGND(temp))
3580 env->fpus |= 0x200; /* C1 <-- 1 */
3581
3582 /* XXX: test fptags too */
3583 expdif = EXPD(temp);
3584 if (expdif == MAXEXPD) {
3585#ifdef USE_X86LDOUBLE
3586 if (MANTD(temp) == 0x8000000000000000ULL)
3587#else
3588 if (MANTD(temp) == 0)
3589#endif
3590 env->fpus |= 0x500 /*Infinity*/;
3591 else
3592 env->fpus |= 0x100 /*NaN*/;
3593 } else if (expdif == 0) {
3594 if (MANTD(temp) == 0)
3595 env->fpus |= 0x4000 /*Zero*/;
3596 else
3597 env->fpus |= 0x4400 /*Denormal*/;
3598 } else {
3599 env->fpus |= 0x400;
3600 }
3601}
3602
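/*
 * FSTENV tag word: two bits per register, 00 = valid, 01 = zero,
 * 10 = special (NaN, infinity or denormal), 11 = empty, reconstructed
 * below from fptags[] and the register contents.
 */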
3603void helper_fstenv(target_ulong ptr, int data32)
3604{
3605 int fpus, fptag, exp, i;
3606 uint64_t mant;
3607 CPU86_LDoubleU tmp;
3608
3609 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
3610 fptag = 0;
3611 for (i=7; i>=0; i--) {
3612 fptag <<= 2;
3613 if (env->fptags[i]) {
3614 fptag |= 3;
3615 } else {
3616 tmp.d = env->fpregs[i].d;
3617 exp = EXPD(tmp);
3618 mant = MANTD(tmp);
3619 if (exp == 0 && mant == 0) {
3620 /* zero */
3621 fptag |= 1;
3622 } else if (exp == 0 || exp == MAXEXPD
3623#ifdef USE_X86LDOUBLE
3624 || (mant & (1LL << 63)) == 0
3625#endif
3626 ) {
3627 /* NaNs, infinity, denormal */
3628 fptag |= 2;
3629 }
3630 }
3631 }
3632 if (data32) {
3633 /* 32 bit */
3634 stl(ptr, env->fpuc);
3635 stl(ptr + 4, fpus);
3636 stl(ptr + 8, fptag);
3637 stl(ptr + 12, 0); /* fpip */
3638 stl(ptr + 16, 0); /* fpcs */
3639 stl(ptr + 20, 0); /* fpoo */
3640 stl(ptr + 24, 0); /* fpos */
3641 } else {
3642 /* 16 bit */
3643 stw(ptr, env->fpuc);
3644 stw(ptr + 2, fpus);
3645 stw(ptr + 4, fptag);
3646 stw(ptr + 6, 0);
3647 stw(ptr + 8, 0);
3648 stw(ptr + 10, 0);
3649 stw(ptr + 12, 0);
3650 }
3651}
3652
3653void helper_fldenv(target_ulong ptr, int data32)
3654{
3655 int i, fpus, fptag;
3656
3657 if (data32) {
3658 env->fpuc = lduw(ptr);
3659 fpus = lduw(ptr + 4);
3660 fptag = lduw(ptr + 8);
3661 }
3662 else {
3663 env->fpuc = lduw(ptr);
3664 fpus = lduw(ptr + 2);
3665 fptag = lduw(ptr + 4);
3666 }
3667 env->fpstt = (fpus >> 11) & 7;
3668 env->fpus = fpus & ~0x3800;
3669 for(i = 0;i < 8; i++) {
3670 env->fptags[i] = ((fptag & 3) == 3);
3671 fptag >>= 2;
3672 }
3673}
3674
3675void helper_fsave(target_ulong ptr, int data32)
3676{
3677 CPU86_LDouble tmp;
3678 int i;
3679
3680 helper_fstenv(ptr, data32);
3681
3682 ptr += (14 << data32);
3683 for(i = 0;i < 8; i++) {
3684 tmp = ST(i);
3685 helper_fstt(tmp, ptr);
3686 ptr += 10;
3687 }
3688
3689 /* fninit */
3690 env->fpus = 0;
3691 env->fpstt = 0;
3692 env->fpuc = 0x37f;
3693 env->fptags[0] = 1;
3694 env->fptags[1] = 1;
3695 env->fptags[2] = 1;
3696 env->fptags[3] = 1;
3697 env->fptags[4] = 1;
3698 env->fptags[5] = 1;
3699 env->fptags[6] = 1;
3700 env->fptags[7] = 1;
3701}
3702
3703void helper_frstor(target_ulong ptr, int data32)
3704{
3705 CPU86_LDouble tmp;
3706 int i;
3707
3708 helper_fldenv(ptr, data32);
3709 ptr += (14 << data32);
3710
3711 for(i = 0;i < 8; i++) {
3712 tmp = helper_fldt(ptr);
3713 ST(i) = tmp;
3714 ptr += 10;
3715 }
3716}
3717
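/*
 * FXSAVE image as written below: FCW at +0, FSW at +2, the abridged
 * (inverted, one bit per register) tag word at +4, MXCSR at +0x18,
 * the eight FPU/MMX registers from +0x20 in 16-byte slots, and the
 * XMM registers from +0xa0 (8 of them, or 16 when data64 is set).
 */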
3718void helper_fxsave(target_ulong ptr, int data64)
3719{
3720 int fpus, fptag, i, nb_xmm_regs;
3721 CPU86_LDouble tmp;
3722 target_ulong addr;
3723
3724 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
3725 fptag = 0;
3726 for(i = 0; i < 8; i++) {
3727 fptag |= (env->fptags[i] << i);
3728 }
3729 stw(ptr, env->fpuc);
3730 stw(ptr + 2, fpus);
3731 stw(ptr + 4, fptag ^ 0xff);
3732
3733 addr = ptr + 0x20;
3734 for(i = 0;i < 8; i++) {
3735 tmp = ST(i);
3736 helper_fstt(tmp, addr);
3737 addr += 16;
3738 }
3739
3740 if (env->cr[4] & CR4_OSFXSR_MASK) {
3741 /* XXX: finish it */
3742 stl(ptr + 0x18, env->mxcsr); /* mxcsr */
3743 stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
3744 nb_xmm_regs = 8 << data64;
3745 addr = ptr + 0xa0;
3746 for(i = 0; i < nb_xmm_regs; i++) {
3747 stq(addr, env->xmm_regs[i].XMM_Q(0));
3748 stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
3749 addr += 16;
3750 }
3751 }
3752}
3753
3754void helper_fxrstor(target_ulong ptr, int data64)
3755{
3756 int i, fpus, fptag, nb_xmm_regs;
3757 CPU86_LDouble tmp;
3758 target_ulong addr;
3759
3760 env->fpuc = lduw(ptr);
3761 fpus = lduw(ptr + 2);
3762 fptag = lduw(ptr + 4);
3763 env->fpstt = (fpus >> 11) & 7;
3764 env->fpus = fpus & ~0x3800;
3765 fptag ^= 0xff;
3766 for(i = 0;i < 8; i++) {
3767 env->fptags[i] = ((fptag >> i) & 1);
3768 }
3769
3770 addr = ptr + 0x20;
3771 for(i = 0;i < 8; i++) {
3772 tmp = helper_fldt(addr);
3773 ST(i) = tmp;
3774 addr += 16;
3775 }
3776
3777 if (env->cr[4] & CR4_OSFXSR_MASK) {
3778 /* XXX: finish it */
3779 env->mxcsr = ldl(ptr + 0x18);
3780 //ldl(ptr + 0x1c);
3781 nb_xmm_regs = 8 << data64;
3782 addr = ptr + 0xa0;
3783 for(i = 0; i < nb_xmm_regs; i++) {
3784#if !defined(VBOX) || __GNUC__ < 4
3785 env->xmm_regs[i].XMM_Q(0) = ldq(addr);
3786 env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
3787#else /* VBOX + __GNUC__ >= 4: gcc 4.x compiler bug - it runs out of registers for the 64-bit value. */
3788# if 1
3789 env->xmm_regs[i].XMM_L(0) = ldl(addr);
3790 env->xmm_regs[i].XMM_L(1) = ldl(addr + 4);
3791 env->xmm_regs[i].XMM_L(2) = ldl(addr + 8);
3792 env->xmm_regs[i].XMM_L(3) = ldl(addr + 12);
3793# else
3794 /* this works fine on Mac OS X, gcc 4.0.1 */
3795 uint64_t u64 = ldq(addr);
3796 env->xmm_regs[i].XMM_Q(0) = u64;
3797 u64 = ldq(addr + 8);
3798 env->xmm_regs[i].XMM_Q(1) = u64;
3799# endif
3800#endif
3801 addr += 16;
3802 }
3803 }
3804}
3805
3806#ifndef USE_X86LDOUBLE
3807
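/*
 * Conversion between the 64-bit double used internally when the host
 * lacks an 80-bit long double and the 80-bit extended format: the
 * 52-bit mantissa is shifted up by 11 with the explicit integer bit
 * set, and the exponent is rebiased from 1023 to 16383.
 */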
3808void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
3809{
3810 CPU86_LDoubleU temp;
3811 int e;
3812
3813 temp.d = f;
3814 /* mantissa */
3815 *pmant = (MANTD(temp) << 11) | (1LL << 63);
3816 /* exponent + sign */
3817 e = EXPD(temp) - EXPBIAS + 16383;
3818 e |= SIGND(temp) >> 16;
3819 *pexp = e;
3820}
3821
3822CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
3823{
3824 CPU86_LDoubleU temp;
3825 int e;
3826 uint64_t ll;
3827
3828 /* XXX: handle overflow ? */
3829 e = (upper & 0x7fff) - 16383 + EXPBIAS; /* exponent */
3830 e |= (upper >> 4) & 0x800; /* sign */
3831 ll = (mant >> 11) & ((1LL << 52) - 1);
3832#ifdef __arm__
3833 temp.l.upper = (e << 20) | (ll >> 32);
3834 temp.l.lower = ll;
3835#else
3836 temp.ll = ll | ((uint64_t)e << 52);
3837#endif
3838 return temp.d;
3839}
3840
3841#else
3842
3843void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
3844{
3845 CPU86_LDoubleU temp;
3846
3847 temp.d = f;
3848 *pmant = temp.l.lower;
3849 *pexp = temp.l.upper;
3850}
3851
3852CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
3853{
3854 CPU86_LDoubleU temp;
3855
3856 temp.l.upper = upper;
3857 temp.l.lower = mant;
3858 return temp.d;
3859}
3860#endif
3861
3862#ifdef TARGET_X86_64
3863
3864//#define DEBUG_MULDIV
3865
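/*
 * 128-bit arithmetic for the 64-bit MUL/IMUL/DIV/IDIV helpers. mul64
 * splits each operand into 32-bit halves and accumulates the four
 * partial products:
 *     a * b = (a1*b1 << 64) + ((a0*b1 + a1*b0) << 32) + a0*b0
 * with add128 propagating the carry from the low quadword into the
 * high one.
 */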
3866static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
3867{
3868 *plow += a;
3869 /* carry test */
3870 if (*plow < a)
3871 (*phigh)++;
3872 *phigh += b;
3873}
3874
3875static void neg128(uint64_t *plow, uint64_t *phigh)
3876{
3877 *plow = ~ *plow;
3878 *phigh = ~ *phigh;
3879 add128(plow, phigh, 1, 0);
3880}
3881
3882static void mul64(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
3883{
3884 uint32_t a0, a1, b0, b1;
3885 uint64_t v;
3886
3887 a0 = a;
3888 a1 = a >> 32;
3889
3890 b0 = b;
3891 b1 = b >> 32;
3892
3893 v = (uint64_t)a0 * (uint64_t)b0;
3894 *plow = v;
3895 *phigh = 0;
3896
3897 v = (uint64_t)a0 * (uint64_t)b1;
3898 add128(plow, phigh, v << 32, v >> 32);
3899
3900 v = (uint64_t)a1 * (uint64_t)b0;
3901 add128(plow, phigh, v << 32, v >> 32);
3902
3903 v = (uint64_t)a1 * (uint64_t)b1;
3904 *phigh += v;
3905#ifdef DEBUG_MULDIV
3906 printf("mul: 0x%016" PRIx64 " * 0x%016" PRIx64 " = 0x%016" PRIx64 "%016" PRIx64 "\n",
3907 a, b, *phigh, *plow);
3908#endif
3909}
3910
3911static void imul64(uint64_t *plow, uint64_t *phigh, int64_t a, int64_t b)
3912{
3913 int sa, sb;
3914 sa = (a < 0);
3915 if (sa)
3916 a = -a;
3917 sb = (b < 0);
3918 if (sb)
3919 b = -b;
3920 mul64(plow, phigh, a, b);
3921 if (sa ^ sb) {
3922 neg128(plow, phigh);
3923 }
3924}
3925
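/*
 * 128/64 unsigned division via binary long division: it overflows
 * whenever the high quadword is >= the divisor (the quotient would
 * not fit in 64 bits); otherwise 64 shift-and-subtract steps leave
 * the quotient in *plow and the remainder in *phigh.
 */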
3926/* return TRUE if overflow */
3927static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
3928{
3929 uint64_t q, r, a1, a0;
3930 int i, qb, ab;
3931
3932 a0 = *plow;
3933 a1 = *phigh;
3934 if (a1 == 0) {
3935 q = a0 / b;
3936 r = a0 % b;
3937 *plow = q;
3938 *phigh = r;
3939 } else {
3940 if (a1 >= b)
3941 return 1;
3942 /* XXX: use a better algorithm */
3943 for(i = 0; i < 64; i++) {
3944 ab = a1 >> 63;
3945 a1 = (a1 << 1) | (a0 >> 63);
3946 if (ab || a1 >= b) {
3947 a1 -= b;
3948 qb = 1;
3949 } else {
3950 qb = 0;
3951 }
3952 a0 = (a0 << 1) | qb;
3953 }
3954#if defined(DEBUG_MULDIV)
3955 printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64 ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n",
3956 *phigh, *plow, b, a0, a1);
3957#endif
3958 *plow = a0;
3959 *phigh = a1;
3960 }
3961 return 0;
3962}
3963
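/*
 * Signed 128/64 division: divide the magnitudes, then fix up the
 * signs. Note the asymmetric overflow bounds below: a negative
 * quotient may be as small as -2^63, while a positive one must stay
 * below 2^63.
 */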
3964/* return TRUE if overflow */
3965static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
3966{
3967 int sa, sb;
3968 sa = ((int64_t)*phigh < 0);
3969 if (sa)
3970 neg128(plow, phigh);
3971 sb = (b < 0);
3972 if (sb)
3973 b = -b;
3974 if (div64(plow, phigh, b) != 0)
3975 return 1;
3976 if (sa ^ sb) {
3977 if (*plow > (1ULL << 63))
3978 return 1;
3979 *plow = - *plow;
3980 } else {
3981 if (*plow >= (1ULL << 63))
3982 return 1;
3983 }
3984 if (sa)
3985 *phigh = - *phigh;
3986 return 0;
3987}
3988
3989void helper_mulq_EAX_T0(void)
3990{
3991 uint64_t r0, r1;
3992
3993 mul64(&r0, &r1, EAX, T0);
3994 EAX = r0;
3995 EDX = r1;
3996 CC_DST = r0;
3997 CC_SRC = r1;
3998}
3999
4000void helper_imulq_EAX_T0(void)
4001{
4002 uint64_t r0, r1;
4003
4004 imul64(&r0, &r1, EAX, T0);
4005 EAX = r0;
4006 EDX = r1;
4007 CC_DST = r0;
4008 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
4009}
4010
4011void helper_imulq_T0_T1(void)
4012{
4013 uint64_t r0, r1;
4014
4015 imul64(&r0, &r1, T0, T1);
4016 T0 = r0;
4017 CC_DST = r0;
4018 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
4019}
4020
4021void helper_divq_EAX_T0(void)
4022{
4023 uint64_t r0, r1;
4024 if (T0 == 0) {
4025 raise_exception(EXCP00_DIVZ);
4026 }
4027 r0 = EAX;
4028 r1 = EDX;
4029 if (div64(&r0, &r1, T0))
4030 raise_exception(EXCP00_DIVZ);
4031 EAX = r0;
4032 EDX = r1;
4033}
4034
4035void helper_idivq_EAX_T0(void)
4036{
4037 uint64_t r0, r1;
4038 if (T0 == 0) {
4039 raise_exception(EXCP00_DIVZ);
4040 }
4041 r0 = EAX;
4042 r1 = EDX;
4043 if (idiv64(&r0, &r1, T0))
4044 raise_exception(EXCP00_DIVZ);
4045 EAX = r0;
4046 EDX = r1;
4047}
4048
4049void helper_bswapq_T0(void)
4050{
4051 T0 = bswap64(T0);
4052}
4053#endif
4054
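/*
 * HLT parks the virtual CPU: the halted hflag is set and the
 * execution loop is left with EXCP_HLT until an interrupt clears it.
 * MONITOR/MWAIT require ECX == 0; MWAIT is treated as HLT
 * (unconditionally so under VBOX, and otherwise only on a
 * single-CPU configuration).
 */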
4055void helper_hlt(void)
4056{
4057 env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
4058 env->hflags |= HF_HALTED_MASK;
4059 env->exception_index = EXCP_HLT;
4060 cpu_loop_exit();
4061}
4062
4063void helper_monitor(void)
4064{
4065 if ((uint32_t)ECX != 0)
4066 raise_exception(EXCP0D_GPF);
4067 /* XXX: store address ? */
4068}
4069
4070void helper_mwait(void)
4071{
4072 if ((uint32_t)ECX != 0)
4073 raise_exception(EXCP0D_GPF);
4074#ifdef VBOX
4075 helper_hlt();
4076#else
4077 /* XXX: not complete but not completely erroneous */
4078 if (env->cpu_index != 0 || env->next_cpu != NULL) {
4079 /* more than one CPU: do not sleep because another CPU may
4080 wake this one */
4081 } else {
4082 helper_hlt();
4083 }
4084#endif
4085}
4086
4087float approx_rsqrt(float a)
4088{
4089 return 1.0 / sqrt(a);
4090}
4091
4092float approx_rcp(float a)
4093{
4094 return 1.0 / a;
4095}
4096
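/*
 * Propagates the guest FPU control word into the softfloat status:
 * the RC field selects the rounding mode and, when FLOATX80 is
 * available, the PC field selects 32-, 64- or 80-bit rounding
 * precision (the reserved value PC=1 falls through to 80 bits here).
 */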
4097void update_fp_status(void)
4098{
4099 int rnd_type;
4100
4101 /* set rounding mode */
4102 switch(env->fpuc & RC_MASK) {
4103 default:
4104 case RC_NEAR:
4105 rnd_type = float_round_nearest_even;
4106 break;
4107 case RC_DOWN:
4108 rnd_type = float_round_down;
4109 break;
4110 case RC_UP:
4111 rnd_type = float_round_up;
4112 break;
4113 case RC_CHOP:
4114 rnd_type = float_round_to_zero;
4115 break;
4116 }
4117 set_float_rounding_mode(rnd_type, &env->fp_status);
4118#ifdef FLOATX80
4119 switch((env->fpuc >> 8) & 3) {
4120 case 0:
4121 rnd_type = 32;
4122 break;
4123 case 2:
4124 rnd_type = 64;
4125 break;
4126 case 3:
4127 default:
4128 rnd_type = 80;
4129 break;
4130 }
4131 set_floatx80_rounding_precision(rnd_type, &env->fp_status);
4132#endif
4133}
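
/* The second switch above maps the x87 precision-control field (FPUC bits
   9:8) onto the softfloat extended-precision rounding: 0 selects 32-bit
   (single), 2 selects 64-bit (double) and 3 selects 80-bit (extended)
   results. The encoding 1 is reserved and falls through to the 80-bit
   default. */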

#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _mmu
#define GETPC() (__builtin_return_address(0))

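/* Each inclusion of softmmu_template.h instantiates the slow-path load/store
   helpers for one access size: SHIFT 0..3 produce the 1, 2, 4 and 8 byte
   variants of the _mmu accessors used by the generated code. */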
#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#endif

/* Try to fill the TLB; raises an exception on error. If retaddr is
   NULL, it means that the function was called from C code (i.e. not
   from generated code or from helper.c). */
/* XXX: fix it to restore all registers */
void tlb_fill(target_ulong addr, int is_write, int is_user, void *retaddr)
{
    TranslationBlock *tb;
    int ret;
    unsigned long pc;
    CPUX86State *saved_env;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;

    ret = cpu_x86_handle_mmu_fault(env, addr, is_write, is_user, 1);
    if (ret) {
        if (retaddr) {
            /* now we have a real cpu fault */
            pc = (unsigned long)retaddr;
            tb = tb_find_pc(pc);
            if (tb) {
                /* the PC is inside the translated code. It means that we have
                   a virtual CPU fault */
                cpu_restore_state(tb, env, pc, NULL);
            }
        }
        if (retaddr)
            raise_exception_err(env->exception_index, env->error_code);
        else
            raise_exception_err_norestore(env->exception_index, env->error_code);
    }
    env = saved_env;
}

#ifdef VBOX

/**
 * Correctly computes the eflags.
 * @returns eflags.
 * @param env1 CPU environment.
 */
uint32_t raw_compute_eflags(CPUX86State *env1)
{
    CPUX86State *savedenv = env;
    env = env1;
    uint32_t efl = compute_eflags();
    env = savedenv;
    return efl;
}

/**
 * Reads a byte from a virtual address in the guest memory area.
 * XXX: does this work for all addresses? swapped-out pages?
 * @returns The data byte read.
 * @param env1 CPU environment.
 * @param addr GC virtual address.
 */
uint8_t read_byte(CPUX86State *env1, target_ulong addr)
{
    CPUX86State *savedenv = env;
    env = env1;
    uint8_t u8 = ldub_kernel(addr);
    env = savedenv;
    return u8;
}

/**
 * Reads a word (16 bits) from a virtual address in the guest memory area.
 * XXX: does this work for all addresses? swapped-out pages?
 * @returns The data word read.
 * @param env1 CPU environment.
 * @param addr GC virtual address.
 */
uint16_t read_word(CPUX86State *env1, target_ulong addr)
{
    CPUX86State *savedenv = env;
    env = env1;
    uint16_t u16 = lduw_kernel(addr);
    env = savedenv;
    return u16;
}

/**
 * Reads a dword (32 bits) from a virtual address in the guest memory area.
 * XXX: does this work for all addresses? swapped-out pages?
 * @returns The data dword read.
 * @param env1 CPU environment.
 * @param addr GC virtual address.
 */
uint32_t read_dword(CPUX86State *env1, target_ulong addr)
{
    CPUX86State *savedenv = env;
    env = env1;
    uint32_t u32 = ldl_kernel(addr);
    env = savedenv;
    return u32;
}

/**
 * Writes a byte to a virtual address in the guest memory area.
 * XXX: does this work for all addresses? swapped-out pages?
 * @param env1 CPU environment.
 * @param addr GC virtual address.
 * @param val byte value.
 */
void write_byte(CPUX86State *env1, target_ulong addr, uint8_t val)
{
    CPUX86State *savedenv = env;
    env = env1;
    stb(addr, val);
    env = savedenv;
}

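/**
 * Writes a word (16 bits) to a virtual address in the guest memory area.
 * @param env1 CPU environment.
 * @param addr GC virtual address.
 * @param val word value.
 */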
void write_word(CPUX86State *env1, target_ulong addr, uint16_t val)
{
    CPUX86State *savedenv = env;
    env = env1;
    stw(addr, val);
    env = savedenv;
}

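/**
 * Writes a dword (32 bits) to a virtual address in the guest memory area.
 * @param env1 CPU environment.
 * @param addr GC virtual address.
 * @param val dword value.
 */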
void write_dword(CPUX86State *env1, target_ulong addr, uint32_t val)
{
    CPUX86State *savedenv = env;
    env = env1;
    stl(addr, val);
    env = savedenv;
}

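/* All of the accessors above share one idiom: the global 'env' that the
   ldxx_kernel()/stxx() macros implicitly dereference is swapped for the
   caller's context and restored before returning. A minimal sketch of the
   pattern (read_qword is hypothetical and not part of this file): */
#if 0
uint64_t read_qword(CPUX86State *env1, target_ulong addr)
{
    CPUX86State *savedenv = env;        /* remember the current context */
    env = env1;                         /* ldq_kernel() reads through env */
    uint64_t u64 = ldq_kernel(addr);    /* assumes ldq_kernel is available */
    env = savedenv;                     /* restore before returning */
    return u64;
}
#endif
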
/**
 * Correctly loads a selector into a segment register, updating the internal
 * qemu data/caches.
 * @param env1 CPU environment.
 * @param seg_reg Segment register.
 * @param selector Selector to load.
 */
void sync_seg(CPUX86State *env1, int seg_reg, int selector)
{
    CPUX86State *savedenv = env;
    env = env1;

    if (    env->eflags & X86_EFL_VM
        || !(env->cr[0] & X86_CR0_PE))
    {
        load_seg_vm(seg_reg, selector);

        env = savedenv;

        /* Successful sync. */
        env1->segs[seg_reg].newselector = 0;
    }
    else
    {
        if (setjmp(env1->jmp_env) == 0)
        {
            if (seg_reg == R_CS)
            {
                uint32_t e1, e2;
                load_segment(&e1, &e2, selector);
                cpu_x86_load_seg_cache(env, R_CS, selector,
                                       get_seg_base(e1, e2),
                                       get_seg_limit(e1, e2),
                                       e2);
            }
            else
                load_seg(seg_reg, selector);
            env = savedenv;

            /* Successful sync. */
            env1->segs[seg_reg].newselector = 0;
        }
        else
        {
            env = savedenv;

            /* Postpone sync until the guest uses the selector. */
            env1->segs[seg_reg].selector = selector; /* hidden values are now incorrect, but will be resynced when this register is accessed. */
            env1->segs[seg_reg].newselector = selector;
            Log(("sync_seg: out of sync seg_reg=%d selector=%#x\n", seg_reg, selector));
        }
    }

}
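
/* load_seg and load_segment can longjmp out via raise_exception_err when the
   descriptor fails its checks (e.g. not present); the setjmp guard above
   turns that abort into a postponed sync instead of killing the caller. */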


/**
 * Correctly loads a new ldtr selector.
 *
 * @param env1 CPU environment.
 * @param selector Selector to load.
 */
void sync_ldtr(CPUX86State *env1, int selector)
{
    CPUX86State *saved_env = env;
    target_ulong saved_T0 = T0;
    if (setjmp(env1->jmp_env) == 0)
    {
        env = env1;
        T0 = selector;
        helper_lldt_T0();
        T0 = saved_T0;
        env = saved_env;
    }
    else
    {
        T0 = saved_T0;
        env = saved_env;
#ifdef VBOX_STRICT
        cpu_abort(env1, "sync_ldtr: selector=%#x\n", selector);
#endif
    }
}

/**
 * Correctly loads a new tr selector.
 *
 * @returns 0 on success, -1 on failure.
 * @param env1 CPU environment.
 * @param selector Selector to load.
 */
int sync_tr(CPUX86State *env1, int selector)
{
    /* ARG! this was going to call helper_ltr_T0 but that won't work because of busy flag. */
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, type, entry_limit;
    target_ulong ptr;
    CPUX86State *saved_env = env;
    env = env1;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* NULL selector case: invalid TR */
        env->tr.base = 0;
        env->tr.limit = 0;
        env->tr.flags = 0;
    } else {
        if (selector & 0x4)
            goto l_failure;
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            entry_limit = 15;
        else
#endif
            entry_limit = 7;
        if ((index + entry_limit) > dt->limit)
            goto l_failure;
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((e2 & DESC_S_MASK) /*||
            (type != 1 && type != 9)*/)
            goto l_failure;
        if (!(e2 & DESC_P_MASK))
            goto l_failure;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3;
            e3 = ldl_kernel(ptr + 8);
            load_seg_cache_raw_dt(&env->tr, e1, e2);
            env->tr.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->tr, e1, e2);
        }
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    env->tr.selector = selector;

    env = saved_env;
    return 0;
l_failure:
    AssertMsgFailed(("selector=%d\n", selector));
    env = saved_env; /* restore the global context on the failure path too */
    return -1;
}
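
/* Note for sync_tr: helper_ltr_T0 only accepts an available (non-busy) TSS,
   but when the task register is synced back its TSS is normally already
   marked busy; the open-coded variant above therefore skips that check and
   simply (re)stamps DESC_TSS_BUSY_MASK on the descriptor. */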

int emulate_single_instr(CPUX86State *env1)
{
#if 1 /* single stepping is broken when using a static tb... feel free to figure out why. :-) */
    /* This has to be static because it needs to be addressable
       using 32-bit immediate addresses on 64-bit machines. This
       is dictated by the gcc code model used when building this
       module / op.o. Using a static here pushes the problem
       onto the module loader. */
    static TranslationBlock tb_temp;
#endif
    TranslationBlock *tb;
    TranslationBlock *current;
    int csize;
    void (*gen_func)(void);
    uint8_t *tc_ptr;
    uint32_t old_eip;

    /* ensures env is loaded in ebp! */
    CPUX86State *savedenv = env;
    env = env1;

    RAWEx_ProfileStart(env, STATS_EMULATE_SINGLE_INSTR);

#if 1 /* see above */
    tc_ptr = env->pvCodeBuffer;
#else
    tc_ptr = code_gen_ptr;
#endif

    /*
     * Setup temporary translation block.
     */
    /* tb_alloc: */
#if 1 /* see above */
    tb = &tb_temp;
    tb->pc = env->segs[R_CS].base + env->eip;
    tb->cflags = 0;
#else
    tb = tb_alloc(env->segs[R_CS].base + env->eip);
    if (!tb)
    {
        tb_flush(env);
        tb = tb_alloc(env->segs[R_CS].base + env->eip);
    }
#endif

    /* tb_find_slow: */
    tb->tc_ptr = tc_ptr;
    tb->cs_base = env->segs[R_CS].base;
    tb->flags = env->hflags | (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));

    /* Initialize the rest with sensible values. */
    tb->size = 0;
    tb->phys_hash_next = NULL;
    tb->page_next[0] = NULL;
    tb->page_next[1] = NULL;
    tb->page_addr[0] = 0;
    tb->page_addr[1] = 0;
    tb->tb_next_offset[0] = 0xffff;
    tb->tb_next_offset[1] = 0xffff;
    tb->tb_next[0] = 0xffff;
    tb->tb_next[1] = 0xffff;
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;
    tb->jmp_first = NULL;

    current = env->current_tb;
    env->current_tb = NULL;

    /*
     * Translate only one instruction.
     */
    ASMAtomicOrU32(&env->state, CPU_EMULATE_SINGLE_INSTR);
    if (cpu_gen_code(env, tb, env->cbCodeBuffer, &csize) < 0)
    {
        AssertFailed();
        RAWEx_ProfileStop(env, STATS_EMULATE_SINGLE_INSTR);
        ASMAtomicAndU32(&env->state, ~CPU_EMULATE_SINGLE_INSTR);
        env = savedenv;
        return -1;
    }
#ifdef DEBUG
    if (csize > env->cbCodeBuffer)
    {
        RAWEx_ProfileStop(env, STATS_EMULATE_SINGLE_INSTR);
        AssertFailed();
        ASMAtomicAndU32(&env->state, ~CPU_EMULATE_SINGLE_INSTR);
        env = savedenv;
        return -1;
    }
    if (tb->tc_ptr != tc_ptr)
    {
        RAWEx_ProfileStop(env, STATS_EMULATE_SINGLE_INSTR);
        AssertFailed();
        ASMAtomicAndU32(&env->state, ~CPU_EMULATE_SINGLE_INSTR);
        env = savedenv;
        return -1;
    }
#endif
    ASMAtomicAndU32(&env->state, ~CPU_EMULATE_SINGLE_INSTR);

    /* tb_link_phys: */
    tb->jmp_first = (TranslationBlock *)((intptr_t)tb | 2);
    Assert(tb->jmp_next[0] == NULL); Assert(tb->jmp_next[1] == NULL);
    if (tb->tb_next_offset[0] != 0xffff)
        tb_set_jmp_target(tb, 0, (uintptr_t)(tb->tc_ptr + tb->tb_next_offset[0]));
    if (tb->tb_next_offset[1] != 0xffff)
        tb_set_jmp_target(tb, 1, (uintptr_t)(tb->tc_ptr + tb->tb_next_offset[1]));

    /*
     * Execute it using emulation
     */
    old_eip = env->eip;
    gen_func = (void *)tb->tc_ptr;
    env->current_tb = tb;

    // eip remains the same for repeated instructions; no idea why qemu doesn't do a jump inside the generated code
    // perhaps not a very safe hack
    while (old_eip == env->eip)
    {
        gen_func();
        /*
         * Exit once we detect an external interrupt and interrupts are enabled
         */
        if ( (env->interrupt_request & (CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_EXTERNAL_TIMER)) ||
             ( (env->eflags & IF_MASK) &&
               !(env->hflags & HF_INHIBIT_IRQ_MASK) &&
               (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD) ) )
        {
            break;
        }
    }
    env->current_tb = current;

    Assert(tb->phys_hash_next == NULL);
    Assert(tb->page_next[0] == NULL);
    Assert(tb->page_next[1] == NULL);
    Assert(tb->page_addr[0] == 0);
    Assert(tb->page_addr[1] == 0);
/*
    Assert(tb->tb_next_offset[0] == 0xffff);
    Assert(tb->tb_next_offset[1] == 0xffff);
    Assert(tb->tb_next[0] == 0xffff);
    Assert(tb->tb_next[1] == 0xffff);
    Assert(tb->jmp_next[0] == NULL);
    Assert(tb->jmp_next[1] == NULL);
    Assert(tb->jmp_first == NULL); */

    RAWEx_ProfileStop(env, STATS_EMULATE_SINGLE_INSTR);

    /*
     * Execute the next instruction when we encounter instruction fusing.
     */
    if (env->hflags & HF_INHIBIT_IRQ_MASK)
    {
        Log(("REM: Emulating next instruction due to instruction fusing (HF_INHIBIT_IRQ_MASK)\n"));
        env->hflags &= ~HF_INHIBIT_IRQ_MASK;
        emulate_single_instr(env);
    }

    env = savedenv;
    return 0;
}

int get_ss_esp_from_tss_raw(CPUX86State *env1, uint32_t *ss_ptr,
                            uint32_t *esp_ptr, int dpl)
{
    int type, index, shift;

    CPUX86State *savedenv = env;
    env = env1;

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if ((type & 7) != 1)
        cpu_abort(env, "invalid tss type %d", type);
    shift = type >> 3;
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit)
    {
        env = savedenv;
        return 0;
    }
    //raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);

    if (shift == 0) {
        *esp_ptr = lduw_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 2);
    } else {
        *esp_ptr = ldl_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 4);
    }

    env = savedenv;
    return 1;
}
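
/* In a 32-bit TSS the ring-N stack pointers live at offset N*8 + 4, esp
   first with ss in the following word; a 16-bit TSS halves the stride.
   That stride is exactly what the shift derived from the descriptor type
   encodes in index = (dpl * 4 + 2) << shift above. */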

//*****************************************************************************
// Needs to be at the bottom of the file (overriding macros)

static inline CPU86_LDouble helper_fldt_raw(uint8_t *ptr)
{
    return *(CPU86_LDouble *)ptr;
}

static inline void helper_fstt_raw(CPU86_LDouble f, uint8_t *ptr)
{
    *(CPU86_LDouble *)ptr = f;
}

#undef stw
#undef stl
#undef stq
#define stw(a,b) *(uint16_t *)(a) = (uint16_t)(b)
#define stl(a,b) *(uint32_t *)(a) = (uint32_t)(b)
#define stq(a,b) *(uint64_t *)(a) = (uint64_t)(b)
#define data64 0
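/* data64 would select the 64-bit FXSAVE layout with 16 XMM registers; it is
   pinned to 0 here, so nb_xmm_regs below is always 8 << 0 = 8 and only the
   first eight XMM registers are saved and restored. */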

//*****************************************************************************
void restore_raw_fp_state(CPUX86State *env, uint8_t *ptr)
{
    int fpus, fptag, i, nb_xmm_regs;
    CPU86_LDouble tmp;
    uint8_t *addr;

    if (env->cpuid_features & CPUID_FXSR)
    {
        fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
        fptag = 0;
        for (i = 0; i < 8; i++) {
            fptag |= (env->fptags[i] << i);
        }
        stw(ptr, env->fpuc);
        stw(ptr + 2, fpus);
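        /* FXSAVE's tag word is abridged: one bit per register, 1 = valid,
           whereas env->fptags[] uses 1 = empty - hence the xor with 0xff. */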
        stw(ptr + 4, fptag ^ 0xff);

        addr = ptr + 0x20;
        for (i = 0; i < 8; i++) {
            tmp = ST(i);
            helper_fstt_raw(tmp, addr);
            addr += 16;
        }

        if (env->cr[4] & CR4_OSFXSR_MASK) {
            /* XXX: finish it */
            stl(ptr + 0x18, env->mxcsr); /* mxcsr */
            stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
            nb_xmm_regs = 8 << data64;
            addr = ptr + 0xa0;
            for (i = 0; i < nb_xmm_regs; i++) {
#if __GNUC__ < 4
                stq(addr, env->xmm_regs[i].XMM_Q(0));
                stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
#else /* VBOX + __GNUC__ >= 4: gcc 4.x compiler bug - it runs out of registers for the 64-bit value. */
                stl(addr, env->xmm_regs[i].XMM_L(0));
                stl(addr + 4, env->xmm_regs[i].XMM_L(1));
                stl(addr + 8, env->xmm_regs[i].XMM_L(2));
                stl(addr + 12, env->xmm_regs[i].XMM_L(3));
#endif
                addr += 16;
            }
        }
    }
    else
    {
        PX86FPUSTATE fp = (PX86FPUSTATE)ptr;
        int fptag;

        fp->FCW = env->fpuc;
        fp->FSW = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
        fptag = 0;
        for (i = 7; i >= 0; i--) {
            fptag <<= 2;
            if (env->fptags[i]) {
                fptag |= 3;
            } else {
                /* the FPU automatically computes it */
            }
        }
        fp->FTW = fptag;

        for (i = 0; i < 8; i++) {
            tmp = ST(i);
            helper_fstt_raw(tmp, &fp->regs[i].reg[0]);
        }
    }
}

//*****************************************************************************
#undef lduw
#undef ldl
#undef ldq
#define lduw(a) *(uint16_t *)(a)
#define ldl(a) *(uint32_t *)(a)
#define ldq(a) *(uint64_t *)(a)
//*****************************************************************************
void save_raw_fp_state(CPUX86State *env, uint8_t *ptr)
{
    int i, fpus, fptag, nb_xmm_regs;
    CPU86_LDouble tmp;
    uint8_t *addr;

    if (env->cpuid_features & CPUID_FXSR)
    {
        env->fpuc = lduw(ptr);
        fpus = lduw(ptr + 2);
        fptag = lduw(ptr + 4);
        env->fpstt = (fpus >> 11) & 7;
        env->fpus = fpus & ~0x3800;
        fptag ^= 0xff;
        for (i = 0; i < 8; i++) {
            env->fptags[i] = ((fptag >> i) & 1);
        }

        addr = ptr + 0x20;
        for (i = 0; i < 8; i++) {
            tmp = helper_fldt_raw(addr);
            ST(i) = tmp;
            addr += 16;
        }

        if (env->cr[4] & CR4_OSFXSR_MASK) {
            /* XXX: finish it, endianness */
            env->mxcsr = ldl(ptr + 0x18);
            //ldl(ptr + 0x1c);
            nb_xmm_regs = 8 << data64;
            addr = ptr + 0xa0;
            for (i = 0; i < nb_xmm_regs; i++) {
#if HC_ARCH_BITS == 32
                /* this is a workaround for http://gcc.gnu.org/bugzilla/show_bug.cgi?id=35135 */
                env->xmm_regs[i].XMM_L(0) = ldl(addr);
                env->xmm_regs[i].XMM_L(1) = ldl(addr + 4);
                env->xmm_regs[i].XMM_L(2) = ldl(addr + 8);
                env->xmm_regs[i].XMM_L(3) = ldl(addr + 12);
#else
                env->xmm_regs[i].XMM_Q(0) = ldq(addr);
                env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
#endif
                addr += 16;
            }
        }
    }
    else
    {
        PX86FPUSTATE fp = (PX86FPUSTATE)ptr;
        int fptag;

        env->fpuc = fp->FCW;
        env->fpstt = (fp->FSW >> 11) & 7;
        env->fpus = fp->FSW & ~0x3800;
        fptag = fp->FTW;
        for (i = 0; i < 8; i++) {
            env->fptags[i] = ((fptag & 3) == 3);
            fptag >>= 2;
        }
        for (i = 0; i < 8; i++) {
            tmp = helper_fldt_raw(&fp->regs[i].reg[0]);
            ST(i) = tmp;
        }
    }
}
//*****************************************************************************
//*****************************************************************************

#endif /* VBOX */
