VirtualBox

source: vbox/trunk/src/recompiler/target-i386/helper.c@3261

Last change on this file since 3261 was 3023, checked in by vboxsync, 17 years ago

only check for excessive faults when in protected mode

/*
 * i386 helpers
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#ifdef VBOX
# include <VBox/err.h>
#endif
#include "exec.h"

//#define DEBUG_PCALL

#if 0
#define raise_exception_err(a, b)\
do {\
    if (logfile)\
        fprintf(logfile, "raise_exception line=%d\n", __LINE__);\
    (raise_exception_err)(a, b);\
} while (0)
#endif

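/* parity_table[b] is CC_P when the byte value b contains an even number
   of set bits: the x86 parity flag reflects only the low 8 bits of a
   result, so one 256-entry table covers every case. */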
const uint8_t parity_table[256] = {
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
};

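/* RCL rotates through CF, so the effective rotate count is taken modulo
   (operand width + 1): mod 17 for word and mod 9 for byte operands.
   These tables fold the raw 5-bit count for those two cases. */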
/* modulo 17 table */
const uint8_t rclw_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 9,10,11,12,13,14,15,
   16, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 9,10,11,12,13,14,
};

/* modulo 9 table */
const uint8_t rclb_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 0, 1, 2, 3, 4, 5,
    6, 7, 8, 0, 1, 2, 3, 4,
};

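/* Constants used by the FPU constant-load instructions (FLDZ, FLD1,
   FLDPI, FLDLG2, FLDLN2, FLDL2E, FLDL2T), in the order listed below. */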
const CPU86_LDouble f15rk[7] =
{
    0.00000000000000000000L,
    1.00000000000000000000L,
    3.14159265358979323851L,  /*pi*/
    0.30102999566398119523L,  /*lg2*/
    0.69314718055994530943L,  /*ln2*/
    1.44269504088896340739L,  /*l2e*/
    3.32192809488736234781L,  /*l2t*/
};

/* thread support */

spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;

void cpu_lock(void)
{
    spin_lock(&global_cpu_lock);
}

void cpu_unlock(void)
{
    spin_unlock(&global_cpu_lock);
}

void cpu_loop_exit(void)
{
    /* NOTE: the registers at this point must be saved by hand because
       longjmp restores them */
    regs_to_env();
    longjmp(env->jmp_env, 1);
}

/* return non-zero on error */
static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
                               int selector)
{
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    if (selector & 0x4)
        dt = &env->ldt;
    else
        dt = &env->gdt;
    index = selector & ~7;
    if ((index + 7) > dt->limit)
        return -1;
    ptr = dt->base + index;
    *e1_ptr = ldl_kernel(ptr);
    *e2_ptr = ldl_kernel(ptr + 4);
    return 0;
}

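/* A segment descriptor is two 32-bit words: e1 holds limit bits 15:0
   and base bits 15:0; e2 holds base bits 23:16, the type/attribute
   bits, limit bits 19:16 and base bits 31:24. The helpers below
   reassemble base and limit from that split encoding. */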
static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
{
    unsigned int limit;
    limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK)
        limit = (limit << 12) | 0xfff;
    return limit;
}

static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
{
    return ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
}

static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
{
    sc->base = get_seg_base(e1, e2);
    sc->limit = get_seg_limit(e1, e2);
    sc->flags = e2;
}

/* init the segment cache in vm86 mode. */
static inline void load_seg_vm(int seg, int selector)
{
    selector &= 0xffff;
    cpu_x86_load_seg_cache(env, seg, selector,
                           (selector << 4), 0xffff, 0);
}

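/* Fetch the privilege-level dpl stack pointer from the current TSS: a
   32-bit TSS keeps 8-byte ESP/SS pairs starting at offset 4, a 16-bit
   TSS keeps 4-byte SP/SS pairs starting at offset 2, which is exactly
   what index = (dpl * 4 + 2) << shift computes. */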
static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
                                       uint32_t *esp_ptr, int dpl)
{
    int type, index, shift;

#if 0
    {
        int i;
        printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
        for(i=0;i<env->tr.limit;i++) {
            printf("%02x ", env->tr.base[i]);
            if ((i & 7) == 7) printf("\n");
        }
        printf("\n");
    }
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if ((type & 7) != 1)
        cpu_abort(env, "invalid tss type %d", type);
    shift = type >> 3;
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    if (shift == 0) {
        *esp_ptr = lduw_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 2);
    } else {
        *esp_ptr = ldl_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 4);
    }
}

/* XXX: merge with load_seg() */
static void tss_load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int rpl, dpl, cpl;

    if ((selector & 0xfffc) != 0) {
        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        cpl = env->hflags & HF_CPL_MASK;
        if (seg_reg == R_CS) {
            if (!(e2 & DESC_CS_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* XXX: is this correct? */
            if (dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if ((e2 & DESC_C_MASK) && dpl > rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else if (seg_reg == R_SS) {
            /* SS must be writable data */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if (dpl != cpl || dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else {
            /* not readable code */
            if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* if data or non-conforming code, check the rights */
            if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            }
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
    } else {
        if (seg_reg == R_SS || seg_reg == R_CS)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
    }
}

#define SWITCH_TSS_JMP  0
#define SWITCH_TSS_IRET 1
#define SWITCH_TSS_CALL 2

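/* Hardware task switch: validate the target TSS, touch the old TSS so
   page faults cannot hit mid-switch, save the outgoing context into
   the old TSS, manage the descriptor busy bits and the NT flag/back
   link, then load the full CPU state from the new TSS. */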
/* XXX: restore CPU state in registers (PowerPC case) */
static void switch_tss(int tss_selector,
                       uint32_t e1, uint32_t e2, int source,
                       uint32_t next_eip)
{
    int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
    target_ulong tss_base;
    uint32_t new_regs[8], new_segs[6];
    uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
    uint32_t old_eflags, eflags_mask;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
#ifdef DEBUG_PCALL
    if (loglevel & CPU_LOG_PCALL)
        fprintf(logfile, "switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);
#endif

#if defined(VBOX) && defined(DEBUG)
    printf("switch_tss %x %x %x %d %08x\n", tss_selector, e1, e2, source, next_eip);
#endif

    /* if task gate, we read the TSS segment and we load it */
    if (type == 5) {
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
        tss_selector = e1 >> 16;
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        if (e2 & DESC_S_MASK)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((type & 7) != 1)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
    }

    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);

    if (type & 8)
        tss_limit_max = 103;
    else
        tss_limit_max = 43;
    tss_limit = get_seg_limit(e1, e2);
    tss_base = get_seg_base(e1, e2);
    if ((tss_selector & 4) != 0 ||
        tss_limit < tss_limit_max)
        raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
    old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if (old_type & 8)
        old_tss_limit_max = 103;
    else
        old_tss_limit_max = 43;

    /* read all the registers from the new TSS */
    if (type & 8) {
        /* 32 bit */
        new_cr3 = ldl_kernel(tss_base + 0x1c);
        new_eip = ldl_kernel(tss_base + 0x20);
        new_eflags = ldl_kernel(tss_base + 0x24);
        for(i = 0; i < 8; i++)
            new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
        for(i = 0; i < 6; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x60);
        new_trap = ldl_kernel(tss_base + 0x64);
    } else {
        /* 16 bit */
        new_cr3 = 0;
        new_eip = lduw_kernel(tss_base + 0x0e);
        new_eflags = lduw_kernel(tss_base + 0x10);
        for(i = 0; i < 8; i++)
            new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
        for(i = 0; i < 4; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x2a);
        new_segs[R_FS] = 0;
        new_segs[R_GS] = 0;
        new_trap = 0;
    }

    /* NOTE: we must avoid memory exceptions during the task switch,
       so we make dummy accesses beforehand */
    /* XXX: it can still fail in some cases, so a bigger hack is
       necessary to validate the TLB after having done the accesses */

    v1 = ldub_kernel(env->tr.base);
    v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
    stb_kernel(env->tr.base, v1);
    stb_kernel(env->tr.base + old_tss_limit_max, v2);

    /* clear busy bit (it is restartable) */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
        target_ulong ptr;
        uint32_t e2;
        ptr = env->gdt.base + (env->tr.selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 &= ~DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    old_eflags = compute_eflags();
    if (source == SWITCH_TSS_IRET)
        old_eflags &= ~NT_MASK;

    /* save the current state in the old TSS */
    if (type & 8) {
        /* 32 bit */
        stl_kernel(env->tr.base + 0x20, next_eip);
        stl_kernel(env->tr.base + 0x24, old_eflags);
        stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
        stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
        stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
        stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
        stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
        stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
        stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
        stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
        for(i = 0; i < 6; i++)
            stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
#if defined(VBOX) && defined(DEBUG)
        printf("TSS 32 bits switch\n");
        printf("Saving CS=%08X\n", env->segs[R_CS].selector);
#endif
    } else {
        /* 16 bit */
        stw_kernel(env->tr.base + 0x0e, next_eip);
        stw_kernel(env->tr.base + 0x10, old_eflags);
        stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
        stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
        stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
        stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
        stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
        stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
        stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
        stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
        for(i = 0; i < 4; i++)
            stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
    }

    /* now if an exception occurs, it will occur in the next task
       context */

    if (source == SWITCH_TSS_CALL) {
        stw_kernel(tss_base, env->tr.selector);
        new_eflags |= NT_MASK;
    }

    /* set busy bit */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
        target_ulong ptr;
        uint32_t e2;
        ptr = env->gdt.base + (tss_selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }

    /* set the new CPU state */
    /* from this point, any exception which occurs can give problems */
    env->cr[0] |= CR0_TS_MASK;
    env->hflags |= HF_TS_MASK;
    env->tr.selector = tss_selector;
    env->tr.base = tss_base;
    env->tr.limit = tss_limit;
    env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;

    if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
        cpu_x86_update_cr3(env, new_cr3);
    }

    /* load all registers without an exception, then reload them with
       a possible exception */
    env->eip = new_eip;
    eflags_mask = TF_MASK | AC_MASK | ID_MASK |
        IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
    if (!(type & 8))
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
    /* XXX: what to do in the 16 bit case? */
    EAX = new_regs[0];
    ECX = new_regs[1];
    EDX = new_regs[2];
    EBX = new_regs[3];
    ESP = new_regs[4];
    EBP = new_regs[5];
    ESI = new_regs[6];
    EDI = new_regs[7];
    if (new_eflags & VM_MASK) {
        for(i = 0; i < 6; i++)
            load_seg_vm(i, new_segs[i]);
        /* in vm86, CPL is always 3 */
        cpu_x86_set_cpl(env, 3);
    } else {
        /* CPL is set to the RPL of CS */
        cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
        /* first just selectors as the rest may trigger exceptions */
        for(i = 0; i < 6; i++)
            cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
    }

    env->ldt.selector = new_ldt & ~4;
    env->ldt.base = 0;
    env->ldt.limit = 0;
    env->ldt.flags = 0;

    /* load the LDT */
    if (new_ldt & 4)
        raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);

    if ((new_ldt & 0xfffc) != 0) {
        dt = &env->gdt;
        index = new_ldt & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
    }

    /* load the segments */
    if (!(new_eflags & VM_MASK)) {
        tss_load_seg(R_CS, new_segs[R_CS]);
        tss_load_seg(R_SS, new_segs[R_SS]);
        tss_load_seg(R_ES, new_segs[R_ES]);
        tss_load_seg(R_DS, new_segs[R_DS]);
        tss_load_seg(R_FS, new_segs[R_FS]);
        tss_load_seg(R_GS, new_segs[R_GS]);
    }

    /* check that EIP is in the CS segment limits */
    if (new_eip > env->segs[R_CS].limit) {
        /* XXX: different exception if CALL? */
        raise_exception_err(EXCP0D_GPF, 0);
    }
}

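/* The TSS I/O permission bitmap starts at the 16-bit offset stored at
   TSS offset 0x66; each bit covers one port and the port is accessible
   only when its bit is clear. Two bytes are read so that an access
   spanning a byte boundary is checked in one go. */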
/* check if Port I/O is allowed in TSS */
static inline void check_io(int addr, int size)
{
    int io_offset, val, mask;

    /* TSS must be a valid 32 bit one */
    if (!(env->tr.flags & DESC_P_MASK) ||
        ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
        env->tr.limit < 103)
        goto fail;
    io_offset = lduw_kernel(env->tr.base + 0x66);
    io_offset += (addr >> 3);
    /* Note: the check needs two bytes */
    if ((io_offset + 1) > env->tr.limit)
        goto fail;
    val = lduw_kernel(env->tr.base + io_offset);
    val >>= (addr & 7);
    mask = (1 << size) - 1;
    /* all bits must be zero to allow the I/O */
    if ((val & mask) != 0) {
    fail:
        raise_exception_err(EXCP0D_GPF, 0);
    }
}

void check_iob_T0(void)
{
    check_io(T0, 1);
}

void check_iow_T0(void)
{
    check_io(T0, 2);
}

void check_iol_T0(void)
{
    check_io(T0, 4);
}

void check_iob_DX(void)
{
    check_io(EDX & 0xffff, 1);
}

void check_iow_DX(void)
{
    check_io(EDX & 0xffff, 2);
}

void check_iol_DX(void)
{
    check_io(EDX & 0xffff, 4);
}

static inline unsigned int get_sp_mask(unsigned int e2)
{
    if (e2 & DESC_B_MASK)
        return 0xffffffff;
    else
        return 0xffff;
}

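/* Stack accesses honour the SS size: a 16-bit stack (B bit clear) is
   masked with 0xffff and must leave the high bits of ESP untouched,
   hence the SET_ESP variants below. */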
#ifdef TARGET_X86_64
#define SET_ESP(val, sp_mask)\
do {\
    if ((sp_mask) == 0xffff)\
        ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
    else if ((sp_mask) == 0xffffffffLL)\
        ESP = (uint32_t)(val);\
    else\
        ESP = (val);\
} while (0)
#else
#define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
#endif

/* XXX: add an is_user flag to have proper security support */
#define PUSHW(ssp, sp, sp_mask, val)\
{\
    sp -= 2;\
    stw_kernel((ssp) + (sp & (sp_mask)), (val));\
}

#define PUSHL(ssp, sp, sp_mask, val)\
{\
    sp -= 4;\
    stl_kernel((ssp) + (sp & (sp_mask)), (val));\
}

#define POPW(ssp, sp, sp_mask, val)\
{\
    val = lduw_kernel((ssp) + (sp & (sp_mask)));\
    sp += 2;\
}

#define POPL(ssp, sp, sp_mask, val)\
{\
    val = (uint32_t)ldl_kernel((ssp) + (sp & (sp_mask)));\
    sp += 4;\
}

/* protected mode interrupt */
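/* The frame pushed on the handler stack is: SS:ESP of the interrupted
   context (only on a stack switch), then EFLAGS, CS, EIP and, for some
   exceptions, an error code; when interrupting vm86 code the GS/FS/DS/ES
   selectors are pushed first. */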
static void do_interrupt_protected(int intno, int is_int, int error_code,
                                   unsigned int next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int type, dpl, selector, ss_dpl, cpl;
    int has_error_code, new_stack, shift;
    uint32_t e1, e2, offset, ss, esp, ss_e1, ss_e2;
    uint32_t old_eip, sp_mask;

#ifdef VBOX
    if (remR3NotifyTrap(env, intno, error_code, next_eip) != VINF_SUCCESS)
        cpu_loop_exit();
#endif

    has_error_code = 0;
    if (!is_int && !is_hw) {
        switch(intno) {
        case 8:
        case 10:
        case 11:
        case 12:
        case 13:
        case 14:
        case 17:
            has_error_code = 1;
            break;
        }
    }
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    dt = &env->idt;
    if (intno * 8 + 7 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 8;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 5: /* task gate */
        /* must do that check here to return the correct error code */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
        switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
        if (has_error_code) {
            int type;
            uint32_t mask;
            /* push the error code */
            type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
            shift = type >> 3;
            if (env->segs[R_SS].flags & DESC_B_MASK)
                mask = 0xffffffff;
            else
                mask = 0xffff;
            esp = (ESP - (2 << shift)) & mask;
            ssp = env->segs[R_SS].base + esp;
            if (shift)
                stl_kernel(ssp, error_code);
            else
                stw_kernel(ssp, error_code);
            SET_ESP(esp, mask);
        }
        return;
    case 6: /* 286 interrupt gate */
    case 7: /* 286 trap gate */
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
    selector = e1 >> 16;
    offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_C_MASK) && dpl < cpl) {
        /* to inner privilege */
        get_ss_esp_from_tss(&ss, &esp, dpl);
        if ((ss & 0xfffc) == 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if ((ss & 3) != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (load_segment(&ss_e1, &ss_e2, ss) != 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
        if (ss_dpl != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        new_stack = 1;
        sp_mask = get_sp_mask(ss_e2);
        ssp = get_seg_base(ss_e1, ss_e2);
#if defined(VBOX) && defined(DEBUG)
        printf("new stack %04X:%08X gate dpl=%d\n", ss, esp, dpl);
#endif
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
        ssp = env->segs[R_SS].base;
        esp = ESP;
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        sp_mask = 0; /* avoid warning */
        ssp = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    shift = type >> 3;

#if 0
    /* XXX: check that enough room is available */
    push_size = 6 + (new_stack << 2) + (has_error_code << 1);
    if (env->eflags & VM_MASK)
        push_size += 8;
    push_size <<= shift;
#endif
    if (shift == 1) {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHL(ssp, esp, sp_mask, ESP);
        }
        PUSHL(ssp, esp, sp_mask, compute_eflags());
        PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHL(ssp, esp, sp_mask, error_code);
        }
    } else {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHW(ssp, esp, sp_mask, ESP);
        }
        PUSHW(ssp, esp, sp_mask, compute_eflags());
        PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHW(ssp, esp, sp_mask, error_code);
        }
    }

    if (new_stack) {
        if (env->eflags & VM_MASK) {
            cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
        }
        ss = (ss & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss,
                               ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
    }
    SET_ESP(esp, sp_mask);

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                           get_seg_base(e1, e2),
                           get_seg_limit(e1, e2),
                           e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gates clear the IF flag */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}

#ifdef VBOX

/* check if VME interrupt redirection is enabled in TSS */
static inline bool is_vme_irq_redirected(int intno)
{
    int io_offset, intredir_offset;
    unsigned char val, mask;

    /* TSS must be a valid 32 bit one */
    if (!(env->tr.flags & DESC_P_MASK) ||
        ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
        env->tr.limit < 103)
        goto fail;
    io_offset = lduw_kernel(env->tr.base + 0x66);
    /* the virtual interrupt redirection bitmap is located below the io bitmap */
    intredir_offset = io_offset - 0x20;

    intredir_offset += (intno >> 3);
    if ((intredir_offset) > env->tr.limit)
        goto fail;

    val = ldub_kernel(env->tr.base + intredir_offset);
    mask = 1 << (unsigned char)(intno & 7);

    /* bit set means no redirection. */
    if ((val & mask) != 0) {
        return false;
    }
    return true;

fail:
    raise_exception_err(EXCP0D_GPF, 0);
    return true;
}

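/* With CR4.VME set, a software INT n in v86 mode consults the 32-byte
   interrupt redirection bitmap just below the I/O bitmap: a clear bit
   redirects the interrupt through the virtual-8086 IVT at linear
   address 0, otherwise it is handled (or faults) as in plain protected
   mode. */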
/* V86 mode software interrupt with CR4.VME=1 */
static void do_soft_interrupt_vme(int intno, int error_code, unsigned int next_eip)
{
    target_ulong ptr, ssp;
    int selector;
    uint32_t offset, esp;
    uint32_t old_cs, old_eflags;
    uint32_t iopl;

    iopl = ((env->eflags >> IOPL_SHIFT) & 3);

    if (!is_vme_irq_redirected(intno))
    {
        if (iopl == 3)
            /* normal protected mode handler call */
            return do_interrupt_protected(intno, 1, error_code, next_eip, 0);
        else
            raise_exception_err(EXCP0D_GPF, 0);
    }

    /* virtual mode idt is at linear address 0 */
    ptr = 0 + intno * 4;
    offset = lduw_kernel(ptr);
    selector = lduw_kernel(ptr + 2);
    esp = ESP;
    ssp = env->segs[R_SS].base;
    old_cs = env->segs[R_CS].selector;

    old_eflags = compute_eflags();
    if (iopl < 3)
    {
        /* copy VIF into IF and set IOPL to 3 */
        if (env->eflags & VIF_MASK)
            old_eflags |= IF_MASK;
        else
            old_eflags &= ~IF_MASK;

        old_eflags |= (3 << IOPL_SHIFT);
    }

    /* XXX: use SS segment size? */
    PUSHW(ssp, esp, 0xffff, old_eflags);
    PUSHW(ssp, esp, 0xffff, old_cs);
    PUSHW(ssp, esp, 0xffff, next_eip);

    /* update processor state */
    ESP = (ESP & ~0xffff) | (esp & 0xffff);
    env->eip = offset;
    env->segs[R_CS].selector = selector;
    env->segs[R_CS].base = (selector << 4);
    env->eflags &= ~(TF_MASK | RF_MASK);

    if (iopl < 3)
        env->eflags &= ~VIF_MASK;
    else
        env->eflags &= ~IF_MASK;
}
#endif /* VBOX */

#ifdef TARGET_X86_64

#define PUSHQ(sp, val)\
{\
    sp -= 8;\
    stq_kernel(sp, (val));\
}

#define POPQ(sp, val)\
{\
    val = ldq_kernel(sp);\
    sp += 8;\
}

static inline target_ulong get_rsp_from_tss(int level)
{
    int index;

#if 0
    printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
           env->tr.base, env->tr.limit);
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    index = 8 * level + 4;
    if ((index + 7) > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    return ldq_kernel(env->tr.base + index);
}

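/* In long mode IDT entries are 16 bytes wide and carry a 3-bit IST
   field; a non-zero IST unconditionally switches to one of the seven
   alternate stacks in the 64-bit TSS, which get_rsp_from_tss() reaches
   via level = ist + 3. */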
/* 64 bit interrupt */
static void do_interrupt64(int intno, int is_int, int error_code,
                           target_ulong next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr;
    int type, dpl, selector, cpl, ist;
    int has_error_code, new_stack;
    uint32_t e1, e2, e3, ss;
    target_ulong old_eip, esp, offset;

    has_error_code = 0;
    if (!is_int && !is_hw) {
        switch(intno) {
        case 8:
        case 10:
        case 11:
        case 12:
        case 13:
        case 14:
        case 17:
            has_error_code = 1;
            break;
        }
    }
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    dt = &env->idt;
    if (intno * 16 + 15 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    ptr = dt->base + intno * 16;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    e3 = ldl_kernel(ptr + 8);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2);
    selector = e1 >> 16;
    offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    ist = e2 & 7;
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
        /* to inner privilege */
        if (ist != 0)
            esp = get_rsp_from_tss(ist + 3);
        else
            esp = get_rsp_from_tss(dpl);
        esp &= ~0xfLL; /* align stack */
        ss = 0;
        new_stack = 1;
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        if (ist != 0)
            esp = get_rsp_from_tss(ist + 3);
        else
            esp = ESP;
        esp &= ~0xfLL; /* align stack */
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    PUSHQ(esp, env->segs[R_SS].selector);
    PUSHQ(esp, ESP);
    PUSHQ(esp, compute_eflags());
    PUSHQ(esp, env->segs[R_CS].selector);
    PUSHQ(esp, old_eip);
    if (has_error_code) {
        PUSHQ(esp, error_code);
    }

    if (new_stack) {
        ss = 0 | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
    }
    ESP = esp;

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                           get_seg_base(e1, e2),
                           get_seg_limit(e1, e2),
                           e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gates clear the IF flag */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}
#endif

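/* SYSCALL: the new CS selector comes from STAR[47:32] (SS is CS + 8);
   in long mode the return RIP goes to RCX, RFLAGS to R11, the flags
   are masked with the SFMASK MSR (env->fmask) and RIP is taken from
   LSTAR (64-bit caller) or CSTAR (compatibility mode). In legacy mode
   EIP comes from STAR[31:0] and only IF/RF/VM are cleared. */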
void helper_syscall(int next_eip_addend)
{
    int selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    selector = (env->star >> 32) & 0xffff;
#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        int code64;

        ECX = env->eip + next_eip_addend;
        env->regs[11] = compute_eflags();

        code64 = env->hflags & HF_CS64_MASK;

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~env->fmask;
        if (code64)
            env->eip = env->lstar;
        else
            env->eip = env->cstar;
    } else
#endif
    {
        ECX = (uint32_t)(env->eip + next_eip_addend);

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
        env->eip = (uint32_t)env->star;
    }
}

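/* SYSRET: the return CS selector comes from STAR[63:48], plus 16 when
   returning to 64-bit code so that one STAR value can cover both the
   32-bit and 64-bit user code descriptors; the CPL is forced back to 3
   and, in long mode, RFLAGS is restored from R11. */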
void helper_sysret(int dflag)
{
    int cpl, selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    cpl = env->hflags & HF_CPL_MASK;
    if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    selector = (env->star >> 48) & 0xffff;
#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        if (dflag == 2) {
            cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                                   DESC_L_MASK);
            env->eip = ECX;
        } else {
            cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
            env->eip = (uint32_t)ECX;
        }
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
                    IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
        cpu_x86_set_cpl(env, 3);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        env->eip = (uint32_t)ECX;
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags |= IF_MASK;
        cpu_x86_set_cpl(env, 3);
    }
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        if (env->hflags & HF_LMA_MASK)
            CC_OP = CC_OP_EFLAGS;
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}

#ifdef VBOX
/**
 * Checks and processes external VMM events.
 * Called by op_check_external_event() when any of the flags is set and can be serviced.
 */
void helper_external_event(void)
{
#if defined(__DARWIN__) && defined(VBOX_STRICT)
    uintptr_t uESP;
    __asm__ __volatile__("movl %%esp, %0" : "=r" (uESP));
    AssertMsg(!(uESP & 15), ("esp=%#p\n", uESP));
#endif
    if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD)
    {
        ASMAtomicAndS32(&env->interrupt_request, ~CPU_INTERRUPT_EXTERNAL_HARD);
        cpu_interrupt(env, CPU_INTERRUPT_HARD);
    }
    if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_EXIT)
    {
        ASMAtomicAndS32(&env->interrupt_request, ~CPU_INTERRUPT_EXTERNAL_EXIT);
        cpu_interrupt(env, CPU_INTERRUPT_EXIT);
    }
    if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_DMA)
    {
        ASMAtomicAndS32(&env->interrupt_request, ~CPU_INTERRUPT_EXTERNAL_DMA);
        remR3DmaRun(env);
    }
    if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_TIMER)
    {
        ASMAtomicAndS32(&env->interrupt_request, ~CPU_INTERRUPT_EXTERNAL_TIMER);
        remR3TimersRun(env);
    }
}
#endif /* VBOX */

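/* In real mode the IDT is a plain interrupt vector table of 4-byte
   entries (16-bit offset, then segment); the handler is entered with
   FLAGS, CS and IP pushed on the 16-bit stack. */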
/* real mode interrupt */
static void do_interrupt_real(int intno, int is_int, int error_code,
                              unsigned int next_eip)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int selector;
    uint32_t offset, esp;
    uint32_t old_cs, old_eip;

    /* real mode (simpler!) */
    dt = &env->idt;
    if (intno * 4 + 3 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 4;
    offset = lduw_kernel(ptr);
    selector = lduw_kernel(ptr + 2);
    esp = ESP;
    ssp = env->segs[R_SS].base;
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;
    old_cs = env->segs[R_CS].selector;
    /* XXX: use SS segment size? */
    PUSHW(ssp, esp, 0xffff, compute_eflags());
    PUSHW(ssp, esp, 0xffff, old_cs);
    PUSHW(ssp, esp, 0xffff, old_eip);

    /* update processor state */
    ESP = (ESP & ~0xffff) | (esp & 0xffff);
    env->eip = offset;
    env->segs[R_CS].selector = selector;
    env->segs[R_CS].base = (selector << 4);
    env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
}

/* fake user mode interrupt */
void do_interrupt_user(int intno, int is_int, int error_code,
                       target_ulong next_eip)
{
    SegmentCache *dt;
    target_ulong ptr;
    int dpl, cpl;
    uint32_t e2;

    dt = &env->idt;
    ptr = dt->base + (intno * 8);
    e2 = ldl_kernel(ptr + 4);

    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);

    /* Since we emulate only user space, we cannot do more than
       exiting the emulation with the suitable exception and error
       code */
    if (is_int)
        EIP = next_eip;
}

/*
 * Begin execution of an interrupt. is_int is TRUE if coming from
 * the int instruction. next_eip is the EIP value AFTER the interrupt
 * instruction. It is only relevant if is_int is TRUE.
 */
void do_interrupt(int intno, int is_int, int error_code,
                  target_ulong next_eip, int is_hw)
{
    if (loglevel & CPU_LOG_INT) {
        if ((env->cr[0] & CR0_PE_MASK)) {
            static int count;
            fprintf(logfile, "%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
                    count, intno, error_code, is_int,
                    env->hflags & HF_CPL_MASK,
                    env->segs[R_CS].selector, EIP,
                    (int)env->segs[R_CS].base + EIP,
                    env->segs[R_SS].selector, ESP);
            if (intno == 0x0e) {
                fprintf(logfile, " CR2=" TARGET_FMT_lx, env->cr[2]);
            } else {
                fprintf(logfile, " EAX=" TARGET_FMT_lx, EAX);
            }
            fprintf(logfile, "\n");
            cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
#if 0
            {
                int i;
                uint8_t *ptr;
                fprintf(logfile, " code=");
                ptr = env->segs[R_CS].base + env->eip;
                for(i = 0; i < 16; i++) {
                    fprintf(logfile, " %02x", ldub(ptr + i));
                }
                fprintf(logfile, "\n");
            }
#endif
            count++;
        }
    }
    if (env->cr[0] & CR0_PE_MASK) {
#if TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
        } else
#endif
        {
#ifdef VBOX
            /* int xx *, v86 code and VME enabled? */
            if (    (env->eflags & VM_MASK)
                &&  (env->cr[4] & CR4_VME_MASK)
                &&  is_int
                &&  !is_hw
                &&  env->eip + 1 != next_eip /* single byte int 3 goes straight to the protected mode handler */
               )
                do_soft_interrupt_vme(intno, error_code, next_eip);
            else
#endif /* VBOX */
                do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
        }
    } else {
        do_interrupt_real(intno, is_int, error_code, next_eip);
    }
}

/*
 * Signal an interrupt. It is executed in the main CPU loop.
 * is_int is TRUE if coming from the int instruction. next_eip is the
 * EIP value AFTER the interrupt instruction. It is only relevant if
 * is_int is TRUE.
 */
void raise_interrupt(int intno, int is_int, int error_code,
                     int next_eip_addend)
{
#if defined(VBOX) && defined(DEBUG) && !defined(DEBUG_dmik)
    Log2(("raise_interrupt: %x %x %x %08x\n", intno, is_int, error_code, env->eip + next_eip_addend));
#endif
    env->exception_index = intno;
    env->error_code = error_code;
    env->exception_is_int = is_int;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit();
}

/* same as raise_exception_err, but do not restore global registers */
static void raise_exception_err_norestore(int exception_index, int error_code)
{
    env->exception_index = exception_index;
    env->error_code = error_code;
    env->exception_is_int = 0;
    env->exception_next_eip = 0;
    longjmp(env->jmp_env, 1);
}

/* shortcuts to generate exceptions */

void (raise_exception_err)(int exception_index, int error_code)
{
    raise_interrupt(exception_index, 0, error_code, 0);
}

void raise_exception(int exception_index)
{
    raise_interrupt(exception_index, 0, 0, 0);
}

/* SMM support */

#if defined(CONFIG_USER_ONLY)

void do_smm_enter(void)
{
}

void helper_rsm(void)
{
}

#else

#ifdef TARGET_X86_64
#define SMM_REVISION_ID 0x00020064
#else
#define SMM_REVISION_ID 0x00020000
#endif

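/* On SMI the CPU dumps its state into the SMRAM save area at
   SMBASE + 0x8000 (layout keyed by the revision ID at offset 0x7efc of
   the save area) and resumes at SMBASE + 0x8000 with CS.base = SMBASE;
   RSM reverses the process and may relocate SMBASE from the saved
   image. */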
void do_smm_enter(void)
{
#ifdef VBOX
    cpu_abort(env, "do_smm_enter");
#else /* !VBOX */
    target_ulong sm_state;
    SegmentCache *dt;
    int i, offset;

    if (loglevel & CPU_LOG_INT) {
        fprintf(logfile, "SMM: enter\n");
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
    }

    env->hflags |= HF_SMM_MASK;
    cpu_smm_update(env);

    sm_state = env->smbase + 0x8000;

#ifdef TARGET_X86_64
    for(i = 0; i < 6; i++) {
        dt = &env->segs[i];
        offset = 0x7e00 + i * 16;
        stw_phys(sm_state + offset, dt->selector);
        stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
        stl_phys(sm_state + offset + 4, dt->limit);
        stq_phys(sm_state + offset + 8, dt->base);
    }

    stq_phys(sm_state + 0x7e68, env->gdt.base);
    stl_phys(sm_state + 0x7e64, env->gdt.limit);

    stw_phys(sm_state + 0x7e70, env->ldt.selector);
    stq_phys(sm_state + 0x7e78, env->ldt.base);
    stl_phys(sm_state + 0x7e74, env->ldt.limit);
    stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7e88, env->idt.base);
    stl_phys(sm_state + 0x7e84, env->idt.limit);

    stw_phys(sm_state + 0x7e90, env->tr.selector);
    stq_phys(sm_state + 0x7e98, env->tr.base);
    stl_phys(sm_state + 0x7e94, env->tr.limit);
    stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7ed0, env->efer);

    stq_phys(sm_state + 0x7ff8, EAX);
    stq_phys(sm_state + 0x7ff0, ECX);
    stq_phys(sm_state + 0x7fe8, EDX);
    stq_phys(sm_state + 0x7fe0, EBX);
    stq_phys(sm_state + 0x7fd8, ESP);
    stq_phys(sm_state + 0x7fd0, EBP);
    stq_phys(sm_state + 0x7fc8, ESI);
    stq_phys(sm_state + 0x7fc0, EDI);
    for(i = 8; i < 16; i++)
        stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
    stq_phys(sm_state + 0x7f78, env->eip);
    stl_phys(sm_state + 0x7f70, compute_eflags());
    stl_phys(sm_state + 0x7f68, env->dr[6]);
    stl_phys(sm_state + 0x7f60, env->dr[7]);

    stl_phys(sm_state + 0x7f48, env->cr[4]);
    stl_phys(sm_state + 0x7f50, env->cr[3]);
    stl_phys(sm_state + 0x7f58, env->cr[0]);

    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7f00, env->smbase);
#else
    stl_phys(sm_state + 0x7ffc, env->cr[0]);
    stl_phys(sm_state + 0x7ff8, env->cr[3]);
    stl_phys(sm_state + 0x7ff4, compute_eflags());
    stl_phys(sm_state + 0x7ff0, env->eip);
    stl_phys(sm_state + 0x7fec, EDI);
    stl_phys(sm_state + 0x7fe8, ESI);
    stl_phys(sm_state + 0x7fe4, EBP);
    stl_phys(sm_state + 0x7fe0, ESP);
    stl_phys(sm_state + 0x7fdc, EBX);
    stl_phys(sm_state + 0x7fd8, EDX);
    stl_phys(sm_state + 0x7fd4, ECX);
    stl_phys(sm_state + 0x7fd0, EAX);
    stl_phys(sm_state + 0x7fcc, env->dr[6]);
    stl_phys(sm_state + 0x7fc8, env->dr[7]);

    stl_phys(sm_state + 0x7fc4, env->tr.selector);
    stl_phys(sm_state + 0x7f64, env->tr.base);
    stl_phys(sm_state + 0x7f60, env->tr.limit);
    stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7fc0, env->ldt.selector);
    stl_phys(sm_state + 0x7f80, env->ldt.base);
    stl_phys(sm_state + 0x7f7c, env->ldt.limit);
    stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7f74, env->gdt.base);
    stl_phys(sm_state + 0x7f70, env->gdt.limit);

    stl_phys(sm_state + 0x7f58, env->idt.base);
    stl_phys(sm_state + 0x7f54, env->idt.limit);

    for(i = 0; i < 6; i++) {
        dt = &env->segs[i];
        if (i < 3)
            offset = 0x7f84 + i * 12;
        else
            offset = 0x7f2c + (i - 3) * 12;
        stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
        stl_phys(sm_state + offset + 8, dt->base);
        stl_phys(sm_state + offset + 4, dt->limit);
        stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
    }
    stl_phys(sm_state + 0x7f14, env->cr[4]);

    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7ef8, env->smbase);
#endif
    /* init SMM cpu state */

#ifdef TARGET_X86_64
    env->efer = 0;
    env->hflags &= ~HF_LMA_MASK;
#endif
    load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = 0x00008000;
    cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
                           0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);

    cpu_x86_update_cr0(env,
                       env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK));
    cpu_x86_update_cr4(env, 0);
    env->dr[7] = 0x00000400;
    CC_OP = CC_OP_EFLAGS;
#endif /* VBOX */
}

void helper_rsm(void)
{
#ifdef VBOX
    cpu_abort(env, "helper_rsm");
#else /* !VBOX */
    target_ulong sm_state;
    int i, offset;
    uint32_t val;

    sm_state = env->smbase + 0x8000;
#ifdef TARGET_X86_64
    env->efer = ldq_phys(sm_state + 0x7ed0);
    if (env->efer & MSR_EFER_LMA)
        env->hflags |= HF_LMA_MASK;
    else
        env->hflags &= ~HF_LMA_MASK;

    for(i = 0; i < 6; i++) {
        offset = 0x7e00 + i * 16;
        cpu_x86_load_seg_cache(env, i,
                               lduw_phys(sm_state + offset),
                               ldq_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (lduw_phys(sm_state + offset + 2) & 0xf0ff) << 8);
    }

    env->gdt.base = ldq_phys(sm_state + 0x7e68);
    env->gdt.limit = ldl_phys(sm_state + 0x7e64);

    env->ldt.selector = lduw_phys(sm_state + 0x7e70);
    env->ldt.base = ldq_phys(sm_state + 0x7e78);
    env->ldt.limit = ldl_phys(sm_state + 0x7e74);
    env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;

    env->idt.base = ldq_phys(sm_state + 0x7e88);
    env->idt.limit = ldl_phys(sm_state + 0x7e84);

    env->tr.selector = lduw_phys(sm_state + 0x7e90);
    env->tr.base = ldq_phys(sm_state + 0x7e98);
    env->tr.limit = ldl_phys(sm_state + 0x7e94);
    env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;

    EAX = ldq_phys(sm_state + 0x7ff8);
    ECX = ldq_phys(sm_state + 0x7ff0);
    EDX = ldq_phys(sm_state + 0x7fe8);
    EBX = ldq_phys(sm_state + 0x7fe0);
    ESP = ldq_phys(sm_state + 0x7fd8);
    EBP = ldq_phys(sm_state + 0x7fd0);
    ESI = ldq_phys(sm_state + 0x7fc8);
    EDI = ldq_phys(sm_state + 0x7fc0);
    for(i = 8; i < 16; i++)
        env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
    env->eip = ldq_phys(sm_state + 0x7f78);
    load_eflags(ldl_phys(sm_state + 0x7f70),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->dr[6] = ldl_phys(sm_state + 0x7f68);
    env->dr[7] = ldl_phys(sm_state + 0x7f60);

    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
    }
#else
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
    load_eflags(ldl_phys(sm_state + 0x7ff4),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = ldl_phys(sm_state + 0x7ff0);
    EDI = ldl_phys(sm_state + 0x7fec);
    ESI = ldl_phys(sm_state + 0x7fe8);
    EBP = ldl_phys(sm_state + 0x7fe4);
    ESP = ldl_phys(sm_state + 0x7fe0);
    EBX = ldl_phys(sm_state + 0x7fdc);
    EDX = ldl_phys(sm_state + 0x7fd8);
    ECX = ldl_phys(sm_state + 0x7fd4);
    EAX = ldl_phys(sm_state + 0x7fd0);
    env->dr[6] = ldl_phys(sm_state + 0x7fcc);
    env->dr[7] = ldl_phys(sm_state + 0x7fc8);

    env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
    env->tr.base = ldl_phys(sm_state + 0x7f64);
    env->tr.limit = ldl_phys(sm_state + 0x7f60);
    env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;

    env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
    env->ldt.base = ldl_phys(sm_state + 0x7f80);
    env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
    env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;

    env->gdt.base = ldl_phys(sm_state + 0x7f74);
    env->gdt.limit = ldl_phys(sm_state + 0x7f70);

    env->idt.base = ldl_phys(sm_state + 0x7f58);
    env->idt.limit = ldl_phys(sm_state + 0x7f54);

    for(i = 0; i < 6; i++) {
        if (i < 3)
            offset = 0x7f84 + i * 12;
        else
            offset = 0x7f2c + (i - 3) * 12;
        cpu_x86_load_seg_cache(env, i,
                               ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
                               ldl_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
    }
    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
    }
#endif
    CC_OP = CC_OP_EFLAGS;
    env->hflags &= ~HF_SMM_MASK;
    cpu_smm_update(env);

    if (loglevel & CPU_LOG_INT) {
        fprintf(logfile, "SMM: after RSM\n");
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
    }
#endif /* !VBOX */
}

#endif /* !CONFIG_USER_ONLY */

#ifdef BUGGY_GCC_DIV64
/* gcc 2.95.4 on PowerPC does not seem to like using __udivdi3, so we
   call it from another function */
uint32_t div32(uint64_t *q_ptr, uint64_t num, uint32_t den)
{
    *q_ptr = num / den;
    return num % den;
}

int32_t idiv32(int64_t *q_ptr, int64_t num, int32_t den)
{
    *q_ptr = num / den;
    return num % den;
}
#endif

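/* 32-bit DIV/IDIV: divide EDX:EAX by the operand, quotient to EAX and
   remainder to EDX; #DE is raised both for a zero divisor and for a
   quotient that does not fit in 32 bits. */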
void helper_divl_EAX_T0(void)
{
    unsigned int den, r;
    uint64_t num, q;

    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
    den = T0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
#ifdef BUGGY_GCC_DIV64
    r = div32(&q, num, den);
#else
    q = (num / den);
    r = (num % den);
#endif
    if (q > 0xffffffff)
        raise_exception(EXCP00_DIVZ);
    EAX = (uint32_t)q;
    EDX = (uint32_t)r;
}

void helper_idivl_EAX_T0(void)
{
    int den, r;
    int64_t num, q;

    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
    den = T0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
#ifdef BUGGY_GCC_DIV64
    r = idiv32(&q, num, den);
#else
    q = (num / den);
    r = (num % den);
#endif
    if (q != (int32_t)q)
        raise_exception(EXCP00_DIVZ);
    EAX = (uint32_t)q;
    EDX = (uint32_t)r;
}

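/* CMPXCHG8B m64: if EDX:EAX equals the memory operand, store ECX:EBX
   there and set ZF; otherwise load the operand into EDX:EAX and clear
   ZF. */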
void helper_cmpxchg8b(void)
{
    uint64_t d;
    int eflags;

    eflags = cc_table[CC_OP].compute_all();
    d = ldq(A0);
    if (d == (((uint64_t)EDX << 32) | EAX)) {
        stq(A0, ((uint64_t)ECX << 32) | EBX);
        eflags |= CC_Z;
    } else {
        EDX = d >> 32;
        EAX = d;
        eflags &= ~CC_Z;
    }
    CC_SRC = eflags;
}

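/* CPUID: the leaf is selected by EAX; out-of-range leaves fall back to
   a supported one. In the VBOX build the answer comes from the VMM
   side via remR3CpuId() instead of the tables below. */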
void helper_cpuid(void)
{
#ifndef VBOX
    uint32_t index;
    index = (uint32_t)EAX;

    /* test if maximum index reached */
    if (index & 0x80000000) {
        if (index > env->cpuid_xlevel)
            index = env->cpuid_level;
    } else {
        if (index > env->cpuid_level)
            index = env->cpuid_level;
    }

    switch(index) {
    case 0:
        EAX = env->cpuid_level;
        EBX = env->cpuid_vendor1;
        EDX = env->cpuid_vendor2;
        ECX = env->cpuid_vendor3;
        break;
    case 1:
        EAX = env->cpuid_version;
        EBX = 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
        ECX = env->cpuid_ext_features;
        EDX = env->cpuid_features;
        break;
    case 2:
        /* cache info: needed for Pentium Pro compatibility */
        EAX = 0x410601;
        EBX = 0;
        ECX = 0;
        EDX = 0;
        break;
    case 0x80000000:
        EAX = env->cpuid_xlevel;
        EBX = env->cpuid_vendor1;
        EDX = env->cpuid_vendor2;
        ECX = env->cpuid_vendor3;
        break;
    case 0x80000001:
        EAX = env->cpuid_features;
        EBX = 0;
        ECX = 0;
        EDX = env->cpuid_ext2_features;
        break;
    case 0x80000002:
    case 0x80000003:
    case 0x80000004:
        EAX = env->cpuid_model[(index - 0x80000002) * 4 + 0];
        EBX = env->cpuid_model[(index - 0x80000002) * 4 + 1];
        ECX = env->cpuid_model[(index - 0x80000002) * 4 + 2];
        EDX = env->cpuid_model[(index - 0x80000002) * 4 + 3];
        break;
    case 0x80000005:
        /* cache info (L1 cache) */
        EAX = 0x01ff01ff;
        EBX = 0x01ff01ff;
        ECX = 0x40020140;
        EDX = 0x40020140;
        break;
    case 0x80000006:
        /* cache info (L2 cache) */
        EAX = 0;
        EBX = 0x42004200;
        ECX = 0x02008140;
        EDX = 0;
        break;
    case 0x80000008:
        /* virtual & phys address size in low 2 bytes. */
        EAX = 0x00003028;
        EBX = 0;
        ECX = 0;
        EDX = 0;
        break;
    default:
        /* reserved values: zero */
        EAX = 0;
        EBX = 0;
        ECX = 0;
        EDX = 0;
        break;
    }
#else /* VBOX */
    remR3CpuId(env, EAX, &EAX, &EBX, &ECX, &EDX);
#endif /* VBOX */
}

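/* ENTER with a non-zero nesting level: copy level-1 frame pointers
   from the old frame onto the new stack, then push the new frame
   pointer value supplied in T1. */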
void helper_enter_level(int level, int data32)
{
    target_ulong ssp;
    uint32_t esp_mask, esp, ebp;

    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    ebp = EBP;
    esp = ESP;
    if (data32) {
        /* 32 bit */
        esp -= 4;
        while (--level) {
            esp -= 4;
            ebp -= 4;
            stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
        }
        esp -= 4;
        stl(ssp + (esp & esp_mask), T1);
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
        }
        esp -= 2;
        stw(ssp + (esp & esp_mask), T1);
    }
}

#ifdef TARGET_X86_64
void helper_enter64_level(int level, int data64)
{
    target_ulong esp, ebp;
    ebp = EBP;
    esp = ESP;

    if (data64) {
        /* 64 bit */
        esp -= 8;
        while (--level) {
            esp -= 8;
            ebp -= 8;
            stq(esp, ldq(ebp));
        }
        esp -= 8;
        stq(esp, T1);
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            stw(esp, lduw(ebp));
        }
        esp -= 2;
        stw(esp, T1);
    }
}
#endif

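/* LLDT and LTR load a system-segment descriptor from the GDT; in long
   mode these descriptors are 16 bytes (hence entry_limit = 15) with
   the upper 32 base bits in a third word. LTR additionally marks the
   TSS descriptor busy. */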
1916void helper_lldt_T0(void)
1917{
1918 int selector;
1919 SegmentCache *dt;
1920 uint32_t e1, e2;
1921 int index, entry_limit;
1922 target_ulong ptr;
1923#ifdef VBOX
1924 Log(("helper_lldt_T0: old ldtr=%RTsel {.base=%VGv, .limit=%VGv} new=%RTsel\n",
1925 (RTSEL)env->ldt.selector, (RTGCPTR)env->ldt.base, (RTGCPTR)env->ldt.limit, (RTSEL)(T0 & 0xffff)));
1926#endif
1927
1928 selector = T0 & 0xffff;
1929 if ((selector & 0xfffc) == 0) {
1930 /* XXX: NULL selector case: invalid LDT */
1931 env->ldt.base = 0;
1932 env->ldt.limit = 0;
1933 } else {
1934 if (selector & 0x4)
1935 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1936 dt = &env->gdt;
1937 index = selector & ~7;
1938#ifdef TARGET_X86_64
1939 if (env->hflags & HF_LMA_MASK)
1940 entry_limit = 15;
1941 else
1942#endif
1943 entry_limit = 7;
1944 if ((index + entry_limit) > dt->limit)
1945 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1946 ptr = dt->base + index;
1947 e1 = ldl_kernel(ptr);
1948 e2 = ldl_kernel(ptr + 4);
1949 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
1950 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1951 if (!(e2 & DESC_P_MASK))
1952 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
1953#ifdef TARGET_X86_64
1954 if (env->hflags & HF_LMA_MASK) {
1955 uint32_t e3;
1956 e3 = ldl_kernel(ptr + 8);
1957 load_seg_cache_raw_dt(&env->ldt, e1, e2);
1958 env->ldt.base |= (target_ulong)e3 << 32;
1959 } else
1960#endif
1961 {
1962 load_seg_cache_raw_dt(&env->ldt, e1, e2);
1963 }
1964 }
1965 env->ldt.selector = selector;
1966#ifdef VBOX
1967 Log(("helper_lldt_T0: new ldtr=%RTsel {.base=%VGv, .limit=%VGv}\n",
1968 (RTSEL)env->ldt.selector, (RTGCPTR)env->ldt.base, (RTGCPTR)env->ldt.limit));
1969#endif
1970}
1971
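/* LTR: like LLDT above, but the descriptor must be an available TSS
   (type 1 = 286 TSS, type 9 = 386 TSS); it is marked busy in the GDT
   once loaded. */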
1972void helper_ltr_T0(void)
1973{
1974 int selector;
1975 SegmentCache *dt;
1976 uint32_t e1, e2;
1977 int index, type, entry_limit;
1978 target_ulong ptr;
1979
1980#ifdef VBOX
1981 Log(("helper_ltr_T0: old tr=%RTsel {.base=%VGv, .limit=%VGv, .flags=%RX32} new=%RTsel\n",
1982 (RTSEL)env->tr.selector, (RTGCPTR)env->tr.base, (RTGCPTR)env->tr.limit,
1983 env->tr.flags, (RTSEL)(T0 & 0xffff)));
1984#endif
1985
1986 selector = T0 & 0xffff;
1987 if ((selector & 0xfffc) == 0) {
1988 /* NULL selector case: invalid TR */
1989 env->tr.base = 0;
1990 env->tr.limit = 0;
1991 env->tr.flags = 0;
1992 } else {
1993 if (selector & 0x4)
1994 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1995 dt = &env->gdt;
1996 index = selector & ~7;
1997#ifdef TARGET_X86_64
1998 if (env->hflags & HF_LMA_MASK)
1999 entry_limit = 15;
2000 else
2001#endif
2002 entry_limit = 7;
2003 if ((index + entry_limit) > dt->limit)
2004 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2005 ptr = dt->base + index;
2006 e1 = ldl_kernel(ptr);
2007 e2 = ldl_kernel(ptr + 4);
2008 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2009 if ((e2 & DESC_S_MASK) ||
2010 (type != 1 && type != 9))
2011 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2012 if (!(e2 & DESC_P_MASK))
2013 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2014#ifdef TARGET_X86_64
2015 if (env->hflags & HF_LMA_MASK) {
2016 uint32_t e3;
2017 e3 = ldl_kernel(ptr + 8);
2018 load_seg_cache_raw_dt(&env->tr, e1, e2);
2019 env->tr.base |= (target_ulong)e3 << 32;
2020 } else
2021#endif
2022 {
2023 load_seg_cache_raw_dt(&env->tr, e1, e2);
2024 }
2025 e2 |= DESC_TSS_BUSY_MASK;
2026 stl_kernel(ptr + 4, e2);
2027 }
2028 env->tr.selector = selector;
2029#ifdef VBOX
2030 Log(("helper_ltr_T0: new tr=%RTsel {.base=%VGv, .limit=%VGv, .flags=%RX32} new=%RTsel\n",
2031 (RTSEL)env->tr.selector, (RTGCPTR)env->tr.base, (RTGCPTR)env->tr.limit,
2032 env->tr.flags, (RTSEL)(T0 & 0xffff)));
2033#endif
2034}
2035
2036/* only works in protected mode and not in VM86 mode. seg_reg must be != R_CS */
2037void load_seg(int seg_reg, int selector)
2038{
2039 uint32_t e1, e2;
2040 int cpl, dpl, rpl;
2041 SegmentCache *dt;
2042 int index;
2043 target_ulong ptr;
2044
2045 selector &= 0xffff;
2046 cpl = env->hflags & HF_CPL_MASK;
2047
2048#ifdef VBOX
2049 /* Trying to load a selector with CPL=1? */
2050 if (cpl == 0 && (selector & 3) == 1 && (env->state & CPU_RAW_RING0))
2051 {
2052 Log(("RPL 1 -> sel %04X -> %04X\n", selector, selector & 0xfffc));
2053 selector = selector & 0xfffc;
2054 }
2055#endif
2056
2057 if ((selector & 0xfffc) == 0) {
2058 /* null selector case */
2059 if (seg_reg == R_SS
2060#ifdef TARGET_X86_64
2061 && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
2062#endif
2063 )
2064 raise_exception_err(EXCP0D_GPF, 0);
2065 cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
2066 } else {
2067
2068 if (selector & 0x4)
2069 dt = &env->ldt;
2070 else
2071 dt = &env->gdt;
2072 index = selector & ~7;
2073 if ((index + 7) > dt->limit)
2074 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2075 ptr = dt->base + index;
2076 e1 = ldl_kernel(ptr);
2077 e2 = ldl_kernel(ptr + 4);
2078
2079 if (!(e2 & DESC_S_MASK))
2080 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2081 rpl = selector & 3;
2082 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2083 if (seg_reg == R_SS) {
2084 /* must be writable segment */
2085 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
2086 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2087 if (rpl != cpl || dpl != cpl)
2088 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2089 } else {
2090 /* must be readable segment */
2091 if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
2092 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2093
2094 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2095 /* if not conforming code, test rights */
2096 if (dpl < cpl || dpl < rpl)
2097 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2098 }
2099 }
2100
2101 if (!(e2 & DESC_P_MASK)) {
2102 if (seg_reg == R_SS)
2103 raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
2104 else
2105 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2106 }
2107
2108 /* set the access bit if not already set */
2109 if (!(e2 & DESC_A_MASK)) {
2110 e2 |= DESC_A_MASK;
2111 stl_kernel(ptr + 4, e2);
2112 }
2113
2114 cpu_x86_load_seg_cache(env, seg_reg, selector,
2115 get_seg_base(e1, e2),
2116 get_seg_limit(e1, e2),
2117 e2);
2118#if 0
2119 fprintf(logfile, "load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
2120 selector, (unsigned long)sc->base, sc->limit, sc->flags);
2121#endif
2122 }
2123}
2124
2125/* protected mode jump */
2126void helper_ljmp_protected_T0_T1(int next_eip_addend)
2127{
2128 int new_cs, gate_cs, type;
2129 uint32_t e1, e2, cpl, dpl, rpl, limit;
2130 target_ulong new_eip, next_eip;
2131
2132 new_cs = T0;
2133 new_eip = T1;
2134 if ((new_cs & 0xfffc) == 0)
2135 raise_exception_err(EXCP0D_GPF, 0);
2136 if (load_segment(&e1, &e2, new_cs) != 0)
2137 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2138 cpl = env->hflags & HF_CPL_MASK;
2139 if (e2 & DESC_S_MASK) {
2140 if (!(e2 & DESC_CS_MASK))
2141 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2142 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2143 if (e2 & DESC_C_MASK) {
2144 /* conforming code segment */
2145 if (dpl > cpl)
2146 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2147 } else {
2148 /* non conforming code segment */
2149 rpl = new_cs & 3;
2150 if (rpl > cpl)
2151 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2152 if (dpl != cpl)
2153 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2154 }
2155 if (!(e2 & DESC_P_MASK))
2156 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2157 limit = get_seg_limit(e1, e2);
2158 if (new_eip > limit &&
2159 !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK))
2160 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2161 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2162 get_seg_base(e1, e2), limit, e2);
2163 EIP = new_eip;
2164 } else {
2165 /* jump to call or task gate */
2166 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2167 rpl = new_cs & 3;
2168 cpl = env->hflags & HF_CPL_MASK;
2169 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2170 switch(type) {
2171 case 1: /* 286 TSS */
2172 case 9: /* 386 TSS */
2173 case 5: /* task gate */
2174 if (dpl < cpl || dpl < rpl)
2175 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2176 next_eip = env->eip + next_eip_addend;
2177 switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
2178 CC_OP = CC_OP_EFLAGS;
2179 break;
2180 case 4: /* 286 call gate */
2181 case 12: /* 386 call gate */
2182 if ((dpl < cpl) || (dpl < rpl))
2183 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2184 if (!(e2 & DESC_P_MASK))
2185 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2186 gate_cs = e1 >> 16;
2187 new_eip = (e1 & 0xffff);
2188 if (type == 12)
2189 new_eip |= (e2 & 0xffff0000);
2190 if (load_segment(&e1, &e2, gate_cs) != 0)
2191 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2192 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2193 /* must be code segment */
2194 if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
2195 (DESC_S_MASK | DESC_CS_MASK)))
2196 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2197 if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
2198 (!(e2 & DESC_C_MASK) && (dpl != cpl)))
2199 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2200 if (!(e2 & DESC_P_MASK))
2201 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2202 limit = get_seg_limit(e1, e2);
2203 if (new_eip > limit)
2204 raise_exception_err(EXCP0D_GPF, 0);
2205 cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
2206 get_seg_base(e1, e2), limit, e2);
2207 EIP = new_eip;
2208 break;
2209 default:
2210 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2211 break;
2212 }
2213 }
2214}
2215
2216/* real mode call */
2217void helper_lcall_real_T0_T1(int shift, int next_eip)
2218{
2219 int new_cs, new_eip;
2220 uint32_t esp, esp_mask;
2221 target_ulong ssp;
2222
2223 new_cs = T0;
2224 new_eip = T1;
2225 esp = ESP;
2226 esp_mask = get_sp_mask(env->segs[R_SS].flags);
2227 ssp = env->segs[R_SS].base;
2228 if (shift) {
2229 PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
2230 PUSHL(ssp, esp, esp_mask, next_eip);
2231 } else {
2232 PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
2233 PUSHW(ssp, esp, esp_mask, next_eip);
2234 }
2235
2236 SET_ESP(esp, esp_mask);
2237 env->eip = new_eip;
2238 env->segs[R_CS].selector = new_cs;
2239 env->segs[R_CS].base = (new_cs << 4);
2240}
2241
2242/* protected mode call */
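/* Handles far calls through code segments, task gates and call gates.
   For a call gate to inner privilege, the new SS:ESP is fetched from
   the TSS and the low 5 bits of e2 (param_count) give the number of
   parameter words to copy from the old stack to the new one. */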
2243void helper_lcall_protected_T0_T1(int shift, int next_eip_addend)
2244{
2245 int new_cs, new_stack, i;
2246 uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
2247 uint32_t ss, ss_e1, ss_e2, sp, type, ss_dpl, sp_mask;
2248 uint32_t val, limit, old_sp_mask;
2249 target_ulong ssp, old_ssp, next_eip, new_eip;
2250
2251 new_cs = T0;
2252 new_eip = T1;
2253 next_eip = env->eip + next_eip_addend;
2254#ifdef DEBUG_PCALL
2255 if (loglevel & CPU_LOG_PCALL) {
2256 fprintf(logfile, "lcall %04x:%08x s=%d\n",
2257 new_cs, (uint32_t)new_eip, shift);
2258 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
2259 }
2260#endif
2261 if ((new_cs & 0xfffc) == 0)
2262 raise_exception_err(EXCP0D_GPF, 0);
2263 if (load_segment(&e1, &e2, new_cs) != 0)
2264 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2265 cpl = env->hflags & HF_CPL_MASK;
2266#ifdef DEBUG_PCALL
2267 if (loglevel & CPU_LOG_PCALL) {
2268 fprintf(logfile, "desc=%08x:%08x\n", e1, e2);
2269 }
2270#endif
2271 if (e2 & DESC_S_MASK) {
2272 if (!(e2 & DESC_CS_MASK))
2273 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2274 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2275 if (e2 & DESC_C_MASK) {
2276 /* conforming code segment */
2277 if (dpl > cpl)
2278 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2279 } else {
2280 /* non conforming code segment */
2281 rpl = new_cs & 3;
2282 if (rpl > cpl)
2283 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2284 if (dpl != cpl)
2285 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2286 }
2287 if (!(e2 & DESC_P_MASK))
2288 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2289
2290#ifdef TARGET_X86_64
2291 /* XXX: check 16/32 bit cases in long mode */
2292 if (shift == 2) {
2293 target_ulong rsp;
2294 /* 64 bit case */
2295 rsp = ESP;
2296 PUSHQ(rsp, env->segs[R_CS].selector);
2297 PUSHQ(rsp, next_eip);
2298 /* from this point, not restartable */
2299 ESP = rsp;
2300 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2301 get_seg_base(e1, e2),
2302 get_seg_limit(e1, e2), e2);
2303 EIP = new_eip;
2304 } else
2305#endif
2306 {
2307 sp = ESP;
2308 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2309 ssp = env->segs[R_SS].base;
2310 if (shift) {
2311 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2312 PUSHL(ssp, sp, sp_mask, next_eip);
2313 } else {
2314 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2315 PUSHW(ssp, sp, sp_mask, next_eip);
2316 }
2317
2318 limit = get_seg_limit(e1, e2);
2319 if (new_eip > limit)
2320 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2321 /* from this point, not restartable */
2322 SET_ESP(sp, sp_mask);
2323 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2324 get_seg_base(e1, e2), limit, e2);
2325 EIP = new_eip;
2326 }
2327 } else {
2328 /* check gate type */
2329 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
2330 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2331 rpl = new_cs & 3;
2332 switch(type) {
2333 case 1: /* available 286 TSS */
2334 case 9: /* available 386 TSS */
2335 case 5: /* task gate */
2336 if (dpl < cpl || dpl < rpl)
2337 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2338 switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
2339 CC_OP = CC_OP_EFLAGS;
2340 return;
2341 case 4: /* 286 call gate */
2342 case 12: /* 386 call gate */
2343 break;
2344 default:
2345 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2346 break;
2347 }
2348 shift = type >> 3;
2349
2350 if (dpl < cpl || dpl < rpl)
2351 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2352 /* check valid bit */
2353 if (!(e2 & DESC_P_MASK))
2354 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2355 selector = e1 >> 16;
2356 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
2357 param_count = e2 & 0x1f;
2358 if ((selector & 0xfffc) == 0)
2359 raise_exception_err(EXCP0D_GPF, 0);
2360
2361 if (load_segment(&e1, &e2, selector) != 0)
2362 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2363 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
2364 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2365 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2366 if (dpl > cpl)
2367 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2368 if (!(e2 & DESC_P_MASK))
2369 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2370
2371 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
2372 /* to inner privilege */
2373 get_ss_esp_from_tss(&ss, &sp, dpl);
2374#ifdef DEBUG_PCALL
2375 if (loglevel & CPU_LOG_PCALL)
2376 fprintf(logfile, "new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n",
2377 ss, sp, param_count, ESP);
2378#endif
2379 if ((ss & 0xfffc) == 0)
2380 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2381 if ((ss & 3) != dpl)
2382 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2383 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
2384 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2385 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2386 if (ss_dpl != dpl)
2387 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2388 if (!(ss_e2 & DESC_S_MASK) ||
2389 (ss_e2 & DESC_CS_MASK) ||
2390 !(ss_e2 & DESC_W_MASK))
2391 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2392 if (!(ss_e2 & DESC_P_MASK))
2393 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2394
2395 // push_size = ((param_count * 2) + 8) << shift;
2396
2397 old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
2398 old_ssp = env->segs[R_SS].base;
2399
2400 sp_mask = get_sp_mask(ss_e2);
2401 ssp = get_seg_base(ss_e1, ss_e2);
2402 if (shift) {
2403 PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
2404 PUSHL(ssp, sp, sp_mask, ESP);
2405 for(i = param_count - 1; i >= 0; i--) {
2406 val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
2407 PUSHL(ssp, sp, sp_mask, val);
2408 }
2409 } else {
2410 PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
2411 PUSHW(ssp, sp, sp_mask, ESP);
2412 for(i = param_count - 1; i >= 0; i--) {
2413 val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
2414 PUSHW(ssp, sp, sp_mask, val);
2415 }
2416 }
2417 new_stack = 1;
2418 } else {
2419 /* to same privilege */
2420 sp = ESP;
2421 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2422 ssp = env->segs[R_SS].base;
2423 // push_size = (4 << shift);
2424 new_stack = 0;
2425 }
2426
2427 if (shift) {
2428 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2429 PUSHL(ssp, sp, sp_mask, next_eip);
2430 } else {
2431 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2432 PUSHW(ssp, sp, sp_mask, next_eip);
2433 }
2434
2435 /* from this point, not restartable */
2436
2437 if (new_stack) {
2438 ss = (ss & ~3) | dpl;
2439 cpu_x86_load_seg_cache(env, R_SS, ss,
2440 ssp,
2441 get_seg_limit(ss_e1, ss_e2),
2442 ss_e2);
2443 }
2444
2445 selector = (selector & ~3) | dpl;
2446 cpu_x86_load_seg_cache(env, R_CS, selector,
2447 get_seg_base(e1, e2),
2448 get_seg_limit(e1, e2),
2449 e2);
2450 cpu_x86_set_cpl(env, dpl);
2451 SET_ESP(sp, sp_mask);
2452 EIP = offset;
2453 }
2454#ifdef USE_KQEMU
2455 if (kqemu_is_ok(env)) {
2456 env->exception_index = -1;
2457 cpu_loop_exit();
2458 }
2459#endif
2460}
2461
2462/* real and vm86 mode iret */
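/* The VBOX block below implements the virtual-8086 mode extensions
   (CR4.VME) for IRET with IOPL < 3: the popped IF bit is redirected to
   VIF, and #GP is raised if a virtual interrupt is pending while
   interrupts would be enabled, or if TF would be set. */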
2463void helper_iret_real(int shift)
2464{
2465 uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
2466 target_ulong ssp;
2467 int eflags_mask;
2468#ifdef VBOX
2469 bool fVME = false;
2470
2471 remR3TrapClear(env->pVM);
2472#endif /* VBOX */
2473
2474 sp_mask = 0xffff; /* XXXX: use SS segment size ? */
2475 sp = ESP;
2476 ssp = env->segs[R_SS].base;
2477 if (shift == 1) {
2478 /* 32 bits */
2479 POPL(ssp, sp, sp_mask, new_eip);
2480 POPL(ssp, sp, sp_mask, new_cs);
2481 new_cs &= 0xffff;
2482 POPL(ssp, sp, sp_mask, new_eflags);
2483 } else {
2484 /* 16 bits */
2485 POPW(ssp, sp, sp_mask, new_eip);
2486 POPW(ssp, sp, sp_mask, new_cs);
2487 POPW(ssp, sp, sp_mask, new_eflags);
2488 }
2489#ifdef VBOX
2490 if ( (env->eflags & VM_MASK)
2491 && ((env->eflags >> IOPL_SHIFT) & 3) != 3
2492 && (env->cr[4] & CR4_VME_MASK)) /* implied or else we would fault earlier */
2493 {
2494 fVME = true;
2495 /* if virtual interrupt pending and (virtual) interrupts will be enabled -> #GP */
2496 /* if TF will be set -> #GP */
2497 if ( ((new_eflags & IF_MASK) && (env->eflags & VIP_MASK))
2498 || (new_eflags & TF_MASK))
2499 raise_exception(EXCP0D_GPF);
2500 }
2501#endif /* VBOX */
2502
2503 ESP = (ESP & ~sp_mask) | (sp & sp_mask);
2504 load_seg_vm(R_CS, new_cs);
2505 env->eip = new_eip;
2506#ifdef VBOX
2507 if (fVME)
2508 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
2509 else
2510#endif
2511 if (env->eflags & VM_MASK)
2512 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
2513 else
2514 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
2515 if (shift == 0)
2516 eflags_mask &= 0xffff;
2517 load_eflags(new_eflags, eflags_mask);
2518
2519#ifdef VBOX
2520 if (fVME)
2521 {
2522 if (new_eflags & IF_MASK)
2523 env->eflags |= VIF_MASK;
2524 else
2525 env->eflags &= ~VIF_MASK;
2526 }
2527#endif /* VBOX */
2528}
2529
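/* On a return to outer privilege, data segment registers whose DPL is
   below the new CPL must be nullified; conforming code segments are
   exempt, and on x86_64 null FS/GS are left alone (see below). */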
2530static inline void validate_seg(int seg_reg, int cpl)
2531{
2532 int dpl;
2533 uint32_t e2;
2534
2535 /* XXX: on x86_64, we do not want to nullify FS and GS because
2536 they may still contain a valid base. I would be interested to
2537 know how a real x86_64 CPU behaves */
2538 if ((seg_reg == R_FS || seg_reg == R_GS) &&
2539 (env->segs[seg_reg].selector & 0xfffc) == 0)
2540 return;
2541
2542 e2 = env->segs[seg_reg].flags;
2543 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2544 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2545 /* data or non conforming code segment */
2546 if (dpl < cpl) {
2547 cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
2548 }
2549 }
2550}
2551
2552/* protected mode iret */
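/* Common worker for RETF (is_iret=0) and IRET (is_iret=1): pops
   CS:(E)IP (plus EFLAGS for IRET), performs the stack switch on a
   return to outer privilege, and handles a return to vm86 mode when
   the popped eflags have VM set. */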
2553static inline void helper_ret_protected(int shift, int is_iret, int addend)
2554{
2555 uint32_t new_cs, new_eflags, new_ss;
2556 uint32_t new_es, new_ds, new_fs, new_gs;
2557 uint32_t e1, e2, ss_e1, ss_e2;
2558 int cpl, dpl, rpl, eflags_mask, iopl;
2559 target_ulong ssp, sp, new_eip, new_esp, sp_mask;
2560
2561#ifdef TARGET_X86_64
2562 if (shift == 2)
2563 sp_mask = -1;
2564 else
2565#endif
2566 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2567 sp = ESP;
2568 ssp = env->segs[R_SS].base;
2569 new_eflags = 0; /* avoid warning */
2570#ifdef TARGET_X86_64
2571 if (shift == 2) {
2572 POPQ(sp, new_eip);
2573 POPQ(sp, new_cs);
2574 new_cs &= 0xffff;
2575 if (is_iret) {
2576 POPQ(sp, new_eflags);
2577 }
2578 } else
2579#endif
2580 if (shift == 1) {
2581 /* 32 bits */
2582 POPL(ssp, sp, sp_mask, new_eip);
2583 POPL(ssp, sp, sp_mask, new_cs);
2584 new_cs &= 0xffff;
2585 if (is_iret) {
2586 POPL(ssp, sp, sp_mask, new_eflags);
2587#if defined(VBOX) && defined(DEBUG)
2588 printf("iret: new CS %04X\n", new_cs);
2589 printf("iret: new EIP %08X\n", new_eip);
2590 printf("iret: new EFLAGS %08X\n", new_eflags);
2591 printf("iret: EAX=%08x\n", EAX);
2592#endif
2593
2594 if (new_eflags & VM_MASK)
2595 goto return_to_vm86;
2596 }
2597#ifdef VBOX
2598 if ((new_cs & 0x3) == 1 && (env->state & CPU_RAW_RING0))
2599 {
2600#ifdef DEBUG
2601 printf("RPL 1 -> new_cs %04X -> %04X\n", new_cs, new_cs & 0xfffc);
2602#endif
2603 new_cs = new_cs & 0xfffc;
2604 }
2605#endif
2606 } else {
2607 /* 16 bits */
2608 POPW(ssp, sp, sp_mask, new_eip);
2609 POPW(ssp, sp, sp_mask, new_cs);
2610 if (is_iret)
2611 POPW(ssp, sp, sp_mask, new_eflags);
2612 }
2613#ifdef DEBUG_PCALL
2614 if (loglevel & CPU_LOG_PCALL) {
2615 fprintf(logfile, "lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
2616 new_cs, new_eip, shift, addend);
2617 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
2618 }
2619#endif
2620 if ((new_cs & 0xfffc) == 0)
2621 {
2622#if defined(VBOX) && defined(DEBUG)
2623 printf("(new_cs & 0xfffc) == 0\n");
2624#endif
2625 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2626 }
2627 if (load_segment(&e1, &e2, new_cs) != 0)
2628 {
2629#if defined(VBOX) && defined(DEBUG)
2630 printf("load_segment failed\n");
2631#endif
2632 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2633 }
2634 if (!(e2 & DESC_S_MASK) ||
2635 !(e2 & DESC_CS_MASK))
2636 {
2637#if defined(VBOX) && defined(DEBUG)
2638 printf("e2 mask %08x\n", e2);
2639#endif
2640 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2641 }
2642 cpl = env->hflags & HF_CPL_MASK;
2643 rpl = new_cs & 3;
2644 if (rpl < cpl)
2645 {
2646#if defined(VBOX) && defined(DEBUG)
2647 printf("rpl < cpl (%d vs %d)\n", rpl, cpl);
2648#endif
2649 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2650 }
2651 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2652 if (e2 & DESC_C_MASK) {
2653 if (dpl > rpl)
2654 {
2655#if defined(VBOX) && defined(DEBUG)
2656 printf("dpl > rpl (%d vs %d)\n", dpl, rpl);
2657#endif
2658 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2659 }
2660 } else {
2661 if (dpl != rpl)
2662 {
2663#if defined(VBOX) && defined(DEBUG)
2664 printf("dpl != rpl (%d vs %d) e1=%x e2=%x\n", dpl, rpl, e1, e2);
2665#endif
2666 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2667 }
2668 }
2669 if (!(e2 & DESC_P_MASK))
2670 {
2671#if defined(VBOX) && defined(DEBUG)
2672 printf("DESC_P_MASK e2=%08x\n", e2);
2673#endif
2674 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2675 }
2676 sp += addend;
2677 if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
2678 ((env->hflags & HF_CS64_MASK) && !is_iret))) {
2679 /* return to same privilege level */
2680 cpu_x86_load_seg_cache(env, R_CS, new_cs,
2681 get_seg_base(e1, e2),
2682 get_seg_limit(e1, e2),
2683 e2);
2684 } else {
2685 /* return to different privilege level */
2686#ifdef TARGET_X86_64
2687 if (shift == 2) {
2688 POPQ(sp, new_esp);
2689 POPQ(sp, new_ss);
2690 new_ss &= 0xffff;
2691 } else
2692#endif
2693 if (shift == 1) {
2694 /* 32 bits */
2695 POPL(ssp, sp, sp_mask, new_esp);
2696 POPL(ssp, sp, sp_mask, new_ss);
2697 new_ss &= 0xffff;
2698 } else {
2699 /* 16 bits */
2700 POPW(ssp, sp, sp_mask, new_esp);
2701 POPW(ssp, sp, sp_mask, new_ss);
2702 }
2703#ifdef DEBUG_PCALL
2704 if (loglevel & CPU_LOG_PCALL) {
2705 fprintf(logfile, "new ss:esp=%04x:" TARGET_FMT_lx "\n",
2706 new_ss, new_esp);
2707 }
2708#endif
2709 if ((new_ss & 0xfffc) == 0) {
2710#ifdef TARGET_X86_64
2711 /* NULL ss is allowed in long mode if cpl != 3 */
2712 /* XXX: test CS64 ? */
2713 if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
2714 cpu_x86_load_seg_cache(env, R_SS, new_ss,
2715 0, 0xffffffff,
2716 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2717 DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
2718 DESC_W_MASK | DESC_A_MASK);
2719 ss_e2 = DESC_B_MASK; /* XXX: should not be needed ? */
2720 } else
2721#endif
2722 {
2723 raise_exception_err(EXCP0D_GPF, 0);
2724 }
2725 } else {
2726 if ((new_ss & 3) != rpl)
2727 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2728 if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
2729 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2730 if (!(ss_e2 & DESC_S_MASK) ||
2731 (ss_e2 & DESC_CS_MASK) ||
2732 !(ss_e2 & DESC_W_MASK))
2733 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2734 dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2735 if (dpl != rpl)
2736 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2737 if (!(ss_e2 & DESC_P_MASK))
2738 raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
2739 cpu_x86_load_seg_cache(env, R_SS, new_ss,
2740 get_seg_base(ss_e1, ss_e2),
2741 get_seg_limit(ss_e1, ss_e2),
2742 ss_e2);
2743 }
2744
2745 cpu_x86_load_seg_cache(env, R_CS, new_cs,
2746 get_seg_base(e1, e2),
2747 get_seg_limit(e1, e2),
2748 e2);
2749 cpu_x86_set_cpl(env, rpl);
2750 sp = new_esp;
2751#ifdef TARGET_X86_64
2752 if (env->hflags & HF_CS64_MASK)
2753 sp_mask = -1;
2754 else
2755#endif
2756 sp_mask = get_sp_mask(ss_e2);
2757
2758 /* validate data segments */
2759 validate_seg(R_ES, rpl);
2760 validate_seg(R_DS, rpl);
2761 validate_seg(R_FS, rpl);
2762 validate_seg(R_GS, rpl);
2763
2764 sp += addend;
2765 }
2766 SET_ESP(sp, sp_mask);
2767 env->eip = new_eip;
2768 if (is_iret) {
2769 /* NOTE: 'cpl' is the _old_ CPL */
2770 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
2771 if (cpl == 0)
2772#ifdef VBOX
2773 eflags_mask |= IOPL_MASK | VIF_MASK | VIP_MASK;
2774#else
2775 eflags_mask |= IOPL_MASK;
2776#endif
2777 iopl = (env->eflags >> IOPL_SHIFT) & 3;
2778 if (cpl <= iopl)
2779 eflags_mask |= IF_MASK;
2780 if (shift == 0)
2781 eflags_mask &= 0xffff;
2782 load_eflags(new_eflags, eflags_mask);
2783 }
2784 return;
2785
2786 return_to_vm86:
2787
2788#if 0 // defined(VBOX) && defined(DEBUG)
2789 printf("V86: new CS %04X\n", new_cs);
2790 printf("V86: Descriptor %08X:%08X\n", e2, e1);
2791 printf("V86: new EIP %08X\n", new_eip);
2792 printf("V86: new EFLAGS %08X\n", new_eflags);
2793#endif
2794
2795 POPL(ssp, sp, sp_mask, new_esp);
2796 POPL(ssp, sp, sp_mask, new_ss);
2797 POPL(ssp, sp, sp_mask, new_es);
2798 POPL(ssp, sp, sp_mask, new_ds);
2799 POPL(ssp, sp, sp_mask, new_fs);
2800 POPL(ssp, sp, sp_mask, new_gs);
2801
2802 /* modify processor state */
2803 load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
2804 IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
2805 load_seg_vm(R_CS, new_cs & 0xffff);
2806 cpu_x86_set_cpl(env, 3);
2807 load_seg_vm(R_SS, new_ss & 0xffff);
2808 load_seg_vm(R_ES, new_es & 0xffff);
2809 load_seg_vm(R_DS, new_ds & 0xffff);
2810 load_seg_vm(R_FS, new_fs & 0xffff);
2811 load_seg_vm(R_GS, new_gs & 0xffff);
2812
2813 env->eip = new_eip & 0xffff;
2814 ESP = new_esp;
2815}
2816
2817void helper_iret_protected(int shift, int next_eip)
2818{
2819 int tss_selector, type;
2820 uint32_t e1, e2;
2821
2822#ifdef VBOX
2823 remR3TrapClear(env->pVM);
2824#endif
2825
2826 /* specific case for TSS */
2827 if (env->eflags & NT_MASK) {
2828#ifdef TARGET_X86_64
2829 if (env->hflags & HF_LMA_MASK)
2830 raise_exception_err(EXCP0D_GPF, 0);
2831#endif
2832 tss_selector = lduw_kernel(env->tr.base + 0);
2833 if (tss_selector & 4)
2834 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
2835 if (load_segment(&e1, &e2, tss_selector) != 0)
2836 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
2837 type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
2838 /* NOTE: we check both segment and busy TSS */
2839 if (type != 3)
2840 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
2841 switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
2842 } else {
2843 helper_ret_protected(shift, 1, 0);
2844 }
2845#ifdef USE_KQEMU
2846 if (kqemu_is_ok(env)) {
2847 CC_OP = CC_OP_EFLAGS;
2848 env->exception_index = -1;
2849 cpu_loop_exit();
2850 }
2851#endif
2852}
2853
2854void helper_lret_protected(int shift, int addend)
2855{
2856 helper_ret_protected(shift, 0, addend);
2857#ifdef USE_KQEMU
2858 if (kqemu_is_ok(env)) {
2859 env->exception_index = -1;
2860 cpu_loop_exit();
2861 }
2862#endif
2863}
2864
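/* SYSENTER/SYSEXIT use flat 4GB segments: the CS selector comes from
   MSR_IA32_SYSENTER_CS (SS at +8 for entry, CS/SS at +16/+24 for
   exit), the entry ESP/EIP from the companion MSRs, and the exit
   ESP/EIP from ECX/EDX. */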
2865void helper_sysenter(void)
2866{
2867 if (env->sysenter_cs == 0) {
2868 raise_exception_err(EXCP0D_GPF, 0);
2869 }
2870 env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
2871 cpu_x86_set_cpl(env, 0);
2872 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
2873 0, 0xffffffff,
2874 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2875 DESC_S_MASK |
2876 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2877 cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
2878 0, 0xffffffff,
2879 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2880 DESC_S_MASK |
2881 DESC_W_MASK | DESC_A_MASK);
2882 ESP = env->sysenter_esp;
2883 EIP = env->sysenter_eip;
2884}
2885
2886void helper_sysexit(void)
2887{
2888 int cpl;
2889
2890 cpl = env->hflags & HF_CPL_MASK;
2891 if (env->sysenter_cs == 0 || cpl != 0) {
2892 raise_exception_err(EXCP0D_GPF, 0);
2893 }
2894 cpu_x86_set_cpl(env, 3);
2895 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
2896 0, 0xffffffff,
2897 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2898 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2899 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2900 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
2901 0, 0xffffffff,
2902 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2903 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2904 DESC_W_MASK | DESC_A_MASK);
2905 ESP = ECX;
2906 EIP = EDX;
2907#ifdef USE_KQEMU
2908 if (kqemu_is_ok(env)) {
2909 env->exception_index = -1;
2910 cpu_loop_exit();
2911 }
2912#endif
2913}
2914
2915void helper_movl_crN_T0(int reg)
2916{
2917#if !defined(CONFIG_USER_ONLY)
2918 switch(reg) {
2919 case 0:
2920 cpu_x86_update_cr0(env, T0);
2921 break;
2922 case 3:
2923 cpu_x86_update_cr3(env, T0);
2924 break;
2925 case 4:
2926 cpu_x86_update_cr4(env, T0);
2927 break;
2928 case 8:
2929 cpu_set_apic_tpr(env, T0);
2930 break;
2931 default:
2932 env->cr[reg] = T0;
2933 break;
2934 }
2935#endif
2936}
2937
2938/* XXX: do more */
2939void helper_movl_drN_T0(int reg)
2940{
2941 env->dr[reg] = T0;
2942}
2943
2944void helper_invlpg(target_ulong addr)
2945{
2946 cpu_x86_flush_tlb(env, addr);
2947}
2948
2949void helper_rdtsc(void)
2950{
2951 uint64_t val;
2952
2953 if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
2954 raise_exception(EXCP0D_GPF);
2955 }
2956 val = cpu_get_tsc(env);
2957 EAX = (uint32_t)(val);
2958 EDX = (uint32_t)(val >> 32);
2959}
2960
2961#if defined(CONFIG_USER_ONLY)
2962void helper_wrmsr(void)
2963{
2964}
2965
2966void helper_rdmsr(void)
2967{
2968}
2969#else
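/* System-mode WRMSR/RDMSR: ECX selects the MSR and EDX:EAX carries the
   64-bit value. Unrecognized MSRs are silently ignored on write and
   read back as zero (see the XXX notes below). */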
2970void helper_wrmsr(void)
2971{
2972 uint64_t val;
2973
2974 val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
2975
2976 switch((uint32_t)ECX) {
2977 case MSR_IA32_SYSENTER_CS:
2978 env->sysenter_cs = val & 0xffff;
2979 break;
2980 case MSR_IA32_SYSENTER_ESP:
2981 env->sysenter_esp = val;
2982 break;
2983 case MSR_IA32_SYSENTER_EIP:
2984 env->sysenter_eip = val;
2985 break;
2986 case MSR_IA32_APICBASE:
2987 cpu_set_apic_base(env, val);
2988 break;
2989 case MSR_EFER:
2990 {
2991 uint64_t update_mask;
2992 update_mask = 0;
2993 if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL)
2994 update_mask |= MSR_EFER_SCE;
2995 if (env->cpuid_ext2_features & CPUID_EXT2_LM)
2996 update_mask |= MSR_EFER_LME;
2997 if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
2998 update_mask |= MSR_EFER_FFXSR;
2999 if (env->cpuid_ext2_features & CPUID_EXT2_NX)
3000 update_mask |= MSR_EFER_NXE;
3001 env->efer = (env->efer & ~update_mask) |
3002 (val & update_mask);
3003 }
3004 break;
3005 case MSR_STAR:
3006 env->star = val;
3007 break;
3008 case MSR_PAT:
3009 env->pat = val;
3010 break;
3011#ifdef TARGET_X86_64
3012 case MSR_LSTAR:
3013 env->lstar = val;
3014 break;
3015 case MSR_CSTAR:
3016 env->cstar = val;
3017 break;
3018 case MSR_FMASK:
3019 env->fmask = val;
3020 break;
3021 case MSR_FSBASE:
3022 env->segs[R_FS].base = val;
3023 break;
3024 case MSR_GSBASE:
3025 env->segs[R_GS].base = val;
3026 break;
3027 case MSR_KERNELGSBASE:
3028 env->kernelgsbase = val;
3029 break;
3030#endif
3031 default:
3032 /* XXX: exception ? */
3033 break;
3034 }
3035}
3036
3037void helper_rdmsr(void)
3038{
3039 uint64_t val;
3040 switch((uint32_t)ECX) {
3041 case MSR_IA32_SYSENTER_CS:
3042 val = env->sysenter_cs;
3043 break;
3044 case MSR_IA32_SYSENTER_ESP:
3045 val = env->sysenter_esp;
3046 break;
3047 case MSR_IA32_SYSENTER_EIP:
3048 val = env->sysenter_eip;
3049 break;
3050 case MSR_IA32_APICBASE:
3051 val = cpu_get_apic_base(env);
3052 break;
3053 case MSR_EFER:
3054 val = env->efer;
3055 break;
3056 case MSR_STAR:
3057 val = env->star;
3058 break;
3059 case MSR_PAT:
3060 val = env->pat;
3061 break;
3062#ifdef TARGET_X86_64
3063 case MSR_LSTAR:
3064 val = env->lstar;
3065 break;
3066 case MSR_CSTAR:
3067 val = env->cstar;
3068 break;
3069 case MSR_FMASK:
3070 val = env->fmask;
3071 break;
3072 case MSR_FSBASE:
3073 val = env->segs[R_FS].base;
3074 break;
3075 case MSR_GSBASE:
3076 val = env->segs[R_GS].base;
3077 break;
3078 case MSR_KERNELGSBASE:
3079 val = env->kernelgsbase;
3080 break;
3081#endif
3082 default:
3083 /* XXX: exception ? */
3084 val = 0;
3085 break;
3086 }
3087 EAX = (uint32_t)(val);
3088 EDX = (uint32_t)(val >> 32);
3089}
3090#endif
3091
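/* LSL/LAR/VERR/VERW do not fault on an unusable selector; they only
   report the outcome through ZF. The current eflags are materialized
   with compute_all() and ZF is then set or cleared via CC_SRC. */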
3092void helper_lsl(void)
3093{
3094 unsigned int selector, limit;
3095 uint32_t e1, e2, eflags;
3096 int rpl, dpl, cpl, type;
3097
3098 eflags = cc_table[CC_OP].compute_all();
3099 selector = T0 & 0xffff;
3100 if (load_segment(&e1, &e2, selector) != 0)
3101 goto fail;
3102 rpl = selector & 3;
3103 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3104 cpl = env->hflags & HF_CPL_MASK;
3105 if (e2 & DESC_S_MASK) {
3106 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3107 /* conforming */
3108 } else {
3109 if (dpl < cpl || dpl < rpl)
3110 goto fail;
3111 }
3112 } else {
3113 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
3114 switch(type) {
3115 case 1:
3116 case 2:
3117 case 3:
3118 case 9:
3119 case 11:
3120 break;
3121 default:
3122 goto fail;
3123 }
3124 if (dpl < cpl || dpl < rpl) {
3125 fail:
3126 CC_SRC = eflags & ~CC_Z;
3127 return;
3128 }
3129 }
3130 limit = get_seg_limit(e1, e2);
3131 T1 = limit;
3132 CC_SRC = eflags | CC_Z;
3133}
3134
3135void helper_lar(void)
3136{
3137 unsigned int selector;
3138 uint32_t e1, e2, eflags;
3139 int rpl, dpl, cpl, type;
3140
3141 eflags = cc_table[CC_OP].compute_all();
3142 selector = T0 & 0xffff;
3143 if ((selector & 0xfffc) == 0)
3144 goto fail;
3145 if (load_segment(&e1, &e2, selector) != 0)
3146 goto fail;
3147 rpl = selector & 3;
3148 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3149 cpl = env->hflags & HF_CPL_MASK;
3150 if (e2 & DESC_S_MASK) {
3151 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3152 /* conforming */
3153 } else {
3154 if (dpl < cpl || dpl < rpl)
3155 goto fail;
3156 }
3157 } else {
3158 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
3159 switch(type) {
3160 case 1:
3161 case 2:
3162 case 3:
3163 case 4:
3164 case 5:
3165 case 9:
3166 case 11:
3167 case 12:
3168 break;
3169 default:
3170 goto fail;
3171 }
3172 if (dpl < cpl || dpl < rpl) {
3173 fail:
3174 CC_SRC = eflags & ~CC_Z;
3175 return;
3176 }
3177 }
3178 T1 = e2 & 0x00f0ff00;
3179 CC_SRC = eflags | CC_Z;
3180}
3181
3182void helper_verr(void)
3183{
3184 unsigned int selector;
3185 uint32_t e1, e2, eflags;
3186 int rpl, dpl, cpl;
3187
3188 eflags = cc_table[CC_OP].compute_all();
3189 selector = T0 & 0xffff;
3190 if ((selector & 0xfffc) == 0)
3191 goto fail;
3192 if (load_segment(&e1, &e2, selector) != 0)
3193 goto fail;
3194 if (!(e2 & DESC_S_MASK))
3195 goto fail;
3196 rpl = selector & 3;
3197 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3198 cpl = env->hflags & HF_CPL_MASK;
3199 if (e2 & DESC_CS_MASK) {
3200 if (!(e2 & DESC_R_MASK))
3201 goto fail;
3202 if (!(e2 & DESC_C_MASK)) {
3203 if (dpl < cpl || dpl < rpl)
3204 goto fail;
3205 }
3206 } else {
3207 if (dpl < cpl || dpl < rpl) {
3208 fail:
3209 CC_SRC = eflags & ~CC_Z;
3210 return;
3211 }
3212 }
3213 CC_SRC = eflags | CC_Z;
3214}
3215
3216void helper_verw(void)
3217{
3218 unsigned int selector;
3219 uint32_t e1, e2, eflags;
3220 int rpl, dpl, cpl;
3221
3222 eflags = cc_table[CC_OP].compute_all();
3223 selector = T0 & 0xffff;
3224 if ((selector & 0xfffc) == 0)
3225 goto fail;
3226 if (load_segment(&e1, &e2, selector) != 0)
3227 goto fail;
3228 if (!(e2 & DESC_S_MASK))
3229 goto fail;
3230 rpl = selector & 3;
3231 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3232 cpl = env->hflags & HF_CPL_MASK;
3233 if (e2 & DESC_CS_MASK) {
3234 goto fail;
3235 } else {
3236 if (dpl < cpl || dpl < rpl)
3237 goto fail;
3238 if (!(e2 & DESC_W_MASK)) {
3239 fail:
3240 CC_SRC = eflags & ~CC_Z;
3241 return;
3242 }
3243 }
3244 CC_SRC = eflags | CC_Z;
3245}
3246
3247/* FPU helpers */
3248
3249void helper_fldt_ST0_A0(void)
3250{
3251 int new_fpstt;
3252 new_fpstt = (env->fpstt - 1) & 7;
3253 env->fpregs[new_fpstt].d = helper_fldt(A0);
3254 env->fpstt = new_fpstt;
3255 env->fptags[new_fpstt] = 0; /* validate stack entry */
3256}
3257
3258void helper_fstt_ST0_A0(void)
3259{
3260 helper_fstt(ST0, A0);
3261}
3262
3263void fpu_set_exception(int mask)
3264{
3265 env->fpus |= mask;
3266 if (env->fpus & (~env->fpuc & FPUC_EM))
3267 env->fpus |= FPUS_SE | FPUS_B;
3268}
3269
3270CPU86_LDouble helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
3271{
3272 if (b == 0.0)
3273 fpu_set_exception(FPUS_ZE);
3274 return a / b;
3275}
3276
3277void fpu_raise_exception(void)
3278{
3279 if (env->cr[0] & CR0_NE_MASK) {
3280 raise_exception(EXCP10_COPR);
3281 }
3282#if !defined(CONFIG_USER_ONLY)
3283 else {
3284 cpu_set_ferr(env);
3285 }
3286#endif
3287}
3288
3289/* BCD ops */
3290
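/* FBLD/FBST use the 80-bit packed BCD format: bytes 0..8 hold 18 BCD
   digits, two per byte with the least significant pair in byte 0, and
   bit 7 of byte 9 carries the sign. */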
3291void helper_fbld_ST0_A0(void)
3292{
3293 CPU86_LDouble tmp;
3294 uint64_t val;
3295 unsigned int v;
3296 int i;
3297
3298 val = 0;
3299 for(i = 8; i >= 0; i--) {
3300 v = ldub(A0 + i);
3301 val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
3302 }
3303 tmp = val;
3304 if (ldub(A0 + 9) & 0x80)
3305 tmp = -tmp;
3306 fpush();
3307 ST0 = tmp;
3308}
3309
3310void helper_fbst_ST0_A0(void)
3311{
3312 int v;
3313 target_ulong mem_ref, mem_end;
3314 int64_t val;
3315
3316 val = floatx_to_int64(ST0, &env->fp_status);
3317 mem_ref = A0;
3318 mem_end = mem_ref + 9;
3319 if (val < 0) {
3320 stb(mem_end, 0x80);
3321 val = -val;
3322 } else {
3323 stb(mem_end, 0x00);
3324 }
3325 while (mem_ref < mem_end) {
3326 if (val == 0)
3327 break;
3328 v = val % 100;
3329 val = val / 100;
3330 v = ((v / 10) << 4) | (v % 10);
3331 stb(mem_ref++, v);
3332 }
3333 while (mem_ref < mem_end) {
3334 stb(mem_ref++, 0);
3335 }
3336}
3337
3338void helper_f2xm1(void)
3339{
3340 ST0 = pow(2.0,ST0) - 1.0;
3341}
3342
3343void helper_fyl2x(void)
3344{
3345 CPU86_LDouble fptemp;
3346
3347 fptemp = ST0;
3348 if (fptemp>0.0){
3349 fptemp = log(fptemp)/log(2.0); /* log2(ST) */
3350 ST1 *= fptemp;
3351 fpop();
3352 } else {
3353 env->fpus &= (~0x4700);
3354 env->fpus |= 0x400;
3355 }
3356}
3357
3358void helper_fptan(void)
3359{
3360 CPU86_LDouble fptemp;
3361
3362 fptemp = ST0;
3363 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
3364 env->fpus |= 0x400;
3365 } else {
3366 ST0 = tan(fptemp);
3367 fpush();
3368 ST0 = 1.0;
3369 env->fpus &= (~0x400); /* C2 <-- 0 */
3370 /* the above code is for |arg| < 2**52 only */
3371 }
3372}
3373
3374void helper_fpatan(void)
3375{
3376 CPU86_LDouble fptemp, fpsrcop;
3377
3378 fpsrcop = ST1;
3379 fptemp = ST0;
3380 ST1 = atan2(fpsrcop,fptemp);
3381 fpop();
3382}
3383
3384void helper_fxtract(void)
3385{
3386 CPU86_LDoubleU temp;
3387 unsigned int expdif;
3388
3389 temp.d = ST0;
3390 expdif = EXPD(temp) - EXPBIAS;
3391 /*DP exponent bias*/
3392 ST0 = expdif;
3393 fpush();
3394 BIASEXPONENT(temp);
3395 ST0 = temp.d;
3396}
3397
3398void helper_fprem1(void)
3399{
3400 CPU86_LDouble dblq, fpsrcop, fptemp;
3401 CPU86_LDoubleU fpsrcop1, fptemp1;
3402 int expdif;
3403 int q;
3404
3405 fpsrcop = ST0;
3406 fptemp = ST1;
3407 fpsrcop1.d = fpsrcop;
3408 fptemp1.d = fptemp;
3409 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
3410 if (expdif < 53) {
3411 dblq = fpsrcop / fptemp;
3412 dblq = (dblq < 0.0)? ceil(dblq): floor(dblq);
3413 ST0 = fpsrcop - fptemp*dblq;
3414 q = (int)dblq; /* cutting off top bits is assumed here */
3415 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3416 /* (C0,C1,C3) <-- (q2,q1,q0) */
3417 env->fpus |= (q&0x4) << 6; /* (C0) <-- q2 */
3418 env->fpus |= (q&0x2) << 8; /* (C1) <-- q1 */
3419 env->fpus |= (q&0x1) << 14; /* (C3) <-- q0 */
3420 } else {
3421 env->fpus |= 0x400; /* C2 <-- 1 */
3422 fptemp = pow(2.0, expdif-50);
3423 fpsrcop = (ST0 / ST1) / fptemp;
3424 /* fpsrcop = integer obtained by rounding to the nearest */
3425 fpsrcop = (fpsrcop-floor(fpsrcop) < ceil(fpsrcop)-fpsrcop)?
3426 floor(fpsrcop): ceil(fpsrcop);
3427 ST0 -= (ST1 * fpsrcop * fptemp);
3428 }
3429}
3430
3431void helper_fprem(void)
3432{
3433 CPU86_LDouble dblq, fpsrcop, fptemp;
3434 CPU86_LDoubleU fpsrcop1, fptemp1;
3435 int expdif;
3436 int q;
3437
3438 fpsrcop = ST0;
3439 fptemp = ST1;
3440 fpsrcop1.d = fpsrcop;
3441 fptemp1.d = fptemp;
3442 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
3443 if ( expdif < 53 ) {
3444 dblq = fpsrcop / fptemp;
3445 dblq = (dblq < 0.0)? ceil(dblq): floor(dblq);
3446 ST0 = fpsrcop - fptemp*dblq;
3447 q = (int)dblq; /* cutting off top bits is assumed here */
3448 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3449 /* (C0,C1,C3) <-- (q2,q1,q0) */
3450 env->fpus |= (q&0x4) << 6; /* (C0) <-- q2 */
3451 env->fpus |= (q&0x2) << 8; /* (C1) <-- q1 */
3452 env->fpus |= (q&0x1) << 14; /* (C3) <-- q0 */
3453 } else {
3454 env->fpus |= 0x400; /* C2 <-- 1 */
3455 fptemp = pow(2.0, expdif-50);
3456 fpsrcop = (ST0 / ST1) / fptemp;
3457 /* fpsrcop = integer obtained by chopping */
3458 fpsrcop = (fpsrcop < 0.0)?
3459 -(floor(fabs(fpsrcop))): floor(fpsrcop);
3460 ST0 -= (ST1 * fpsrcop * fptemp);
3461 }
3462}
3463
3464void helper_fyl2xp1(void)
3465{
3466 CPU86_LDouble fptemp;
3467
3468 fptemp = ST0;
3469 if ((fptemp+1.0)>0.0) {
3470 fptemp = log(fptemp+1.0) / log(2.0); /* log2(ST+1.0) */
3471 ST1 *= fptemp;
3472 fpop();
3473 } else {
3474 env->fpus &= (~0x4700);
3475 env->fpus |= 0x400;
3476 }
3477}
3478
3479void helper_fsqrt(void)
3480{
3481 CPU86_LDouble fptemp;
3482
3483 fptemp = ST0;
3484 if (fptemp<0.0) {
3485 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3486 env->fpus |= 0x400;
3487 }
3488 ST0 = sqrt(fptemp);
3489}
3490
3491void helper_fsincos(void)
3492{
3493 CPU86_LDouble fptemp;
3494
3495 fptemp = ST0;
3496 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
3497 env->fpus |= 0x400;
3498 } else {
3499 ST0 = sin(fptemp);
3500 fpush();
3501 ST0 = cos(fptemp);
3502 env->fpus &= (~0x400); /* C2 <-- 0 */
3503 /* the above code is for |arg| < 2**63 only */
3504 }
3505}
3506
3507void helper_frndint(void)
3508{
3509 ST0 = floatx_round_to_int(ST0, &env->fp_status);
3510}
3511
3512void helper_fscale(void)
3513{
3514 ST0 = ldexp (ST0, (int)(ST1));
3515}
3516
3517void helper_fsin(void)
3518{
3519 CPU86_LDouble fptemp;
3520
3521 fptemp = ST0;
3522 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
3523 env->fpus |= 0x400;
3524 } else {
3525 ST0 = sin(fptemp);
3526 env->fpus &= (~0x400); /* C2 <-- 0 */
3527 /* the above code is for |arg| < 2**53 only */
3528 }
3529}
3530
3531void helper_fcos(void)
3532{
3533 CPU86_LDouble fptemp;
3534
3535 fptemp = ST0;
3536 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
3537 env->fpus |= 0x400;
3538 } else {
3539 ST0 = cos(fptemp);
3540 env->fpus &= (~0x400); /* C2 <-- 0 */
3541 /* the above code is for |arg| < 2**63 only */
3542 }
3543}
3544
3545void helper_fxam_ST0(void)
3546{
3547 CPU86_LDoubleU temp;
3548 int expdif;
3549
3550 temp.d = ST0;
3551
3552 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3553 if (SIGND(temp))
3554 env->fpus |= 0x200; /* C1 <-- 1 */
3555
3556 /* XXX: test fptags too */
3557 expdif = EXPD(temp);
3558 if (expdif == MAXEXPD) {
3559#ifdef USE_X86LDOUBLE
3560 if (MANTD(temp) == 0x8000000000000000ULL)
3561#else
3562 if (MANTD(temp) == 0)
3563#endif
3564 env->fpus |= 0x500 /*Infinity*/;
3565 else
3566 env->fpus |= 0x100 /*NaN*/;
3567 } else if (expdif == 0) {
3568 if (MANTD(temp) == 0)
3569 env->fpus |= 0x4000 /*Zero*/;
3570 else
3571 env->fpus |= 0x4400 /*Denormal*/;
3572 } else {
3573 env->fpus |= 0x400;
3574 }
3575}
3576
3577void helper_fstenv(target_ulong ptr, int data32)
3578{
3579 int fpus, fptag, exp, i;
3580 uint64_t mant;
3581 CPU86_LDoubleU tmp;
3582
3583 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
3584 fptag = 0;
3585 for (i=7; i>=0; i--) {
3586 fptag <<= 2;
3587 if (env->fptags[i]) {
3588 fptag |= 3;
3589 } else {
3590 tmp.d = env->fpregs[i].d;
3591 exp = EXPD(tmp);
3592 mant = MANTD(tmp);
3593 if (exp == 0 && mant == 0) {
3594 /* zero */
3595 fptag |= 1;
3596 } else if (exp == 0 || exp == MAXEXPD
3597#ifdef USE_X86LDOUBLE
3598 || (mant & (1LL << 63)) == 0
3599#endif
3600 ) {
3601 /* NaNs, infinity, denormal */
3602 fptag |= 2;
3603 }
3604 }
3605 }
3606 if (data32) {
3607 /* 32 bit */
3608 stl(ptr, env->fpuc);
3609 stl(ptr + 4, fpus);
3610 stl(ptr + 8, fptag);
3611 stl(ptr + 12, 0); /* fpip */
3612 stl(ptr + 16, 0); /* fpcs */
3613 stl(ptr + 20, 0); /* fpoo */
3614 stl(ptr + 24, 0); /* fpos */
3615 } else {
3616 /* 16 bit */
3617 stw(ptr, env->fpuc);
3618 stw(ptr + 2, fpus);
3619 stw(ptr + 4, fptag);
3620 stw(ptr + 6, 0);
3621 stw(ptr + 8, 0);
3622 stw(ptr + 10, 0);
3623 stw(ptr + 12, 0);
3624 }
3625}
3626
3627void helper_fldenv(target_ulong ptr, int data32)
3628{
3629 int i, fpus, fptag;
3630
3631 if (data32) {
3632 env->fpuc = lduw(ptr);
3633 fpus = lduw(ptr + 4);
3634 fptag = lduw(ptr + 8);
3635 }
3636 else {
3637 env->fpuc = lduw(ptr);
3638 fpus = lduw(ptr + 2);
3639 fptag = lduw(ptr + 4);
3640 }
3641 env->fpstt = (fpus >> 11) & 7;
3642 env->fpus = fpus & ~0x3800;
3643 for(i = 0;i < 8; i++) {
3644 env->fptags[i] = ((fptag & 3) == 3);
3645 fptag >>= 2;
3646 }
3647}
3648
3649void helper_fsave(target_ulong ptr, int data32)
3650{
3651 CPU86_LDouble tmp;
3652 int i;
3653
3654 helper_fstenv(ptr, data32);
3655
3656 ptr += (14 << data32);
3657 for(i = 0;i < 8; i++) {
3658 tmp = ST(i);
3659 helper_fstt(tmp, ptr);
3660 ptr += 10;
3661 }
3662
3663 /* fninit */
3664 env->fpus = 0;
3665 env->fpstt = 0;
3666 env->fpuc = 0x37f;
3667 env->fptags[0] = 1;
3668 env->fptags[1] = 1;
3669 env->fptags[2] = 1;
3670 env->fptags[3] = 1;
3671 env->fptags[4] = 1;
3672 env->fptags[5] = 1;
3673 env->fptags[6] = 1;
3674 env->fptags[7] = 1;
3675}
3676
3677void helper_frstor(target_ulong ptr, int data32)
3678{
3679 CPU86_LDouble tmp;
3680 int i;
3681
3682 helper_fldenv(ptr, data32);
3683 ptr += (14 << data32);
3684
3685 for(i = 0;i < 8; i++) {
3686 tmp = helper_fldt(ptr);
3687 ST(i) = tmp;
3688 ptr += 10;
3689 }
3690}
3691
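/* FXSAVE image layout as used here: control/status words first, the
   abridged 8-bit tag word at offset 4 (one valid bit per register,
   hence the ^ 0xff), the FP registers in 16-byte slots from offset
   0x20, and the XMM registers from offset 0xa0 when CR4.OSFXSR is
   set. */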
3692void helper_fxsave(target_ulong ptr, int data64)
3693{
3694 int fpus, fptag, i, nb_xmm_regs;
3695 CPU86_LDouble tmp;
3696 target_ulong addr;
3697
3698 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
3699 fptag = 0;
3700 for(i = 0; i < 8; i++) {
3701 fptag |= (env->fptags[i] << i);
3702 }
3703 stw(ptr, env->fpuc);
3704 stw(ptr + 2, fpus);
3705 stw(ptr + 4, fptag ^ 0xff);
3706
3707 addr = ptr + 0x20;
3708 for(i = 0;i < 8; i++) {
3709 tmp = ST(i);
3710 helper_fstt(tmp, addr);
3711 addr += 16;
3712 }
3713
3714 if (env->cr[4] & CR4_OSFXSR_MASK) {
3715 /* XXX: finish it */
3716 stl(ptr + 0x18, env->mxcsr); /* mxcsr */
3717 stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
3718 nb_xmm_regs = 8 << data64;
3719 addr = ptr + 0xa0;
3720 for(i = 0; i < nb_xmm_regs; i++) {
3721 stq(addr, env->xmm_regs[i].XMM_Q(0));
3722 stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
3723 addr += 16;
3724 }
3725 }
3726}
3727
3728void helper_fxrstor(target_ulong ptr, int data64)
3729{
3730 int i, fpus, fptag, nb_xmm_regs;
3731 CPU86_LDouble tmp;
3732 target_ulong addr;
3733
3734 env->fpuc = lduw(ptr);
3735 fpus = lduw(ptr + 2);
3736 fptag = lduw(ptr + 4);
3737 env->fpstt = (fpus >> 11) & 7;
3738 env->fpus = fpus & ~0x3800;
3739 fptag ^= 0xff;
3740 for(i = 0;i < 8; i++) {
3741 env->fptags[i] = ((fptag >> i) & 1);
3742 }
3743
3744 addr = ptr + 0x20;
3745 for(i = 0;i < 8; i++) {
3746 tmp = helper_fldt(addr);
3747 ST(i) = tmp;
3748 addr += 16;
3749 }
3750
3751 if (env->cr[4] & CR4_OSFXSR_MASK) {
3752 /* XXX: finish it */
3753 env->mxcsr = ldl(ptr + 0x18);
3754 //ldl(ptr + 0x1c);
3755 nb_xmm_regs = 8 << data64;
3756 addr = ptr + 0xa0;
3757 for(i = 0; i < nb_xmm_regs; i++) {
3758#if !defined(VBOX) || __GNUC__ < 4
3759 env->xmm_regs[i].XMM_Q(0) = ldq(addr);
3760 env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
3761#else /* VBOX + __GNUC__ >= 4: gcc 4.x compiler bug - it runs out of registers for the 64-bit value. */
3762# if 1
3763 env->xmm_regs[i].XMM_L(0) = ldl(addr);
3764 env->xmm_regs[i].XMM_L(1) = ldl(addr + 4);
3765 env->xmm_regs[i].XMM_L(2) = ldl(addr + 8);
3766 env->xmm_regs[i].XMM_L(3) = ldl(addr + 12);
3767# else
3768 /* this works fine on Mac OS X, gcc 4.0.1 */
3769 uint64_t u64 = ldq(addr);
3770 env->xmm_regs[i].XMM_Q(0) = u64;
3771 u64 = ldq(addr + 8);
3772 env->xmm_regs[i].XMM_Q(1) = u64;
3773# endif
3774#endif
3775 addr += 16;
3776 }
3777 }
3778}
3779
3780#ifndef USE_X86LDOUBLE
3781
3782void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
3783{
3784 CPU86_LDoubleU temp;
3785 int e;
3786
3787 temp.d = f;
3788 /* mantissa */
3789 *pmant = (MANTD(temp) << 11) | (1LL << 63);
3790 /* exponent + sign */
3791 e = EXPD(temp) - EXPBIAS + 16383;
3792 e |= SIGND(temp) >> 16;
3793 *pexp = e;
3794}
3795
3796CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
3797{
3798 CPU86_LDoubleU temp;
3799 int e;
3800 uint64_t ll;
3801
3802 /* XXX: handle overflow ? */
3803 e = (upper & 0x7fff) - 16383 + EXPBIAS; /* exponent */
3804 e |= (upper >> 4) & 0x800; /* sign */
3805 ll = (mant >> 11) & ((1LL << 52) - 1);
3806#ifdef __arm__
3807 temp.l.upper = (e << 20) | (ll >> 32);
3808 temp.l.lower = ll;
3809#else
3810 temp.ll = ll | ((uint64_t)e << 52);
3811#endif
3812 return temp.d;
3813}
3814
3815#else
3816
3817void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
3818{
3819 CPU86_LDoubleU temp;
3820
3821 temp.d = f;
3822 *pmant = temp.l.lower;
3823 *pexp = temp.l.upper;
3824}
3825
3826CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
3827{
3828 CPU86_LDoubleU temp;
3829
3830 temp.l.upper = upper;
3831 temp.l.lower = mant;
3832 return temp.d;
3833}
3834#endif
3835
3836#ifdef TARGET_X86_64
3837
3838//#define DEBUG_MULDIV
3839
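/* 128-bit helpers for the 64-bit MUL/IMUL/DIV/IDIV paths. mul64()
   builds the product from four 32x32->64 partial products:
   (a1*2^32 + a0) * (b1*2^32 + b0)
   = a1*b1*2^64 + (a1*b0 + a0*b1)*2^32 + a0*b0
   with add128() propagating the carry between the two halves. */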
3840static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
3841{
3842 *plow += a;
3843 /* carry test */
3844 if (*plow < a)
3845 (*phigh)++;
3846 *phigh += b;
3847}
3848
3849static void neg128(uint64_t *plow, uint64_t *phigh)
3850{
3851 *plow = ~ *plow;
3852 *phigh = ~ *phigh;
3853 add128(plow, phigh, 1, 0);
3854}
3855
3856static void mul64(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
3857{
3858 uint32_t a0, a1, b0, b1;
3859 uint64_t v;
3860
3861 a0 = a;
3862 a1 = a >> 32;
3863
3864 b0 = b;
3865 b1 = b >> 32;
3866
3867 v = (uint64_t)a0 * (uint64_t)b0;
3868 *plow = v;
3869 *phigh = 0;
3870
3871 v = (uint64_t)a0 * (uint64_t)b1;
3872 add128(plow, phigh, v << 32, v >> 32);
3873
3874 v = (uint64_t)a1 * (uint64_t)b0;
3875 add128(plow, phigh, v << 32, v >> 32);
3876
3877 v = (uint64_t)a1 * (uint64_t)b1;
3878 *phigh += v;
3879#ifdef DEBUG_MULDIV
3880 printf("mul: 0x%016" PRIx64 " * 0x%016" PRIx64 " = 0x%016" PRIx64 "%016" PRIx64 "\n",
3881 a, b, *phigh, *plow);
3882#endif
3883}
3884
3885static void imul64(uint64_t *plow, uint64_t *phigh, int64_t a, int64_t b)
3886{
3887 int sa, sb;
3888 sa = (a < 0);
3889 if (sa)
3890 a = -a;
3891 sb = (b < 0);
3892 if (sb)
3893 b = -b;
3894 mul64(plow, phigh, a, b);
3895 if (sa ^ sb) {
3896 neg128(plow, phigh);
3897 }
3898}
3899
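/* 128/64 division by simple restoring shift-and-subtract, producing one
   quotient bit per iteration; it overflows (returns 1) when the high
   half is already >= the divisor. idiv64() below handles the signs and
   then delegates to this routine. */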
3900/* return TRUE if overflow */
3901static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
3902{
3903 uint64_t q, r, a1, a0;
3904 int i, qb, ab;
3905
3906 a0 = *plow;
3907 a1 = *phigh;
3908 if (a1 == 0) {
3909 q = a0 / b;
3910 r = a0 % b;
3911 *plow = q;
3912 *phigh = r;
3913 } else {
3914 if (a1 >= b)
3915 return 1;
3916 /* XXX: use a better algorithm */
3917 for(i = 0; i < 64; i++) {
3918 ab = a1 >> 63;
3919 a1 = (a1 << 1) | (a0 >> 63);
3920 if (ab || a1 >= b) {
3921 a1 -= b;
3922 qb = 1;
3923 } else {
3924 qb = 0;
3925 }
3926 a0 = (a0 << 1) | qb;
3927 }
3928#if defined(DEBUG_MULDIV)
3929 printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64 ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n",
3930 *phigh, *plow, b, a0, a1);
3931#endif
3932 *plow = a0;
3933 *phigh = a1;
3934 }
3935 return 0;
3936}
3937
3938/* return TRUE if overflow */
3939static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
3940{
3941 int sa, sb;
3942 sa = ((int64_t)*phigh < 0);
3943 if (sa)
3944 neg128(plow, phigh);
3945 sb = (b < 0);
3946 if (sb)
3947 b = -b;
3948 if (div64(plow, phigh, b) != 0)
3949 return 1;
3950 if (sa ^ sb) {
3951 if (*plow > (1ULL << 63))
3952 return 1;
3953 *plow = - *plow;
3954 } else {
3955 if (*plow >= (1ULL << 63))
3956 return 1;
3957 }
3958 if (sa)
3959 *phigh = - *phigh;
3960 return 0;
3961}
3962
3963void helper_mulq_EAX_T0(void)
3964{
3965 uint64_t r0, r1;
3966
3967 mul64(&r0, &r1, EAX, T0);
3968 EAX = r0;
3969 EDX = r1;
3970 CC_DST = r0;
3971 CC_SRC = r1;
3972}
3973
3974void helper_imulq_EAX_T0(void)
3975{
3976 uint64_t r0, r1;
3977
3978 imul64(&r0, &r1, EAX, T0);
3979 EAX = r0;
3980 EDX = r1;
3981 CC_DST = r0;
3982 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
3983}
3984
3985void helper_imulq_T0_T1(void)
3986{
3987 uint64_t r0, r1;
3988
3989 imul64(&r0, &r1, T0, T1);
3990 T0 = r0;
3991 CC_DST = r0;
3992 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
3993}
3994
3995void helper_divq_EAX_T0(void)
3996{
3997 uint64_t r0, r1;
3998 if (T0 == 0) {
3999 raise_exception(EXCP00_DIVZ);
4000 }
4001 r0 = EAX;
4002 r1 = EDX;
4003 if (div64(&r0, &r1, T0))
4004 raise_exception(EXCP00_DIVZ);
4005 EAX = r0;
4006 EDX = r1;
4007}
4008
4009void helper_idivq_EAX_T0(void)
4010{
4011 uint64_t r0, r1;
4012 if (T0 == 0) {
4013 raise_exception(EXCP00_DIVZ);
4014 }
4015 r0 = EAX;
4016 r1 = EDX;
4017 if (idiv64(&r0, &r1, T0))
4018 raise_exception(EXCP00_DIVZ);
4019 EAX = r0;
4020 EDX = r1;
4021}
4022
4023void helper_bswapq_T0(void)
4024{
4025 T0 = bswap64(T0);
4026}
4027#endif
4028
4029void helper_hlt(void)
4030{
4031 env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
4032 env->hflags |= HF_HALTED_MASK;
4033 env->exception_index = EXCP_HLT;
4034 cpu_loop_exit();
4035}
4036
4037void helper_monitor(void)
4038{
4039 if ((uint32_t)ECX != 0)
4040 raise_exception(EXCP0D_GPF);
4041 /* XXX: store address ? */
4042}
4043
4044void helper_mwait(void)
4045{
4046 if ((uint32_t)ECX != 0)
4047 raise_exception(EXCP0D_GPF);
4048#ifdef VBOX
4049 helper_hlt();
4050#else
4051 /* XXX: not complete but not completely erroneous */
4052 if (env->cpu_index != 0 || env->next_cpu != NULL) {
4053 /* more than one CPU: do not sleep because another CPU may
4054 wake this one */
4055 } else {
4056 helper_hlt();
4057 }
4058#endif
4059}
4060
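/* Approximations used by the SSE reciprocal/rsqrt estimate helpers.
   Computing them at full libm precision makes the results more accurate
   than real hardware's low-precision estimates, which should be
   acceptable for instructions that are defined as approximations. */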
4061float approx_rsqrt(float a)
4062{
4063 return 1.0 / sqrt(a);
4064}
4065
4066float approx_rcp(float a)
4067{
4068 return 1.0 / a;
4069}
4070
4071void update_fp_status(void)
4072{
4073 int rnd_type;
4074
4075 /* set rounding mode */
4076 switch(env->fpuc & RC_MASK) {
4077 default:
4078 case RC_NEAR:
4079 rnd_type = float_round_nearest_even;
4080 break;
4081 case RC_DOWN:
4082 rnd_type = float_round_down;
4083 break;
4084 case RC_UP:
4085 rnd_type = float_round_up;
4086 break;
4087 case RC_CHOP:
4088 rnd_type = float_round_to_zero;
4089 break;
4090 }
4091 set_float_rounding_mode(rnd_type, &env->fp_status);
4092#ifdef FLOATX80
4093 switch((env->fpuc >> 8) & 3) {
4094 case 0:
4095 rnd_type = 32;
4096 break;
4097 case 2:
4098 rnd_type = 64;
4099 break;
4100 case 3:
4101 default:
4102 rnd_type = 80;
4103 break;
4104 }
4105 set_floatx80_rounding_precision(rnd_type, &env->fp_status);
4106#endif
4107}
4108
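/* Instantiate the softmmu load/store helpers once per access size:
   SHIFT 0..3 expand softmmu_template.h into the byte, word, long and
   quad variants. */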
4109#if !defined(CONFIG_USER_ONLY)
4110
4111#define MMUSUFFIX _mmu
4112#define GETPC() (__builtin_return_address(0))
4113
4114#define SHIFT 0
4115#include "softmmu_template.h"
4116
4117#define SHIFT 1
4118#include "softmmu_template.h"
4119
4120#define SHIFT 2
4121#include "softmmu_template.h"
4122
4123#define SHIFT 3
4124#include "softmmu_template.h"
4125
4126#endif
4127
4128/* try to fill the TLB and raise an exception on error. If retaddr is
4129 NULL, it means that the function was called from C code (i.e. not
4130 from generated code or from helper.c) */
4131/* XXX: fix it to restore all registers */
4132void tlb_fill(target_ulong addr, int is_write, int is_user, void *retaddr)
4133{
4134 TranslationBlock *tb;
4135 int ret;
4136 unsigned long pc;
4137 CPUX86State *saved_env;
4138
4139 /* XXX: hack to restore env in all cases, even if not called from
4140 generated code */
4141 saved_env = env;
4142 env = cpu_single_env;
4143
4144 ret = cpu_x86_handle_mmu_fault(env, addr, is_write, is_user, 1);
4145 if (ret) {
4146 if (retaddr) {
4147 /* now we have a real cpu fault */
4148 pc = (unsigned long)retaddr;
4149 tb = tb_find_pc(pc);
4150 if (tb) {
4151 /* the PC is inside the translated code. It means that we have
4152 a virtual CPU fault */
4153 cpu_restore_state(tb, env, pc, NULL);
4154 }
4155 }
4156 if (retaddr)
4157 raise_exception_err(env->exception_index, env->error_code);
4158 else
4159 raise_exception_err_norestore(env->exception_index, env->error_code);
4160 }
4161 env = saved_env;
4162}
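
/* Editor's note: a sketch of a typical call site. The slow path generated
   from softmmu_template.h passes its own return address so the guest state
   can be rolled back to the faulting instruction: */
#if 0
    tlb_fill(addr, 1 /* is_write */, is_user, GETPC());
#endif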
4163
4164#ifdef VBOX
4165
4166/**
4167 * Correctly computes the eflags.
4168 * @returns eflags.
4169 * @param env1 CPU environment.
4170 */
4171uint32_t raw_compute_eflags(CPUX86State *env1)
4172{
4173 CPUX86State *savedenv = env;
4174 env = env1;
4175 uint32_t efl = compute_eflags();
4176 env = savedenv;
4177 return efl;
4178}
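
/* Editor's note: the save/switch/restore dance with the global 'env'
   pointer above is the pattern used by all the VBox-facing wrappers below,
   because the helpers and the ldX_kernel()/stX() macros implicitly operate
   on 'env'. Hypothetical usage from outside the recompiler: */
#if 0
    uint32_t efl = raw_compute_eflags(pOtherEnv); /* pOtherEnv: hypothetical */
#endif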
4179
4180/**
4181 * Reads a byte from a virtual address in the guest memory area.
4182 * XXX: does this work for all addresses? swapped out pages?
4183 * @returns the byte read.
4184 * @param env1 CPU environment.
4185 * @param addr GC virtual address.
4186 */
4187uint8_t read_byte(CPUX86State *env1, target_ulong addr)
4188{
4189 CPUX86State *savedenv = env;
4190 env = env1;
4191 uint8_t u8 = ldub_kernel(addr);
4192 env = savedenv;
4193 return u8;
4194}
4195
4196/**
4197 * Reads a word from a virtual address in the guest memory area.
4198 * XXX: does this work for all addresses? swapped out pages?
4199 * @returns the word read.
4200 * @param env1 CPU environment.
4201 * @param addr GC virtual address.
4202 */
4203uint16_t read_word(CPUX86State *env1, target_ulong addr)
4204{
4205 CPUX86State *savedenv = env;
4206 env = env1;
4207 uint16_t u16 = lduw_kernel(addr);
4208 env = savedenv;
4209 return u16;
4210}
4211
4212/**
4213 * Reads a dword from a virtual address in the guest memory area.
4214 * XXX: does this work for all addresses? swapped out pages?
4215 * @returns the dword read.
4216 * @param env1 CPU environment.
4217 * @param addr GC virtual address.
4218 */
4219uint32_t read_dword(CPUX86State *env1, target_ulong addr)
4220{
4221 CPUX86State *savedenv = env;
4222 env = env1;
4223 uint32_t u32 = ldl_kernel(addr);
4224 env = savedenv;
4225 return u32;
4226}
4227
4228/**
4229 * Writes a byte to a virtual address in the guest memory area.
4230 * XXX: does this work for all addresses? swapped out pages?
4232 * @param env1 CPU environment.
4233 * @param addr GC virtual address.
4234 * @param val Byte value to write.
4235 */
4236void write_byte(CPUX86State *env1, target_ulong addr, uint8_t val)
4237{
4238 CPUX86State *savedenv = env;
4239 env = env1;
4240 stb(addr, val);
4241 env = savedenv;
4242}
4243
4244void write_word(CPUX86State *env1, target_ulong addr, uint16_t val)
4245{
4246 CPUX86State *savedenv = env;
4247 env = env1;
4248 stw(addr, val);
4249 env = savedenv;
4250}
4251
4252void write_dword(CPUX86State *env1, target_ulong addr, uint32_t val)
4253{
4254 CPUX86State *savedenv = env;
4255 env = env1;
4256 stl(addr, val);
4257 env = savedenv;
4258}
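
/* Editor's note: a hypothetical usage sketch of the raw guest-memory
   accessors above (pEnv and uGCPtr are assumed, not defined here): */
#if 0
    uint32_t u32 = read_dword(pEnv, uGCPtr);
    write_byte(pEnv, uGCPtr, 0xcc);   /* e.g. plant an int3 breakpoint */
#endif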
4259
4260/**
4261 * Correctly loads a selector into a segment register, updating the
4262 * internal qemu data/caches.
4263 * @param env1 CPU environment.
4264 * @param seg_reg Segment register.
4265 * @param selector Selector to load.
4266 */
4267void sync_seg(CPUX86State *env1, int seg_reg, int selector)
4268{
4269 CPUX86State *savedenv = env;
4270 env = env1;
4271
4272 if ( env->eflags & X86_EFL_VM
4273 || !(env->cr[0] & X86_CR0_PE))
4274 {
4275 load_seg_vm(seg_reg, selector);
4276
4277 env = savedenv;
4278
4279 /* Successful sync. */
4280 env1->segs[seg_reg].newselector = 0;
4281 }
4282 else
4283 {
4284 if (setjmp(env1->jmp_env) == 0)
4285 {
4286 if (seg_reg == R_CS)
4287 {
4288 uint32_t e1, e2;
4289 load_segment(&e1, &e2, selector);
4290 cpu_x86_load_seg_cache(env, R_CS, selector,
4291 get_seg_base(e1, e2),
4292 get_seg_limit(e1, e2),
4293 e2);
4294 }
4295 else
4296 load_seg(seg_reg, selector);
4297 env = savedenv;
4298
4299 /* Successful sync. */
4300 env1->segs[seg_reg].newselector = 0;
4301 }
4302 else
4303 {
4304 env = savedenv;
4305
4306 /* Postpone sync until the guest uses the selector. */
4307 env1->segs[seg_reg].selector = selector; /* hidden values are now incorrect, but will be resynced when this register is accessed. */
4308 env1->segs[seg_reg].newselector = selector;
4309 Log(("sync_seg: out of sync seg_reg=%d selector=%#x\n", seg_reg, selector));
4310 }
4311 }
4312
4313}
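
/* Editor's note: note the lazy path above - if the descriptor cannot be
   loaded yet, the selector is parked in 'newselector' and the hidden parts
   stay stale until the register is actually used. Hypothetical caller
   syncing a guest DS change: */
#if 0
    sync_seg(pEnv, R_DS, uNewSel);    /* pEnv, uNewSel: hypothetical */
#endif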
4314
4315
4316/**
4317 * Correctly loads a new ldtr selector.
4318 *
4319 * @param env1 CPU environment.
4320 * @param selector Selector to load.
4321 */
4322void sync_ldtr(CPUX86State *env1, int selector)
4323{
4324 CPUX86State *saved_env = env;
4325 target_ulong saved_T0 = T0;
4326 if (setjmp(env1->jmp_env) == 0)
4327 {
4328 env = env1;
4329 T0 = selector;
4330 helper_lldt_T0();
4331 T0 = saved_T0;
4332 env = saved_env;
4333 }
4334 else
4335 {
4336 T0 = saved_T0;
4337 env = saved_env;
4338#ifdef VBOX_STRICT
4339 cpu_abort(env1, "sync_ldtr: selector=%#x\n", selector);
4340#endif
4341 }
4342}
4343
4344/**
4345 * Correctly loads a new tr selector.
4346 *
 * @returns 0 on success, -1 on failure.
4347 * @param env1 CPU environment.
4348 * @param selector Selector to load.
4349 */
4350int sync_tr(CPUX86State *env1, int selector)
4351{
4352 /* ARG! this was going to call helper_ltr_T0, but that won't work because of the busy flag. */
4353 SegmentCache *dt;
4354 uint32_t e1, e2;
4355 int index, type, entry_limit;
4356 target_ulong ptr;
4357 CPUX86State *saved_env = env;
4358 env = env1;
4359
4360 selector &= 0xffff;
4361 if ((selector & 0xfffc) == 0) {
4362 /* NULL selector case: invalid TR */
4363 env->tr.base = 0;
4364 env->tr.limit = 0;
4365 env->tr.flags = 0;
4366 } else {
4367 if (selector & 0x4)
4368 goto l_failure;
4369 dt = &env->gdt;
4370 index = selector & ~7;
4371#ifdef TARGET_X86_64
4372 if (env->hflags & HF_LMA_MASK)
4373 entry_limit = 15;
4374 else
4375#endif
4376 entry_limit = 7;
4377 if ((index + entry_limit) > dt->limit)
4378 goto l_failure;
4379 ptr = dt->base + index;
4380 e1 = ldl_kernel(ptr);
4381 e2 = ldl_kernel(ptr + 4);
4382 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
4383 if ((e2 & DESC_S_MASK) /*||
4384 (type != 1 && type != 9)*/)
4385 goto l_failure;
4386 if (!(e2 & DESC_P_MASK))
4387 goto l_failure;
4388#ifdef TARGET_X86_64
4389 if (env->hflags & HF_LMA_MASK) {
4390 uint32_t e3;
4391 e3 = ldl_kernel(ptr + 8);
4392 load_seg_cache_raw_dt(&env->tr, e1, e2);
4393 env->tr.base |= (target_ulong)e3 << 32;
4394 } else
4395#endif
4396 {
4397 load_seg_cache_raw_dt(&env->tr, e1, e2);
4398 }
4399 e2 |= DESC_TSS_BUSY_MASK;
4400 stl_kernel(ptr + 4, e2);
4401 }
4402 env->tr.selector = selector;
4403
4404 env = saved_env;
4405 return 0;
4406l_failure:
4407 AssertMsgFailed(("selector=%d\n", selector));
 env = saved_env; /* restore the global env pointer like the success path does */
4408 return -1;
4409}
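
/* Editor's note: a worked example of the descriptor walk above for
   selector 0x28 in legacy mode: the TI bit (0x4) must be clear (GDT only),
   index = 0x28 & ~7 = 0x28, and the 8-byte descriptor at gdt.base + 0x28
   must be a present system descriptor (DESC_S_MASK clear); the busy bit is
   then set just as a real LTR would. In long mode the descriptor is 16
   bytes (entry_limit = 15) and its third dword supplies base bits 63:32. */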
4410
4411int emulate_single_instr(CPUX86State *env1)
4412{
4413 TranslationBlock *current;
4414 TranslationBlock tb_temp;
4415 int csize;
4416 void (*gen_func)(void);
4417 uint8_t *tc_ptr;
4418 uint32_t old_eip;
4419
4420 /* ensures env is loaded in ebp! */
4421 CPUX86State *savedenv = env;
4422 env = env1;
4423
4424 RAWEx_ProfileStart(env, STATS_EMULATE_SINGLE_INSTR);
4425
4426 tc_ptr = env->pvCodeBuffer;
4427
4428 /*
4429 * Setup temporary translation block.
4430 */
4431 /* tb_alloc: */
4432 tb_temp.pc = env->segs[R_CS].base + env->eip;
4433 tb_temp.cflags = 0;
4434
4435 /* tb_find_slow: */
4436 tb_temp.tc_ptr = tc_ptr;
4437 tb_temp.cs_base = env->segs[R_CS].base;
4438 tb_temp.flags = env->hflags | (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
4439
4440 /* Initialize the rest with sensible values. */
4441 tb_temp.size = 0;
4442 tb_temp.phys_hash_next = NULL;
4443 tb_temp.page_next[0] = NULL;
4444 tb_temp.page_next[1] = NULL;
4445 tb_temp.page_addr[0] = 0;
4446 tb_temp.page_addr[1] = 0;
4447 tb_temp.tb_next_offset[0] = 0xffff;
4448 tb_temp.tb_next_offset[1] = 0xffff;
4449 tb_temp.tb_next[0] = 0xffff;
4450 tb_temp.tb_next[1] = 0xffff;
4451 tb_temp.jmp_next[0] = NULL;
4452 tb_temp.jmp_next[1] = NULL;
4453 tb_temp.jmp_first = NULL;
4454
4455 current = env->current_tb;
4456 env->current_tb = NULL;
4457
4458 /*
4459 * Translate only one instruction.
4460 */
4461 ASMAtomicOrU32(&env->state, CPU_EMULATE_SINGLE_INSTR);
4462 if (cpu_gen_code(env, &tb_temp, env->cbCodeBuffer, &csize) < 0)
4463 {
4464 AssertFailed();
4465 RAWEx_ProfileStop(env, STATS_EMULATE_SINGLE_INSTR);
4466 ASMAtomicAndU32(&env->state, ~CPU_EMULATE_SINGLE_INSTR);
4467 env = savedenv;
4468 return -1;
4469 }
4470#ifdef DEBUG
4471 if(csize > env->cbCodeBuffer)
4472 {
4473 RAWEx_ProfileStop(env, STATS_EMULATE_SINGLE_INSTR);
4474 AssertFailed();
4475 ASMAtomicAndU32(&env->state, ~CPU_EMULATE_SINGLE_INSTR);
4476 env = savedenv;
4477 return -1;
4478 }
4479 if (tb_temp.tc_ptr != tc_ptr)
4480 {
4481 RAWEx_ProfileStop(env, STATS_EMULATE_SINGLE_INSTR);
4482 AssertFailed();
4483 ASMAtomicAndU32(&env->state, ~CPU_EMULATE_SINGLE_INSTR);
4484 env = savedenv;
4485 return -1;
4486 }
4487#endif
4488 ASMAtomicAndU32(&env->state, ~CPU_EMULATE_SINGLE_INSTR);
4489
4490 /* tb_link_phys: */
4491 tb_temp.jmp_first = (TranslationBlock *)((intptr_t)&tb_temp | 2);
4492 Assert(tb_temp.jmp_next[0] == NULL); Assert(tb_temp.jmp_next[1] == NULL);
4493 if (tb_temp.tb_next_offset[0] != 0xffff)
4494 tb_set_jmp_target(&tb_temp, 0, (uintptr_t)(tb_temp.tc_ptr + tb_temp.tb_next_offset[0]));
4495 if (tb_temp.tb_next_offset[1] != 0xffff)
4496 tb_set_jmp_target(&tb_temp, 1, (uintptr_t)(tb_temp.tc_ptr + tb_temp.tb_next_offset[1]));
4497
4498 /*
4499 * Execute it using emulation
4500 */
4501 old_eip = env->eip;
4502 gen_func = (void *)tb_temp.tc_ptr;
4503 env->current_tb = &tb_temp;
4504
4505 // EIP remains the same for repeated instructions; no idea why qemu doesn't do a jump inside the generated code.
4506 // Perhaps not a very safe hack.
4507 while(old_eip == env->eip)
4508 {
4509 gen_func();
4510 /*
4511 * Exit once we detect an external interrupt and interrupts are enabled
4512 */
4513 if( (env->interrupt_request & (CPU_INTERRUPT_EXTERNAL_EXIT|CPU_INTERRUPT_EXTERNAL_TIMER)) ||
4514 ( (env->eflags & IF_MASK) &&
4515 !(env->hflags & HF_INHIBIT_IRQ_MASK) &&
4516 (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD) ) )
4517 {
4518 break;
4519 }
4520 }
4521 env->current_tb = current;
4522
4523 Assert(tb_temp.phys_hash_next == NULL);
4524 Assert(tb_temp.page_next[0] == NULL);
4525 Assert(tb_temp.page_next[1] == NULL);
4526 Assert(tb_temp.page_addr[0] == 0);
4527 Assert(tb_temp.page_addr[1] == 0);
4528/*
4529 Assert(tb_temp.tb_next_offset[0] == 0xffff);
4530 Assert(tb_temp.tb_next_offset[1] == 0xffff);
4531 Assert(tb_temp.tb_next[0] == 0xffff);
4532 Assert(tb_temp.tb_next[1] == 0xffff);
4533 Assert(tb_temp.jmp_next[0] == NULL);
4534 Assert(tb_temp.jmp_next[1] == NULL);
4535 Assert(tb_temp.jmp_first == NULL); */
4536
4537 RAWEx_ProfileStop(env, STATS_EMULATE_SINGLE_INSTR);
4538
4539 /*
4540 * Execute the next instruction when we encounter instruction fusing.
4541 */
4542 if (env->hflags & HF_INHIBIT_IRQ_MASK)
4543 {
4544 Log(("REM: Emulating next instruction due to instruction fusing (HF_INHIBIT_IRQ_MASK)\n"));
4545 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
4546 emulate_single_instr(env);
4547 }
4548
4549 env = savedenv;
4550 return 0;
4551}
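
/* Editor's note: the function above translates exactly one guest
   instruction into a throw-away TB in env->pvCodeBuffer, runs it until EIP
   advances (REP-prefixed instructions re-enter at the same EIP), and
   returns 0 on success or -1 on translation failure. Hypothetical use: */
#if 0
    if (emulate_single_instr(pEnv) < 0)   /* pEnv: hypothetical */
        AssertFailed();
#endif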
4552
4553int get_ss_esp_from_tss_raw(CPUX86State *env1, uint32_t *ss_ptr,
4554 uint32_t *esp_ptr, int dpl)
4555{
4556 int type, index, shift;
4557
4558 CPUX86State *savedenv = env;
4559 env = env1;
4560
4561 if (!(env->tr.flags & DESC_P_MASK))
4562 cpu_abort(env, "invalid tss");
4563 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
4564 if ((type & 7) != 1)
4565 cpu_abort(env, "invalid tss type %d", type);
4566 shift = type >> 3;
4567 index = (dpl * 4 + 2) << shift;
4568 if (index + (4 << shift) - 1 > env->tr.limit)
4569 {
4570 env = savedenv;
4571 return 0;
4572 }
4573 //raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
4574
4575 if (shift == 0) {
4576 *esp_ptr = lduw_kernel(env->tr.base + index);
4577 *ss_ptr = lduw_kernel(env->tr.base + index + 2);
4578 } else {
4579 *esp_ptr = ldl_kernel(env->tr.base + index);
4580 *ss_ptr = lduw_kernel(env->tr.base + index + 4);
4581 }
4582
4583 env = savedenv;
4584 return 1;
4585}
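
/* Editor's note: a worked example of the TSS indexing above. For a 32-bit
   TSS (type 9): shift = 9 >> 3 = 1, so for dpl = 0 the index is
   (0 * 4 + 2) << 1 = 4, making ESP0 the dword at tr.base + 4 and SS0 the
   word at tr.base + 8 - exactly the i386 TSS layout. For a 16-bit TSS
   (type 1) the stride halves: SP0/SS0 sit at offsets 2 and 4. */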
4586
4587//*****************************************************************************
4588// Needs to be at the bottom of the file (overriding macros)
4589
4590static inline CPU86_LDouble helper_fldt_raw(uint8_t *ptr)
4591{
4592 return *(CPU86_LDouble *)ptr;
4593}
4594
4595static inline void helper_fstt_raw(CPU86_LDouble f, uint8_t *ptr)
4596{
4597 *(CPU86_LDouble *)ptr = f;
4598}
4599
4600#undef stw
4601#undef stl
4602#undef stq
4603#define stw(a,b) *(uint16_t *)(a) = (uint16_t)(b)
4604#define stl(a,b) *(uint32_t *)(a) = (uint32_t)(b)
4605#define stq(a,b) *(uint64_t *)(a) = (uint64_t)(b)
4606#define data64 0
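
/* Editor's note: from here on, stw/stl/stq (and further down
   lduw/ldl/ldq) are plain host-pointer stores/loads into the caller's
   save area rather than guest-memory accesses; e.g. stl(p, v) is now just
   *(uint32_t *)(p) = (uint32_t)(v). data64 = 0 pins nb_xmm_regs to
   8 << 0 = 8, the 32-bit guest layout. */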
4607
4608//*****************************************************************************
4609void restore_raw_fp_state(CPUX86State *env, uint8_t *ptr)
4610{
4611 int fpus, fptag, i, nb_xmm_regs;
4612 CPU86_LDouble tmp;
4613 uint8_t *addr;
4614
4615 if (env->cpuid_features & CPUID_FXSR)
4616 {
4617 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
4618 fptag = 0;
4619 for(i = 0; i < 8; i++) {
4620 fptag |= (env->fptags[i] << i);
4621 }
4622 stw(ptr, env->fpuc);
4623 stw(ptr + 2, fpus);
4624 stw(ptr + 4, fptag ^ 0xff);
4625
4626 addr = ptr + 0x20;
4627 for(i = 0;i < 8; i++) {
4628 tmp = ST(i);
4629 helper_fstt_raw(tmp, addr);
4630 addr += 16;
4631 }
4632
4633 if (env->cr[4] & CR4_OSFXSR_MASK) {
4634 /* XXX: finish it */
4635 stl(ptr + 0x18, env->mxcsr); /* mxcsr */
4636 stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
4637 nb_xmm_regs = 8 << data64;
4638 addr = ptr + 0xa0;
4639 for(i = 0; i < nb_xmm_regs; i++) {
4640#if __GNUC__ < 4
4641 stq(addr, env->xmm_regs[i].XMM_Q(0));
4642 stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
4643#else /* VBOX + __GNUC__ >= 4: gcc 4.x compiler bug - it runs out of registers for the 64-bit value. */
4644 stl(addr, env->xmm_regs[i].XMM_L(0));
4645 stl(addr + 4, env->xmm_regs[i].XMM_L(1));
4646 stl(addr + 8, env->xmm_regs[i].XMM_L(2));
4647 stl(addr + 12, env->xmm_regs[i].XMM_L(3));
4648#endif
4649 addr += 16;
4650 }
4651 }
4652 }
4653 else
4654 {
4655 PX86FPUSTATE fp = (PX86FPUSTATE)ptr;
4656 int fptag;
4657
4658 fp->FCW = env->fpuc;
4659 fp->FSW = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
4660 fptag = 0;
4661 for (i=7; i>=0; i--) {
4662 fptag <<= 2;
4663 if (env->fptags[i]) {
4664 fptag |= 3;
4665 } else {
4666 /* the FPU automatically computes it */
4667 }
4668 }
4669 fp->FTW = fptag;
4670
4671 for(i = 0;i < 8; i++) {
4672 tmp = ST(i);
4673 helper_fstt_raw(tmp, &fp->regs[i].reg[0]);
4674 }
4675 }
4676}
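
/* Editor's note: the offsets above follow the FXSAVE image layout from the
   Intel SDM: 0x00 FCW, 0x02 FSW, 0x04 abridged FTW (one "valid" bit per
   register, hence the ^ 0xff on the empty flags), 0x18 MXCSR, 0x1c
   MXCSR_MASK, 0x20 ST0..ST7 at a 16-byte stride, 0xa0 XMM0..XMM7. */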
4677
4678//*****************************************************************************
4679#undef lduw
4680#undef ldl
4681#undef ldq
4682#define lduw(a) *(uint16_t *)(a)
4683#define ldl(a) *(uint32_t *)(a)
4684#define ldq(a) *(uint64_t *)(a)
4685//*****************************************************************************
4686void save_raw_fp_state(CPUX86State *env, uint8_t *ptr)
4687{
4688 int i, fpus, fptag, nb_xmm_regs;
4689 CPU86_LDouble tmp;
4690 uint8_t *addr;
4691
4692 if (env->cpuid_features & CPUID_FXSR)
4693 {
4694 env->fpuc = lduw(ptr);
4695 fpus = lduw(ptr + 2);
4696 fptag = lduw(ptr + 4);
4697 env->fpstt = (fpus >> 11) & 7;
4698 env->fpus = fpus & ~0x3800;
4699 fptag ^= 0xff;
4700 for(i = 0;i < 8; i++) {
4701 env->fptags[i] = ((fptag >> i) & 1);
4702 }
4703
4704 addr = ptr + 0x20;
4705 for(i = 0;i < 8; i++) {
4706 tmp = helper_fldt_raw(addr);
4707 ST(i) = tmp;
4708 addr += 16;
4709 }
4710
4711 if (env->cr[4] & CR4_OSFXSR_MASK) {
4712 /* XXX: finish it, endianness */
4713 env->mxcsr = ldl(ptr + 0x18);
4714 //ldl(ptr + 0x1c);
4715 nb_xmm_regs = 8 << data64;
4716 addr = ptr + 0xa0;
4717 for(i = 0; i < nb_xmm_regs; i++) {
4718 env->xmm_regs[i].XMM_Q(0) = ldq(addr);
4719 env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
4720 addr += 16;
4721 }
4722 }
4723 }
4724 else
4725 {
4726 PX86FPUSTATE fp = (PX86FPUSTATE)ptr;
4727 int fptag, j;
4728
4729 env->fpuc = fp->FCW;
4730 env->fpstt = (fp->FSW >> 11) & 7;
4731 env->fpus = fp->FSW & ~0x3800;
4732 fptag = fp->FTW;
4733 for(i = 0;i < 8; i++) {
4734 env->fptags[i] = ((fptag & 3) == 3);
4735 fptag >>= 2;
4736 }
4737 j = env->fpstt;
4738 for(i = 0;i < 8; i++) {
4739 tmp = helper_fldt_raw(&fp->regs[i].reg[0]);
4740 ST(i) = tmp;
4741 }
4742 }
4743}
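
/* Editor's note: the legacy (FSAVE-format) branches use the full two-bit
   tag word: 00 = valid, 01 = zero, 10 = special, 11 = empty. qemu tracks
   only an empty flag per register, so the image gets 11 for empty and 00
   otherwise, and anything but 11 reads back as occupied; an all-empty
   stack round-trips as FTW = 0xffff. */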
4744//*****************************************************************************
4745//*****************************************************************************
4746
4747#endif /* VBOX */
4748