VirtualBox

source: vbox/trunk/src/recompiler/target-i386/helper.c@ 2426

Last change on this file since 2426 was 2426, checked in by vboxsync, 18 years ago

Removed the old recompiler code. (wonder why subversion didn't pick up these changes right away)

  • Property svn:eol-style set to native
File size: 133.5 KB
Line 
1/*
2 * i386 helpers
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20#ifdef VBOX
21# include <VBox/err.h>
22#endif
23#include "exec.h"
24
25//#define DEBUG_PCALL
26
27#if 0
28#define raise_exception_err(a, b)\
29do {\
30 if (logfile)\
31 fprintf(logfile, "raise_exception line=%d\n", __LINE__);\
32 (raise_exception_err)(a, b);\
33} while (0)
34#endif
35
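/* Value of the PF flag for every possible low-order result byte: CC_P is
   set exactly when the byte contains an even number of 1 bits, which is
   how the x86 parity flag is defined. */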
36const uint8_t parity_table[256] = {
37 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
38 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
39 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
40 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
41 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
42 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
43 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
44 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
45 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
46 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
47 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
48 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
49 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
50 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
51 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
52 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
53 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
54 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
55 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
56 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
57 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
58 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
59 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
60 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
61 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
62 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
63 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
64 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
65 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
66 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
67 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
68 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
69};
70
71/* modulo 17 table */
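/* RCL/RCR on a 16-bit operand rotate through 17 bits (16 data bits plus
   CF), so the 5-bit shift count is reduced modulo 17 via this table; the
   8-bit variant below uses modulo 9 for the same reason. */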
72const uint8_t rclw_table[32] = {
73 0, 1, 2, 3, 4, 5, 6, 7,
74 8, 9,10,11,12,13,14,15,
75 16, 0, 1, 2, 3, 4, 5, 6,
76 7, 8, 9,10,11,12,13,14,
77};
78
79/* modulo 9 table */
80const uint8_t rclb_table[32] = {
81 0, 1, 2, 3, 4, 5, 6, 7,
82 8, 0, 1, 2, 3, 4, 5, 6,
83 7, 8, 0, 1, 2, 3, 4, 5,
84 6, 7, 8, 0, 1, 2, 3, 4,
85};
86
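/* constants used by the FPU constant-load instructions
   (FLDZ, FLD1, FLDPI, FLDLG2, FLDLN2, FLDL2E, FLDL2T) */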
87const CPU86_LDouble f15rk[7] =
88{
89 0.00000000000000000000L,
90 1.00000000000000000000L,
91 3.14159265358979323851L, /*pi*/
92 0.30102999566398119523L, /*lg2*/
93 0.69314718055994530943L, /*ln2*/
94 1.44269504088896340739L, /*l2e*/
95 3.32192809488736234781L, /*l2t*/
96};
97
98/* thread support */
99
100spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;
101
102void cpu_lock(void)
103{
104 spin_lock(&global_cpu_lock);
105}
106
107void cpu_unlock(void)
108{
109 spin_unlock(&global_cpu_lock);
110}
111
112void cpu_loop_exit(void)
113{
114 /* NOTE: the registers at this point must be saved by hand because
115 longjmp restores them */
116 regs_to_env();
117 longjmp(env->jmp_env, 1);
118}
119
120/* return non-zero on error */
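/* Fetches the two 32-bit halves of the 8-byte descriptor named by
   'selector': bit 2 (the TI bit) picks the LDT or the GDT, and the upper
   13 bits, scaled by 8, index into that table. */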
121static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
122 int selector)
123{
124 SegmentCache *dt;
125 int index;
126 target_ulong ptr;
127
128 if (selector & 0x4)
129 dt = &env->ldt;
130 else
131 dt = &env->gdt;
132 index = selector & ~7;
133 if ((index + 7) > dt->limit)
134 return -1;
135 ptr = dt->base + index;
136 *e1_ptr = ldl_kernel(ptr);
137 *e2_ptr = ldl_kernel(ptr + 4);
138 return 0;
139}
140
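/* The descriptor limit is 20 bits (low word of e1 plus bits 16-19 of e2);
   when the G (granularity) bit is set it counts 4 KiB pages, so it is
   scaled to bytes with the low 12 bits forced to 1. */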
141static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
142{
143 unsigned int limit;
144 limit = (e1 & 0xffff) | (e2 & 0x000f0000);
145 if (e2 & DESC_G_MASK)
146 limit = (limit << 12) | 0xfff;
147 return limit;
148}
149
150static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
151{
152 return ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
153}
154
155static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
156{
157 sc->base = get_seg_base(e1, e2);
158 sc->limit = get_seg_limit(e1, e2);
159 sc->flags = e2;
160}
161
162/* init the segment cache in vm86 mode. */
163static inline void load_seg_vm(int seg, int selector)
164{
165 selector &= 0xffff;
166 cpu_x86_load_seg_cache(env, seg, selector,
167 (selector << 4), 0xffff, 0);
168}
169
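/* Fetch the inner-level stack pointer (SS:ESP for privilege level 'dpl')
   from the current TSS. Entries are word-sized in a 16-bit TSS and
   dword-sized in a 32-bit one, hence the shift derived from the TSS type. */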
170static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
171 uint32_t *esp_ptr, int dpl)
172{
173 int type, index, shift;
174
175#if 0
176 {
177 int i;
178 printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
179 for(i=0;i<env->tr.limit;i++) {
180 printf("%02x ", env->tr.base[i]);
181 if ((i & 7) == 7) printf("\n");
182 }
183 printf("\n");
184 }
185#endif
186
187 if (!(env->tr.flags & DESC_P_MASK))
188 cpu_abort(env, "invalid tss");
189 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
190 if ((type & 7) != 1)
191 cpu_abort(env, "invalid tss type %d", type);
192 shift = type >> 3;
193 index = (dpl * 4 + 2) << shift;
194 if (index + (4 << shift) - 1 > env->tr.limit)
195 raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
196 if (shift == 0) {
197 *esp_ptr = lduw_kernel(env->tr.base + index);
198 *ss_ptr = lduw_kernel(env->tr.base + index + 2);
199 } else {
200 *esp_ptr = ldl_kernel(env->tr.base + index);
201 *ss_ptr = lduw_kernel(env->tr.base + index + 4);
202 }
203}
204
205/* XXX: merge with load_seg() */
206static void tss_load_seg(int seg_reg, int selector)
207{
208 uint32_t e1, e2;
209 int rpl, dpl, cpl;
210
211 if ((selector & 0xfffc) != 0) {
212 if (load_segment(&e1, &e2, selector) != 0)
213 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
214 if (!(e2 & DESC_S_MASK))
215 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
216 rpl = selector & 3;
217 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
218 cpl = env->hflags & HF_CPL_MASK;
219 if (seg_reg == R_CS) {
220 if (!(e2 & DESC_CS_MASK))
221 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
222 /* XXX: is this correct? */
223 if (dpl != rpl)
224 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
225 if ((e2 & DESC_C_MASK) && dpl > rpl)
226 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
227 } else if (seg_reg == R_SS) {
228 /* SS must be writable data */
229 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
230 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
231 if (dpl != cpl || dpl != rpl)
232 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
233 } else {
234 /* not readable code */
235 if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
236 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
237 /* if data or non-conforming code, check the rights */
238 if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
239 if (dpl < cpl || dpl < rpl)
240 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
241 }
242 }
243 if (!(e2 & DESC_P_MASK))
244 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
245 cpu_x86_load_seg_cache(env, seg_reg, selector,
246 get_seg_base(e1, e2),
247 get_seg_limit(e1, e2),
248 e2);
249 } else {
250 if (seg_reg == R_SS || seg_reg == R_CS)
251 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
252 }
253}
254
255#define SWITCH_TSS_JMP 0
256#define SWITCH_TSS_IRET 1
257#define SWITCH_TSS_CALL 2
258
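/* Hardware task switch: validate the new TSS, save the outgoing CPU state
   into the current TSS, then load registers, segments, the LDT and CR3
   from the new one. A 32-bit TSS (type bit 3 set) must be at least 104
   bytes and a 16-bit one 44 bytes, hence the 103/43 limit checks below. */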
259/* XXX: restore CPU state in registers (PowerPC case) */
260static void switch_tss(int tss_selector,
261 uint32_t e1, uint32_t e2, int source,
262 uint32_t next_eip)
263{
264 int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
265 target_ulong tss_base;
266 uint32_t new_regs[8], new_segs[6];
267 uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
268 uint32_t old_eflags, eflags_mask;
269 SegmentCache *dt;
270 int index;
271 target_ulong ptr;
272
273 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
274#ifdef DEBUG_PCALL
275 if (loglevel & CPU_LOG_PCALL)
276 fprintf(logfile, "switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);
277#endif
278
279#if defined(VBOX) && defined(DEBUG)
280 printf("switch_tss %x %x %x %d %08x\n", tss_selector, e1, e2, source, next_eip);
281#endif
282
283 /* if it is a task gate, we read the TSS segment and load it */
284 if (type == 5) {
285 if (!(e2 & DESC_P_MASK))
286 raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
287 tss_selector = e1 >> 16;
288 if (tss_selector & 4)
289 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
290 if (load_segment(&e1, &e2, tss_selector) != 0)
291 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
292 if (e2 & DESC_S_MASK)
293 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
294 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
295 if ((type & 7) != 1)
296 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
297 }
298
299 if (!(e2 & DESC_P_MASK))
300 raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
301
302 if (type & 8)
303 tss_limit_max = 103;
304 else
305 tss_limit_max = 43;
306 tss_limit = get_seg_limit(e1, e2);
307 tss_base = get_seg_base(e1, e2);
308 if ((tss_selector & 4) != 0 ||
309 tss_limit < tss_limit_max)
310 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
311 old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
312 if (old_type & 8)
313 old_tss_limit_max = 103;
314 else
315 old_tss_limit_max = 43;
316
317 /* read all the registers from the new TSS */
318 if (type & 8) {
319 /* 32 bit */
320 new_cr3 = ldl_kernel(tss_base + 0x1c);
321 new_eip = ldl_kernel(tss_base + 0x20);
322 new_eflags = ldl_kernel(tss_base + 0x24);
323 for(i = 0; i < 8; i++)
324 new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
325 for(i = 0; i < 6; i++)
326 new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
327 new_ldt = lduw_kernel(tss_base + 0x60);
328 new_trap = ldl_kernel(tss_base + 0x64);
329 } else {
330 /* 16 bit */
331 new_cr3 = 0;
332 new_eip = lduw_kernel(tss_base + 0x0e);
333 new_eflags = lduw_kernel(tss_base + 0x10);
334 for(i = 0; i < 8; i++)
335 new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
336 for(i = 0; i < 4; i++)
337 new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
338 new_ldt = lduw_kernel(tss_base + 0x2a);
339 new_segs[R_FS] = 0;
340 new_segs[R_GS] = 0;
341 new_trap = 0;
342 }
343
344 /* NOTE: we must avoid memory exceptions during the task switch,
345 so we make dummy accesses beforehand */
346 /* XXX: it can still fail in some cases, so a bigger hack is
347 necessary to validate the TLB after having done the accesses */
348
349 v1 = ldub_kernel(env->tr.base);
350 v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
351 stb_kernel(env->tr.base, v1);
352 stb_kernel(env->tr.base + old_tss_limit_max, v2);
353
354 /* clear busy bit (it is restartable) */
355 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
356 target_ulong ptr;
357 uint32_t e2;
358 ptr = env->gdt.base + (env->tr.selector & ~7);
359 e2 = ldl_kernel(ptr + 4);
360 e2 &= ~DESC_TSS_BUSY_MASK;
361 stl_kernel(ptr + 4, e2);
362 }
363 old_eflags = compute_eflags();
364 if (source == SWITCH_TSS_IRET)
365 old_eflags &= ~NT_MASK;
366
367 /* save the current state in the old TSS */
368 if (type & 8) {
369 /* 32 bit */
370 stl_kernel(env->tr.base + 0x20, next_eip);
371 stl_kernel(env->tr.base + 0x24, old_eflags);
372 stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
373 stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
374 stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
375 stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
376 stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
377 stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
378 stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
379 stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
380 for(i = 0; i < 6; i++)
381 stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
382#if defined(VBOX) && defined(DEBUG)
383 printf("TSS 32 bits switch\n");
384 printf("Saving CS=%08X\n", env->segs[R_CS].selector);
385#endif
386 } else {
387 /* 16 bit */
388 stw_kernel(env->tr.base + 0x0e, next_eip);
389 stw_kernel(env->tr.base + 0x10, old_eflags);
390 stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
391 stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
392 stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
393 stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
394 stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
395 stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
396 stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
397 stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
398 for(i = 0; i < 4; i++)
399 stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
400 }
401
402 /* from now on, if an exception occurs, it will occur in the next task
403 context */
404
405 if (source == SWITCH_TSS_CALL) {
406 stw_kernel(tss_base, env->tr.selector);
407 new_eflags |= NT_MASK;
408 }
409
410 /* set busy bit */
411 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
412 target_ulong ptr;
413 uint32_t e2;
414 ptr = env->gdt.base + (tss_selector & ~7);
415 e2 = ldl_kernel(ptr + 4);
416 e2 |= DESC_TSS_BUSY_MASK;
417 stl_kernel(ptr + 4, e2);
418 }
419
420 /* set the new CPU state */
421 /* from this point, any exception which occurs can give problems */
422 env->cr[0] |= CR0_TS_MASK;
423 env->hflags |= HF_TS_MASK;
424 env->tr.selector = tss_selector;
425 env->tr.base = tss_base;
426 env->tr.limit = tss_limit;
427 env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;
428
429 if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
430 cpu_x86_update_cr3(env, new_cr3);
431 }
432
433 /* load all registers without an exception, then reload them with
434 possible exceptions */
435 env->eip = new_eip;
436 eflags_mask = TF_MASK | AC_MASK | ID_MASK |
437 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
438 if (!(type & 8))
439 eflags_mask &= 0xffff;
440 load_eflags(new_eflags, eflags_mask);
441 /* XXX: what to do in 16 bit case ? */
442 EAX = new_regs[0];
443 ECX = new_regs[1];
444 EDX = new_regs[2];
445 EBX = new_regs[3];
446 ESP = new_regs[4];
447 EBP = new_regs[5];
448 ESI = new_regs[6];
449 EDI = new_regs[7];
450 if (new_eflags & VM_MASK) {
451 for(i = 0; i < 6; i++)
452 load_seg_vm(i, new_segs[i]);
453 /* in vm86, CPL is always 3 */
454 cpu_x86_set_cpl(env, 3);
455 } else {
456 /* CPL is set to the RPL of CS */
457 cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
458 /* load just the selectors first, as the rest may trigger exceptions */
459 for(i = 0; i < 6; i++)
460 cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
461 }
462
463 env->ldt.selector = new_ldt & ~4;
464 env->ldt.base = 0;
465 env->ldt.limit = 0;
466 env->ldt.flags = 0;
467
468 /* load the LDT */
469 if (new_ldt & 4)
470 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
471
472 if ((new_ldt & 0xfffc) != 0) {
473 dt = &env->gdt;
474 index = new_ldt & ~7;
475 if ((index + 7) > dt->limit)
476 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
477 ptr = dt->base + index;
478 e1 = ldl_kernel(ptr);
479 e2 = ldl_kernel(ptr + 4);
480 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
481 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
482 if (!(e2 & DESC_P_MASK))
483 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
484 load_seg_cache_raw_dt(&env->ldt, e1, e2);
485 }
486
487 /* load the segments */
488 if (!(new_eflags & VM_MASK)) {
489 tss_load_seg(R_CS, new_segs[R_CS]);
490 tss_load_seg(R_SS, new_segs[R_SS]);
491 tss_load_seg(R_ES, new_segs[R_ES]);
492 tss_load_seg(R_DS, new_segs[R_DS]);
493 tss_load_seg(R_FS, new_segs[R_FS]);
494 tss_load_seg(R_GS, new_segs[R_GS]);
495 }
496
497 /* check that EIP is in the CS segment limits */
498 if (new_eip > env->segs[R_CS].limit) {
499 /* XXX: different exception if CALL ? */
500 raise_exception_err(EXCP0D_GPF, 0);
501 }
502}
503
504/* check if port I/O is allowed by the TSS */
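/* The I/O permission bitmap starts at the 16-bit offset stored at byte
   0x66 of a 32-bit TSS. One bit covers one port and a set bit denies
   access; two bytes are read so that multi-byte accesses spanning a byte
   boundary are checked in one go. */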
505static inline void check_io(int addr, int size)
506{
507 int io_offset, val, mask;
508
509 /* TSS must be a valid 32 bit one */
510 if (!(env->tr.flags & DESC_P_MASK) ||
511 ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
512 env->tr.limit < 103)
513 goto fail;
514 io_offset = lduw_kernel(env->tr.base + 0x66);
515 io_offset += (addr >> 3);
516 /* Note: the check needs two bytes */
517 if ((io_offset + 1) > env->tr.limit)
518 goto fail;
519 val = lduw_kernel(env->tr.base + io_offset);
520 val >>= (addr & 7);
521 mask = (1 << size) - 1;
522 /* all bits must be zero to allow the I/O */
523 if ((val & mask) != 0) {
524 fail:
525 raise_exception_err(EXCP0D_GPF, 0);
526 }
527}
528
529void check_iob_T0(void)
530{
531 check_io(T0, 1);
532}
533
534void check_iow_T0(void)
535{
536 check_io(T0, 2);
537}
538
539void check_iol_T0(void)
540{
541 check_io(T0, 4);
542}
543
544void check_iob_DX(void)
545{
546 check_io(EDX & 0xffff, 1);
547}
548
549void check_iow_DX(void)
550{
551 check_io(EDX & 0xffff, 2);
552}
553
554void check_iol_DX(void)
555{
556 check_io(EDX & 0xffff, 4);
557}
558
559static inline unsigned int get_sp_mask(unsigned int e2)
560{
561 if (e2 & DESC_B_MASK)
562 return 0xffffffff;
563 else
564 return 0xffff;
565}
566
567#ifdef TARGET_X86_64
568#define SET_ESP(val, sp_mask)\
569do {\
570 if ((sp_mask) == 0xffff)\
571 ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
572 else if ((sp_mask) == 0xffffffffLL)\
573 ESP = (uint32_t)(val);\
574 else\
575 ESP = (val);\
576} while (0)
577#else
578#define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
579#endif
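/* SET_ESP only writes back the bits selected by sp_mask, so with a 16-bit
   stack segment the upper half of ESP is preserved. The 64-bit build
   special-cases the masks because in long mode RSP must be written in
   full (with 32-bit operations zero-extending into the upper half). */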
580
581/* XXX: add an is_user flag to have proper security support */
582#define PUSHW(ssp, sp, sp_mask, val)\
583{\
584 sp -= 2;\
585 stw_kernel((ssp) + (sp & (sp_mask)), (val));\
586}
587
588#define PUSHL(ssp, sp, sp_mask, val)\
589{\
590 sp -= 4;\
591 stl_kernel((ssp) + (sp & (sp_mask)), (val));\
592}
593
594#define POPW(ssp, sp, sp_mask, val)\
595{\
596 val = lduw_kernel((ssp) + (sp & (sp_mask)));\
597 sp += 2;\
598}
599
600#define POPL(ssp, sp, sp_mask, val)\
601{\
602 val = (uint32_t)ldl_kernel((ssp) + (sp & (sp_mask)));\
603 sp += 4;\
604}
605
606/* protected mode interrupt */
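/* Dispatch through the IDT: validate the gate and the target code segment,
   switch to the inner stack from the TSS on a privilege change, then push
   (in order) SS:ESP if the stack changed (plus GS/FS/DS/ES when coming
   from V86 mode), EFLAGS, CS, EIP and finally the error code, using 16- or
   32-bit pushes according to the gate size. */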
607static void do_interrupt_protected(int intno, int is_int, int error_code,
608 unsigned int next_eip, int is_hw)
609{
610 SegmentCache *dt;
611 target_ulong ptr, ssp;
612 int type, dpl, selector, ss_dpl, cpl;
613 int has_error_code, new_stack, shift;
614 uint32_t e1, e2, offset, ss, esp, ss_e1, ss_e2;
615 uint32_t old_eip, sp_mask;
616
617#ifdef VBOX
618 if (remR3NotifyTrap(env, intno, error_code, next_eip) != VINF_SUCCESS)
619 cpu_loop_exit();
620#endif
621
622 has_error_code = 0;
623 if (!is_int && !is_hw) {
624 switch(intno) {
625 case 8:
626 case 10:
627 case 11:
628 case 12:
629 case 13:
630 case 14:
631 case 17:
632 has_error_code = 1;
633 break;
634 }
635 }
636 if (is_int)
637 old_eip = next_eip;
638 else
639 old_eip = env->eip;
640
641 dt = &env->idt;
642 if (intno * 8 + 7 > dt->limit)
643 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
644 ptr = dt->base + intno * 8;
645 e1 = ldl_kernel(ptr);
646 e2 = ldl_kernel(ptr + 4);
647 /* check gate type */
648 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
649 switch(type) {
650 case 5: /* task gate */
651 /* must do that check here to return the correct error code */
652 if (!(e2 & DESC_P_MASK))
653 raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
654 switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
655 if (has_error_code) {
656 int type;
657 uint32_t mask;
658 /* push the error code */
659 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
660 shift = type >> 3;
661 if (env->segs[R_SS].flags & DESC_B_MASK)
662 mask = 0xffffffff;
663 else
664 mask = 0xffff;
665 esp = (ESP - (2 << shift)) & mask;
666 ssp = env->segs[R_SS].base + esp;
667 if (shift)
668 stl_kernel(ssp, error_code);
669 else
670 stw_kernel(ssp, error_code);
671 SET_ESP(esp, mask);
672 }
673 return;
674 case 6: /* 286 interrupt gate */
675 case 7: /* 286 trap gate */
676 case 14: /* 386 interrupt gate */
677 case 15: /* 386 trap gate */
678 break;
679 default:
680 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
681 break;
682 }
683 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
684 cpl = env->hflags & HF_CPL_MASK;
685 /* check privilege if software int */
686 if (is_int && dpl < cpl)
687 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
688 /* check valid bit */
689 if (!(e2 & DESC_P_MASK))
690 raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
691 selector = e1 >> 16;
692 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
693 if ((selector & 0xfffc) == 0)
694 raise_exception_err(EXCP0D_GPF, 0);
695
696 if (load_segment(&e1, &e2, selector) != 0)
697 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
698 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
699 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
700 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
701 if (dpl > cpl)
702 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
703 if (!(e2 & DESC_P_MASK))
704 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
705 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
706 /* to inner privilege level */
707 get_ss_esp_from_tss(&ss, &esp, dpl);
708 if ((ss & 0xfffc) == 0)
709 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
710 if ((ss & 3) != dpl)
711 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
712 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
713 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
714 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
715 if (ss_dpl != dpl)
716 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
717 if (!(ss_e2 & DESC_S_MASK) ||
718 (ss_e2 & DESC_CS_MASK) ||
719 !(ss_e2 & DESC_W_MASK))
720 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
721 if (!(ss_e2 & DESC_P_MASK))
722 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
723 new_stack = 1;
724 sp_mask = get_sp_mask(ss_e2);
725 ssp = get_seg_base(ss_e1, ss_e2);
726#if defined(VBOX) && defined(DEBUG)
727 printf("new stack %04X:%08X gate dpl=%d\n", ss, esp, dpl);
728#endif
729 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
730 /* to same privilege level */
731 if (env->eflags & VM_MASK)
732 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
733 new_stack = 0;
734 sp_mask = get_sp_mask(env->segs[R_SS].flags);
735 ssp = env->segs[R_SS].base;
736 esp = ESP;
737 dpl = cpl;
738 } else {
739 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
740 new_stack = 0; /* avoid warning */
741 sp_mask = 0; /* avoid warning */
742 ssp = 0; /* avoid warning */
743 esp = 0; /* avoid warning */
744 }
745
746 shift = type >> 3;
747
748#if 0
749 /* XXX: check that enough room is available */
750 push_size = 6 + (new_stack << 2) + (has_error_code << 1);
751 if (env->eflags & VM_MASK)
752 push_size += 8;
753 push_size <<= shift;
754#endif
755 if (shift == 1) {
756 if (new_stack) {
757 if (env->eflags & VM_MASK) {
758 PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
759 PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
760 PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
761 PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
762 }
763 PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
764 PUSHL(ssp, esp, sp_mask, ESP);
765 }
766 PUSHL(ssp, esp, sp_mask, compute_eflags());
767 PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
768 PUSHL(ssp, esp, sp_mask, old_eip);
769 if (has_error_code) {
770 PUSHL(ssp, esp, sp_mask, error_code);
771 }
772 } else {
773 if (new_stack) {
774 if (env->eflags & VM_MASK) {
775 PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
776 PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
777 PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
778 PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
779 }
780 PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
781 PUSHW(ssp, esp, sp_mask, ESP);
782 }
783 PUSHW(ssp, esp, sp_mask, compute_eflags());
784 PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
785 PUSHW(ssp, esp, sp_mask, old_eip);
786 if (has_error_code) {
787 PUSHW(ssp, esp, sp_mask, error_code);
788 }
789 }
790
791 if (new_stack) {
792 if (env->eflags & VM_MASK) {
793 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
794 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
795 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
796 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
797 }
798 ss = (ss & ~3) | dpl;
799 cpu_x86_load_seg_cache(env, R_SS, ss,
800 ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
801 }
802 SET_ESP(esp, sp_mask);
803
804 selector = (selector & ~3) | dpl;
805 cpu_x86_load_seg_cache(env, R_CS, selector,
806 get_seg_base(e1, e2),
807 get_seg_limit(e1, e2),
808 e2);
809 cpu_x86_set_cpl(env, dpl);
810 env->eip = offset;
811
812 /* interrupt gates clear the IF mask */
813 if ((type & 1) == 0) {
814 env->eflags &= ~IF_MASK;
815 }
816 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
817}
818
819#ifdef VBOX
820
821/* check if VME interrupt redirection is enabled in TSS */
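/* The redirection bitmap sits 32 bytes below the I/O permission bitmap in
   the TSS. A clear bit means the software interrupt is redirected to the
   real-mode style handler via the page-0 IVT; a set bit forces the normal
   protected-mode path (subject to IOPL). */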
822static inline bool is_vme_irq_redirected(int intno)
823{
824 int io_offset, intredir_offset;
825 unsigned char val, mask;
826
827 /* TSS must be a valid 32 bit one */
828 if (!(env->tr.flags & DESC_P_MASK) ||
829 ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
830 env->tr.limit < 103)
831 goto fail;
832 io_offset = lduw_kernel(env->tr.base + 0x66);
833 /* the virtual interrupt redirection bitmap is located below the io bitmap */
834 intredir_offset = io_offset - 0x20;
835
836 intredir_offset += (intno >> 3);
837 if ((intredir_offset) > env->tr.limit)
838 goto fail;
839
840 val = ldub_kernel(env->tr.base + intredir_offset);
841 mask = 1 << (unsigned char)(intno & 7);
842
843 /* bit set means no redirection. */
844 if ((val & mask) != 0) {
845 return false;
846 }
847 return true;
848
849fail:
850 raise_exception_err(EXCP0D_GPF, 0);
851 return true;
852}
853
854/* V86 mode software interrupt with CR4.VME=1 */
855static void do_soft_interrupt_vme(int intno, int error_code, unsigned int next_eip)
856{
857 target_ulong ptr, ssp;
858 int selector;
859 uint32_t offset, esp;
860 uint32_t old_cs, old_eflags;
861 uint32_t iopl;
862
863 iopl = ((env->eflags >> IOPL_SHIFT) & 3);
864
865 if (!is_vme_irq_redirected(intno))
866 {
867 if (iopl == 3)
868 /* normal protected mode handler call */
869 return do_interrupt_protected(intno, 1, error_code, next_eip, 0);
870 else
871 raise_exception_err(EXCP0D_GPF, 0);
872 }
873
874 /* the virtual mode IDT (IVT) is at linear address 0 */
875 ptr = 0 + intno * 4;
876 offset = lduw_kernel(ptr);
877 selector = lduw_kernel(ptr + 2);
878 esp = ESP;
879 ssp = env->segs[R_SS].base;
880 old_cs = env->segs[R_CS].selector;
881
882 old_eflags = compute_eflags();
883 if (iopl < 3)
884 {
885 /* copy VIF into IF and set IOPL to 3 */
886 if (env->eflags & VIF_MASK)
887 old_eflags |= IF_MASK;
888 else
889 old_eflags &= ~IF_MASK;
890
891 old_eflags |= (3 << IOPL_SHIFT);
892 }
893
894 /* XXX: use SS segment size ? */
895 PUSHW(ssp, esp, 0xffff, old_eflags);
896 PUSHW(ssp, esp, 0xffff, old_cs);
897 PUSHW(ssp, esp, 0xffff, next_eip);
898
899 /* update processor state */
900 ESP = (ESP & ~0xffff) | (esp & 0xffff);
901 env->eip = offset;
902 env->segs[R_CS].selector = selector;
903 env->segs[R_CS].base = (selector << 4);
904 env->eflags &= ~(TF_MASK | RF_MASK);
905
906 if (iopl < 3)
907 env->eflags &= ~VIF_MASK;
908 else
909 env->eflags &= ~IF_MASK;
910}
911#endif /* VBOX */
912
913#ifdef TARGET_X86_64
914
915#define PUSHQ(sp, val)\
916{\
917 sp -= 8;\
918 stq_kernel(sp, (val));\
919}
920
921#define POPQ(sp, val)\
922{\
923 val = ldq_kernel(sp);\
924 sp += 8;\
925}
926
927static inline target_ulong get_rsp_from_tss(int level)
928{
929 int index;
930
931#if 0
932 printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
933 env->tr.base, env->tr.limit);
934#endif
935
936 if (!(env->tr.flags & DESC_P_MASK))
937 cpu_abort(env, "invalid tss");
938 index = 8 * level + 4;
939 if ((index + 7) > env->tr.limit)
940 raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
941 return ldq_kernel(env->tr.base + index);
942}
943
944/* 64 bit interrupt */
945static void do_interrupt64(int intno, int is_int, int error_code,
946 target_ulong next_eip, int is_hw)
947{
948 SegmentCache *dt;
949 target_ulong ptr;
950 int type, dpl, selector, cpl, ist;
951 int has_error_code, new_stack;
952 uint32_t e1, e2, e3, ss;
953 target_ulong old_eip, esp, offset;
954
955 has_error_code = 0;
956 if (!is_int && !is_hw) {
957 switch(intno) {
958 case 8:
959 case 10:
960 case 11:
961 case 12:
962 case 13:
963 case 14:
964 case 17:
965 has_error_code = 1;
966 break;
967 }
968 }
969 if (is_int)
970 old_eip = next_eip;
971 else
972 old_eip = env->eip;
973
974 dt = &env->idt;
975 if (intno * 16 + 15 > dt->limit)
976 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
977 ptr = dt->base + intno * 16;
978 e1 = ldl_kernel(ptr);
979 e2 = ldl_kernel(ptr + 4);
980 e3 = ldl_kernel(ptr + 8);
981 /* check gate type */
982 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
983 switch(type) {
984 case 14: /* 386 interrupt gate */
985 case 15: /* 386 trap gate */
986 break;
987 default:
988 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
989 break;
990 }
991 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
992 cpl = env->hflags & HF_CPL_MASK;
993 /* check privilege if software int */
994 if (is_int && dpl < cpl)
995 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
996 /* check valid bit */
997 if (!(e2 & DESC_P_MASK))
998 raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2);
999 selector = e1 >> 16;
1000 offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
1001 ist = e2 & 7;
1002 if ((selector & 0xfffc) == 0)
1003 raise_exception_err(EXCP0D_GPF, 0);
1004
1005 if (load_segment(&e1, &e2, selector) != 0)
1006 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1007 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
1008 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1009 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1010 if (dpl > cpl)
1011 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1012 if (!(e2 & DESC_P_MASK))
1013 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
1014 if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))
1015 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1016 if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
1017 /* to inner privilege level */
1018 if (ist != 0)
1019 esp = get_rsp_from_tss(ist + 3);
1020 else
1021 esp = get_rsp_from_tss(dpl);
1022 esp &= ~0xfLL; /* align stack */
1023 ss = 0;
1024 new_stack = 1;
1025 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
1026 /* to same privilege level */
1027 if (env->eflags & VM_MASK)
1028 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1029 new_stack = 0;
1030 if (ist != 0)
1031 esp = get_rsp_from_tss(ist + 3);
1032 else
1033 esp = ESP;
1034 esp &= ~0xfLL; /* align stack */
1035 dpl = cpl;
1036 } else {
1037 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1038 new_stack = 0; /* avoid warning */
1039 esp = 0; /* avoid warning */
1040 }
1041
1042 PUSHQ(esp, env->segs[R_SS].selector);
1043 PUSHQ(esp, ESP);
1044 PUSHQ(esp, compute_eflags());
1045 PUSHQ(esp, env->segs[R_CS].selector);
1046 PUSHQ(esp, old_eip);
1047 if (has_error_code) {
1048 PUSHQ(esp, error_code);
1049 }
1050
1051 if (new_stack) {
1052 ss = 0 | dpl;
1053 cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
1054 }
1055 ESP = esp;
1056
1057 selector = (selector & ~3) | dpl;
1058 cpu_x86_load_seg_cache(env, R_CS, selector,
1059 get_seg_base(e1, e2),
1060 get_seg_limit(e1, e2),
1061 e2);
1062 cpu_x86_set_cpl(env, dpl);
1063 env->eip = offset;
1064
1065 /* interrupt gates clear the IF mask */
1066 if ((type & 1) == 0) {
1067 env->eflags &= ~IF_MASK;
1068 }
1069 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
1070}
1071#endif
1072
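/* SYSCALL: the new CS selector comes from STAR[47:32] (SS is CS + 8). In
   long mode RCX receives the return RIP, R11 the current RFLAGS, RFLAGS
   is masked by SFMASK and RIP is loaded from LSTAR (CSTAR for a
   compatibility-mode caller); legacy mode jumps to the low 32 bits of
   STAR with IF, RF and VM cleared. */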
1073void helper_syscall(int next_eip_addend)
1074{
1075 int selector;
1076
1077 if (!(env->efer & MSR_EFER_SCE)) {
1078 raise_exception_err(EXCP06_ILLOP, 0);
1079 }
1080 selector = (env->star >> 32) & 0xffff;
1081#ifdef TARGET_X86_64
1082 if (env->hflags & HF_LMA_MASK) {
1083 int code64;
1084
1085 ECX = env->eip + next_eip_addend;
1086 env->regs[11] = compute_eflags();
1087
1088 code64 = env->hflags & HF_CS64_MASK;
1089
1090 cpu_x86_set_cpl(env, 0);
1091 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1092 0, 0xffffffff,
1093 DESC_G_MASK | DESC_P_MASK |
1094 DESC_S_MASK |
1095 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
1096 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1097 0, 0xffffffff,
1098 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1099 DESC_S_MASK |
1100 DESC_W_MASK | DESC_A_MASK);
1101 env->eflags &= ~env->fmask;
1102 if (code64)
1103 env->eip = env->lstar;
1104 else
1105 env->eip = env->cstar;
1106 } else
1107#endif
1108 {
1109 ECX = (uint32_t)(env->eip + next_eip_addend);
1110
1111 cpu_x86_set_cpl(env, 0);
1112 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1113 0, 0xffffffff,
1114 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1115 DESC_S_MASK |
1116 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1117 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1118 0, 0xffffffff,
1119 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1120 DESC_S_MASK |
1121 DESC_W_MASK | DESC_A_MASK);
1122 env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
1123 env->eip = (uint32_t)env->star;
1124 }
1125}
1126
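/* SYSRET: returns to CPL 3 with CS derived from STAR[63:48] (+16 and with
   the L bit set for a 64-bit return), SS = STAR[63:48] + 8; long mode
   restores RFLAGS from R11 and RIP from RCX, legacy mode just sets IF. */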
1127void helper_sysret(int dflag)
1128{
1129 int cpl, selector;
1130
1131 if (!(env->efer & MSR_EFER_SCE)) {
1132 raise_exception_err(EXCP06_ILLOP, 0);
1133 }
1134 cpl = env->hflags & HF_CPL_MASK;
1135 if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
1136 raise_exception_err(EXCP0D_GPF, 0);
1137 }
1138 selector = (env->star >> 48) & 0xffff;
1139#ifdef TARGET_X86_64
1140 if (env->hflags & HF_LMA_MASK) {
1141 if (dflag == 2) {
1142 cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
1143 0, 0xffffffff,
1144 DESC_G_MASK | DESC_P_MASK |
1145 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1146 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
1147 DESC_L_MASK);
1148 env->eip = ECX;
1149 } else {
1150 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1151 0, 0xffffffff,
1152 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1153 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1154 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1155 env->eip = (uint32_t)ECX;
1156 }
1157 cpu_x86_load_seg_cache(env, R_SS, selector + 8,
1158 0, 0xffffffff,
1159 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1160 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1161 DESC_W_MASK | DESC_A_MASK);
1162 load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
1163 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
1164 cpu_x86_set_cpl(env, 3);
1165 } else
1166#endif
1167 {
1168 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1169 0, 0xffffffff,
1170 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1171 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1172 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1173 env->eip = (uint32_t)ECX;
1174 cpu_x86_load_seg_cache(env, R_SS, selector + 8,
1175 0, 0xffffffff,
1176 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1177 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1178 DESC_W_MASK | DESC_A_MASK);
1179 env->eflags |= IF_MASK;
1180 cpu_x86_set_cpl(env, 3);
1181 }
1182#ifdef USE_KQEMU
1183 if (kqemu_is_ok(env)) {
1184 if (env->hflags & HF_LMA_MASK)
1185 CC_OP = CC_OP_EFLAGS;
1186 env->exception_index = -1;
1187 cpu_loop_exit();
1188 }
1189#endif
1190}
1191
1192#ifdef VBOX
1193/**
1194 * Checks and processes external VMM events.
1195 * Called by op_check_external_event() when any of the flags is set and can be serviced.
1196 */
1197void helper_external_event(void)
1198{
1199#if defined(__DARWIN__) && defined(VBOX_STRICT)
1200# if 0
1201 //uintptr_t uFrameAddr = (uintptr_t)__builtin_frame_address(0); - this is broken (uses %ebp)
1202 //AssertMsg(!( (uFrameAddr - sizeof(uintptr_t)) & 7 ), ("uFrameAddr=%#p\n", uFrameAddr));
1203# else
1204 uintptr_t uESP;
1205 __asm__ __volatile__("movl %%esp, %0" : "=r" (uESP));
1206 AssertMsg(!(uESP & 15), ("esp=%#p\n", uESP));
1207# endif
1208#endif
1209 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD)
1210 {
1211 ASMAtomicAndS32(&env->interrupt_request, ~CPU_INTERRUPT_EXTERNAL_HARD);
1212 cpu_interrupt(env, CPU_INTERRUPT_HARD);
1213 }
1214 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_EXIT)
1215 {
1216 ASMAtomicAndS32(&env->interrupt_request, ~CPU_INTERRUPT_EXTERNAL_EXIT);
1217 cpu_interrupt(env, CPU_INTERRUPT_EXIT);
1218 }
1219 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_DMA)
1220 {
1221 ASMAtomicAndS32(&env->interrupt_request, ~CPU_INTERRUPT_EXTERNAL_DMA);
1222 remR3DmaRun(env);
1223 }
1224 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_TIMER)
1225 {
1226 ASMAtomicAndS32(&env->interrupt_request, ~CPU_INTERRUPT_EXTERNAL_TIMER);
1227 remR3TimersRun(env);
1228 }
1229}
1230#endif /* VBOX */
1231
1232/* real mode interrupt */
1233static void do_interrupt_real(int intno, int is_int, int error_code,
1234 unsigned int next_eip)
1235{
1236 SegmentCache *dt;
1237 target_ulong ptr, ssp;
1238 int selector;
1239 uint32_t offset, esp;
1240 uint32_t old_cs, old_eip;
1241
1242 /* real mode (simpler!) */
1243 dt = &env->idt;
1244 if (intno * 4 + 3 > dt->limit)
1245 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
1246 ptr = dt->base + intno * 4;
1247 offset = lduw_kernel(ptr);
1248 selector = lduw_kernel(ptr + 2);
1249 esp = ESP;
1250 ssp = env->segs[R_SS].base;
1251 if (is_int)
1252 old_eip = next_eip;
1253 else
1254 old_eip = env->eip;
1255 old_cs = env->segs[R_CS].selector;
1256 /* XXX: use SS segment size ? */
1257 PUSHW(ssp, esp, 0xffff, compute_eflags());
1258 PUSHW(ssp, esp, 0xffff, old_cs);
1259 PUSHW(ssp, esp, 0xffff, old_eip);
1260
1261 /* update processor state */
1262 ESP = (ESP & ~0xffff) | (esp & 0xffff);
1263 env->eip = offset;
1264 env->segs[R_CS].selector = selector;
1265 env->segs[R_CS].base = (selector << 4);
1266 env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
1267}
1268
1269/* fake user mode interrupt */
1270void do_interrupt_user(int intno, int is_int, int error_code,
1271 target_ulong next_eip)
1272{
1273 SegmentCache *dt;
1274 target_ulong ptr;
1275 int dpl, cpl;
1276 uint32_t e2;
1277
1278 dt = &env->idt;
1279 ptr = dt->base + (intno * 8);
1280 e2 = ldl_kernel(ptr + 4);
1281
1282 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1283 cpl = env->hflags & HF_CPL_MASK;
1284 /* check privilege if software int */
1285 if (is_int && dpl < cpl)
1286 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
1287
1288 /* Since we emulate only user space, we cannot do more than
1289 exit the emulation with the suitable exception and error
1290 code */
1291 if (is_int)
1292 EIP = next_eip;
1293}
1294
1295/*
1296 * Begin execution of an interrupt. is_int is TRUE if coming from
1297 * the int instruction. next_eip is the EIP value AFTER the interrupt
1298 * instruction. It is only relevant if is_int is TRUE.
1299 */
1300void do_interrupt(int intno, int is_int, int error_code,
1301 target_ulong next_eip, int is_hw)
1302{
1303 if (loglevel & CPU_LOG_INT) {
1304 if ((env->cr[0] & CR0_PE_MASK)) {
1305 static int count;
1306 fprintf(logfile, "%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
1307 count, intno, error_code, is_int,
1308 env->hflags & HF_CPL_MASK,
1309 env->segs[R_CS].selector, EIP,
1310 (int)env->segs[R_CS].base + EIP,
1311 env->segs[R_SS].selector, ESP);
1312 if (intno == 0x0e) {
1313 fprintf(logfile, " CR2=" TARGET_FMT_lx, env->cr[2]);
1314 } else {
1315 fprintf(logfile, " EAX=" TARGET_FMT_lx, EAX);
1316 }
1317 fprintf(logfile, "\n");
1318 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
1319#if 0
1320 {
1321 int i;
1322 uint8_t *ptr;
1323 fprintf(logfile, " code=");
1324 ptr = env->segs[R_CS].base + env->eip;
1325 for(i = 0; i < 16; i++) {
1326 fprintf(logfile, " %02x", ldub(ptr + i));
1327 }
1328 fprintf(logfile, "\n");
1329 }
1330#endif
1331 count++;
1332 }
1333 }
1334 if (env->cr[0] & CR0_PE_MASK) {
1335#if TARGET_X86_64
1336 if (env->hflags & HF_LMA_MASK) {
1337 do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
1338 } else
1339#endif
1340 {
1341#ifdef VBOX
1342 /* int xx *, v86 code and VME enabled? */
1343 if ( (env->eflags & VM_MASK)
1344 && (env->cr[4] & CR4_VME_MASK)
1345 && is_int
1346 && !is_hw
1347 && env->eip + 1 != next_eip /* single byte int 3 goes straight to the protected mode handler */
1348 )
1349 do_soft_interrupt_vme(intno, error_code, next_eip);
1350 else
1351#endif /* VBOX */
1352 do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
1353 }
1354 } else {
1355 do_interrupt_real(intno, is_int, error_code, next_eip);
1356 }
1357}
1358
1359/*
1360 * Signal an interrupt. It is serviced in the main CPU loop.
1361 * is_int is TRUE if coming from the int instruction. next_eip is the
1362 * EIP value AFTER the interrupt instruction. It is only relevant if
1363 * is_int is TRUE.
1364 */
1365void raise_interrupt(int intno, int is_int, int error_code,
1366 int next_eip_addend)
1367{
1368#if defined(VBOX) && defined(DEBUG) && !defined(DEBUG_dmik)
1369 Log2(("raise_interrupt: %x %x %x %08x\n", intno, is_int, error_code, env->eip + next_eip_addend));
1370#endif
1371 env->exception_index = intno;
1372 env->error_code = error_code;
1373 env->exception_is_int = is_int;
1374 env->exception_next_eip = env->eip + next_eip_addend;
1375 cpu_loop_exit();
1376}
1377
1378/* same as raise_exception_err, but do not restore global registers */
1379static void raise_exception_err_norestore(int exception_index, int error_code)
1380{
1381 env->exception_index = exception_index;
1382 env->error_code = error_code;
1383 env->exception_is_int = 0;
1384 env->exception_next_eip = 0;
1385 longjmp(env->jmp_env, 1);
1386}
1387
1388/* shortcuts to generate exceptions */
1389
1390void (raise_exception_err)(int exception_index, int error_code)
1391{
1392 raise_interrupt(exception_index, 0, error_code, 0);
1393}
1394
1395void raise_exception(int exception_index)
1396{
1397 raise_interrupt(exception_index, 0, 0, 0);
1398}
1399
1400/* SMM support */
1401
1402#if defined(CONFIG_USER_ONLY)
1403
1404void do_smm_enter(void)
1405{
1406}
1407
1408void helper_rsm(void)
1409{
1410}
1411
1412#else
1413
1414#ifdef TARGET_X86_64
1415#define SMM_REVISION_ID 0x00020064
1416#else
1417#define SMM_REVISION_ID 0x00020000
1418#endif
1419
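/* On an SMI the CPU dumps its state into the SMRAM state save area near
   the top of the 64 KiB block at SMBASE + 0x8000 and resumes execution at
   SMBASE + 0x8000 with flat, real-mode-like segments; helper_rsm() below
   performs the inverse. (Under VBOX this path is not expected and aborts.) */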
1420void do_smm_enter(void)
1421{
1422#ifdef VBOX
1423 cpu_abort(env, "do_smm_enter");
1424#else /* !VBOX */
1425 target_ulong sm_state;
1426 SegmentCache *dt;
1427 int i, offset;
1428
1429 if (loglevel & CPU_LOG_INT) {
1430 fprintf(logfile, "SMM: enter\n");
1431 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
1432 }
1433
1434 env->hflags |= HF_SMM_MASK;
1435 cpu_smm_update(env);
1436
1437 sm_state = env->smbase + 0x8000;
1438
1439#ifdef TARGET_X86_64
1440 for(i = 0; i < 6; i++) {
1441 dt = &env->segs[i];
1442 offset = 0x7e00 + i * 16;
1443 stw_phys(sm_state + offset, dt->selector);
1444 stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
1445 stl_phys(sm_state + offset + 4, dt->limit);
1446 stq_phys(sm_state + offset + 8, dt->base);
1447 }
1448
1449 stq_phys(sm_state + 0x7e68, env->gdt.base);
1450 stl_phys(sm_state + 0x7e64, env->gdt.limit);
1451
1452 stw_phys(sm_state + 0x7e70, env->ldt.selector);
1453 stq_phys(sm_state + 0x7e78, env->ldt.base);
1454 stl_phys(sm_state + 0x7e74, env->ldt.limit);
1455 stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);
1456
1457 stq_phys(sm_state + 0x7e88, env->idt.base);
1458 stl_phys(sm_state + 0x7e84, env->idt.limit);
1459
1460 stw_phys(sm_state + 0x7e90, env->tr.selector);
1461 stq_phys(sm_state + 0x7e98, env->tr.base);
1462 stl_phys(sm_state + 0x7e94, env->tr.limit);
1463 stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);
1464
1465 stq_phys(sm_state + 0x7ed0, env->efer);
1466
1467 stq_phys(sm_state + 0x7ff8, EAX);
1468 stq_phys(sm_state + 0x7ff0, ECX);
1469 stq_phys(sm_state + 0x7fe8, EDX);
1470 stq_phys(sm_state + 0x7fe0, EBX);
1471 stq_phys(sm_state + 0x7fd8, ESP);
1472 stq_phys(sm_state + 0x7fd0, EBP);
1473 stq_phys(sm_state + 0x7fc8, ESI);
1474 stq_phys(sm_state + 0x7fc0, EDI);
1475 for(i = 8; i < 16; i++)
1476 stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
1477 stq_phys(sm_state + 0x7f78, env->eip);
1478 stl_phys(sm_state + 0x7f70, compute_eflags());
1479 stl_phys(sm_state + 0x7f68, env->dr[6]);
1480 stl_phys(sm_state + 0x7f60, env->dr[7]);
1481
1482 stl_phys(sm_state + 0x7f48, env->cr[4]);
1483 stl_phys(sm_state + 0x7f50, env->cr[3]);
1484 stl_phys(sm_state + 0x7f58, env->cr[0]);
1485
1486 stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1487 stl_phys(sm_state + 0x7f00, env->smbase);
1488#else
1489 stl_phys(sm_state + 0x7ffc, env->cr[0]);
1490 stl_phys(sm_state + 0x7ff8, env->cr[3]);
1491 stl_phys(sm_state + 0x7ff4, compute_eflags());
1492 stl_phys(sm_state + 0x7ff0, env->eip);
1493 stl_phys(sm_state + 0x7fec, EDI);
1494 stl_phys(sm_state + 0x7fe8, ESI);
1495 stl_phys(sm_state + 0x7fe4, EBP);
1496 stl_phys(sm_state + 0x7fe0, ESP);
1497 stl_phys(sm_state + 0x7fdc, EBX);
1498 stl_phys(sm_state + 0x7fd8, EDX);
1499 stl_phys(sm_state + 0x7fd4, ECX);
1500 stl_phys(sm_state + 0x7fd0, EAX);
1501 stl_phys(sm_state + 0x7fcc, env->dr[6]);
1502 stl_phys(sm_state + 0x7fc8, env->dr[7]);
1503
1504 stl_phys(sm_state + 0x7fc4, env->tr.selector);
1505 stl_phys(sm_state + 0x7f64, env->tr.base);
1506 stl_phys(sm_state + 0x7f60, env->tr.limit);
1507 stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);
1508
1509 stl_phys(sm_state + 0x7fc0, env->ldt.selector);
1510 stl_phys(sm_state + 0x7f80, env->ldt.base);
1511 stl_phys(sm_state + 0x7f7c, env->ldt.limit);
1512 stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);
1513
1514 stl_phys(sm_state + 0x7f74, env->gdt.base);
1515 stl_phys(sm_state + 0x7f70, env->gdt.limit);
1516
1517 stl_phys(sm_state + 0x7f58, env->idt.base);
1518 stl_phys(sm_state + 0x7f54, env->idt.limit);
1519
1520 for(i = 0; i < 6; i++) {
1521 dt = &env->segs[i];
1522 if (i < 3)
1523 offset = 0x7f84 + i * 12;
1524 else
1525 offset = 0x7f2c + (i - 3) * 12;
1526 stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
1527 stl_phys(sm_state + offset + 8, dt->base);
1528 stl_phys(sm_state + offset + 4, dt->limit);
1529 stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
1530 }
1531 stl_phys(sm_state + 0x7f14, env->cr[4]);
1532
1533 stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1534 stl_phys(sm_state + 0x7ef8, env->smbase);
1535#endif
1536 /* init SMM cpu state */
1537
1538#ifdef TARGET_X86_64
1539 env->efer = 0;
1540 env->hflags &= ~HF_LMA_MASK;
1541#endif
1542 load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1543 env->eip = 0x00008000;
1544 cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
1545 0xffffffff, 0);
1546 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
1547 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
1548 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
1549 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
1550 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);
1551
1552 cpu_x86_update_cr0(env,
1553 env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK));
1554 cpu_x86_update_cr4(env, 0);
1555 env->dr[7] = 0x00000400;
1556 CC_OP = CC_OP_EFLAGS;
1557#endif /* VBOX */
1558}
1559
1560void helper_rsm(void)
1561{
1562#ifdef VBOX
1563 cpu_abort(env, "helper_rsm");
1564#else /* !VBOX */
1565 target_ulong sm_state;
1566 int i, offset;
1567 uint32_t val;
1568
1569 sm_state = env->smbase + 0x8000;
1570#ifdef TARGET_X86_64
1571 env->efer = ldq_phys(sm_state + 0x7ed0);
1572 if (env->efer & MSR_EFER_LMA)
1573 env->hflags |= HF_LMA_MASK;
1574 else
1575 env->hflags &= ~HF_LMA_MASK;
1576
1577 for(i = 0; i < 6; i++) {
1578 offset = 0x7e00 + i * 16;
1579 cpu_x86_load_seg_cache(env, i,
1580 lduw_phys(sm_state + offset),
1581 ldq_phys(sm_state + offset + 8),
1582 ldl_phys(sm_state + offset + 4),
1583 (lduw_phys(sm_state + offset + 2) & 0xf0ff) << 8);
1584 }
1585
1586 env->gdt.base = ldq_phys(sm_state + 0x7e68);
1587 env->gdt.limit = ldl_phys(sm_state + 0x7e64);
1588
1589 env->ldt.selector = lduw_phys(sm_state + 0x7e70);
1590 env->ldt.base = ldq_phys(sm_state + 0x7e78);
1591 env->ldt.limit = ldl_phys(sm_state + 0x7e74);
1592 env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;
1593
1594 env->idt.base = ldq_phys(sm_state + 0x7e88);
1595 env->idt.limit = ldl_phys(sm_state + 0x7e84);
1596
1597 env->tr.selector = lduw_phys(sm_state + 0x7e90);
1598 env->tr.base = ldq_phys(sm_state + 0x7e98);
1599 env->tr.limit = ldl_phys(sm_state + 0x7e94);
1600 env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;
1601
1602 EAX = ldq_phys(sm_state + 0x7ff8);
1603 ECX = ldq_phys(sm_state + 0x7ff0);
1604 EDX = ldq_phys(sm_state + 0x7fe8);
1605 EBX = ldq_phys(sm_state + 0x7fe0);
1606 ESP = ldq_phys(sm_state + 0x7fd8);
1607 EBP = ldq_phys(sm_state + 0x7fd0);
1608 ESI = ldq_phys(sm_state + 0x7fc8);
1609 EDI = ldq_phys(sm_state + 0x7fc0);
1610 for(i = 8; i < 16; i++)
1611 env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
1612 env->eip = ldq_phys(sm_state + 0x7f78);
1613 load_eflags(ldl_phys(sm_state + 0x7f70),
1614 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1615 env->dr[6] = ldl_phys(sm_state + 0x7f68);
1616 env->dr[7] = ldl_phys(sm_state + 0x7f60);
1617
1618 cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
1619 cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
1620 cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));
1621
1622 val = ldl_phys(sm_state + 0x7efc); /* revision ID */
1623 if (val & 0x20000) {
1624 env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
1625 }
1626#else
1627 cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
1628 cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
1629 load_eflags(ldl_phys(sm_state + 0x7ff4),
1630 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1631 env->eip = ldl_phys(sm_state + 0x7ff0);
1632 EDI = ldl_phys(sm_state + 0x7fec);
1633 ESI = ldl_phys(sm_state + 0x7fe8);
1634 EBP = ldl_phys(sm_state + 0x7fe4);
1635 ESP = ldl_phys(sm_state + 0x7fe0);
1636 EBX = ldl_phys(sm_state + 0x7fdc);
1637 EDX = ldl_phys(sm_state + 0x7fd8);
1638 ECX = ldl_phys(sm_state + 0x7fd4);
1639 EAX = ldl_phys(sm_state + 0x7fd0);
1640 env->dr[6] = ldl_phys(sm_state + 0x7fcc);
1641 env->dr[7] = ldl_phys(sm_state + 0x7fc8);
1642
1643 env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
1644 env->tr.base = ldl_phys(sm_state + 0x7f64);
1645 env->tr.limit = ldl_phys(sm_state + 0x7f60);
1646 env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;
1647
1648 env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
1649 env->ldt.base = ldl_phys(sm_state + 0x7f80);
1650 env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
1651 env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;
1652
1653 env->gdt.base = ldl_phys(sm_state + 0x7f74);
1654 env->gdt.limit = ldl_phys(sm_state + 0x7f70);
1655
1656 env->idt.base = ldl_phys(sm_state + 0x7f58);
1657 env->idt.limit = ldl_phys(sm_state + 0x7f54);
1658
1659 for(i = 0; i < 6; i++) {
1660 if (i < 3)
1661 offset = 0x7f84 + i * 12;
1662 else
1663 offset = 0x7f2c + (i - 3) * 12;
1664 cpu_x86_load_seg_cache(env, i,
1665 ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
1666 ldl_phys(sm_state + offset + 8),
1667 ldl_phys(sm_state + offset + 4),
1668 (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
1669 }
1670 cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));
1671
1672 val = ldl_phys(sm_state + 0x7efc); /* revision ID */
1673 if (val & 0x20000) {
1674 env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
1675 }
1676#endif
1677 CC_OP = CC_OP_EFLAGS;
1678 env->hflags &= ~HF_SMM_MASK;
1679 cpu_smm_update(env);
1680
1681 if (loglevel & CPU_LOG_INT) {
1682 fprintf(logfile, "SMM: after RSM\n");
1683 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
1684 }
1685#endif /* !VBOX */
1686}
1687
1688#endif /* !CONFIG_USER_ONLY */
1689
1690
1691#ifdef BUGGY_GCC_DIV64
1692/* gcc 2.95.4 on PowerPC does not seem to like using __udivdi3, so we
1693 call it from another function */
1694uint32_t div32(uint64_t *q_ptr, uint64_t num, uint32_t den)
1695{
1696 *q_ptr = num / den;
1697 return num % den;
1698}
1699
1700int32_t idiv32(int64_t *q_ptr, int64_t num, int32_t den)
1701{
1702 *q_ptr = num / den;
1703 return num % den;
1704}
1705#endif
1706
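/* 32-bit DIV: the 64-bit dividend EDX:EAX is divided by T0; the quotient
   goes to EAX and the remainder to EDX. #DE (EXCP00_DIVZ) is raised on
   division by zero or when the quotient does not fit in 32 bits. */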
1707void helper_divl_EAX_T0(void)
1708{
1709 unsigned int den, r;
1710 uint64_t num, q;
1711
1712 num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
1713 den = T0;
1714 if (den == 0) {
1715 raise_exception(EXCP00_DIVZ);
1716 }
1717#ifdef BUGGY_GCC_DIV64
1718 r = div32(&q, num, den);
1719#else
1720 q = (num / den);
1721 r = (num % den);
1722#endif
1723 if (q > 0xffffffff)
1724 raise_exception(EXCP00_DIVZ);
1725 EAX = (uint32_t)q;
1726 EDX = (uint32_t)r;
1727}
1728
1729void helper_idivl_EAX_T0(void)
1730{
1731 int den, r;
1732 int64_t num, q;
1733
1734 num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
1735 den = T0;
1736 if (den == 0) {
1737 raise_exception(EXCP00_DIVZ);
1738 }
1739#ifdef BUGGY_GCC_DIV64
1740 r = idiv32(&q, num, den);
1741#else
1742 q = (num / den);
1743 r = (num % den);
1744#endif
1745 if (q != (int32_t)q)
1746 raise_exception(EXCP00_DIVZ);
1747 EAX = (uint32_t)q;
1748 EDX = (uint32_t)r;
1749}
1750
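/* CMPXCHG8B: compare EDX:EAX with the 64-bit operand at A0; if they are
   equal, store ECX:EBX there and set ZF, otherwise load the operand into
   EDX:EAX and clear ZF. */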
1751void helper_cmpxchg8b(void)
1752{
1753 uint64_t d;
1754 int eflags;
1755
1756 eflags = cc_table[CC_OP].compute_all();
1757 d = ldq(A0);
1758 if (d == (((uint64_t)EDX << 32) | EAX)) {
1759 stq(A0, ((uint64_t)ECX << 32) | EBX);
1760 eflags |= CC_Z;
1761 } else {
1762 EDX = d >> 32;
1763 EAX = d;
1764 eflags &= ~CC_Z;
1765 }
1766 CC_SRC = eflags;
1767}
1768
1769void helper_cpuid(void)
1770{
1771#ifndef VBOX
1772 uint32_t index;
1773 index = (uint32_t)EAX;
1774
1775 /* test if maximum index reached */
1776 if (index & 0x80000000) {
1777 if (index > env->cpuid_xlevel)
1778 index = env->cpuid_level;
1779 } else {
1780 if (index > env->cpuid_level)
1781 index = env->cpuid_level;
1782 }
1783
1784 switch(index) {
1785 case 0:
1786 EAX = env->cpuid_level;
1787 EBX = env->cpuid_vendor1;
1788 EDX = env->cpuid_vendor2;
1789 ECX = env->cpuid_vendor3;
1790 break;
1791 case 1:
1792 EAX = env->cpuid_version;
1793 EBX = 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
1794 ECX = env->cpuid_ext_features;
1795 EDX = env->cpuid_features;
1796 break;
1797 case 2:
1798 /* cache info: needed for Pentium Pro compatibility */
1799 EAX = 0x410601;
1800 EBX = 0;
1801 ECX = 0;
1802 EDX = 0;
1803 break;
1804 case 0x80000000:
1805 EAX = env->cpuid_xlevel;
1806 EBX = env->cpuid_vendor1;
1807 EDX = env->cpuid_vendor2;
1808 ECX = env->cpuid_vendor3;
1809 break;
1810 case 0x80000001:
1811 EAX = env->cpuid_features;
1812 EBX = 0;
1813 ECX = 0;
1814 EDX = env->cpuid_ext2_features;
1815 break;
1816 case 0x80000002:
1817 case 0x80000003:
1818 case 0x80000004:
1819 EAX = env->cpuid_model[(index - 0x80000002) * 4 + 0];
1820 EBX = env->cpuid_model[(index - 0x80000002) * 4 + 1];
1821 ECX = env->cpuid_model[(index - 0x80000002) * 4 + 2];
1822 EDX = env->cpuid_model[(index - 0x80000002) * 4 + 3];
1823 break;
1824 case 0x80000005:
1825 /* cache info (L1 cache) */
1826 EAX = 0x01ff01ff;
1827 EBX = 0x01ff01ff;
1828 ECX = 0x40020140;
1829 EDX = 0x40020140;
1830 break;
1831 case 0x80000006:
1832 /* cache info (L2 cache) */
1833 EAX = 0;
1834 EBX = 0x42004200;
1835 ECX = 0x02008140;
1836 EDX = 0;
1837 break;
1838 case 0x80000008:
1839 /* virtual & phys address size in low 2 bytes. */
1840 EAX = 0x00003028;
1841 EBX = 0;
1842 ECX = 0;
1843 EDX = 0;
1844 break;
1845 default:
1846 /* reserved values: zero */
1847 EAX = 0;
1848 EBX = 0;
1849 ECX = 0;
1850 EDX = 0;
1851 break;
1852 }
1853#else /* VBOX */
1854 remR3CpuId(env, EAX, &EAX, &EBX, &ECX, &EDX);
1855#endif /* VBOX */
1856}
1857
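/* ENTER with a non-zero nesting level: presumably after the translated
   code has pushed the old EBP (the initial esp adjustment skips that
   slot), this helper copies level-1 enclosing frame pointers from the old
   frame and finally pushes the new frame pointer (passed in T1), stepping
   4 or 2 bytes at a time depending on the operand size. */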
1858void helper_enter_level(int level, int data32)
1859{
1860 target_ulong ssp;
1861 uint32_t esp_mask, esp, ebp;
1862
1863 esp_mask = get_sp_mask(env->segs[R_SS].flags);
1864 ssp = env->segs[R_SS].base;
1865 ebp = EBP;
1866 esp = ESP;
1867 if (data32) {
1868 /* 32 bit */
1869 esp -= 4;
1870 while (--level) {
1871 esp -= 4;
1872 ebp -= 4;
1873 stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
1874 }
1875 esp -= 4;
1876 stl(ssp + (esp & esp_mask), T1);
1877 } else {
1878 /* 16 bit */
1879 esp -= 2;
1880 while (--level) {
1881 esp -= 2;
1882 ebp -= 2;
1883 stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
1884 }
1885 esp -= 2;
1886 stw(ssp + (esp & esp_mask), T1);
1887 }
1888}
1889
1890#ifdef TARGET_X86_64
1891void helper_enter64_level(int level, int data64)
1892{
1893 target_ulong esp, ebp;
1894 ebp = EBP;
1895 esp = ESP;
1896
1897 if (data64) {
1898 /* 64 bit */
1899 esp -= 8;
1900 while (--level) {
1901 esp -= 8;
1902 ebp -= 8;
1903 stq(esp, ldq(ebp));
1904 }
1905 esp -= 8;
1906 stq(esp, T1);
1907 } else {
1908 /* 16 bit */
1909 esp -= 2;
1910 while (--level) {
1911 esp -= 2;
1912 ebp -= 2;
1913 stw(esp, lduw(ebp));
1914 }
1915 esp -= 2;
1916 stw(esp, T1);
1917 }
1918}
1919#endif
1920
1921void helper_lldt_T0(void)
1922{
1923 int selector;
1924 SegmentCache *dt;
1925 uint32_t e1, e2;
1926 int index, entry_limit;
1927 target_ulong ptr;
1928#ifdef VBOX
1929 Log(("helper_lldt_T0: old ldtr=%RTsel {.base=%VGv, .limit=%VGv} new=%RTsel\n",
1930 (RTSEL)env->ldt.selector, (RTGCPTR)env->ldt.base, (RTGCPTR)env->ldt.limit, (RTSEL)(T0 & 0xffff)));
1931#endif
1932
1933 selector = T0 & 0xffff;
1934 if ((selector & 0xfffc) == 0) {
1935 /* XXX: NULL selector case: invalid LDT */
1936 env->ldt.base = 0;
1937 env->ldt.limit = 0;
1938 } else {
1939 if (selector & 0x4)
1940 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1941 dt = &env->gdt;
1942 index = selector & ~7;
1943#ifdef TARGET_X86_64
1944 if (env->hflags & HF_LMA_MASK)
1945 entry_limit = 15;
1946 else
1947#endif
1948 entry_limit = 7;
1949 if ((index + entry_limit) > dt->limit)
1950 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1951 ptr = dt->base + index;
1952 e1 = ldl_kernel(ptr);
1953 e2 = ldl_kernel(ptr + 4);
1954 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
1955 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1956 if (!(e2 & DESC_P_MASK))
1957 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
1958#ifdef TARGET_X86_64
1959 if (env->hflags & HF_LMA_MASK) {
1960 uint32_t e3;
1961 e3 = ldl_kernel(ptr + 8);
1962 load_seg_cache_raw_dt(&env->ldt, e1, e2);
1963 env->ldt.base |= (target_ulong)e3 << 32;
1964 } else
1965#endif
1966 {
1967 load_seg_cache_raw_dt(&env->ldt, e1, e2);
1968 }
1969 }
1970 env->ldt.selector = selector;
1971#ifdef VBOX
1972 Log(("helper_lldt_T0: new ldtr=%RTsel {.base=%VGv, .limit=%VGv}\n",
1973 (RTSEL)env->ldt.selector, (RTGCPTR)env->ldt.base, (RTGCPTR)env->ldt.limit));
1974#endif
1975}
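
/* Added note: in long mode, LDT (and TSS) descriptors are 16 bytes, which is
   why entry_limit is 15 there versus 7 in legacy mode; the third dword e3
   holds base[63:32], and the branch above merges it in, schematically: */
#if 0
env->ldt.base = get_seg_base(e1, e2)        /* base[31:0] from e1/e2 */
              | ((target_ulong)e3 << 32);   /* base[63:32] from e3   */
#endif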
1976
1977void helper_ltr_T0(void)
1978{
1979 int selector;
1980 SegmentCache *dt;
1981 uint32_t e1, e2;
1982 int index, type, entry_limit;
1983 target_ulong ptr;
1984
1985#ifdef VBOX
1986 Log(("helper_ltr_T0: old tr=%RTsel {.base=%VGv, .limit=%VGv, .flags=%RX32} new=%RTsel\n",
1987 (RTSEL)env->tr.selector, (RTGCPTR)env->tr.base, (RTGCPTR)env->tr.limit,
1988 env->tr.flags, (RTSEL)(T0 & 0xffff)));
1989#endif
1990
1991 selector = T0 & 0xffff;
1992 if ((selector & 0xfffc) == 0) {
1993 /* NULL selector case: invalid TR */
1994 env->tr.base = 0;
1995 env->tr.limit = 0;
1996 env->tr.flags = 0;
1997 } else {
1998 if (selector & 0x4)
1999 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2000 dt = &env->gdt;
2001 index = selector & ~7;
2002#ifdef TARGET_X86_64
2003 if (env->hflags & HF_LMA_MASK)
2004 entry_limit = 15;
2005 else
2006#endif
2007 entry_limit = 7;
2008 if ((index + entry_limit) > dt->limit)
2009 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2010 ptr = dt->base + index;
2011 e1 = ldl_kernel(ptr);
2012 e2 = ldl_kernel(ptr + 4);
2013 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2014 if ((e2 & DESC_S_MASK) ||
2015 (type != 1 && type != 9))
2016 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2017 if (!(e2 & DESC_P_MASK))
2018 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2019#ifdef TARGET_X86_64
2020 if (env->hflags & HF_LMA_MASK) {
2021 uint32_t e3;
2022 e3 = ldl_kernel(ptr + 8);
2023 load_seg_cache_raw_dt(&env->tr, e1, e2);
2024 env->tr.base |= (target_ulong)e3 << 32;
2025 } else
2026#endif
2027 {
2028 load_seg_cache_raw_dt(&env->tr, e1, e2);
2029 }
2030 e2 |= DESC_TSS_BUSY_MASK;
2031 stl_kernel(ptr + 4, e2);
2032 }
2033 env->tr.selector = selector;
2034#ifdef VBOX
2035 Log(("helper_ltr_T0: new tr=%RTsel {.base=%VGv, .limit=%VGv, .flags=%RX32} new=%RTsel\n",
2036 (RTSEL)env->tr.selector, (RTGCPTR)env->tr.base, (RTGCPTR)env->tr.limit,
2037 env->tr.flags, (RTSEL)(T0 & 0xffff)));
2038#endif
2039}
2040
2041/* only works if protected mode and not VM86. seg_reg must be != R_CS */
2042void load_seg(int seg_reg, int selector)
2043{
2044 uint32_t e1, e2;
2045 int cpl, dpl, rpl;
2046 SegmentCache *dt;
2047 int index;
2048 target_ulong ptr;
2049
2050 selector &= 0xffff;
2051 cpl = env->hflags & HF_CPL_MASK;
2052
2053#ifdef VBOX
2054 /* Trying to load a selector with CPL=1? */
2055 if (cpl == 0 && (selector & 3) == 1 && (env->state & CPU_RAW_RING0))
2056 {
2057 Log(("RPL 1 -> sel %04X -> %04X\n", selector, selector & 0xfffc));
2058 selector = selector & 0xfffc;
2059 }
2060#endif
2061
2062 if ((selector & 0xfffc) == 0) {
2063 /* null selector case */
2064 if (seg_reg == R_SS
2065#ifdef TARGET_X86_64
2066 && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
2067#endif
2068 )
2069 raise_exception_err(EXCP0D_GPF, 0);
2070 cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
2071 } else {
2072
2073 if (selector & 0x4)
2074 dt = &env->ldt;
2075 else
2076 dt = &env->gdt;
2077 index = selector & ~7;
2078 if ((index + 7) > dt->limit)
2079 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2080 ptr = dt->base + index;
2081 e1 = ldl_kernel(ptr);
2082 e2 = ldl_kernel(ptr + 4);
2083
2084 if (!(e2 & DESC_S_MASK))
2085 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2086 rpl = selector & 3;
2087 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2088 if (seg_reg == R_SS) {
2089 /* must be writable segment */
2090 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
2091 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2092 if (rpl != cpl || dpl != cpl)
2093 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2094 } else {
2095 /* must be readable segment */
2096 if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
2097 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2098
2099 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2100 /* if not conforming code, test rights */
2101 if (dpl < cpl || dpl < rpl)
2102 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2103 }
2104 }
2105
2106 if (!(e2 & DESC_P_MASK)) {
2107 if (seg_reg == R_SS)
2108 raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
2109 else
2110 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2111 }
2112
2113 /* set the access bit if not already set */
2114 if (!(e2 & DESC_A_MASK)) {
2115 e2 |= DESC_A_MASK;
2116 stl_kernel(ptr + 4, e2);
2117 }
2118
2119 cpu_x86_load_seg_cache(env, seg_reg, selector,
2120 get_seg_base(e1, e2),
2121 get_seg_limit(e1, e2),
2122 e2);
2123#if 0
2124 fprintf(logfile, "load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
2125 selector, (unsigned long)env->segs[seg_reg].base, env->segs[seg_reg].limit, env->segs[seg_reg].flags);
2126#endif
2127 }
2128}
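
/* Added sketch: the descriptor decoding that get_seg_base()/get_seg_limit()
   perform for load_seg() and friends. The 32-bit base is scattered over
   three fields and the 20-bit limit over two, scaled by 4K when the G bit
   is set; approximately: */
#if 0
static inline uint32_t seg_base(uint32_t e1, uint32_t e2)
{
    return (e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000);
}

static inline uint32_t seg_limit(uint32_t e1, uint32_t e2)
{
    uint32_t limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK)
        limit = (limit << 12) | 0xfff;  /* granularity: pages -> bytes */
    return limit;
}
#endif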
2129
2130/* protected mode jump */
2131void helper_ljmp_protected_T0_T1(int next_eip_addend)
2132{
2133 int new_cs, gate_cs, type;
2134 uint32_t e1, e2, cpl, dpl, rpl, limit;
2135 target_ulong new_eip, next_eip;
2136
2137 new_cs = T0;
2138 new_eip = T1;
2139 if ((new_cs & 0xfffc) == 0)
2140 raise_exception_err(EXCP0D_GPF, 0);
2141 if (load_segment(&e1, &e2, new_cs) != 0)
2142 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2143 cpl = env->hflags & HF_CPL_MASK;
2144 if (e2 & DESC_S_MASK) {
2145 if (!(e2 & DESC_CS_MASK))
2146 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2147 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2148 if (e2 & DESC_C_MASK) {
2149 /* conforming code segment */
2150 if (dpl > cpl)
2151 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2152 } else {
2153 /* non conforming code segment */
2154 rpl = new_cs & 3;
2155 if (rpl > cpl)
2156 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2157 if (dpl != cpl)
2158 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2159 }
2160 if (!(e2 & DESC_P_MASK))
2161 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2162 limit = get_seg_limit(e1, e2);
2163 if (new_eip > limit &&
2164 !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK))
2165 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2166 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2167 get_seg_base(e1, e2), limit, e2);
2168 EIP = new_eip;
2169 } else {
2170 /* jump to call or task gate */
2171 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2172 rpl = new_cs & 3;
2173 cpl = env->hflags & HF_CPL_MASK;
2174 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2175 switch(type) {
2176 case 1: /* 286 TSS */
2177 case 9: /* 386 TSS */
2178 case 5: /* task gate */
2179 if (dpl < cpl || dpl < rpl)
2180 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2181 next_eip = env->eip + next_eip_addend;
2182 switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
2183 CC_OP = CC_OP_EFLAGS;
2184 break;
2185 case 4: /* 286 call gate */
2186 case 12: /* 386 call gate */
2187 if ((dpl < cpl) || (dpl < rpl))
2188 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2189 if (!(e2 & DESC_P_MASK))
2190 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2191 gate_cs = e1 >> 16;
2192 new_eip = (e1 & 0xffff);
2193 if (type == 12)
2194 new_eip |= (e2 & 0xffff0000);
2195 if (load_segment(&e1, &e2, gate_cs) != 0)
2196 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2197 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2198 /* must be code segment */
2199 if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
2200 (DESC_S_MASK | DESC_CS_MASK)))
2201 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2202 if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
2203 (!(e2 & DESC_C_MASK) && (dpl != cpl)))
2204 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2205 if (!(e2 & DESC_P_MASK))
2206 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2207 limit = get_seg_limit(e1, e2);
2208 if (new_eip > limit)
2209 raise_exception_err(EXCP0D_GPF, 0);
2210 cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
2211 get_seg_base(e1, e2), limit, e2);
2212 EIP = new_eip;
2213 break;
2214 default:
2215 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2216 break;
2217 }
2218 }
2219}
2220
2221/* real mode call */
2222void helper_lcall_real_T0_T1(int shift, int next_eip)
2223{
2224 int new_cs, new_eip;
2225 uint32_t esp, esp_mask;
2226 target_ulong ssp;
2227
2228 new_cs = T0;
2229 new_eip = T1;
2230 esp = ESP;
2231 esp_mask = get_sp_mask(env->segs[R_SS].flags);
2232 ssp = env->segs[R_SS].base;
2233 if (shift) {
2234 PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
2235 PUSHL(ssp, esp, esp_mask, next_eip);
2236 } else {
2237 PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
2238 PUSHW(ssp, esp, esp_mask, next_eip);
2239 }
2240
2241 SET_ESP(esp, esp_mask);
2242 env->eip = new_eip;
2243 env->segs[R_CS].selector = new_cs;
2244 env->segs[R_CS].base = (new_cs << 4);
2245}
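
/* Added worked example: in real mode the CS base is simply the selector
   shifted left by 4 (as set above), so after "lcall 0x1234:0x0010"
   execution resumes at linear address 0x1234 * 16 + 0x10 = 0x12350. */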
2246
2247/* protected mode call */
2248void helper_lcall_protected_T0_T1(int shift, int next_eip_addend)
2249{
2250 int new_cs, new_stack, i;
2251 uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
2252 uint32_t ss, ss_e1, ss_e2, sp, type, ss_dpl, sp_mask;
2253 uint32_t val, limit, old_sp_mask;
2254 target_ulong ssp, old_ssp, next_eip, new_eip;
2255
2256 new_cs = T0;
2257 new_eip = T1;
2258 next_eip = env->eip + next_eip_addend;
2259#ifdef DEBUG_PCALL
2260 if (loglevel & CPU_LOG_PCALL) {
2261 fprintf(logfile, "lcall %04x:%08x s=%d\n",
2262 new_cs, (uint32_t)new_eip, shift);
2263 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
2264 }
2265#endif
2266 if ((new_cs & 0xfffc) == 0)
2267 raise_exception_err(EXCP0D_GPF, 0);
2268 if (load_segment(&e1, &e2, new_cs) != 0)
2269 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2270 cpl = env->hflags & HF_CPL_MASK;
2271#ifdef DEBUG_PCALL
2272 if (loglevel & CPU_LOG_PCALL) {
2273 fprintf(logfile, "desc=%08x:%08x\n", e1, e2);
2274 }
2275#endif
2276 if (e2 & DESC_S_MASK) {
2277 if (!(e2 & DESC_CS_MASK))
2278 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2279 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2280 if (e2 & DESC_C_MASK) {
2281 /* conforming code segment */
2282 if (dpl > cpl)
2283 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2284 } else {
2285 /* non conforming code segment */
2286 rpl = new_cs & 3;
2287 if (rpl > cpl)
2288 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2289 if (dpl != cpl)
2290 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2291 }
2292 if (!(e2 & DESC_P_MASK))
2293 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2294
2295#ifdef TARGET_X86_64
2296 /* XXX: check 16/32 bit cases in long mode */
2297 if (shift == 2) {
2298 target_ulong rsp;
2299 /* 64 bit case */
2300 rsp = ESP;
2301 PUSHQ(rsp, env->segs[R_CS].selector);
2302 PUSHQ(rsp, next_eip);
2303 /* from this point, not restartable */
2304 ESP = rsp;
2305 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2306 get_seg_base(e1, e2),
2307 get_seg_limit(e1, e2), e2);
2308 EIP = new_eip;
2309 } else
2310#endif
2311 {
2312 sp = ESP;
2313 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2314 ssp = env->segs[R_SS].base;
2315 if (shift) {
2316 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2317 PUSHL(ssp, sp, sp_mask, next_eip);
2318 } else {
2319 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2320 PUSHW(ssp, sp, sp_mask, next_eip);
2321 }
2322
2323 limit = get_seg_limit(e1, e2);
2324 if (new_eip > limit)
2325 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2326 /* from this point, not restartable */
2327 SET_ESP(sp, sp_mask);
2328 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2329 get_seg_base(e1, e2), limit, e2);
2330 EIP = new_eip;
2331 }
2332 } else {
2333 /* check gate type */
2334 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
2335 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2336 rpl = new_cs & 3;
2337 switch(type) {
2338 case 1: /* available 286 TSS */
2339 case 9: /* available 386 TSS */
2340 case 5: /* task gate */
2341 if (dpl < cpl || dpl < rpl)
2342 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2343 switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
2344 CC_OP = CC_OP_EFLAGS;
2345 return;
2346 case 4: /* 286 call gate */
2347 case 12: /* 386 call gate */
2348 break;
2349 default:
2350 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2351 break;
2352 }
2353 shift = type >> 3;
2354
2355 if (dpl < cpl || dpl < rpl)
2356 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2357 /* check valid bit */
2358 if (!(e2 & DESC_P_MASK))
2359 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2360 selector = e1 >> 16;
2361 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
2362 param_count = e2 & 0x1f;
2363 if ((selector & 0xfffc) == 0)
2364 raise_exception_err(EXCP0D_GPF, 0);
2365
2366 if (load_segment(&e1, &e2, selector) != 0)
2367 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2368 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
2369 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2370 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2371 if (dpl > cpl)
2372 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2373 if (!(e2 & DESC_P_MASK))
2374 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2375
2376 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
2377 /* to inner privilege level */
2378 get_ss_esp_from_tss(&ss, &sp, dpl);
2379#ifdef DEBUG_PCALL
2380 if (loglevel & CPU_LOG_PCALL)
2381 fprintf(logfile, "new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n",
2382 ss, sp, param_count, ESP);
2383#endif
2384 if ((ss & 0xfffc) == 0)
2385 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2386 if ((ss & 3) != dpl)
2387 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2388 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
2389 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2390 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2391 if (ss_dpl != dpl)
2392 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2393 if (!(ss_e2 & DESC_S_MASK) ||
2394 (ss_e2 & DESC_CS_MASK) ||
2395 !(ss_e2 & DESC_W_MASK))
2396 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2397 if (!(ss_e2 & DESC_P_MASK))
2398 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2399
2400 // push_size = ((param_count * 2) + 8) << shift;
2401
2402 old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
2403 old_ssp = env->segs[R_SS].base;
2404
2405 sp_mask = get_sp_mask(ss_e2);
2406 ssp = get_seg_base(ss_e1, ss_e2);
2407 if (shift) {
2408 PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
2409 PUSHL(ssp, sp, sp_mask, ESP);
2410 for(i = param_count - 1; i >= 0; i--) {
2411 val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
2412 PUSHL(ssp, sp, sp_mask, val);
2413 }
2414 } else {
2415 PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
2416 PUSHW(ssp, sp, sp_mask, ESP);
2417 for(i = param_count - 1; i >= 0; i--) {
2418 val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
2419 PUSHW(ssp, sp, sp_mask, val);
2420 }
2421 }
2422 new_stack = 1;
2423 } else {
2424 /* to same privilege level */
2425 sp = ESP;
2426 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2427 ssp = env->segs[R_SS].base;
2428 // push_size = (4 << shift);
2429 new_stack = 0;
2430 }
2431
2432 if (shift) {
2433 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2434 PUSHL(ssp, sp, sp_mask, next_eip);
2435 } else {
2436 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2437 PUSHW(ssp, sp, sp_mask, next_eip);
2438 }
2439
2440 /* from this point, not restartable */
2441
2442 if (new_stack) {
2443 ss = (ss & ~3) | dpl;
2444 cpu_x86_load_seg_cache(env, R_SS, ss,
2445 ssp,
2446 get_seg_limit(ss_e1, ss_e2),
2447 ss_e2);
2448 }
2449
2450 selector = (selector & ~3) | dpl;
2451 cpu_x86_load_seg_cache(env, R_CS, selector,
2452 get_seg_base(e1, e2),
2453 get_seg_limit(e1, e2),
2454 e2);
2455 cpu_x86_set_cpl(env, dpl);
2456 SET_ESP(sp, sp_mask);
2457 EIP = offset;
2458 }
2459#ifdef USE_KQEMU
2460 if (kqemu_is_ok(env)) {
2461 env->exception_index = -1;
2462 cpu_loop_exit();
2463 }
2464#endif
2465}
2466
2467/* real and vm86 mode iret */
2468void helper_iret_real(int shift)
2469{
2470 uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
2471 target_ulong ssp;
2472 int eflags_mask;
2473#ifdef VBOX
2474 bool fVME = false;
2475
2476 remR3TrapClear(env->pVM);
2477#endif /* VBOX */
2478
2479 sp_mask = 0xffff; /* XXXX: use SS segment size ? */
2480 sp = ESP;
2481 ssp = env->segs[R_SS].base;
2482 if (shift == 1) {
2483 /* 32 bits */
2484 POPL(ssp, sp, sp_mask, new_eip);
2485 POPL(ssp, sp, sp_mask, new_cs);
2486 new_cs &= 0xffff;
2487 POPL(ssp, sp, sp_mask, new_eflags);
2488 } else {
2489 /* 16 bits */
2490 POPW(ssp, sp, sp_mask, new_eip);
2491 POPW(ssp, sp, sp_mask, new_cs);
2492 POPW(ssp, sp, sp_mask, new_eflags);
2493 }
2494#ifdef VBOX
2495 if ( (env->eflags & VM_MASK)
2496 && ((env->eflags >> IOPL_SHIFT) & 3) != 3
2497 && (env->cr[4] & CR4_VME_MASK)) /* implied or else we would fault earlier */
2498 {
2499 fVME = true;
2500 /* if virtual interrupt pending and (virtual) interrupts will be enabled -> #GP */
2501 /* if TF will be set -> #GP */
2502 if ( ((new_eflags & IF_MASK) && (env->eflags & VIP_MASK))
2503 || (new_eflags & TF_MASK))
2504 raise_exception(EXCP0D_GPF);
2505 }
2506#endif /* VBOX */
2507
2508 ESP = (ESP & ~sp_mask) | (sp & sp_mask);
2509 load_seg_vm(R_CS, new_cs);
2510 env->eip = new_eip;
2511#ifdef VBOX
2512 if (fVME)
2513 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
2514 else
2515#endif
2516 if (env->eflags & VM_MASK)
2517 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
2518 else
2519 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
2520 if (shift == 0)
2521 eflags_mask &= 0xffff;
2522 load_eflags(new_eflags, eflags_mask);
2523
2524#ifdef VBOX
2525 if (fVME)
2526 {
2527 if (new_eflags & IF_MASK)
2528 env->eflags |= VIF_MASK;
2529 else
2530 env->eflags &= ~VIF_MASK;
2531 }
2532#endif /* VBOX */
2533}
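
/* Added note on the VME path above: with CR4.VME set and IOPL < 3, IRET in
   V86 mode must not touch the real IF. The code therefore drops IF (and
   IOPL) from eflags_mask and mirrors the popped IF bit into VIF instead,
   raising #GP up front when a virtual interrupt is pending (VIP) and IF
   would be enabled, or when TF would be set. */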
2534
2535static inline void validate_seg(int seg_reg, int cpl)
2536{
2537 int dpl;
2538 uint32_t e2;
2539
2540 /* XXX: on x86_64, we do not want to nullify FS and GS because
2541 they may still contain a valid base. I would be interested to
2542 know how a real x86_64 CPU behaves */
2543 if ((seg_reg == R_FS || seg_reg == R_GS) &&
2544 (env->segs[seg_reg].selector & 0xfffc) == 0)
2545 return;
2546
2547 e2 = env->segs[seg_reg].flags;
2548 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2549 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2550 /* data or non conforming code segment */
2551 if (dpl < cpl) {
2552 cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
2553 }
2554 }
2555}
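
/* Added note: validate_seg() enforces the rule that a privilege-lowering
   return may not leak segments the new CPL could not load itself: any data
   or non-conforming code segment with DPL < CPL is replaced by the null
   selector (FS/GS that are already null are skipped per the XXX above). */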
2556
2557/* protected mode iret */
2558static inline void helper_ret_protected(int shift, int is_iret, int addend)
2559{
2560 uint32_t new_cs, new_eflags, new_ss;
2561 uint32_t new_es, new_ds, new_fs, new_gs;
2562 uint32_t e1, e2, ss_e1, ss_e2;
2563 int cpl, dpl, rpl, eflags_mask, iopl;
2564 target_ulong ssp, sp, new_eip, new_esp, sp_mask;
2565
2566#ifdef TARGET_X86_64
2567 if (shift == 2)
2568 sp_mask = -1;
2569 else
2570#endif
2571 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2572 sp = ESP;
2573 ssp = env->segs[R_SS].base;
2574 new_eflags = 0; /* avoid warning */
2575#ifdef TARGET_X86_64
2576 if (shift == 2) {
2577 POPQ(sp, new_eip);
2578 POPQ(sp, new_cs);
2579 new_cs &= 0xffff;
2580 if (is_iret) {
2581 POPQ(sp, new_eflags);
2582 }
2583 } else
2584#endif
2585 if (shift == 1) {
2586 /* 32 bits */
2587 POPL(ssp, sp, sp_mask, new_eip);
2588 POPL(ssp, sp, sp_mask, new_cs);
2589 new_cs &= 0xffff;
2590 if (is_iret) {
2591 POPL(ssp, sp, sp_mask, new_eflags);
2592#if defined(VBOX) && defined(DEBUG)
2593 printf("iret: new CS %04X\n", new_cs);
2594 printf("iret: new EIP %08X\n", new_eip);
2595 printf("iret: new EFLAGS %08X\n", new_eflags);
2596 printf("iret: EAX=%08x\n", EAX);
2597#endif
2598
2599 if (new_eflags & VM_MASK)
2600 goto return_to_vm86;
2601 }
2602#ifdef VBOX
2603 if ((new_cs & 0x3) == 1 && (env->state & CPU_RAW_RING0))
2604 {
2605#ifdef DEBUG
2606 printf("RPL 1 -> new_cs %04X -> %04X\n", new_cs, new_cs & 0xfffc);
2607#endif
2608 new_cs = new_cs & 0xfffc;
2609 }
2610#endif
2611 } else {
2612 /* 16 bits */
2613 POPW(ssp, sp, sp_mask, new_eip);
2614 POPW(ssp, sp, sp_mask, new_cs);
2615 if (is_iret)
2616 POPW(ssp, sp, sp_mask, new_eflags);
2617 }
2618#ifdef DEBUG_PCALL
2619 if (loglevel & CPU_LOG_PCALL) {
2620 fprintf(logfile, "lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
2621 new_cs, new_eip, shift, addend);
2622 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
2623 }
2624#endif
2625 if ((new_cs & 0xfffc) == 0)
2626 {
2627#if defined(VBOX) && defined(DEBUG)
2628 printf("(new_cs & 0xfffc) == 0\n");
2629#endif
2630 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2631 }
2632 if (load_segment(&e1, &e2, new_cs) != 0)
2633 {
2634#if defined(VBOX) && defined(DEBUG)
2635 printf("load_segment failed\n");
2636#endif
2637 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2638 }
2639 if (!(e2 & DESC_S_MASK) ||
2640 !(e2 & DESC_CS_MASK))
2641 {
2642#if defined(VBOX) && defined(DEBUG)
2643 printf("e2 mask %08x\n", e2);
2644#endif
2645 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2646 }
2647 cpl = env->hflags & HF_CPL_MASK;
2648 rpl = new_cs & 3;
2649 if (rpl < cpl)
2650 {
2651#if defined(VBOX) && defined(DEBUG)
2652 printf("rpl < cpl (%d vs %d)\n", rpl, cpl);
2653#endif
2654 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2655 }
2656 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2657 if (e2 & DESC_C_MASK) {
2658 if (dpl > rpl)
2659 {
2660#if defined(VBOX) && defined(DEBUG)
2661 printf("dpl > rpl (%d vs %d)\n", dpl, rpl);
2662#endif
2663 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2664 }
2665 } else {
2666 if (dpl != rpl)
2667 {
2668#if defined(VBOX) && defined(DEBUG)
2669 printf("dpl != rpl (%d vs %d) e1=%x e2=%x\n", dpl, rpl, e1, e2);
2670#endif
2671 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2672 }
2673 }
2674 if (!(e2 & DESC_P_MASK))
2675 {
2676#if defined(VBOX) && defined(DEBUG)
2677 printf("DESC_P_MASK e2=%08x\n", e2);
2678#endif
2679 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2680 }
2681 sp += addend;
2682 if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
2683 ((env->hflags & HF_CS64_MASK) && !is_iret))) {
2684 /* return to same privilege level */
2685 cpu_x86_load_seg_cache(env, R_CS, new_cs,
2686 get_seg_base(e1, e2),
2687 get_seg_limit(e1, e2),
2688 e2);
2689 } else {
2690 /* return to different privilege level */
2691#ifdef TARGET_X86_64
2692 if (shift == 2) {
2693 POPQ(sp, new_esp);
2694 POPQ(sp, new_ss);
2695 new_ss &= 0xffff;
2696 } else
2697#endif
2698 if (shift == 1) {
2699 /* 32 bits */
2700 POPL(ssp, sp, sp_mask, new_esp);
2701 POPL(ssp, sp, sp_mask, new_ss);
2702 new_ss &= 0xffff;
2703 } else {
2704 /* 16 bits */
2705 POPW(ssp, sp, sp_mask, new_esp);
2706 POPW(ssp, sp, sp_mask, new_ss);
2707 }
2708#ifdef DEBUG_PCALL
2709 if (loglevel & CPU_LOG_PCALL) {
2710 fprintf(logfile, "new ss:esp=%04x:" TARGET_FMT_lx "\n",
2711 new_ss, new_esp);
2712 }
2713#endif
2714 if ((new_ss & 0xfffc) == 0) {
2715#ifdef TARGET_X86_64
2716 /* NULL ss is allowed in long mode if cpl != 3 */
2717 /* XXX: test CS64 ? */
2718 if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
2719 cpu_x86_load_seg_cache(env, R_SS, new_ss,
2720 0, 0xffffffff,
2721 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2722 DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
2723 DESC_W_MASK | DESC_A_MASK);
2724 ss_e2 = DESC_B_MASK; /* XXX: should not be needed ? */
2725 } else
2726#endif
2727 {
2728 raise_exception_err(EXCP0D_GPF, 0);
2729 }
2730 } else {
2731 if ((new_ss & 3) != rpl)
2732 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2733 if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
2734 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2735 if (!(ss_e2 & DESC_S_MASK) ||
2736 (ss_e2 & DESC_CS_MASK) ||
2737 !(ss_e2 & DESC_W_MASK))
2738 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2739 dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2740 if (dpl != rpl)
2741 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2742 if (!(ss_e2 & DESC_P_MASK))
2743 raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
2744 cpu_x86_load_seg_cache(env, R_SS, new_ss,
2745 get_seg_base(ss_e1, ss_e2),
2746 get_seg_limit(ss_e1, ss_e2),
2747 ss_e2);
2748 }
2749
2750 cpu_x86_load_seg_cache(env, R_CS, new_cs,
2751 get_seg_base(e1, e2),
2752 get_seg_limit(e1, e2),
2753 e2);
2754 cpu_x86_set_cpl(env, rpl);
2755 sp = new_esp;
2756#ifdef TARGET_X86_64
2757 if (env->hflags & HF_CS64_MASK)
2758 sp_mask = -1;
2759 else
2760#endif
2761 sp_mask = get_sp_mask(ss_e2);
2762
2763 /* validate data segments */
2764 validate_seg(R_ES, rpl);
2765 validate_seg(R_DS, rpl);
2766 validate_seg(R_FS, rpl);
2767 validate_seg(R_GS, rpl);
2768
2769 sp += addend;
2770 }
2771 SET_ESP(sp, sp_mask);
2772 env->eip = new_eip;
2773 if (is_iret) {
2774 /* NOTE: 'cpl' is the _old_ CPL */
2775 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
2776 if (cpl == 0)
2777#ifdef VBOX
2778 eflags_mask |= IOPL_MASK | VIF_MASK | VIP_MASK;
2779#else
2780 eflags_mask |= IOPL_MASK;
2781#endif
2782 iopl = (env->eflags >> IOPL_SHIFT) & 3;
2783 if (cpl <= iopl)
2784 eflags_mask |= IF_MASK;
2785 if (shift == 0)
2786 eflags_mask &= 0xffff;
2787 load_eflags(new_eflags, eflags_mask);
2788 }
2789 return;
2790
2791 return_to_vm86:
2792
2793#if 0 // defined(VBOX) && defined(DEBUG)
2794 printf("V86: new CS %04X\n", new_cs);
2795 printf("V86: Descriptor %08X:%08X\n", e2, e1);
2796 printf("V86: new EIP %08X\n", new_eip);
2797 printf("V86: new EFLAGS %08X\n", new_eflags);
2798#endif
2799
2800 POPL(ssp, sp, sp_mask, new_esp);
2801 POPL(ssp, sp, sp_mask, new_ss);
2802 POPL(ssp, sp, sp_mask, new_es);
2803 POPL(ssp, sp, sp_mask, new_ds);
2804 POPL(ssp, sp, sp_mask, new_fs);
2805 POPL(ssp, sp, sp_mask, new_gs);
2806
2807 /* modify processor state */
2808 load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
2809 IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
2810 load_seg_vm(R_CS, new_cs & 0xffff);
2811 cpu_x86_set_cpl(env, 3);
2812 load_seg_vm(R_SS, new_ss & 0xffff);
2813 load_seg_vm(R_ES, new_es & 0xffff);
2814 load_seg_vm(R_DS, new_ds & 0xffff);
2815 load_seg_vm(R_FS, new_fs & 0xffff);
2816 load_seg_vm(R_GS, new_gs & 0xffff);
2817
2818 env->eip = new_eip & 0xffff;
2819 ESP = new_esp;
2820}
2821
2822void helper_iret_protected(int shift, int next_eip)
2823{
2824 int tss_selector, type;
2825 uint32_t e1, e2;
2826
2827#ifdef VBOX
2828 remR3TrapClear(env->pVM);
2829#endif
2830
2831 /* specific case for TSS */
2832 if (env->eflags & NT_MASK) {
2833#ifdef TARGET_X86_64
2834 if (env->hflags & HF_LMA_MASK)
2835 raise_exception_err(EXCP0D_GPF, 0);
2836#endif
2837 tss_selector = lduw_kernel(env->tr.base + 0);
2838 if (tss_selector & 4)
2839 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
2840 if (load_segment(&e1, &e2, tss_selector) != 0)
2841 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
2842 type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
2843 /* NOTE: we check both segment and busy TSS */
2844 if (type != 3)
2845 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
2846 switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
2847 } else {
2848 helper_ret_protected(shift, 1, 0);
2849 }
2850#ifdef USE_KQEMU
2851 if (kqemu_is_ok(env)) {
2852 CC_OP = CC_OP_EFLAGS;
2853 env->exception_index = -1;
2854 cpu_loop_exit();
2855 }
2856#endif
2857}
2858
2859void helper_lret_protected(int shift, int addend)
2860{
2861 helper_ret_protected(shift, 0, addend);
2862#ifdef USE_KQEMU
2863 if (kqemu_is_ok(env)) {
2864 env->exception_index = -1;
2865 cpu_loop_exit();
2866 }
2867#endif
2868}
2869
2870void helper_sysenter(void)
2871{
2872 if (env->sysenter_cs == 0) {
2873 raise_exception_err(EXCP0D_GPF, 0);
2874 }
2875 env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
2876 cpu_x86_set_cpl(env, 0);
2877 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
2878 0, 0xffffffff,
2879 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2880 DESC_S_MASK |
2881 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2882 cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
2883 0, 0xffffffff,
2884 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2885 DESC_S_MASK |
2886 DESC_W_MASK | DESC_A_MASK);
2887 ESP = env->sysenter_esp;
2888 EIP = env->sysenter_eip;
2889}
2890
2891void helper_sysexit(void)
2892{
2893 int cpl;
2894
2895 cpl = env->hflags & HF_CPL_MASK;
2896 if (env->sysenter_cs == 0 || cpl != 0) {
2897 raise_exception_err(EXCP0D_GPF, 0);
2898 }
2899 cpu_x86_set_cpl(env, 3);
2900 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
2901 0, 0xffffffff,
2902 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2903 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2904 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2905 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
2906 0, 0xffffffff,
2907 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2908 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2909 DESC_W_MASK | DESC_A_MASK);
2910 ESP = ECX;
2911 EIP = EDX;
2912#ifdef USE_KQEMU
2913 if (kqemu_is_ok(env)) {
2914 env->exception_index = -1;
2915 cpu_loop_exit();
2916 }
2917#endif
2918}
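
/* Added summary: SYSENTER/SYSEXIT derive all four selectors from the single
   MSR IA32_SYSENTER_CS, which is the layout the two helpers above encode:
       SYSENTER:  CS = base            SS = base + 8       (ring 0)
       SYSEXIT:   CS = base + 16 | 3   SS = base + 24 | 3  (ring 3)
   so a kernel programs one value and gets a consistent flat-segment set. */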
2919
2920void helper_movl_crN_T0(int reg)
2921{
2922#if !defined(CONFIG_USER_ONLY)
2923 switch(reg) {
2924 case 0:
2925 cpu_x86_update_cr0(env, T0);
2926 break;
2927 case 3:
2928 cpu_x86_update_cr3(env, T0);
2929 break;
2930 case 4:
2931 cpu_x86_update_cr4(env, T0);
2932 break;
2933 case 8:
2934 cpu_set_apic_tpr(env, T0);
2935 break;
2936 default:
2937 env->cr[reg] = T0;
2938 break;
2939 }
2940#endif
2941}
2942
2943/* XXX: do more */
2944void helper_movl_drN_T0(int reg)
2945{
2946 env->dr[reg] = T0;
2947}
2948
2949void helper_invlpg(target_ulong addr)
2950{
2951 cpu_x86_flush_tlb(env, addr);
2952}
2953
2954void helper_rdtsc(void)
2955{
2956 uint64_t val;
2957
2958 if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
2959 raise_exception(EXCP0D_GPF);
2960 }
2961 val = cpu_get_tsc(env);
2962 EAX = (uint32_t)(val);
2963 EDX = (uint32_t)(val >> 32);
2964}
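
/* Added sketch: the guest recombines the RDTSC result from the split done
   above, e.g.: */
#if 0
uint64_t tsc = ((uint64_t)EDX << 32) | (uint32_t)EAX;
#endif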
2965
2966#if defined(CONFIG_USER_ONLY)
2967void helper_wrmsr(void)
2968{
2969}
2970
2971void helper_rdmsr(void)
2972{
2973}
2974#else
2975void helper_wrmsr(void)
2976{
2977 uint64_t val;
2978
2979 val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
2980
2981 switch((uint32_t)ECX) {
2982 case MSR_IA32_SYSENTER_CS:
2983 env->sysenter_cs = val & 0xffff;
2984 break;
2985 case MSR_IA32_SYSENTER_ESP:
2986 env->sysenter_esp = val;
2987 break;
2988 case MSR_IA32_SYSENTER_EIP:
2989 env->sysenter_eip = val;
2990 break;
2991 case MSR_IA32_APICBASE:
2992 cpu_set_apic_base(env, val);
2993 break;
2994 case MSR_EFER:
2995 {
2996 uint64_t update_mask;
2997 update_mask = 0;
2998 if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL)
2999 update_mask |= MSR_EFER_SCE;
3000 if (env->cpuid_ext2_features & CPUID_EXT2_LM)
3001 update_mask |= MSR_EFER_LME;
3002 if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
3003 update_mask |= MSR_EFER_FFXSR;
3004 if (env->cpuid_ext2_features & CPUID_EXT2_NX)
3005 update_mask |= MSR_EFER_NXE;
3006 env->efer = (env->efer & ~update_mask) |
3007 (val & update_mask);
3008 }
3009 break;
3010 case MSR_STAR:
3011 env->star = val;
3012 break;
3013 case MSR_PAT:
3014 env->pat = val;
3015 break;
3016#ifdef TARGET_X86_64
3017 case MSR_LSTAR:
3018 env->lstar = val;
3019 break;
3020 case MSR_CSTAR:
3021 env->cstar = val;
3022 break;
3023 case MSR_FMASK:
3024 env->fmask = val;
3025 break;
3026 case MSR_FSBASE:
3027 env->segs[R_FS].base = val;
3028 break;
3029 case MSR_GSBASE:
3030 env->segs[R_GS].base = val;
3031 break;
3032 case MSR_KERNELGSBASE:
3033 env->kernelgsbase = val;
3034 break;
3035#endif
3036 default:
3037 /* XXX: exception ? */
3038 break;
3039 }
3040}
3041
3042void helper_rdmsr(void)
3043{
3044 uint64_t val;
3045 switch((uint32_t)ECX) {
3046 case MSR_IA32_SYSENTER_CS:
3047 val = env->sysenter_cs;
3048 break;
3049 case MSR_IA32_SYSENTER_ESP:
3050 val = env->sysenter_esp;
3051 break;
3052 case MSR_IA32_SYSENTER_EIP:
3053 val = env->sysenter_eip;
3054 break;
3055 case MSR_IA32_APICBASE:
3056 val = cpu_get_apic_base(env);
3057 break;
3058 case MSR_EFER:
3059 val = env->efer;
3060 break;
3061 case MSR_STAR:
3062 val = env->star;
3063 break;
3064 case MSR_PAT:
3065 val = env->pat;
3066 break;
3067#ifdef TARGET_X86_64
3068 case MSR_LSTAR:
3069 val = env->lstar;
3070 break;
3071 case MSR_CSTAR:
3072 val = env->cstar;
3073 break;
3074 case MSR_FMASK:
3075 val = env->fmask;
3076 break;
3077 case MSR_FSBASE:
3078 val = env->segs[R_FS].base;
3079 break;
3080 case MSR_GSBASE:
3081 val = env->segs[R_GS].base;
3082 break;
3083 case MSR_KERNELGSBASE:
3084 val = env->kernelgsbase;
3085 break;
3086#endif
3087 default:
3088 /* XXX: exception ? */
3089 val = 0;
3090 break;
3091 }
3092 EAX = (uint32_t)(val);
3093 EDX = (uint32_t)(val >> 32);
3094}
3095#endif
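
/* Added worked example for the MSR_EFER case in helper_wrmsr(): the masked
   update lets the guest toggle only bits the virtual CPU advertises. With
   only SYSCALL and NX reported, update_mask = SCE|NXE = 0x801, so writing
   val = 0x901 (SCE|LME|NXE) yields
       efer = (efer & ~0x801) | (0x901 & 0x801) = SCE|NXE
   and the unsupported LME bit is silently dropped. */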
3096
3097void helper_lsl(void)
3098{
3099 unsigned int selector, limit;
3100 uint32_t e1, e2, eflags;
3101 int rpl, dpl, cpl, type;
3102
3103 eflags = cc_table[CC_OP].compute_all();
3104 selector = T0 & 0xffff;
3105 if (load_segment(&e1, &e2, selector) != 0)
3106 goto fail;
3107 rpl = selector & 3;
3108 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3109 cpl = env->hflags & HF_CPL_MASK;
3110 if (e2 & DESC_S_MASK) {
3111 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3112 /* conforming */
3113 } else {
3114 if (dpl < cpl || dpl < rpl)
3115 goto fail;
3116 }
3117 } else {
3118 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
3119 switch(type) {
3120 case 1:
3121 case 2:
3122 case 3:
3123 case 9:
3124 case 11:
3125 break;
3126 default:
3127 goto fail;
3128 }
3129 if (dpl < cpl || dpl < rpl) {
3130 fail:
3131 CC_SRC = eflags & ~CC_Z;
3132 return;
3133 }
3134 }
3135 limit = get_seg_limit(e1, e2);
3136 T1 = limit;
3137 CC_SRC = eflags | CC_Z;
3138}
3139
3140void helper_lar(void)
3141{
3142 unsigned int selector;
3143 uint32_t e1, e2, eflags;
3144 int rpl, dpl, cpl, type;
3145
3146 eflags = cc_table[CC_OP].compute_all();
3147 selector = T0 & 0xffff;
3148 if ((selector & 0xfffc) == 0)
3149 goto fail;
3150 if (load_segment(&e1, &e2, selector) != 0)
3151 goto fail;
3152 rpl = selector & 3;
3153 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3154 cpl = env->hflags & HF_CPL_MASK;
3155 if (e2 & DESC_S_MASK) {
3156 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3157 /* conforming */
3158 } else {
3159 if (dpl < cpl || dpl < rpl)
3160 goto fail;
3161 }
3162 } else {
3163 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
3164 switch(type) {
3165 case 1:
3166 case 2:
3167 case 3:
3168 case 4:
3169 case 5:
3170 case 9:
3171 case 11:
3172 case 12:
3173 break;
3174 default:
3175 goto fail;
3176 }
3177 if (dpl < cpl || dpl < rpl) {
3178 fail:
3179 CC_SRC = eflags & ~CC_Z;
3180 return;
3181 }
3182 }
3183 T1 = e2 & 0x00f0ff00;
3184 CC_SRC = eflags | CC_Z;
3185}
3186
3187void helper_verr(void)
3188{
3189 unsigned int selector;
3190 uint32_t e1, e2, eflags;
3191 int rpl, dpl, cpl;
3192
3193 eflags = cc_table[CC_OP].compute_all();
3194 selector = T0 & 0xffff;
3195 if ((selector & 0xfffc) == 0)
3196 goto fail;
3197 if (load_segment(&e1, &e2, selector) != 0)
3198 goto fail;
3199 if (!(e2 & DESC_S_MASK))
3200 goto fail;
3201 rpl = selector & 3;
3202 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3203 cpl = env->hflags & HF_CPL_MASK;
3204 if (e2 & DESC_CS_MASK) {
3205 if (!(e2 & DESC_R_MASK))
3206 goto fail;
3207 if (!(e2 & DESC_C_MASK)) {
3208 if (dpl < cpl || dpl < rpl)
3209 goto fail;
3210 }
3211 } else {
3212 if (dpl < cpl || dpl < rpl) {
3213 fail:
3214 CC_SRC = eflags & ~CC_Z;
3215 return;
3216 }
3217 }
3218 CC_SRC = eflags | CC_Z;
3219}
3220
3221void helper_verw(void)
3222{
3223 unsigned int selector;
3224 uint32_t e1, e2, eflags;
3225 int rpl, dpl, cpl;
3226
3227 eflags = cc_table[CC_OP].compute_all();
3228 selector = T0 & 0xffff;
3229 if ((selector & 0xfffc) == 0)
3230 goto fail;
3231 if (load_segment(&e1, &e2, selector) != 0)
3232 goto fail;
3233 if (!(e2 & DESC_S_MASK))
3234 goto fail;
3235 rpl = selector & 3;
3236 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3237 cpl = env->hflags & HF_CPL_MASK;
3238 if (e2 & DESC_CS_MASK) {
3239 goto fail;
3240 } else {
3241 if (dpl < cpl || dpl < rpl)
3242 goto fail;
3243 if (!(e2 & DESC_W_MASK)) {
3244 fail:
3245 CC_SRC = eflags & ~CC_Z;
3246 return;
3247 }
3248 }
3249 CC_SRC = eflags | CC_Z;
3250}
3251
3252/* FPU helpers */
3253
3254void helper_fldt_ST0_A0(void)
3255{
3256 int new_fpstt;
3257 new_fpstt = (env->fpstt - 1) & 7;
3258 env->fpregs[new_fpstt].d = helper_fldt(A0);
3259 env->fpstt = new_fpstt;
3260 env->fptags[new_fpstt] = 0; /* validate stack entry */
3261}
3262
3263void helper_fstt_ST0_A0(void)
3264{
3265 helper_fstt(ST0, A0);
3266}
3267
3268void fpu_set_exception(int mask)
3269{
3270 env->fpus |= mask;
3271 if (env->fpus & (~env->fpuc & FPUC_EM))
3272 env->fpus |= FPUS_SE | FPUS_B;
3273}
3274
3275CPU86_LDouble helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
3276{
3277 if (b == 0.0)
3278 fpu_set_exception(FPUS_ZE);
3279 return a / b;
3280}
3281
3282void fpu_raise_exception(void)
3283{
3284 if (env->cr[0] & CR0_NE_MASK) {
3285 raise_exception(EXCP10_COPR);
3286 }
3287#if !defined(CONFIG_USER_ONLY)
3288 else {
3289 cpu_set_ferr(env);
3290 }
3291#endif
3292}
3293
3294/* BCD ops */
3295
3296void helper_fbld_ST0_A0(void)
3297{
3298 CPU86_LDouble tmp;
3299 uint64_t val;
3300 unsigned int v;
3301 int i;
3302
3303 val = 0;
3304 for(i = 8; i >= 0; i--) {
3305 v = ldub(A0 + i);
3306 val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
3307 }
3308 tmp = val;
3309 if (ldub(A0 + 9) & 0x80)
3310 tmp = -tmp;
3311 fpush();
3312 ST0 = tmp;
3313}
3314
3315void helper_fbst_ST0_A0(void)
3316{
3317 int v;
3318 target_ulong mem_ref, mem_end;
3319 int64_t val;
3320
3321 val = floatx_to_int64(ST0, &env->fp_status);
3322 mem_ref = A0;
3323 mem_end = mem_ref + 9;
3324 if (val < 0) {
3325 stb(mem_end, 0x80);
3326 val = -val;
3327 } else {
3328 stb(mem_end, 0x00);
3329 }
3330 while (mem_ref < mem_end) {
3331 if (val == 0)
3332 break;
3333 v = val % 100;
3334 val = val / 100;
3335 v = ((v / 10) << 4) | (v % 10);
3336 stb(mem_ref++, v);
3337 }
3338 while (mem_ref < mem_end) {
3339 stb(mem_ref++, 0);
3340 }
3341}
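
/* Added worked example: for ST0 = 1234 the loop above emits val % 100 digit
   pairs low byte first -- 34 then 12, each packed as two BCD nibbles -- so
   the 10-byte result is
       0x34 0x12 0x00 ... 0x00 | sign byte 0x00 at offset 9
   and a negative value differs only in that sign byte (0x80). */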
3342
3343void helper_f2xm1(void)
3344{
3345 ST0 = pow(2.0,ST0) - 1.0;
3346}
3347
3348void helper_fyl2x(void)
3349{
3350 CPU86_LDouble fptemp;
3351
3352 fptemp = ST0;
3353 if (fptemp>0.0){
3354 fptemp = log(fptemp)/log(2.0); /* log2(ST) */
3355 ST1 *= fptemp;
3356 fpop();
3357 } else {
3358 env->fpus &= (~0x4700);
3359 env->fpus |= 0x400;
3360 }
3361}
3362
3363void helper_fptan(void)
3364{
3365 CPU86_LDouble fptemp;
3366
3367 fptemp = ST0;
3368 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
3369 env->fpus |= 0x400;
3370 } else {
3371 ST0 = tan(fptemp);
3372 fpush();
3373 ST0 = 1.0;
3374 env->fpus &= (~0x400); /* C2 <-- 0 */
3375 /* the above code is for |arg| < 2**52 only */
3376 }
3377}
3378
3379void helper_fpatan(void)
3380{
3381 CPU86_LDouble fptemp, fpsrcop;
3382
3383 fpsrcop = ST1;
3384 fptemp = ST0;
3385 ST1 = atan2(fpsrcop,fptemp);
3386 fpop();
3387}
3388
3389void helper_fxtract(void)
3390{
3391 CPU86_LDoubleU temp;
3392 unsigned int expdif;
3393
3394 temp.d = ST0;
3395 expdif = EXPD(temp) - EXPBIAS;
3396 /*DP exponent bias*/
3397 ST0 = expdif;
3398 fpush();
3399 BIASEXPONENT(temp);
3400 ST0 = temp.d;
3401}
3402
3403void helper_fprem1(void)
3404{
3405 CPU86_LDouble dblq, fpsrcop, fptemp;
3406 CPU86_LDoubleU fpsrcop1, fptemp1;
3407 int expdif;
3408 int q;
3409
3410 fpsrcop = ST0;
3411 fptemp = ST1;
3412 fpsrcop1.d = fpsrcop;
3413 fptemp1.d = fptemp;
3414 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
3415 if (expdif < 53) {
3416 dblq = fpsrcop / fptemp;
3417 dblq = (dblq < 0.0)? ceil(dblq): floor(dblq);
3418 ST0 = fpsrcop - fptemp*dblq;
3419 q = (int)dblq; /* cutting off top bits is assumed here */
3420 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3421 /* (C0,C1,C3) <-- (q2,q1,q0) */
3422 env->fpus |= (q&0x4) << 6; /* (C0) <-- q2 */
3423 env->fpus |= (q&0x2) << 8; /* (C1) <-- q1 */
3424 env->fpus |= (q&0x1) << 14; /* (C3) <-- q0 */
3425 } else {
3426 env->fpus |= 0x400; /* C2 <-- 1 */
3427 fptemp = pow(2.0, expdif-50);
3428 fpsrcop = (ST0 / ST1) / fptemp;
3429 /* fpsrcop = integer obtained by rounding to the nearest */
3430 fpsrcop = (fpsrcop-floor(fpsrcop) < ceil(fpsrcop)-fpsrcop)?
3431 floor(fpsrcop): ceil(fpsrcop);
3432 ST0 -= (ST1 * fpsrcop * fptemp);
3433 }
3434}
3435
3436void helper_fprem(void)
3437{
3438 CPU86_LDouble dblq, fpsrcop, fptemp;
3439 CPU86_LDoubleU fpsrcop1, fptemp1;
3440 int expdif;
3441 int q;
3442
3443 fpsrcop = ST0;
3444 fptemp = ST1;
3445 fpsrcop1.d = fpsrcop;
3446 fptemp1.d = fptemp;
3447 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
3448 if ( expdif < 53 ) {
3449 dblq = fpsrcop / fptemp;
3450 dblq = (dblq < 0.0)? ceil(dblq): floor(dblq);
3451 ST0 = fpsrcop - fptemp*dblq;
3452 q = (int)dblq; /* cutting off top bits is assumed here */
3453 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3454 /* (C0,C1,C3) <-- (q2,q1,q0) */
3455 env->fpus |= (q&0x4) << 6; /* (C0) <-- q2 */
3456 env->fpus |= (q&0x2) << 8; /* (C1) <-- q1 */
3457 env->fpus |= (q&0x1) << 14; /* (C3) <-- q0 */
3458 } else {
3459 env->fpus |= 0x400; /* C2 <-- 1 */
3460 fptemp = pow(2.0, expdif-50);
3461 fpsrcop = (ST0 / ST1) / fptemp;
3462 /* fpsrcop = integer obtained by chopping */
3463 fpsrcop = (fpsrcop < 0.0)?
3464 -(floor(fabs(fpsrcop))): floor(fpsrcop);
3465 ST0 -= (ST1 * fpsrcop * fptemp);
3466 }
3467}
3468
3469void helper_fyl2xp1(void)
3470{
3471 CPU86_LDouble fptemp;
3472
3473 fptemp = ST0;
3474 if ((fptemp+1.0)>0.0) {
3475 fptemp = log(fptemp+1.0) / log(2.0); /* log2(ST+1.0) */
3476 ST1 *= fptemp;
3477 fpop();
3478 } else {
3479 env->fpus &= (~0x4700);
3480 env->fpus |= 0x400;
3481 }
3482}
3483
3484void helper_fsqrt(void)
3485{
3486 CPU86_LDouble fptemp;
3487
3488 fptemp = ST0;
3489 if (fptemp<0.0) {
3490 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3491 env->fpus |= 0x400;
3492 }
3493 ST0 = sqrt(fptemp);
3494}
3495
3496void helper_fsincos(void)
3497{
3498 CPU86_LDouble fptemp;
3499
3500 fptemp = ST0;
3501 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
3502 env->fpus |= 0x400;
3503 } else {
3504 ST0 = sin(fptemp);
3505 fpush();
3506 ST0 = cos(fptemp);
3507 env->fpus &= (~0x400); /* C2 <-- 0 */
3508 /* the above code is for |arg| < 2**63 only */
3509 }
3510}
3511
3512void helper_frndint(void)
3513{
3514 ST0 = floatx_round_to_int(ST0, &env->fp_status);
3515}
3516
3517void helper_fscale(void)
3518{
3519 ST0 = ldexp (ST0, (int)(ST1));
3520}
3521
3522void helper_fsin(void)
3523{
3524 CPU86_LDouble fptemp;
3525
3526 fptemp = ST0;
3527 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
3528 env->fpus |= 0x400;
3529 } else {
3530 ST0 = sin(fptemp);
3531 env->fpus &= (~0x400); /* C2 <-- 0 */
3532 /* the above code is for |arg| < 2**53 only */
3533 }
3534}
3535
3536void helper_fcos(void)
3537{
3538 CPU86_LDouble fptemp;
3539
3540 fptemp = ST0;
3541 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
3542 env->fpus |= 0x400;
3543 } else {
3544 ST0 = cos(fptemp);
3545 env->fpus &= (~0x400); /* C2 <-- 0 */
3546 /* the above code is for |arg| < 2**63 only */
3547 }
3548}
3549
3550void helper_fxam_ST0(void)
3551{
3552 CPU86_LDoubleU temp;
3553 int expdif;
3554
3555 temp.d = ST0;
3556
3557 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3558 if (SIGND(temp))
3559 env->fpus |= 0x200; /* C1 <-- 1 */
3560
3561 /* XXX: test fptags too */
3562 expdif = EXPD(temp);
3563 if (expdif == MAXEXPD) {
3564#ifdef USE_X86LDOUBLE
3565 if (MANTD(temp) == 0x8000000000000000ULL)
3566#else
3567 if (MANTD(temp) == 0)
3568#endif
3569 env->fpus |= 0x500 /*Infinity*/;
3570 else
3571 env->fpus |= 0x100 /*NaN*/;
3572 } else if (expdif == 0) {
3573 if (MANTD(temp) == 0)
3574 env->fpus |= 0x4000 /*Zero*/;
3575 else
3576 env->fpus |= 0x4400 /*Denormal*/;
3577 } else {
3578 env->fpus |= 0x400;
3579 }
3580}
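
/* Added summary of the FXAM condition codes set above (status-word bits
   C0=0x100, C1=0x200, C2=0x400, C3=0x4000; C1 holds the sign):
       0x100 NaN    0x400 normal    0x500 infinity
       0x4000 zero  0x4400 denormal */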
3581
3582void helper_fstenv(target_ulong ptr, int data32)
3583{
3584 int fpus, fptag, exp, i;
3585 uint64_t mant;
3586 CPU86_LDoubleU tmp;
3587
3588 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
3589 fptag = 0;
3590 for (i=7; i>=0; i--) {
3591 fptag <<= 2;
3592 if (env->fptags[i]) {
3593 fptag |= 3;
3594 } else {
3595 tmp.d = env->fpregs[i].d;
3596 exp = EXPD(tmp);
3597 mant = MANTD(tmp);
3598 if (exp == 0 && mant == 0) {
3599 /* zero */
3600 fptag |= 1;
3601 } else if (exp == 0 || exp == MAXEXPD
3602#ifdef USE_X86LDOUBLE
3603 || (mant & (1LL << 63)) == 0
3604#endif
3605 ) {
3606 /* NaNs, infinity, denormal */
3607 fptag |= 2;
3608 }
3609 }
3610 }
3611 if (data32) {
3612 /* 32 bit */
3613 stl(ptr, env->fpuc);
3614 stl(ptr + 4, fpus);
3615 stl(ptr + 8, fptag);
3616 stl(ptr + 12, 0); /* fpip */
3617 stl(ptr + 16, 0); /* fpcs */
3618 stl(ptr + 20, 0); /* fpoo */
3619 stl(ptr + 24, 0); /* fpos */
3620 } else {
3621 /* 16 bit */
3622 stw(ptr, env->fpuc);
3623 stw(ptr + 2, fpus);
3624 stw(ptr + 4, fptag);
3625 stw(ptr + 6, 0);
3626 stw(ptr + 8, 0);
3627 stw(ptr + 10, 0);
3628 stw(ptr + 12, 0);
3629 }
3630}
3631
3632void helper_fldenv(target_ulong ptr, int data32)
3633{
3634 int i, fpus, fptag;
3635
3636 if (data32) {
3637 env->fpuc = lduw(ptr);
3638 fpus = lduw(ptr + 4);
3639 fptag = lduw(ptr + 8);
3640 }
3641 else {
3642 env->fpuc = lduw(ptr);
3643 fpus = lduw(ptr + 2);
3644 fptag = lduw(ptr + 4);
3645 }
3646 env->fpstt = (fpus >> 11) & 7;
3647 env->fpus = fpus & ~0x3800;
3648 for(i = 0;i < 8; i++) {
3649 env->fptags[i] = ((fptag & 3) == 3);
3650 fptag >>= 2;
3651 }
3652}
3653
3654void helper_fsave(target_ulong ptr, int data32)
3655{
3656 CPU86_LDouble tmp;
3657 int i;
3658
3659 helper_fstenv(ptr, data32);
3660
3661 ptr += (14 << data32);
3662 for(i = 0;i < 8; i++) {
3663 tmp = ST(i);
3664 helper_fstt(tmp, ptr);
3665 ptr += 10;
3666 }
3667
3668 /* fninit */
3669 env->fpus = 0;
3670 env->fpstt = 0;
3671 env->fpuc = 0x37f;
3672 env->fptags[0] = 1;
3673 env->fptags[1] = 1;
3674 env->fptags[2] = 1;
3675 env->fptags[3] = 1;
3676 env->fptags[4] = 1;
3677 env->fptags[5] = 1;
3678 env->fptags[6] = 1;
3679 env->fptags[7] = 1;
3680}
3681
3682void helper_frstor(target_ulong ptr, int data32)
3683{
3684 CPU86_LDouble tmp;
3685 int i;
3686
3687 helper_fldenv(ptr, data32);
3688 ptr += (14 << data32);
3689
3690 for(i = 0;i < 8; i++) {
3691 tmp = helper_fldt(ptr);
3692 ST(i) = tmp;
3693 ptr += 10;
3694 }
3695}
3696
3697void helper_fxsave(target_ulong ptr, int data64)
3698{
3699 int fpus, fptag, i, nb_xmm_regs;
3700 CPU86_LDouble tmp;
3701 target_ulong addr;
3702
3703 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
3704 fptag = 0;
3705 for(i = 0; i < 8; i++) {
3706 fptag |= (env->fptags[i] << i);
3707 }
3708 stw(ptr, env->fpuc);
3709 stw(ptr + 2, fpus);
3710 stw(ptr + 4, fptag ^ 0xff);
3711
3712 addr = ptr + 0x20;
3713 for(i = 0;i < 8; i++) {
3714 tmp = ST(i);
3715 helper_fstt(tmp, addr);
3716 addr += 16;
3717 }
3718
3719 if (env->cr[4] & CR4_OSFXSR_MASK) {
3720 /* XXX: finish it */
3721 stl(ptr + 0x18, env->mxcsr); /* mxcsr */
3722 stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
3723 nb_xmm_regs = 8 << data64;
3724 addr = ptr + 0xa0;
3725 for(i = 0; i < nb_xmm_regs; i++) {
3726 stq(addr, env->xmm_regs[i].XMM_Q(0));
3727 stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
3728 addr += 16;
3729 }
3730 }
3731}
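
/* Added summary of the FXSAVE image offsets used above (only the fields
   this helper fills in):
       0x00 FCW     0x02 FSW      0x04 FTW (abridged)
       0x18 MXCSR   0x1c MXCSR_MASK
       0x20 ST0..ST7   (16 bytes each)
       0xa0 XMM0..XMM7 (XMM8..15 too when data64 is set)
   Note the abridged tag word: the 2-bit tags are compressed to one
   valid/empty bit per register and inverted (fptag ^ 0xff) on store. */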
3732
3733void helper_fxrstor(target_ulong ptr, int data64)
3734{
3735 int i, fpus, fptag, nb_xmm_regs;
3736 CPU86_LDouble tmp;
3737 target_ulong addr;
3738
3739 env->fpuc = lduw(ptr);
3740 fpus = lduw(ptr + 2);
3741 fptag = lduw(ptr + 4);
3742 env->fpstt = (fpus >> 11) & 7;
3743 env->fpus = fpus & ~0x3800;
3744 fptag ^= 0xff;
3745 for(i = 0;i < 8; i++) {
3746 env->fptags[i] = ((fptag >> i) & 1);
3747 }
3748
3749 addr = ptr + 0x20;
3750 for(i = 0;i < 8; i++) {
3751 tmp = helper_fldt(addr);
3752 ST(i) = tmp;
3753 addr += 16;
3754 }
3755
3756 if (env->cr[4] & CR4_OSFXSR_MASK) {
3757 /* XXX: finish it */
3758 env->mxcsr = ldl(ptr + 0x18);
3759 //ldl(ptr + 0x1c);
3760 nb_xmm_regs = 8 << data64;
3761 addr = ptr + 0xa0;
3762 for(i = 0; i < nb_xmm_regs; i++) {
3763#if !defined(VBOX) || __GNUC__ < 4
3764 env->xmm_regs[i].XMM_Q(0) = ldq(addr);
3765 env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
3766#else /* VBOX + __GNUC__ >= 4: gcc 4.x compiler bug - it runs out of registers for the 64-bit value. */
3767# if 1
3768 env->xmm_regs[i].XMM_L(0) = ldl(addr);
3769 env->xmm_regs[i].XMM_L(1) = ldl(addr + 4);
3770 env->xmm_regs[i].XMM_L(2) = ldl(addr + 8);
3771 env->xmm_regs[i].XMM_L(3) = ldl(addr + 12);
3772# else
3773 /* this works fine on Mac OS X, gcc 4.0.1 */
3774 uint64_t u64 = ldq(addr);
3775 env->xmm_regs[i].XMM_Q(0) = u64;
3776 u64 = ldq(addr + 8);
3777 env->xmm_regs[i].XMM_Q(1) = u64;
3778# endif
3779#endif
3780 addr += 16;
3781 }
3782 }
3783}
3784
3785#ifndef USE_X86LDOUBLE
3786
3787void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
3788{
3789 CPU86_LDoubleU temp;
3790 int e;
3791
3792 temp.d = f;
3793 /* mantissa */
3794 *pmant = (MANTD(temp) << 11) | (1LL << 63);
3795 /* exponent + sign */
3796 e = EXPD(temp) - EXPBIAS + 16383;
3797 e |= SIGND(temp) >> 16;
3798 *pexp = e;
3799}
3800
3801CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
3802{
3803 CPU86_LDoubleU temp;
3804 int e;
3805 uint64_t ll;
3806
3807 /* XXX: handle overflow ? */
3808 e = (upper & 0x7fff) - 16383 + EXPBIAS; /* exponent */
3809 e |= (upper >> 4) & 0x800; /* sign */
3810 ll = (mant >> 11) & ((1LL << 52) - 1);
3811#ifdef __arm__
3812 temp.l.upper = (e << 20) | (ll >> 32);
3813 temp.l.lower = ll;
3814#else
3815 temp.ll = ll | ((uint64_t)e << 52);
3816#endif
3817 return temp.d;
3818}
3819
3820#else
3821
3822void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
3823{
3824 CPU86_LDoubleU temp;
3825
3826 temp.d = f;
3827 *pmant = temp.l.lower;
3828 *pexp = temp.l.upper;
3829}
3830
3831CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
3832{
3833 CPU86_LDoubleU temp;
3834
3835 temp.l.upper = upper;
3836 temp.l.lower = mant;
3837 return temp.d;
3838}
3839#endif
3840
3841#ifdef TARGET_X86_64
3842
3843//#define DEBUG_MULDIV
3844
3845static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
3846{
3847 *plow += a;
3848 /* carry test */
3849 if (*plow < a)
3850 (*phigh)++;
3851 *phigh += b;
3852}
3853
3854static void neg128(uint64_t *plow, uint64_t *phigh)
3855{
3856 *plow = ~ *plow;
3857 *phigh = ~ *phigh;
3858 add128(plow, phigh, 1, 0);
3859}
3860
3861static void mul64(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
3862{
3863 uint32_t a0, a1, b0, b1;
3864 uint64_t v;
3865
3866 a0 = a;
3867 a1 = a >> 32;
3868
3869 b0 = b;
3870 b1 = b >> 32;
3871
3872 v = (uint64_t)a0 * (uint64_t)b0;
3873 *plow = v;
3874 *phigh = 0;
3875
3876 v = (uint64_t)a0 * (uint64_t)b1;
3877 add128(plow, phigh, v << 32, v >> 32);
3878
3879 v = (uint64_t)a1 * (uint64_t)b0;
3880 add128(plow, phigh, v << 32, v >> 32);
3881
3882 v = (uint64_t)a1 * (uint64_t)b1;
3883 *phigh += v;
3884#ifdef DEBUG_MULDIV
3885 printf("mul: 0x%016" PRIx64 " * 0x%016" PRIx64 " = 0x%016" PRIx64 "%016" PRIx64 "\n",
3886 a, b, *phigh, *plow);
3887#endif
3888}
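
/* Added derivation: with a = a1*2^32 + a0 and b = b1*2^32 + b0,
       a*b = a1*b1*2^64 + (a1*b0 + a0*b1)*2^32 + a0*b0
   so mul64() accumulates each 32x32->64 partial product into the 128-bit
   result at the right shift via add128(), whose "*plow < a" test supplies
   the carry from the low into the high half. */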
3889
3890static void imul64(uint64_t *plow, uint64_t *phigh, int64_t a, int64_t b)
3891{
3892 int sa, sb;
3893 sa = (a < 0);
3894 if (sa)
3895 a = -a;
3896 sb = (b < 0);
3897 if (sb)
3898 b = -b;
3899 mul64(plow, phigh, a, b);
3900 if (sa ^ sb) {
3901 neg128(plow, phigh);
3902 }
3903}
3904
3905/* return TRUE if overflow */
3906static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
3907{
3908 uint64_t q, r, a1, a0;
3909 int i, qb, ab;
3910
3911 a0 = *plow;
3912 a1 = *phigh;
3913 if (a1 == 0) {
3914 q = a0 / b;
3915 r = a0 % b;
3916 *plow = q;
3917 *phigh = r;
3918 } else {
3919 if (a1 >= b)
3920 return 1;
3921 /* XXX: use a better algorithm */
3922 for(i = 0; i < 64; i++) {
3923 ab = a1 >> 63;
3924 a1 = (a1 << 1) | (a0 >> 63);
3925 if (ab || a1 >= b) {
3926 a1 -= b;
3927 qb = 1;
3928 } else {
3929 qb = 0;
3930 }
3931 a0 = (a0 << 1) | qb;
3932 }
3933#if defined(DEBUG_MULDIV)
3934 printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64 ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n",
3935 *phigh, *plow, b, a0, a1);
3936#endif
3937 *plow = a0;
3938 *phigh = a1;
3939 }
3940 return 0;
3941}
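
/* Added note: the loop above is restoring long division in base 2. Each of
   the 64 iterations shifts the remainder:quotient pair (a1:a0) left one
   bit, subtracts b from the high half when it fits, and shifts the
   resulting quotient bit into a0; afterwards a0 is the quotient and a1 the
   remainder. The 'ab' bit catches a shifted-out top bit of a1, i.e. a
   2^64 term that always exceeds b. */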
3942
3943/* return TRUE if overflow */
3944static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
3945{
3946 int sa, sb;
3947 sa = ((int64_t)*phigh < 0);
3948 if (sa)
3949 neg128(plow, phigh);
3950 sb = (b < 0);
3951 if (sb)
3952 b = -b;
3953 if (div64(plow, phigh, b) != 0)
3954 return 1;
3955 if (sa ^ sb) {
3956 if (*plow > (1ULL << 63))
3957 return 1;
3958 *plow = - *plow;
3959 } else {
3960 if (*plow >= (1ULL << 63))
3961 return 1;
3962 }
3963 if (sa)
3964 *phigh = - *phigh;
3965 return 0;
3966}
3967
3968void helper_mulq_EAX_T0(void)
3969{
3970 uint64_t r0, r1;
3971
3972 mul64(&r0, &r1, EAX, T0);
3973 EAX = r0;
3974 EDX = r1;
3975 CC_DST = r0;
3976 CC_SRC = r1;
3977}
3978
3979void helper_imulq_EAX_T0(void)
3980{
3981 uint64_t r0, r1;
3982
3983 imul64(&r0, &r1, EAX, T0);
3984 EAX = r0;
3985 EDX = r1;
3986 CC_DST = r0;
3987 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
3988}
3989
3990void helper_imulq_T0_T1(void)
3991{
3992 uint64_t r0, r1;
3993
3994 imul64(&r0, &r1, T0, T1);
3995 T0 = r0;
3996 CC_DST = r0;
3997 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
3998}
3999
4000void helper_divq_EAX_T0(void)
4001{
4002 uint64_t r0, r1;
4003 if (T0 == 0) {
4004 raise_exception(EXCP00_DIVZ);
4005 }
4006 r0 = EAX;
4007 r1 = EDX;
4008 if (div64(&r0, &r1, T0))
4009 raise_exception(EXCP00_DIVZ);
4010 EAX = r0;
4011 EDX = r1;
4012}
4013
4014void helper_idivq_EAX_T0(void)
4015{
4016 uint64_t r0, r1;
4017 if (T0 == 0) {
4018 raise_exception(EXCP00_DIVZ);
4019 }
4020 r0 = EAX;
4021 r1 = EDX;
4022 if (idiv64(&r0, &r1, T0))
4023 raise_exception(EXCP00_DIVZ);
4024 EAX = r0;
4025 EDX = r1;
4026}
4027
4028void helper_bswapq_T0(void)
4029{
4030 T0 = bswap64(T0);
4031}
4032#endif
4033
4034void helper_hlt(void)
4035{
4036 env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
4037 env->hflags |= HF_HALTED_MASK;
4038 env->exception_index = EXCP_HLT;
4039 cpu_loop_exit();
4040}
4041
4042void helper_monitor(void)
4043{
4044 if ((uint32_t)ECX != 0)
4045 raise_exception(EXCP0D_GPF);
4046 /* XXX: store address ? */
4047}
4048
4049void helper_mwait(void)
4050{
4051 if ((uint32_t)ECX != 0)
4052 raise_exception(EXCP0D_GPF);
4053#ifdef VBOX
4054 helper_hlt();
4055#else
4056 /* XXX: not complete but not completely erroneous */
4057 if (env->cpu_index != 0 || env->next_cpu != NULL) {
4058 /* more than one CPU: do not sleep because another CPU may
4059 wake this one */
4060 } else {
4061 helper_hlt();
4062 }
4063#endif
4064}
4065
4066float approx_rsqrt(float a)
4067{
4068 return 1.0 / sqrt(a);
4069}
4070
4071float approx_rcp(float a)
4072{
4073 return 1.0 / a;
4074}
4075
4076void update_fp_status(void)
4077{
4078 int rnd_type;
4079
4080 /* set rounding mode */
4081 switch(env->fpuc & RC_MASK) {
4082 default:
4083 case RC_NEAR:
4084 rnd_type = float_round_nearest_even;
4085 break;
4086 case RC_DOWN:
4087 rnd_type = float_round_down;
4088 break;
4089 case RC_UP:
4090 rnd_type = float_round_up;
4091 break;
4092 case RC_CHOP:
4093 rnd_type = float_round_to_zero;
4094 break;
4095 }
4096 set_float_rounding_mode(rnd_type, &env->fp_status);
4097#ifdef FLOATX80
4098 switch((env->fpuc >> 8) & 3) {
4099 case 0:
4100 rnd_type = 32;
4101 break;
4102 case 2:
4103 rnd_type = 64;
4104 break;
4105 case 3:
4106 default:
4107 rnd_type = 80;
4108 break;
4109 }
4110 set_floatx80_rounding_precision(rnd_type, &env->fp_status);
4111#endif
4112}
4113
4114#if !defined(CONFIG_USER_ONLY)
4115
4116#define MMUSUFFIX _mmu
4117#define GETPC() (__builtin_return_address(0))
4118
4119#define SHIFT 0
4120#include "softmmu_template.h"
4121
4122#define SHIFT 1
4123#include "softmmu_template.h"
4124
4125#define SHIFT 2
4126#include "softmmu_template.h"
4127
4128#define SHIFT 3
4129#include "softmmu_template.h"
4130
4131#endif
4132
4133/* try to fill the TLB and return an exception if error. If retaddr is
4134 NULL, it means that the function was called in C code (i.e. not
4135 from generated code or from helper.c) */
4136/* XXX: fix it to restore all registers */
4137void tlb_fill(target_ulong addr, int is_write, int is_user, void *retaddr)
4138{
4139 TranslationBlock *tb;
4140 int ret;
4141 unsigned long pc;
4142 CPUX86State *saved_env;
4143
4144 /* XXX: hack to restore env in all cases, even if not called from
4145 generated code */
4146 saved_env = env;
4147 env = cpu_single_env;
4148
4149 ret = cpu_x86_handle_mmu_fault(env, addr, is_write, is_user, 1);
4150 if (ret) {
4151 if (retaddr) {
4152 /* now we have a real cpu fault */
4153 pc = (unsigned long)retaddr;
4154 tb = tb_find_pc(pc);
4155 if (tb) {
4156 /* the PC is inside the translated code. It means that we have
4157 a virtual CPU fault */
4158 cpu_restore_state(tb, env, pc, NULL);
4159 }
4160 }
4161 if (retaddr)
4162 raise_exception_err(env->exception_index, env->error_code);
4163 else
4164 raise_exception_err_norestore(env->exception_index, env->error_code);
4165 }
4166 env = saved_env;
4167}
4168
4169#ifdef VBOX
4170
4171/**
4172 * Correctly computes the eflags.
4173 * @returns eflags.
4174 * @param env1 CPU environment.
4175 */
4176uint32_t raw_compute_eflags(CPUX86State *env1)
4177{
4178 CPUX86State *savedenv = env;
4179 env = env1;
4180 uint32_t efl = compute_eflags();
4181 env = savedenv;
4182 return efl;
4183}
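
/* Hypothetical caller sketch: VBox code that only holds a CPUX86State pointer
   can use this to obtain eflags with the lazily computed CC_* bits folded in
   (env1 stands for some CPUX86State *, an assumption for this example): */
#if 0
    uint32_t efl = raw_compute_eflags(env1);
    if (efl & IF_MASK)
        Log(("guest interrupts are enabled\n"));
#endif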
4184
4185/**
4186 * Reads a byte from a virtual address in the guest memory area.
4187 * XXX: does it work for all addresses? swapped out pages?
4188 * @returns the byte read.
4189 * @param env1 CPU environment.
4190 * @param addr GC virtual address.
4191 */
4192uint8_t read_byte(CPUX86State *env1, target_ulong addr)
4193{
4194 CPUX86State *savedenv = env;
4195 env = env1;
4196 uint8_t u8 = ldub_kernel(addr);
4197 env = savedenv;
4198 return u8;
4199}
4200
4201/**
4202 * Reads a word from a virtual address in the guest memory area.
4203 * XXX: does it work for all addresses? swapped out pages?
4204 * @returns the word read.
4205 * @param env1 CPU environment.
4206 * @param addr GC virtual address.
4207 */
4208uint16_t read_word(CPUX86State *env1, target_ulong addr)
4209{
4210 CPUX86State *savedenv = env;
4211 env = env1;
4212 uint16_t u16 = lduw_kernel(addr);
4213 env = savedenv;
4214 return u16;
4215}
4216
4217/**
4218 * Reads a dword from a virtual address in the guest memory area.
4219 * XXX: does it work for all addresses? swapped out pages?
4220 * @returns the dword read.
4221 * @param env1 CPU environment.
4222 * @param addr GC virtual address.
4223 */
4224uint32_t read_dword(CPUX86State *env1, target_ulong addr)
4225{
4226 CPUX86State *savedenv = env;
4227 env = env1;
4228 uint32_t u32 = ldl_kernel(addr);
4229 env = savedenv;
4230 return u32;
4231}
4232
4233/**
4234 * Writes a byte to a virtual address in the guest memory area.
4235 * XXX: does it work for all addresses? swapped out pages?
4236 *
4237 * @param env1 CPU environment.
4238 * @param addr GC virtual address.
4239 * @param val byte value.
4240 */
4241void write_byte(CPUX86State *env1, target_ulong addr, uint8_t val)
4242{
4243 CPUX86State *savedenv = env;
4244 env = env1;
4245 stb(addr, val);
4246 env = savedenv;
4247}
4248
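/**
 * Writes a word to a virtual address in the guest memory area.
 * @param env1 CPU environment.
 * @param addr GC virtual address.
 * @param val word value.
 */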
4249void write_word(CPUX86State *env1, target_ulong addr, uint16_t val)
4250{
4251 CPUX86State *savedenv = env;
4252 env = env1;
4253 stw(addr, val);
4254 env = savedenv;
4255}
4256
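/**
 * Writes a dword to a virtual address in the guest memory area.
 * @param env1 CPU environment.
 * @param addr GC virtual address.
 * @param val dword value.
 */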
4257void write_dword(CPUX86State *env1, target_ulong addr, uint32_t val)
4258{
4259 CPUX86State *savedenv = env;
4260 env = env1;
4261 stl(addr, val);
4262 env = savedenv;
4263}
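
/* A minimal sketch (hypothetical): these wrappers permit a read-modify-write
   on guest virtual memory without owning the global env pointer; GCPtrSome
   is an assumed guest virtual address, not a real variable in this file: */
#if 0
    uint32_t u32 = read_dword(env1, GCPtrSome);
    write_dword(env1, GCPtrSome, u32 | 1); /* set the low bit and write back */
#endif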
4264
4265/**
4266 * Correctly loads a selector into a segment register, updating the
4267 * internal qemu data/caches.
4268 * @param env1 CPU environment.
4269 * @param seg_reg Segment register.
4270 * @param selector Selector to load.
4271 */
4272void sync_seg(CPUX86State *env1, int seg_reg, int selector)
4273{
4274 CPUX86State *savedenv = env;
4275 env = env1;
4276
4277 if (env->eflags & X86_EFL_VM)
4278 {
4279 load_seg_vm(seg_reg, selector);
4280
4281 env = savedenv;
4282
4283 /* Successful sync. */
4284 env1->segs[seg_reg].newselector = 0;
4285 }
4286 else
4287 {
4288 if (setjmp(env1->jmp_env) == 0)
4289 {
4290 if (seg_reg == R_CS)
4291 {
4292 uint32_t e1, e2;
4293 load_segment(&e1, &e2, selector);
4294 cpu_x86_load_seg_cache(env, R_CS, selector,
4295 get_seg_base(e1, e2),
4296 get_seg_limit(e1, e2),
4297 e2);
4298 }
4299 else
4300 load_seg(seg_reg, selector);
4301 env = savedenv;
4302
4303 /* Successful sync. */
4304 env1->segs[seg_reg].newselector = 0;
4305 }
4306 else
4307 {
4308 env = savedenv;
4309
4310 /* Postpone sync until the guest uses the selector. */
4311 env1->segs[seg_reg].selector = selector; /* hidden values are now incorrect, but will be resynced when this register is accessed. */
4312 env1->segs[seg_reg].newselector = selector;
4313 Log(("sync_seg: out of sync seg_reg=%d selector=%#x\n", seg_reg, selector));
4314 }
4315 }
4316
4317}
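
/* Hypothetical usage sketch: load selector 0x23 into DS and check whether the
   sync had to be postponed (a non-zero newselector means still out of sync): */
#if 0
    sync_seg(env1, R_DS, 0x23);
    if (env1->segs[R_DS].newselector)
        Log(("DS selector sync postponed\n"));
#endif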
4318
4319
4320/**
4321 * Correctly loads a new ldtr selector.
4322 *
4323 * @param env1 CPU environment.
4324 * @param selector Selector to load.
4325 */
4326void sync_ldtr(CPUX86State *env1, int selector)
4327{
4328 CPUX86State *saved_env = env;
4329 target_ulong saved_T0 = T0;
4330 if (setjmp(env1->jmp_env) == 0)
4331 {
4332 env = env1;
4333 T0 = selector;
4334 helper_lldt_T0();
4335 T0 = saved_T0;
4336 env = saved_env;
4337 }
4338 else
4339 {
4340 T0 = saved_T0;
4341 env = saved_env;
4342#ifdef VBOX_STRICT
4343 cpu_abort(env1, "sync_ldtr: selector=%#x\n", selector);
4344#endif
4345 }
4346}
4347
4348/**
4349 * Correctly loads a new tr selector.
4350 * @returns 0 on success, -1 on failure.
4351 * @param env1 CPU environment.
4352 * @param selector Selector to load.
4353 */
4354int sync_tr(CPUX86State *env1, int selector)
4355{
4356 /* ARG! this was going to call helper_ltr_T0, but that won't work because of the TSS busy flag. */
4357 SegmentCache *dt;
4358 uint32_t e1, e2;
4359 int index, type, entry_limit;
4360 target_ulong ptr;
4361 CPUX86State *saved_env = env;
4362 env = env1;
4363
4364 selector &= 0xffff;
4365 if ((selector & 0xfffc) == 0) {
4366 /* NULL selector case: invalid TR */
4367 env->tr.base = 0;
4368 env->tr.limit = 0;
4369 env->tr.flags = 0;
4370 } else {
4371 if (selector & 0x4)
4372 goto l_failure;
4373 dt = &env->gdt;
4374 index = selector & ~7;
4375#ifdef TARGET_X86_64
4376 if (env->hflags & HF_LMA_MASK)
4377 entry_limit = 15;
4378 else
4379#endif
4380 entry_limit = 7;
4381 if ((index + entry_limit) > dt->limit)
4382 goto l_failure;
4383 ptr = dt->base + index;
4384 e1 = ldl_kernel(ptr);
4385 e2 = ldl_kernel(ptr + 4);
4386 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
4387 if ((e2 & DESC_S_MASK) /*||
4388 (type != 1 && type != 9)*/)
4389 goto l_failure;
4390 if (!(e2 & DESC_P_MASK))
4391 goto l_failure;
4392#ifdef TARGET_X86_64
4393 if (env->hflags & HF_LMA_MASK) {
4394 uint32_t e3;
4395 e3 = ldl_kernel(ptr + 8);
4396 load_seg_cache_raw_dt(&env->tr, e1, e2);
4397 env->tr.base |= (target_ulong)e3 << 32;
4398 } else
4399#endif
4400 {
4401 load_seg_cache_raw_dt(&env->tr, e1, e2);
4402 }
4403 e2 |= DESC_TSS_BUSY_MASK;
4404 stl_kernel(ptr + 4, e2);
4405 }
4406 env->tr.selector = selector;
4407
4408 env = saved_env;
4409 return 0;
4410l_failure:
4411 AssertMsgFailed(("selector=%d\n", selector));
4412 env = saved_env; return -1; /* restore the global env on the failure path too */
4413}
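
/* Worked example for the checks above: for selector 0x0028 the TI bit (0x4)
   is clear so the GDT is used, the RPL (low two bits) is 0, and
   index = selector & ~7 = 0x28 is the descriptor's byte offset within the
   GDT. In long mode a TSS descriptor occupies 16 bytes, hence entry_limit
   is 15 there instead of 7. */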
4414
4415int emulate_single_instr(CPUX86State *env1)
4416{
4417 TranslationBlock *current;
4418 TranslationBlock tb_temp;
4419 int csize;
4420 void (*gen_func)(void);
4421 uint8_t *tc_ptr;
4422 uint32_t old_eip;
4423
4424 /* ensures env is loaded in ebp! */
4425 CPUX86State *savedenv = env;
4426 env = env1;
4427
4428 RAWEx_ProfileStart(env, STATS_EMULATE_SINGLE_INSTR);
4429
4430 tc_ptr = env->pvCodeBuffer;
4431
4432 /*
4433 * Setup temporary translation block.
4434 */
4435 /* tb_alloc: */
4436 tb_temp.pc = env->segs[R_CS].base + env->eip;
4437 tb_temp.cflags = 0;
4438
4439 /* tb_find_slow: */
4440 tb_temp.tc_ptr = tc_ptr;
4441 tb_temp.cs_base = env->segs[R_CS].base;
4442 tb_temp.flags = env->hflags | (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
4443
4444 /* Initialize the rest with sensible values. */
4445 tb_temp.size = 0;
4446 tb_temp.phys_hash_next = NULL;
4447 tb_temp.page_next[0] = NULL;
4448 tb_temp.page_next[1] = NULL;
4449 tb_temp.page_addr[0] = 0;
4450 tb_temp.page_addr[1] = 0;
4451 tb_temp.tb_next_offset[0] = 0xffff;
4452 tb_temp.tb_next_offset[1] = 0xffff;
4453 tb_temp.tb_next[0] = 0xffff;
4454 tb_temp.tb_next[1] = 0xffff;
4455 tb_temp.jmp_next[0] = NULL;
4456 tb_temp.jmp_next[1] = NULL;
4457 tb_temp.jmp_first = NULL;
4458
4459 current = env->current_tb;
4460 env->current_tb = NULL;
4461
4462 /*
4463 * Translate only one instruction.
4464 */
4465 ASMAtomicOrU32(&env->state, CPU_EMULATE_SINGLE_INSTR);
4466 if (cpu_gen_code(env, &tb_temp, env->cbCodeBuffer, &csize) < 0)
4467 {
4468 AssertFailed();
4469 RAWEx_ProfileStop(env, STATS_EMULATE_SINGLE_INSTR);
4470 ASMAtomicAndU32(&env->state, ~CPU_EMULATE_SINGLE_INSTR);
4471 env = savedenv;
4472 return -1;
4473 }
4474#ifdef DEBUG
4475 if(csize > env->cbCodeBuffer)
4476 {
4477 RAWEx_ProfileStop(env, STATS_EMULATE_SINGLE_INSTR);
4478 AssertFailed();
4479 ASMAtomicAndU32(&env->state, ~CPU_EMULATE_SINGLE_INSTR);
4480 env = savedenv;
4481 return -1;
4482 }
4483 if (tb_temp.tc_ptr != tc_ptr)
4484 {
4485 RAWEx_ProfileStop(env, STATS_EMULATE_SINGLE_INSTR);
4486 AssertFailed();
4487 ASMAtomicAndU32(&env->state, ~CPU_EMULATE_SINGLE_INSTR);
4488 env = savedenv;
4489 return -1;
4490 }
4491#endif
4492 ASMAtomicAndU32(&env->state, ~CPU_EMULATE_SINGLE_INSTR);
4493
4494 /* tb_link_phys: */
4495 tb_temp.jmp_first = (TranslationBlock *)((intptr_t)&tb_temp | 2);
4496 Assert(tb_temp.jmp_next[0] == NULL); Assert(tb_temp.jmp_next[1] == NULL);
4497 if (tb_temp.tb_next_offset[0] != 0xffff)
4498 tb_set_jmp_target(&tb_temp, 0, (uintptr_t)(tb_temp.tc_ptr + tb_temp.tb_next_offset[0]));
4499 if (tb_temp.tb_next_offset[1] != 0xffff)
4500 tb_set_jmp_target(&tb_temp, 1, (uintptr_t)(tb_temp.tc_ptr + tb_temp.tb_next_offset[1]));
4501
4502 /*
4503 * Execute it using emulation
4504 */
4505 old_eip = env->eip;
4506 gen_func = (void *)tb_temp.tc_ptr;
4507 env->current_tb = &tb_temp;
4508
4509 // eip remains the same for repeated instructions (e.g. REP-prefixed string operations); it is unclear
4510 // why qemu doesn't do a jump inside the generated code. Perhaps not a very safe hack.
4511 while(old_eip == env->eip)
4512 {
4513 gen_func();
4514 /*
4515 * Exit once we detect an external interrupt and interrupts are enabled
4516 */
4517 if( (env->interrupt_request & (CPU_INTERRUPT_EXTERNAL_EXIT|CPU_INTERRUPT_EXTERNAL_TIMER)) ||
4518 ( (env->eflags & IF_MASK) &&
4519 !(env->hflags & HF_INHIBIT_IRQ_MASK) &&
4520 (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD) ) )
4521 {
4522 break;
4523 }
4524 }
4525 env->current_tb = current;
4526
4527 Assert(tb_temp.phys_hash_next == NULL);
4528 Assert(tb_temp.page_next[0] == NULL);
4529 Assert(tb_temp.page_next[1] == NULL);
4530 Assert(tb_temp.page_addr[0] == 0);
4531 Assert(tb_temp.page_addr[1] == 0);
4532/*
4533 Assert(tb_temp.tb_next_offset[0] == 0xffff);
4534 Assert(tb_temp.tb_next_offset[1] == 0xffff);
4535 Assert(tb_temp.tb_next[0] == 0xffff);
4536 Assert(tb_temp.tb_next[1] == 0xffff);
4537 Assert(tb_temp.jmp_next[0] == NULL);
4538 Assert(tb_temp.jmp_next[1] == NULL);
4539 Assert(tb_temp.jmp_first == NULL); */
4540
4541 RAWEx_ProfileStop(env, STATS_EMULATE_SINGLE_INSTR);
4542
4543 /*
4544 * Execute the next instruction when we encounter instruction fusing.
4545 */
4546 if (env->hflags & HF_INHIBIT_IRQ_MASK)
4547 {
4548 Log(("REM: Emulating next instruction due to instruction fusing (HF_INHIBIT_IRQ_MASK)\n"));
4549 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
4550 emulate_single_instr(env);
4551 }
4552
4553 env = savedenv;
4554 return 0;
4555}
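
/* Hypothetical usage sketch: REM code can step exactly one guest instruction
   like this; a negative return value means code generation failed: */
#if 0
    if (emulate_single_instr(env1) < 0)
        Log(("emulate_single_instr failed\n"));
#endif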
4556
4557int get_ss_esp_from_tss_raw(CPUX86State *env1, uint32_t *ss_ptr,
4558 uint32_t *esp_ptr, int dpl)
4559{
4560 int type, index, shift;
4561
4562 CPUX86State *savedenv = env;
4563 env = env1;
4564
4565 if (!(env->tr.flags & DESC_P_MASK))
4566 cpu_abort(env, "invalid tss");
4567 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
4568 if ((type & 7) != 1)
4569 cpu_abort(env, "invalid tss type %d", type);
4570 shift = type >> 3;
4571 index = (dpl * 4 + 2) << shift;
4572 if (index + (4 << shift) - 1 > env->tr.limit)
4573 {
4574 env = savedenv;
4575 return 0;
4576 }
4577 //raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
4578
4579 if (shift == 0) {
4580 *esp_ptr = lduw_kernel(env->tr.base + index);
4581 *ss_ptr = lduw_kernel(env->tr.base + index + 2);
4582 } else {
4583 *esp_ptr = ldl_kernel(env->tr.base + index);
4584 *ss_ptr = lduw_kernel(env->tr.base + index + 4);
4585 }
4586
4587 env = savedenv;
4588 return 1;
4589}
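
/* Worked example: for a 32-bit TSS (type 9 or 11, so shift == 1) and dpl == 0,
   index = (0 * 4 + 2) << 1 = 4, matching the TSS32 layout where ESP0 sits at
   offset 4 and SS0 at offset 8 (index + 4). For a 16-bit TSS (shift == 0) the
   SP0/SS0 pair is read from the 2-byte fields at offsets 2 and 4 instead. */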
4590
4591//*****************************************************************************
4592// Needs to be at the bottom of the file (overriding macros)
4593
4594static inline CPU86_LDouble helper_fldt_raw(uint8_t *ptr)
4595{
4596 return *(CPU86_LDouble *)ptr;
4597}
4598
4599static inline void helper_fstt_raw(CPU86_LDouble f, uint8_t *ptr)
4600{
4601 *(CPU86_LDouble *)ptr = f;
4602}
4603
4604#undef stw
4605#undef stl
4606#undef stq
4607#define stw(a,b) *(uint16_t *)(a) = (uint16_t)(b)
4608#define stl(a,b) *(uint32_t *)(a) = (uint32_t)(b)
4609#define stq(a,b) *(uint64_t *)(a) = (uint64_t)(b)
4610#define data64 0
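
/* From this point on, stw/stl/stq (and, further down, lduw/ldl/ldq) operate
   on raw host pointers into the FPU state image instead of going through the
   guest soft-MMU accessors; that is why this block must sit at the end of
   the file. data64 is fixed to 0, so only 8 XMM registers are handled
   (32-bit guest layout). */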
4611
4612//*****************************************************************************
4613void restore_raw_fp_state(CPUX86State *env, uint8_t *ptr)
4614{
4615 int fpus, fptag, i, nb_xmm_regs;
4616 CPU86_LDouble tmp;
4617 uint8_t *addr;
4618
4619 if (env->cpuid_features & CPUID_FXSR)
4620 {
4621 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
4622 fptag = 0;
4623 for(i = 0; i < 8; i++) {
4624 fptag |= (env->fptags[i] << i);
4625 }
4626 stw(ptr, env->fpuc);
4627 stw(ptr + 2, fpus);
4628 stw(ptr + 4, fptag ^ 0xff);
4629
4630 addr = ptr + 0x20;
4631 for(i = 0;i < 8; i++) {
4632 tmp = ST(i);
4633 helper_fstt_raw(tmp, addr);
4634 addr += 16;
4635 }
4636
4637 if (env->cr[4] & CR4_OSFXSR_MASK) {
4638 /* XXX: finish it */
4639 stl(ptr + 0x18, env->mxcsr); /* mxcsr */
4640 stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
4641 nb_xmm_regs = 8 << data64;
4642 addr = ptr + 0xa0;
4643 for(i = 0; i < nb_xmm_regs; i++) {
4644#if __GNUC__ < 4
4645 stq(addr, env->xmm_regs[i].XMM_Q(0));
4646 stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
4647#else /* VBOX + __GNUC__ >= 4: gcc 4.x compiler bug - it runs out of registers for the 64-bit value. */
4648 stl(addr, env->xmm_regs[i].XMM_L(0));
4649 stl(addr + 4, env->xmm_regs[i].XMM_L(1));
4650 stl(addr + 8, env->xmm_regs[i].XMM_L(2));
4651 stl(addr + 12, env->xmm_regs[i].XMM_L(3));
4652#endif
4653 addr += 16;
4654 }
4655 }
4656 }
4657 else
4658 {
4659 PX86FPUSTATE fp = (PX86FPUSTATE)ptr;
4660 int fptag;
4661
4662 fp->FCW = env->fpuc;
4663 fp->FSW = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
4664 fptag = 0;
4665 for (i=7; i>=0; i--) {
4666 fptag <<= 2;
4667 if (env->fptags[i]) {
4668 fptag |= 3;
4669 } else {
4670 /* the FPU automatically computes it */
4671 }
4672 }
4673 fp->FTW = fptag;
4674
4675 for(i = 0;i < 8; i++) {
4676 tmp = ST(i);
4677 helper_fstt_raw(tmp, &fp->regs[i].reg[0]);
4678 }
4679 }
4680}
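
/* FXSAVE image layout assumed above (subset): +0x00 FCW, +0x02 FSW, +0x04
   abridged FTW (stored as fptag ^ 0xff), +0x18 MXCSR, +0x1c MXCSR_MASK,
   +0x20 ST(0)..ST(7) in 16-byte slots, +0xa0 XMM registers in 16-byte
   slots. */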
4681
4682//*****************************************************************************
4683#undef lduw
4684#undef ldl
4685#undef ldq
4686#define lduw(a) *(uint16_t *)(a)
4687#define ldl(a) *(uint32_t *)(a)
4688#define ldq(a) *(uint64_t *)(a)
4689//*****************************************************************************
4690void save_raw_fp_state(CPUX86State *env, uint8_t *ptr)
4691{
4692 int i, fpus, fptag, nb_xmm_regs;
4693 CPU86_LDouble tmp;
4694 uint8_t *addr;
4695
4696 if (env->cpuid_features & CPUID_FXSR)
4697 {
4698 env->fpuc = lduw(ptr);
4699 fpus = lduw(ptr + 2);
4700 fptag = lduw(ptr + 4);
4701 env->fpstt = (fpus >> 11) & 7;
4702 env->fpus = fpus & ~0x3800;
4703 fptag ^= 0xff;
4704 for(i = 0;i < 8; i++) {
4705 env->fptags[i] = ((fptag >> i) & 1);
4706 }
4707
4708 addr = ptr + 0x20;
4709 for(i = 0;i < 8; i++) {
4710 tmp = helper_fldt_raw(addr);
4711 ST(i) = tmp;
4712 addr += 16;
4713 }
4714
4715 if (env->cr[4] & CR4_OSFXSR_MASK) {
4716 /* XXX: finish it, endianness */
4717 env->mxcsr = ldl(ptr + 0x18);
4718 //ldl(ptr + 0x1c);
4719 nb_xmm_regs = 8 << data64;
4720 addr = ptr + 0xa0;
4721 for(i = 0; i < nb_xmm_regs; i++) {
4722 env->xmm_regs[i].XMM_Q(0) = ldq(addr);
4723 env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
4724 addr += 16;
4725 }
4726 }
4727 }
4728 else
4729 {
4730 PX86FPUSTATE fp = (PX86FPUSTATE)ptr;
4731 int fptag, j;
4732
4733 env->fpuc = fp->FCW;
4734 env->fpstt = (fp->FSW >> 11) & 7;
4735 env->fpus = fp->FSW & ~0x3800;
4736 fptag = fp->FTW;
4737 for(i = 0;i < 8; i++) {
4738 env->fptags[i] = ((fptag & 3) == 3);
4739 fptag >>= 2;
4740 }
4741 j = env->fpstt;
4742 for(i = 0;i < 8; i++) {
4743 tmp = helper_fldt_raw(&fp->regs[i].reg[0]);
4744 ST(i) = tmp;
4745 }
4746 }
4747}
4748//*****************************************************************************
4749//*****************************************************************************
4750
4751#endif /* VBOX */
4752