VirtualBox

source: vbox/trunk/src/recompiler/target-i386/helper.c@13646

Last change on this file since 13646 was 13449, checked in by vboxsync, 16 years ago

Fixed default flags for selector register loads in real and V86 mode

  • Property svn:eol-style set to native
File size: 136.2 KB
1/*
2 * i386 helpers
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21/*
22 * Sun LGPL Disclaimer: For the avoidance of doubt, except that if any license choice
23 * other than GPL or LGPL is available it will apply instead, Sun elects to use only
24 * the Lesser General Public License version 2.1 (LGPLv2) at this time for any software where
25 * a choice of LGPL license versions is made available with the language indicating
26 * that LGPLv2 or any later version may be used, or where a choice of which version
27 * of the LGPL is applied is otherwise unspecified.
28 */
29#ifdef VBOX
30# include <VBox/err.h>
31# ifdef VBOX_WITH_VMI
32# include <VBox/parav.h>
33# endif
34#endif
35#include "exec.h"
36
37//#define DEBUG_PCALL
38
39#if 0
40#define raise_exception_err(a, b)\
41do {\
42 if (logfile)\
43 fprintf(logfile, "raise_exception line=%d\n", __LINE__);\
44 (raise_exception_err)(a, b);\
45} while (0)
46#endif
47
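/* Parity lookup: entry i is CC_P when the byte value i contains an
   even number of set bits; the flag computation indexes this with the
   low 8 bits of the result to produce PF. */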
48const uint8_t parity_table[256] = {
49 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
50 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
51 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
52 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
53 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
54 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
55 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
56 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
57 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
58 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
59 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
60 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
61 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
62 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
63 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
64 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
65 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
66 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
67 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
68 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
69 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
70 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
71 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
72 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
73 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
74 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
75 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
76 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
77 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
78 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
79 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
80 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
81};
82
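/* RCL rotates through CF, so a 16-bit rotate cycles over 17 bit
   positions and an 8-bit one over 9; the tables below reduce the
   rotate count accordingly. */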
83/* modulo 17 table */
84const uint8_t rclw_table[32] = {
85 0, 1, 2, 3, 4, 5, 6, 7,
86 8, 9,10,11,12,13,14,15,
87 16, 0, 1, 2, 3, 4, 5, 6,
88 7, 8, 9,10,11,12,13,14,
89};
90
91/* modulo 9 table */
92const uint8_t rclb_table[32] = {
93 0, 1, 2, 3, 4, 5, 6, 7,
94 8, 0, 1, 2, 3, 4, 5, 6,
95 7, 8, 0, 1, 2, 3, 4, 5,
96 6, 7, 8, 0, 1, 2, 3, 4,
97};
98
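/* Constants used by the FPU load-constant instructions, in order:
   FLDZ, FLD1, FLDPI, FLDLG2, FLDLN2, FLDL2E and FLDL2T. */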
99const CPU86_LDouble f15rk[7] =
100{
101 0.00000000000000000000L,
102 1.00000000000000000000L,
103 3.14159265358979323851L, /*pi*/
104 0.30102999566398119523L, /*lg2*/
105 0.69314718055994530943L, /*ln2*/
106 1.44269504088896340739L, /*l2e*/
107 3.32192809488736234781L, /*l2t*/
108};
109
110/* thread support */
111
112spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;
113
114void cpu_lock(void)
115{
116 spin_lock(&global_cpu_lock);
117}
118
119void cpu_unlock(void)
120{
121 spin_unlock(&global_cpu_lock);
122}
123
124void cpu_loop_exit(void)
125{
126 /* NOTE: the registers at this point must be saved by hand because
127 longjmp restores them */
128 regs_to_env();
129 longjmp(env->jmp_env, 1);
130}
131
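/* NOTE: selector bit 2 (TI) picks the LDT over the GDT, bits 0-1 are
   the RPL, and selector & ~7 is the byte offset of the 8-byte
   descriptor, returned as its low (e1) and high (e2) dwords. */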
132/* return non-zero on error */
133static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
134 int selector)
135{
136 SegmentCache *dt;
137 int index;
138 target_ulong ptr;
139
140 if (selector & 0x4)
141 dt = &env->ldt;
142 else
143 dt = &env->gdt;
144 index = selector & ~7;
145 if ((index + 7) > dt->limit)
146 return -1;
147 ptr = dt->base + index;
148 *e1_ptr = ldl_kernel(ptr);
149 *e2_ptr = ldl_kernel(ptr + 4);
150 return 0;
151}
152
153static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
154{
155 unsigned int limit;
156 limit = (e1 & 0xffff) | (e2 & 0x000f0000);
157 if (e2 & DESC_G_MASK)
158 limit = (limit << 12) | 0xfff;
159 return limit;
160}
161
162static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
163{
164 return ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
165}
166
167static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
168{
169 sc->base = get_seg_base(e1, e2);
170 sc->limit = get_seg_limit(e1, e2);
171 sc->flags = e2;
172}
173
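/* NOTE: the VBOX branch below supplies present/writable descriptor
   flags (plus DESC_CS_MASK for CS) instead of 0, so a selector loaded
   in V86 mode gets a cache entry that behaves like a normal real-mode
   segment; this is the fix referred to in the change log above. */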
174/* init the segment cache in vm86 mode. */
175static inline void load_seg_vm(int seg, int selector)
176{
177 selector &= 0xffff;
178#ifdef VBOX
179 unsigned flags = DESC_P_MASK | DESC_S_MASK | DESC_W_MASK;
180
181 if (seg == R_CS)
182 flags |= DESC_CS_MASK;
183
184 cpu_x86_load_seg_cache(env, seg, selector,
185 (selector << 4), 0xffff, flags);
186#else
187 cpu_x86_load_seg_cache(env, seg, selector,
188 (selector << 4), 0xffff, 0);
189#endif
190}
191
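/* NOTE: a 32-bit TSS keeps the ring n stack pointers at offsets
   8 * n + 4 (ESP) and 8 * n + 8 (SS); a 16-bit TSS packs them at
   4 * n + 2 (SP) and 4 * n + 4 (SS). The shift derived from bit 3 of
   the TSS type below selects between the two layouts. */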
192static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
193 uint32_t *esp_ptr, int dpl)
194{
195 int type, index, shift;
196
197#if 0
198 {
199 int i;
200 printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
201 for(i=0;i<env->tr.limit;i++) {
202 printf("%02x ", env->tr.base[i]);
203 if ((i & 7) == 7) printf("\n");
204 }
205 printf("\n");
206 }
207#endif
208
209 if (!(env->tr.flags & DESC_P_MASK))
210 cpu_abort(env, "invalid tss");
211 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
212 if ((type & 7) != 1)
213 cpu_abort(env, "invalid tss type %d", type);
214 shift = type >> 3;
215 index = (dpl * 4 + 2) << shift;
216 if (index + (4 << shift) - 1 > env->tr.limit)
217 raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
218 if (shift == 0) {
219 *esp_ptr = lduw_kernel(env->tr.base + index);
220 *ss_ptr = lduw_kernel(env->tr.base + index + 2);
221 } else {
222 *esp_ptr = ldl_kernel(env->tr.base + index);
223 *ss_ptr = lduw_kernel(env->tr.base + index + 4);
224 }
225}
226
227/* XXX: merge with load_seg() */
228static void tss_load_seg(int seg_reg, int selector)
229{
230 uint32_t e1, e2;
231 int rpl, dpl, cpl;
232
233 if ((selector & 0xfffc) != 0) {
234 if (load_segment(&e1, &e2, selector) != 0)
235 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
236 if (!(e2 & DESC_S_MASK))
237 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
238 rpl = selector & 3;
239 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
240 cpl = env->hflags & HF_CPL_MASK;
241 if (seg_reg == R_CS) {
242 if (!(e2 & DESC_CS_MASK))
243 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
244 /* XXX: is it correct ? */
245 if (dpl != rpl)
246 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
247 if ((e2 & DESC_C_MASK) && dpl > rpl)
248 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
249 } else if (seg_reg == R_SS) {
250 /* SS must be writable data */
251 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
252 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
253 if (dpl != cpl || dpl != rpl)
254 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
255 } else {
256 /* code segments must be readable here */
257 if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
258 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
259 /* if data or non-conforming code, check the rights */
260 if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
261 if (dpl < cpl || dpl < rpl)
262 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
263 }
264 }
265 if (!(e2 & DESC_P_MASK))
266 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
267 cpu_x86_load_seg_cache(env, seg_reg, selector,
268 get_seg_base(e1, e2),
269 get_seg_limit(e1, e2),
270 e2);
271 } else {
272 if (seg_reg == R_SS || seg_reg == R_CS)
273 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
274 }
275}
276
277#define SWITCH_TSS_JMP 0
278#define SWITCH_TSS_IRET 1
279#define SWITCH_TSS_CALL 2
280
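/* Hardware task switch, in outline: validate the new TSS (task gates
   are dereferenced first), save the current registers into the old
   TSS, update the busy bits, record a back link and set NT for
   CALL-style switches, then load the new register state. The segment
   registers are reloaded last because those loads can fault, and must
   do so in the context of the new task. */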
281/* XXX: restore CPU state in registers (PowerPC case) */
282static void switch_tss(int tss_selector,
283 uint32_t e1, uint32_t e2, int source,
284 uint32_t next_eip)
285{
286 int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
287 target_ulong tss_base;
288 uint32_t new_regs[8], new_segs[6];
289 uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
290 uint32_t old_eflags, eflags_mask;
291 SegmentCache *dt;
292 int index;
293 target_ulong ptr;
294
295 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
296#ifdef DEBUG_PCALL
297 if (loglevel & CPU_LOG_PCALL)
298 fprintf(logfile, "switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);
299#endif
300
301#if defined(VBOX) && defined(DEBUG)
302 printf("switch_tss %x %x %x %d %08x\n", tss_selector, e1, e2, source, next_eip);
303#endif
304
305 /* if it is a task gate, we read and load the TSS segment */
306 if (type == 5) {
307 if (!(e2 & DESC_P_MASK))
308 raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
309 tss_selector = e1 >> 16;
310 if (tss_selector & 4)
311 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
312 if (load_segment(&e1, &e2, tss_selector) != 0)
313 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
314 if (e2 & DESC_S_MASK)
315 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
316 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
317 if ((type & 7) != 1)
318 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
319 }
320
321 if (!(e2 & DESC_P_MASK))
322 raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
323
324 if (type & 8)
325 tss_limit_max = 103;
326 else
327 tss_limit_max = 43;
328 tss_limit = get_seg_limit(e1, e2);
329 tss_base = get_seg_base(e1, e2);
330 if ((tss_selector & 4) != 0 ||
331 tss_limit < tss_limit_max)
332 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
333 old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
334 if (old_type & 8)
335 old_tss_limit_max = 103;
336 else
337 old_tss_limit_max = 43;
338
339 /* read all the registers from the new TSS */
340 if (type & 8) {
341 /* 32 bit */
342 new_cr3 = ldl_kernel(tss_base + 0x1c);
343 new_eip = ldl_kernel(tss_base + 0x20);
344 new_eflags = ldl_kernel(tss_base + 0x24);
345 for(i = 0; i < 8; i++)
346 new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
347 for(i = 0; i < 6; i++)
348 new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
349 new_ldt = lduw_kernel(tss_base + 0x60);
350 new_trap = ldl_kernel(tss_base + 0x64);
351 } else {
352 /* 16 bit */
353 new_cr3 = 0;
354 new_eip = lduw_kernel(tss_base + 0x0e);
355 new_eflags = lduw_kernel(tss_base + 0x10);
356 for(i = 0; i < 8; i++)
357 new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
358 for(i = 0; i < 4; i++)
359 new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
360 new_ldt = lduw_kernel(tss_base + 0x2a);
361 new_segs[R_FS] = 0;
362 new_segs[R_GS] = 0;
363 new_trap = 0;
364 }
365
366 /* NOTE: we must avoid memory exceptions during the task switch,
367 so we make dummy accesses before */
368 /* XXX: it can still fail in some cases, so a bigger hack is
369 necessary to validate the TLB after having done the accesses */
370
371 v1 = ldub_kernel(env->tr.base);
372 v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
373 stb_kernel(env->tr.base, v1);
374 stb_kernel(env->tr.base + old_tss_limit_max, v2);
375
376 /* clear busy bit (it is restartable) */
377 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
378 target_ulong ptr;
379 uint32_t e2;
380 ptr = env->gdt.base + (env->tr.selector & ~7);
381 e2 = ldl_kernel(ptr + 4);
382 e2 &= ~DESC_TSS_BUSY_MASK;
383 stl_kernel(ptr + 4, e2);
384 }
385 old_eflags = compute_eflags();
386 if (source == SWITCH_TSS_IRET)
387 old_eflags &= ~NT_MASK;
388
389 /* save the current state in the old TSS */
390 if (type & 8) {
391 /* 32 bit */
392 stl_kernel(env->tr.base + 0x20, next_eip);
393 stl_kernel(env->tr.base + 0x24, old_eflags);
394 stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
395 stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
396 stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
397 stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
398 stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
399 stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
400 stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
401 stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
402 for(i = 0; i < 6; i++)
403 stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
404#if defined(VBOX) && defined(DEBUG)
405 printf("TSS 32 bits switch\n");
406 printf("Saving CS=%08X\n", env->segs[R_CS].selector);
407#endif
408 } else {
409 /* 16 bit */
410 stw_kernel(env->tr.base + 0x0e, next_eip);
411 stw_kernel(env->tr.base + 0x10, old_eflags);
412 stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
413 stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
414 stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
415 stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
416 stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
417 stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
418 stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
419 stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
420 for(i = 0; i < 4; i++)
421 stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
422 }
423
424 /* now if an exception occurs, it will occur in the next task's
425 context */
426
427 if (source == SWITCH_TSS_CALL) {
428 stw_kernel(tss_base, env->tr.selector);
429 new_eflags |= NT_MASK;
430 }
431
432 /* set busy bit */
433 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
434 target_ulong ptr;
435 uint32_t e2;
436 ptr = env->gdt.base + (tss_selector & ~7);
437 e2 = ldl_kernel(ptr + 4);
438 e2 |= DESC_TSS_BUSY_MASK;
439 stl_kernel(ptr + 4, e2);
440 }
441
442 /* set the new CPU state */
443 /* from this point, any exception which occurs can give problems */
444 env->cr[0] |= CR0_TS_MASK;
445 env->hflags |= HF_TS_MASK;
446 env->tr.selector = tss_selector;
447 env->tr.base = tss_base;
448 env->tr.limit = tss_limit;
449 env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;
450
451 if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
452 cpu_x86_update_cr3(env, new_cr3);
453 }
454
455 /* first load the registers that cannot fault, then reload the ones
456 that may raise an exception */
457 env->eip = new_eip;
458 eflags_mask = TF_MASK | AC_MASK | ID_MASK |
459 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
460 if (!(type & 8))
461 eflags_mask &= 0xffff;
462 load_eflags(new_eflags, eflags_mask);
463 /* XXX: what to do in 16 bit case ? */
464 EAX = new_regs[0];
465 ECX = new_regs[1];
466 EDX = new_regs[2];
467 EBX = new_regs[3];
468 ESP = new_regs[4];
469 EBP = new_regs[5];
470 ESI = new_regs[6];
471 EDI = new_regs[7];
472 if (new_eflags & VM_MASK) {
473 for(i = 0; i < 6; i++)
474 load_seg_vm(i, new_segs[i]);
475 /* in vm86, CPL is always 3 */
476 cpu_x86_set_cpl(env, 3);
477 } else {
478 /* CPL is set to the RPL of CS */
479 cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
480 /* load just the selectors first, as the rest may trigger exceptions */
481 for(i = 0; i < 6; i++)
482 cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
483 }
484
485 env->ldt.selector = new_ldt & ~4;
486 env->ldt.base = 0;
487 env->ldt.limit = 0;
488 env->ldt.flags = 0;
489
490 /* load the LDT */
491 if (new_ldt & 4)
492 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
493
494 if ((new_ldt & 0xfffc) != 0) {
495 dt = &env->gdt;
496 index = new_ldt & ~7;
497 if ((index + 7) > dt->limit)
498 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
499 ptr = dt->base + index;
500 e1 = ldl_kernel(ptr);
501 e2 = ldl_kernel(ptr + 4);
502 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
503 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
504 if (!(e2 & DESC_P_MASK))
505 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
506 load_seg_cache_raw_dt(&env->ldt, e1, e2);
507 }
508
509 /* load the segments */
510 if (!(new_eflags & VM_MASK)) {
511 tss_load_seg(R_CS, new_segs[R_CS]);
512 tss_load_seg(R_SS, new_segs[R_SS]);
513 tss_load_seg(R_ES, new_segs[R_ES]);
514 tss_load_seg(R_DS, new_segs[R_DS]);
515 tss_load_seg(R_FS, new_segs[R_FS]);
516 tss_load_seg(R_GS, new_segs[R_GS]);
517 }
518
519 /* check that EIP is within the CS segment limit */
520 if (new_eip > env->segs[R_CS].limit) {
521 /* XXX: different exception if CALL ? */
522 raise_exception_err(EXCP0D_GPF, 0);
523 }
524}
525
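/* NOTE: the word at TSS offset 0x66 is the base of the I/O permission
   bitmap, one bit per port (set = access denied). Two bytes are read
   because an access covering up to 4 ports can straddle a byte
   boundary. */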
526/* check if port I/O is allowed by the TSS I/O bitmap */
527static inline void check_io(int addr, int size)
528{
529 int io_offset, val, mask;
530
531 /* TSS must be a valid 32 bit one */
532 if (!(env->tr.flags & DESC_P_MASK) ||
533 ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
534 env->tr.limit < 103)
535 goto fail;
536 io_offset = lduw_kernel(env->tr.base + 0x66);
537 io_offset += (addr >> 3);
538 /* Note: the check can span two bitmap bytes */
539 if ((io_offset + 1) > env->tr.limit)
540 goto fail;
541 val = lduw_kernel(env->tr.base + io_offset);
542 val >>= (addr & 7);
543 mask = (1 << size) - 1;
544 /* all bits must be zero to allow the I/O */
545 if ((val & mask) != 0) {
546 fail:
547 raise_exception_err(EXCP0D_GPF, 0);
548 }
549}
550
551void check_iob_T0(void)
552{
553 check_io(T0, 1);
554}
555
556void check_iow_T0(void)
557{
558 check_io(T0, 2);
559}
560
561void check_iol_T0(void)
562{
563 check_io(T0, 4);
564}
565
566void check_iob_DX(void)
567{
568 check_io(EDX & 0xffff, 1);
569}
570
571void check_iow_DX(void)
572{
573 check_io(EDX & 0xffff, 2);
574}
575
576void check_iol_DX(void)
577{
578 check_io(EDX & 0xffff, 4);
579}
580
581static inline unsigned int get_sp_mask(unsigned int e2)
582{
583 if (e2 & DESC_B_MASK)
584 return 0xffffffff;
585 else
586 return 0xffff;
587}
588
589#ifdef TARGET_X86_64
590#define SET_ESP(val, sp_mask)\
591do {\
592 if ((sp_mask) == 0xffff)\
593 ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
594 else if ((sp_mask) == 0xffffffffLL)\
595 ESP = (uint32_t)(val);\
596 else\
597 ESP = (val);\
598} while (0)
599#else
600#define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
601#endif
602
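/* NOTE: SET_ESP only writes the bits covered by sp_mask, so stack
   pointer updates on a 16-bit stack segment leave the upper bits of
   ESP/RSP untouched, as on real hardware. */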
603/* XXX: add an is_user flag to have proper security support */
604#define PUSHW(ssp, sp, sp_mask, val)\
605{\
606 sp -= 2;\
607 stw_kernel((ssp) + (sp & (sp_mask)), (val));\
608}
609
610#define PUSHL(ssp, sp, sp_mask, val)\
611{\
612 sp -= 4;\
613 stl_kernel((ssp) + (sp & (sp_mask)), (val));\
614}
615
616#define POPW(ssp, sp, sp_mask, val)\
617{\
618 val = lduw_kernel((ssp) + (sp & (sp_mask)));\
619 sp += 2;\
620}
621
622#define POPL(ssp, sp, sp_mask, val)\
623{\
624 val = (uint32_t)ldl_kernel((ssp) + (sp & (sp_mask)));\
625 sp += 4;\
626}
627
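/* The frame pushed below is, from higher to lower addresses: GS, FS,
   DS and ES when coming from V86 mode; SS and ESP when switching to an
   inner stack; then EFLAGS, CS, EIP and, for some exceptions, an error
   code. The push width (16 or 32 bits) follows the gate type. */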
628/* protected mode interrupt */
629static void do_interrupt_protected(int intno, int is_int, int error_code,
630 unsigned int next_eip, int is_hw)
631{
632 SegmentCache *dt;
633 target_ulong ptr, ssp;
634 int type, dpl, selector, ss_dpl, cpl;
635 int has_error_code, new_stack, shift;
636 uint32_t e1, e2, offset, ss, esp, ss_e1, ss_e2;
637 uint32_t old_eip, sp_mask;
638
639#ifdef VBOX
640# ifdef VBOX_WITH_VMI
641 if ( intno == 6
642 && PARAVIsBiosCall(env->pVM, (RTRCPTR)next_eip, env->regs[R_EAX]))
643 {
644 env->exception_index = EXCP_PARAV_CALL;
645 cpu_loop_exit();
646 }
647# endif
648 if (remR3NotifyTrap(env, intno, error_code, next_eip) != VINF_SUCCESS)
649 cpu_loop_exit();
650#endif
651
652 has_error_code = 0;
653 if (!is_int && !is_hw) {
654 switch(intno) {
655 case 8:
656 case 10:
657 case 11:
658 case 12:
659 case 13:
660 case 14:
661 case 17:
662 has_error_code = 1;
663 break;
664 }
665 }
666 if (is_int)
667 old_eip = next_eip;
668 else
669 old_eip = env->eip;
670
671 dt = &env->idt;
672 if (intno * 8 + 7 > dt->limit)
673 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
674 ptr = dt->base + intno * 8;
675 e1 = ldl_kernel(ptr);
676 e2 = ldl_kernel(ptr + 4);
677 /* check gate type */
678 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
679 switch(type) {
680 case 5: /* task gate */
681 /* must do that check here to return the correct error code */
682 if (!(e2 & DESC_P_MASK))
683 raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
684 switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
685 if (has_error_code) {
686 int type;
687 uint32_t mask;
688 /* push the error code */
689 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
690 shift = type >> 3;
691 if (env->segs[R_SS].flags & DESC_B_MASK)
692 mask = 0xffffffff;
693 else
694 mask = 0xffff;
695 esp = (ESP - (2 << shift)) & mask;
696 ssp = env->segs[R_SS].base + esp;
697 if (shift)
698 stl_kernel(ssp, error_code);
699 else
700 stw_kernel(ssp, error_code);
701 SET_ESP(esp, mask);
702 }
703 return;
704 case 6: /* 286 interrupt gate */
705 case 7: /* 286 trap gate */
706 case 14: /* 386 interrupt gate */
707 case 15: /* 386 trap gate */
708 break;
709 default:
710 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
711 break;
712 }
713 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
714 cpl = env->hflags & HF_CPL_MASK;
715 /* check privilege if software int */
716 if (is_int && dpl < cpl)
717 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
718 /* check valid bit */
719 if (!(e2 & DESC_P_MASK))
720 raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
721 selector = e1 >> 16;
722 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
723 if ((selector & 0xfffc) == 0)
724 raise_exception_err(EXCP0D_GPF, 0);
725
726 if (load_segment(&e1, &e2, selector) != 0)
727 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
728 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
729 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
730 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
731 if (dpl > cpl)
732 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
733 if (!(e2 & DESC_P_MASK))
734 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
735 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
736 /* to inner privilege */
737 get_ss_esp_from_tss(&ss, &esp, dpl);
738 if ((ss & 0xfffc) == 0)
739 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
740 if ((ss & 3) != dpl)
741 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
742 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
743 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
744 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
745 if (ss_dpl != dpl)
746 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
747 if (!(ss_e2 & DESC_S_MASK) ||
748 (ss_e2 & DESC_CS_MASK) ||
749 !(ss_e2 & DESC_W_MASK))
750 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
751 if (!(ss_e2 & DESC_P_MASK))
752#ifdef VBOX /* See page 3-477 of 253666.pdf */
753 raise_exception_err(EXCP0C_STACK, ss & 0xfffc);
754#else
755 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
756#endif
757 new_stack = 1;
758 sp_mask = get_sp_mask(ss_e2);
759 ssp = get_seg_base(ss_e1, ss_e2);
760#if defined(VBOX) && defined(DEBUG)
761 printf("new stack %04X:%08X gate dpl=%d\n", ss, esp, dpl);
762#endif
763 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
764 /* to same privilege */
765 if (env->eflags & VM_MASK)
766 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
767 new_stack = 0;
768 sp_mask = get_sp_mask(env->segs[R_SS].flags);
769 ssp = env->segs[R_SS].base;
770 esp = ESP;
771 dpl = cpl;
772 } else {
773 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
774 new_stack = 0; /* avoid warning */
775 sp_mask = 0; /* avoid warning */
776 ssp = 0; /* avoid warning */
777 esp = 0; /* avoid warning */
778 }
779
780 shift = type >> 3;
781
782#if 0
783 /* XXX: check that enough room is available */
784 push_size = 6 + (new_stack << 2) + (has_error_code << 1);
785 if (env->eflags & VM_MASK)
786 push_size += 8;
787 push_size <<= shift;
788#endif
789 if (shift == 1) {
790 if (new_stack) {
791 if (env->eflags & VM_MASK) {
792 PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
793 PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
794 PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
795 PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
796 }
797 PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
798 PUSHL(ssp, esp, sp_mask, ESP);
799 }
800 PUSHL(ssp, esp, sp_mask, compute_eflags());
801 PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
802 PUSHL(ssp, esp, sp_mask, old_eip);
803 if (has_error_code) {
804 PUSHL(ssp, esp, sp_mask, error_code);
805 }
806 } else {
807 if (new_stack) {
808 if (env->eflags & VM_MASK) {
809 PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
810 PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
811 PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
812 PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
813 }
814 PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
815 PUSHW(ssp, esp, sp_mask, ESP);
816 }
817 PUSHW(ssp, esp, sp_mask, compute_eflags());
818 PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
819 PUSHW(ssp, esp, sp_mask, old_eip);
820 if (has_error_code) {
821 PUSHW(ssp, esp, sp_mask, error_code);
822 }
823 }
824
825 if (new_stack) {
826 if (env->eflags & VM_MASK) {
827 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
828 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
829 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
830 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
831 }
832 ss = (ss & ~3) | dpl;
833 cpu_x86_load_seg_cache(env, R_SS, ss,
834 ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
835 }
836 SET_ESP(esp, sp_mask);
837
838 selector = (selector & ~3) | dpl;
839 cpu_x86_load_seg_cache(env, R_CS, selector,
840 get_seg_base(e1, e2),
841 get_seg_limit(e1, e2),
842 e2);
843 cpu_x86_set_cpl(env, dpl);
844 env->eip = offset;
845
846 /* interrupt gates clear the IF flag */
847 if ((type & 1) == 0) {
848 env->eflags &= ~IF_MASK;
849 }
850 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
851}
852
853#ifdef VBOX
854
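/* With CR4.VME set, a software INT in V86 mode consults the interrupt
   redirection bitmap, the 32 bytes immediately below the TSS I/O
   bitmap: a clear bit redirects the interrupt through the virtual-mode
   IVT with IOPL virtualization, a set bit forces the protected-mode
   path. */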
855/* check if VME interrupt redirection is enabled in the TSS */
856static inline bool is_vme_irq_redirected(int intno)
857{
858 int io_offset, intredir_offset;
859 unsigned char val, mask;
860
861 /* TSS must be a valid 32 bit one */
862 if (!(env->tr.flags & DESC_P_MASK) ||
863 ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
864 env->tr.limit < 103)
865 goto fail;
866 io_offset = lduw_kernel(env->tr.base + 0x66);
867 /* the virtual interrupt redirection bitmap is located below the io bitmap */
868 intredir_offset = io_offset - 0x20;
869
870 intredir_offset += (intno >> 3);
871 if ((intredir_offset) > env->tr.limit)
872 goto fail;
873
874 val = ldub_kernel(env->tr.base + intredir_offset);
875 mask = 1 << (unsigned char)(intno & 7);
876
877 /* bit set means no redirection. */
878 if ((val & mask) != 0) {
879 return false;
880 }
881 return true;
882
883fail:
884 raise_exception_err(EXCP0D_GPF, 0);
885 return true;
886}
887
888/* V86 mode software interrupt with CR4.VME=1 */
889static void do_soft_interrupt_vme(int intno, int error_code, unsigned int next_eip)
890{
891 target_ulong ptr, ssp;
892 int selector;
893 uint32_t offset, esp;
894 uint32_t old_cs, old_eflags;
895 uint32_t iopl;
896
897 iopl = ((env->eflags >> IOPL_SHIFT) & 3);
898
899 if (!is_vme_irq_redirected(intno))
900 {
901 if (iopl == 3)
902 /* normal protected mode handler call */
903 return do_interrupt_protected(intno, 1, error_code, next_eip, 0);
904 else
905 raise_exception_err(EXCP0D_GPF, 0);
906 }
907
908 /* virtual mode idt is at linear address 0 */
909 ptr = 0 + intno * 4;
910 offset = lduw_kernel(ptr);
911 selector = lduw_kernel(ptr + 2);
912 esp = ESP;
913 ssp = env->segs[R_SS].base;
914 old_cs = env->segs[R_CS].selector;
915
916 old_eflags = compute_eflags();
917 if (iopl < 3)
918 {
919 /* copy VIF into IF and set IOPL to 3 */
920 if (env->eflags & VIF_MASK)
921 old_eflags |= IF_MASK;
922 else
923 old_eflags &= ~IF_MASK;
924
925 old_eflags |= (3 << IOPL_SHIFT);
926 }
927
928 /* XXX: use SS segment size ? */
929 PUSHW(ssp, esp, 0xffff, old_eflags);
930 PUSHW(ssp, esp, 0xffff, old_cs);
931 PUSHW(ssp, esp, 0xffff, next_eip);
932
933 /* update processor state */
934 ESP = (ESP & ~0xffff) | (esp & 0xffff);
935 env->eip = offset;
936 env->segs[R_CS].selector = selector;
937 env->segs[R_CS].base = (selector << 4);
938 env->eflags &= ~(TF_MASK | RF_MASK);
939
940 if (iopl < 3)
941 env->eflags &= ~VIF_MASK;
942 else
943 env->eflags &= ~IF_MASK;
944}
945#endif /* VBOX */
946
947#ifdef TARGET_X86_64
948
949#define PUSHQ(sp, val)\
950{\
951 sp -= 8;\
952 stq_kernel(sp, (val));\
953}
954
955#define POPQ(sp, val)\
956{\
957 val = ldq_kernel(sp);\
958 sp += 8;\
959}
960
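/* NOTE: the 64-bit TSS stores RSP0-RSP2 at offsets 4, 12 and 20 and
   IST1-IST7 from offset 36 onwards; level is the ring for 0-2 and
   ist + 3 for IST stacks, hence index = 8 * level + 4. */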
961static inline target_ulong get_rsp_from_tss(int level)
962{
963 int index;
964
965#if 0
966 printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
967 env->tr.base, env->tr.limit);
968#endif
969
970 if (!(env->tr.flags & DESC_P_MASK))
971 cpu_abort(env, "invalid tss");
972 index = 8 * level + 4;
973 if ((index + 7) > env->tr.limit)
974 raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
975 return ldq_kernel(env->tr.base + index);
976}
977
978/* 64 bit interrupt */
979static void do_interrupt64(int intno, int is_int, int error_code,
980 target_ulong next_eip, int is_hw)
981{
982 SegmentCache *dt;
983 target_ulong ptr;
984 int type, dpl, selector, cpl, ist;
985 int has_error_code, new_stack;
986 uint32_t e1, e2, e3, ss;
987 target_ulong old_eip, esp, offset;
988
989#ifdef VBOX
990 if (remR3NotifyTrap(env, intno, error_code, next_eip) != VINF_SUCCESS)
991 cpu_loop_exit();
992#endif
993
994 has_error_code = 0;
995 if (!is_int && !is_hw) {
996 switch(intno) {
997 case 8:
998 case 10:
999 case 11:
1000 case 12:
1001 case 13:
1002 case 14:
1003 case 17:
1004 has_error_code = 1;
1005 break;
1006 }
1007 }
1008 if (is_int)
1009 old_eip = next_eip;
1010 else
1011 old_eip = env->eip;
1012
1013 dt = &env->idt;
1014 if (intno * 16 + 15 > dt->limit)
1015 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
1016 ptr = dt->base + intno * 16;
1017 e1 = ldl_kernel(ptr);
1018 e2 = ldl_kernel(ptr + 4);
1019 e3 = ldl_kernel(ptr + 8);
1020 /* check gate type */
1021 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
1022 switch(type) {
1023 case 14: /* 386 interrupt gate */
1024 case 15: /* 386 trap gate */
1025 break;
1026 default:
1027 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
1028 break;
1029 }
1030 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1031 cpl = env->hflags & HF_CPL_MASK;
1032 /* check privilege if software int */
1033 if (is_int && dpl < cpl)
1034 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
1035 /* check valid bit */
1036 if (!(e2 & DESC_P_MASK))
1037 raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2);
1038 selector = e1 >> 16;
1039 offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
1040 ist = e2 & 7;
1041 if ((selector & 0xfffc) == 0)
1042 raise_exception_err(EXCP0D_GPF, 0);
1043
1044 if (load_segment(&e1, &e2, selector) != 0)
1045 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1046 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
1047 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1048 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1049 if (dpl > cpl)
1050 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1051 if (!(e2 & DESC_P_MASK))
1052 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
1053 if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))
1054 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1055 if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
1056 /* to inner privilege */
1057 if (ist != 0)
1058 esp = get_rsp_from_tss(ist + 3);
1059 else
1060 esp = get_rsp_from_tss(dpl);
1061 esp &= ~0xfLL; /* align stack */
1062 ss = 0;
1063 new_stack = 1;
1064 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
1065 /* to same privilege */
1066 if (env->eflags & VM_MASK)
1067 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1068 new_stack = 0;
1069 if (ist != 0)
1070 esp = get_rsp_from_tss(ist + 3);
1071 else
1072 esp = ESP;
1073 esp &= ~0xfLL; /* align stack */
1074 dpl = cpl;
1075 } else {
1076 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1077 new_stack = 0; /* avoid warning */
1078 esp = 0; /* avoid warning */
1079 }
1080
1081 PUSHQ(esp, env->segs[R_SS].selector);
1082 PUSHQ(esp, ESP);
1083 PUSHQ(esp, compute_eflags());
1084 PUSHQ(esp, env->segs[R_CS].selector);
1085 PUSHQ(esp, old_eip);
1086 if (has_error_code) {
1087 PUSHQ(esp, error_code);
1088 }
1089
1090 if (new_stack) {
1091 ss = 0 | dpl;
1092 cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
1093 }
1094 ESP = esp;
1095
1096 selector = (selector & ~3) | dpl;
1097 cpu_x86_load_seg_cache(env, R_CS, selector,
1098 get_seg_base(e1, e2),
1099 get_seg_limit(e1, e2),
1100 e2);
1101 cpu_x86_set_cpl(env, dpl);
1102 env->eip = offset;
1103
1104 /* interrupt gates clear the IF flag */
1105 if ((type & 1) == 0) {
1106 env->eflags &= ~IF_MASK;
1107 }
1108 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
1109}
1110#endif
1111
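/* SYSCALL: the kernel CS selector comes from STAR[47:32], with SS
   loaded as CS + 8. In long mode the return RIP is saved in RCX and
   RFLAGS in R11, RFLAGS is masked with SFMASK, and execution continues
   at LSTAR (64-bit caller) or CSTAR (compatibility mode); outside long
   mode the entry point is STAR[31:0] and the return EIP goes to ECX. */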
1112void helper_syscall(int next_eip_addend)
1113{
1114 int selector;
1115
1116 if (!(env->efer & MSR_EFER_SCE)) {
1117 raise_exception_err(EXCP06_ILLOP, 0);
1118 }
1119 selector = (env->star >> 32) & 0xffff;
1120#ifdef TARGET_X86_64
1121 if (env->hflags & HF_LMA_MASK) {
1122 int code64;
1123
1124 ECX = env->eip + next_eip_addend;
1125 env->regs[11] = compute_eflags();
1126
1127 code64 = env->hflags & HF_CS64_MASK;
1128
1129 cpu_x86_set_cpl(env, 0);
1130 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1131 0, 0xffffffff,
1132 DESC_G_MASK | DESC_P_MASK |
1133 DESC_S_MASK |
1134 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
1135 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1136 0, 0xffffffff,
1137 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1138 DESC_S_MASK |
1139 DESC_W_MASK | DESC_A_MASK);
1140 env->eflags &= ~env->fmask;
1141 load_eflags(env->eflags, 0);
1142 if (code64)
1143 env->eip = env->lstar;
1144 else
1145 env->eip = env->cstar;
1146 } else
1147#endif
1148 {
1149 ECX = (uint32_t)(env->eip + next_eip_addend);
1150
1151 cpu_x86_set_cpl(env, 0);
1152 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1153 0, 0xffffffff,
1154 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1155 DESC_S_MASK |
1156 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1157 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1158 0, 0xffffffff,
1159 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1160 DESC_S_MASK |
1161 DESC_W_MASK | DESC_A_MASK);
1162 env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
1163 env->eip = (uint32_t)env->star;
1164 }
1165}
1166
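/* SYSRET: the user CS selector is STAR[63:48] (plus 16 for a 64-bit
   return) with RPL forced to 3, and SS is STAR[63:48] + 8. In long
   mode RFLAGS is restored from R11; outside long mode only IF is
   set. */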
1167void helper_sysret(int dflag)
1168{
1169 int cpl, selector;
1170
1171 if (!(env->efer & MSR_EFER_SCE)) {
1172 raise_exception_err(EXCP06_ILLOP, 0);
1173 }
1174 cpl = env->hflags & HF_CPL_MASK;
1175 if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
1176 raise_exception_err(EXCP0D_GPF, 0);
1177 }
1178 selector = (env->star >> 48) & 0xffff;
1179#ifdef TARGET_X86_64
1180 if (env->hflags & HF_LMA_MASK) {
1181 if (dflag == 2) {
1182 cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
1183 0, 0xffffffff,
1184 DESC_G_MASK | DESC_P_MASK |
1185 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1186 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
1187 DESC_L_MASK);
1188 env->eip = ECX;
1189 } else {
1190 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1191 0, 0xffffffff,
1192 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1193 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1194 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1195 env->eip = (uint32_t)ECX;
1196 }
1197 cpu_x86_load_seg_cache(env, R_SS, selector + 8,
1198 0, 0xffffffff,
1199 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1200 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1201 DESC_W_MASK | DESC_A_MASK);
1202 load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
1203 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
1204 cpu_x86_set_cpl(env, 3);
1205 } else
1206#endif
1207 {
1208 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1209 0, 0xffffffff,
1210 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1211 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1212 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1213 env->eip = (uint32_t)ECX;
1214 cpu_x86_load_seg_cache(env, R_SS, selector + 8,
1215 0, 0xffffffff,
1216 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1217 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1218 DESC_W_MASK | DESC_A_MASK);
1219 env->eflags |= IF_MASK;
1220 cpu_x86_set_cpl(env, 3);
1221 }
1222#ifdef USE_KQEMU
1223 if (kqemu_is_ok(env)) {
1224 if (env->hflags & HF_LMA_MASK)
1225 CC_OP = CC_OP_EFLAGS;
1226 env->exception_index = -1;
1227 cpu_loop_exit();
1228 }
1229#endif
1230}
1231
1232#ifdef VBOX
1233/**
1234 * Checks and processes external VMM events.
1235 * Called by op_check_external_event() when any of the flags is set and can be serviced.
1236 */
1237void helper_external_event(void)
1238{
1239#if defined(RT_OS_DARWIN) && defined(VBOX_STRICT)
1240 uintptr_t uESP;
1241 __asm__ __volatile__("movl %%esp, %0" : "=r" (uESP));
1242 AssertMsg(!(uESP & 15), ("esp=%#p\n", uESP));
1243#endif
1244 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD)
1245 {
1246 ASMAtomicAndS32(&env->interrupt_request, ~CPU_INTERRUPT_EXTERNAL_HARD);
1247 cpu_interrupt(env, CPU_INTERRUPT_HARD);
1248 }
1249 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_EXIT)
1250 {
1251 ASMAtomicAndS32(&env->interrupt_request, ~CPU_INTERRUPT_EXTERNAL_EXIT);
1252 cpu_interrupt(env, CPU_INTERRUPT_EXIT);
1253 }
1254 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_DMA)
1255 {
1256 ASMAtomicAndS32(&env->interrupt_request, ~CPU_INTERRUPT_EXTERNAL_DMA);
1257 remR3DmaRun(env);
1258 }
1259 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_TIMER)
1260 {
1261 ASMAtomicAndS32(&env->interrupt_request, ~CPU_INTERRUPT_EXTERNAL_TIMER);
1262 remR3TimersRun(env);
1263 }
1264}
1265/* helper for recording call instruction addresses for later scanning */
1266void helper_record_call()
1267{
1268 if ( !(env->state & CPU_RAW_RING0)
1269 && (env->cr[0] & CR0_PG_MASK)
1270 && !(env->eflags & X86_EFL_IF))
1271 remR3RecordCall(env);
1272}
1273#endif /* VBOX */
1274
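/* In real mode the IDT base points at the classic interrupt vector
   table: 4 bytes per vector, offset word first, then segment. Only
   FLAGS, CS and IP are pushed, and IF, TF, AC and RF are cleared. */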
1275/* real mode interrupt */
1276static void do_interrupt_real(int intno, int is_int, int error_code,
1277 unsigned int next_eip)
1278{
1279 SegmentCache *dt;
1280 target_ulong ptr, ssp;
1281 int selector;
1282 uint32_t offset, esp;
1283 uint32_t old_cs, old_eip;
1284
1285 /* real mode (simpler!) */
1286 dt = &env->idt;
1287 if (intno * 4 + 3 > dt->limit)
1288 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
1289 ptr = dt->base + intno * 4;
1290 offset = lduw_kernel(ptr);
1291 selector = lduw_kernel(ptr + 2);
1292 esp = ESP;
1293 ssp = env->segs[R_SS].base;
1294 if (is_int)
1295 old_eip = next_eip;
1296 else
1297 old_eip = env->eip;
1298 old_cs = env->segs[R_CS].selector;
1299 /* XXX: use SS segment size ? */
1300 PUSHW(ssp, esp, 0xffff, compute_eflags());
1301 PUSHW(ssp, esp, 0xffff, old_cs);
1302 PUSHW(ssp, esp, 0xffff, old_eip);
1303
1304 /* update processor state */
1305 ESP = (ESP & ~0xffff) | (esp & 0xffff);
1306 env->eip = offset;
1307 env->segs[R_CS].selector = selector;
1308 env->segs[R_CS].base = (selector << 4);
1309 env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
1310}
1311
1312/* fake user mode interrupt */
1313void do_interrupt_user(int intno, int is_int, int error_code,
1314 target_ulong next_eip)
1315{
1316 SegmentCache *dt;
1317 target_ulong ptr;
1318 int dpl, cpl;
1319 uint32_t e2;
1320
1321 dt = &env->idt;
1322 ptr = dt->base + (intno * 8);
1323 e2 = ldl_kernel(ptr + 4);
1324
1325 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1326 cpl = env->hflags & HF_CPL_MASK;
1327 /* check privilege if software int */
1328 if (is_int && dpl < cpl)
1329 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
1330
1331 /* Since we emulate only user space, we cannot do more than
1332 exit the emulation with the suitable exception and error
1333 code */
1334 if (is_int)
1335 EIP = next_eip;
1336}
1337
1338/*
1339 * Begin execution of an interrupt. is_int is TRUE if coming from
1340 * the int instruction. next_eip is the EIP value AFTER the interrupt
1341 * instruction. It is only relevant if is_int is TRUE.
1342 */
1343void do_interrupt(int intno, int is_int, int error_code,
1344 target_ulong next_eip, int is_hw)
1345{
1346 if (loglevel & CPU_LOG_INT) {
1347 if ((env->cr[0] & CR0_PE_MASK)) {
1348 static int count;
1349 fprintf(logfile, "%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
1350 count, intno, error_code, is_int,
1351 env->hflags & HF_CPL_MASK,
1352 env->segs[R_CS].selector, EIP,
1353 (int)env->segs[R_CS].base + EIP,
1354 env->segs[R_SS].selector, ESP);
1355 if (intno == 0x0e) {
1356 fprintf(logfile, " CR2=" TARGET_FMT_lx, env->cr[2]);
1357 } else {
1358 fprintf(logfile, " EAX=" TARGET_FMT_lx, EAX);
1359 }
1360 fprintf(logfile, "\n");
1361 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
1362#if 0
1363 {
1364 int i;
1365 uint8_t *ptr;
1366 fprintf(logfile, " code=");
1367 ptr = env->segs[R_CS].base + env->eip;
1368 for(i = 0; i < 16; i++) {
1369 fprintf(logfile, " %02x", ldub(ptr + i));
1370 }
1371 fprintf(logfile, "\n");
1372 }
1373#endif
1374 count++;
1375 }
1376 }
1377 if (env->cr[0] & CR0_PE_MASK) {
1378#ifdef TARGET_X86_64
1379 if (env->hflags & HF_LMA_MASK) {
1380 do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
1381 } else
1382#endif
1383 {
1384#ifdef VBOX
1385 /* int xx, v86 code and VME enabled? */
1386 if ( (env->eflags & VM_MASK)
1387 && (env->cr[4] & CR4_VME_MASK)
1388 && is_int
1389 && !is_hw
1390 && env->eip + 1 != next_eip /* single byte int 3 goes straight to the protected mode handler */
1391 )
1392 do_soft_interrupt_vme(intno, error_code, next_eip);
1393 else
1394#endif /* VBOX */
1395 do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
1396 }
1397 } else {
1398 do_interrupt_real(intno, is_int, error_code, next_eip);
1399 }
1400}
1401
1402/*
1403 * Signal an interrupt. It is executed in the main CPU loop.
1404 * is_int is TRUE if coming from the int instruction. next_eip is the
1405 * EIP value AFTER the interrupt instruction. It is only relevant if
1406 * is_int is TRUE.
1407 */
1408void raise_interrupt(int intno, int is_int, int error_code,
1409 int next_eip_addend)
1410{
1411#if defined(VBOX) && defined(DEBUG)
1412 NOT_DMIK(Log2(("raise_interrupt: %x %x %x %VGv\n", intno, is_int, error_code, env->eip + next_eip_addend)));
1413#endif
1414 env->exception_index = intno;
1415 env->error_code = error_code;
1416 env->exception_is_int = is_int;
1417 env->exception_next_eip = env->eip + next_eip_addend;
1418 cpu_loop_exit();
1419}
1420
1421/* same as raise_exception_err, but do not restore global registers */
1422static void raise_exception_err_norestore(int exception_index, int error_code)
1423{
1424 env->exception_index = exception_index;
1425 env->error_code = error_code;
1426 env->exception_is_int = 0;
1427 env->exception_next_eip = 0;
1428 longjmp(env->jmp_env, 1);
1429}
1430
1431/* shortcuts to generate exceptions */
1432
1433void (raise_exception_err)(int exception_index, int error_code)
1434{
1435 raise_interrupt(exception_index, 0, error_code, 0);
1436}
1437
1438void raise_exception(int exception_index)
1439{
1440 raise_interrupt(exception_index, 0, 0, 0);
1441}
1442
1443/* SMM support */
1444
1445#if defined(CONFIG_USER_ONLY)
1446
1447void do_smm_enter(void)
1448{
1449}
1450
1451void helper_rsm(void)
1452{
1453}
1454
1455#else
1456
1457#ifdef TARGET_X86_64
1458#define SMM_REVISION_ID 0x00020064
1459#else
1460#define SMM_REVISION_ID 0x00020000
1461#endif
1462
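/* On SMI entry the CPU state is saved in the SMRAM state save area at
   SMBASE + 0x8000; the 0x7exx/0x7fxx offsets below are relative to
   that base. The revision ID at 0x7efc distinguishes the 64-bit and
   legacy layouts, and RSM only relocates SMBASE when bit 17 of that
   dword is set. */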
1463void do_smm_enter(void)
1464{
1465#ifdef VBOX
1466 cpu_abort(env, "do_smm_enter");
1467#else /* !VBOX */
1468 target_ulong sm_state;
1469 SegmentCache *dt;
1470 int i, offset;
1471
1472 if (loglevel & CPU_LOG_INT) {
1473 fprintf(logfile, "SMM: enter\n");
1474 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
1475 }
1476
1477 env->hflags |= HF_SMM_MASK;
1478 cpu_smm_update(env);
1479
1480 sm_state = env->smbase + 0x8000;
1481
1482#ifdef TARGET_X86_64
1483 for(i = 0; i < 6; i++) {
1484 dt = &env->segs[i];
1485 offset = 0x7e00 + i * 16;
1486 stw_phys(sm_state + offset, dt->selector);
1487 stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
1488 stl_phys(sm_state + offset + 4, dt->limit);
1489 stq_phys(sm_state + offset + 8, dt->base);
1490 }
1491
1492 stq_phys(sm_state + 0x7e68, env->gdt.base);
1493 stl_phys(sm_state + 0x7e64, env->gdt.limit);
1494
1495 stw_phys(sm_state + 0x7e70, env->ldt.selector);
1496 stq_phys(sm_state + 0x7e78, env->ldt.base);
1497 stl_phys(sm_state + 0x7e74, env->ldt.limit);
1498 stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);
1499
1500 stq_phys(sm_state + 0x7e88, env->idt.base);
1501 stl_phys(sm_state + 0x7e84, env->idt.limit);
1502
1503 stw_phys(sm_state + 0x7e90, env->tr.selector);
1504 stq_phys(sm_state + 0x7e98, env->tr.base);
1505 stl_phys(sm_state + 0x7e94, env->tr.limit);
1506 stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);
1507
1508 stq_phys(sm_state + 0x7ed0, env->efer);
1509
1510 stq_phys(sm_state + 0x7ff8, EAX);
1511 stq_phys(sm_state + 0x7ff0, ECX);
1512 stq_phys(sm_state + 0x7fe8, EDX);
1513 stq_phys(sm_state + 0x7fe0, EBX);
1514 stq_phys(sm_state + 0x7fd8, ESP);
1515 stq_phys(sm_state + 0x7fd0, EBP);
1516 stq_phys(sm_state + 0x7fc8, ESI);
1517 stq_phys(sm_state + 0x7fc0, EDI);
1518 for(i = 8; i < 16; i++)
1519 stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
1520 stq_phys(sm_state + 0x7f78, env->eip);
1521 stl_phys(sm_state + 0x7f70, compute_eflags());
1522 stl_phys(sm_state + 0x7f68, env->dr[6]);
1523 stl_phys(sm_state + 0x7f60, env->dr[7]);
1524
1525 stl_phys(sm_state + 0x7f48, env->cr[4]);
1526 stl_phys(sm_state + 0x7f50, env->cr[3]);
1527 stl_phys(sm_state + 0x7f58, env->cr[0]);
1528
1529 stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1530 stl_phys(sm_state + 0x7f00, env->smbase);
1531#else
1532 stl_phys(sm_state + 0x7ffc, env->cr[0]);
1533 stl_phys(sm_state + 0x7ff8, env->cr[3]);
1534 stl_phys(sm_state + 0x7ff4, compute_eflags());
1535 stl_phys(sm_state + 0x7ff0, env->eip);
1536 stl_phys(sm_state + 0x7fec, EDI);
1537 stl_phys(sm_state + 0x7fe8, ESI);
1538 stl_phys(sm_state + 0x7fe4, EBP);
1539 stl_phys(sm_state + 0x7fe0, ESP);
1540 stl_phys(sm_state + 0x7fdc, EBX);
1541 stl_phys(sm_state + 0x7fd8, EDX);
1542 stl_phys(sm_state + 0x7fd4, ECX);
1543 stl_phys(sm_state + 0x7fd0, EAX);
1544 stl_phys(sm_state + 0x7fcc, env->dr[6]);
1545 stl_phys(sm_state + 0x7fc8, env->dr[7]);
1546
1547 stl_phys(sm_state + 0x7fc4, env->tr.selector);
1548 stl_phys(sm_state + 0x7f64, env->tr.base);
1549 stl_phys(sm_state + 0x7f60, env->tr.limit);
1550 stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);
1551
1552 stl_phys(sm_state + 0x7fc0, env->ldt.selector);
1553 stl_phys(sm_state + 0x7f80, env->ldt.base);
1554 stl_phys(sm_state + 0x7f7c, env->ldt.limit);
1555 stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);
1556
1557 stl_phys(sm_state + 0x7f74, env->gdt.base);
1558 stl_phys(sm_state + 0x7f70, env->gdt.limit);
1559
1560 stl_phys(sm_state + 0x7f58, env->idt.base);
1561 stl_phys(sm_state + 0x7f54, env->idt.limit);
1562
1563 for(i = 0; i < 6; i++) {
1564 dt = &env->segs[i];
1565 if (i < 3)
1566 offset = 0x7f84 + i * 12;
1567 else
1568 offset = 0x7f2c + (i - 3) * 12;
1569 stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
1570 stl_phys(sm_state + offset + 8, dt->base);
1571 stl_phys(sm_state + offset + 4, dt->limit);
1572 stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
1573 }
1574 stl_phys(sm_state + 0x7f14, env->cr[4]);
1575
1576 stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1577 stl_phys(sm_state + 0x7ef8, env->smbase);
1578#endif
1579 /* init SMM cpu state */
1580
1581#ifdef TARGET_X86_64
1582 env->efer = 0;
1583 env->hflags &= ~HF_LMA_MASK;
1584#endif
1585 load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1586 env->eip = 0x00008000;
1587 cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
1588 0xffffffff, 0);
1589 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
1590 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
1591 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
1592 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
1593 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);
1594
1595 cpu_x86_update_cr0(env,
1596 env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK));
1597 cpu_x86_update_cr4(env, 0);
1598 env->dr[7] = 0x00000400;
1599 CC_OP = CC_OP_EFLAGS;
1600#endif /* VBOX */
1601}
1602
1603void helper_rsm(void)
1604{
1605#ifdef VBOX
1606 cpu_abort(env, "helper_rsm");
1607#else /* !VBOX */
1608 target_ulong sm_state;
1609 int i, offset;
1610 uint32_t val;
1611
1612 sm_state = env->smbase + 0x8000;
1613#ifdef TARGET_X86_64
1614 env->efer = ldq_phys(sm_state + 0x7ed0);
1615 if (env->efer & MSR_EFER_LMA)
1616 env->hflags |= HF_LMA_MASK;
1617 else
1618 env->hflags &= ~HF_LMA_MASK;
1619
1620 for(i = 0; i < 6; i++) {
1621 offset = 0x7e00 + i * 16;
1622 cpu_x86_load_seg_cache(env, i,
1623 lduw_phys(sm_state + offset),
1624 ldq_phys(sm_state + offset + 8),
1625 ldl_phys(sm_state + offset + 4),
1626 (lduw_phys(sm_state + offset + 2) & 0xf0ff) << 8);
1627 }
1628
1629 env->gdt.base = ldq_phys(sm_state + 0x7e68);
1630 env->gdt.limit = ldl_phys(sm_state + 0x7e64);
1631
1632 env->ldt.selector = lduw_phys(sm_state + 0x7e70);
1633 env->ldt.base = ldq_phys(sm_state + 0x7e78);
1634 env->ldt.limit = ldl_phys(sm_state + 0x7e74);
1635 env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;
1636
1637 env->idt.base = ldq_phys(sm_state + 0x7e88);
1638 env->idt.limit = ldl_phys(sm_state + 0x7e84);
1639
1640 env->tr.selector = lduw_phys(sm_state + 0x7e90);
1641 env->tr.base = ldq_phys(sm_state + 0x7e98);
1642 env->tr.limit = ldl_phys(sm_state + 0x7e94);
1643 env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;
1644
1645 EAX = ldq_phys(sm_state + 0x7ff8);
1646 ECX = ldq_phys(sm_state + 0x7ff0);
1647 EDX = ldq_phys(sm_state + 0x7fe8);
1648 EBX = ldq_phys(sm_state + 0x7fe0);
1649 ESP = ldq_phys(sm_state + 0x7fd8);
1650 EBP = ldq_phys(sm_state + 0x7fd0);
1651 ESI = ldq_phys(sm_state + 0x7fc8);
1652 EDI = ldq_phys(sm_state + 0x7fc0);
1653 for(i = 8; i < 16; i++)
1654 env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
1655 env->eip = ldq_phys(sm_state + 0x7f78);
1656 load_eflags(ldl_phys(sm_state + 0x7f70),
1657 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1658 env->dr[6] = ldl_phys(sm_state + 0x7f68);
1659 env->dr[7] = ldl_phys(sm_state + 0x7f60);
1660
1661 cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
1662 cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
1663 cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));
1664
1665 val = ldl_phys(sm_state + 0x7efc); /* revision ID */
1666 if (val & 0x20000) {
1667 env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
1668 }
1669#else
1670 cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
1671 cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
1672 load_eflags(ldl_phys(sm_state + 0x7ff4),
1673 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1674 env->eip = ldl_phys(sm_state + 0x7ff0);
1675 EDI = ldl_phys(sm_state + 0x7fec);
1676 ESI = ldl_phys(sm_state + 0x7fe8);
1677 EBP = ldl_phys(sm_state + 0x7fe4);
1678 ESP = ldl_phys(sm_state + 0x7fe0);
1679 EBX = ldl_phys(sm_state + 0x7fdc);
1680 EDX = ldl_phys(sm_state + 0x7fd8);
1681 ECX = ldl_phys(sm_state + 0x7fd4);
1682 EAX = ldl_phys(sm_state + 0x7fd0);
1683 env->dr[6] = ldl_phys(sm_state + 0x7fcc);
1684 env->dr[7] = ldl_phys(sm_state + 0x7fc8);
1685
1686 env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
1687 env->tr.base = ldl_phys(sm_state + 0x7f64);
1688 env->tr.limit = ldl_phys(sm_state + 0x7f60);
1689 env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;
1690
1691 env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
1692 env->ldt.base = ldl_phys(sm_state + 0x7f80);
1693 env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
1694 env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;
1695
1696 env->gdt.base = ldl_phys(sm_state + 0x7f74);
1697 env->gdt.limit = ldl_phys(sm_state + 0x7f70);
1698
1699 env->idt.base = ldl_phys(sm_state + 0x7f58);
1700 env->idt.limit = ldl_phys(sm_state + 0x7f54);
1701
1702 for(i = 0; i < 6; i++) {
1703 if (i < 3)
1704 offset = 0x7f84 + i * 12;
1705 else
1706 offset = 0x7f2c + (i - 3) * 12;
1707 cpu_x86_load_seg_cache(env, i,
1708 ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
1709 ldl_phys(sm_state + offset + 8),
1710 ldl_phys(sm_state + offset + 4),
1711 (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
1712 }
1713 cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));
1714
1715 val = ldl_phys(sm_state + 0x7efc); /* revision ID */
1716 if (val & 0x20000) {
1717 env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
1718 }
1719#endif
1720 CC_OP = CC_OP_EFLAGS;
1721 env->hflags &= ~HF_SMM_MASK;
1722 cpu_smm_update(env);
1723
1724 if (loglevel & CPU_LOG_INT) {
1725 fprintf(logfile, "SMM: after RSM\n");
1726 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
1727 }
1728#endif /* !VBOX */
1729}
1730
1731#endif /* !CONFIG_USER_ONLY */
1732
1733
1734#ifdef BUGGY_GCC_DIV64
1735/* gcc 2.95.4 on PowerPC does not seem to like using __udivdi3, so we
1736 call it from another function */
1737uint32_t div32(uint64_t *q_ptr, uint64_t num, uint32_t den)
1738{
1739 *q_ptr = num / den;
1740 return num % den;
1741}
1742
1743int32_t idiv32(int64_t *q_ptr, int64_t num, int32_t den)
1744{
1745 *q_ptr = num / den;
1746 return num % den;
1747}
1748#endif
1749
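/* 32-bit DIV/IDIV divide EDX:EAX by the operand; a zero divisor, or a
   quotient that does not fit in 32 bits, raises #DE (EXCP00_DIVZ
   here). */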
1750void helper_divl_EAX_T0(void)
1751{
1752 unsigned int den, r;
1753 uint64_t num, q;
1754
1755 num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
1756 den = T0;
1757 if (den == 0) {
1758 raise_exception(EXCP00_DIVZ);
1759 }
1760#ifdef BUGGY_GCC_DIV64
1761 r = div32(&q, num, den);
1762#else
1763 q = (num / den);
1764 r = (num % den);
1765#endif
1766 if (q > 0xffffffff)
1767 raise_exception(EXCP00_DIVZ);
1768 EAX = (uint32_t)q;
1769 EDX = (uint32_t)r;
1770}
1771
1772void helper_idivl_EAX_T0(void)
1773{
1774 int den, r;
1775 int64_t num, q;
1776
1777 num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
1778 den = T0;
1779 if (den == 0) {
1780 raise_exception(EXCP00_DIVZ);
1781 }
1782#ifdef BUGGY_GCC_DIV64
1783 r = idiv32(&q, num, den);
1784#else
1785 q = (num / den);
1786 r = (num % den);
1787#endif
1788 if (q != (int32_t)q)
1789 raise_exception(EXCP00_DIVZ);
1790 EAX = (uint32_t)q;
1791 EDX = (uint32_t)r;
1792}
1793
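/* CMPXCHG8B: if the 64-bit memory operand equals EDX:EAX, store
   ECX:EBX and set ZF; otherwise load the operand into EDX:EAX and
   clear ZF. The store in the failure path mirrors the unconditional
   locked write-back the real instruction performs. */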
1794void helper_cmpxchg8b(void)
1795{
1796 uint64_t d;
1797 int eflags;
1798
1799 eflags = cc_table[CC_OP].compute_all();
1800 d = ldq(A0);
1801 if (d == (((uint64_t)EDX << 32) | EAX)) {
1802 stq(A0, ((uint64_t)ECX << 32) | EBX);
1803 eflags |= CC_Z;
1804 } else {
1805 /* always do the store */
1806 stq(A0, d);
1807 EDX = (uint32_t)(d >> 32);
1808 EAX = (uint32_t)d;
1809 eflags &= ~CC_Z;
1810 }
1811 CC_SRC = eflags;
1812}
1813
1814void helper_single_step()
1815{
1816 env->dr[6] |= 0x4000;
1817 raise_exception(EXCP01_SSTP);
1818}
1819
1820void helper_cpuid(void)
1821{
1822#ifndef VBOX
1823 uint32_t index;
1824 index = (uint32_t)EAX;
1825
1826 /* test if maximum index reached */
1827 if (index & 0x80000000) {
1828 if (index > env->cpuid_xlevel)
1829 index = env->cpuid_level;
1830 } else {
1831 if (index > env->cpuid_level)
1832 index = env->cpuid_level;
1833 }
1834
1835 switch(index) {
1836 case 0:
1837 EAX = env->cpuid_level;
1838 EBX = env->cpuid_vendor1;
1839 EDX = env->cpuid_vendor2;
1840 ECX = env->cpuid_vendor3;
1841 break;
1842 case 1:
1843 EAX = env->cpuid_version;
1844 EBX = 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
1845 ECX = env->cpuid_ext_features;
1846 EDX = env->cpuid_features;
1847 break;
1848 case 2:
1849 /* cache info: needed for Pentium Pro compatibility */
1850 EAX = 0x410601;
1851 EBX = 0;
1852 ECX = 0;
1853 EDX = 0;
1854 break;
1855 case 0x80000000:
1856 EAX = env->cpuid_xlevel;
1857 EBX = env->cpuid_vendor1;
1858 EDX = env->cpuid_vendor2;
1859 ECX = env->cpuid_vendor3;
1860 break;
1861 case 0x80000001:
1862 EAX = env->cpuid_features;
1863 EBX = 0;
1864 ECX = 0;
1865 EDX = env->cpuid_ext2_features;
1866 break;
1867 case 0x80000002:
1868 case 0x80000003:
1869 case 0x80000004:
1870 EAX = env->cpuid_model[(index - 0x80000002) * 4 + 0];
1871 EBX = env->cpuid_model[(index - 0x80000002) * 4 + 1];
1872 ECX = env->cpuid_model[(index - 0x80000002) * 4 + 2];
1873 EDX = env->cpuid_model[(index - 0x80000002) * 4 + 3];
1874 break;
1875 case 0x80000005:
1876 /* cache info (L1 cache) */
1877 EAX = 0x01ff01ff;
1878 EBX = 0x01ff01ff;
1879 ECX = 0x40020140;
1880 EDX = 0x40020140;
1881 break;
1882 case 0x80000006:
1883 /* cache info (L2 cache) */
1884 EAX = 0;
1885 EBX = 0x42004200;
1886 ECX = 0x02008140;
1887 EDX = 0;
1888 break;
1889 case 0x80000008:
1890 /* virtual & phys address size in low 2 bytes. */
1891 EAX = 0x00003028;
1892 EBX = 0;
1893 ECX = 0;
1894 EDX = 0;
1895 break;
1896 default:
1897 /* reserved values: zero */
1898 EAX = 0;
1899 EBX = 0;
1900 ECX = 0;
1901 EDX = 0;
1902 break;
1903 }
1904#else /* VBOX */
1905 remR3CpuId(env, EAX, &EAX, &EBX, &ECX, &EDX);
1906#endif /* VBOX */
1907}
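
/*
 * Illustrative sketch (editor's example): CPUID leaf 1 reports the CLFLUSH
 * line size in EBX bits 15:8, counted in 8-byte units, so the "8 << 8"
 * above advertises a 64-byte cache line.
 */
#if 0 /* example only */
static unsigned clflush_line_size_example(unsigned ebx)
{
    return ((ebx >> 8) & 0xff) * 8;     /* line size in bytes */
}
#endif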
1908
1909void helper_enter_level(int level, int data32)
1910{
1911 target_ulong ssp;
1912 uint32_t esp_mask, esp, ebp;
1913
1914 esp_mask = get_sp_mask(env->segs[R_SS].flags);
1915 ssp = env->segs[R_SS].base;
1916 ebp = EBP;
1917 esp = ESP;
1918 if (data32) {
1919 /* 32 bit */
1920 esp -= 4;
1921 while (--level) {
1922 esp -= 4;
1923 ebp -= 4;
1924 stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
1925 }
1926 esp -= 4;
1927 stl(ssp + (esp & esp_mask), T1);
1928 } else {
1929 /* 16 bit */
1930 esp -= 2;
1931 while (--level) {
1932 esp -= 2;
1933 ebp -= 2;
1934 stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
1935 }
1936 esp -= 2;
1937 stw(ssp + (esp & esp_mask), T1);
1938 }
1939}
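
/*
 * Illustrative sketch (editor's example): the architectural ENTER pseudo-code
 * that the helper above implements the middle of (the translated code pushes
 * the old EBP and supplies the frame temporary in T1).  32-bit operand size,
 * with the stack modelled as a word array indexed downwards by esp.
 */
#if 0 /* example only */
#include <stdint.h>

static void enter32_example(uint32_t *stack, uint32_t *esp, uint32_t *ebp,
                            int level, uint32_t size_in_words)
{
    uint32_t frame_temp;
    int i;

    stack[--*esp] = *ebp;               /* push EBP */
    frame_temp = *esp;
    for (i = 1; i < level; i++) {       /* copy the outer display slots */
        *ebp -= 1;
        stack[--*esp] = stack[*ebp];
    }
    if (level > 0)
        stack[--*esp] = frame_temp;     /* push the new frame pointer */
    *ebp = frame_temp;
    *esp -= size_in_words;              /* allocate the local area */
}
#endif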
1940
1941#ifdef TARGET_X86_64
1942void helper_enter64_level(int level, int data64)
1943{
1944 target_ulong esp, ebp;
1945 ebp = EBP;
1946 esp = ESP;
1947
1948 if (data64) {
1949 /* 64 bit */
1950 esp -= 8;
1951 while (--level) {
1952 esp -= 8;
1953 ebp -= 8;
1954 stq(esp, ldq(ebp));
1955 }
1956 esp -= 8;
1957 stq(esp, T1);
1958 } else {
1959 /* 16 bit */
1960 esp -= 2;
1961 while (--level) {
1962 esp -= 2;
1963 ebp -= 2;
1964 stw(esp, lduw(ebp));
1965 }
1966 esp -= 2;
1967 stw(esp, T1);
1968 }
1969}
1970#endif
1971
1972void helper_lldt_T0(void)
1973{
1974 int selector;
1975 SegmentCache *dt;
1976 uint32_t e1, e2;
1977 int index, entry_limit;
1978 target_ulong ptr;
1979#ifdef VBOX
1980 Log(("helper_lldt_T0: old ldtr=%RTsel {.base=%VGv, .limit=%VGv} new=%RTsel\n",
1981 (RTSEL)env->ldt.selector, (RTGCPTR)env->ldt.base, (RTGCPTR)env->ldt.limit, (RTSEL)(T0 & 0xffff)));
1982#endif
1983
1984 selector = T0 & 0xffff;
1985 if ((selector & 0xfffc) == 0) {
1986 /* XXX: NULL selector case: invalid LDT */
1987 env->ldt.base = 0;
1988 env->ldt.limit = 0;
1989 } else {
1990 if (selector & 0x4)
1991 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1992 dt = &env->gdt;
1993 index = selector & ~7;
1994#ifdef TARGET_X86_64
1995 if (env->hflags & HF_LMA_MASK)
1996 entry_limit = 15;
1997 else
1998#endif
1999 entry_limit = 7;
2000 if ((index + entry_limit) > dt->limit)
2001 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2002 ptr = dt->base + index;
2003 e1 = ldl_kernel(ptr);
2004 e2 = ldl_kernel(ptr + 4);
2005 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
2006 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2007 if (!(e2 & DESC_P_MASK))
2008 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2009#ifdef TARGET_X86_64
2010 if (env->hflags & HF_LMA_MASK) {
2011 uint32_t e3;
2012 e3 = ldl_kernel(ptr + 8);
2013 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2014 env->ldt.base |= (target_ulong)e3 << 32;
2015 } else
2016#endif
2017 {
2018 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2019 }
2020 }
2021 env->ldt.selector = selector;
2022#ifdef VBOX
2023 Log(("helper_lldt_T0: new ldtr=%RTsel {.base=%VGv, .limit=%VGv}\n",
2024 (RTSEL)env->ldt.selector, (RTGCPTR)env->ldt.base, (RTGCPTR)env->ldt.limit));
2025#endif
2026}
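
/*
 * Illustrative sketch (editor's example): how the base and limit are
 * scattered across the two descriptor dwords e1/e2 that the LDT/TR loaders
 * read, equivalent in spirit to the get_seg_base()/get_seg_limit() helpers
 * used elsewhere in this file.
 */
#if 0 /* example only */
#include <stdint.h>

static uint32_t desc_base_example(uint32_t e1, uint32_t e2)
{
    /* base 15..0 in e1[31:16], base 23..16 in e2[7:0], base 31..24 in e2[31:24] */
    return (e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000);
}

static uint32_t desc_limit_example(uint32_t e1, uint32_t e2)
{
    /* limit 15..0 in e1[15:0], limit 19..16 in e2[19:16] */
    uint32_t limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & (1u << 23))                /* G bit: 4K granularity */
        limit = (limit << 12) | 0xfff;
    return limit;
}
#endif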
2027
2028void helper_ltr_T0(void)
2029{
2030 int selector;
2031 SegmentCache *dt;
2032 uint32_t e1, e2;
2033 int index, type, entry_limit;
2034 target_ulong ptr;
2035
2036#ifdef VBOX
2037 Log(("helper_ltr_T0: old tr=%RTsel {.base=%VGv, .limit=%VGv, .flags=%RX32} new=%RTsel\n",
2038 (RTSEL)env->tr.selector, (RTGCPTR)env->tr.base, (RTGCPTR)env->tr.limit,
2039 env->tr.flags, (RTSEL)(T0 & 0xffff)));
2040#endif
2041
2042 selector = T0 & 0xffff;
2043 if ((selector & 0xfffc) == 0) {
2044 /* NULL selector case: invalid TR */
2045 env->tr.base = 0;
2046 env->tr.limit = 0;
2047 env->tr.flags = 0;
2048 } else {
2049 if (selector & 0x4)
2050 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2051 dt = &env->gdt;
2052 index = selector & ~7;
2053#ifdef TARGET_X86_64
2054 if (env->hflags & HF_LMA_MASK)
2055 entry_limit = 15;
2056 else
2057#endif
2058 entry_limit = 7;
2059 if ((index + entry_limit) > dt->limit)
2060 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2061 ptr = dt->base + index;
2062 e1 = ldl_kernel(ptr);
2063 e2 = ldl_kernel(ptr + 4);
2064 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2065 if ((e2 & DESC_S_MASK) ||
2066 (type != 1 && type != 9))
2067 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2068 if (!(e2 & DESC_P_MASK))
2069 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2070#ifdef TARGET_X86_64
2071 if (env->hflags & HF_LMA_MASK) {
2072 uint32_t e3;
2073 e3 = ldl_kernel(ptr + 8);
2074 load_seg_cache_raw_dt(&env->tr, e1, e2);
2075 env->tr.base |= (target_ulong)e3 << 32;
2076 } else
2077#endif
2078 {
2079 load_seg_cache_raw_dt(&env->tr, e1, e2);
2080 }
2081 e2 |= DESC_TSS_BUSY_MASK;
2082 stl_kernel(ptr + 4, e2);
2083 }
2084 env->tr.selector = selector;
2085#ifdef VBOX
2086 Log(("helper_ltr_T0: new tr=%RTsel {.base=%VGv, .limit=%VGv, .flags=%RX32} new=%RTsel\n",
2087 (RTSEL)env->tr.selector, (RTGCPTR)env->tr.base, (RTGCPTR)env->tr.limit,
2088 env->tr.flags, (RTSEL)(T0 & 0xffff)));
2089#endif
2090}
2091
2092/* only works in protected mode and not in VM86 mode. seg_reg must be != R_CS */
2093void load_seg(int seg_reg, int selector)
2094{
2095 uint32_t e1, e2;
2096 int cpl, dpl, rpl;
2097 SegmentCache *dt;
2098 int index;
2099 target_ulong ptr;
2100
2101 selector &= 0xffff;
2102 cpl = env->hflags & HF_CPL_MASK;
2103
2104#ifdef VBOX
2105 /* Trying to load a selector with RPL=1 while running raw ring 0 code? */
2106 if (cpl == 0 && (selector & 3) == 1 && (env->state & CPU_RAW_RING0))
2107 {
2108 Log(("RPL 1 -> sel %04X -> %04X\n", selector, selector & 0xfffc));
2109 selector = selector & 0xfffc;
2110 }
2111#endif
2112
2113 if ((selector & 0xfffc) == 0) {
2114 /* null selector case */
2115 if (seg_reg == R_SS
2116#ifdef TARGET_X86_64
2117 && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
2118#endif
2119 )
2120 raise_exception_err(EXCP0D_GPF, 0);
2121 cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
2122 } else {
2123
2124 if (selector & 0x4)
2125 dt = &env->ldt;
2126 else
2127 dt = &env->gdt;
2128 index = selector & ~7;
2129 if ((index + 7) > dt->limit)
2130 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2131 ptr = dt->base + index;
2132 e1 = ldl_kernel(ptr);
2133 e2 = ldl_kernel(ptr + 4);
2134
2135 if (!(e2 & DESC_S_MASK))
2136 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2137 rpl = selector & 3;
2138 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2139 if (seg_reg == R_SS) {
2140 /* must be writable segment */
2141 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
2142 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2143 if (rpl != cpl || dpl != cpl)
2144 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2145 } else {
2146 /* must be readable segment */
2147 if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
2148 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2149
2150 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2151 /* if not conforming code, test rights */
2152 if (dpl < cpl || dpl < rpl)
2153 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2154 }
2155 }
2156
2157 if (!(e2 & DESC_P_MASK)) {
2158 if (seg_reg == R_SS)
2159 raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
2160 else
2161 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2162 }
2163
2164 /* set the access bit if not already set */
2165 if (!(e2 & DESC_A_MASK)) {
2166 e2 |= DESC_A_MASK;
2167 stl_kernel(ptr + 4, e2);
2168 }
2169
2170 cpu_x86_load_seg_cache(env, seg_reg, selector,
2171 get_seg_base(e1, e2),
2172 get_seg_limit(e1, e2),
2173 e2);
2174#if 0
2175 fprintf(logfile, "load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
2176 selector, (unsigned long)env->segs[seg_reg].base, env->segs[seg_reg].limit, env->segs[seg_reg].flags);
2177#endif
2178 }
2179}
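
/*
 * Illustrative sketch (editor's example): the privilege test load_seg()
 * applies to data segments and non-conforming code segments, stated as a
 * single predicate -- the load succeeds iff DPL >= max(CPL, RPL).
 * Conforming code segments skip the test entirely.
 */
#if 0 /* example only */
static int data_seg_loadable_example(int cpl, int rpl, int dpl)
{
    int eff = cpl > rpl ? cpl : rpl;    /* max(CPL, RPL) */
    return dpl >= eff;
}
#endif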
2180
2181/* protected mode jump */
2182void helper_ljmp_protected_T0_T1(int next_eip_addend)
2183{
2184 int new_cs, gate_cs, type;
2185 uint32_t e1, e2, cpl, dpl, rpl, limit;
2186 target_ulong new_eip, next_eip;
2187
2188 new_cs = T0;
2189 new_eip = T1;
2190 if ((new_cs & 0xfffc) == 0)
2191 raise_exception_err(EXCP0D_GPF, 0);
2192 if (load_segment(&e1, &e2, new_cs) != 0)
2193 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2194 cpl = env->hflags & HF_CPL_MASK;
2195 if (e2 & DESC_S_MASK) {
2196 if (!(e2 & DESC_CS_MASK))
2197 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2198 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2199 if (e2 & DESC_C_MASK) {
2200 /* conforming code segment */
2201 if (dpl > cpl)
2202 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2203 } else {
2204 /* non conforming code segment */
2205 rpl = new_cs & 3;
2206 if (rpl > cpl)
2207 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2208 if (dpl != cpl)
2209 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2210 }
2211 if (!(e2 & DESC_P_MASK))
2212 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2213 limit = get_seg_limit(e1, e2);
2214 if (new_eip > limit &&
2215 !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK))
2216 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2217 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2218 get_seg_base(e1, e2), limit, e2);
2219 EIP = new_eip;
2220 } else {
2221 /* jump to call or task gate */
2222 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2223 rpl = new_cs & 3;
2224 cpl = env->hflags & HF_CPL_MASK;
2225 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2226 switch(type) {
2227 case 1: /* 286 TSS */
2228 case 9: /* 386 TSS */
2229 case 5: /* task gate */
2230 if (dpl < cpl || dpl < rpl)
2231 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2232 next_eip = env->eip + next_eip_addend;
2233 switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
2234 CC_OP = CC_OP_EFLAGS;
2235 break;
2236 case 4: /* 286 call gate */
2237 case 12: /* 386 call gate */
2238 if ((dpl < cpl) || (dpl < rpl))
2239 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2240 if (!(e2 & DESC_P_MASK))
2241 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2242 gate_cs = e1 >> 16;
2243 new_eip = (e1 & 0xffff);
2244 if (type == 12)
2245 new_eip |= (e2 & 0xffff0000);
2246 if (load_segment(&e1, &e2, gate_cs) != 0)
2247 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2248 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2249 /* must be code segment */
2250 if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
2251 (DESC_S_MASK | DESC_CS_MASK)))
2252 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2253 if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
2254 (!(e2 & DESC_C_MASK) && (dpl != cpl)))
2255 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2256 if (!(e2 & DESC_P_MASK))
2257#ifdef VBOX /* See page 3-514 of 253666.pdf */
2258 raise_exception_err(EXCP0B_NOSEG, gate_cs & 0xfffc);
2259#else
2260 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2261#endif
2262 limit = get_seg_limit(e1, e2);
2263 if (new_eip > limit)
2264 raise_exception_err(EXCP0D_GPF, 0);
2265 cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
2266 get_seg_base(e1, e2), limit, e2);
2267 EIP = new_eip;
2268 break;
2269 default:
2270 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2271 break;
2272 }
2273 }
2274}
2275
2276/* real mode call */
2277void helper_lcall_real_T0_T1(int shift, int next_eip)
2278{
2279 int new_cs, new_eip;
2280 uint32_t esp, esp_mask;
2281 target_ulong ssp;
2282
2283 new_cs = T0;
2284 new_eip = T1;
2285 esp = ESP;
2286 esp_mask = get_sp_mask(env->segs[R_SS].flags);
2287 ssp = env->segs[R_SS].base;
2288 if (shift) {
2289 PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
2290 PUSHL(ssp, esp, esp_mask, next_eip);
2291 } else {
2292 PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
2293 PUSHW(ssp, esp, esp_mask, next_eip);
2294 }
2295
2296 SET_ESP(esp, esp_mask);
2297 env->eip = new_eip;
2298 env->segs[R_CS].selector = new_cs;
2299 env->segs[R_CS].base = (new_cs << 4);
2300}
2301
2302/* protected mode call */
2303void helper_lcall_protected_T0_T1(int shift, int next_eip_addend)
2304{
2305 int new_cs, new_stack, i;
2306 uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
2307 uint32_t ss, ss_e1, ss_e2, sp, type, ss_dpl, sp_mask;
2308 uint32_t val, limit, old_sp_mask;
2309 target_ulong ssp, old_ssp, next_eip, new_eip;
2310
2311 new_cs = T0;
2312 new_eip = T1;
2313 next_eip = env->eip + next_eip_addend;
2314#ifdef DEBUG_PCALL
2315 if (loglevel & CPU_LOG_PCALL) {
2316 fprintf(logfile, "lcall %04x:%08x s=%d\n",
2317 new_cs, (uint32_t)new_eip, shift);
2318 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
2319 }
2320#endif
2321 if ((new_cs & 0xfffc) == 0)
2322 raise_exception_err(EXCP0D_GPF, 0);
2323 if (load_segment(&e1, &e2, new_cs) != 0)
2324 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2325 cpl = env->hflags & HF_CPL_MASK;
2326#ifdef DEBUG_PCALL
2327 if (loglevel & CPU_LOG_PCALL) {
2328 fprintf(logfile, "desc=%08x:%08x\n", e1, e2);
2329 }
2330#endif
2331 if (e2 & DESC_S_MASK) {
2332 if (!(e2 & DESC_CS_MASK))
2333 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2334 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2335 if (e2 & DESC_C_MASK) {
2336 /* conforming code segment */
2337 if (dpl > cpl)
2338 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2339 } else {
2340 /* non conforming code segment */
2341 rpl = new_cs & 3;
2342 if (rpl > cpl)
2343 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2344 if (dpl != cpl)
2345 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2346 }
2347 if (!(e2 & DESC_P_MASK))
2348 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2349
2350#ifdef TARGET_X86_64
2351 /* XXX: check 16/32 bit cases in long mode */
2352 if (shift == 2) {
2353 target_ulong rsp;
2354 /* 64 bit case */
2355 rsp = ESP;
2356 PUSHQ(rsp, env->segs[R_CS].selector);
2357 PUSHQ(rsp, next_eip);
2358 /* from this point, not restartable */
2359 ESP = rsp;
2360 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2361 get_seg_base(e1, e2),
2362 get_seg_limit(e1, e2), e2);
2363 EIP = new_eip;
2364 } else
2365#endif
2366 {
2367 sp = ESP;
2368 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2369 ssp = env->segs[R_SS].base;
2370 if (shift) {
2371 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2372 PUSHL(ssp, sp, sp_mask, next_eip);
2373 } else {
2374 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2375 PUSHW(ssp, sp, sp_mask, next_eip);
2376 }
2377
2378 limit = get_seg_limit(e1, e2);
2379 if (new_eip > limit)
2380 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2381 /* from this point, not restartable */
2382 SET_ESP(sp, sp_mask);
2383 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2384 get_seg_base(e1, e2), limit, e2);
2385 EIP = new_eip;
2386 }
2387 } else {
2388 /* check gate type */
2389 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
2390 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2391 rpl = new_cs & 3;
2392 switch(type) {
2393 case 1: /* available 286 TSS */
2394 case 9: /* available 386 TSS */
2395 case 5: /* task gate */
2396 if (dpl < cpl || dpl < rpl)
2397 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2398 switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
2399 CC_OP = CC_OP_EFLAGS;
2400 return;
2401 case 4: /* 286 call gate */
2402 case 12: /* 386 call gate */
2403 break;
2404 default:
2405 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2406 break;
2407 }
2408 shift = type >> 3;
2409
2410 if (dpl < cpl || dpl < rpl)
2411 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2412 /* check valid bit */
2413 if (!(e2 & DESC_P_MASK))
2414 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2415 selector = e1 >> 16;
2416 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
2417 param_count = e2 & 0x1f;
2418 if ((selector & 0xfffc) == 0)
2419 raise_exception_err(EXCP0D_GPF, 0);
2420
2421 if (load_segment(&e1, &e2, selector) != 0)
2422 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2423 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
2424 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2425 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2426 if (dpl > cpl)
2427 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2428 if (!(e2 & DESC_P_MASK))
2429 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2430
2431 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
2432 /* to inner privilege */
2433 get_ss_esp_from_tss(&ss, &sp, dpl);
2434#ifdef DEBUG_PCALL
2435 if (loglevel & CPU_LOG_PCALL)
2436 fprintf(logfile, "new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n",
2437 ss, sp, param_count, ESP);
2438#endif
2439 if ((ss & 0xfffc) == 0)
2440 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2441 if ((ss & 3) != dpl)
2442 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2443 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
2444 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2445 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2446 if (ss_dpl != dpl)
2447 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2448 if (!(ss_e2 & DESC_S_MASK) ||
2449 (ss_e2 & DESC_CS_MASK) ||
2450 !(ss_e2 & DESC_W_MASK))
2451 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2452 if (!(ss_e2 & DESC_P_MASK))
2453#ifdef VBOX /* See page 3-99 of 253666.pdf */
2454 raise_exception_err(EXCP0C_STACK, ss & 0xfffc);
2455#else
2456 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2457#endif
2458
2459 // push_size = ((param_count * 2) + 8) << shift;
2460
2461 old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
2462 old_ssp = env->segs[R_SS].base;
2463
2464 sp_mask = get_sp_mask(ss_e2);
2465 ssp = get_seg_base(ss_e1, ss_e2);
2466 if (shift) {
2467 PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
2468 PUSHL(ssp, sp, sp_mask, ESP);
2469 for(i = param_count - 1; i >= 0; i--) {
2470 val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
2471 PUSHL(ssp, sp, sp_mask, val);
2472 }
2473 } else {
2474 PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
2475 PUSHW(ssp, sp, sp_mask, ESP);
2476 for(i = param_count - 1; i >= 0; i--) {
2477 val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
2478 PUSHW(ssp, sp, sp_mask, val);
2479 }
2480 }
2481 new_stack = 1;
2482 } else {
2483 /* to same privilege */
2484 sp = ESP;
2485 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2486 ssp = env->segs[R_SS].base;
2487 // push_size = (4 << shift);
2488 new_stack = 0;
2489 }
2490
2491 if (shift) {
2492 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2493 PUSHL(ssp, sp, sp_mask, next_eip);
2494 } else {
2495 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2496 PUSHW(ssp, sp, sp_mask, next_eip);
2497 }
2498
2499 /* from this point, not restartable */
2500
2501 if (new_stack) {
2502 ss = (ss & ~3) | dpl;
2503 cpu_x86_load_seg_cache(env, R_SS, ss,
2504 ssp,
2505 get_seg_limit(ss_e1, ss_e2),
2506 ss_e2);
2507 }
2508
2509 selector = (selector & ~3) | dpl;
2510 cpu_x86_load_seg_cache(env, R_CS, selector,
2511 get_seg_base(e1, e2),
2512 get_seg_limit(e1, e2),
2513 e2);
2514 cpu_x86_set_cpl(env, dpl);
2515 SET_ESP(sp, sp_mask);
2516 EIP = offset;
2517 }
2518#ifdef USE_KQEMU
2519 if (kqemu_is_ok(env)) {
2520 env->exception_index = -1;
2521 cpu_loop_exit();
2522 }
2523#endif
2524}
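
/*
 * Illustrative sketch (editor's example): the field layout of a 386 call
 * gate as extracted by helper_lcall_protected() above -- target selector in
 * e1[31:16], offset split between e1[15:0] and e2[31:16], parameter count
 * in e2[4:0], DPL and P in the usual descriptor positions.
 */
#if 0 /* example only */
#include <stdint.h>

struct call_gate_example {
    uint16_t selector;      /* target code segment selector */
    uint32_t offset;        /* target EIP */
    unsigned param_count;   /* words/dwords copied to the inner stack */
    unsigned dpl;
    int      present;
};

static struct call_gate_example decode_call_gate_example(uint32_t e1, uint32_t e2)
{
    struct call_gate_example g;

    g.selector    = e1 >> 16;
    g.offset      = (e1 & 0x0000ffff) | (e2 & 0xffff0000);
    g.param_count = e2 & 0x1f;
    g.dpl         = (e2 >> 13) & 3;     /* DESC_DPL_SHIFT */
    g.present     = (e2 >> 15) & 1;     /* P bit */
    return g;
}
#endif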
2525
2526/* real and vm86 mode iret */
2527void helper_iret_real(int shift)
2528{
2529 uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
2530 target_ulong ssp;
2531 int eflags_mask;
2532#ifdef VBOX
2533 bool fVME = false;
2534
2535 remR3TrapClear(env->pVM);
2536#endif /* VBOX */
2537
2538 sp_mask = 0xffff; /* XXXX: use SS segment size ? */
2539 sp = ESP;
2540 ssp = env->segs[R_SS].base;
2541 if (shift == 1) {
2542 /* 32 bits */
2543 POPL(ssp, sp, sp_mask, new_eip);
2544 POPL(ssp, sp, sp_mask, new_cs);
2545 new_cs &= 0xffff;
2546 POPL(ssp, sp, sp_mask, new_eflags);
2547 } else {
2548 /* 16 bits */
2549 POPW(ssp, sp, sp_mask, new_eip);
2550 POPW(ssp, sp, sp_mask, new_cs);
2551 POPW(ssp, sp, sp_mask, new_eflags);
2552 }
2553#ifdef VBOX
2554 if ( (env->eflags & VM_MASK)
2555 && ((env->eflags >> IOPL_SHIFT) & 3) != 3
2556 && (env->cr[4] & CR4_VME_MASK)) /* implied or else we would fault earlier */
2557 {
2558 fVME = true;
2559 /* if virtual interrupt pending and (virtual) interrupts will be enabled -> #GP */
2560 /* if TF will be set -> #GP */
2561 if ( ((new_eflags & IF_MASK) && (env->eflags & VIP_MASK))
2562 || (new_eflags & TF_MASK))
2563 raise_exception(EXCP0D_GPF);
2564 }
2565#endif /* VBOX */
2566
2567 ESP = (ESP & ~sp_mask) | (sp & sp_mask);
2568 load_seg_vm(R_CS, new_cs);
2569 env->eip = new_eip;
2570#ifdef VBOX
2571 if (fVME)
2572 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
2573 else
2574#endif
2575 if (env->eflags & VM_MASK)
2576 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
2577 else
2578 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
2579 if (shift == 0)
2580 eflags_mask &= 0xffff;
2581 load_eflags(new_eflags, eflags_mask);
2582
2583#ifdef VBOX
2584 if (fVME)
2585 {
2586 if (new_eflags & IF_MASK)
2587 env->eflags |= VIF_MASK;
2588 else
2589 env->eflags &= ~VIF_MASK;
2590 }
2591#endif /* VBOX */
2592}
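
/*
 * Illustrative sketch (editor's example): the VME rule the VBOX block above
 * implements -- on a V86-mode IRET with IOPL < 3 and CR4.VME set, the popped
 * IF bit is redirected into VIF, and a pending VIP together with the new IF
 * (or a set TF) raises #GP instead.  Mask values renamed to avoid clashing
 * with the real macros.
 */
#if 0 /* example only */
#include <stdint.h>

#define EX_TF_MASK  (1u << 8)
#define EX_IF_MASK  (1u << 9)
#define EX_VIF_MASK (1u << 19)
#define EX_VIP_MASK (1u << 20)

/* returns 0 on success, -1 where the CPU would raise #GP */
static int vme_iret_example(uint32_t *eflags, uint32_t new_eflags)
{
    if (((new_eflags & EX_IF_MASK) && (*eflags & EX_VIP_MASK)) ||
        (new_eflags & EX_TF_MASK))
        return -1;                      /* #GP */
    if (new_eflags & EX_IF_MASK)
        *eflags |= EX_VIF_MASK;         /* IF is virtualised into VIF */
    else
        *eflags &= ~EX_VIF_MASK;
    return 0;
}
#endif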
2593
2594static inline void validate_seg(int seg_reg, int cpl)
2595{
2596 int dpl;
2597 uint32_t e2;
2598
2599 /* XXX: on x86_64, we do not want to nullify FS and GS because
2600 they may still contain a valid base. I would be interested to
2601 know how a real x86_64 CPU behaves */
2602 if ((seg_reg == R_FS || seg_reg == R_GS) &&
2603 (env->segs[seg_reg].selector & 0xfffc) == 0)
2604 return;
2605
2606 e2 = env->segs[seg_reg].flags;
2607 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2608 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2609 /* data or non conforming code segment */
2610 if (dpl < cpl) {
2611 cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
2612 }
2613 }
2614}
2615
2616/* protected mode iret */
2617static inline void helper_ret_protected(int shift, int is_iret, int addend)
2618{
2619 uint32_t new_cs, new_eflags, new_ss;
2620 uint32_t new_es, new_ds, new_fs, new_gs;
2621 uint32_t e1, e2, ss_e1, ss_e2;
2622 int cpl, dpl, rpl, eflags_mask, iopl;
2623 target_ulong ssp, sp, new_eip, new_esp, sp_mask;
2624
2625#ifdef TARGET_X86_64
2626 if (shift == 2)
2627 sp_mask = -1;
2628 else
2629#endif
2630 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2631 sp = ESP;
2632 ssp = env->segs[R_SS].base;
2633 new_eflags = 0; /* avoid warning */
2634#ifdef TARGET_X86_64
2635 if (shift == 2) {
2636 POPQ(sp, new_eip);
2637 POPQ(sp, new_cs);
2638 new_cs &= 0xffff;
2639 if (is_iret) {
2640 POPQ(sp, new_eflags);
2641 }
2642 } else
2643#endif
2644 if (shift == 1) {
2645 /* 32 bits */
2646 POPL(ssp, sp, sp_mask, new_eip);
2647 POPL(ssp, sp, sp_mask, new_cs);
2648 new_cs &= 0xffff;
2649 if (is_iret) {
2650 POPL(ssp, sp, sp_mask, new_eflags);
2651#if defined(VBOX) && defined(DEBUG)
2652 printf("iret: new CS %04X\n", new_cs);
2653 printf("iret: new EIP %08X\n", new_eip);
2654 printf("iret: new EFLAGS %08X\n", new_eflags);
2655 printf("iret: EAX=%08x\n", EAX);
2656#endif
2657
2658 if (new_eflags & VM_MASK)
2659 goto return_to_vm86;
2660 }
2661#ifdef VBOX
2662 if ((new_cs & 0x3) == 1 && (env->state & CPU_RAW_RING0))
2663 {
2664#ifdef DEBUG
2665 printf("RPL 1 -> new_cs %04X -> %04X\n", new_cs, new_cs & 0xfffc);
2666#endif
2667 new_cs = new_cs & 0xfffc;
2668 }
2669#endif
2670 } else {
2671 /* 16 bits */
2672 POPW(ssp, sp, sp_mask, new_eip);
2673 POPW(ssp, sp, sp_mask, new_cs);
2674 if (is_iret)
2675 POPW(ssp, sp, sp_mask, new_eflags);
2676 }
2677#ifdef DEBUG_PCALL
2678 if (loglevel & CPU_LOG_PCALL) {
2679 fprintf(logfile, "lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
2680 new_cs, new_eip, shift, addend);
2681 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
2682 }
2683#endif
2684 if ((new_cs & 0xfffc) == 0)
2685 {
2686#if defined(VBOX) && defined(DEBUG)
2687 printf("(new_cs & 0xfffc) == 0\n");
2688#endif
2689 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2690 }
2691 if (load_segment(&e1, &e2, new_cs) != 0)
2692 {
2693#if defined(VBOX) && defined(DEBUG)
2694 printf("load_segment failed\n");
2695#endif
2696 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2697 }
2698 if (!(e2 & DESC_S_MASK) ||
2699 !(e2 & DESC_CS_MASK))
2700 {
2701#if defined(VBOX) && defined(DEBUG)
2702 printf("e2 mask %08x\n", e2);
2703#endif
2704 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2705 }
2706 cpl = env->hflags & HF_CPL_MASK;
2707 rpl = new_cs & 3;
2708 if (rpl < cpl)
2709 {
2710#if defined(VBOX) && defined(DEBUG)
2711 printf("rpl < cpl (%d vs %d)\n", rpl, cpl);
2712#endif
2713 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2714 }
2715 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2716 if (e2 & DESC_C_MASK) {
2717 if (dpl > rpl)
2718 {
2719#if defined(VBOX) && defined(DEBUG)
2720 printf("dpl > rpl (%d vs %d)\n", dpl, rpl);
2721#endif
2722 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2723 }
2724 } else {
2725 if (dpl != rpl)
2726 {
2727#if defined(VBOX) && defined(DEBUG)
2728 printf("dpl != rpl (%d vs %d) e1=%x e2=%x\n", dpl, rpl, e1, e2);
2729#endif
2730 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2731 }
2732 }
2733 if (!(e2 & DESC_P_MASK))
2734 {
2735#if defined(VBOX) && defined(DEBUG)
2736 printf("DESC_P_MASK e2=%08x\n", e2);
2737#endif
2738 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2739 }
2740 sp += addend;
2741 if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
2742 ((env->hflags & HF_CS64_MASK) && !is_iret))) {
2743 /* return to same privilege level */
2744 cpu_x86_load_seg_cache(env, R_CS, new_cs,
2745 get_seg_base(e1, e2),
2746 get_seg_limit(e1, e2),
2747 e2);
2748 } else {
2749 /* return to different privilege level */
2750#ifdef TARGET_X86_64
2751 if (shift == 2) {
2752 POPQ(sp, new_esp);
2753 POPQ(sp, new_ss);
2754 new_ss &= 0xffff;
2755 } else
2756#endif
2757 if (shift == 1) {
2758 /* 32 bits */
2759 POPL(ssp, sp, sp_mask, new_esp);
2760 POPL(ssp, sp, sp_mask, new_ss);
2761 new_ss &= 0xffff;
2762 } else {
2763 /* 16 bits */
2764 POPW(ssp, sp, sp_mask, new_esp);
2765 POPW(ssp, sp, sp_mask, new_ss);
2766 }
2767#ifdef DEBUG_PCALL
2768 if (loglevel & CPU_LOG_PCALL) {
2769 fprintf(logfile, "new ss:esp=%04x:" TARGET_FMT_lx "\n",
2770 new_ss, new_esp);
2771 }
2772#endif
2773 if ((new_ss & 0xfffc) == 0) {
2774#ifdef TARGET_X86_64
2775 /* NULL ss is allowed in long mode if cpl != 3 */
2776 /* XXX: test CS64 ? */
2777 if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
2778 cpu_x86_load_seg_cache(env, R_SS, new_ss,
2779 0, 0xffffffff,
2780 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2781 DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
2782 DESC_W_MASK | DESC_A_MASK);
2783 ss_e2 = DESC_B_MASK; /* XXX: should not be needed ? */
2784 } else
2785#endif
2786 {
2787 raise_exception_err(EXCP0D_GPF, 0);
2788 }
2789 } else {
2790 if ((new_ss & 3) != rpl)
2791 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2792 if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
2793 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2794 if (!(ss_e2 & DESC_S_MASK) ||
2795 (ss_e2 & DESC_CS_MASK) ||
2796 !(ss_e2 & DESC_W_MASK))
2797 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2798 dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2799 if (dpl != rpl)
2800 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2801 if (!(ss_e2 & DESC_P_MASK))
2802 raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
2803 cpu_x86_load_seg_cache(env, R_SS, new_ss,
2804 get_seg_base(ss_e1, ss_e2),
2805 get_seg_limit(ss_e1, ss_e2),
2806 ss_e2);
2807 }
2808
2809 cpu_x86_load_seg_cache(env, R_CS, new_cs,
2810 get_seg_base(e1, e2),
2811 get_seg_limit(e1, e2),
2812 e2);
2813 cpu_x86_set_cpl(env, rpl);
2814 sp = new_esp;
2815#ifdef TARGET_X86_64
2816 if (env->hflags & HF_CS64_MASK)
2817 sp_mask = -1;
2818 else
2819#endif
2820 sp_mask = get_sp_mask(ss_e2);
2821
2822 /* validate data segments */
2823 validate_seg(R_ES, rpl);
2824 validate_seg(R_DS, rpl);
2825 validate_seg(R_FS, rpl);
2826 validate_seg(R_GS, rpl);
2827
2828 sp += addend;
2829 }
2830 SET_ESP(sp, sp_mask);
2831 env->eip = new_eip;
2832 if (is_iret) {
2833 /* NOTE: 'cpl' is the _old_ CPL */
2834 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
2835 if (cpl == 0)
2836#ifdef VBOX
2837 eflags_mask |= IOPL_MASK | VIF_MASK | VIP_MASK;
2838#else
2839 eflags_mask |= IOPL_MASK;
2840#endif
2841 iopl = (env->eflags >> IOPL_SHIFT) & 3;
2842 if (cpl <= iopl)
2843 eflags_mask |= IF_MASK;
2844 if (shift == 0)
2845 eflags_mask &= 0xffff;
2846 load_eflags(new_eflags, eflags_mask);
2847 }
2848 return;
2849
2850 return_to_vm86:
2851
2852#if 0 // defined(VBOX) && defined(DEBUG)
2853 printf("V86: new CS %04X\n", new_cs);
2854 printf("V86: Descriptor %08X:%08X\n", e2, e1);
2855 printf("V86: new EIP %08X\n", new_eip);
2856 printf("V86: new EFLAGS %08X\n", new_eflags);
2857#endif
2858
2859 POPL(ssp, sp, sp_mask, new_esp);
2860 POPL(ssp, sp, sp_mask, new_ss);
2861 POPL(ssp, sp, sp_mask, new_es);
2862 POPL(ssp, sp, sp_mask, new_ds);
2863 POPL(ssp, sp, sp_mask, new_fs);
2864 POPL(ssp, sp, sp_mask, new_gs);
2865
2866 /* modify processor state */
2867 load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
2868 IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
2869 load_seg_vm(R_CS, new_cs & 0xffff);
2870 cpu_x86_set_cpl(env, 3);
2871 load_seg_vm(R_SS, new_ss & 0xffff);
2872 load_seg_vm(R_ES, new_es & 0xffff);
2873 load_seg_vm(R_DS, new_ds & 0xffff);
2874 load_seg_vm(R_FS, new_fs & 0xffff);
2875 load_seg_vm(R_GS, new_gs & 0xffff);
2876
2877 env->eip = new_eip & 0xffff;
2878 ESP = new_esp;
2879}
2880
2881void helper_iret_protected(int shift, int next_eip)
2882{
2883 int tss_selector, type;
2884 uint32_t e1, e2;
2885
2886#ifdef VBOX
2887 remR3TrapClear(env->pVM);
2888#endif
2889
2890 /* specific case for TSS */
2891 if (env->eflags & NT_MASK) {
2892#ifdef TARGET_X86_64
2893 if (env->hflags & HF_LMA_MASK)
2894 raise_exception_err(EXCP0D_GPF, 0);
2895#endif
2896 tss_selector = lduw_kernel(env->tr.base + 0);
2897 if (tss_selector & 4)
2898 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
2899 if (load_segment(&e1, &e2, tss_selector) != 0)
2900 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
2901 type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
2902 /* NOTE: we check both segment and busy TSS */
2903 if (type != 3)
2904 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
2905 switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
2906 } else {
2907 helper_ret_protected(shift, 1, 0);
2908 }
2909#ifdef USE_KQEMU
2910 if (kqemu_is_ok(env)) {
2911 CC_OP = CC_OP_EFLAGS;
2912 env->exception_index = -1;
2913 cpu_loop_exit();
2914 }
2915#endif
2916}
2917
2918void helper_lret_protected(int shift, int addend)
2919{
2920 helper_ret_protected(shift, 0, addend);
2921#ifdef USE_KQEMU
2922 if (kqemu_is_ok(env)) {
2923 env->exception_index = -1;
2924 cpu_loop_exit();
2925 }
2926#endif
2927}
2928
2929void helper_sysenter(void)
2930{
2931 if (env->sysenter_cs == 0) {
2932 raise_exception_err(EXCP0D_GPF, 0);
2933 }
2934 env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
2935 cpu_x86_set_cpl(env, 0);
2936 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
2937 0, 0xffffffff,
2938 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2939 DESC_S_MASK |
2940 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2941 cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
2942 0, 0xffffffff,
2943 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2944 DESC_S_MASK |
2945 DESC_W_MASK | DESC_A_MASK);
2946 ESP = env->sysenter_esp;
2947 EIP = env->sysenter_eip;
2948}
2949
2950void helper_sysexit(void)
2951{
2952 int cpl;
2953
2954 cpl = env->hflags & HF_CPL_MASK;
2955 if (env->sysenter_cs == 0 || cpl != 0) {
2956 raise_exception_err(EXCP0D_GPF, 0);
2957 }
2958 cpu_x86_set_cpl(env, 3);
2959 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
2960 0, 0xffffffff,
2961 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2962 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2963 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2964 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
2965 0, 0xffffffff,
2966 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2967 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2968 DESC_W_MASK | DESC_A_MASK);
2969 ESP = ECX;
2970 EIP = EDX;
2971#ifdef USE_KQEMU
2972 if (kqemu_is_ok(env)) {
2973 env->exception_index = -1;
2974 cpu_loop_exit();
2975 }
2976#endif
2977}
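
/*
 * Illustrative sketch (editor's example): the fixed selector arithmetic that
 * SYSENTER/SYSEXIT perform around MSR_IA32_SYSENTER_CS, as implemented by
 * the two helpers above.
 */
#if 0 /* example only */
#include <stdint.h>

static void sysenter_selectors_example(uint16_t sysenter_cs,
                                       uint16_t *cs, uint16_t *ss)
{
    *cs = sysenter_cs & 0xfffc;               /* ring 0 code, RPL 0 */
    *ss = (sysenter_cs + 8) & 0xfffc;         /* ring 0 stack */
}

static void sysexit_selectors_example(uint16_t sysenter_cs,
                                      uint16_t *cs, uint16_t *ss)
{
    *cs = ((sysenter_cs + 16) & 0xfffc) | 3;  /* ring 3 code, RPL 3 */
    *ss = ((sysenter_cs + 24) & 0xfffc) | 3;  /* ring 3 stack */
}
#endif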
2978
2979void helper_movl_crN_T0(int reg)
2980{
2981#if !defined(CONFIG_USER_ONLY)
2982 switch(reg) {
2983 case 0:
2984 cpu_x86_update_cr0(env, T0);
2985 break;
2986 case 3:
2987 cpu_x86_update_cr3(env, T0);
2988 break;
2989 case 4:
2990 cpu_x86_update_cr4(env, T0);
2991 break;
2992 case 8:
2993 cpu_set_apic_tpr(env, T0);
2994 break;
2995 default:
2996 env->cr[reg] = T0;
2997 break;
2998 }
2999#endif
3000}
3001
3002/* XXX: do more */
3003void helper_movl_drN_T0(int reg)
3004{
3005 env->dr[reg] = T0;
3006}
3007
3008void helper_invlpg(target_ulong addr)
3009{
3010 cpu_x86_flush_tlb(env, addr);
3011}
3012
3013void helper_rdtsc(void)
3014{
3015 uint64_t val;
3016
3017 if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3018 raise_exception(EXCP0D_GPF);
3019 }
3020 val = cpu_get_tsc(env);
3021 EAX = (uint32_t)(val);
3022 EDX = (uint32_t)(val >> 32);
3023}
3024
3025#if defined(CONFIG_USER_ONLY)
3026void helper_wrmsr(void)
3027{
3028}
3029
3030void helper_rdmsr(void)
3031{
3032}
3033#else
3034void helper_wrmsr(void)
3035{
3036 uint64_t val;
3037
3038 val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
3039
3040 switch((uint32_t)ECX) {
3041 case MSR_IA32_SYSENTER_CS:
3042 env->sysenter_cs = val & 0xffff;
3043 break;
3044 case MSR_IA32_SYSENTER_ESP:
3045 env->sysenter_esp = val;
3046 break;
3047 case MSR_IA32_SYSENTER_EIP:
3048 env->sysenter_eip = val;
3049 break;
3050 case MSR_IA32_APICBASE:
3051 cpu_set_apic_base(env, val);
3052 break;
3053 case MSR_EFER:
3054 {
3055 uint64_t update_mask;
3056 update_mask = 0;
3057 if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL)
3058 update_mask |= MSR_EFER_SCE;
3059 if (env->cpuid_ext2_features & CPUID_EXT2_LM)
3060 update_mask |= MSR_EFER_LME;
3061 if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
3062 update_mask |= MSR_EFER_FFXSR;
3063 if (env->cpuid_ext2_features & CPUID_EXT2_NX)
3064 update_mask |= MSR_EFER_NXE;
3065 env->efer = (env->efer & ~update_mask) |
3066 (val & update_mask);
3067 }
3068 break;
3069 case MSR_STAR:
3070 env->star = val;
3071 break;
3072 case MSR_PAT:
3073 env->pat = val;
3074 break;
3075#ifdef TARGET_X86_64
3076 case MSR_LSTAR:
3077 env->lstar = val;
3078 break;
3079 case MSR_CSTAR:
3080 env->cstar = val;
3081 break;
3082 case MSR_FMASK:
3083 env->fmask = val;
3084 break;
3085 case MSR_FSBASE:
3086 env->segs[R_FS].base = val;
3087 break;
3088 case MSR_GSBASE:
3089 env->segs[R_GS].base = val;
3090 break;
3091 case MSR_KERNELGSBASE:
3092 env->kernelgsbase = val;
3093 break;
3094#endif
3095 default:
3096#ifndef VBOX
3097 /* XXX: exception ? */
3098 break;
3099#else /* VBOX */
3100 {
3101 uint32_t ecx = (uint32_t)ECX;
3102 /* In X2APIC specification this range is reserved for APIC control. */
3103 if (ecx >= MSR_APIC_RANGE_START && ecx < MSR_APIC_RANGE_END)
3104 cpu_apic_wrmsr(env, ecx, val);
3105 /** @todo else exception? */
3106 break;
3107 }
3108#endif /* VBOX */
3109 }
3110}
3111
3112void helper_rdmsr(void)
3113{
3114 uint64_t val;
3115 switch((uint32_t)ECX) {
3116 case MSR_IA32_SYSENTER_CS:
3117 val = env->sysenter_cs;
3118 break;
3119 case MSR_IA32_SYSENTER_ESP:
3120 val = env->sysenter_esp;
3121 break;
3122 case MSR_IA32_SYSENTER_EIP:
3123 val = env->sysenter_eip;
3124 break;
3125 case MSR_IA32_APICBASE:
3126 val = cpu_get_apic_base(env);
3127 break;
3128 case MSR_EFER:
3129 val = env->efer;
3130 break;
3131 case MSR_STAR:
3132 val = env->star;
3133 break;
3134 case MSR_PAT:
3135 val = env->pat;
3136 break;
3137#ifdef TARGET_X86_64
3138 case MSR_LSTAR:
3139 val = env->lstar;
3140 break;
3141 case MSR_CSTAR:
3142 val = env->cstar;
3143 break;
3144 case MSR_FMASK:
3145 val = env->fmask;
3146 break;
3147 case MSR_FSBASE:
3148 val = env->segs[R_FS].base;
3149 break;
3150 case MSR_GSBASE:
3151 val = env->segs[R_GS].base;
3152 break;
3153 case MSR_KERNELGSBASE:
3154 val = env->kernelgsbase;
3155 break;
3156#endif
3157 default:
3158#ifndef VBOX
3159 /* XXX: exception ? */
3160 val = 0;
3161 break;
3162#else /* VBOX */
3163 {
3164 uint32_t ecx = (uint32_t)ECX;
3165 /* In X2APIC specification this range is reserved for APIC control. */
3166 if (ecx >= MSR_APIC_RANGE_START && ecx < MSR_APIC_RANGE_END)
3167 val = cpu_apic_rdmsr(env, ecx);
3168 else
3169 val = 0; /** @todo else exception? */
3170 break;
3171 }
3172#endif /* VBOX */
3173 }
3174 EAX = (uint32_t)(val);
3175 EDX = (uint32_t)(val >> 32);
3176}
3177#endif
3178
3179void helper_lsl(void)
3180{
3181 unsigned int selector, limit;
3182 uint32_t e1, e2, eflags;
3183 int rpl, dpl, cpl, type;
3184
3185 eflags = cc_table[CC_OP].compute_all();
3186 selector = T0 & 0xffff;
3187 if (load_segment(&e1, &e2, selector) != 0)
3188 goto fail;
3189 rpl = selector & 3;
3190 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3191 cpl = env->hflags & HF_CPL_MASK;
3192 if (e2 & DESC_S_MASK) {
3193 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3194 /* conforming */
3195 } else {
3196 if (dpl < cpl || dpl < rpl)
3197 goto fail;
3198 }
3199 } else {
3200 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
3201 switch(type) {
3202 case 1:
3203 case 2:
3204 case 3:
3205 case 9:
3206 case 11:
3207 break;
3208 default:
3209 goto fail;
3210 }
3211 if (dpl < cpl || dpl < rpl) {
3212 fail:
3213 CC_SRC = eflags & ~CC_Z;
3214 return;
3215 }
3216 }
3217 limit = get_seg_limit(e1, e2);
3218 T1 = limit;
3219 CC_SRC = eflags | CC_Z;
3220}
3221
3222void helper_lar(void)
3223{
3224 unsigned int selector;
3225 uint32_t e1, e2, eflags;
3226 int rpl, dpl, cpl, type;
3227
3228 eflags = cc_table[CC_OP].compute_all();
3229 selector = T0 & 0xffff;
3230 if ((selector & 0xfffc) == 0)
3231 goto fail;
3232 if (load_segment(&e1, &e2, selector) != 0)
3233 goto fail;
3234 rpl = selector & 3;
3235 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3236 cpl = env->hflags & HF_CPL_MASK;
3237 if (e2 & DESC_S_MASK) {
3238 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3239 /* conforming */
3240 } else {
3241 if (dpl < cpl || dpl < rpl)
3242 goto fail;
3243 }
3244 } else {
3245 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
3246 switch(type) {
3247 case 1:
3248 case 2:
3249 case 3:
3250 case 4:
3251 case 5:
3252 case 9:
3253 case 11:
3254 case 12:
3255 break;
3256 default:
3257 goto fail;
3258 }
3259 if (dpl < cpl || dpl < rpl) {
3260 fail:
3261 CC_SRC = eflags & ~CC_Z;
3262 return;
3263 }
3264 }
3265 T1 = e2 & 0x00f0ff00;
3266 CC_SRC = eflags | CC_Z;
3267}
3268
3269void helper_verr(void)
3270{
3271 unsigned int selector;
3272 uint32_t e1, e2, eflags;
3273 int rpl, dpl, cpl;
3274
3275 eflags = cc_table[CC_OP].compute_all();
3276 selector = T0 & 0xffff;
3277 if ((selector & 0xfffc) == 0)
3278 goto fail;
3279 if (load_segment(&e1, &e2, selector) != 0)
3280 goto fail;
3281 if (!(e2 & DESC_S_MASK))
3282 goto fail;
3283 rpl = selector & 3;
3284 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3285 cpl = env->hflags & HF_CPL_MASK;
3286 if (e2 & DESC_CS_MASK) {
3287 if (!(e2 & DESC_R_MASK))
3288 goto fail;
3289 if (!(e2 & DESC_C_MASK)) {
3290 if (dpl < cpl || dpl < rpl)
3291 goto fail;
3292 }
3293 } else {
3294 if (dpl < cpl || dpl < rpl) {
3295 fail:
3296 CC_SRC = eflags & ~CC_Z;
3297 return;
3298 }
3299 }
3300 CC_SRC = eflags | CC_Z;
3301}
3302
3303void helper_verw(void)
3304{
3305 unsigned int selector;
3306 uint32_t e1, e2, eflags;
3307 int rpl, dpl, cpl;
3308
3309 eflags = cc_table[CC_OP].compute_all();
3310 selector = T0 & 0xffff;
3311 if ((selector & 0xfffc) == 0)
3312 goto fail;
3313 if (load_segment(&e1, &e2, selector) != 0)
3314 goto fail;
3315 if (!(e2 & DESC_S_MASK))
3316 goto fail;
3317 rpl = selector & 3;
3318 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3319 cpl = env->hflags & HF_CPL_MASK;
3320 if (e2 & DESC_CS_MASK) {
3321 goto fail;
3322 } else {
3323 if (dpl < cpl || dpl < rpl)
3324 goto fail;
3325 if (!(e2 & DESC_W_MASK)) {
3326 fail:
3327 CC_SRC = eflags & ~CC_Z;
3328 return;
3329 }
3330 }
3331 CC_SRC = eflags | CC_Z;
3332}
3333
3334/* FPU helpers */
3335
3336void helper_fldt_ST0_A0(void)
3337{
3338 int new_fpstt;
3339 new_fpstt = (env->fpstt - 1) & 7;
3340 env->fpregs[new_fpstt].d = helper_fldt(A0);
3341 env->fpstt = new_fpstt;
3342 env->fptags[new_fpstt] = 0; /* validate stack entry */
3343}
3344
3345void helper_fstt_ST0_A0(void)
3346{
3347 helper_fstt(ST0, A0);
3348}
3349
3350void fpu_set_exception(int mask)
3351{
3352 env->fpus |= mask;
3353 if (env->fpus & (~env->fpuc & FPUC_EM))
3354 env->fpus |= FPUS_SE | FPUS_B;
3355}
3356
3357CPU86_LDouble helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
3358{
3359 if (b == 0.0)
3360 fpu_set_exception(FPUS_ZE);
3361 return a / b;
3362}
3363
3364void fpu_raise_exception(void)
3365{
3366 if (env->cr[0] & CR0_NE_MASK) {
3367 raise_exception(EXCP10_COPR);
3368 }
3369#if !defined(CONFIG_USER_ONLY)
3370 else {
3371 cpu_set_ferr(env);
3372 }
3373#endif
3374}
3375
3376/* BCD ops */
3377
3378void helper_fbld_ST0_A0(void)
3379{
3380 CPU86_LDouble tmp;
3381 uint64_t val;
3382 unsigned int v;
3383 int i;
3384
3385 val = 0;
3386 for(i = 8; i >= 0; i--) {
3387 v = ldub(A0 + i);
3388 val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
3389 }
3390 tmp = val;
3391 if (ldub(A0 + 9) & 0x80)
3392 tmp = -tmp;
3393 fpush();
3394 ST0 = tmp;
3395}
3396
3397void helper_fbst_ST0_A0(void)
3398{
3399 int v;
3400 target_ulong mem_ref, mem_end;
3401 int64_t val;
3402
3403 val = floatx_to_int64(ST0, &env->fp_status);
3404 mem_ref = A0;
3405 mem_end = mem_ref + 9;
3406 if (val < 0) {
3407 stb(mem_end, 0x80);
3408 val = -val;
3409 } else {
3410 stb(mem_end, 0x00);
3411 }
3412 while (mem_ref < mem_end) {
3413 if (val == 0)
3414 break;
3415 v = val % 100;
3416 val = val / 100;
3417 v = ((v / 10) << 4) | (v % 10);
3418 stb(mem_ref++, v);
3419 }
3420 while (mem_ref < mem_end) {
3421 stb(mem_ref++, 0);
3422 }
3423}
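
/*
 * Illustrative sketch (editor's example): the 10-byte packed BCD format used
 * by FBLD/FBST above -- 18 decimal digits, two per byte, least significant
 * byte first, sign in bit 7 of the last byte.
 */
#if 0 /* example only */
#include <stdint.h>

static int64_t bcd_decode_example(const uint8_t bcd[10])
{
    int64_t val = 0;
    int i;

    for (i = 8; i >= 0; i--)
        val = val * 100 + (bcd[i] >> 4) * 10 + (bcd[i] & 0xf);
    return (bcd[9] & 0x80) ? -val : val;
}

static void bcd_encode_example(uint8_t bcd[10], int64_t val)
{
    int i, v;

    bcd[9] = val < 0 ? 0x80 : 0x00;
    if (val < 0)
        val = -val;
    for (i = 0; i < 9; i++) {
        v = (int)(val % 100);
        val /= 100;
        bcd[i] = (uint8_t)(((v / 10) << 4) | (v % 10));
    }
}
#endif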
3424
3425void helper_f2xm1(void)
3426{
3427 ST0 = pow(2.0,ST0) - 1.0;
3428}
3429
3430void helper_fyl2x(void)
3431{
3432 CPU86_LDouble fptemp;
3433
3434 fptemp = ST0;
3435 if (fptemp>0.0){
3436 fptemp = log(fptemp)/log(2.0); /* log2(ST) */
3437 ST1 *= fptemp;
3438 fpop();
3439 } else {
3440 env->fpus &= (~0x4700);
3441 env->fpus |= 0x400;
3442 }
3443}
3444
3445void helper_fptan(void)
3446{
3447 CPU86_LDouble fptemp;
3448
3449 fptemp = ST0;
3450 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
3451 env->fpus |= 0x400;
3452 } else {
3453 ST0 = tan(fptemp);
3454 fpush();
3455 ST0 = 1.0;
3456 env->fpus &= (~0x400); /* C2 <-- 0 */
3457 /* the above code is for |arg| < 2**52 only */
3458 }
3459}
3460
3461void helper_fpatan(void)
3462{
3463 CPU86_LDouble fptemp, fpsrcop;
3464
3465 fpsrcop = ST1;
3466 fptemp = ST0;
3467 ST1 = atan2(fpsrcop,fptemp);
3468 fpop();
3469}
3470
3471void helper_fxtract(void)
3472{
3473 CPU86_LDoubleU temp;
3474 unsigned int expdif;
3475
3476 temp.d = ST0;
3477 expdif = EXPD(temp) - EXPBIAS;
3478 /*DP exponent bias*/
3479 ST0 = expdif;
3480 fpush();
3481 BIASEXPONENT(temp);
3482 ST0 = temp.d;
3483}
3484
3485void helper_fprem1(void)
3486{
3487 CPU86_LDouble dblq, fpsrcop, fptemp;
3488 CPU86_LDoubleU fpsrcop1, fptemp1;
3489 int expdif;
3490 int q;
3491
3492 fpsrcop = ST0;
3493 fptemp = ST1;
3494 fpsrcop1.d = fpsrcop;
3495 fptemp1.d = fptemp;
3496 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
3497 if (expdif < 53) {
3498 dblq = fpsrcop / fptemp;
3499 dblq = (dblq < 0.0)? ceil(dblq): floor(dblq);
3500 ST0 = fpsrcop - fptemp*dblq;
3501 q = (int)dblq; /* cutting off top bits is assumed here */
3502 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3503 /* (C0,C1,C3) <-- (q2,q1,q0) */
3504 env->fpus |= (q&0x4) << 6; /* (C0) <-- q2 */
3505 env->fpus |= (q&0x2) << 8; /* (C1) <-- q1 */
3506 env->fpus |= (q&0x1) << 14; /* (C3) <-- q0 */
3507 } else {
3508 env->fpus |= 0x400; /* C2 <-- 1 */
3509 fptemp = pow(2.0, expdif-50);
3510 fpsrcop = (ST0 / ST1) / fptemp;
3511 /* fpsrcop = integer obtained by rounding to the nearest */
3512 fpsrcop = (fpsrcop-floor(fpsrcop) < ceil(fpsrcop)-fpsrcop)?
3513 floor(fpsrcop): ceil(fpsrcop);
3514 ST0 -= (ST1 * fpsrcop * fptemp);
3515 }
3516}
3517
3518void helper_fprem(void)
3519{
3520 CPU86_LDouble dblq, fpsrcop, fptemp;
3521 CPU86_LDoubleU fpsrcop1, fptemp1;
3522 int expdif;
3523 int q;
3524
3525 fpsrcop = ST0;
3526 fptemp = ST1;
3527 fpsrcop1.d = fpsrcop;
3528 fptemp1.d = fptemp;
3529 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
3530 if ( expdif < 53 ) {
3531 dblq = fpsrcop / fptemp;
3532 dblq = (dblq < 0.0)? ceil(dblq): floor(dblq);
3533 ST0 = fpsrcop - fptemp*dblq;
3534 q = (int)dblq; /* cutting off top bits is assumed here */
3535 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3536 /* (C0,C1,C3) <-- (q2,q1,q0) */
3537 env->fpus |= (q&0x4) << 6; /* (C0) <-- q2 */
3538 env->fpus |= (q&0x2) << 8; /* (C1) <-- q1 */
3539 env->fpus |= (q&0x1) << 14; /* (C3) <-- q0 */
3540 } else {
3541 env->fpus |= 0x400; /* C2 <-- 1 */
3542 fptemp = pow(2.0, expdif-50);
3543 fpsrcop = (ST0 / ST1) / fptemp;
3544 /* fpsrcop = integer obtained by chopping */
3545 fpsrcop = (fpsrcop < 0.0)?
3546 -(floor(fabs(fpsrcop))): floor(fpsrcop);
3547 ST0 -= (ST1 * fpsrcop * fptemp);
3548 }
3549}
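
/*
 * Illustrative sketch (editor's example): the quotient-bit reporting that
 * both FPREM variants above perform.  C0 is status word bit 8, C1 bit 9,
 * C2 bit 10 and C3 bit 14; C2 = 0 signals a complete reduction, and the
 * three low quotient bits land in C0/C1/C3.
 */
#if 0 /* example only */
#include <stdint.h>

static uint16_t fprem_status_example(uint16_t fpus, int q)
{
    fpus &= ~0x4700;            /* clear C3, C2, C1, C0 */
    fpus |= (q & 4) << 6;       /* C0 <-- q2 */
    fpus |= (q & 2) << 8;       /* C1 <-- q1 */
    fpus |= (q & 1) << 14;      /* C3 <-- q0 */
    return fpus;
}
#endif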
3550
3551void helper_fyl2xp1(void)
3552{
3553 CPU86_LDouble fptemp;
3554
3555 fptemp = ST0;
3556 if ((fptemp+1.0)>0.0) {
3557 fptemp = log(fptemp+1.0) / log(2.0); /* log2(ST+1.0) */
3558 ST1 *= fptemp;
3559 fpop();
3560 } else {
3561 env->fpus &= (~0x4700);
3562 env->fpus |= 0x400;
3563 }
3564}
3565
3566void helper_fsqrt(void)
3567{
3568 CPU86_LDouble fptemp;
3569
3570 fptemp = ST0;
3571 if (fptemp<0.0) {
3572 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3573 env->fpus |= 0x400;
3574 }
3575 ST0 = sqrt(fptemp);
3576}
3577
3578void helper_fsincos(void)
3579{
3580 CPU86_LDouble fptemp;
3581
3582 fptemp = ST0;
3583 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
3584 env->fpus |= 0x400;
3585 } else {
3586 ST0 = sin(fptemp);
3587 fpush();
3588 ST0 = cos(fptemp);
3589 env->fpus &= (~0x400); /* C2 <-- 0 */
3590 /* the above code is for |arg| < 2**63 only */
3591 }
3592}
3593
3594void helper_frndint(void)
3595{
3596 ST0 = floatx_round_to_int(ST0, &env->fp_status);
3597}
3598
3599void helper_fscale(void)
3600{
3601 ST0 = ldexp (ST0, (int)(ST1));
3602}
3603
3604void helper_fsin(void)
3605{
3606 CPU86_LDouble fptemp;
3607
3608 fptemp = ST0;
3609 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
3610 env->fpus |= 0x400;
3611 } else {
3612 ST0 = sin(fptemp);
3613 env->fpus &= (~0x400); /* C2 <-- 0 */
3614 /* the above code is for |arg| < 2**53 only */
3615 }
3616}
3617
3618void helper_fcos(void)
3619{
3620 CPU86_LDouble fptemp;
3621
3622 fptemp = ST0;
3623 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
3624 env->fpus |= 0x400;
3625 } else {
3626 ST0 = cos(fptemp);
3627 env->fpus &= (~0x400); /* C2 <-- 0 */
3628 /* the above code is for |arg| < 2**63 only */
3629 }
3630}
3631
3632void helper_fxam_ST0(void)
3633{
3634 CPU86_LDoubleU temp;
3635 int expdif;
3636
3637 temp.d = ST0;
3638
3639 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3640 if (SIGND(temp))
3641 env->fpus |= 0x200; /* C1 <-- 1 */
3642
3643 /* XXX: test fptags too */
3644 expdif = EXPD(temp);
3645 if (expdif == MAXEXPD) {
3646#ifdef USE_X86LDOUBLE
3647 if (MANTD(temp) == 0x8000000000000000ULL)
3648#else
3649 if (MANTD(temp) == 0)
3650#endif
3651 env->fpus |= 0x500 /*Infinity*/;
3652 else
3653 env->fpus |= 0x100 /*NaN*/;
3654 } else if (expdif == 0) {
3655 if (MANTD(temp) == 0)
3656 env->fpus |= 0x4000 /*Zero*/;
3657 else
3658 env->fpus |= 0x4400 /*Denormal*/;
3659 } else {
3660 env->fpus |= 0x400;
3661 }
3662}
3663
3664void helper_fstenv(target_ulong ptr, int data32)
3665{
3666 int fpus, fptag, exp, i;
3667 uint64_t mant;
3668 CPU86_LDoubleU tmp;
3669
3670 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
3671 fptag = 0;
3672 for (i=7; i>=0; i--) {
3673 fptag <<= 2;
3674 if (env->fptags[i]) {
3675 fptag |= 3;
3676 } else {
3677 tmp.d = env->fpregs[i].d;
3678 exp = EXPD(tmp);
3679 mant = MANTD(tmp);
3680 if (exp == 0 && mant == 0) {
3681 /* zero */
3682 fptag |= 1;
3683 } else if (exp == 0 || exp == MAXEXPD
3684#ifdef USE_X86LDOUBLE
3685 || (mant & (1LL << 63)) == 0
3686#endif
3687 ) {
3688 /* NaNs, infinity, denormal */
3689 fptag |= 2;
3690 }
3691 }
3692 }
3693 if (data32) {
3694 /* 32 bit */
3695 stl(ptr, env->fpuc);
3696 stl(ptr + 4, fpus);
3697 stl(ptr + 8, fptag);
3698 stl(ptr + 12, 0); /* fpip */
3699 stl(ptr + 16, 0); /* fpcs */
3700 stl(ptr + 20, 0); /* fpoo */
3701 stl(ptr + 24, 0); /* fpos */
3702 } else {
3703 /* 16 bit */
3704 stw(ptr, env->fpuc);
3705 stw(ptr + 2, fpus);
3706 stw(ptr + 4, fptag);
3707 stw(ptr + 6, 0);
3708 stw(ptr + 8, 0);
3709 stw(ptr + 10, 0);
3710 stw(ptr + 12, 0);
3711 }
3712}
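
/*
 * Illustrative sketch (editor's example): the two-bit tag values
 * helper_fstenv() derives per register -- 00 valid, 01 zero, 10 special
 * (NaN/infinity/denormal), 11 empty -- here classifying a plain IEEE
 * double by its raw bits.
 */
#if 0 /* example only */
#include <stdint.h>

static int fptag_example(int empty, uint64_t bits)
{
    uint64_t exp  = (bits >> 52) & 0x7ff;
    uint64_t mant = bits & 0xfffffffffffffULL;

    if (empty)
        return 3;                       /* empty */
    if (exp == 0 && mant == 0)
        return 1;                       /* zero */
    if (exp == 0 || exp == 0x7ff)
        return 2;                       /* denormal, NaN or infinity */
    return 0;                           /* valid */
}
#endif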
3713
3714void helper_fldenv(target_ulong ptr, int data32)
3715{
3716 int i, fpus, fptag;
3717
3718 if (data32) {
3719 env->fpuc = lduw(ptr);
3720 fpus = lduw(ptr + 4);
3721 fptag = lduw(ptr + 8);
3722 }
3723 else {
3724 env->fpuc = lduw(ptr);
3725 fpus = lduw(ptr + 2);
3726 fptag = lduw(ptr + 4);
3727 }
3728 env->fpstt = (fpus >> 11) & 7;
3729 env->fpus = fpus & ~0x3800;
3730 for(i = 0;i < 8; i++) {
3731 env->fptags[i] = ((fptag & 3) == 3);
3732 fptag >>= 2;
3733 }
3734}
3735
3736void helper_fsave(target_ulong ptr, int data32)
3737{
3738 CPU86_LDouble tmp;
3739 int i;
3740
3741 helper_fstenv(ptr, data32);
3742
3743 ptr += (14 << data32);
3744 for(i = 0;i < 8; i++) {
3745 tmp = ST(i);
3746 helper_fstt(tmp, ptr);
3747 ptr += 10;
3748 }
3749
3750 /* fninit */
3751 env->fpus = 0;
3752 env->fpstt = 0;
3753 env->fpuc = 0x37f;
3754 env->fptags[0] = 1;
3755 env->fptags[1] = 1;
3756 env->fptags[2] = 1;
3757 env->fptags[3] = 1;
3758 env->fptags[4] = 1;
3759 env->fptags[5] = 1;
3760 env->fptags[6] = 1;
3761 env->fptags[7] = 1;
3762}
3763
3764void helper_frstor(target_ulong ptr, int data32)
3765{
3766 CPU86_LDouble tmp;
3767 int i;
3768
3769 helper_fldenv(ptr, data32);
3770 ptr += (14 << data32);
3771
3772 for(i = 0;i < 8; i++) {
3773 tmp = helper_fldt(ptr);
3774 ST(i) = tmp;
3775 ptr += 10;
3776 }
3777}
3778
3779void helper_fxsave(target_ulong ptr, int data64)
3780{
3781 int fpus, fptag, i, nb_xmm_regs;
3782 CPU86_LDouble tmp;
3783 target_ulong addr;
3784
3785 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
3786 fptag = 0;
3787 for(i = 0; i < 8; i++) {
3788 fptag |= (env->fptags[i] << i);
3789 }
3790 stw(ptr, env->fpuc);
3791 stw(ptr + 2, fpus);
3792 stw(ptr + 4, fptag ^ 0xff);
3793
3794 addr = ptr + 0x20;
3795 for(i = 0;i < 8; i++) {
3796 tmp = ST(i);
3797 helper_fstt(tmp, addr);
3798 addr += 16;
3799 }
3800
3801 if (env->cr[4] & CR4_OSFXSR_MASK) {
3802 /* XXX: finish it */
3803 stl(ptr + 0x18, env->mxcsr); /* mxcsr */
3804 stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
3805 nb_xmm_regs = 8 << data64;
3806 addr = ptr + 0xa0;
3807 for(i = 0; i < nb_xmm_regs; i++) {
3808 stq(addr, env->xmm_regs[i].XMM_Q(0));
3809 stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
3810 addr += 16;
3811 }
3812 }
3813}
3814
3815void helper_fxrstor(target_ulong ptr, int data64)
3816{
3817 int i, fpus, fptag, nb_xmm_regs;
3818 CPU86_LDouble tmp;
3819 target_ulong addr;
3820
3821 env->fpuc = lduw(ptr);
3822 fpus = lduw(ptr + 2);
3823 fptag = lduw(ptr + 4);
3824 env->fpstt = (fpus >> 11) & 7;
3825 env->fpus = fpus & ~0x3800;
3826 fptag ^= 0xff;
3827 for(i = 0;i < 8; i++) {
3828 env->fptags[i] = ((fptag >> i) & 1);
3829 }
3830
3831 addr = ptr + 0x20;
3832 for(i = 0;i < 8; i++) {
3833 tmp = helper_fldt(addr);
3834 ST(i) = tmp;
3835 addr += 16;
3836 }
3837
3838 if (env->cr[4] & CR4_OSFXSR_MASK) {
3839 /* XXX: finish it */
3840 env->mxcsr = ldl(ptr + 0x18);
3841 //ldl(ptr + 0x1c);
3842 nb_xmm_regs = 8 << data64;
3843 addr = ptr + 0xa0;
3844 for(i = 0; i < nb_xmm_regs; i++) {
3845#if !defined(VBOX) || __GNUC__ < 4
3846 env->xmm_regs[i].XMM_Q(0) = ldq(addr);
3847 env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
3848#else /* VBOX + __GNUC__ >= 4: gcc 4.x compiler bug - it runs out of registers for the 64-bit value. */
3849# if 1
3850 env->xmm_regs[i].XMM_L(0) = ldl(addr);
3851 env->xmm_regs[i].XMM_L(1) = ldl(addr + 4);
3852 env->xmm_regs[i].XMM_L(2) = ldl(addr + 8);
3853 env->xmm_regs[i].XMM_L(3) = ldl(addr + 12);
3854# else
3855 /* this works fine on Mac OS X, gcc 4.0.1 */
3856 uint64_t u64 = ldq(addr);
3857 env->xmm_regs[i].XMM_Q(0) = u64;
3858 u64 = ldq(addr + 8);
3859 env->xmm_regs[i].XMM_Q(1) = u64;
3860# endif
3861#endif
3862 addr += 16;
3863 }
3864 }
3865}
3866
3867#ifndef USE_X86LDOUBLE
3868
3869void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
3870{
3871 CPU86_LDoubleU temp;
3872 int e;
3873
3874 temp.d = f;
3875 /* mantissa */
3876 *pmant = (MANTD(temp) << 11) | (1LL << 63);
3877 /* exponent + sign */
3878 e = EXPD(temp) - EXPBIAS + 16383;
3879 e |= SIGND(temp) >> 16;
3880 *pexp = e;
3881}
3882
3883CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
3884{
3885 CPU86_LDoubleU temp;
3886 int e;
3887 uint64_t ll;
3888
3889 /* XXX: handle overflow ? */
3890 e = (upper & 0x7fff) - 16383 + EXPBIAS; /* exponent */
3891 e |= (upper >> 4) & 0x800; /* sign */
3892 ll = (mant >> 11) & ((1LL << 52) - 1);
3893#ifdef __arm__
3894 temp.l.upper = (e << 20) | (ll >> 32);
3895 temp.l.lower = ll;
3896#else
3897 temp.ll = ll | ((uint64_t)e << 52);
3898#endif
3899 return temp.d;
3900}
3901
3902#else
3903
3904void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
3905{
3906 CPU86_LDoubleU temp;
3907
3908 temp.d = f;
3909 *pmant = temp.l.lower;
3910 *pexp = temp.l.upper;
3911}
3912
3913CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
3914{
3915 CPU86_LDoubleU temp;
3916
3917 temp.l.upper = upper;
3918 temp.l.lower = mant;
3919 return temp.d;
3920}
3921#endif
3922
3923#ifdef TARGET_X86_64
3924
3925//#define DEBUG_MULDIV
3926
3927static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
3928{
3929 *plow += a;
3930 /* carry test */
3931 if (*plow < a)
3932 (*phigh)++;
3933 *phigh += b;
3934}
3935
3936static void neg128(uint64_t *plow, uint64_t *phigh)
3937{
3938 *plow = ~ *plow;
3939 *phigh = ~ *phigh;
3940 add128(plow, phigh, 1, 0);
3941}
3942
3943static void mul64(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
3944{
3945 uint32_t a0, a1, b0, b1;
3946 uint64_t v;
3947
3948 a0 = a;
3949 a1 = a >> 32;
3950
3951 b0 = b;
3952 b1 = b >> 32;
3953
3954 v = (uint64_t)a0 * (uint64_t)b0;
3955 *plow = v;
3956 *phigh = 0;
3957
3958 v = (uint64_t)a0 * (uint64_t)b1;
3959 add128(plow, phigh, v << 32, v >> 32);
3960
3961 v = (uint64_t)a1 * (uint64_t)b0;
3962 add128(plow, phigh, v << 32, v >> 32);
3963
3964 v = (uint64_t)a1 * (uint64_t)b1;
3965 *phigh += v;
3966#ifdef DEBUG_MULDIV
3967 printf("mul: 0x%016" PRIx64 " * 0x%016" PRIx64 " = 0x%016" PRIx64 "%016" PRIx64 "\n",
3968 a, b, *phigh, *plow);
3969#endif
3970}
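
/*
 * Usage sketch (editor's example): exercising mul64() above with a product
 * that overflows 64 bits.  0xffffffffffffffff * 2 = 0x1fffffffffffffffe,
 * i.e. high = 1, low = 0xfffffffffffffffe.
 */
#if 0 /* example only */
static void mul64_example(void)
{
    uint64_t lo, hi;

    mul64(&lo, &hi, 0xffffffffffffffffULL, 2);
    /* here hi == 1 and lo == 0xfffffffffffffffeULL */
}
#endif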
3971
3972static void imul64(uint64_t *plow, uint64_t *phigh, int64_t a, int64_t b)
3973{
3974 int sa, sb;
3975 sa = (a < 0);
3976 if (sa)
3977 a = -a;
3978 sb = (b < 0);
3979 if (sb)
3980 b = -b;
3981 mul64(plow, phigh, a, b);
3982 if (sa ^ sb) {
3983 neg128(plow, phigh);
3984 }
3985}
3986
3987/* return TRUE if overflow */
3988static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
3989{
3990 uint64_t q, r, a1, a0;
3991 int i, qb, ab;
3992
3993 a0 = *plow;
3994 a1 = *phigh;
3995 if (a1 == 0) {
3996 q = a0 / b;
3997 r = a0 % b;
3998 *plow = q;
3999 *phigh = r;
4000 } else {
4001 if (a1 >= b)
4002 return 1;
4003 /* XXX: use a better algorithm */
4004 for(i = 0; i < 64; i++) {
4005 ab = a1 >> 63;
4006 a1 = (a1 << 1) | (a0 >> 63);
4007 if (ab || a1 >= b) {
4008 a1 -= b;
4009 qb = 1;
4010 } else {
4011 qb = 0;
4012 }
4013 a0 = (a0 << 1) | qb;
4014 }
4015#if defined(DEBUG_MULDIV)
4016 printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64 ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n",
4017 *phigh, *plow, b, a0, a1);
4018#endif
4019 *plow = a0;
4020 *phigh = a1;
4021 }
4022 return 0;
4023}
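
/*
 * Usage sketch (editor's example): div64() above divides the 128-bit value
 * *phigh:*plow in place and reports quotient overflow, which the DIV
 * helpers turn into #DE.
 */
#if 0 /* example only */
static void div64_example(void)
{
    uint64_t lo = 0, hi = 1;            /* the value 2^64 */

    if (div64(&lo, &hi, 3) == 0) {
        /* lo == 0x5555555555555555 (quotient), hi == 1 (remainder) */
    }

    lo = 0; hi = 1;
    (void)div64(&lo, &hi, 1);           /* divisor <= high half: returns 1 */
}
#endif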
4024
4025/* return TRUE if overflow */
4026static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
4027{
4028 int sa, sb;
4029 sa = ((int64_t)*phigh < 0);
4030 if (sa)
4031 neg128(plow, phigh);
4032 sb = (b < 0);
4033 if (sb)
4034 b = -b;
4035 if (div64(plow, phigh, b) != 0)
4036 return 1;
4037 if (sa ^ sb) {
4038 if (*plow > (1ULL << 63))
4039 return 1;
4040 *plow = - *plow;
4041 } else {
4042 if (*plow >= (1ULL << 63))
4043 return 1;
4044 }
4045 if (sa)
4046 *phigh = - *phigh;
4047 return 0;
4048}
4049
4050void helper_mulq_EAX_T0(void)
4051{
4052 uint64_t r0, r1;
4053
4054 mul64(&r0, &r1, EAX, T0);
4055 EAX = r0;
4056 EDX = r1;
4057 CC_DST = r0;
4058 CC_SRC = r1;
4059}
4060
4061void helper_imulq_EAX_T0(void)
4062{
4063 uint64_t r0, r1;
4064
4065 imul64(&r0, &r1, EAX, T0);
4066 EAX = r0;
4067 EDX = r1;
4068 CC_DST = r0;
4069 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
4070}
4071
4072void helper_imulq_T0_T1(void)
4073{
4074 uint64_t r0, r1;
4075
4076 imul64(&r0, &r1, T0, T1);
4077 T0 = r0;
4078 CC_DST = r0;
4079 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
4080}
4081
4082void helper_divq_EAX_T0(void)
4083{
4084 uint64_t r0, r1;
4085 if (T0 == 0) {
4086 raise_exception(EXCP00_DIVZ);
4087 }
4088 r0 = EAX;
4089 r1 = EDX;
4090 if (div64(&r0, &r1, T0))
4091 raise_exception(EXCP00_DIVZ);
4092 EAX = r0;
4093 EDX = r1;
4094}
4095
4096void helper_idivq_EAX_T0(void)
4097{
4098 uint64_t r0, r1;
4099 if (T0 == 0) {
4100 raise_exception(EXCP00_DIVZ);
4101 }
4102 r0 = EAX;
4103 r1 = EDX;
4104 if (idiv64(&r0, &r1, T0))
4105 raise_exception(EXCP00_DIVZ);
4106 EAX = r0;
4107 EDX = r1;
4108}
4109
4110void helper_bswapq_T0(void)
4111{
4112 T0 = bswap64(T0);
4113}
4114#endif
4115
4116void helper_hlt(void)
4117{
4118 env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
4119 env->hflags |= HF_HALTED_MASK;
4120 env->exception_index = EXCP_HLT;
4121 cpu_loop_exit();
4122}
4123
4124void helper_monitor(void)
4125{
4126 if ((uint32_t)ECX != 0)
4127 raise_exception(EXCP0D_GPF);
4128 /* XXX: store address ? */
4129}
4130
4131void helper_mwait(void)
4132{
4133 if ((uint32_t)ECX != 0)
4134 raise_exception(EXCP0D_GPF);
4135#ifdef VBOX
4136 helper_hlt();
4137#else
4138 /* XXX: not complete but not completely erroneous */
4139 if (env->cpu_index != 0 || env->next_cpu != NULL) {
4140 /* more than one CPU: do not sleep because another CPU may
4141 wake this one */
4142 } else {
4143 helper_hlt();
4144 }
4145#endif
4146}
4147
4148float approx_rsqrt(float a)
4149{
4150 return 1.0 / sqrt(a);
4151}
4152
4153float approx_rcp(float a)
4154{
4155 return 1.0 / a;
4156}
4157
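/* update_fp_status() re-derives the softfloat settings from the x87 control
   word (env->fpuc): bits 10-11 (RC) select the rounding mode and, when
   80-bit long doubles are in use, bits 8-9 (PC) select the 32/64/80-bit
   rounding precision; the reserved PC value 01 falls through to 80-bit. */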
4158void update_fp_status(void)
4159{
4160 int rnd_type;
4161
4162 /* set rounding mode */
4163 switch(env->fpuc & RC_MASK) {
4164 default:
4165 case RC_NEAR:
4166 rnd_type = float_round_nearest_even;
4167 break;
4168 case RC_DOWN:
4169 rnd_type = float_round_down;
4170 break;
4171 case RC_UP:
4172 rnd_type = float_round_up;
4173 break;
4174 case RC_CHOP:
4175 rnd_type = float_round_to_zero;
4176 break;
4177 }
4178 set_float_rounding_mode(rnd_type, &env->fp_status);
4179#ifdef FLOATX80
4180 switch((env->fpuc >> 8) & 3) {
4181 case 0:
4182 rnd_type = 32;
4183 break;
4184 case 2:
4185 rnd_type = 64;
4186 break;
4187 case 3:
4188 default:
4189 rnd_type = 80;
4190 break;
4191 }
4192 set_floatx80_rounding_precision(rnd_type, &env->fp_status);
4193#endif
4194}
4195
4196#if !defined(CONFIG_USER_ONLY)
4197
4198#define MMUSUFFIX _mmu
4199#define GETPC() (__builtin_return_address(0))
4200
4201#define SHIFT 0
4202#include "softmmu_template.h"
4203
4204#define SHIFT 1
4205#include "softmmu_template.h"
4206
4207#define SHIFT 2
4208#include "softmmu_template.h"
4209
4210#define SHIFT 3
4211#include "softmmu_template.h"
4212
4213#endif
4214
4215/* Try to fill the TLB and raise an exception on error. If retaddr is
4216   NULL, the function was called from C code (i.e. not from generated
4217   code or from helper.c). */
4218/* XXX: fix it to restore all registers */
4219void tlb_fill(target_ulong addr, int is_write, int is_user, void *retaddr)
4220{
4221 TranslationBlock *tb;
4222 int ret;
4223 unsigned long pc;
4224 CPUX86State *saved_env;
4225
4226 /* XXX: hack to restore env in all cases, even if not called from
4227 generated code */
4228 saved_env = env;
4229 env = cpu_single_env;
4230
4231 ret = cpu_x86_handle_mmu_fault(env, addr, is_write, is_user, 1);
4232 if (ret) {
4233 if (retaddr) {
4234 /* now we have a real cpu fault */
4235 pc = (unsigned long)retaddr;
4236 tb = tb_find_pc(pc);
4237 if (tb) {
4238 /* the PC is inside the translated code. It means that we have
4239 a virtual CPU fault */
4240 cpu_restore_state(tb, env, pc, NULL);
4241 }
4242 }
4243 if (retaddr)
4244 raise_exception_err(env->exception_index, env->error_code);
4245 else
4246 raise_exception_err_norestore(env->exception_index, env->error_code);
4247 }
4248 env = saved_env;
4249}
4250
4251#ifdef VBOX
4252
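/* The glue below is called from VBox code outside the recompiler. QEMU's
   helpers and generated code reach CPU state through the global `env`
   pointer, so each wrapper temporarily points `env` at the caller-supplied
   env1 and restores the previous value before returning. */
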
4253/**
4254 * Correctly computes the eflags.
4255 * @returns eflags.
4256 * @param env1 CPU environment.
4257 */
4258uint32_t raw_compute_eflags(CPUX86State *env1)
4259{
4260 CPUX86State *savedenv = env;
4261 env = env1;
4262 uint32_t efl = compute_eflags();
4263 env = savedenv;
4264 return efl;
4265}
4266
4267/**
4268 * Reads a byte from a virtual address in the guest memory area.
4269 * XXX: does this work for all addresses? swapped-out pages?
4270 * @returns The byte read.
4271 * @param env1 CPU environment.
4272 * @param addr GC virtual address.
4273 */
4274uint8_t read_byte(CPUX86State *env1, target_ulong addr)
4275{
4276 CPUX86State *savedenv = env;
4277 env = env1;
4278 uint8_t u8 = ldub_kernel(addr);
4279 env = savedenv;
4280 return u8;
4281}
4282
4283/**
4284 * Reads a word from a virtual address in the guest memory area.
4285 * XXX: does this work for all addresses? swapped-out pages?
4286 * @returns The word read.
4287 * @param env1 CPU environment.
4288 * @param addr GC virtual address.
4289 */
4290uint16_t read_word(CPUX86State *env1, target_ulong addr)
4291{
4292 CPUX86State *savedenv = env;
4293 env = env1;
4294 uint16_t u16 = lduw_kernel(addr);
4295 env = savedenv;
4296 return u16;
4297}
4298
4299/**
4300 * Reads a dword from a virtual address in the guest memory area.
4301 * XXX: does this work for all addresses? swapped-out pages?
4302 * @returns The dword read.
4303 * @param env1 CPU environment.
4304 * @param addr GC virtual address.
4305 */
4306uint32_t read_dword(CPUX86State *env1, target_ulong addr)
4307{
4308 CPUX86State *savedenv = env;
4309 env = env1;
4310 uint32_t u32 = ldl_kernel(addr);
4311 env = savedenv;
4312 return u32;
4313}
4314
4315/**
4316 * Writes a byte to a virtual address in the guest memory area.
4317 * XXX: does this work for all addresses? swapped-out pages?
4318 *
4319 * @param env1 CPU environment.
4320 * @param addr GC virtual address.
4321 * @param val The byte to write.
4322 */
4323void write_byte(CPUX86State *env1, target_ulong addr, uint8_t val)
4324{
4325 CPUX86State *savedenv = env;
4326 env = env1;
4327 stb(addr, val);
4328 env = savedenv;
4329}
4330
4331void write_word(CPUX86State *env1, target_ulong addr, uint16_t val)
4332{
4333 CPUX86State *savedenv = env;
4334 env = env1;
4335 stw(addr, val);
4336 env = savedenv;
4337}
4338
4339void write_dword(CPUX86State *env1, target_ulong addr, uint32_t val)
4340{
4341 CPUX86State *savedenv = env;
4342 env = env1;
4343 stl(addr, val);
4344 env = savedenv;
4345}
4346
4347/**
4348 * Correctly loads a selector into a segment register, updating the
4349 * internal qemu data/caches.
4350 * @param env1 CPU environment.
4351 * @param seg_reg Segment register.
4352 * @param selector Selector to load.
4353 */
4354void sync_seg(CPUX86State *env1, int seg_reg, int selector)
4355{
4356 CPUX86State *savedenv = env;
4357 env = env1;
4358
4359 if ( env->eflags & X86_EFL_VM
4360 || !(env->cr[0] & X86_CR0_PE))
4361 {
4362 load_seg_vm(seg_reg, selector);
4363
4364 env = savedenv;
4365
4366 /* Successful sync. */
4367 env1->segs[seg_reg].newselector = 0;
4368 }
4369 else
4370 {
4371 if (setjmp(env1->jmp_env) == 0)
4372 {
4373 if (seg_reg == R_CS)
4374 {
4375 uint32_t e1, e2;
4376 load_segment(&e1, &e2, selector);
4377 cpu_x86_load_seg_cache(env, R_CS, selector,
4378 get_seg_base(e1, e2),
4379 get_seg_limit(e1, e2),
4380 e2);
4381 }
4382 else
4383 load_seg(seg_reg, selector);
4384 env = savedenv;
4385
4386 /* Successful sync. */
4387 env1->segs[seg_reg].newselector = 0;
4388 }
4389 else
4390 {
4391 env = savedenv;
4392
4393 /* Postpone sync until the guest uses the selector. */
4394 env1->segs[seg_reg].selector = selector; /* hidden values are now incorrect, but will be resynced when this register is accessed. */
4395 env1->segs[seg_reg].newselector = selector;
4396 Log(("sync_seg: out of sync seg_reg=%d selector=%#x\n", seg_reg, selector));
4397 }
4398 }
4399
4400}
4401
4402
4403/**
4404 * Correctly loads a new ldtr selector.
4405 *
4406 * @param env1 CPU environment.
4407 * @param selector Selector to load.
4408 */
4409void sync_ldtr(CPUX86State *env1, int selector)
4410{
4411 CPUX86State *saved_env = env;
4412 target_ulong saved_T0 = T0;
4413 if (setjmp(env1->jmp_env) == 0)
4414 {
4415 env = env1;
4416 T0 = selector;
4417 helper_lldt_T0();
4418 T0 = saved_T0;
4419 env = saved_env;
4420 }
4421 else
4422 {
4423 T0 = saved_T0;
4424 env = saved_env;
4425#ifdef VBOX_STRICT
4426 cpu_abort(env1, "sync_ldtr: selector=%#x\n", selector);
4427#endif
4428 }
4429}
4430
4431/**
4432 * Correctly loads a new tr selector.
4433 *
4434 * @param env1 CPU environment.
4435 * @param selector Selector to load.
4436 */
4437int sync_tr(CPUX86State *env1, int selector)
4438{
4439    /* ARG! This was going to call helper_ltr_T0, but that won't work because of the TSS busy flag. */
4440 SegmentCache *dt;
4441 uint32_t e1, e2;
4442 int index, type, entry_limit;
4443 target_ulong ptr;
4444 CPUX86State *saved_env = env;
4445 env = env1;
4446
4447 selector &= 0xffff;
4448 if ((selector & 0xfffc) == 0) {
4449 /* NULL selector case: invalid TR */
4450 env->tr.base = 0;
4451 env->tr.limit = 0;
4452 env->tr.flags = 0;
4453 } else {
4454 if (selector & 0x4)
4455 goto l_failure;
4456 dt = &env->gdt;
4457 index = selector & ~7;
4458#ifdef TARGET_X86_64
4459 if (env->hflags & HF_LMA_MASK)
4460 entry_limit = 15;
4461 else
4462#endif
4463 entry_limit = 7;
4464 if ((index + entry_limit) > dt->limit)
4465 goto l_failure;
4466 ptr = dt->base + index;
4467 e1 = ldl_kernel(ptr);
4468 e2 = ldl_kernel(ptr + 4);
4469 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
4470 if ((e2 & DESC_S_MASK) /*||
4471 (type != 1 && type != 9)*/)
4472 goto l_failure;
4473 if (!(e2 & DESC_P_MASK))
4474 goto l_failure;
4475#ifdef TARGET_X86_64
4476 if (env->hflags & HF_LMA_MASK) {
4477 uint32_t e3;
4478 e3 = ldl_kernel(ptr + 8);
4479 load_seg_cache_raw_dt(&env->tr, e1, e2);
4480 env->tr.base |= (target_ulong)e3 << 32;
4481 } else
4482#endif
4483 {
4484 load_seg_cache_raw_dt(&env->tr, e1, e2);
4485 }
4486 e2 |= DESC_TSS_BUSY_MASK;
4487 stl_kernel(ptr + 4, e2);
4488 }
4489 env->tr.selector = selector;
4490
4491 env = saved_env;
4492 return 0;
4493l_failure:
4494    AssertMsgFailed(("selector=%d\n", selector));
    env = saved_env; /* restore the global env pointer on the failure path as well */
4495    return -1;
4496}
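
/* Layout of the legacy 8-byte system descriptor decoded above:
 *   e1: limit 15:0 in bits 0-15, base 15:0 in bits 16-31
 *   e2: base 23:16 in bits 0-7, type in bits 8-11 (9 = available 32-bit TSS,
 *       11 = busy), S in bit 12 (0 for system segments), DPL in bits 13-14,
 *       P in bit 15, limit 19:16 in bits 16-19, G in bit 23 and base 31:24
 *       in bits 24-31.
 * In long mode a TSS descriptor is 16 bytes; the extra dword (e3) supplies
 * base 63:32, which is why entry_limit is 15 there. */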
4497
4498int emulate_single_instr(CPUX86State *env1)
4499{
4500#if 1 /* single stepping is broken when using a static tb... feel free to figure out why. :-) */
4501    /* This has to be static because it needs to be addressable
4502 using 32-bit immediate addresses on 64-bit machines. This
4503 is dictated by the gcc code model used when building this
4504 module / op.o. Using a static here pushes the problem
4505 onto the module loader. */
4506 static TranslationBlock tb_temp;
4507#endif
4508 TranslationBlock *tb;
4509 TranslationBlock *current;
4510 int csize;
4511 void (*gen_func)(void);
4512 uint8_t *tc_ptr;
4513 target_ulong old_eip;
4514
4515 /* ensures env is loaded in ebp! */
4516 CPUX86State *savedenv = env;
4517 env = env1;
4518
4519 RAWEx_ProfileStart(env, STATS_EMULATE_SINGLE_INSTR);
4520
4521#if 1 /* see above */
4522 tc_ptr = env->pvCodeBuffer;
4523#else
4524 tc_ptr = code_gen_ptr;
4525#endif
4526
4527 /*
4528 * Setup temporary translation block.
4529 */
4530 /* tb_alloc: */
4531#if 1 /* see above */
4532 tb = &tb_temp;
4533 tb->pc = env->segs[R_CS].base + env->eip;
4534 tb->cflags = 0;
4535#else
4536 tb = tb_alloc(env->segs[R_CS].base + env->eip);
4537 if (!tb)
4538 {
4539 tb_flush(env);
4540 tb = tb_alloc(env->segs[R_CS].base + env->eip);
4541 }
4542#endif
4543
4544 /* tb_find_slow: */
4545 tb->tc_ptr = tc_ptr;
4546 tb->cs_base = env->segs[R_CS].base;
4547 tb->flags = env->hflags | (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
4548
4549 /* Initialize the rest with sensible values. */
4550 tb->size = 0;
4551 tb->phys_hash_next = NULL;
4552 tb->page_next[0] = NULL;
4553 tb->page_next[1] = NULL;
4554 tb->page_addr[0] = 0;
4555 tb->page_addr[1] = 0;
4556 tb->tb_next_offset[0] = 0xffff;
4557 tb->tb_next_offset[1] = 0xffff;
4558 tb->tb_next[0] = 0xffff;
4559 tb->tb_next[1] = 0xffff;
4560 tb->jmp_next[0] = NULL;
4561 tb->jmp_next[1] = NULL;
4562 tb->jmp_first = NULL;
4563
4564 current = env->current_tb;
4565 env->current_tb = NULL;
4566
4567 /*
4568 * Translate only one instruction.
4569 */
4570 ASMAtomicOrU32(&env->state, CPU_EMULATE_SINGLE_INSTR);
4571 if (cpu_gen_code(env, tb, env->cbCodeBuffer, &csize) < 0)
4572 {
4573 AssertFailed();
4574 RAWEx_ProfileStop(env, STATS_EMULATE_SINGLE_INSTR);
4575 ASMAtomicAndU32(&env->state, ~CPU_EMULATE_SINGLE_INSTR);
4576 env = savedenv;
4577 return -1;
4578 }
4579#ifdef DEBUG
4580 if(csize > env->cbCodeBuffer)
4581 {
4582 RAWEx_ProfileStop(env, STATS_EMULATE_SINGLE_INSTR);
4583 AssertFailed();
4584 ASMAtomicAndU32(&env->state, ~CPU_EMULATE_SINGLE_INSTR);
4585 env = savedenv;
4586 return -1;
4587 }
4588 if (tb->tc_ptr != tc_ptr)
4589 {
4590 RAWEx_ProfileStop(env, STATS_EMULATE_SINGLE_INSTR);
4591 AssertFailed();
4592 ASMAtomicAndU32(&env->state, ~CPU_EMULATE_SINGLE_INSTR);
4593 env = savedenv;
4594 return -1;
4595 }
4596#endif
4597 ASMAtomicAndU32(&env->state, ~CPU_EMULATE_SINGLE_INSTR);
4598
4599 /* tb_link_phys: */
4600 tb->jmp_first = (TranslationBlock *)((intptr_t)tb | 2);
4601 Assert(tb->jmp_next[0] == NULL); Assert(tb->jmp_next[1] == NULL);
4602 if (tb->tb_next_offset[0] != 0xffff)
4603 tb_set_jmp_target(tb, 0, (uintptr_t)(tb->tc_ptr + tb->tb_next_offset[0]));
4604 if (tb->tb_next_offset[1] != 0xffff)
4605 tb_set_jmp_target(tb, 1, (uintptr_t)(tb->tc_ptr + tb->tb_next_offset[1]));
4606
4607 /*
4608 * Execute it using emulation
4609 */
4610 old_eip = env->eip;
4611 gen_func = (void *)tb->tc_ptr;
4612 env->current_tb = tb;
4613
4614    // EIP remains the same while an instruction repeats (e.g. REP-prefixed string instructions);
4615    // it is unclear why qemu doesn't do a jump inside the generated code. Looping here until EIP changes is perhaps not a very safe hack.
4616 while(old_eip == env->eip)
4617 {
4618 gen_func();
4619 /*
4620 * Exit once we detect an external interrupt and interrupts are enabled
4621 */
4622 if( (env->interrupt_request & (CPU_INTERRUPT_EXTERNAL_EXIT|CPU_INTERRUPT_EXTERNAL_TIMER)) ||
4623 ( (env->eflags & IF_MASK) &&
4624 !(env->hflags & HF_INHIBIT_IRQ_MASK) &&
4625 (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD) ) )
4626 {
4627 break;
4628 }
4629 }
4630 env->current_tb = current;
4631
4632 Assert(tb->phys_hash_next == NULL);
4633 Assert(tb->page_next[0] == NULL);
4634 Assert(tb->page_next[1] == NULL);
4635 Assert(tb->page_addr[0] == 0);
4636 Assert(tb->page_addr[1] == 0);
4637/*
4638 Assert(tb->tb_next_offset[0] == 0xffff);
4639 Assert(tb->tb_next_offset[1] == 0xffff);
4640 Assert(tb->tb_next[0] == 0xffff);
4641 Assert(tb->tb_next[1] == 0xffff);
4642 Assert(tb->jmp_next[0] == NULL);
4643 Assert(tb->jmp_next[1] == NULL);
4644 Assert(tb->jmp_first == NULL); */
4645
4646 RAWEx_ProfileStop(env, STATS_EMULATE_SINGLE_INSTR);
4647
4648 /*
4649 * Execute the next instruction when we encounter instruction fusing.
4650 */
4651 if (env->hflags & HF_INHIBIT_IRQ_MASK)
4652 {
4653 Log(("REM: Emulating next instruction due to instruction fusing (HF_INHIBIT_IRQ_MASK) at %VGv\n", env->eip));
4654 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
4655 emulate_single_instr(env);
4656 }
4657
4658 env = savedenv;
4659 return 0;
4660}
4661
4662int get_ss_esp_from_tss_raw(CPUX86State *env1, uint32_t *ss_ptr,
4663 uint32_t *esp_ptr, int dpl)
4664{
4665 int type, index, shift;
4666
4667 CPUX86State *savedenv = env;
4668 env = env1;
4669
4670 if (!(env->tr.flags & DESC_P_MASK))
4671 cpu_abort(env, "invalid tss");
4672 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
4673 if ((type & 7) != 1)
4674 cpu_abort(env, "invalid tss type %d", type);
4675 shift = type >> 3;
4676 index = (dpl * 4 + 2) << shift;
4677 if (index + (4 << shift) - 1 > env->tr.limit)
4678 {
4679 env = savedenv;
4680 return 0;
4681 }
4682 //raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
4683
4684 if (shift == 0) {
4685 *esp_ptr = lduw_kernel(env->tr.base + index);
4686 *ss_ptr = lduw_kernel(env->tr.base + index + 2);
4687 } else {
4688 *esp_ptr = ldl_kernel(env->tr.base + index);
4689 *ss_ptr = lduw_kernel(env->tr.base + index + 4);
4690 }
4691
4692 env = savedenv;
4693 return 1;
4694}
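
/* TSS layout assumed above: in a 32-bit TSS (shift = 1) the ring-N stack
   fields sit at offset 4 + N*8 (ESP) and 8 + N*8 (SS); in a 16-bit TSS
   (shift = 0) at 2 + N*4 (SP) and 4 + N*4 (SS). The expression
   index = (dpl * 4 + 2) << shift encodes exactly that. */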
4695
4696//*****************************************************************************
4697// Needs to be at the bottom of the file (overriding macros)
4698
4699static inline CPU86_LDouble helper_fldt_raw(uint8_t *ptr)
4700{
4701 return *(CPU86_LDouble *)ptr;
4702}
4703
4704static inline void helper_fstt_raw(CPU86_LDouble f, uint8_t *ptr)
4705{
4706 *(CPU86_LDouble *)ptr = f;
4707}
4708
4709#undef stw
4710#undef stl
4711#undef stq
4712#define stw(a,b) *(uint16_t *)(a) = (uint16_t)(b)
4713#define stl(a,b) *(uint32_t *)(a) = (uint32_t)(b)
4714#define stq(a,b) *(uint64_t *)(a) = (uint64_t)(b)
4715#define data64 0
4716
4717//*****************************************************************************
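/* FXSAVE image offsets used by the two functions below: 0x00 FCW, 0x02 FSW,
   0x04 abridged FTW (one bit per register, 1 = valid), 0x18 MXCSR,
   0x1c MXCSR_MASK, 0x20 ST0-ST7 in 16-byte slots, 0xa0 the XMM registers.
   env->fptags[] stores 1 for an *empty* register, hence the `^ 0xff` when
   converting to and from the abridged tag byte. */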
4718void restore_raw_fp_state(CPUX86State *env, uint8_t *ptr)
4719{
4720 int fpus, fptag, i, nb_xmm_regs;
4721 CPU86_LDouble tmp;
4722 uint8_t *addr;
4723
4724 if (env->cpuid_features & CPUID_FXSR)
4725 {
4726 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
4727 fptag = 0;
4728 for(i = 0; i < 8; i++) {
4729 fptag |= (env->fptags[i] << i);
4730 }
4731 stw(ptr, env->fpuc);
4732 stw(ptr + 2, fpus);
4733 stw(ptr + 4, fptag ^ 0xff);
4734
4735 addr = ptr + 0x20;
4736 for(i = 0;i < 8; i++) {
4737 tmp = ST(i);
4738 helper_fstt_raw(tmp, addr);
4739 addr += 16;
4740 }
4741
4742 if (env->cr[4] & CR4_OSFXSR_MASK) {
4743 /* XXX: finish it */
4744 stl(ptr + 0x18, env->mxcsr); /* mxcsr */
4745 stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
4746 nb_xmm_regs = 8 << data64;
4747 addr = ptr + 0xa0;
4748 for(i = 0; i < nb_xmm_regs; i++) {
4749#if __GNUC__ < 4
4750 stq(addr, env->xmm_regs[i].XMM_Q(0));
4751 stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
4752#else /* VBOX + __GNUC__ >= 4: gcc 4.x compiler bug - it runs out of registers for the 64-bit value. */
4753 stl(addr, env->xmm_regs[i].XMM_L(0));
4754 stl(addr + 4, env->xmm_regs[i].XMM_L(1));
4755 stl(addr + 8, env->xmm_regs[i].XMM_L(2));
4756 stl(addr + 12, env->xmm_regs[i].XMM_L(3));
4757#endif
4758 addr += 16;
4759 }
4760 }
4761 }
4762 else
4763 {
4764 PX86FPUSTATE fp = (PX86FPUSTATE)ptr;
4765 int fptag;
4766
4767 fp->FCW = env->fpuc;
4768 fp->FSW = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
4769 fptag = 0;
4770 for (i=7; i>=0; i--) {
4771 fptag <<= 2;
4772 if (env->fptags[i]) {
4773 fptag |= 3;
4774 } else {
4775 /* the FPU automatically computes it */
4776 }
4777 }
4778 fp->FTW = fptag;
4779
4780 for(i = 0;i < 8; i++) {
4781 tmp = ST(i);
4782 helper_fstt_raw(tmp, &fp->regs[i].reg[0]);
4783 }
4784 }
4785}
4786
4787//*****************************************************************************
4788#undef lduw
4789#undef ldl
4790#undef ldq
4791#define lduw(a) *(uint16_t *)(a)
4792#define ldl(a) *(uint32_t *)(a)
4793#define ldq(a) *(uint64_t *)(a)
4794//*****************************************************************************
4795void save_raw_fp_state(CPUX86State *env, uint8_t *ptr)
4796{
4797 int i, fpus, fptag, nb_xmm_regs;
4798 CPU86_LDouble tmp;
4799 uint8_t *addr;
4800
4801 if (env->cpuid_features & CPUID_FXSR)
4802 {
4803 env->fpuc = lduw(ptr);
4804 fpus = lduw(ptr + 2);
4805 fptag = lduw(ptr + 4);
4806 env->fpstt = (fpus >> 11) & 7;
4807 env->fpus = fpus & ~0x3800;
4808 fptag ^= 0xff;
4809 for(i = 0;i < 8; i++) {
4810 env->fptags[i] = ((fptag >> i) & 1);
4811 }
4812
4813 addr = ptr + 0x20;
4814 for(i = 0;i < 8; i++) {
4815 tmp = helper_fldt_raw(addr);
4816 ST(i) = tmp;
4817 addr += 16;
4818 }
4819
4820 if (env->cr[4] & CR4_OSFXSR_MASK) {
4821 /* XXX: finish it, endianness */
4822 env->mxcsr = ldl(ptr + 0x18);
4823 //ldl(ptr + 0x1c);
4824 nb_xmm_regs = 8 << data64;
4825 addr = ptr + 0xa0;
4826 for(i = 0; i < nb_xmm_regs; i++) {
4827#if HC_ARCH_BITS == 32
4828 /* this is a workaround for http://gcc.gnu.org/bugzilla/show_bug.cgi?id=35135 */
4829 env->xmm_regs[i].XMM_L(0) = ldl(addr);
4830 env->xmm_regs[i].XMM_L(1) = ldl(addr + 4);
4831 env->xmm_regs[i].XMM_L(2) = ldl(addr + 8);
4832 env->xmm_regs[i].XMM_L(3) = ldl(addr + 12);
4833#else
4834 env->xmm_regs[i].XMM_Q(0) = ldq(addr);
4835 env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
4836#endif
4837 addr += 16;
4838 }
4839 }
4840 }
4841 else
4842 {
4843 PX86FPUSTATE fp = (PX86FPUSTATE)ptr;
4844    int fptag;
4845
4846 env->fpuc = fp->FCW;
4847 env->fpstt = (fp->FSW >> 11) & 7;
4848 env->fpus = fp->FSW & ~0x3800;
4849 fptag = fp->FTW;
4850 for(i = 0;i < 8; i++) {
4851 env->fptags[i] = ((fptag & 3) == 3);
4852 fptag >>= 2;
4853 }
4855 for(i = 0;i < 8; i++) {
4856 tmp = helper_fldt_raw(&fp->regs[i].reg[0]);
4857 ST(i) = tmp;
4858 }
4859 }
4860}
4861//*****************************************************************************
4862//*****************************************************************************
4863
4864#endif /* VBOX */
4865