VirtualBox

source: vbox/trunk/src/recompiler/target-i386/op_helper.c @ 48954

Last change on this file since 48954 was 48472, checked in by vboxsync, 11 years ago

REM: Correctly propagate triple faults to EM.

  • Property svn:eol-style set to native
File size: 201.0 KB
1/*
2 * i386 helpers
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19
20/*
21 * Oracle LGPL Disclaimer: For the avoidance of doubt, except that if any license choice
22 * other than GPL or LGPL is available it will apply instead, Oracle elects to use only
23 * the Lesser General Public License version 2.1 (LGPLv2) at this time for any software where
24 * a choice of LGPL license versions is made available with the language indicating
25 * that LGPLv2 or any later version may be used, or where a choice of which version
26 * of the LGPL is applied is otherwise unspecified.
27 */
28
29#include "exec.h"
30#include "exec-all.h"
31#include "host-utils.h"
32#include "ioport.h"
33
34#ifdef VBOX
35# include "qemu-common.h"
36# include <math.h>
37# include "tcg.h"
38#endif /* VBOX */
39
40//#define DEBUG_PCALL
41
42
43#ifdef DEBUG_PCALL
44# define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__)
45# define LOG_PCALL_STATE(env) \
46 log_cpu_state_mask(CPU_LOG_PCALL, (env), X86_DUMP_CCOP)
47#else
48# define LOG_PCALL(...) do { } while (0)
49# define LOG_PCALL_STATE(env) do { } while (0)
50#endif
51
52
53#if 0
54#define raise_exception_err(a, b)\
55do {\
56 qemu_log("raise_exception line=%d\n", __LINE__);\
57 (raise_exception_err)(a, b);\
58} while (0)
59#endif
60
61static const uint8_t parity_table[256] = {
62 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
63 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
64 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
65 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
66 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
67 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
68 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
69 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
70 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
71 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
72 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
73 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
74 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
75 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
76 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
77 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
78 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
79 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
80 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
81 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
82 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
83 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
84 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
85 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
86 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
87 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
88 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
89 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
90 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
91 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
92 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
93 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
94};
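/* parity_table[b] is CC_P exactly when byte b contains an even number of set
   bits; the lazy flag helpers index it with the low byte of the last result
   to reconstruct PF (e.g. parity_table[0x03] == CC_P, two bits set). */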
95
96/* modulo 17 table */
97static const uint8_t rclw_table[32] = {
98 0, 1, 2, 3, 4, 5, 6, 7,
99 8, 9,10,11,12,13,14,15,
100 16, 0, 1, 2, 3, 4, 5, 6,
101 7, 8, 9,10,11,12,13,14,
102};
103
104/* modulo 9 table */
105static const uint8_t rclb_table[32] = {
106 0, 1, 2, 3, 4, 5, 6, 7,
107 8, 0, 1, 2, 3, 4, 5, 6,
108 7, 8, 0, 1, 2, 3, 4, 5,
109 6, 7, 8, 0, 1, 2, 3, 4,
110};
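/* RCL/RCR rotate through CF, so a 16-bit operand has 17 bit positions and an
   8-bit operand has 9; these tables map a count already masked to 5 bits
   (0..31) to count mod 17 and count mod 9 respectively. */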
111
112static const CPU86_LDouble f15rk[7] =
113{
114 0.00000000000000000000L,
115 1.00000000000000000000L,
116 3.14159265358979323851L, /*pi*/
117 0.30102999566398119523L, /*lg2*/
118 0.69314718055994530943L, /*ln2*/
119 1.44269504088896340739L, /*l2e*/
120 3.32192809488736234781L, /*l2t*/
121};
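/* Constants pushed by the x87 FLDZ, FLD1, FLDPI, FLDLG2, FLDLN2, FLDL2E and
   FLDL2T helpers, in that index order. */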
122
123/* broken thread support */
124
125static spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;
126
127void helper_lock(void)
128{
129 spin_lock(&global_cpu_lock);
130}
131
132void helper_unlock(void)
133{
134 spin_unlock(&global_cpu_lock);
135}
136
137void helper_write_eflags(target_ulong t0, uint32_t update_mask)
138{
139 load_eflags(t0, update_mask);
140}
141
142target_ulong helper_read_eflags(void)
143{
144 uint32_t eflags;
145 eflags = helper_cc_compute_all(CC_OP);
146 eflags |= (DF & DF_MASK);
147 eflags |= env->eflags & ~(VM_MASK | RF_MASK);
148 return eflags;
149}
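/* The arithmetic flags are computed lazily: helper_cc_compute_all() rebuilds
   OF/SF/ZF/AF/PF/CF from CC_SRC/CC_DST according to the operation recorded in
   CC_OP, DF is kept in its own variable, and the remaining bits live directly
   in env->eflags. */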
150
151#ifdef VBOX
152
153void helper_write_eflags_vme(target_ulong t0)
154{
155 unsigned int new_eflags = t0;
156
157 assert(env->eflags & (1<<VM_SHIFT));
158
159 /* if virtual interrupt pending and (virtual) interrupts will be enabled -> #GP */
160 /* if TF will be set -> #GP */
161 if ( ((new_eflags & IF_MASK) && (env->eflags & VIP_MASK))
162 || (new_eflags & TF_MASK)) {
163 raise_exception(EXCP0D_GPF);
164 } else {
165 load_eflags(new_eflags,
166 (TF_MASK | AC_MASK | ID_MASK | NT_MASK) & 0xffff);
167
168 if (new_eflags & IF_MASK) {
169 env->eflags |= VIF_MASK;
170 } else {
171 env->eflags &= ~VIF_MASK;
172 }
173 }
174}
175
176target_ulong helper_read_eflags_vme(void)
177{
178 uint32_t eflags;
179 eflags = helper_cc_compute_all(CC_OP);
180 eflags |= (DF & DF_MASK);
181 eflags |= env->eflags & ~(VM_MASK | RF_MASK);
182 if (env->eflags & VIF_MASK)
183 eflags |= IF_MASK;
184 else
185 eflags &= ~IF_MASK;
186
187 /* According to AMD manual, should be read with IOPL == 3 */
188 eflags |= (3 << IOPL_SHIFT);
189
190 /* We only use helper_read_eflags_vme() in 16-bit mode */
191 return eflags & 0xffff;
192}
193
194void helper_dump_state()
195{
196 LogRel(("CS:EIP=%08x:%08x, FLAGS=%08x\n", env->segs[R_CS].base, env->eip, env->eflags));
197 LogRel(("EAX=%08x\tECX=%08x\tEDX=%08x\tEBX=%08x\n",
198 (uint32_t)env->regs[R_EAX], (uint32_t)env->regs[R_ECX],
199 (uint32_t)env->regs[R_EDX], (uint32_t)env->regs[R_EBX]));
200 LogRel(("ESP=%08x\tEBP=%08x\tESI=%08x\tEDI=%08x\n",
201 (uint32_t)env->regs[R_ESP], (uint32_t)env->regs[R_EBP],
202 (uint32_t)env->regs[R_ESI], (uint32_t)env->regs[R_EDI]));
203}
204
205/**
206 * Updates e2 with the DESC_A_MASK, writes it to the descriptor table, and
207 * returns the updated e2.
208 *
209 * @returns e2 with A set.
210 * @param e2 The 2nd selector DWORD.
211 */
212static uint32_t set_segment_accessed(int selector, uint32_t e2)
213{
214 SegmentCache *dt = selector & X86_SEL_LDT ? &env->ldt : &env->gdt;
215 target_ulong ptr = dt->base + (selector & X86_SEL_MASK);
216
217 e2 |= DESC_A_MASK;
218 stl_kernel(ptr + 4, e2);
219 return e2;
220}
221
222#endif /* VBOX */
223
224/* return non-zero on error */
225static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
226 int selector)
227{
228 SegmentCache *dt;
229 int index;
230 target_ulong ptr;
231
232 if (selector & 0x4)
233 dt = &env->ldt;
234 else
235 dt = &env->gdt;
236 index = selector & ~7;
237 if ((index + 7) > dt->limit)
238 return -1;
239 ptr = dt->base + index;
240 *e1_ptr = ldl_kernel(ptr);
241 *e2_ptr = ldl_kernel(ptr + 4);
242 return 0;
243}
244
245static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
246{
247 unsigned int limit;
248 limit = (e1 & 0xffff) | (e2 & 0x000f0000);
249 if (e2 & DESC_G_MASK)
250 limit = (limit << 12) | 0xfff;
251 return limit;
252}
253
254static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
255{
256 return ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
257}
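/* Descriptor dword layout used by the two helpers above: e1 holds limit[15:0]
   and base[15:0]; e2 holds base[23:16], the access byte, limit[19:16], the
   flag nibble (AVL/L/D-B/G) and base[31:24]. With G set the limit counts 4K
   pages, hence the << 12 | 0xfff expansion. */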
258
259static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
260{
261 sc->base = get_seg_base(e1, e2);
262 sc->limit = get_seg_limit(e1, e2);
263#ifndef VBOX
264 sc->flags = e2;
265#else
266 sc->flags = e2 & DESC_RAW_FLAG_BITS;
267 sc->newselector = 0;
268 sc->fVBoxFlags = CPUMSELREG_FLAGS_VALID;
269#endif
270}
271
272/* init the segment cache in vm86 mode. */
273static inline void load_seg_vm(int seg, int selector)
274{
275 selector &= 0xffff;
276#ifdef VBOX
277 /* flags must be 0xf3; expand-up read/write accessed data segment with DPL=3. (VT-x) */
278 unsigned flags = DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | DESC_A_MASK;
279 flags |= (3 << DESC_DPL_SHIFT);
280
281 cpu_x86_load_seg_cache(env, seg, selector,
282 (selector << 4), 0xffff, flags);
283#else /* VBOX */
284 cpu_x86_load_seg_cache(env, seg, selector,
285 (selector << 4), 0xffff, 0);
286#endif /* VBOX */
287}
288
289static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
290 uint32_t *esp_ptr, int dpl)
291{
292#ifndef VBOX
293 int type, index, shift;
294#else
295 unsigned int type, index, shift;
296#endif
297
298#if 0
299 {
300 int i;
301 printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
302 for(i=0;i<env->tr.limit;i++) {
303 printf("%02x ", env->tr.base[i]);
304 if ((i & 7) == 7) printf("\n");
305 }
306 printf("\n");
307 }
308#endif
309
310 if (!(env->tr.flags & DESC_P_MASK))
311 cpu_abort(env, "invalid tss");
312 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
313 if ((type & 7) != 1)
314 cpu_abort(env, "invalid tss type");
315 shift = type >> 3;
316 index = (dpl * 4 + 2) << shift;
317 if (index + (4 << shift) - 1 > env->tr.limit)
318 raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
319 if (shift == 0) {
320 *esp_ptr = lduw_kernel(env->tr.base + index);
321 *ss_ptr = lduw_kernel(env->tr.base + index + 2);
322 } else {
323 *esp_ptr = ldl_kernel(env->tr.base + index);
324 *ss_ptr = lduw_kernel(env->tr.base + index + 4);
325 }
326}
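/* The (dpl * 4 + 2) << shift index matches the TSS layouts: a 16-bit TSS keeps
   SP0/SS0, SP1/SS1, SP2/SS2 as word pairs starting at offset 2, a 32-bit TSS
   keeps ESP0/SS0 .. ESP2/SS2 as dword/word pairs starting at offset 4. */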
327
328/* XXX: merge with load_seg() */
329static void tss_load_seg(int seg_reg, int selector)
330{
331 uint32_t e1, e2;
332 int rpl, dpl, cpl;
333
334#ifdef VBOX
335 e1 = e2 = 0; /* gcc warning? */
336 cpl = env->hflags & HF_CPL_MASK;
337 /* Trying to load a selector with CPL=1? */
338 if (cpl == 0 && (selector & 3) == 1 && (env->state & CPU_RAW_RING0))
339 {
340 Log(("RPL 1 -> sel %04X -> %04X (tss_load_seg)\n", selector, selector & 0xfffc));
341 selector = selector & 0xfffc;
342 }
343#endif /* VBOX */
344
345 if ((selector & 0xfffc) != 0) {
346 if (load_segment(&e1, &e2, selector) != 0)
347 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
348 if (!(e2 & DESC_S_MASK))
349 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
350 rpl = selector & 3;
351 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
352 cpl = env->hflags & HF_CPL_MASK;
353 if (seg_reg == R_CS) {
354 if (!(e2 & DESC_CS_MASK))
355 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
356 /* XXX: is it correct ? */
357 if (dpl != rpl)
358 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
359 if ((e2 & DESC_C_MASK) && dpl > rpl)
360 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
361 } else if (seg_reg == R_SS) {
362 /* SS must be writable data */
363 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
364 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
365 if (dpl != cpl || dpl != rpl)
366 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
367 } else {
368 /* not readable code */
369 if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
370 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
371 /* if data or non-conforming code, check the rights */
372 if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
373 if (dpl < cpl || dpl < rpl)
374 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
375 }
376 }
377 if (!(e2 & DESC_P_MASK))
378 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
379 cpu_x86_load_seg_cache(env, seg_reg, selector,
380 get_seg_base(e1, e2),
381 get_seg_limit(e1, e2),
382 e2);
383 } else {
384 if (seg_reg == R_SS || seg_reg == R_CS)
385 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
386#ifdef VBOX
387# if 0 /** @todo now we ignore loading 0 selectors, need to check what is correct once */
388 cpu_x86_load_seg_cache(env, seg_reg, selector,
389 0, 0, 0);
390# endif
391#endif /* VBOX */
392 }
393}
394
395#define SWITCH_TSS_JMP 0
396#define SWITCH_TSS_IRET 1
397#define SWITCH_TSS_CALL 2
398
399/* XXX: restore CPU state in registers (PowerPC case) */
400static void switch_tss(int tss_selector,
401 uint32_t e1, uint32_t e2, int source,
402 uint32_t next_eip)
403{
404 int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
405 target_ulong tss_base;
406 uint32_t new_regs[8], new_segs[6];
407 uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
408 uint32_t old_eflags, eflags_mask;
409 SegmentCache *dt;
410#ifndef VBOX
411 int index;
412#else
413 unsigned int index;
414#endif
415 target_ulong ptr;
416
417 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
418 LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);
419
420 /* if task gate, we read the TSS segment and we load it */
421 if (type == 5) {
422 if (!(e2 & DESC_P_MASK))
423 raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
424 tss_selector = e1 >> 16;
425 if (tss_selector & 4)
426 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
427 if (load_segment(&e1, &e2, tss_selector) != 0)
428 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
429 if (e2 & DESC_S_MASK)
430 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
431 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
432 if ((type & 7) != 1)
433 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
434 }
435
436 if (!(e2 & DESC_P_MASK))
437 raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
438
439 if (type & 8)
440 tss_limit_max = 103;
441 else
442 tss_limit_max = 43;
443 tss_limit = get_seg_limit(e1, e2);
444 tss_base = get_seg_base(e1, e2);
445 if ((tss_selector & 4) != 0 ||
446 tss_limit < tss_limit_max)
447 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
448 old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
449 if (old_type & 8)
450 old_tss_limit_max = 103;
451 else
452 old_tss_limit_max = 43;
453
454#ifndef VBOX /* The old TSS is written first... */
455 /* read all the registers from the new TSS */
456 if (type & 8) {
457 /* 32 bit */
458 new_cr3 = ldl_kernel(tss_base + 0x1c);
459 new_eip = ldl_kernel(tss_base + 0x20);
460 new_eflags = ldl_kernel(tss_base + 0x24);
461 for(i = 0; i < 8; i++)
462 new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
463 for(i = 0; i < 6; i++)
464 new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
465 new_ldt = lduw_kernel(tss_base + 0x60);
466 new_trap = ldl_kernel(tss_base + 0x64);
467 } else {
468 /* 16 bit */
469 new_cr3 = 0;
470 new_eip = lduw_kernel(tss_base + 0x0e);
471 new_eflags = lduw_kernel(tss_base + 0x10);
472 for(i = 0; i < 8; i++)
473 new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
474 for(i = 0; i < 4; i++)
475 new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 2));
476 new_ldt = lduw_kernel(tss_base + 0x2a);
477 new_segs[R_FS] = 0;
478 new_segs[R_GS] = 0;
479 new_trap = 0;
480 }
481#endif
482
483 /* NOTE: we must avoid memory exceptions during the task switch,
484 so we make dummy accesses beforehand */
485 /* XXX: it can still fail in some cases, so a bigger hack is
486 necessary to validate the TLB after having done the accesses */
487
488 v1 = ldub_kernel(env->tr.base);
489 v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
490 stb_kernel(env->tr.base, v1);
491 stb_kernel(env->tr.base + old_tss_limit_max, v2);
492
493 /* clear busy bit (it is restartable) */
494 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
495 target_ulong ptr;
496 uint32_t e2;
497 ptr = env->gdt.base + (env->tr.selector & ~7);
498 e2 = ldl_kernel(ptr + 4);
499 e2 &= ~DESC_TSS_BUSY_MASK;
500 stl_kernel(ptr + 4, e2);
501 }
502 old_eflags = compute_eflags();
503 if (source == SWITCH_TSS_IRET)
504 old_eflags &= ~NT_MASK;
505
506 /* save the current state in the old TSS */
507 if (type & 8) {
508 /* 32 bit */
509 stl_kernel(env->tr.base + 0x20, next_eip);
510 stl_kernel(env->tr.base + 0x24, old_eflags);
511 stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
512 stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
513 stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
514 stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
515 stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
516 stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
517 stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
518 stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
519 for(i = 0; i < 6; i++)
520 stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
521#if defined(VBOX) && defined(DEBUG)
522 printf("TSS 32 bits switch\n");
523 printf("Saving CS=%08X\n", env->segs[R_CS].selector);
524#endif
525 } else {
526 /* 16 bit */
527 stw_kernel(env->tr.base + 0x0e, next_eip);
528 stw_kernel(env->tr.base + 0x10, old_eflags);
529 stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
530 stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
531 stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
532 stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
533 stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
534 stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
535 stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
536 stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
537 for(i = 0; i < 4; i++)
538 stw_kernel(env->tr.base + (0x22 + i * 2), env->segs[i].selector);
539 }
540
541#ifdef VBOX
542 /* read all the registers from the new TSS - may be the same as the old one */
543 if (type & 8) {
544 /* 32 bit */
545 new_cr3 = ldl_kernel(tss_base + 0x1c);
546 new_eip = ldl_kernel(tss_base + 0x20);
547 new_eflags = ldl_kernel(tss_base + 0x24);
548 for(i = 0; i < 8; i++)
549 new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
550 for(i = 0; i < 6; i++)
551 new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
552 new_ldt = lduw_kernel(tss_base + 0x60);
553 new_trap = ldl_kernel(tss_base + 0x64);
554 } else {
555 /* 16 bit */
556 new_cr3 = 0;
557 new_eip = lduw_kernel(tss_base + 0x0e);
558 new_eflags = lduw_kernel(tss_base + 0x10);
559 for(i = 0; i < 8; i++)
560 new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
561 for(i = 0; i < 4; i++)
562 new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 2));
563 new_ldt = lduw_kernel(tss_base + 0x2a);
564 new_segs[R_FS] = 0;
565 new_segs[R_GS] = 0;
566 new_trap = 0;
567 }
568#endif
569
570 /* now if an exception occurs, it will occur in the next task
571 context */
572
573 if (source == SWITCH_TSS_CALL) {
574 stw_kernel(tss_base, env->tr.selector);
575 new_eflags |= NT_MASK;
576 }
577
578 /* set busy bit */
579 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
580 target_ulong ptr;
581 uint32_t e2;
582 ptr = env->gdt.base + (tss_selector & ~7);
583 e2 = ldl_kernel(ptr + 4);
584 e2 |= DESC_TSS_BUSY_MASK;
585 stl_kernel(ptr + 4, e2);
586 }
587
588 /* set the new CPU state */
589 /* from this point, any exception which occurs can cause problems */
590 env->cr[0] |= CR0_TS_MASK;
591 env->hflags |= HF_TS_MASK;
592 env->tr.selector = tss_selector;
593 env->tr.base = tss_base;
594 env->tr.limit = tss_limit;
595#ifndef VBOX
596 env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;
597#else
598 env->tr.flags = e2 & (DESC_RAW_FLAG_BITS & ~(DESC_TSS_BUSY_MASK)); /** @todo stop clearing the busy bit, VT-x and AMD-V seems to set it in the hidden bits. */
599 env->tr.fVBoxFlags = CPUMSELREG_FLAGS_VALID;
600 env->tr.newselector = 0;
601#endif
602
603 if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
604 cpu_x86_update_cr3(env, new_cr3);
605 }
606
607 /* load all registers without an exception, then reload them, possibly
608 raising exceptions */
609 env->eip = new_eip;
610 eflags_mask = TF_MASK | AC_MASK | ID_MASK |
611 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
612 if (!(type & 8))
613 eflags_mask &= 0xffff;
614 load_eflags(new_eflags, eflags_mask);
615 /* XXX: what to do in 16 bit case ? */
616 EAX = new_regs[0];
617 ECX = new_regs[1];
618 EDX = new_regs[2];
619 EBX = new_regs[3];
620 ESP = new_regs[4];
621 EBP = new_regs[5];
622 ESI = new_regs[6];
623 EDI = new_regs[7];
624 if (new_eflags & VM_MASK) {
625 for(i = 0; i < 6; i++)
626 load_seg_vm(i, new_segs[i]);
627 /* in vm86, CPL is always 3 */
628 cpu_x86_set_cpl(env, 3);
629 } else {
630 /* CPL is set to the RPL of CS */
631 cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
632 /* load just the selectors first, as the rest may trigger exceptions */
633 for(i = 0; i < 6; i++)
634 cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
635 }
636
637 env->ldt.selector = new_ldt & ~4;
638 env->ldt.base = 0;
639 env->ldt.limit = 0;
640 env->ldt.flags = 0;
641#ifdef VBOX
642 env->ldt.flags = DESC_INTEL_UNUSABLE;
643 env->ldt.fVBoxFlags = CPUMSELREG_FLAGS_VALID;
644 env->ldt.newselector = 0;
645#endif
646
647 /* load the LDT */
648 if (new_ldt & 4)
649 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
650
651 if ((new_ldt & 0xfffc) != 0) {
652 dt = &env->gdt;
653 index = new_ldt & ~7;
654 if ((index + 7) > dt->limit)
655 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
656 ptr = dt->base + index;
657 e1 = ldl_kernel(ptr);
658 e2 = ldl_kernel(ptr + 4);
659 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
660 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
661 if (!(e2 & DESC_P_MASK))
662 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
663 load_seg_cache_raw_dt(&env->ldt, e1, e2);
664 }
665
666 /* load the segments */
667 if (!(new_eflags & VM_MASK)) {
668 tss_load_seg(R_CS, new_segs[R_CS]);
669 tss_load_seg(R_SS, new_segs[R_SS]);
670 tss_load_seg(R_ES, new_segs[R_ES]);
671 tss_load_seg(R_DS, new_segs[R_DS]);
672 tss_load_seg(R_FS, new_segs[R_FS]);
673 tss_load_seg(R_GS, new_segs[R_GS]);
674 }
675
676 /* check that EIP is in the CS segment limits */
677 if (new_eip > env->segs[R_CS].limit) {
678 /* XXX: different exception if CALL ? */
679 raise_exception_err(EXCP0D_GPF, 0);
680 }
681
682#ifndef CONFIG_USER_ONLY
683 /* reset local breakpoints */
684 if (env->dr[7] & 0x55) {
685 for (i = 0; i < 4; i++) {
686 if (hw_breakpoint_enabled(env->dr[7], i) == 0x1)
687 hw_breakpoint_remove(env, i);
688 }
689 env->dr[7] &= ~0x55;
690 }
691#endif
692}
693
694/* check if Port I/O is allowed in TSS */
695static inline void check_io(int addr, int size)
696{
697#ifndef VBOX
698 int io_offset, val, mask;
699#else
700 int val, mask;
701 unsigned int io_offset;
702#endif /* VBOX */
703
704 /* TSS must be a valid 32 bit one */
705 if (!(env->tr.flags & DESC_P_MASK) ||
706 ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
707 env->tr.limit < 103)
708 goto fail;
709 io_offset = lduw_kernel(env->tr.base + 0x66);
710 io_offset += (addr >> 3);
711 /* Note: the check needs two bytes */
712 if ((io_offset + 1) > env->tr.limit)
713 goto fail;
714 val = lduw_kernel(env->tr.base + io_offset);
715 val >>= (addr & 7);
716 mask = (1 << size) - 1;
717 /* all bits must be zero to allow the I/O */
718 if ((val & mask) != 0) {
719 fail:
720 raise_exception_err(EXCP0D_GPF, 0);
721 }
722}
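/* check_io() example: a word access to port 0x3F8 reads the bitmap word at
   env->tr.base + io_offset + (0x3F8 >> 3) and is allowed only if bits 0 and 1
   (ports 0x3F8 and 0x3F9) are both clear. */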
723
724#ifdef VBOX
725
726/* Keep in sync with gen_check_external_event() */
727void helper_check_external_event()
728{
729 if ( (env->interrupt_request & ( CPU_INTERRUPT_EXTERNAL_FLUSH_TLB
730 | CPU_INTERRUPT_EXTERNAL_EXIT
731 | CPU_INTERRUPT_EXTERNAL_TIMER
732 | CPU_INTERRUPT_EXTERNAL_DMA))
733 || ( (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD)
734 && (env->eflags & IF_MASK)
735 && !(env->hflags & HF_INHIBIT_IRQ_MASK) ) )
736 {
737 helper_external_event();
738 }
739
740}
741
742void helper_sync_seg(uint32_t reg)
743{
744 if (env->segs[reg].newselector)
745 sync_seg(env, reg, env->segs[reg].newselector);
746}
747
748#endif /* VBOX */
749
750void helper_check_iob(uint32_t t0)
751{
752 check_io(t0, 1);
753}
754
755void helper_check_iow(uint32_t t0)
756{
757 check_io(t0, 2);
758}
759
760void helper_check_iol(uint32_t t0)
761{
762 check_io(t0, 4);
763}
764
765void helper_outb(uint32_t port, uint32_t data)
766{
767#ifndef VBOX
768 cpu_outb(port, data & 0xff);
769#else
770 cpu_outb(env, port, data & 0xff);
771#endif
772}
773
774target_ulong helper_inb(uint32_t port)
775{
776#ifndef VBOX
777 return cpu_inb(port);
778#else
779 return cpu_inb(env, port);
780#endif
781}
782
783void helper_outw(uint32_t port, uint32_t data)
784{
785#ifndef VBOX
786 cpu_outw(port, data & 0xffff);
787#else
788 cpu_outw(env, port, data & 0xffff);
789#endif
790}
791
792target_ulong helper_inw(uint32_t port)
793{
794#ifndef VBOX
795 return cpu_inw(port);
796#else
797 return cpu_inw(env, port);
798#endif
799}
800
801void helper_outl(uint32_t port, uint32_t data)
802{
803#ifndef VBOX
804 cpu_outl(port, data);
805#else
806 cpu_outl(env, port, data);
807#endif
808}
809
810target_ulong helper_inl(uint32_t port)
811{
812#ifndef VBOX
813 return cpu_inl(port);
814#else
815 return cpu_inl(env, port);
816#endif
817}
818
819static inline unsigned int get_sp_mask(unsigned int e2)
820{
821 if (e2 & DESC_B_MASK)
822 return 0xffffffff;
823 else
824 return 0xffff;
825}
826
827static int exeption_has_error_code(int intno)
828{
829 switch(intno) {
830 case 8:
831 case 10:
832 case 11:
833 case 12:
834 case 13:
835 case 14:
836 case 17:
837 return 1;
838 }
839 return 0;
840}
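/* The vectors above are exactly those that push an error code: #DF(8),
   #TS(10), #NP(11), #SS(12), #GP(13), #PF(14) and #AC(17). */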
841
842#ifdef TARGET_X86_64
843#define SET_ESP(val, sp_mask)\
844do {\
845 if ((sp_mask) == 0xffff)\
846 ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
847 else if ((sp_mask) == 0xffffffffLL)\
848 ESP = (uint32_t)(val);\
849 else\
850 ESP = (val);\
851} while (0)
852#else
853#define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
854#endif
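/* SET_ESP writes back only the bits selected by sp_mask: with a 16-bit stack
   (mask 0xffff) the upper bits of ESP/RSP are preserved, with a 32-bit stack
   the value is zero-extended, and otherwise the full 64-bit value is stored. */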
855
856/* on 64-bit machines, the segment base + offset addition can overflow; this
 857 * macro trims the result to 32 bits whenever needed */
858#define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))
859
860/* XXX: add an is_user flag to have proper security support */
861#define PUSHW(ssp, sp, sp_mask, val)\
862{\
863 sp -= 2;\
864 stw_kernel((ssp) + (sp & (sp_mask)), (val));\
865}
866
867#define PUSHL(ssp, sp, sp_mask, val)\
868{\
869 sp -= 4;\
870 stl_kernel(SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val));\
871}
872
873#define POPW(ssp, sp, sp_mask, val)\
874{\
875 val = lduw_kernel((ssp) + (sp & (sp_mask)));\
876 sp += 2;\
877}
878
879#define POPL(ssp, sp, sp_mask, val)\
880{\
881 val = (uint32_t)ldl_kernel(SEG_ADDL(ssp, sp, sp_mask));\
882 sp += 4;\
883}
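/* The PUSHW/PUSHL/POPW/POPL macros only advance the caller's local sp copy and
   address memory as ssp + (sp & sp_mask); the architectural ESP is committed
   afterwards with SET_ESP, so a fault in the middle of a push sequence leaves
   ESP unchanged. */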
884
885/* protected mode interrupt */
886static void do_interrupt_protected(int intno, int is_int, int error_code,
887 unsigned int next_eip, int is_hw)
888{
889 SegmentCache *dt;
890 target_ulong ptr, ssp;
891 int type, dpl, selector, ss_dpl, cpl;
892 int has_error_code, new_stack, shift;
893 uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0;
894 uint32_t old_eip, sp_mask;
895
896#ifdef VBOX
897 if (remR3NotifyTrap(env, intno, error_code, next_eip) != VINF_SUCCESS)
898 cpu_loop_exit();
899#endif
900
901 has_error_code = 0;
902 if (!is_int && !is_hw)
903 has_error_code = exeption_has_error_code(intno);
904 if (is_int)
905 old_eip = next_eip;
906 else
907 old_eip = env->eip;
908
909 dt = &env->idt;
910#ifndef VBOX
911 if (intno * 8 + 7 > dt->limit)
912#else
913 if ((unsigned)intno * 8 + 7 > dt->limit)
914#endif
915 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
916 ptr = dt->base + intno * 8;
917 e1 = ldl_kernel(ptr);
918 e2 = ldl_kernel(ptr + 4);
919 /* check gate type */
920 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
921 switch(type) {
922 case 5: /* task gate */
923#ifdef VBOX
924 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
925 cpl = env->hflags & HF_CPL_MASK;
926 /* check privilege if software int */
927 if (is_int && dpl < cpl)
928 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
929#endif
930 /* must do that check here to return the correct error code */
931 if (!(e2 & DESC_P_MASK))
932 raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
933 switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
934 if (has_error_code) {
935 int type;
936 uint32_t mask;
937 /* push the error code */
938 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
939 shift = type >> 3;
940 if (env->segs[R_SS].flags & DESC_B_MASK)
941 mask = 0xffffffff;
942 else
943 mask = 0xffff;
944 esp = (ESP - (2 << shift)) & mask;
945 ssp = env->segs[R_SS].base + esp;
946 if (shift)
947 stl_kernel(ssp, error_code);
948 else
949 stw_kernel(ssp, error_code);
950 SET_ESP(esp, mask);
951 }
952 return;
953 case 6: /* 286 interrupt gate */
954 case 7: /* 286 trap gate */
955 case 14: /* 386 interrupt gate */
956 case 15: /* 386 trap gate */
957 break;
958 default:
959 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
960 break;
961 }
962 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
963 cpl = env->hflags & HF_CPL_MASK;
964 /* check privilege if software int */
965 if (is_int && dpl < cpl)
966 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
967 /* check valid bit */
968 if (!(e2 & DESC_P_MASK))
969 raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
970 selector = e1 >> 16;
971 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
972 if ((selector & 0xfffc) == 0)
973 raise_exception_err(EXCP0D_GPF, 0);
974
975 if (load_segment(&e1, &e2, selector) != 0)
976 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
977#ifdef VBOX /** @todo figure out when this is done one day... */
978 if (!(e2 & DESC_A_MASK))
979 e2 = set_segment_accessed(selector, e2);
980#endif
981 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
982 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
983 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
984 if (dpl > cpl)
985 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
986 if (!(e2 & DESC_P_MASK))
987 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
988 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
989 /* to inner privilege */
990 get_ss_esp_from_tss(&ss, &esp, dpl);
991 if ((ss & 0xfffc) == 0)
992 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
993 if ((ss & 3) != dpl)
994 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
995 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
996 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
997#ifdef VBOX /** @todo figure out when this is done one day... */
998 if (!(ss_e2 & DESC_A_MASK))
999 ss_e2 = set_segment_accessed(ss, ss_e2);
1000#endif
1001 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
1002 if (ss_dpl != dpl)
1003 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
1004 if (!(ss_e2 & DESC_S_MASK) ||
1005 (ss_e2 & DESC_CS_MASK) ||
1006 !(ss_e2 & DESC_W_MASK))
1007 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
1008 if (!(ss_e2 & DESC_P_MASK))
1009#ifdef VBOX /* See page 3-477 of 253666.pdf */
1010 raise_exception_err(EXCP0C_STACK, ss & 0xfffc);
1011#else
1012 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
1013#endif
1014 new_stack = 1;
1015 sp_mask = get_sp_mask(ss_e2);
1016 ssp = get_seg_base(ss_e1, ss_e2);
1017#if defined(VBOX) && defined(DEBUG)
1018 printf("new stack %04X:%08X gate dpl=%d\n", ss, esp, dpl);
1019#endif
1020 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
1021 /* to same privilege */
1022 if (env->eflags & VM_MASK)
1023 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1024 new_stack = 0;
1025 sp_mask = get_sp_mask(env->segs[R_SS].flags);
1026 ssp = env->segs[R_SS].base;
1027 esp = ESP;
1028 dpl = cpl;
1029 } else {
1030 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1031 new_stack = 0; /* avoid warning */
1032 sp_mask = 0; /* avoid warning */
1033 ssp = 0; /* avoid warning */
1034 esp = 0; /* avoid warning */
1035 }
1036
1037 shift = type >> 3;
1038
1039#if 0
1040 /* XXX: check that enough room is available */
1041 push_size = 6 + (new_stack << 2) + (has_error_code << 1);
1042 if (env->eflags & VM_MASK)
1043 push_size += 8;
1044 push_size <<= shift;
1045#endif
1046 if (shift == 1) {
1047 if (new_stack) {
1048 if (env->eflags & VM_MASK) {
1049 PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
1050 PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
1051 PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
1052 PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
1053 }
1054 PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
1055 PUSHL(ssp, esp, sp_mask, ESP);
1056 }
1057 PUSHL(ssp, esp, sp_mask, compute_eflags());
1058 PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
1059 PUSHL(ssp, esp, sp_mask, old_eip);
1060 if (has_error_code) {
1061 PUSHL(ssp, esp, sp_mask, error_code);
1062 }
1063 } else {
1064 if (new_stack) {
1065 if (env->eflags & VM_MASK) {
1066 PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
1067 PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
1068 PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
1069 PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
1070 }
1071 PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
1072 PUSHW(ssp, esp, sp_mask, ESP);
1073 }
1074 PUSHW(ssp, esp, sp_mask, compute_eflags());
1075 PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
1076 PUSHW(ssp, esp, sp_mask, old_eip);
1077 if (has_error_code) {
1078 PUSHW(ssp, esp, sp_mask, error_code);
1079 }
1080 }
1081
1082 if (new_stack) {
1083 if (env->eflags & VM_MASK) {
1084 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
1085 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
1086 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
1087 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
1088 }
1089 ss = (ss & ~3) | dpl;
1090 cpu_x86_load_seg_cache(env, R_SS, ss,
1091 ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
1092 }
1093 SET_ESP(esp, sp_mask);
1094
1095 selector = (selector & ~3) | dpl;
1096 cpu_x86_load_seg_cache(env, R_CS, selector,
1097 get_seg_base(e1, e2),
1098 get_seg_limit(e1, e2),
1099 e2);
1100 cpu_x86_set_cpl(env, dpl);
1101 env->eip = offset;
1102
1103 /* interrupt gate clear IF mask */
1104 if ((type & 1) == 0) {
1105 env->eflags &= ~IF_MASK;
1106 }
1107#ifndef VBOX
1108 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
1109#else
1110 /*
1111 * We must clear VIP/VIF too on interrupt entry, as otherwise FreeBSD
1112 * gets confused by seemingly changed EFLAGS. See #3491 and
1113 * public bug #2341.
1114 */
1115 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK | VIF_MASK | VIP_MASK);
1116#endif
1117}
1118
1119#ifdef VBOX
1120
1121/* check if VME interrupt redirection is enabled in TSS */
1122DECLINLINE(bool) is_vme_irq_redirected(int intno)
1123{
1124 unsigned int io_offset, intredir_offset;
1125 unsigned char val, mask;
1126
1127 /* TSS must be a valid 32 bit one */
1128 if (!(env->tr.flags & DESC_P_MASK) ||
1129 ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
1130 env->tr.limit < 103)
1131 goto fail;
1132 io_offset = lduw_kernel(env->tr.base + 0x66);
1133 /* Make sure the io bitmap offset is valid; anything less than sizeof(VBOXTSS) means there's none. */
1134 if (io_offset < 0x68 + 0x20)
1135 io_offset = 0x68 + 0x20;
1136 /* the virtual interrupt redirection bitmap is located below the io bitmap */
1137 intredir_offset = io_offset - 0x20;
1138
1139 intredir_offset += (intno >> 3);
1140 if ((intredir_offset) > env->tr.limit)
1141 goto fail;
1142
1143 val = ldub_kernel(env->tr.base + intredir_offset);
1144 mask = 1 << (unsigned char)(intno & 7);
1145
1146 /* bit set means no redirection. */
1147 if ((val & mask) != 0) {
1148 return false;
1149 }
1150 return true;
1151
1152fail:
1153 raise_exception_err(EXCP0D_GPF, 0);
1154 return true;
1155}
1156
1157/* V86 mode software interrupt with CR4.VME=1 */
1158static void do_soft_interrupt_vme(int intno, int error_code, unsigned int next_eip)
1159{
1160 target_ulong ptr, ssp;
1161 int selector;
1162 uint32_t offset, esp;
1163 uint32_t old_cs, old_eflags;
1164 uint32_t iopl;
1165
1166 iopl = ((env->eflags >> IOPL_SHIFT) & 3);
1167
1168 if (!is_vme_irq_redirected(intno))
1169 {
1170 if (iopl == 3)
1171 {
1172 do_interrupt_protected(intno, 1, error_code, next_eip, 0);
1173 return;
1174 }
1175 else
1176 raise_exception_err(EXCP0D_GPF, 0);
1177 }
1178
1179 /* virtual mode idt is at linear address 0 */
1180 ptr = 0 + intno * 4;
1181 offset = lduw_kernel(ptr);
1182 selector = lduw_kernel(ptr + 2);
1183 esp = ESP;
1184 ssp = env->segs[R_SS].base;
1185 old_cs = env->segs[R_CS].selector;
1186
1187 old_eflags = compute_eflags();
1188 if (iopl < 3)
1189 {
1190 /* copy VIF into IF and set IOPL to 3 */
1191 if (env->eflags & VIF_MASK)
1192 old_eflags |= IF_MASK;
1193 else
1194 old_eflags &= ~IF_MASK;
1195
1196 old_eflags |= (3 << IOPL_SHIFT);
1197 }
1198
1199 /* XXX: use SS segment size ? */
1200 PUSHW(ssp, esp, 0xffff, old_eflags);
1201 PUSHW(ssp, esp, 0xffff, old_cs);
1202 PUSHW(ssp, esp, 0xffff, next_eip);
1203
1204 /* update processor state */
1205 ESP = (ESP & ~0xffff) | (esp & 0xffff);
1206 env->eip = offset;
1207 env->segs[R_CS].selector = selector;
1208 env->segs[R_CS].base = (selector << 4);
1209 env->eflags &= ~(TF_MASK | RF_MASK);
1210
1211 if (iopl < 3)
1212 env->eflags &= ~VIF_MASK;
1213 else
1214 env->eflags &= ~IF_MASK;
1215}
1216
1217#endif /* VBOX */
1218
1219#ifdef TARGET_X86_64
1220
1221#define PUSHQ(sp, val)\
1222{\
1223 sp -= 8;\
1224 stq_kernel(sp, (val));\
1225}
1226
1227#define POPQ(sp, val)\
1228{\
1229 val = ldq_kernel(sp);\
1230 sp += 8;\
1231}
1232
1233static inline target_ulong get_rsp_from_tss(int level)
1234{
1235 int index;
1236
1237#if 0
1238 printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
1239 env->tr.base, env->tr.limit);
1240#endif
1241
1242 if (!(env->tr.flags & DESC_P_MASK))
1243 cpu_abort(env, "invalid tss");
1244 index = 8 * level + 4;
1245 if ((index + 7) > env->tr.limit)
1246 raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
1247 return ldq_kernel(env->tr.base + index);
1248}
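/* In the 64-bit TSS, 8 * level + 4 addresses RSP0/RSP1/RSP2 at offsets 4/12/20
   for levels 0..2, and IST1..IST7 at offsets 36..84 when the caller passes
   ist + 3 as the level. */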
1249
1250/* 64 bit interrupt */
1251static void do_interrupt64(int intno, int is_int, int error_code,
1252 target_ulong next_eip, int is_hw)
1253{
1254 SegmentCache *dt;
1255 target_ulong ptr;
1256 int type, dpl, selector, cpl, ist;
1257 int has_error_code, new_stack;
1258 uint32_t e1, e2, e3, ss;
1259 target_ulong old_eip, esp, offset;
1260
1261#ifdef VBOX
1262 if (remR3NotifyTrap(env, intno, error_code, next_eip) != VINF_SUCCESS)
1263 cpu_loop_exit();
1264#endif
1265
1266 has_error_code = 0;
1267 if (!is_int && !is_hw)
1268 has_error_code = exeption_has_error_code(intno);
1269 if (is_int)
1270 old_eip = next_eip;
1271 else
1272 old_eip = env->eip;
1273
1274 dt = &env->idt;
1275 if (intno * 16 + 15 > dt->limit)
1276 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
1277 ptr = dt->base + intno * 16;
1278 e1 = ldl_kernel(ptr);
1279 e2 = ldl_kernel(ptr + 4);
1280 e3 = ldl_kernel(ptr + 8);
1281 /* check gate type */
1282 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
1283 switch(type) {
1284 case 14: /* 386 interrupt gate */
1285 case 15: /* 386 trap gate */
1286 break;
1287 default:
1288 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
1289 break;
1290 }
1291 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1292 cpl = env->hflags & HF_CPL_MASK;
1293 /* check privilege if software int */
1294 if (is_int && dpl < cpl)
1295 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
1296 /* check valid bit */
1297 if (!(e2 & DESC_P_MASK))
1298 raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2);
1299 selector = e1 >> 16;
1300 offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
1301 ist = e2 & 7;
1302 if ((selector & 0xfffc) == 0)
1303 raise_exception_err(EXCP0D_GPF, 0);
1304
1305 if (load_segment(&e1, &e2, selector) != 0)
1306 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1307 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
1308 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1309 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1310 if (dpl > cpl)
1311 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1312 if (!(e2 & DESC_P_MASK))
1313 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
1314 if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))
1315 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1316 if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
1317 /* to inner privilege */
1318 if (ist != 0)
1319 esp = get_rsp_from_tss(ist + 3);
1320 else
1321 esp = get_rsp_from_tss(dpl);
1322 esp &= ~0xfLL; /* align stack */
1323 ss = 0;
1324 new_stack = 1;
1325 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
1326 /* to same privilege */
1327 if (env->eflags & VM_MASK)
1328 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1329 new_stack = 0;
1330 if (ist != 0)
1331 esp = get_rsp_from_tss(ist + 3);
1332 else
1333 esp = ESP;
1334 esp &= ~0xfLL; /* align stack */
1335 dpl = cpl;
1336 } else {
1337 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1338 new_stack = 0; /* avoid warning */
1339 esp = 0; /* avoid warning */
1340 }
1341
1342 PUSHQ(esp, env->segs[R_SS].selector);
1343 PUSHQ(esp, ESP);
1344 PUSHQ(esp, compute_eflags());
1345 PUSHQ(esp, env->segs[R_CS].selector);
1346 PUSHQ(esp, old_eip);
1347 if (has_error_code) {
1348 PUSHQ(esp, error_code);
1349 }
1350
1351 if (new_stack) {
1352 ss = 0 | dpl;
1353#ifndef VBOX
1354 cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
1355#else
1356 cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, dpl << DESC_DPL_SHIFT);
1357#endif
1358 }
1359 ESP = esp;
1360
1361 selector = (selector & ~3) | dpl;
1362 cpu_x86_load_seg_cache(env, R_CS, selector,
1363 get_seg_base(e1, e2),
1364 get_seg_limit(e1, e2),
1365 e2);
1366 cpu_x86_set_cpl(env, dpl);
1367 env->eip = offset;
1368
1369 /* interrupt gate clear IF mask */
1370 if ((type & 1) == 0) {
1371 env->eflags &= ~IF_MASK;
1372 }
1373#ifndef VBOX
1374 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
1375#else /* VBOX */
1376 /*
1377 * We must clear VIP/VIF too on interrupt entry, as otherwise FreeBSD
1378 * gets confused by seemingly changed EFLAGS. See #3491 and
1379 * public bug #2341.
1380 */
1381 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK | VIF_MASK | VIP_MASK);
1382#endif /* VBOX */
1383}
1384#endif
1385
1386#ifdef TARGET_X86_64
1387#if defined(CONFIG_USER_ONLY)
1388void helper_syscall(int next_eip_addend)
1389{
1390 env->exception_index = EXCP_SYSCALL;
1391 env->exception_next_eip = env->eip + next_eip_addend;
1392 cpu_loop_exit();
1393}
1394#else
1395void helper_syscall(int next_eip_addend)
1396{
1397 int selector;
1398
1399 if (!(env->efer & MSR_EFER_SCE)) {
1400 raise_exception_err(EXCP06_ILLOP, 0);
1401 }
1402 selector = (env->star >> 32) & 0xffff;
1403 if (env->hflags & HF_LMA_MASK) {
1404 int code64;
1405
1406 ECX = env->eip + next_eip_addend;
1407 env->regs[11] = compute_eflags();
1408
1409 code64 = env->hflags & HF_CS64_MASK;
1410
1411 cpu_x86_set_cpl(env, 0);
1412 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1413 0, 0xffffffff,
1414 DESC_G_MASK | DESC_P_MASK |
1415 DESC_S_MASK |
1416 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
1417 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1418 0, 0xffffffff,
1419 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1420 DESC_S_MASK |
1421 DESC_W_MASK | DESC_A_MASK);
1422 env->eflags &= ~env->fmask;
1423 load_eflags(env->eflags, 0);
1424 if (code64)
1425 env->eip = env->lstar;
1426 else
1427 env->eip = env->cstar;
1428 } else {
1429 ECX = (uint32_t)(env->eip + next_eip_addend);
1430
1431 cpu_x86_set_cpl(env, 0);
1432 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1433 0, 0xffffffff,
1434 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1435 DESC_S_MASK |
1436 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1437 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1438 0, 0xffffffff,
1439 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1440 DESC_S_MASK |
1441 DESC_W_MASK | DESC_A_MASK);
1442 env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
1443 env->eip = (uint32_t)env->star;
1444 }
1445}
1446#endif
1447#endif
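/* The SYSCALL helper above: in long mode CS is loaded from STAR[47:32], SS
   from STAR[47:32] + 8, the return RIP goes to RCX and RFLAGS to R11, RFLAGS
   is masked with env->fmask (the FMASK MSR) and RIP comes from LSTAR (64-bit
   callers) or CSTAR (compatibility mode); in legacy mode ECX receives the
   return EIP, EIP comes from STAR[31:0] and IF/VM/RF are cleared. */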
1448
1449#ifdef TARGET_X86_64
1450void helper_sysret(int dflag)
1451{
1452 int cpl, selector;
1453
1454 if (!(env->efer & MSR_EFER_SCE)) {
1455 raise_exception_err(EXCP06_ILLOP, 0);
1456 }
1457 cpl = env->hflags & HF_CPL_MASK;
1458 if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
1459 raise_exception_err(EXCP0D_GPF, 0);
1460 }
1461 selector = (env->star >> 48) & 0xffff;
1462 if (env->hflags & HF_LMA_MASK) {
1463 if (dflag == 2) {
1464 cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
1465 0, 0xffffffff,
1466 DESC_G_MASK | DESC_P_MASK |
1467 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1468 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
1469 DESC_L_MASK);
1470 env->eip = ECX;
1471 } else {
1472 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1473 0, 0xffffffff,
1474 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1475 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1476 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1477 env->eip = (uint32_t)ECX;
1478 }
1479 cpu_x86_load_seg_cache(env, R_SS, selector + 8,
1480 0, 0xffffffff,
1481 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1482 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1483 DESC_W_MASK | DESC_A_MASK);
1484 load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
1485 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
1486 cpu_x86_set_cpl(env, 3);
1487 } else {
1488 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1489 0, 0xffffffff,
1490 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1491 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1492 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1493 env->eip = (uint32_t)ECX;
1494 cpu_x86_load_seg_cache(env, R_SS, selector + 8,
1495 0, 0xffffffff,
1496 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1497 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1498 DESC_W_MASK | DESC_A_MASK);
1499 env->eflags |= IF_MASK;
1500 cpu_x86_set_cpl(env, 3);
1501 }
1502}
1503#endif
1504
1505#ifdef VBOX
1506
1507/**
1508 * Checks and processes external VMM events.
1509 * Called by op_check_external_event() when any of the flags is set and can be serviced.
1510 */
1511void helper_external_event(void)
1512{
1513# if defined(RT_OS_DARWIN) && defined(VBOX_STRICT)
1514 uintptr_t uSP;
1515# ifdef RT_ARCH_AMD64
1516 __asm__ __volatile__("movq %%rsp, %0" : "=r" (uSP));
1517# else
1518 __asm__ __volatile__("movl %%esp, %0" : "=r" (uSP));
1519# endif
1520 AssertMsg(!(uSP & 15), ("xSP=%#p\n", uSP));
1521# endif
1522 /* Keep in sync with flags checked by gen_check_external_event() */
1523 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD)
1524 {
1525 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1526 ~CPU_INTERRUPT_EXTERNAL_HARD);
1527 cpu_interrupt(env, CPU_INTERRUPT_HARD);
1528 }
1529 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_EXIT)
1530 {
1531 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1532 ~CPU_INTERRUPT_EXTERNAL_EXIT);
1533 cpu_exit(env);
1534 }
1535 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_DMA)
1536 {
1537 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1538 ~CPU_INTERRUPT_EXTERNAL_DMA);
1539 remR3DmaRun(env);
1540 }
1541 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_TIMER)
1542 {
1543 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1544 ~CPU_INTERRUPT_EXTERNAL_TIMER);
1545 remR3TimersRun(env);
1546 }
1547 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_FLUSH_TLB)
1548 {
1549 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1550 ~CPU_INTERRUPT_EXTERNAL_HARD);
1551 cpu_interrupt(env, CPU_INTERRUPT_HARD);
1552 }
1553}
1554
1555/* helper for recording call instruction addresses for later scanning */
1556void helper_record_call()
1557{
1558 if ( !(env->state & CPU_RAW_RING0)
1559 && (env->cr[0] & CR0_PG_MASK)
1560 && !(env->eflags & X86_EFL_IF))
1561 remR3RecordCall(env);
1562}
1563
1564#endif /* VBOX */
1565
1566/* real mode interrupt */
1567static void do_interrupt_real(int intno, int is_int, int error_code,
1568 unsigned int next_eip)
1569{
1570 SegmentCache *dt;
1571 target_ulong ptr, ssp;
1572 int selector;
1573 uint32_t offset, esp;
1574 uint32_t old_cs, old_eip;
1575
1576 /* real mode (simpler !) */
1577 dt = &env->idt;
1578#ifndef VBOX
1579 if (intno * 4 + 3 > dt->limit)
1580#else
1581 if ((unsigned)intno * 4 + 3 > dt->limit)
1582#endif
1583 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
1584 ptr = dt->base + intno * 4;
1585 offset = lduw_kernel(ptr);
1586 selector = lduw_kernel(ptr + 2);
1587 esp = ESP;
1588 ssp = env->segs[R_SS].base;
1589 if (is_int)
1590 old_eip = next_eip;
1591 else
1592 old_eip = env->eip;
1593 old_cs = env->segs[R_CS].selector;
1594 /* XXX: use SS segment size ? */
1595 PUSHW(ssp, esp, 0xffff, compute_eflags());
1596 PUSHW(ssp, esp, 0xffff, old_cs);
1597 PUSHW(ssp, esp, 0xffff, old_eip);
1598
1599 /* update processor state */
1600 ESP = (ESP & ~0xffff) | (esp & 0xffff);
1601 env->eip = offset;
1602 env->segs[R_CS].selector = selector;
1603 env->segs[R_CS].base = (selector << 4);
1604 env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
1605}
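/* Real-mode vectors are 4 bytes each (offset word, then segment word) read
   from the IVT at env->idt.base; only FLAGS, CS and IP are pushed, and the
   handler runs with IF, TF, AC and RF cleared. */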
1606
1607/* fake user mode interrupt */
1608void do_interrupt_user(int intno, int is_int, int error_code,
1609 target_ulong next_eip)
1610{
1611 SegmentCache *dt;
1612 target_ulong ptr;
1613 int dpl, cpl, shift;
1614 uint32_t e2;
1615
1616 dt = &env->idt;
1617 if (env->hflags & HF_LMA_MASK) {
1618 shift = 4;
1619 } else {
1620 shift = 3;
1621 }
1622 ptr = dt->base + (intno << shift);
1623 e2 = ldl_kernel(ptr + 4);
1624
1625 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1626 cpl = env->hflags & HF_CPL_MASK;
1627 /* check privilege if software int */
1628 if (is_int && dpl < cpl)
1629 raise_exception_err(EXCP0D_GPF, (intno << shift) + 2);
1630
1631 /* Since we emulate only user space, we cannot do more than
1632 exit the emulation with the suitable exception and error
1633 code */
1634 if (is_int)
1635 EIP = next_eip;
1636}
1637
1638#if !defined(CONFIG_USER_ONLY)
1639static void handle_even_inj(int intno, int is_int, int error_code,
1640 int is_hw, int rm)
1641{
1642 uint32_t event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
1643 if (!(event_inj & SVM_EVTINJ_VALID)) {
1644 int type;
1645 if (is_int)
1646 type = SVM_EVTINJ_TYPE_SOFT;
1647 else
1648 type = SVM_EVTINJ_TYPE_EXEPT;
1649 event_inj = intno | type | SVM_EVTINJ_VALID;
1650 if (!rm && exeption_has_error_code(intno)) {
1651 event_inj |= SVM_EVTINJ_VALID_ERR;
1652 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err), error_code);
1653 }
1654 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj);
1655 }
1656}
1657#endif
1658
1659/*
1660 * Begin execution of an interrupt. is_int is TRUE if coming from
1661 * the int instruction. next_eip is the EIP value AFTER the interrupt
1662 * instruction. It is only relevant if is_int is TRUE.
1663 */
1664void do_interrupt(int intno, int is_int, int error_code,
1665 target_ulong next_eip, int is_hw)
1666{
1667 if (qemu_loglevel_mask(CPU_LOG_INT)) {
1668 if ((env->cr[0] & CR0_PE_MASK)) {
1669 static int count;
1670 qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
1671 count, intno, error_code, is_int,
1672 env->hflags & HF_CPL_MASK,
1673 env->segs[R_CS].selector, EIP,
1674 (int)env->segs[R_CS].base + EIP,
1675 env->segs[R_SS].selector, ESP);
1676 if (intno == 0x0e) {
1677 qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
1678 } else {
1679 qemu_log(" EAX=" TARGET_FMT_lx, EAX);
1680 }
1681 qemu_log("\n");
1682 log_cpu_state(env, X86_DUMP_CCOP);
1683#if 0
1684 {
1685 int i;
1686 uint8_t *ptr;
1687 qemu_log(" code=");
1688 ptr = env->segs[R_CS].base + env->eip;
1689 for(i = 0; i < 16; i++) {
1690 qemu_log(" %02x", ldub(ptr + i));
1691 }
1692 qemu_log("\n");
1693 }
1694#endif
1695 count++;
1696 }
1697 }
1698#ifdef VBOX
1699 if (RT_UNLIKELY(env->state & CPU_EMULATE_SINGLE_STEP)) {
1700 if (is_int) {
1701 RTLogPrintf("do_interrupt: %#04x err=%#x pc=%#RGv%s\n",
1702 intno, error_code, (RTGCPTR)env->eip, is_hw ? " hw" : "");
1703 } else {
1704 RTLogPrintf("do_interrupt: %#04x err=%#x pc=%#RGv next=%#RGv%s\n",
1705 intno, error_code, (RTGCPTR)env->eip, (RTGCPTR)next_eip, is_hw ? " hw" : "");
1706 }
1707 }
1708#endif
1709 if (env->cr[0] & CR0_PE_MASK) {
1710#if !defined(CONFIG_USER_ONLY)
1711 if (env->hflags & HF_SVMI_MASK)
1712 handle_even_inj(intno, is_int, error_code, is_hw, 0);
1713#endif
1714#ifdef TARGET_X86_64
1715 if (env->hflags & HF_LMA_MASK) {
1716 do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
1717 } else
1718#endif
1719 {
1720#ifdef VBOX
1721 /* int xx *, v86 code and VME enabled? */
1722 if ( (env->eflags & VM_MASK)
1723 && (env->cr[4] & CR4_VME_MASK)
1724 && is_int
1725 && !is_hw
1726 && env->eip + 1 != next_eip /* single byte int 3 goes straight to the protected mode handler */
1727 )
1728 do_soft_interrupt_vme(intno, error_code, next_eip);
1729 else
1730#endif /* VBOX */
1731 do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
1732 }
1733 } else {
1734#if !defined(CONFIG_USER_ONLY)
1735 if (env->hflags & HF_SVMI_MASK)
1736 handle_even_inj(intno, is_int, error_code, is_hw, 1);
1737#endif
1738 do_interrupt_real(intno, is_int, error_code, next_eip);
1739 }
1740
1741#if !defined(CONFIG_USER_ONLY)
1742 if (env->hflags & HF_SVMI_MASK) {
1743 uint32_t event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
1744 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj & ~SVM_EVTINJ_VALID);
1745 }
1746#endif
1747}
1748
1749/* This should come from sysemu.h - if we could include it here... */
1750void qemu_system_reset_request(void);
1751
1752/*
1753 * Check nested exceptions and change to double or triple fault if
1754 * needed. It should only be called, if this is not an interrupt.
1755 * Returns the new exception number.
1756 */
1757static int check_exception(int intno, int *error_code)
1758{
1759 int first_contributory = env->old_exception == 0 ||
1760 (env->old_exception >= 10 &&
1761 env->old_exception <= 13);
1762 int second_contributory = intno == 0 ||
1763 (intno >= 10 && intno <= 13);
1764
1765 qemu_log_mask(CPU_LOG_INT, "check_exception old: 0x%x new 0x%x\n",
1766 env->old_exception, intno);
1767
1768#if !defined(CONFIG_USER_ONLY)
1769 if (env->old_exception == EXCP08_DBLE) {
1770 if (env->hflags & HF_SVMI_MASK)
1771 helper_vmexit(SVM_EXIT_SHUTDOWN, 0); /* does not return */
1772
1773 qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");
1774
1775# ifndef VBOX
1776 qemu_system_reset_request();
1777 return EXCP_HLT;
1778# else
1779 remR3RaiseRC(env->pVM, VINF_EM_TRIPLE_FAULT);
1780 return EXCP_RC;
1781# endif
1782 }
1783#endif
1784
1785 if ((first_contributory && second_contributory)
1786 || (env->old_exception == EXCP0E_PAGE &&
1787 (second_contributory || (intno == EXCP0E_PAGE)))) {
1788 intno = EXCP08_DBLE;
1789 *error_code = 0;
1790 }
1791
1792 if (second_contributory || (intno == EXCP0E_PAGE) ||
1793 (intno == EXCP08_DBLE))
1794 env->old_exception = intno;
1795
1796 return intno;
1797}
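/* Rules implemented above: a contributory exception (#DE, #TS, #NP, #SS, #GP)
   raised while delivering another contributory one, or a contributory
   exception or #PF raised while delivering a #PF, escalates to #DF; a further
   fault while delivering #DF is a triple fault, which causes an SVM shutdown
   VM-exit, a system reset, or (under VBOX) a VINF_EM_TRIPLE_FAULT exit to EM. */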
1798
1799/*
1800 * Signal an interrupt. It is executed in the main CPU loop.
1801 * is_int is TRUE if coming from the int instruction. next_eip is the
1802 * EIP value AFTER the interrupt instruction. It is only relevant if
1803 * is_int is TRUE.
1804 */
1805static void QEMU_NORETURN raise_interrupt(int intno, int is_int, int error_code,
1806 int next_eip_addend)
1807{
1808#if defined(VBOX) && defined(DEBUG)
1809 Log2(("raise_interrupt: %x %x %x %RGv\n", intno, is_int, error_code, (RTGCPTR)env->eip + next_eip_addend));
1810#endif
1811 if (!is_int) {
1812 helper_svm_check_intercept_param(SVM_EXIT_EXCP_BASE + intno, error_code);
1813 intno = check_exception(intno, &error_code);
1814 } else {
1815 helper_svm_check_intercept_param(SVM_EXIT_SWINT, 0);
1816 }
1817
1818 env->exception_index = intno;
1819 env->error_code = error_code;
1820 env->exception_is_int = is_int;
1821 env->exception_next_eip = env->eip + next_eip_addend;
1822 cpu_loop_exit();
1823}
1824
1825/* shortcuts to generate exceptions */
1826
1827void raise_exception_err(int exception_index, int error_code)
1828{
1829 raise_interrupt(exception_index, 0, error_code, 0);
1830}
1831
1832void raise_exception(int exception_index)
1833{
1834 raise_interrupt(exception_index, 0, 0, 0);
1835}
1836
1837void raise_exception_env(int exception_index, CPUState *nenv)
1838{
1839 env = nenv;
1840 raise_exception(exception_index);
1841}
1842/* SMM support */
1843
1844#if defined(CONFIG_USER_ONLY)
1845
1846void do_smm_enter(void)
1847{
1848}
1849
1850void helper_rsm(void)
1851{
1852}
1853
1854#else
1855
1856#ifdef TARGET_X86_64
1857#define SMM_REVISION_ID 0x00020064
1858#else
1859#define SMM_REVISION_ID 0x00020000
1860#endif
1861
1862void do_smm_enter(void)
1863{
1864 target_ulong sm_state;
1865 SegmentCache *dt;
1866 int i, offset;
1867
1868 qemu_log_mask(CPU_LOG_INT, "SMM: enter\n");
1869 log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);
1870
1871 env->hflags |= HF_SMM_MASK;
1872 cpu_smm_update(env);
1873
1874 sm_state = env->smbase + 0x8000;
1875
1876#ifdef TARGET_X86_64
1877 for(i = 0; i < 6; i++) {
1878 dt = &env->segs[i];
1879 offset = 0x7e00 + i * 16;
1880 stw_phys(sm_state + offset, dt->selector);
1881 stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
1882 stl_phys(sm_state + offset + 4, dt->limit);
1883 stq_phys(sm_state + offset + 8, dt->base);
1884 }
1885
1886 stq_phys(sm_state + 0x7e68, env->gdt.base);
1887 stl_phys(sm_state + 0x7e64, env->gdt.limit);
1888
1889 stw_phys(sm_state + 0x7e70, env->ldt.selector);
1890 stq_phys(sm_state + 0x7e78, env->ldt.base);
1891 stl_phys(sm_state + 0x7e74, env->ldt.limit);
1892 stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);
1893
1894 stq_phys(sm_state + 0x7e88, env->idt.base);
1895 stl_phys(sm_state + 0x7e84, env->idt.limit);
1896
1897 stw_phys(sm_state + 0x7e90, env->tr.selector);
1898 stq_phys(sm_state + 0x7e98, env->tr.base);
1899 stl_phys(sm_state + 0x7e94, env->tr.limit);
1900 stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);
1901
1902 stq_phys(sm_state + 0x7ed0, env->efer);
1903
1904 stq_phys(sm_state + 0x7ff8, EAX);
1905 stq_phys(sm_state + 0x7ff0, ECX);
1906 stq_phys(sm_state + 0x7fe8, EDX);
1907 stq_phys(sm_state + 0x7fe0, EBX);
1908 stq_phys(sm_state + 0x7fd8, ESP);
1909 stq_phys(sm_state + 0x7fd0, EBP);
1910 stq_phys(sm_state + 0x7fc8, ESI);
1911 stq_phys(sm_state + 0x7fc0, EDI);
1912 for(i = 8; i < 16; i++)
1913 stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
1914 stq_phys(sm_state + 0x7f78, env->eip);
1915 stl_phys(sm_state + 0x7f70, compute_eflags());
1916 stl_phys(sm_state + 0x7f68, env->dr[6]);
1917 stl_phys(sm_state + 0x7f60, env->dr[7]);
1918
1919 stl_phys(sm_state + 0x7f48, env->cr[4]);
1920 stl_phys(sm_state + 0x7f50, env->cr[3]);
1921 stl_phys(sm_state + 0x7f58, env->cr[0]);
1922
1923 stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1924 stl_phys(sm_state + 0x7f00, env->smbase);
1925#else
1926 stl_phys(sm_state + 0x7ffc, env->cr[0]);
1927 stl_phys(sm_state + 0x7ff8, env->cr[3]);
1928 stl_phys(sm_state + 0x7ff4, compute_eflags());
1929 stl_phys(sm_state + 0x7ff0, env->eip);
1930 stl_phys(sm_state + 0x7fec, EDI);
1931 stl_phys(sm_state + 0x7fe8, ESI);
1932 stl_phys(sm_state + 0x7fe4, EBP);
1933 stl_phys(sm_state + 0x7fe0, ESP);
1934 stl_phys(sm_state + 0x7fdc, EBX);
1935 stl_phys(sm_state + 0x7fd8, EDX);
1936 stl_phys(sm_state + 0x7fd4, ECX);
1937 stl_phys(sm_state + 0x7fd0, EAX);
1938 stl_phys(sm_state + 0x7fcc, env->dr[6]);
1939 stl_phys(sm_state + 0x7fc8, env->dr[7]);
1940
1941 stl_phys(sm_state + 0x7fc4, env->tr.selector);
1942 stl_phys(sm_state + 0x7f64, env->tr.base);
1943 stl_phys(sm_state + 0x7f60, env->tr.limit);
1944 stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);
1945
1946 stl_phys(sm_state + 0x7fc0, env->ldt.selector);
1947 stl_phys(sm_state + 0x7f80, env->ldt.base);
1948 stl_phys(sm_state + 0x7f7c, env->ldt.limit);
1949 stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);
1950
1951 stl_phys(sm_state + 0x7f74, env->gdt.base);
1952 stl_phys(sm_state + 0x7f70, env->gdt.limit);
1953
1954 stl_phys(sm_state + 0x7f58, env->idt.base);
1955 stl_phys(sm_state + 0x7f54, env->idt.limit);
1956
1957 for(i = 0; i < 6; i++) {
1958 dt = &env->segs[i];
1959 if (i < 3)
1960 offset = 0x7f84 + i * 12;
1961 else
1962 offset = 0x7f2c + (i - 3) * 12;
1963 stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
1964 stl_phys(sm_state + offset + 8, dt->base);
1965 stl_phys(sm_state + offset + 4, dt->limit);
1966 stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
1967 }
1968 stl_phys(sm_state + 0x7f14, env->cr[4]);
1969
1970 stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1971 stl_phys(sm_state + 0x7ef8, env->smbase);
1972#endif
1973 /* init SMM cpu state */
1974
1975#ifdef TARGET_X86_64
1976 cpu_load_efer(env, 0);
1977#endif
1978 load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1979 env->eip = 0x00008000;
1980 cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
1981 0xffffffff, 0);
1982 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
1983 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
1984 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
1985 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
1986 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);
1987
1988 cpu_x86_update_cr0(env,
1989 env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK));
1990 cpu_x86_update_cr4(env, 0);
1991 env->dr[7] = 0x00000400;
1992 CC_OP = CC_OP_EFLAGS;
1993}
1994
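/*
 * RSM: restore the state saved by do_smm_enter() from the SMRAM state
 * save area and leave SMM. The VBOX build does not implement this path
 * and aborts instead.
 */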
1995void helper_rsm(void)
1996{
1997#ifdef VBOX
1998 cpu_abort(env, "helper_rsm");
1999#else /* !VBOX */
2000 target_ulong sm_state;
2001 int i, offset;
2002 uint32_t val;
2003
2004 sm_state = env->smbase + 0x8000;
2005#ifdef TARGET_X86_64
2006 cpu_load_efer(env, ldq_phys(sm_state + 0x7ed0));
2007
2008 for(i = 0; i < 6; i++) {
2009 offset = 0x7e00 + i * 16;
2010 cpu_x86_load_seg_cache(env, i,
2011 lduw_phys(sm_state + offset),
2012 ldq_phys(sm_state + offset + 8),
2013 ldl_phys(sm_state + offset + 4),
2014 (lduw_phys(sm_state + offset + 2) & 0xf0ff) << 8);
2015 }
2016
2017 env->gdt.base = ldq_phys(sm_state + 0x7e68);
2018 env->gdt.limit = ldl_phys(sm_state + 0x7e64);
2019
2020 env->ldt.selector = lduw_phys(sm_state + 0x7e70);
2021 env->ldt.base = ldq_phys(sm_state + 0x7e78);
2022 env->ldt.limit = ldl_phys(sm_state + 0x7e74);
2023 env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;
2024#ifdef VBOX
2025 env->ldt.fVBoxFlags = CPUMSELREG_FLAGS_VALID;
2026 env->ldt.newselector = 0;
2027#endif
2028
2029 env->idt.base = ldq_phys(sm_state + 0x7e88);
2030 env->idt.limit = ldl_phys(sm_state + 0x7e84);
2031
2032 env->tr.selector = lduw_phys(sm_state + 0x7e90);
2033 env->tr.base = ldq_phys(sm_state + 0x7e98);
2034 env->tr.limit = ldl_phys(sm_state + 0x7e94);
2035 env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;
2036#ifdef VBOX
2037 env->tr.fVBoxFlags = CPUMSELREG_FLAGS_VALID;
2038 env->tr.newselector = 0;
2039#endif
2040
2041 EAX = ldq_phys(sm_state + 0x7ff8);
2042 ECX = ldq_phys(sm_state + 0x7ff0);
2043 EDX = ldq_phys(sm_state + 0x7fe8);
2044 EBX = ldq_phys(sm_state + 0x7fe0);
2045 ESP = ldq_phys(sm_state + 0x7fd8);
2046 EBP = ldq_phys(sm_state + 0x7fd0);
2047 ESI = ldq_phys(sm_state + 0x7fc8);
2048 EDI = ldq_phys(sm_state + 0x7fc0);
2049 for(i = 8; i < 16; i++)
2050 env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
2051 env->eip = ldq_phys(sm_state + 0x7f78);
2052 load_eflags(ldl_phys(sm_state + 0x7f70),
2053 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
2054 env->dr[6] = ldl_phys(sm_state + 0x7f68);
2055 env->dr[7] = ldl_phys(sm_state + 0x7f60);
2056
2057 cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
2058 cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
2059 cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));
2060
2061 val = ldl_phys(sm_state + 0x7efc); /* revision ID */
2062 if (val & 0x20000) {
2063 env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
2064 }
2065#else
2066 cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
2067 cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
2068 load_eflags(ldl_phys(sm_state + 0x7ff4),
2069 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
2070 env->eip = ldl_phys(sm_state + 0x7ff0);
2071 EDI = ldl_phys(sm_state + 0x7fec);
2072 ESI = ldl_phys(sm_state + 0x7fe8);
2073 EBP = ldl_phys(sm_state + 0x7fe4);
2074 ESP = ldl_phys(sm_state + 0x7fe0);
2075 EBX = ldl_phys(sm_state + 0x7fdc);
2076 EDX = ldl_phys(sm_state + 0x7fd8);
2077 ECX = ldl_phys(sm_state + 0x7fd4);
2078 EAX = ldl_phys(sm_state + 0x7fd0);
2079 env->dr[6] = ldl_phys(sm_state + 0x7fcc);
2080 env->dr[7] = ldl_phys(sm_state + 0x7fc8);
2081
2082 env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
2083 env->tr.base = ldl_phys(sm_state + 0x7f64);
2084 env->tr.limit = ldl_phys(sm_state + 0x7f60);
2085 env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;
2086#ifdef VBOX
2087 env->tr.fVBoxFlags = CPUMSELREG_FLAGS_VALID;
2088 env->tr.newselector = 0;
2089#endif
2090
2091 env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
2092 env->ldt.base = ldl_phys(sm_state + 0x7f80);
2093 env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
2094 env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;
2095#ifdef VBOX
2096 env->ldt.fVBoxFlags = CPUMSELREG_FLAGS_VALID;
2097 env->ldt.newselector = 0;
2098#endif
2099
2100 env->gdt.base = ldl_phys(sm_state + 0x7f74);
2101 env->gdt.limit = ldl_phys(sm_state + 0x7f70);
2102
2103 env->idt.base = ldl_phys(sm_state + 0x7f58);
2104 env->idt.limit = ldl_phys(sm_state + 0x7f54);
2105
2106 for(i = 0; i < 6; i++) {
2107 if (i < 3)
2108 offset = 0x7f84 + i * 12;
2109 else
2110 offset = 0x7f2c + (i - 3) * 12;
2111 cpu_x86_load_seg_cache(env, i,
2112 ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
2113 ldl_phys(sm_state + offset + 8),
2114 ldl_phys(sm_state + offset + 4),
2115 (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
2116 }
2117 cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));
2118
2119 val = ldl_phys(sm_state + 0x7efc); /* revision ID */
2120 if (val & 0x20000) {
2121 env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
2122 }
2123#endif
2124 CC_OP = CC_OP_EFLAGS;
2125 env->hflags &= ~HF_SMM_MASK;
2126 cpu_smm_update(env);
2127
2128 qemu_log_mask(CPU_LOG_INT, "SMM: after RSM\n");
2129 log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);
2130#endif /* !VBOX */
2131}
2132
2133#endif /* !CONFIG_USER_ONLY */
2134
2135
2136/* division, flags are undefined */
2137
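/*
 * DIV/IDIV helpers for the 8/16/32-bit operand sizes. Following x86
 * semantics, #DE (EXCP00_DIVZ) is raised both for a zero divisor and
 * when the quotient does not fit in the destination register.
 */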
2138void helper_divb_AL(target_ulong t0)
2139{
2140 unsigned int num, den, q, r;
2141
2142 num = (EAX & 0xffff);
2143 den = (t0 & 0xff);
2144 if (den == 0) {
2145 raise_exception(EXCP00_DIVZ);
2146 }
2147 q = (num / den);
2148 if (q > 0xff)
2149 raise_exception(EXCP00_DIVZ);
2150 q &= 0xff;
2151 r = (num % den) & 0xff;
2152 EAX = (EAX & ~0xffff) | (r << 8) | q;
2153}
2154
2155void helper_idivb_AL(target_ulong t0)
2156{
2157 int num, den, q, r;
2158
2159 num = (int16_t)EAX;
2160 den = (int8_t)t0;
2161 if (den == 0) {
2162 raise_exception(EXCP00_DIVZ);
2163 }
2164 q = (num / den);
2165 if (q != (int8_t)q)
2166 raise_exception(EXCP00_DIVZ);
2167 q &= 0xff;
2168 r = (num % den) & 0xff;
2169 EAX = (EAX & ~0xffff) | (r << 8) | q;
2170}
2171
2172void helper_divw_AX(target_ulong t0)
2173{
2174 unsigned int num, den, q, r;
2175
2176 num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
2177 den = (t0 & 0xffff);
2178 if (den == 0) {
2179 raise_exception(EXCP00_DIVZ);
2180 }
2181 q = (num / den);
2182 if (q > 0xffff)
2183 raise_exception(EXCP00_DIVZ);
2184 q &= 0xffff;
2185 r = (num % den) & 0xffff;
2186 EAX = (EAX & ~0xffff) | q;
2187 EDX = (EDX & ~0xffff) | r;
2188}
2189
2190void helper_idivw_AX(target_ulong t0)
2191{
2192 int num, den, q, r;
2193
2194 num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
2195 den = (int16_t)t0;
2196 if (den == 0) {
2197 raise_exception(EXCP00_DIVZ);
2198 }
2199 q = (num / den);
2200 if (q != (int16_t)q)
2201 raise_exception(EXCP00_DIVZ);
2202 q &= 0xffff;
2203 r = (num % den) & 0xffff;
2204 EAX = (EAX & ~0xffff) | q;
2205 EDX = (EDX & ~0xffff) | r;
2206}
2207
2208void helper_divl_EAX(target_ulong t0)
2209{
2210 unsigned int den, r;
2211 uint64_t num, q;
2212
2213 num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
2214 den = t0;
2215 if (den == 0) {
2216 raise_exception(EXCP00_DIVZ);
2217 }
2218 q = (num / den);
2219 r = (num % den);
2220 if (q > 0xffffffff)
2221 raise_exception(EXCP00_DIVZ);
2222 EAX = (uint32_t)q;
2223 EDX = (uint32_t)r;
2224}
2225
2226void helper_idivl_EAX(target_ulong t0)
2227{
2228 int den, r;
2229 int64_t num, q;
2230
2231 num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
2232 den = t0;
2233 if (den == 0) {
2234 raise_exception(EXCP00_DIVZ);
2235 }
2236 q = (num / den);
2237 r = (num % den);
2238 if (q != (int32_t)q)
2239 raise_exception(EXCP00_DIVZ);
2240 EAX = (uint32_t)q;
2241 EDX = (uint32_t)r;
2242}
2243
2244/* bcd */
2245
2246/* XXX: exception -- #DE for AAM with a zero base operand is not raised in this helper */
2247void helper_aam(int base)
2248{
2249 int al, ah;
2250 al = EAX & 0xff;
2251 ah = al / base;
2252 al = al % base;
2253 EAX = (EAX & ~0xffff) | al | (ah << 8);
2254 CC_DST = al;
2255}
2256
2257void helper_aad(int base)
2258{
2259 int al, ah;
2260 al = EAX & 0xff;
2261 ah = (EAX >> 8) & 0xff;
2262 al = ((ah * base) + al) & 0xff;
2263 EAX = (EAX & ~0xffff) | al;
2264 CC_DST = al;
2265}
2266
2267void helper_aaa(void)
2268{
2269 int icarry;
2270 int al, ah, af;
2271 int eflags;
2272
2273 eflags = helper_cc_compute_all(CC_OP);
2274 af = eflags & CC_A;
2275 al = EAX & 0xff;
2276 ah = (EAX >> 8) & 0xff;
2277
2278 icarry = (al > 0xf9);
2279 if (((al & 0x0f) > 9 ) || af) {
2280 al = (al + 6) & 0x0f;
2281 ah = (ah + 1 + icarry) & 0xff;
2282 eflags |= CC_C | CC_A;
2283 } else {
2284 eflags &= ~(CC_C | CC_A);
2285 al &= 0x0f;
2286 }
2287 EAX = (EAX & ~0xffff) | al | (ah << 8);
2288 CC_SRC = eflags;
2289}
2290
2291void helper_aas(void)
2292{
2293 int icarry;
2294 int al, ah, af;
2295 int eflags;
2296
2297 eflags = helper_cc_compute_all(CC_OP);
2298 af = eflags & CC_A;
2299 al = EAX & 0xff;
2300 ah = (EAX >> 8) & 0xff;
2301
2302 icarry = (al < 6);
2303 if (((al & 0x0f) > 9 ) || af) {
2304 al = (al - 6) & 0x0f;
2305 ah = (ah - 1 - icarry) & 0xff;
2306 eflags |= CC_C | CC_A;
2307 } else {
2308 eflags &= ~(CC_C | CC_A);
2309 al &= 0x0f;
2310 }
2311 EAX = (EAX & ~0xffff) | al | (ah << 8);
2312 CC_SRC = eflags;
2313}
2314
2315void helper_daa(void)
2316{
2317 int al, af, cf;
2318 int eflags;
2319
2320 eflags = helper_cc_compute_all(CC_OP);
2321 cf = eflags & CC_C;
2322 af = eflags & CC_A;
2323 al = EAX & 0xff;
2324
2325 eflags = 0;
2326 if (((al & 0x0f) > 9 ) || af) {
2327 al = (al + 6) & 0xff;
2328 eflags |= CC_A;
2329 }
2330 if ((al > 0x9f) || cf) {
2331 al = (al + 0x60) & 0xff;
2332 eflags |= CC_C;
2333 }
2334 EAX = (EAX & ~0xff) | al;
2335 /* well, speed is not an issue here, so we compute the flags by hand */
2336 eflags |= (al == 0) << 6; /* zf */
2337 eflags |= parity_table[al]; /* pf */
2338 eflags |= (al & 0x80); /* sf */
2339 CC_SRC = eflags;
2340}
2341
2342void helper_das(void)
2343{
2344 int al, al1, af, cf;
2345 int eflags;
2346
2347 eflags = helper_cc_compute_all(CC_OP);
2348 cf = eflags & CC_C;
2349 af = eflags & CC_A;
2350 al = EAX & 0xff;
2351
2352 eflags = 0;
2353 al1 = al;
2354 if (((al & 0x0f) > 9 ) || af) {
2355 eflags |= CC_A;
2356 if (al < 6 || cf)
2357 eflags |= CC_C;
2358 al = (al - 6) & 0xff;
2359 }
2360 if ((al1 > 0x99) || cf) {
2361 al = (al - 0x60) & 0xff;
2362 eflags |= CC_C;
2363 }
2364 EAX = (EAX & ~0xff) | al;
2365 /* well, speed is not an issue here, so we compute the flags by hand */
2366 eflags |= (al == 0) << 6; /* zf */
2367 eflags |= parity_table[al]; /* pf */
2368 eflags |= (al & 0x80); /* sf */
2369 CC_SRC = eflags;
2370}
2371
2372void helper_into(int next_eip_addend)
2373{
2374 int eflags;
2375 eflags = helper_cc_compute_all(CC_OP);
2376 if (eflags & CC_O) {
2377 raise_interrupt(EXCP04_INTO, 1, 0, next_eip_addend);
2378 }
2379}
2380
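/*
 * CMPXCHG8B: compare EDX:EAX with the 64-bit memory operand; on a match,
 * store ECX:EBX and set ZF, otherwise load the operand into EDX:EAX and
 * clear ZF. The memory operand is written back in both cases, matching
 * the write cycle real hardware always performs.
 */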
2381void helper_cmpxchg8b(target_ulong a0)
2382{
2383 uint64_t d;
2384 int eflags;
2385
2386 eflags = helper_cc_compute_all(CC_OP);
2387 d = ldq(a0);
2388 if (d == (((uint64_t)EDX << 32) | (uint32_t)EAX)) {
2389 stq(a0, ((uint64_t)ECX << 32) | (uint32_t)EBX);
2390 eflags |= CC_Z;
2391 } else {
2392 /* always do the store */
2393 stq(a0, d);
2394 EDX = (uint32_t)(d >> 32);
2395 EAX = (uint32_t)d;
2396 eflags &= ~CC_Z;
2397 }
2398 CC_SRC = eflags;
2399}
2400
2401#ifdef TARGET_X86_64
2402void helper_cmpxchg16b(target_ulong a0)
2403{
2404 uint64_t d0, d1;
2405 int eflags;
2406
2407 if ((a0 & 0xf) != 0)
2408 raise_exception(EXCP0D_GPF);
2409 eflags = helper_cc_compute_all(CC_OP);
2410 d0 = ldq(a0);
2411 d1 = ldq(a0 + 8);
2412 if (d0 == EAX && d1 == EDX) {
2413 stq(a0, EBX);
2414 stq(a0 + 8, ECX);
2415 eflags |= CC_Z;
2416 } else {
2417 /* always do the store */
2418 stq(a0, d0);
2419 stq(a0 + 8, d1);
2420 EDX = d1;
2421 EAX = d0;
2422 eflags &= ~CC_Z;
2423 }
2424 CC_SRC = eflags;
2425}
2426#endif
2427
2428void helper_single_step(void)
2429{
2430#ifndef CONFIG_USER_ONLY
2431 check_hw_breakpoints(env, 1);
2432 env->dr[6] |= DR6_BS;
2433#endif
2434 raise_exception(EXCP01_DB);
2435}
2436
2437void helper_cpuid(void)
2438{
2439 uint32_t eax, ebx, ecx, edx;
2440
2441 helper_svm_check_intercept_param(SVM_EXIT_CPUID, 0);
2442
2443 cpu_x86_cpuid(env, (uint32_t)EAX, (uint32_t)ECX, &eax, &ebx, &ecx, &edx);
2444 EAX = eax;
2445 EBX = ebx;
2446 ECX = ecx;
2447 EDX = edx;
2448}
2449
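/*
 * Helper for ENTER with a nonzero nesting level: copies the enclosing
 * frame pointers (level - 1 of them) from the old frame onto the new
 * stack and then pushes the new frame pointer t1. The saved-EBP slot and
 * the final EBP/ESP updates are presumably handled by the generated
 * code; the first esp adjustment below only skips over that slot.
 */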
2450void helper_enter_level(int level, int data32, target_ulong t1)
2451{
2452 target_ulong ssp;
2453 uint32_t esp_mask, esp, ebp;
2454
2455 esp_mask = get_sp_mask(env->segs[R_SS].flags);
2456 ssp = env->segs[R_SS].base;
2457 ebp = EBP;
2458 esp = ESP;
2459 if (data32) {
2460 /* 32 bit */
2461 esp -= 4;
2462 while (--level) {
2463 esp -= 4;
2464 ebp -= 4;
2465 stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
2466 }
2467 esp -= 4;
2468 stl(ssp + (esp & esp_mask), t1);
2469 } else {
2470 /* 16 bit */
2471 esp -= 2;
2472 while (--level) {
2473 esp -= 2;
2474 ebp -= 2;
2475 stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
2476 }
2477 esp -= 2;
2478 stw(ssp + (esp & esp_mask), t1);
2479 }
2480}
2481
2482#ifdef TARGET_X86_64
2483void helper_enter64_level(int level, int data64, target_ulong t1)
2484{
2485 target_ulong esp, ebp;
2486 ebp = EBP;
2487 esp = ESP;
2488
2489 if (data64) {
2490 /* 64 bit */
2491 esp -= 8;
2492 while (--level) {
2493 esp -= 8;
2494 ebp -= 8;
2495 stq(esp, ldq(ebp));
2496 }
2497 esp -= 8;
2498 stq(esp, t1);
2499 } else {
2500 /* 16 bit */
2501 esp -= 2;
2502 while (--level) {
2503 esp -= 2;
2504 ebp -= 2;
2505 stw(esp, lduw(ebp));
2506 }
2507 esp -= 2;
2508 stw(esp, t1);
2509 }
2510}
2511#endif
2512
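/*
 * LLDT: load the LDT register from a GDT descriptor. A null selector
 * leaves the LDT unusable; in long mode the descriptor is 16 bytes and
 * the upper half of the base is taken from its second 8-byte slot.
 */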
2513void helper_lldt(int selector)
2514{
2515 SegmentCache *dt;
2516 uint32_t e1, e2;
2517#ifndef VBOX
2518 int index, entry_limit;
2519#else
2520 unsigned int index, entry_limit;
2521#endif
2522 target_ulong ptr;
2523
2524#ifdef VBOX
2525 Log(("helper_lldt_T0: old ldtr=%RTsel {.base=%RGv, .limit=%RGv} new=%RTsel\n",
2526 (RTSEL)env->ldt.selector, (RTGCPTR)env->ldt.base, (RTGCPTR)env->ldt.limit, (RTSEL)(selector & 0xffff)));
2527#endif
2528
2529 selector &= 0xffff;
2530 if ((selector & 0xfffc) == 0) {
2531 /* XXX: NULL selector case: invalid LDT */
2532 env->ldt.base = 0;
2533 env->ldt.limit = 0;
2534#ifdef VBOX
2535 env->ldt.flags = DESC_INTEL_UNUSABLE;
2536 env->ldt.fVBoxFlags = CPUMSELREG_FLAGS_VALID;
2537 env->ldt.newselector = 0;
2538#endif
2539 } else {
2540 if (selector & 0x4)
2541 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2542 dt = &env->gdt;
2543 index = selector & ~7;
2544#ifdef TARGET_X86_64
2545 if (env->hflags & HF_LMA_MASK)
2546 entry_limit = 15;
2547 else
2548#endif
2549 entry_limit = 7;
2550 if ((index + entry_limit) > dt->limit)
2551 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2552 ptr = dt->base + index;
2553 e1 = ldl_kernel(ptr);
2554 e2 = ldl_kernel(ptr + 4);
2555 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
2556 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2557 if (!(e2 & DESC_P_MASK))
2558 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2559#ifdef TARGET_X86_64
2560 if (env->hflags & HF_LMA_MASK) {
2561 uint32_t e3;
2562 e3 = ldl_kernel(ptr + 8);
2563 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2564 env->ldt.base |= (target_ulong)e3 << 32;
2565 } else
2566#endif
2567 {
2568 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2569 }
2570 }
2571 env->ldt.selector = selector;
2572#ifdef VBOX
2573 Log(("helper_lldt_T0: new ldtr=%RTsel {.base=%RGv, .limit=%RGv}\n",
2574 (RTSEL)env->ldt.selector, (RTGCPTR)env->ldt.base, (RTGCPTR)env->ldt.limit));
2575#endif
2576}
2577
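/*
 * LTR: load the task register. Only an available 16-bit (type 1) or
 * 32-bit (type 9) TSS descriptor in the GDT is accepted, and it is then
 * marked busy. The VBOX build rejects a null selector with #TS instead
 * of silently clearing TR.
 */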
2578void helper_ltr(int selector)
2579{
2580 SegmentCache *dt;
2581 uint32_t e1, e2;
2582#ifndef VBOX
2583 int index, type, entry_limit;
2584#else
2585 unsigned int index;
2586 int type, entry_limit;
2587#endif
2588 target_ulong ptr;
2589
2590#ifdef VBOX
2591 Log(("helper_ltr: pc=%RGv old tr=%RTsel {.base=%RGv, .limit=%RGv, .flags=%RX32} new=%RTsel\n",
2592 (RTGCPTR)env->eip, (RTSEL)env->tr.selector, (RTGCPTR)env->tr.base, (RTGCPTR)env->tr.limit,
2593 env->tr.flags, (RTSEL)(selector & 0xffff)));
2594#endif
2595 selector &= 0xffff;
2596 if ((selector & 0xfffc) == 0) {
2597 /* NULL selector case: invalid TR */
2598#ifdef VBOX
2599 raise_exception_err(EXCP0A_TSS, 0);
2600#else
2601 env->tr.base = 0;
2602 env->tr.limit = 0;
2603 env->tr.flags = 0;
2604#endif
2605 } else {
2606 if (selector & 0x4)
2607 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2608 dt = &env->gdt;
2609 index = selector & ~7;
2610#ifdef TARGET_X86_64
2611 if (env->hflags & HF_LMA_MASK)
2612 entry_limit = 15;
2613 else
2614#endif
2615 entry_limit = 7;
2616 if ((index + entry_limit) > dt->limit)
2617 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2618 ptr = dt->base + index;
2619 e1 = ldl_kernel(ptr);
2620 e2 = ldl_kernel(ptr + 4);
2621 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2622 if ((e2 & DESC_S_MASK) ||
2623 (type != 1 && type != 9))
2624 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2625 if (!(e2 & DESC_P_MASK))
2626 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2627#ifdef TARGET_X86_64
2628 if (env->hflags & HF_LMA_MASK) {
2629 uint32_t e3, e4;
2630 e3 = ldl_kernel(ptr + 8);
2631 e4 = ldl_kernel(ptr + 12);
2632 if ((e4 >> DESC_TYPE_SHIFT) & 0xf)
2633 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2634 load_seg_cache_raw_dt(&env->tr, e1, e2);
2635 env->tr.base |= (target_ulong)e3 << 32;
2636 } else
2637#endif
2638 {
2639 load_seg_cache_raw_dt(&env->tr, e1, e2);
2640 }
2641 e2 |= DESC_TSS_BUSY_MASK;
2642 stl_kernel(ptr + 4, e2);
2643 }
2644 env->tr.selector = selector;
2645#ifdef VBOX
2646 Log(("helper_ltr: new tr=%RTsel {.base=%RGv, .limit=%RGv, .flags=%RX32} new=%RTsel\n",
2647 (RTSEL)env->tr.selector, (RTGCPTR)env->tr.base, (RTGCPTR)env->tr.limit,
2648 env->tr.flags, (RTSEL)(selector & 0xffff)));
2649#endif
2650}
2651
2652/* Only works in protected mode, not in VM86 mode. seg_reg must be != R_CS. */
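/* A null selector loads an unusable descriptor cache (the VBOX build
   keeps Intel-style unusable flags and still applies the SS restrictions);
   otherwise the descriptor is checked for S-type, writability/readability,
   DPL/RPL/CPL consistency and presence, and its accessed bit is set. */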
2653void helper_load_seg(int seg_reg, int selector)
2654{
2655 uint32_t e1, e2;
2656 int cpl, dpl, rpl;
2657 SegmentCache *dt;
2658#ifndef VBOX
2659 int index;
2660#else
2661 unsigned int index;
2662#endif
2663 target_ulong ptr;
2664
2665 selector &= 0xffff;
2666 cpl = env->hflags & HF_CPL_MASK;
2667#ifdef VBOX
2668
2669 /* Trying to load a selector with CPL=1? */
2670 if (cpl == 0 && (selector & 3) == 1 && (env->state & CPU_RAW_RING0))
2671 {
2672 Log(("RPL 1 -> sel %04X -> %04X (helper_load_seg)\n", selector, selector & 0xfffc));
2673 selector = selector & 0xfffc;
2674 }
2675#endif /* VBOX */
2676 if ((selector & 0xfffc) == 0) {
2677 /* null selector case */
2678#ifndef VBOX
2679 if (seg_reg == R_SS
2680#ifdef TARGET_X86_64
2681 && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
2682#endif
2683 )
2684 raise_exception_err(EXCP0D_GPF, 0);
2685 cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
2686#else
2687 if (seg_reg == R_SS) {
2688 if (!(env->hflags & HF_CS64_MASK) || cpl == 3)
2689 raise_exception_err(EXCP0D_GPF, 0);
2690 e2 = (cpl << DESC_DPL_SHIFT) | DESC_INTEL_UNUSABLE;
2691 } else {
2692 e2 = DESC_INTEL_UNUSABLE;
2693 }
2694 cpu_x86_load_seg_cache_with_clean_flags(env, seg_reg, selector, 0, 0, e2);
2695#endif
2696 } else {
2697
2698 if (selector & 0x4)
2699 dt = &env->ldt;
2700 else
2701 dt = &env->gdt;
2702 index = selector & ~7;
2703 if ((index + 7) > dt->limit)
2704 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2705 ptr = dt->base + index;
2706 e1 = ldl_kernel(ptr);
2707 e2 = ldl_kernel(ptr + 4);
2708
2709 if (!(e2 & DESC_S_MASK))
2710 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2711 rpl = selector & 3;
2712 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2713 if (seg_reg == R_SS) {
2714 /* must be writable segment */
2715 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
2716 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2717 if (rpl != cpl || dpl != cpl)
2718 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2719 } else {
2720 /* must be readable segment */
2721 if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
2722 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2723
2724 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2725 /* if not conforming code, test rights */
2726 if (dpl < cpl || dpl < rpl)
2727 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2728 }
2729 }
2730
2731 if (!(e2 & DESC_P_MASK)) {
2732 if (seg_reg == R_SS)
2733 raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
2734 else
2735 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2736 }
2737
2738 /* set the access bit if not already set */
2739 if (!(e2 & DESC_A_MASK)) {
2740 e2 |= DESC_A_MASK;
2741 stl_kernel(ptr + 4, e2);
2742 }
2743
2744 cpu_x86_load_seg_cache(env, seg_reg, selector,
2745 get_seg_base(e1, e2),
2746 get_seg_limit(e1, e2),
2747 e2);
2748#if 0
2749 qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
2750 selector, (unsigned long)sc->base, sc->limit, sc->flags);
2751#endif
2752 }
2753}
2754
2755/* protected mode jump */
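/* A far JMP may go directly to a code segment, through a call gate (which
   supplies the target CS:EIP), or to a task gate/TSS descriptor, in which
   case a task switch is performed via switch_tss(). */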
2756void helper_ljmp_protected(int new_cs, target_ulong new_eip,
2757 int next_eip_addend)
2758{
2759 int gate_cs, type;
2760 uint32_t e1, e2, cpl, dpl, rpl, limit;
2761 target_ulong next_eip;
2762
2763#ifdef VBOX /** @todo Why do we do this? */
2764 e1 = e2 = 0;
2765#endif
2766 if ((new_cs & 0xfffc) == 0)
2767 raise_exception_err(EXCP0D_GPF, 0);
2768 if (load_segment(&e1, &e2, new_cs) != 0)
2769 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2770 cpl = env->hflags & HF_CPL_MASK;
2771 if (e2 & DESC_S_MASK) {
2772 if (!(e2 & DESC_CS_MASK))
2773 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2774 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2775 if (e2 & DESC_C_MASK) {
2776 /* conforming code segment */
2777 if (dpl > cpl)
2778 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2779 } else {
2780 /* non-conforming code segment */
2781 rpl = new_cs & 3;
2782 if (rpl > cpl)
2783 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2784 if (dpl != cpl)
2785 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2786 }
2787 if (!(e2 & DESC_P_MASK))
2788 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2789 limit = get_seg_limit(e1, e2);
2790 if (new_eip > limit &&
2791 !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK))
2792 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2793#ifdef VBOX
2794 if (!(e2 & DESC_A_MASK))
2795 e2 = set_segment_accessed(new_cs, e2);
2796#endif
2797 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2798 get_seg_base(e1, e2), limit, e2);
2799 EIP = new_eip;
2800 } else {
2801 /* jump to call or task gate */
2802 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2803 rpl = new_cs & 3;
2804 cpl = env->hflags & HF_CPL_MASK;
2805 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2806 switch(type) {
2807 case 1: /* 286 TSS */
2808 case 9: /* 386 TSS */
2809 case 5: /* task gate */
2810 if (dpl < cpl || dpl < rpl)
2811 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2812 next_eip = env->eip + next_eip_addend;
2813 switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
2814 CC_OP = CC_OP_EFLAGS;
2815 break;
2816 case 4: /* 286 call gate */
2817 case 12: /* 386 call gate */
2818 if ((dpl < cpl) || (dpl < rpl))
2819 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2820 if (!(e2 & DESC_P_MASK))
2821 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2822 gate_cs = e1 >> 16;
2823 new_eip = (e1 & 0xffff);
2824 if (type == 12)
2825 new_eip |= (e2 & 0xffff0000);
2826 if (load_segment(&e1, &e2, gate_cs) != 0)
2827 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2828 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2829 /* must be code segment */
2830 if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
2831 (DESC_S_MASK | DESC_CS_MASK)))
2832 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2833 if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
2834 (!(e2 & DESC_C_MASK) && (dpl != cpl)))
2835 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2836 if (!(e2 & DESC_P_MASK))
2837#ifdef VBOX /* See page 3-514 of 253666.pdf */
2838 raise_exception_err(EXCP0B_NOSEG, gate_cs & 0xfffc);
2839#else
2840 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2841#endif
2842 limit = get_seg_limit(e1, e2);
2843 if (new_eip > limit)
2844 raise_exception_err(EXCP0D_GPF, 0);
2845 cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
2846 get_seg_base(e1, e2), limit, e2);
2847 EIP = new_eip;
2848 break;
2849 default:
2850 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2851 break;
2852 }
2853 }
2854}
2855
2856/* real mode call */
2857void helper_lcall_real(int new_cs, target_ulong new_eip1,
2858 int shift, int next_eip)
2859{
2860 int new_eip;
2861 uint32_t esp, esp_mask;
2862 target_ulong ssp;
2863
2864 new_eip = new_eip1;
2865 esp = ESP;
2866 esp_mask = get_sp_mask(env->segs[R_SS].flags);
2867 ssp = env->segs[R_SS].base;
2868 if (shift) {
2869 PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
2870 PUSHL(ssp, esp, esp_mask, next_eip);
2871 } else {
2872 PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
2873 PUSHW(ssp, esp, esp_mask, next_eip);
2874 }
2875
2876 SET_ESP(esp, esp_mask);
2877 env->eip = new_eip;
2878 env->segs[R_CS].selector = new_cs;
2879 env->segs[R_CS].base = (new_cs << 4);
2880}
2881
2882/* protected mode call */
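/* A far CALL may target a code segment, a call gate, a task gate or a
   TSS. A call gate leading to a more privileged code segment switches to
   the inner stack taken from the TSS and copies param_count parameters
   from the old stack to the new one. */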
2883void helper_lcall_protected(int new_cs, target_ulong new_eip,
2884 int shift, int next_eip_addend)
2885{
2886 int new_stack, i;
2887 uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
2888 uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, sp, type, ss_dpl, sp_mask;
2889 uint32_t val, limit, old_sp_mask;
2890 target_ulong ssp, old_ssp, next_eip;
2891
2892#ifdef VBOX /** @todo Why do we do this? */
2893 e1 = e2 = 0;
2894#endif
2895 next_eip = env->eip + next_eip_addend;
2896 LOG_PCALL("lcall %04x:%08x s=%d\n", new_cs, (uint32_t)new_eip, shift);
2897 LOG_PCALL_STATE(env);
2898 if ((new_cs & 0xfffc) == 0)
2899 raise_exception_err(EXCP0D_GPF, 0);
2900 if (load_segment(&e1, &e2, new_cs) != 0)
2901 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2902 cpl = env->hflags & HF_CPL_MASK;
2903 LOG_PCALL("desc=%08x:%08x\n", e1, e2);
2904 if (e2 & DESC_S_MASK) {
2905 if (!(e2 & DESC_CS_MASK))
2906 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2907 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2908 if (e2 & DESC_C_MASK) {
2909 /* conforming code segment */
2910 if (dpl > cpl)
2911 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2912 } else {
2913 /* non-conforming code segment */
2914 rpl = new_cs & 3;
2915 if (rpl > cpl)
2916 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2917 if (dpl != cpl)
2918 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2919 }
2920 if (!(e2 & DESC_P_MASK))
2921 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2922#ifdef VBOX
2923 if (!(e2 & DESC_A_MASK))
2924 e2 = set_segment_accessed(new_cs, e2);
2925#endif
2926
2927#ifdef TARGET_X86_64
2928 /* XXX: check 16/32 bit cases in long mode */
2929 if (shift == 2) {
2930 target_ulong rsp;
2931 /* 64 bit case */
2932 rsp = ESP;
2933 PUSHQ(rsp, env->segs[R_CS].selector);
2934 PUSHQ(rsp, next_eip);
2935 /* from this point, not restartable */
2936 ESP = rsp;
2937 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2938 get_seg_base(e1, e2),
2939 get_seg_limit(e1, e2), e2);
2940 EIP = new_eip;
2941 } else
2942#endif
2943 {
2944 sp = ESP;
2945 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2946 ssp = env->segs[R_SS].base;
2947 if (shift) {
2948 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2949 PUSHL(ssp, sp, sp_mask, next_eip);
2950 } else {
2951 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2952 PUSHW(ssp, sp, sp_mask, next_eip);
2953 }
2954
2955 limit = get_seg_limit(e1, e2);
2956 if (new_eip > limit)
2957 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2958 /* from this point, not restartable */
2959 SET_ESP(sp, sp_mask);
2960 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2961 get_seg_base(e1, e2), limit, e2);
2962 EIP = new_eip;
2963 }
2964 } else {
2965 /* check gate type */
2966 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
2967 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2968 rpl = new_cs & 3;
2969 switch(type) {
2970 case 1: /* available 286 TSS */
2971 case 9: /* available 386 TSS */
2972 case 5: /* task gate */
2973 if (dpl < cpl || dpl < rpl)
2974 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2975 switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
2976 CC_OP = CC_OP_EFLAGS;
2977 return;
2978 case 4: /* 286 call gate */
2979 case 12: /* 386 call gate */
2980 break;
2981 default:
2982 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2983 break;
2984 }
2985 shift = type >> 3;
2986
2987 if (dpl < cpl || dpl < rpl)
2988 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2989 /* check valid bit */
2990 if (!(e2 & DESC_P_MASK))
2991 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2992 selector = e1 >> 16;
2993 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
2994 param_count = e2 & 0x1f;
2995 if ((selector & 0xfffc) == 0)
2996 raise_exception_err(EXCP0D_GPF, 0);
2997
2998 if (load_segment(&e1, &e2, selector) != 0)
2999 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
3000 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
3001 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
3002 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3003 if (dpl > cpl)
3004 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
3005 if (!(e2 & DESC_P_MASK))
3006 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
3007
3008 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
3009 /* to inner privilege */
3010 get_ss_esp_from_tss(&ss, &sp, dpl);
3011 LOG_PCALL("new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n",
3012 ss, sp, param_count, ESP);
3013 if ((ss & 0xfffc) == 0)
3014 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
3015 if ((ss & 3) != dpl)
3016 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
3017 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
3018 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
3019 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
3020 if (ss_dpl != dpl)
3021 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
3022 if (!(ss_e2 & DESC_S_MASK) ||
3023 (ss_e2 & DESC_CS_MASK) ||
3024 !(ss_e2 & DESC_W_MASK))
3025 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
3026 if (!(ss_e2 & DESC_P_MASK))
3027#ifdef VBOX /* See page 3-99 of 253666.pdf */
3028 raise_exception_err(EXCP0C_STACK, ss & 0xfffc);
3029#else
3030 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
3031#endif
3032
3033 // push_size = ((param_count * 2) + 8) << shift;
3034
3035 old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
3036 old_ssp = env->segs[R_SS].base;
3037
3038 sp_mask = get_sp_mask(ss_e2);
3039 ssp = get_seg_base(ss_e1, ss_e2);
3040 if (shift) {
3041 PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
3042 PUSHL(ssp, sp, sp_mask, ESP);
3043 for(i = param_count - 1; i >= 0; i--) {
3044 val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
3045 PUSHL(ssp, sp, sp_mask, val);
3046 }
3047 } else {
3048 PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
3049 PUSHW(ssp, sp, sp_mask, ESP);
3050 for(i = param_count - 1; i >= 0; i--) {
3051 val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
3052 PUSHW(ssp, sp, sp_mask, val);
3053 }
3054 }
3055 new_stack = 1;
3056 } else {
3057 /* to same privilege */
3058 sp = ESP;
3059 sp_mask = get_sp_mask(env->segs[R_SS].flags);
3060 ssp = env->segs[R_SS].base;
3061 // push_size = (4 << shift);
3062 new_stack = 0;
3063 }
3064
3065 if (shift) {
3066 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
3067 PUSHL(ssp, sp, sp_mask, next_eip);
3068 } else {
3069 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
3070 PUSHW(ssp, sp, sp_mask, next_eip);
3071 }
3072
3073 /* from this point, not restartable */
3074
3075 if (new_stack) {
3076 ss = (ss & ~3) | dpl;
3077 cpu_x86_load_seg_cache(env, R_SS, ss,
3078 ssp,
3079 get_seg_limit(ss_e1, ss_e2),
3080 ss_e2);
3081 }
3082
3083 selector = (selector & ~3) | dpl;
3084 cpu_x86_load_seg_cache(env, R_CS, selector,
3085 get_seg_base(e1, e2),
3086 get_seg_limit(e1, e2),
3087 e2);
3088 cpu_x86_set_cpl(env, dpl);
3089 SET_ESP(sp, sp_mask);
3090 EIP = offset;
3091 }
3092}
3093
3094/* real and vm86 mode iret */
3095void helper_iret_real(int shift)
3096{
3097 uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
3098 target_ulong ssp;
3099 int eflags_mask;
3100#ifdef VBOX
3101 bool fVME = false;
3102
3103 remR3TrapClear(env->pVM);
3104#endif /* VBOX */
3105
3106 sp_mask = 0xffff; /* XXX: use SS segment size? */
3107 sp = ESP;
3108 ssp = env->segs[R_SS].base;
3109 if (shift == 1) {
3110 /* 32 bits */
3111 POPL(ssp, sp, sp_mask, new_eip);
3112 POPL(ssp, sp, sp_mask, new_cs);
3113 new_cs &= 0xffff;
3114 POPL(ssp, sp, sp_mask, new_eflags);
3115 } else {
3116 /* 16 bits */
3117 POPW(ssp, sp, sp_mask, new_eip);
3118 POPW(ssp, sp, sp_mask, new_cs);
3119 POPW(ssp, sp, sp_mask, new_eflags);
3120 }
3121#ifdef VBOX
3122 if ( (env->eflags & VM_MASK)
3123 && ((env->eflags >> IOPL_SHIFT) & 3) != 3
3124 && (env->cr[4] & CR4_VME_MASK)) /* implied or else we would fault earlier */
3125 {
3126 fVME = true;
3127 /* if virtual interrupt pending and (virtual) interrupts will be enabled -> #GP */
3128 /* if TF will be set -> #GP */
3129 if ( ((new_eflags & IF_MASK) && (env->eflags & VIP_MASK))
3130 || (new_eflags & TF_MASK))
3131 raise_exception(EXCP0D_GPF);
3132 }
3133#endif /* VBOX */
3134 ESP = (ESP & ~sp_mask) | (sp & sp_mask);
3135 env->segs[R_CS].selector = new_cs;
3136 env->segs[R_CS].base = (new_cs << 4);
3137 env->eip = new_eip;
3138#ifdef VBOX
3139 if (fVME)
3140 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
3141 else
3142#endif
3143 if (env->eflags & VM_MASK)
3144 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
3145 else
3146 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
3147 if (shift == 0)
3148 eflags_mask &= 0xffff;
3149 load_eflags(new_eflags, eflags_mask);
3150 env->hflags2 &= ~HF2_NMI_MASK;
3151#ifdef VBOX
3152 if (fVME)
3153 {
3154 if (new_eflags & IF_MASK)
3155 env->eflags |= VIF_MASK;
3156 else
3157 env->eflags &= ~VIF_MASK;
3158 }
3159#endif /* VBOX */
3160}
3161
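/* On a return to an outer privilege level, data and non-conforming code
   segment registers whose DPL is below the new CPL are nullified. */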
3162static inline void validate_seg(int seg_reg, int cpl)
3163{
3164 int dpl;
3165 uint32_t e2;
3166
3167 /* XXX: on x86_64, we do not want to nullify FS and GS because
3168 they may still contain a valid base. I would be interested to
3169 know how a real x86_64 CPU behaves */
3170 if ((seg_reg == R_FS || seg_reg == R_GS) &&
3171 (env->segs[seg_reg].selector & 0xfffc) == 0)
3172 return;
3173
3174 e2 = env->segs[seg_reg].flags;
3175 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3176 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
3177 /* data or non-conforming code segment */
3178 if (dpl < cpl) {
3179 cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
3180 }
3181 }
3182}
3183
3184/* protected mode iret */
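/* Common worker for RETF and IRET (is_iret selects whether EFLAGS is
   popped). Handles same-privilege and outer-privilege returns as well as
   the IRET transition back to virtual-8086 mode. */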
3185static inline void helper_ret_protected(int shift, int is_iret, int addend)
3186{
3187 uint32_t new_cs, new_eflags, new_ss;
3188 uint32_t new_es, new_ds, new_fs, new_gs;
3189 uint32_t e1, e2, ss_e1, ss_e2;
3190 int cpl, dpl, rpl, eflags_mask, iopl;
3191 target_ulong ssp, sp, new_eip, new_esp, sp_mask;
3192
3193#ifdef VBOX /** @todo Why do we do this? */
3194 ss_e1 = ss_e2 = e1 = e2 = 0;
3195#endif
3196
3197#ifdef TARGET_X86_64
3198 if (shift == 2)
3199 sp_mask = -1;
3200 else
3201#endif
3202 sp_mask = get_sp_mask(env->segs[R_SS].flags);
3203 sp = ESP;
3204 ssp = env->segs[R_SS].base;
3205 new_eflags = 0; /* avoid warning */
3206#ifdef TARGET_X86_64
3207 if (shift == 2) {
3208 POPQ(sp, new_eip);
3209 POPQ(sp, new_cs);
3210 new_cs &= 0xffff;
3211 if (is_iret) {
3212 POPQ(sp, new_eflags);
3213 }
3214 } else
3215#endif
3216 if (shift == 1) {
3217 /* 32 bits */
3218 POPL(ssp, sp, sp_mask, new_eip);
3219 POPL(ssp, sp, sp_mask, new_cs);
3220 new_cs &= 0xffff;
3221 if (is_iret) {
3222 POPL(ssp, sp, sp_mask, new_eflags);
3223#define LOG_GROUP LOG_GROUP_REM
3224#if defined(VBOX) && defined(DEBUG)
3225 Log(("iret: new CS %04X (old=%x)\n", new_cs, env->segs[R_CS].selector));
3226 Log(("iret: new EIP %08X\n", (uint32_t)new_eip));
3227 Log(("iret: new EFLAGS %08X\n", new_eflags));
3228 Log(("iret: EAX=%08x\n", (uint32_t)EAX));
3229#endif
3230 if (new_eflags & VM_MASK)
3231 goto return_to_vm86;
3232 }
3233#ifdef VBOX
3234 if ((new_cs & 0x3) == 1 && (env->state & CPU_RAW_RING0))
3235 {
3236 if ( !EMIsRawRing1Enabled(env->pVM)
3237 || env->segs[R_CS].selector == (new_cs & 0xfffc))
3238 {
3239 Log(("RPL 1 -> new_cs %04X -> %04X\n", new_cs, new_cs & 0xfffc));
3240 new_cs = new_cs & 0xfffc;
3241 }
3242 else
3243 {
3244 /* Ugly assumption: assume a genuine switch to ring-1. */
3245 Log(("Genuine switch to ring-1 (iret)\n"));
3246 }
3247 }
3248 else if ((new_cs & 0x3) == 2 && (env->state & CPU_RAW_RING0) && EMIsRawRing1Enabled(env->pVM))
3249 {
3250 Log(("RPL 2 -> new_cs %04X -> %04X\n", new_cs, (new_cs & 0xfffc) | 1));
3251 new_cs = (new_cs & 0xfffc) | 1;
3252 }
3253#endif
3254 } else {
3255 /* 16 bits */
3256 POPW(ssp, sp, sp_mask, new_eip);
3257 POPW(ssp, sp, sp_mask, new_cs);
3258 if (is_iret)
3259 POPW(ssp, sp, sp_mask, new_eflags);
3260 }
3261 LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
3262 new_cs, new_eip, shift, addend);
3263 LOG_PCALL_STATE(env);
3264 if ((new_cs & 0xfffc) == 0)
3265 {
3266#if defined(VBOX) && defined(DEBUG)
3267 Log(("(new_cs & 0xfffc) == 0\n"));
3268#endif
3269 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3270 }
3271 if (load_segment(&e1, &e2, new_cs) != 0)
3272 {
3273#if defined(VBOX) && defined(DEBUG)
3274 Log(("load_segment failed\n"));
3275#endif
3276 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3277 }
3278 if (!(e2 & DESC_S_MASK) ||
3279 !(e2 & DESC_CS_MASK))
3280 {
3281#if defined(VBOX) && defined(DEBUG)
3282 Log(("e2 mask %08x\n", e2));
3283#endif
3284 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3285 }
3286 cpl = env->hflags & HF_CPL_MASK;
3287 rpl = new_cs & 3;
3288 if (rpl < cpl)
3289 {
3290#if defined(VBOX) && defined(DEBUG)
3291 Log(("rpl < cpl (%d vs %d)\n", rpl, cpl));
3292#endif
3293 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3294 }
3295 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3296
3297 if (e2 & DESC_C_MASK) {
3298 if (dpl > rpl)
3299 {
3300#if defined(VBOX) && defined(DEBUG)
3301 Log(("dpl > rpl (%d vs %d)\n", dpl, rpl));
3302#endif
3303 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3304 }
3305 } else {
3306 if (dpl != rpl)
3307 {
3308#if defined(VBOX) && defined(DEBUG)
3309 Log(("dpl != rpl (%d vs %d) e1=%x e2=%x\n", dpl, rpl, e1, e2));
3310#endif
3311 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3312 }
3313 }
3314 if (!(e2 & DESC_P_MASK))
3315 {
3316#if defined(VBOX) && defined(DEBUG)
3317 Log(("DESC_P_MASK e2=%08x\n", e2));
3318#endif
3319 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
3320 }
3321
3322 sp += addend;
3323 if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
3324 ((env->hflags & HF_CS64_MASK) && !is_iret))) {
3325 /* return to same privilege level */
3326#ifdef VBOX
3327 if (!(e2 & DESC_A_MASK))
3328 e2 = set_segment_accessed(new_cs, e2);
3329#endif
3330 cpu_x86_load_seg_cache(env, R_CS, new_cs,
3331 get_seg_base(e1, e2),
3332 get_seg_limit(e1, e2),
3333 e2);
3334 } else {
3335 /* return to different privilege level */
3336#ifdef TARGET_X86_64
3337 if (shift == 2) {
3338 POPQ(sp, new_esp);
3339 POPQ(sp, new_ss);
3340 new_ss &= 0xffff;
3341 } else
3342#endif
3343 if (shift == 1) {
3344 /* 32 bits */
3345 POPL(ssp, sp, sp_mask, new_esp);
3346 POPL(ssp, sp, sp_mask, new_ss);
3347 new_ss &= 0xffff;
3348 } else {
3349 /* 16 bits */
3350 POPW(ssp, sp, sp_mask, new_esp);
3351 POPW(ssp, sp, sp_mask, new_ss);
3352 }
3353 LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
3354 new_ss, new_esp);
3355 if ((new_ss & 0xfffc) == 0) {
3356#ifdef TARGET_X86_64
3357 /* NULL ss is allowed in long mode if cpl != 3 */
3358# ifndef VBOX
3359 /* XXX: test CS64 ? */
3360 if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
3361 cpu_x86_load_seg_cache(env, R_SS, new_ss,
3362 0, 0xffffffff,
3363 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3364 DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
3365 DESC_W_MASK | DESC_A_MASK);
3366 ss_e2 = DESC_B_MASK; /* XXX: should not be needed ? */
3367 } else
3368# else /* VBOX */
3369 if ((env->hflags & HF_LMA_MASK) && rpl != 3 && (e2 & DESC_L_MASK)) {
3370 if (!(e2 & DESC_A_MASK))
3371 e2 = set_segment_accessed(new_cs, e2);
3372 cpu_x86_load_seg_cache_with_clean_flags(env, R_SS, new_ss,
3373 0, 0xffffffff,
3374 DESC_INTEL_UNUSABLE | (rpl << DESC_DPL_SHIFT) );
3375 ss_e2 = DESC_B_MASK; /* not really used */
3376 } else
3377# endif
3378#endif
3379 {
3380#if defined(VBOX) && defined(DEBUG)
3381 Log(("NULL ss, rpl=%d\n", rpl));
3382#endif
3383 raise_exception_err(EXCP0D_GPF, 0);
3384 }
3385 } else {
3386 if ((new_ss & 3) != rpl)
3387 {
3388#if defined(VBOX) && defined(DEBUG)
3389 Log(("new_ss=%x != rpl=%d\n", new_ss, rpl));
3390#endif
3391 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3392 }
3393 if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
3394 {
3395#if defined(VBOX) && defined(DEBUG)
3396 Log(("new_ss=%x load error\n", new_ss));
3397#endif
3398 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3399 }
3400 if (!(ss_e2 & DESC_S_MASK) ||
3401 (ss_e2 & DESC_CS_MASK) ||
3402 !(ss_e2 & DESC_W_MASK))
3403 {
3404#if defined(VBOX) && defined(DEBUG)
3405 Log(("new_ss=%x ss_e2=%#x bad type\n", new_ss, ss_e2));
3406#endif
3407 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3408 }
3409 dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
3410 if (dpl != rpl)
3411 {
3412#if defined(VBOX) && defined(DEBUG)
3413 Log(("SS.dpl=%u != rpl=%u\n", dpl, rpl));
3414#endif
3415 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3416 }
3417 if (!(ss_e2 & DESC_P_MASK))
3418 {
3419#if defined(VBOX) && defined(DEBUG)
3420 Log(("new_ss=%#x #NP\n", new_ss));
3421#endif
3422 raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
3423 }
3424#ifdef VBOX
3425 if (!(e2 & DESC_A_MASK))
3426 e2 = set_segment_accessed(new_cs, e2);
3427 if (!(ss_e2 & DESC_A_MASK))
3428 ss_e2 = set_segment_accessed(new_ss, ss_e2);
3429#endif
3430 cpu_x86_load_seg_cache(env, R_SS, new_ss,
3431 get_seg_base(ss_e1, ss_e2),
3432 get_seg_limit(ss_e1, ss_e2),
3433 ss_e2);
3434 }
3435
3436 cpu_x86_load_seg_cache(env, R_CS, new_cs,
3437 get_seg_base(e1, e2),
3438 get_seg_limit(e1, e2),
3439 e2);
3440 cpu_x86_set_cpl(env, rpl);
3441 sp = new_esp;
3442#ifdef TARGET_X86_64
3443 if (env->hflags & HF_CS64_MASK)
3444 sp_mask = -1;
3445 else
3446#endif
3447 sp_mask = get_sp_mask(ss_e2);
3448
3449 /* validate data segments */
3450 validate_seg(R_ES, rpl);
3451 validate_seg(R_DS, rpl);
3452 validate_seg(R_FS, rpl);
3453 validate_seg(R_GS, rpl);
3454
3455 sp += addend;
3456 }
3457 SET_ESP(sp, sp_mask);
3458 env->eip = new_eip;
3459 if (is_iret) {
3460 /* NOTE: 'cpl' is the _old_ CPL */
3461 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
3462 if (cpl == 0)
3463#ifdef VBOX
3464 eflags_mask |= IOPL_MASK | VIF_MASK | VIP_MASK;
3465#else
3466 eflags_mask |= IOPL_MASK;
3467#endif
3468 iopl = (env->eflags >> IOPL_SHIFT) & 3;
3469 if (cpl <= iopl)
3470 eflags_mask |= IF_MASK;
3471 if (shift == 0)
3472 eflags_mask &= 0xffff;
3473 load_eflags(new_eflags, eflags_mask);
3474 }
3475 return;
3476
3477 return_to_vm86:
3478 POPL(ssp, sp, sp_mask, new_esp);
3479 POPL(ssp, sp, sp_mask, new_ss);
3480 POPL(ssp, sp, sp_mask, new_es);
3481 POPL(ssp, sp, sp_mask, new_ds);
3482 POPL(ssp, sp, sp_mask, new_fs);
3483 POPL(ssp, sp, sp_mask, new_gs);
3484
3485 /* modify processor state */
3486 load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
3487 IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
3488 load_seg_vm(R_CS, new_cs & 0xffff);
3489 cpu_x86_set_cpl(env, 3);
3490 load_seg_vm(R_SS, new_ss & 0xffff);
3491 load_seg_vm(R_ES, new_es & 0xffff);
3492 load_seg_vm(R_DS, new_ds & 0xffff);
3493 load_seg_vm(R_FS, new_fs & 0xffff);
3494 load_seg_vm(R_GS, new_gs & 0xffff);
3495
3496 env->eip = new_eip & 0xffff;
3497 ESP = new_esp;
3498}
3499
3500void helper_iret_protected(int shift, int next_eip)
3501{
3502 int tss_selector, type;
3503 uint32_t e1, e2;
3504
3505#ifdef VBOX
3506 Log(("iret (shift=%d new_eip=%#x)\n", shift, next_eip));
3507 e1 = e2 = 0; /** @todo Why do we do this? */
3508 remR3TrapClear(env->pVM);
3509#endif
3510
3511 /* specific case for TSS */
3512 if (env->eflags & NT_MASK) {
3513#ifdef TARGET_X86_64
3514 if (env->hflags & HF_LMA_MASK)
3515 {
3516#if defined(VBOX) && defined(DEBUG)
3517 Log(("eflags.NT=1 on iret in long mode\n"));
3518#endif
3519 raise_exception_err(EXCP0D_GPF, 0);
3520 }
3521#endif
3522 tss_selector = lduw_kernel(env->tr.base + 0);
3523 if (tss_selector & 4)
3524 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
3525 if (load_segment(&e1, &e2, tss_selector) != 0)
3526 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
3527 type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
3528 /* NOTE: we check both segment and busy TSS */
3529 if (type != 3)
3530 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
3531 switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
3532 } else {
3533 helper_ret_protected(shift, 1, 0);
3534 }
3535 env->hflags2 &= ~HF2_NMI_MASK;
3536}
3537
3538void helper_lret_protected(int shift, int addend)
3539{
3540 helper_ret_protected(shift, 0, addend);
3541}
3542
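/*
 * SYSENTER: raises #GP(0) if SYSENTER_CS is zero, then clears VM/IF/RF,
 * switches to CPL 0 with flat CS/SS derived from SYSENTER_CS (SS = CS + 8)
 * and loads ESP/EIP from the SYSENTER_ESP/SYSENTER_EIP MSRs.
 */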
3543void helper_sysenter(void)
3544{
3545 if (env->sysenter_cs == 0) {
3546 raise_exception_err(EXCP0D_GPF, 0);
3547 }
3548 env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
3549 cpu_x86_set_cpl(env, 0);
3550
3551#ifdef TARGET_X86_64
3552 if (env->hflags & HF_LMA_MASK) {
3553 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
3554 0, 0xffffffff,
3555 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3556 DESC_S_MASK |
3557 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
3558 } else
3559#endif
3560 {
3561 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
3562 0, 0xffffffff,
3563 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3564 DESC_S_MASK |
3565 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
3566 }
3567 cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
3568 0, 0xffffffff,
3569 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3570 DESC_S_MASK |
3571 DESC_W_MASK | DESC_A_MASK);
3572 ESP = env->sysenter_esp;
3573 EIP = env->sysenter_eip;
3574}
3575
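/*
 * SYSEXIT: only legal at CPL 0 with a nonzero SYSENTER_CS. Returns to
 * CPL 3 with flat CS/SS derived from SYSENTER_CS (+16/+24 for 32-bit,
 * +32/+40 for the 64-bit form) and loads ESP from ECX and EIP from EDX.
 */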
3576void helper_sysexit(int dflag)
3577{
3578 int cpl;
3579
3580 cpl = env->hflags & HF_CPL_MASK;
3581 if (env->sysenter_cs == 0 || cpl != 0) {
3582 raise_exception_err(EXCP0D_GPF, 0);
3583 }
3584 cpu_x86_set_cpl(env, 3);
3585#ifdef TARGET_X86_64
3586 if (dflag == 2) {
3587 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) | 3,
3588 0, 0xffffffff,
3589 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3590 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3591 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
3592 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) | 3,
3593 0, 0xffffffff,
3594 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3595 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3596 DESC_W_MASK | DESC_A_MASK);
3597 } else
3598#endif
3599 {
3600 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
3601 0, 0xffffffff,
3602 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3603 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3604 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
3605 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
3606 0, 0xffffffff,
3607 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3608 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3609 DESC_W_MASK | DESC_A_MASK);
3610 }
3611 ESP = ECX;
3612 EIP = EDX;
3613}
3614
3615#if defined(CONFIG_USER_ONLY)
3616target_ulong helper_read_crN(int reg)
3617{
3618 return 0;
3619}
3620
3621void helper_write_crN(int reg, target_ulong t0)
3622{
3623}
3624
3625void helper_movl_drN_T0(int reg, target_ulong t0)
3626{
3627}
3628#else
3629target_ulong helper_read_crN(int reg)
3630{
3631 target_ulong val;
3632
3633 helper_svm_check_intercept_param(SVM_EXIT_READ_CR0 + reg, 0);
3634 switch(reg) {
3635 default:
3636 val = env->cr[reg];
3637 break;
3638 case 8:
3639 if (!(env->hflags2 & HF2_VINTR_MASK)) {
3640#ifndef VBOX
3641 val = cpu_get_apic_tpr(env->apic_state);
3642#else /* VBOX */
3643 val = cpu_get_apic_tpr(env);
3644#endif /* VBOX */
3645 } else {
3646 val = env->v_tpr;
3647 }
3648 break;
3649 }
3650 return val;
3651}
3652
3653void helper_write_crN(int reg, target_ulong t0)
3654{
3655 helper_svm_check_intercept_param(SVM_EXIT_WRITE_CR0 + reg, 0);
3656 switch(reg) {
3657 case 0:
3658 cpu_x86_update_cr0(env, t0);
3659 break;
3660 case 3:
3661 cpu_x86_update_cr3(env, t0);
3662 break;
3663 case 4:
3664 cpu_x86_update_cr4(env, t0);
3665 break;
3666 case 8:
3667 if (!(env->hflags2 & HF2_VINTR_MASK)) {
3668#ifndef VBOX
3669 cpu_set_apic_tpr(env->apic_state, t0);
3670#else /* VBOX */
3671 cpu_set_apic_tpr(env, t0);
3672#endif /* VBOX */
3673 }
3674 env->v_tpr = t0 & 0x0f;
3675 break;
3676 default:
3677 env->cr[reg] = t0;
3678 break;
3679 }
3680}
3681
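/*
 * MOV to a debug register. Writes to DR0-DR3 re-arm the corresponding
 * hardware breakpoint; a DR7 write removes and re-inserts all four. The
 * VBOX build additionally treats DR4/DR5 as aliases of DR6/DR7, raises
 * #GP(0) for must-be-zero bits and forces the reserved-one bits.
 */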
3682void helper_movl_drN_T0(int reg, target_ulong t0)
3683{
3684 int i;
3685
3686 if (reg < 4) {
3687 hw_breakpoint_remove(env, reg);
3688 env->dr[reg] = t0;
3689 hw_breakpoint_insert(env, reg);
3690# ifndef VBOX
3691 } else if (reg == 7) {
3692# else
3693 } else if (reg == 7 || reg == 5) { /* (DR5 is an alias for DR7.) */
3694 if (t0 & X86_DR7_MBZ_MASK)
3695 raise_exception_err(EXCP0D_GPF, 0);
3696 t0 |= X86_DR7_RA1_MASK;
3697 t0 &= ~X86_DR7_RAZ_MASK;
3698# endif
3699 for (i = 0; i < 4; i++)
3700 hw_breakpoint_remove(env, i);
3701 env->dr[7] = t0;
3702 for (i = 0; i < 4; i++)
3703 hw_breakpoint_insert(env, i);
3704 } else {
3705# ifndef VBOX
3706 env->dr[reg] = t0;
3707# else
3708 if (t0 & X86_DR6_MBZ_MASK)
3709 raise_exception_err(EXCP0D_GPF, 0);
3710 t0 |= X86_DR6_RA1_MASK;
3711 t0 &= ~X86_DR6_RAZ_MASK;
3712 env->dr[6] = t0; /* (DR4 is an alias for DR6.) */
3713# endif
3714 }
3715}
3716#endif
3717
3718void helper_lmsw(target_ulong t0)
3719{
3720 /* Only the 4 lower bits of CR0 are modified. PE cannot be cleared
3721 once it has been set. */
3722 t0 = (env->cr[0] & ~0xe) | (t0 & 0xf);
3723 helper_write_crN(0, t0);
3724}
3725
3726void helper_clts(void)
3727{
3728 env->cr[0] &= ~CR0_TS_MASK;
3729 env->hflags &= ~HF_TS_MASK;
3730}
3731
3732void helper_invlpg(target_ulong addr)
3733{
3734 helper_svm_check_intercept_param(SVM_EXIT_INVLPG, 0);
3735 tlb_flush_page(env, addr);
3736}
3737
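/*
 * RDTSC: #GP if CR4.TSD is set and CPL != 0; the value returned is
 * cpu_get_tsc(env) adjusted by the (SVM) TSC offset.
 */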
3738void helper_rdtsc(void)
3739{
3740 uint64_t val;
3741
3742 if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3743 raise_exception(EXCP0D_GPF);
3744 }
3745 helper_svm_check_intercept_param(SVM_EXIT_RDTSC, 0);
3746
3747 val = cpu_get_tsc(env) + env->tsc_offset;
3748 EAX = (uint32_t)(val);
3749 EDX = (uint32_t)(val >> 32);
3750}
3751
3752void helper_rdtscp(void)
3753{
3754 helper_rdtsc();
3755#ifndef VBOX
3756 ECX = (uint32_t)(env->tsc_aux);
3757#else /* VBOX */
3758 uint64_t val;
3759 if (cpu_rdmsr(env, MSR_K8_TSC_AUX, &val) == 0)
3760 ECX = (uint32_t)(val);
3761 else
3762 ECX = 0;
3763#endif /* VBOX */
3764}
3765
3766void helper_rdpmc(void)
3767{
3768#ifdef VBOX
3769 /* If X86_CR4_PCE is *not* set, then CPL must be zero. */
3770 if (!(env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3771 raise_exception(EXCP0D_GPF);
3772 }
3773 /* Just return zero here; rather tricky to properly emulate this, especially as the specs are a mess. */
3774 EAX = 0;
3775 EDX = 0;
3776#else /* !VBOX */
3777 if ((env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3778 raise_exception(EXCP0D_GPF);
3779 }
3780 helper_svm_check_intercept_param(SVM_EXIT_RDPMC, 0);
3781
3782 /* currently unimplemented */
3783 raise_exception_err(EXCP06_ILLOP, 0);
3784#endif /* !VBOX */
3785}
3786
3787#if defined(CONFIG_USER_ONLY)
3788void helper_wrmsr(void)
3789{
3790}
3791
3792void helper_rdmsr(void)
3793{
3794}
3795#else
3796void helper_wrmsr(void)
3797{
3798 uint64_t val;
3799
3800 helper_svm_check_intercept_param(SVM_EXIT_MSR, 1);
3801
3802 val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
3803
3804 switch((uint32_t)ECX) {
3805 case MSR_IA32_SYSENTER_CS:
3806 env->sysenter_cs = val & 0xffff;
3807 break;
3808 case MSR_IA32_SYSENTER_ESP:
3809 env->sysenter_esp = val;
3810 break;
3811 case MSR_IA32_SYSENTER_EIP:
3812 env->sysenter_eip = val;
3813 break;
3814 case MSR_IA32_APICBASE:
3815# ifndef VBOX /* The CPUMSetGuestMsr call below does this now. */
3816 cpu_set_apic_base(env->apic_state, val);
3817# endif
3818 break;
3819 case MSR_EFER:
3820 {
3821 uint64_t update_mask;
3822 update_mask = 0;
3823 if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL)
3824 update_mask |= MSR_EFER_SCE;
3825 if (env->cpuid_ext2_features & CPUID_EXT2_LM)
3826 update_mask |= MSR_EFER_LME;
3827 if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
3828 update_mask |= MSR_EFER_FFXSR;
3829 if (env->cpuid_ext2_features & CPUID_EXT2_NX)
3830 update_mask |= MSR_EFER_NXE;
3831 if (env->cpuid_ext3_features & CPUID_EXT3_SVM)
3832 update_mask |= MSR_EFER_SVME;
3833 if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
3834 update_mask |= MSR_EFER_FFXSR;
3835 cpu_load_efer(env, (env->efer & ~update_mask) |
3836 (val & update_mask));
3837 }
3838 break;
3839 case MSR_STAR:
3840 env->star = val;
3841 break;
3842 case MSR_PAT:
3843 env->pat = val;
3844 break;
3845 case MSR_VM_HSAVE_PA:
3846 env->vm_hsave = val;
3847 break;
3848#ifdef TARGET_X86_64
3849 case MSR_LSTAR:
3850 env->lstar = val;
3851 break;
3852 case MSR_CSTAR:
3853 env->cstar = val;
3854 break;
3855 case MSR_FMASK:
3856 env->fmask = val;
3857 break;
3858 case MSR_FSBASE:
3859 env->segs[R_FS].base = val;
3860 break;
3861 case MSR_GSBASE:
3862 env->segs[R_GS].base = val;
3863 break;
3864 case MSR_KERNELGSBASE:
3865 env->kernelgsbase = val;
3866 break;
3867#endif
3868# ifndef VBOX
3869 case MSR_MTRRphysBase(0):
3870 case MSR_MTRRphysBase(1):
3871 case MSR_MTRRphysBase(2):
3872 case MSR_MTRRphysBase(3):
3873 case MSR_MTRRphysBase(4):
3874 case MSR_MTRRphysBase(5):
3875 case MSR_MTRRphysBase(6):
3876 case MSR_MTRRphysBase(7):
3877 env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base = val;
3878 break;
3879 case MSR_MTRRphysMask(0):
3880 case MSR_MTRRphysMask(1):
3881 case MSR_MTRRphysMask(2):
3882 case MSR_MTRRphysMask(3):
3883 case MSR_MTRRphysMask(4):
3884 case MSR_MTRRphysMask(5):
3885 case MSR_MTRRphysMask(6):
3886 case MSR_MTRRphysMask(7):
3887 env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask = val;
3888 break;
3889 case MSR_MTRRfix64K_00000:
3890 env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix64K_00000] = val;
3891 break;
3892 case MSR_MTRRfix16K_80000:
3893 case MSR_MTRRfix16K_A0000:
3894 env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1] = val;
3895 break;
3896 case MSR_MTRRfix4K_C0000:
3897 case MSR_MTRRfix4K_C8000:
3898 case MSR_MTRRfix4K_D0000:
3899 case MSR_MTRRfix4K_D8000:
3900 case MSR_MTRRfix4K_E0000:
3901 case MSR_MTRRfix4K_E8000:
3902 case MSR_MTRRfix4K_F0000:
3903 case MSR_MTRRfix4K_F8000:
3904 env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3] = val;
3905 break;
3906 case MSR_MTRRdefType:
3907 env->mtrr_deftype = val;
3908 break;
3909 case MSR_MCG_STATUS:
3910 env->mcg_status = val;
3911 break;
3912 case MSR_MCG_CTL:
3913 if ((env->mcg_cap & MCG_CTL_P)
3914 && (val == 0 || val == ~(uint64_t)0))
3915 env->mcg_ctl = val;
3916 break;
3917 case MSR_TSC_AUX:
3918 env->tsc_aux = val;
3919 break;
3920# endif /* !VBOX */
3921 default:
3922# ifndef VBOX
3923 if ((uint32_t)ECX >= MSR_MC0_CTL
3924 && (uint32_t)ECX < MSR_MC0_CTL + (4 * (env->mcg_cap & 0xff))) {
3925 uint32_t offset = (uint32_t)ECX - MSR_MC0_CTL;
3926 if ((offset & 0x3) != 0
3927 || (val == 0 || val == ~(uint64_t)0))
3928 env->mce_banks[offset] = val;
3929 break;
3930 }
3931 /* XXX: exception ? */
3932# endif
3933 break;
3934 }
3935
3936# ifdef VBOX
3937 /* call CPUM. */
3938 if (cpu_wrmsr(env, (uint32_t)ECX, val) != 0)
3939 {
3940 /** @todo be a brave man and raise a \#GP(0) here as we should... */
3941 }
3942# endif
3943}
3944
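/* RDMSR: ECX selects the MSR and the 64-bit result is returned in EDX:EAX; MSRs not handled here are forwarded to CPUM via cpu_rdmsr() in the VBox build. */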
3945void helper_rdmsr(void)
3946{
3947 uint64_t val;
3948
3949 helper_svm_check_intercept_param(SVM_EXIT_MSR, 0);
3950
3951 switch((uint32_t)ECX) {
3952 case MSR_IA32_SYSENTER_CS:
3953 val = env->sysenter_cs;
3954 break;
3955 case MSR_IA32_SYSENTER_ESP:
3956 val = env->sysenter_esp;
3957 break;
3958 case MSR_IA32_SYSENTER_EIP:
3959 val = env->sysenter_eip;
3960 break;
3961 case MSR_IA32_APICBASE:
3962#ifndef VBOX
3963 val = cpu_get_apic_base(env->apic_state);
3964#else /* VBOX */
3965 val = cpu_get_apic_base(env);
3966#endif /* VBOX */
3967 break;
3968 case MSR_EFER:
3969 val = env->efer;
3970 break;
3971 case MSR_STAR:
3972 val = env->star;
3973 break;
3974 case MSR_PAT:
3975 val = env->pat;
3976 break;
3977 case MSR_VM_HSAVE_PA:
3978 val = env->vm_hsave;
3979 break;
3980# ifndef VBOX /* forward to CPUMQueryGuestMsr. */
3981 case MSR_IA32_PERF_STATUS:
3982 /* tsc_increment_by_tick */
3983 val = 1000ULL;
3984 /* CPU multiplier */
3985 val |= (((uint64_t)4ULL) << 40);
3986 break;
3987# endif /* !VBOX */
3988#ifdef TARGET_X86_64
3989 case MSR_LSTAR:
3990 val = env->lstar;
3991 break;
3992 case MSR_CSTAR:
3993 val = env->cstar;
3994 break;
3995 case MSR_FMASK:
3996 val = env->fmask;
3997 break;
3998 case MSR_FSBASE:
3999 val = env->segs[R_FS].base;
4000 break;
4001 case MSR_GSBASE:
4002 val = env->segs[R_GS].base;
4003 break;
4004 case MSR_KERNELGSBASE:
4005 val = env->kernelgsbase;
4006 break;
4007# ifndef VBOX
4008 case MSR_TSC_AUX:
4009 val = env->tsc_aux;
4010 break;
4011# endif /*!VBOX*/
4012#endif
4013# ifndef VBOX
4014 case MSR_MTRRphysBase(0):
4015 case MSR_MTRRphysBase(1):
4016 case MSR_MTRRphysBase(2):
4017 case MSR_MTRRphysBase(3):
4018 case MSR_MTRRphysBase(4):
4019 case MSR_MTRRphysBase(5):
4020 case MSR_MTRRphysBase(6):
4021 case MSR_MTRRphysBase(7):
4022 val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base;
4023 break;
4024 case MSR_MTRRphysMask(0):
4025 case MSR_MTRRphysMask(1):
4026 case MSR_MTRRphysMask(2):
4027 case MSR_MTRRphysMask(3):
4028 case MSR_MTRRphysMask(4):
4029 case MSR_MTRRphysMask(5):
4030 case MSR_MTRRphysMask(6):
4031 case MSR_MTRRphysMask(7):
4032 val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask;
4033 break;
4034 case MSR_MTRRfix64K_00000:
4035 val = env->mtrr_fixed[0];
4036 break;
4037 case MSR_MTRRfix16K_80000:
4038 case MSR_MTRRfix16K_A0000:
4039 val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1];
4040 break;
4041 case MSR_MTRRfix4K_C0000:
4042 case MSR_MTRRfix4K_C8000:
4043 case MSR_MTRRfix4K_D0000:
4044 case MSR_MTRRfix4K_D8000:
4045 case MSR_MTRRfix4K_E0000:
4046 case MSR_MTRRfix4K_E8000:
4047 case MSR_MTRRfix4K_F0000:
4048 case MSR_MTRRfix4K_F8000:
4049 val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3];
4050 break;
4051 case MSR_MTRRdefType:
4052 val = env->mtrr_deftype;
4053 break;
4054 case MSR_MTRRcap:
4055 if (env->cpuid_features & CPUID_MTRR)
4056 val = MSR_MTRRcap_VCNT | MSR_MTRRcap_FIXRANGE_SUPPORT | MSR_MTRRcap_WC_SUPPORTED;
4057 else
4058 /* XXX: exception ? */
4059 val = 0;
4060 break;
4061 case MSR_MCG_CAP:
4062 val = env->mcg_cap;
4063 break;
4064 case MSR_MCG_CTL:
4065 if (env->mcg_cap & MCG_CTL_P)
4066 val = env->mcg_ctl;
4067 else
4068 val = 0;
4069 break;
4070 case MSR_MCG_STATUS:
4071 val = env->mcg_status;
4072 break;
4073# endif /* !VBOX */
4074 default:
4075# ifndef VBOX
4076 if ((uint32_t)ECX >= MSR_MC0_CTL
4077 && (uint32_t)ECX < MSR_MC0_CTL + (4 * (env->mcg_cap & 0xff))) {
4078 uint32_t offset = (uint32_t)ECX - MSR_MC0_CTL;
4079 val = env->mce_banks[offset];
4080 break;
4081 }
4082 /* XXX: exception ? */
4083 val = 0;
4084# else /* VBOX */
4085 if (cpu_rdmsr(env, (uint32_t)ECX, &val) != 0)
4086 {
4087 /** @todo be a brave man and raise a \#GP(0) here as we should... */
4088 val = 0;
4089 }
4090# endif /* VBOX */
4091 break;
4092 }
4093 EAX = (uint32_t)(val);
4094 EDX = (uint32_t)(val >> 32);
4095
4096# ifdef VBOX_STRICT
4097 if ((uint32_t)ECX != MSR_IA32_TSC) {
4098 if (cpu_rdmsr(env, (uint32_t)ECX, &val) != 0)
4099 val = 0;
4100 AssertMsg(val == RT_MAKE_U64(EAX, EDX), ("idMsr=%#x val=%#llx eax:edx=%#llx\n", (uint32_t)ECX, val, RT_MAKE_U64(EAX, EDX)));
4101 }
4102# endif
4103}
4104#endif
4105
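/* LSL: return the segment limit for the given selector and set ZF on success; ZF is cleared (and 0 returned) if the selector or descriptor type is not valid at the current CPL/RPL. */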
4106target_ulong helper_lsl(target_ulong selector1)
4107{
4108 unsigned int limit;
4109 uint32_t e1, e2, eflags, selector;
4110 int rpl, dpl, cpl, type;
4111
4112 selector = selector1 & 0xffff;
4113 eflags = helper_cc_compute_all(CC_OP);
4114 if ((selector & 0xfffc) == 0)
4115 goto fail;
4116 if (load_segment(&e1, &e2, selector) != 0)
4117 goto fail;
4118 rpl = selector & 3;
4119 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
4120 cpl = env->hflags & HF_CPL_MASK;
4121 if (e2 & DESC_S_MASK) {
4122 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
4123 /* conforming */
4124 } else {
4125 if (dpl < cpl || dpl < rpl)
4126 goto fail;
4127 }
4128 } else {
4129 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
4130 switch(type) {
4131 case 1:
4132 case 2:
4133 case 3:
4134 case 9:
4135 case 11:
4136 break;
4137 default:
4138 goto fail;
4139 }
4140 if (dpl < cpl || dpl < rpl) {
4141 fail:
4142 CC_SRC = eflags & ~CC_Z;
4143 return 0;
4144 }
4145 }
4146 limit = get_seg_limit(e1, e2);
4147 CC_SRC = eflags | CC_Z;
4148 return limit;
4149}
4150
4151target_ulong helper_lar(target_ulong selector1)
4152{
4153 uint32_t e1, e2, eflags, selector;
4154 int rpl, dpl, cpl, type;
4155
4156 selector = selector1 & 0xffff;
4157 eflags = helper_cc_compute_all(CC_OP);
4158 if ((selector & 0xfffc) == 0)
4159 goto fail;
4160 if (load_segment(&e1, &e2, selector) != 0)
4161 goto fail;
4162 rpl = selector & 3;
4163 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
4164 cpl = env->hflags & HF_CPL_MASK;
4165 if (e2 & DESC_S_MASK) {
4166 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
4167 /* conforming */
4168 } else {
4169 if (dpl < cpl || dpl < rpl)
4170 goto fail;
4171 }
4172 } else {
4173 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
4174 switch(type) {
4175 case 1:
4176 case 2:
4177 case 3:
4178 case 4:
4179 case 5:
4180 case 9:
4181 case 11:
4182 case 12:
4183 break;
4184 default:
4185 goto fail;
4186 }
4187 if (dpl < cpl || dpl < rpl) {
4188 fail:
4189 CC_SRC = eflags & ~CC_Z;
4190 return 0;
4191 }
4192 }
4193 CC_SRC = eflags | CC_Z;
4194#ifdef VBOX /* AMD says 0x00ffff00, while Intel says 0x00fxff00. Bochs and IEM do as AMD says (x=f). */
4195 return e2 & 0x00ffff00;
4196#else
4197 return e2 & 0x00f0ff00;
4198#endif
4199}
4200
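/* VERR/VERW: set ZF if the segment described by the selector is readable/writable at the current CPL and RPL, otherwise clear ZF. */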
4201void helper_verr(target_ulong selector1)
4202{
4203 uint32_t e1, e2, eflags, selector;
4204 int rpl, dpl, cpl;
4205
4206 selector = selector1 & 0xffff;
4207 eflags = helper_cc_compute_all(CC_OP);
4208 if ((selector & 0xfffc) == 0)
4209 goto fail;
4210 if (load_segment(&e1, &e2, selector) != 0)
4211 goto fail;
4212 if (!(e2 & DESC_S_MASK))
4213 goto fail;
4214 rpl = selector & 3;
4215 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
4216 cpl = env->hflags & HF_CPL_MASK;
4217 if (e2 & DESC_CS_MASK) {
4218 if (!(e2 & DESC_R_MASK))
4219 goto fail;
4220 if (!(e2 & DESC_C_MASK)) {
4221 if (dpl < cpl || dpl < rpl)
4222 goto fail;
4223 }
4224 } else {
4225 if (dpl < cpl || dpl < rpl) {
4226 fail:
4227 CC_SRC = eflags & ~CC_Z;
4228 return;
4229 }
4230 }
4231 CC_SRC = eflags | CC_Z;
4232}
4233
4234void helper_verw(target_ulong selector1)
4235{
4236 uint32_t e1, e2, eflags, selector;
4237 int rpl, dpl, cpl;
4238
4239 selector = selector1 & 0xffff;
4240 eflags = helper_cc_compute_all(CC_OP);
4241 if ((selector & 0xfffc) == 0)
4242 goto fail;
4243 if (load_segment(&e1, &e2, selector) != 0)
4244 goto fail;
4245 if (!(e2 & DESC_S_MASK))
4246 goto fail;
4247 rpl = selector & 3;
4248 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
4249 cpl = env->hflags & HF_CPL_MASK;
4250 if (e2 & DESC_CS_MASK) {
4251 goto fail;
4252 } else {
4253 if (dpl < cpl || dpl < rpl)
4254 goto fail;
4255 if (!(e2 & DESC_W_MASK)) {
4256 fail:
4257 CC_SRC = eflags & ~CC_Z;
4258 return;
4259 }
4260 }
4261 CC_SRC = eflags | CC_Z;
4262}
4263
4264/* x87 FPU helpers */
4265
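/* Set the given exception bits in the FPU status word; if any of them is unmasked in the control word, also set the summary and busy flags. */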
4266static void fpu_set_exception(int mask)
4267{
4268 env->fpus |= mask;
4269 if (env->fpus & (~env->fpuc & FPUC_EM))
4270 env->fpus |= FPUS_SE | FPUS_B;
4271}
4272
4273static inline CPU86_LDouble helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
4274{
4275 if (b == 0.0)
4276 fpu_set_exception(FPUS_ZE);
4277 return a / b;
4278}
4279
4280static void fpu_raise_exception(void)
4281{
4282 if (env->cr[0] & CR0_NE_MASK) {
4283 raise_exception(EXCP10_COPR);
4284 }
4285#if !defined(CONFIG_USER_ONLY)
4286 else {
4287 cpu_set_ferr(env);
4288 }
4289#endif
4290}
4291
4292void helper_flds_FT0(uint32_t val)
4293{
4294 union {
4295 float32 f;
4296 uint32_t i;
4297 } u;
4298 u.i = val;
4299 FT0 = float32_to_floatx(u.f, &env->fp_status);
4300}
4301
4302void helper_fldl_FT0(uint64_t val)
4303{
4304 union {
4305 float64 f;
4306 uint64_t i;
4307 } u;
4308 u.i = val;
4309 FT0 = float64_to_floatx(u.f, &env->fp_status);
4310}
4311
4312void helper_fildl_FT0(int32_t val)
4313{
4314 FT0 = int32_to_floatx(val, &env->fp_status);
4315}
4316
4317void helper_flds_ST0(uint32_t val)
4318{
4319 int new_fpstt;
4320 union {
4321 float32 f;
4322 uint32_t i;
4323 } u;
4324 new_fpstt = (env->fpstt - 1) & 7;
4325 u.i = val;
4326 env->fpregs[new_fpstt].d = float32_to_floatx(u.f, &env->fp_status);
4327 env->fpstt = new_fpstt;
4328 env->fptags[new_fpstt] = 0; /* validate stack entry */
4329}
4330
4331void helper_fldl_ST0(uint64_t val)
4332{
4333 int new_fpstt;
4334 union {
4335 float64 f;
4336 uint64_t i;
4337 } u;
4338 new_fpstt = (env->fpstt - 1) & 7;
4339 u.i = val;
4340 env->fpregs[new_fpstt].d = float64_to_floatx(u.f, &env->fp_status);
4341 env->fpstt = new_fpstt;
4342 env->fptags[new_fpstt] = 0; /* validate stack entry */
4343}
4344
4345void helper_fildl_ST0(int32_t val)
4346{
4347 int new_fpstt;
4348 new_fpstt = (env->fpstt - 1) & 7;
4349 env->fpregs[new_fpstt].d = int32_to_floatx(val, &env->fp_status);
4350 env->fpstt = new_fpstt;
4351 env->fptags[new_fpstt] = 0; /* validate stack entry */
4352}
4353
4354void helper_fildll_ST0(int64_t val)
4355{
4356 int new_fpstt;
4357 new_fpstt = (env->fpstt - 1) & 7;
4358 env->fpregs[new_fpstt].d = int64_to_floatx(val, &env->fp_status);
4359 env->fpstt = new_fpstt;
4360 env->fptags[new_fpstt] = 0; /* validate stack entry */
4361}
4362
4363#ifndef VBOX
4364uint32_t helper_fsts_ST0(void)
4365#else
4366RTCCUINTREG helper_fsts_ST0(void)
4367#endif
4368{
4369 union {
4370 float32 f;
4371 uint32_t i;
4372 } u;
4373 u.f = floatx_to_float32(ST0, &env->fp_status);
4374 return u.i;
4375}
4376
4377uint64_t helper_fstl_ST0(void)
4378{
4379 union {
4380 float64 f;
4381 uint64_t i;
4382 } u;
4383 u.f = floatx_to_float64(ST0, &env->fp_status);
4384 return u.i;
4385}
4386
4387#ifndef VBOX
4388int32_t helper_fist_ST0(void)
4389#else
4390RTCCINTREG helper_fist_ST0(void)
4391#endif
4392{
4393 int32_t val;
4394 val = floatx_to_int32(ST0, &env->fp_status);
4395 if (val != (int16_t)val)
4396 val = -32768;
4397 return val;
4398}
4399
4400#ifndef VBOX
4401int32_t helper_fistl_ST0(void)
4402#else
4403RTCCINTREG helper_fistl_ST0(void)
4404#endif
4405{
4406 int32_t val;
4407 val = floatx_to_int32(ST0, &env->fp_status);
4408 return val;
4409}
4410
4411int64_t helper_fistll_ST0(void)
4412{
4413 int64_t val;
4414 val = floatx_to_int64(ST0, &env->fp_status);
4415 return val;
4416}
4417
4418#ifndef VBOX
4419int32_t helper_fistt_ST0(void)
4420#else
4421RTCCINTREG helper_fistt_ST0(void)
4422#endif
4423{
4424 int32_t val;
4425 val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
4426 if (val != (int16_t)val)
4427 val = -32768;
4428 return val;
4429}
4430
4431#ifndef VBOX
4432int32_t helper_fisttl_ST0(void)
4433#else
4434RTCCINTREG helper_fisttl_ST0(void)
4435#endif
4436{
4437 int32_t val;
4438 val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
4439 return val;
4440}
4441
4442int64_t helper_fisttll_ST0(void)
4443{
4444 int64_t val;
4445 val = floatx_to_int64_round_to_zero(ST0, &env->fp_status);
4446 return val;
4447}
4448
4449void helper_fldt_ST0(target_ulong ptr)
4450{
4451 int new_fpstt;
4452 new_fpstt = (env->fpstt - 1) & 7;
4453 env->fpregs[new_fpstt].d = helper_fldt(ptr);
4454 env->fpstt = new_fpstt;
4455 env->fptags[new_fpstt] = 0; /* validate stack entry */
4456}
4457
4458void helper_fstt_ST0(target_ulong ptr)
4459{
4460 helper_fstt(ST0, ptr);
4461}
4462
4463void helper_fpush(void)
4464{
4465 fpush();
4466}
4467
4468void helper_fpop(void)
4469{
4470 fpop();
4471}
4472
4473void helper_fdecstp(void)
4474{
4475 env->fpstt = (env->fpstt - 1) & 7;
4476 env->fpus &= (~0x4700);
4477}
4478
4479void helper_fincstp(void)
4480{
4481 env->fpstt = (env->fpstt + 1) & 7;
4482 env->fpus &= (~0x4700);
4483}
4484
4485/* FPU move */
4486
4487void helper_ffree_STN(int st_index)
4488{
4489 env->fptags[(env->fpstt + st_index) & 7] = 1;
4490}
4491
4492void helper_fmov_ST0_FT0(void)
4493{
4494 ST0 = FT0;
4495}
4496
4497void helper_fmov_FT0_STN(int st_index)
4498{
4499 FT0 = ST(st_index);
4500}
4501
4502void helper_fmov_ST0_STN(int st_index)
4503{
4504 ST0 = ST(st_index);
4505}
4506
4507void helper_fmov_STN_ST0(int st_index)
4508{
4509 ST(st_index) = ST0;
4510}
4511
4512void helper_fxchg_ST0_STN(int st_index)
4513{
4514 CPU86_LDouble tmp;
4515 tmp = ST(st_index);
4516 ST(st_index) = ST0;
4517 ST0 = tmp;
4518}
4519
4520/* FPU operations */
4521
4522static const int fcom_ccval[4] = {0x0100, 0x4000, 0x0000, 0x4500};
4523
4524void helper_fcom_ST0_FT0(void)
4525{
4526 int ret;
4527
4528 ret = floatx_compare(ST0, FT0, &env->fp_status);
4529 env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
4530}
4531
4532void helper_fucom_ST0_FT0(void)
4533{
4534 int ret;
4535
4536 ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
4537 env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
4538}
4539
4540static const int fcomi_ccval[4] = {CC_C, CC_Z, 0, CC_Z | CC_P | CC_C};
4541
4542void helper_fcomi_ST0_FT0(void)
4543{
4544 int eflags;
4545 int ret;
4546
4547 ret = floatx_compare(ST0, FT0, &env->fp_status);
4548 eflags = helper_cc_compute_all(CC_OP);
4549 eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
4550 CC_SRC = eflags;
4551}
4552
4553void helper_fucomi_ST0_FT0(void)
4554{
4555 int eflags;
4556 int ret;
4557
4558 ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
4559 eflags = helper_cc_compute_all(CC_OP);
4560 eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
4561 CC_SRC = eflags;
4562}
4563
4564void helper_fadd_ST0_FT0(void)
4565{
4566 ST0 += FT0;
4567}
4568
4569void helper_fmul_ST0_FT0(void)
4570{
4571 ST0 *= FT0;
4572}
4573
4574void helper_fsub_ST0_FT0(void)
4575{
4576 ST0 -= FT0;
4577}
4578
4579void helper_fsubr_ST0_FT0(void)
4580{
4581 ST0 = FT0 - ST0;
4582}
4583
4584void helper_fdiv_ST0_FT0(void)
4585{
4586 ST0 = helper_fdiv(ST0, FT0);
4587}
4588
4589void helper_fdivr_ST0_FT0(void)
4590{
4591 ST0 = helper_fdiv(FT0, ST0);
4592}
4593
4594/* fp operations between STN and ST0 */
4595
4596void helper_fadd_STN_ST0(int st_index)
4597{
4598 ST(st_index) += ST0;
4599}
4600
4601void helper_fmul_STN_ST0(int st_index)
4602{
4603 ST(st_index) *= ST0;
4604}
4605
4606void helper_fsub_STN_ST0(int st_index)
4607{
4608 ST(st_index) -= ST0;
4609}
4610
4611void helper_fsubr_STN_ST0(int st_index)
4612{
4613 CPU86_LDouble *p;
4614 p = &ST(st_index);
4615 *p = ST0 - *p;
4616}
4617
4618void helper_fdiv_STN_ST0(int st_index)
4619{
4620 CPU86_LDouble *p;
4621 p = &ST(st_index);
4622 *p = helper_fdiv(*p, ST0);
4623}
4624
4625void helper_fdivr_STN_ST0(int st_index)
4626{
4627 CPU86_LDouble *p;
4628 p = &ST(st_index);
4629 *p = helper_fdiv(ST0, *p);
4630}
4631
4632/* misc FPU operations */
4633void helper_fchs_ST0(void)
4634{
4635 ST0 = floatx_chs(ST0);
4636}
4637
4638void helper_fabs_ST0(void)
4639{
4640 ST0 = floatx_abs(ST0);
4641}
4642
4643void helper_fld1_ST0(void)
4644{
4645 ST0 = f15rk[1];
4646}
4647
4648void helper_fldl2t_ST0(void)
4649{
4650 ST0 = f15rk[6];
4651}
4652
4653void helper_fldl2e_ST0(void)
4654{
4655 ST0 = f15rk[5];
4656}
4657
4658void helper_fldpi_ST0(void)
4659{
4660 ST0 = f15rk[2];
4661}
4662
4663void helper_fldlg2_ST0(void)
4664{
4665 ST0 = f15rk[3];
4666}
4667
4668void helper_fldln2_ST0(void)
4669{
4670 ST0 = f15rk[4];
4671}
4672
4673void helper_fldz_ST0(void)
4674{
4675 ST0 = f15rk[0];
4676}
4677
4678void helper_fldz_FT0(void)
4679{
4680 FT0 = f15rk[0];
4681}
4682
4683#ifndef VBOX
4684uint32_t helper_fnstsw(void)
4685#else
4686RTCCUINTREG helper_fnstsw(void)
4687#endif
4688{
4689 return (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
4690}
4691
4692#ifndef VBOX
4693uint32_t helper_fnstcw(void)
4694#else
4695RTCCUINTREG helper_fnstcw(void)
4696#endif
4697{
4698 return env->fpuc;
4699}
4700
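/* Propagate the x87 control word (rounding mode and, with FLOATX80, precision control) into the softfloat status. */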
4701static void update_fp_status(void)
4702{
4703 int rnd_type;
4704
4705 /* set rounding mode */
4706 switch(env->fpuc & RC_MASK) {
4707 default:
4708 case RC_NEAR:
4709 rnd_type = float_round_nearest_even;
4710 break;
4711 case RC_DOWN:
4712 rnd_type = float_round_down;
4713 break;
4714 case RC_UP:
4715 rnd_type = float_round_up;
4716 break;
4717 case RC_CHOP:
4718 rnd_type = float_round_to_zero;
4719 break;
4720 }
4721 set_float_rounding_mode(rnd_type, &env->fp_status);
4722#ifdef FLOATX80
4723 switch((env->fpuc >> 8) & 3) {
4724 case 0:
4725 rnd_type = 32;
4726 break;
4727 case 2:
4728 rnd_type = 64;
4729 break;
4730 case 3:
4731 default:
4732 rnd_type = 80;
4733 break;
4734 }
4735 set_floatx80_rounding_precision(rnd_type, &env->fp_status);
4736#endif
4737}
4738
4739void helper_fldcw(uint32_t val)
4740{
4741 env->fpuc = val;
4742 update_fp_status();
4743}
4744
4745void helper_fclex(void)
4746{
4747 env->fpus &= 0x7f00;
4748}
4749
4750void helper_fwait(void)
4751{
4752 if (env->fpus & FPUS_SE)
4753 fpu_raise_exception();
4754}
4755
4756void helper_fninit(void)
4757{
4758 env->fpus = 0;
4759 env->fpstt = 0;
4760 env->fpuc = 0x37f;
4761 env->fptags[0] = 1;
4762 env->fptags[1] = 1;
4763 env->fptags[2] = 1;
4764 env->fptags[3] = 1;
4765 env->fptags[4] = 1;
4766 env->fptags[5] = 1;
4767 env->fptags[6] = 1;
4768 env->fptags[7] = 1;
4769}
4770
4771/* BCD ops */
4772
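/* FBLD: load an 18-digit packed BCD value (two digits per byte, sign in bit 7 of byte 9) and push it onto the FPU stack. */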
4773void helper_fbld_ST0(target_ulong ptr)
4774{
4775 CPU86_LDouble tmp;
4776 uint64_t val;
4777 unsigned int v;
4778 int i;
4779
4780 val = 0;
4781 for(i = 8; i >= 0; i--) {
4782 v = ldub(ptr + i);
4783 val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
4784 }
4785 tmp = val;
4786 if (ldub(ptr + 9) & 0x80)
4787 tmp = -tmp;
4788 fpush();
4789 ST0 = tmp;
4790}
4791
4792void helper_fbst_ST0(target_ulong ptr)
4793{
4794 int v;
4795 target_ulong mem_ref, mem_end;
4796 int64_t val;
4797
4798 val = floatx_to_int64(ST0, &env->fp_status);
4799 mem_ref = ptr;
4800 mem_end = mem_ref + 9;
4801 if (val < 0) {
4802 stb(mem_end, 0x80);
4803 val = -val;
4804 } else {
4805 stb(mem_end, 0x00);
4806 }
4807 while (mem_ref < mem_end) {
4808 if (val == 0)
4809 break;
4810 v = val % 100;
4811 val = val / 100;
4812 v = ((v / 10) << 4) | (v % 10);
4813 stb(mem_ref++, v);
4814 }
4815 while (mem_ref < mem_end) {
4816 stb(mem_ref++, 0);
4817 }
4818}
4819
4820void helper_f2xm1(void)
4821{
4822 ST0 = pow(2.0,ST0) - 1.0;
4823}
4824
4825void helper_fyl2x(void)
4826{
4827 CPU86_LDouble fptemp;
4828
4829 fptemp = ST0;
4830 if (fptemp>0.0){
4831 fptemp = log(fptemp)/log(2.0); /* log2(ST) */
4832 ST1 *= fptemp;
4833 fpop();
4834 } else {
4835 env->fpus &= (~0x4700);
4836 env->fpus |= 0x400;
4837 }
4838}
4839
4840void helper_fptan(void)
4841{
4842 CPU86_LDouble fptemp;
4843
4844 fptemp = ST0;
4845 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4846 env->fpus |= 0x400;
4847 } else {
4848 ST0 = tan(fptemp);
4849 fpush();
4850 ST0 = 1.0;
4851 env->fpus &= (~0x400); /* C2 <-- 0 */
4852 /* the above code is for |arg| < 2**52 only */
4853 }
4854}
4855
4856void helper_fpatan(void)
4857{
4858 CPU86_LDouble fptemp, fpsrcop;
4859
4860 fpsrcop = ST1;
4861 fptemp = ST0;
4862 ST1 = atan2(fpsrcop,fptemp);
4863 fpop();
4864}
4865
4866void helper_fxtract(void)
4867{
4868 CPU86_LDoubleU temp;
4869 unsigned int expdif;
4870
4871 temp.d = ST0;
4872 expdif = EXPD(temp) - EXPBIAS;
4873 /* DP exponent bias */
4874 ST0 = expdif;
4875 fpush();
4876 BIASEXPONENT(temp);
4877 ST0 = temp.d;
4878}
4879
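/* FPREM1: IEEE partial remainder of ST0 by ST1 (round-to-nearest quotient); C2 is set when the reduction is incomplete and the low quotient bits are reported in C0, C3 and C1. */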
4880void helper_fprem1(void)
4881{
4882 CPU86_LDouble dblq, fpsrcop, fptemp;
4883 CPU86_LDoubleU fpsrcop1, fptemp1;
4884 int expdif;
4885 signed long long int q;
4886
4887#ifndef VBOX /* Unfortunately, we cannot handle isinf/isnan easily in wrapper */
4888 if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
4889#else
4890 if ((ST0 != ST0) || (ST1 != ST1) || (ST1 == 0.0)) {
4891#endif
4892 ST0 = 0.0 / 0.0; /* NaN */
4893 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4894 return;
4895 }
4896
4897 fpsrcop = ST0;
4898 fptemp = ST1;
4899 fpsrcop1.d = fpsrcop;
4900 fptemp1.d = fptemp;
4901 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
4902
4903 if (expdif < 0) {
4904 /* optimisation? taken from the AMD docs */
4905 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4906 /* ST0 is unchanged */
4907 return;
4908 }
4909
4910 if (expdif < 53) {
4911 dblq = fpsrcop / fptemp;
4912 /* round dblq towards nearest integer */
4913 dblq = rint(dblq);
4914 ST0 = fpsrcop - fptemp * dblq;
4915
4916 /* convert dblq to q by truncating towards zero */
4917 if (dblq < 0.0)
4918 q = (signed long long int)(-dblq);
4919 else
4920 q = (signed long long int)dblq;
4921
4922 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4923 /* (C0,C3,C1) <-- (q2,q1,q0) */
4924 env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
4925 env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
4926 env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
4927 } else {
4928 env->fpus |= 0x400; /* C2 <-- 1 */
4929 fptemp = pow(2.0, expdif - 50);
4930 fpsrcop = (ST0 / ST1) / fptemp;
4931 /* fpsrcop = integer obtained by chopping */
4932 fpsrcop = (fpsrcop < 0.0) ?
4933 -(floor(fabs(fpsrcop))) : floor(fpsrcop);
4934 ST0 -= (ST1 * fpsrcop * fptemp);
4935 }
4936}
4937
4938void helper_fprem(void)
4939{
4940 CPU86_LDouble dblq, fpsrcop, fptemp;
4941 CPU86_LDoubleU fpsrcop1, fptemp1;
4942 int expdif;
4943 signed long long int q;
4944
4945#ifndef VBOX /* Unfortunately, we cannot easily handle isinf/isnan in wrapper */
4946 if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
4947#else
4948 if ((ST0 != ST0) || (ST1 != ST1) || (ST1 == 0.0)) {
4949#endif
4950 ST0 = 0.0 / 0.0; /* NaN */
4951 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4952 return;
4953 }
4954
4955 fpsrcop = (CPU86_LDouble)ST0;
4956 fptemp = (CPU86_LDouble)ST1;
4957 fpsrcop1.d = fpsrcop;
4958 fptemp1.d = fptemp;
4959 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
4960
4961 if (expdif < 0) {
4962 /* optimisation? taken from the AMD docs */
4963 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4964 /* ST0 is unchanged */
4965 return;
4966 }
4967
4968 if ( expdif < 53 ) {
4969 dblq = fpsrcop/*ST0*/ / fptemp/*ST1*/;
4970 /* round dblq towards zero */
4971 dblq = (dblq < 0.0) ? ceil(dblq) : floor(dblq);
4972 ST0 = fpsrcop/*ST0*/ - fptemp * dblq;
4973
4974 /* convert dblq to q by truncating towards zero */
4975 if (dblq < 0.0)
4976 q = (signed long long int)(-dblq);
4977 else
4978 q = (signed long long int)dblq;
4979
4980 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4981 /* (C0,C3,C1) <-- (q2,q1,q0) */
4982 env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
4983 env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
4984 env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
4985 } else {
4986 int N = 32 + (expdif % 32); /* as per AMD docs */
4987 env->fpus |= 0x400; /* C2 <-- 1 */
4988 fptemp = pow(2.0, (double)(expdif - N));
4989 fpsrcop = (ST0 / ST1) / fptemp;
4990 /* fpsrcop = integer obtained by chopping */
4991 fpsrcop = (fpsrcop < 0.0) ?
4992 -(floor(fabs(fpsrcop))) : floor(fpsrcop);
4993 ST0 -= (ST1 * fpsrcop * fptemp);
4994 }
4995}
4996
4997void helper_fyl2xp1(void)
4998{
4999 CPU86_LDouble fptemp;
5000
5001 fptemp = ST0;
5002 if ((fptemp+1.0)>0.0) {
5003 fptemp = log(fptemp+1.0) / log(2.0); /* log2(ST+1.0) */
5004 ST1 *= fptemp;
5005 fpop();
5006 } else {
5007 env->fpus &= (~0x4700);
5008 env->fpus |= 0x400;
5009 }
5010}
5011
5012void helper_fsqrt(void)
5013{
5014 CPU86_LDouble fptemp;
5015
5016 fptemp = ST0;
5017 if (fptemp<0.0) {
5018 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
5019 env->fpus |= 0x400;
5020 }
5021 ST0 = sqrt(fptemp);
5022}
5023
5024void helper_fsincos(void)
5025{
5026 CPU86_LDouble fptemp;
5027
5028 fptemp = ST0;
5029 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
5030 env->fpus |= 0x400;
5031 } else {
5032 ST0 = sin(fptemp);
5033 fpush();
5034 ST0 = cos(fptemp);
5035 env->fpus &= (~0x400); /* C2 <-- 0 */
5036 /* the above code is for |arg| < 2**63 only */
5037 }
5038}
5039
5040void helper_frndint(void)
5041{
5042 ST0 = floatx_round_to_int(ST0, &env->fp_status);
5043}
5044
5045void helper_fscale(void)
5046{
5047 ST0 = ldexp (ST0, (int)(ST1));
5048}
5049
5050void helper_fsin(void)
5051{
5052 CPU86_LDouble fptemp;
5053
5054 fptemp = ST0;
5055 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
5056 env->fpus |= 0x400;
5057 } else {
5058 ST0 = sin(fptemp);
5059 env->fpus &= (~0x400); /* C2 <-- 0 */
5060 /* the above code is for |arg| < 2**53 only */
5061 }
5062}
5063
5064void helper_fcos(void)
5065{
5066 CPU86_LDouble fptemp;
5067
5068 fptemp = ST0;
5069 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
5070 env->fpus |= 0x400;
5071 } else {
5072 ST0 = cos(fptemp);
5073 env->fpus &= (~0x400); /* C2 <-- 0 */
5074 /* the above code is for |arg| < 2**63 only */
5075 }
5076}
5077
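/* FXAM: classify ST0 (NaN, infinity, zero, denormal or normal) into C3..C0 and report the sign in C1. */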
5078void helper_fxam_ST0(void)
5079{
5080 CPU86_LDoubleU temp;
5081 int expdif;
5082
5083 temp.d = ST0;
5084
5085 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
5086 if (SIGND(temp))
5087 env->fpus |= 0x200; /* C1 <-- 1 */
5088
5089 /* XXX: test fptags too */
5090 expdif = EXPD(temp);
5091 if (expdif == MAXEXPD) {
5092#ifdef USE_X86LDOUBLE
5093 if (MANTD(temp) == 0x8000000000000000ULL)
5094#else
5095 if (MANTD(temp) == 0)
5096#endif
5097 env->fpus |= 0x500 /*Infinity*/;
5098 else
5099 env->fpus |= 0x100 /*NaN*/;
5100 } else if (expdif == 0) {
5101 if (MANTD(temp) == 0)
5102 env->fpus |= 0x4000 /*Zero*/;
5103 else
5104 env->fpus |= 0x4400 /*Denormal*/;
5105 } else {
5106 env->fpus |= 0x400;
5107 }
5108}
5109
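/* FSTENV: store the FPU environment (control/status/tag words plus zeroed instruction/operand pointers) in the 16-bit or 32-bit layout; the tag word is recomputed from the register contents. */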
5110void helper_fstenv(target_ulong ptr, int data32)
5111{
5112 int fpus, fptag, exp, i;
5113 uint64_t mant;
5114 CPU86_LDoubleU tmp;
5115
5116 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
5117 fptag = 0;
5118 for (i=7; i>=0; i--) {
5119 fptag <<= 2;
5120 if (env->fptags[i]) {
5121 fptag |= 3;
5122 } else {
5123 tmp.d = env->fpregs[i].d;
5124 exp = EXPD(tmp);
5125 mant = MANTD(tmp);
5126 if (exp == 0 && mant == 0) {
5127 /* zero */
5128 fptag |= 1;
5129 } else if (exp == 0 || exp == MAXEXPD
5130#ifdef USE_X86LDOUBLE
5131 || (mant & (1LL << 63)) == 0
5132#endif
5133 ) {
5134 /* NaNs, infinity, denormal */
5135 fptag |= 2;
5136 }
5137 }
5138 }
5139 if (data32) {
5140 /* 32 bit */
5141 stl(ptr, env->fpuc);
5142 stl(ptr + 4, fpus);
5143 stl(ptr + 8, fptag);
5144 stl(ptr + 12, 0); /* fpip */
5145 stl(ptr + 16, 0); /* fpcs */
5146 stl(ptr + 20, 0); /* fpoo */
5147 stl(ptr + 24, 0); /* fpos */
5148 } else {
5149 /* 16 bit */
5150 stw(ptr, env->fpuc);
5151 stw(ptr + 2, fpus);
5152 stw(ptr + 4, fptag);
5153 stw(ptr + 6, 0);
5154 stw(ptr + 8, 0);
5155 stw(ptr + 10, 0);
5156 stw(ptr + 12, 0);
5157 }
5158}
5159
5160void helper_fldenv(target_ulong ptr, int data32)
5161{
5162 int i, fpus, fptag;
5163
5164 if (data32) {
5165 env->fpuc = lduw(ptr);
5166 fpus = lduw(ptr + 4);
5167 fptag = lduw(ptr + 8);
5168 }
5169 else {
5170 env->fpuc = lduw(ptr);
5171 fpus = lduw(ptr + 2);
5172 fptag = lduw(ptr + 4);
5173 }
5174 env->fpstt = (fpus >> 11) & 7;
5175 env->fpus = fpus & ~0x3800;
5176 for(i = 0;i < 8; i++) {
5177 env->fptags[i] = ((fptag & 3) == 3);
5178 fptag >>= 2;
5179 }
5180}
5181
5182void helper_fsave(target_ulong ptr, int data32)
5183{
5184 CPU86_LDouble tmp;
5185 int i;
5186
5187 helper_fstenv(ptr, data32);
5188
5189 ptr += (14 << data32);
5190 for(i = 0;i < 8; i++) {
5191 tmp = ST(i);
5192 helper_fstt(tmp, ptr);
5193 ptr += 10;
5194 }
5195
5196 /* fninit */
5197 env->fpus = 0;
5198 env->fpstt = 0;
5199 env->fpuc = 0x37f;
5200 env->fptags[0] = 1;
5201 env->fptags[1] = 1;
5202 env->fptags[2] = 1;
5203 env->fptags[3] = 1;
5204 env->fptags[4] = 1;
5205 env->fptags[5] = 1;
5206 env->fptags[6] = 1;
5207 env->fptags[7] = 1;
5208}
5209
5210void helper_frstor(target_ulong ptr, int data32)
5211{
5212 CPU86_LDouble tmp;
5213 int i;
5214
5215 helper_fldenv(ptr, data32);
5216 ptr += (14 << data32);
5217
5218 for(i = 0;i < 8; i++) {
5219 tmp = helper_fldt(ptr);
5220 ST(i) = tmp;
5221 ptr += 10;
5222 }
5223}
5224
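/* FXSAVE: store the x87/MMX and SSE state into the 512-byte image (abridged tag word); the XMM registers are skipped for "fast FXSAVE" when EFER.FFXSR is set at CPL 0 in long mode. */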
5225void helper_fxsave(target_ulong ptr, int data64)
5226{
5227 int fpus, fptag, i, nb_xmm_regs;
5228 CPU86_LDouble tmp;
5229 target_ulong addr;
5230
5231 /* The operand must be 16 byte aligned */
5232 if (ptr & 0xf) {
5233 raise_exception(EXCP0D_GPF);
5234 }
5235
5236 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
5237 fptag = 0;
5238 for(i = 0; i < 8; i++) {
5239 fptag |= (env->fptags[i] << i);
5240 }
5241 stw(ptr, env->fpuc);
5242 stw(ptr + 2, fpus);
5243 stw(ptr + 4, fptag ^ 0xff);
5244#ifdef TARGET_X86_64
5245 if (data64) {
5246 stq(ptr + 0x08, 0); /* rip */
5247 stq(ptr + 0x10, 0); /* rdp */
5248 } else
5249#endif
5250 {
5251 stl(ptr + 0x08, 0); /* eip */
5252 stl(ptr + 0x0c, 0); /* sel */
5253 stl(ptr + 0x10, 0); /* dp */
5254 stl(ptr + 0x14, 0); /* sel */
5255 }
5256
5257 addr = ptr + 0x20;
5258 for(i = 0;i < 8; i++) {
5259 tmp = ST(i);
5260 helper_fstt(tmp, addr);
5261 addr += 16;
5262 }
5263
5264 if (env->cr[4] & CR4_OSFXSR_MASK) {
5265 /* XXX: finish it */
5266 stl(ptr + 0x18, env->mxcsr); /* mxcsr */
5267 stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
5268 if (env->hflags & HF_CS64_MASK)
5269 nb_xmm_regs = 16;
5270 else
5271 nb_xmm_regs = 8;
5272 addr = ptr + 0xa0;
5273 /* Fast FXSAVE leaves out the XMM registers */
5274 if (!(env->efer & MSR_EFER_FFXSR)
5275 || (env->hflags & HF_CPL_MASK)
5276 || !(env->hflags & HF_LMA_MASK)) {
5277 for(i = 0; i < nb_xmm_regs; i++) {
5278 stq(addr, env->xmm_regs[i].XMM_Q(0));
5279 stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
5280 addr += 16;
5281 }
5282 }
5283 }
5284}
5285
5286void helper_fxrstor(target_ulong ptr, int data64)
5287{
5288 int i, fpus, fptag, nb_xmm_regs;
5289 CPU86_LDouble tmp;
5290 target_ulong addr;
5291
5292 /* The operand must be 16 byte aligned */
5293 if (ptr & 0xf) {
5294 raise_exception(EXCP0D_GPF);
5295 }
5296
5297 env->fpuc = lduw(ptr);
5298 fpus = lduw(ptr + 2);
5299 fptag = lduw(ptr + 4);
5300 env->fpstt = (fpus >> 11) & 7;
5301 env->fpus = fpus & ~0x3800;
5302 fptag ^= 0xff;
5303 for(i = 0;i < 8; i++) {
5304 env->fptags[i] = ((fptag >> i) & 1);
5305 }
5306
5307 addr = ptr + 0x20;
5308 for(i = 0;i < 8; i++) {
5309 tmp = helper_fldt(addr);
5310 ST(i) = tmp;
5311 addr += 16;
5312 }
5313
5314 if (env->cr[4] & CR4_OSFXSR_MASK) {
5315 /* XXX: finish it */
5316 env->mxcsr = ldl(ptr + 0x18);
5317 //ldl(ptr + 0x1c);
5318 if (env->hflags & HF_CS64_MASK)
5319 nb_xmm_regs = 16;
5320 else
5321 nb_xmm_regs = 8;
5322 addr = ptr + 0xa0;
5323 /* Fast FXRESTORE leaves out the XMM registers */
5324 if (!(env->efer & MSR_EFER_FFXSR)
5325 || (env->hflags & HF_CPL_MASK)
5326 || !(env->hflags & HF_LMA_MASK)) {
5327 for(i = 0; i < nb_xmm_regs; i++) {
5328#if !defined(VBOX) || __GNUC__ < 4
5329 env->xmm_regs[i].XMM_Q(0) = ldq(addr);
5330 env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
5331#else /* VBOX + __GNUC__ >= 4: gcc 4.x compiler bug - it runs out of registers for the 64-bit value. */
5332# if 1
5333 env->xmm_regs[i].XMM_L(0) = ldl(addr);
5334 env->xmm_regs[i].XMM_L(1) = ldl(addr + 4);
5335 env->xmm_regs[i].XMM_L(2) = ldl(addr + 8);
5336 env->xmm_regs[i].XMM_L(3) = ldl(addr + 12);
5337# else
5338 /* this works fine on Mac OS X, gcc 4.0.1 */
5339 uint64_t u64 = ldq(addr);
5340 env->xmm_regs[i].XMM_Q(0) = u64;
5341 u64 = ldq(addr + 8);
5342 env->xmm_regs[i].XMM_Q(1) = u64;
5343# endif
5344#endif
5345 addr += 16;
5346 }
5347 }
5348 }
5349}
5350
5351#ifndef USE_X86LDOUBLE
5352
5353void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
5354{
5355 CPU86_LDoubleU temp;
5356 int e;
5357
5358 temp.d = f;
5359 /* mantissa */
5360 *pmant = (MANTD(temp) << 11) | (1LL << 63);
5361 /* exponent + sign */
5362 e = EXPD(temp) - EXPBIAS + 16383;
5363 e |= SIGND(temp) >> 16;
5364 *pexp = e;
5365}
5366
5367CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
5368{
5369 CPU86_LDoubleU temp;
5370 int e;
5371 uint64_t ll;
5372
5373 /* XXX: handle overflow ? */
5374 e = (upper & 0x7fff) - 16383 + EXPBIAS; /* exponent */
5375 e |= (upper >> 4) & 0x800; /* sign */
5376 ll = (mant >> 11) & ((1LL << 52) - 1);
5377#ifdef __arm__
5378 temp.l.upper = (e << 20) | (ll >> 32);
5379 temp.l.lower = ll;
5380#else
5381 temp.ll = ll | ((uint64_t)e << 52);
5382#endif
5383 return temp.d;
5384}
5385
5386#else
5387
5388void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
5389{
5390 CPU86_LDoubleU temp;
5391
5392 temp.d = f;
5393 *pmant = temp.l.lower;
5394 *pexp = temp.l.upper;
5395}
5396
5397CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
5398{
5399 CPU86_LDoubleU temp;
5400
5401 temp.l.upper = upper;
5402 temp.l.lower = mant;
5403 return temp.d;
5404}
5405#endif
5406
5407#ifdef TARGET_X86_64
5408
5409//#define DEBUG_MULDIV
5410
5411static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
5412{
5413 *plow += a;
5414 /* carry test */
5415 if (*plow < a)
5416 (*phigh)++;
5417 *phigh += b;
5418}
5419
5420static void neg128(uint64_t *plow, uint64_t *phigh)
5421{
5422 *plow = ~ *plow;
5423 *phigh = ~ *phigh;
5424 add128(plow, phigh, 1, 0);
5425}
5426
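/* 128-by-64 bit unsigned division used by 64-bit DIV: *phigh:*plow / b, quotient returned in *plow and remainder in *phigh; the bit-by-bit loop is only taken when the high part is non-zero. */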
5427/* return TRUE if overflow */
5428static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
5429{
5430 uint64_t q, r, a1, a0;
5431 int i, qb, ab;
5432
5433 a0 = *plow;
5434 a1 = *phigh;
5435 if (a1 == 0) {
5436 q = a0 / b;
5437 r = a0 % b;
5438 *plow = q;
5439 *phigh = r;
5440 } else {
5441 if (a1 >= b)
5442 return 1;
5443 /* XXX: use a better algorithm */
5444 for(i = 0; i < 64; i++) {
5445 ab = a1 >> 63;
5446 a1 = (a1 << 1) | (a0 >> 63);
5447 if (ab || a1 >= b) {
5448 a1 -= b;
5449 qb = 1;
5450 } else {
5451 qb = 0;
5452 }
5453 a0 = (a0 << 1) | qb;
5454 }
5455#if defined(DEBUG_MULDIV)
5456 printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64 ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n",
5457 *phigh, *plow, b, a0, a1);
5458#endif
5459 *plow = a0;
5460 *phigh = a1;
5461 }
5462 return 0;
5463}
5464
5465/* return TRUE if overflow */
5466static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
5467{
5468 int sa, sb;
5469 sa = ((int64_t)*phigh < 0);
5470 if (sa)
5471 neg128(plow, phigh);
5472 sb = (b < 0);
5473 if (sb)
5474 b = -b;
5475 if (div64(plow, phigh, b) != 0)
5476 return 1;
5477 if (sa ^ sb) {
5478 if (*plow > (1ULL << 63))
5479 return 1;
5480 *plow = - *plow;
5481 } else {
5482 if (*plow >= (1ULL << 63))
5483 return 1;
5484 }
5485 if (sa)
5486 *phigh = - *phigh;
5487 return 0;
5488}
5489
5490void helper_mulq_EAX_T0(target_ulong t0)
5491{
5492 uint64_t r0, r1;
5493
5494 mulu64(&r0, &r1, EAX, t0);
5495 EAX = r0;
5496 EDX = r1;
5497 CC_DST = r0;
5498 CC_SRC = r1;
5499}
5500
5501void helper_imulq_EAX_T0(target_ulong t0)
5502{
5503 uint64_t r0, r1;
5504
5505 muls64(&r0, &r1, EAX, t0);
5506 EAX = r0;
5507 EDX = r1;
5508 CC_DST = r0;
5509 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
5510}
5511
5512target_ulong helper_imulq_T0_T1(target_ulong t0, target_ulong t1)
5513{
5514 uint64_t r0, r1;
5515
5516 muls64(&r0, &r1, t0, t1);
5517 CC_DST = r0;
5518 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
5519 return r0;
5520}
5521
5522void helper_divq_EAX(target_ulong t0)
5523{
5524 uint64_t r0, r1;
5525 if (t0 == 0) {
5526 raise_exception(EXCP00_DIVZ);
5527 }
5528 r0 = EAX;
5529 r1 = EDX;
5530 if (div64(&r0, &r1, t0))
5531 raise_exception(EXCP00_DIVZ);
5532 EAX = r0;
5533 EDX = r1;
5534}
5535
5536void helper_idivq_EAX(target_ulong t0)
5537{
5538 uint64_t r0, r1;
5539 if (t0 == 0) {
5540 raise_exception(EXCP00_DIVZ);
5541 }
5542 r0 = EAX;
5543 r1 = EDX;
5544 if (idiv64(&r0, &r1, t0))
5545 raise_exception(EXCP00_DIVZ);
5546 EAX = r0;
5547 EDX = r1;
5548}
5549#endif
5550
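/* Halt the virtual CPU and leave the execution loop; the IRQ-inhibit flag is cleared so that an interrupt pending after STI; HLT can wake it up. */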
5551static void do_hlt(void)
5552{
5553 env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
5554 env->halted = 1;
5555 env->exception_index = EXCP_HLT;
5556 cpu_loop_exit();
5557}
5558
5559void helper_hlt(int next_eip_addend)
5560{
5561 helper_svm_check_intercept_param(SVM_EXIT_HLT, 0);
5562 EIP += next_eip_addend;
5563
5564 do_hlt();
5565}
5566
5567void helper_monitor(target_ulong ptr)
5568{
5569#ifdef VBOX
5570 if ((uint32_t)ECX > 1)
5571 raise_exception(EXCP0D_GPF);
5572#else /* !VBOX */
5573 if ((uint32_t)ECX != 0)
5574 raise_exception(EXCP0D_GPF);
5575#endif /* !VBOX */
5576 /* XXX: store address ? */
5577 helper_svm_check_intercept_param(SVM_EXIT_MONITOR, 0);
5578}
5579
5580void helper_mwait(int next_eip_addend)
5581{
5582 if ((uint32_t)ECX != 0)
5583 raise_exception(EXCP0D_GPF);
5584#ifdef VBOX
5585 helper_hlt(next_eip_addend);
5586#else /* !VBOX */
5587 helper_svm_check_intercept_param(SVM_EXIT_MWAIT, 0);
5588 EIP += next_eip_addend;
5589
5590 /* XXX: not complete but not completely erroneous */
5591 if (env->cpu_index != 0 || env->next_cpu != NULL) {
5592 /* more than one CPU: do not sleep because another CPU may
5593 wake this one */
5594 } else {
5595 do_hlt();
5596 }
5597#endif /* !VBOX */
5598}
5599
5600void helper_debug(void)
5601{
5602 env->exception_index = EXCP_DEBUG;
5603 cpu_loop_exit();
5604}
5605
5606void helper_reset_rf(void)
5607{
5608 env->eflags &= ~RF_MASK;
5609}
5610
5611void helper_raise_interrupt(int intno, int next_eip_addend)
5612{
5613 raise_interrupt(intno, 1, 0, next_eip_addend);
5614}
5615
5616void helper_raise_exception(int exception_index)
5617{
5618 raise_exception(exception_index);
5619}
5620
5621void helper_cli(void)
5622{
5623 env->eflags &= ~IF_MASK;
5624}
5625
5626void helper_sti(void)
5627{
5628 env->eflags |= IF_MASK;
5629}
5630
5631#ifdef VBOX
5632void helper_cli_vme(void)
5633{
5634 env->eflags &= ~VIF_MASK;
5635}
5636
5637void helper_sti_vme(void)
5638{
5639 /* First check, then change eflags according to the AMD manual */
5640 if (env->eflags & VIP_MASK) {
5641 raise_exception(EXCP0D_GPF);
5642 }
5643 env->eflags |= VIF_MASK;
5644}
5645#endif /* VBOX */
5646
5647#if 0
5648/* vm86plus instructions */
5649void helper_cli_vm(void)
5650{
5651 env->eflags &= ~VIF_MASK;
5652}
5653
5654void helper_sti_vm(void)
5655{
5656 env->eflags |= VIF_MASK;
5657 if (env->eflags & VIP_MASK) {
5658 raise_exception(EXCP0D_GPF);
5659 }
5660}
5661#endif
5662
5663void helper_set_inhibit_irq(void)
5664{
5665 env->hflags |= HF_INHIBIT_IRQ_MASK;
5666}
5667
5668void helper_reset_inhibit_irq(void)
5669{
5670 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
5671}
5672
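/* BOUND: raise #BR (EXCP05) if the signed index is outside the [lower, upper] pair stored at a0. */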
5673void helper_boundw(target_ulong a0, int v)
5674{
5675 int low, high;
5676 low = ldsw(a0);
5677 high = ldsw(a0 + 2);
5678 v = (int16_t)v;
5679 if (v < low || v > high) {
5680 raise_exception(EXCP05_BOUND);
5681 }
5682}
5683
5684void helper_boundl(target_ulong a0, int v)
5685{
5686 int low, high;
5687 low = ldl(a0);
5688 high = ldl(a0 + 4);
5689 if (v < low || v > high) {
5690 raise_exception(EXCP05_BOUND);
5691 }
5692}
5693
5694static float approx_rsqrt(float a)
5695{
5696 return 1.0 / sqrt(a);
5697}
5698
5699static float approx_rcp(float a)
5700{
5701 return 1.0 / a;
5702}
5703
5704#if !defined(CONFIG_USER_ONLY)
5705
5706#define MMUSUFFIX _mmu
5707
5708#define SHIFT 0
5709#include "softmmu_template.h"
5710
5711#define SHIFT 1
5712#include "softmmu_template.h"
5713
5714#define SHIFT 2
5715#include "softmmu_template.h"
5716
5717#define SHIFT 3
5718#include "softmmu_template.h"
5719
5720#endif
5721
5722#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
5723/* This code assumes the real physical address always fits into a host CPU register,
5724 which is wrong in general, but true for our current use cases. */
5725RTCCUINTREG REGPARM __ldb_vbox_phys(RTCCUINTREG addr)
5726{
5727 return remR3PhysReadS8(addr);
5728}
5729RTCCUINTREG REGPARM __ldub_vbox_phys(RTCCUINTREG addr)
5730{
5731 return remR3PhysReadU8(addr);
5732}
5733void REGPARM __stb_vbox_phys(RTCCUINTREG addr, RTCCUINTREG val)
5734{
5735 remR3PhysWriteU8(addr, val);
5736}
5737RTCCUINTREG REGPARM __ldw_vbox_phys(RTCCUINTREG addr)
5738{
5739 return remR3PhysReadS16(addr);
5740}
5741RTCCUINTREG REGPARM __lduw_vbox_phys(RTCCUINTREG addr)
5742{
5743 return remR3PhysReadU16(addr);
5744}
5745void REGPARM __stw_vbox_phys(RTCCUINTREG addr, RTCCUINTREG val)
5746{
5747 remR3PhysWriteU16(addr, val);
5748}
5749RTCCUINTREG REGPARM __ldl_vbox_phys(RTCCUINTREG addr)
5750{
5751 return remR3PhysReadS32(addr);
5752}
5753RTCCUINTREG REGPARM __ldul_vbox_phys(RTCCUINTREG addr)
5754{
5755 return remR3PhysReadU32(addr);
5756}
5757void REGPARM __stl_vbox_phys(RTCCUINTREG addr, RTCCUINTREG val)
5758{
5759 remR3PhysWriteU32(addr, val);
5760}
5761uint64_t REGPARM __ldq_vbox_phys(RTCCUINTREG addr)
5762{
5763 return remR3PhysReadU64(addr);
5764}
5765void REGPARM __stq_vbox_phys(RTCCUINTREG addr, uint64_t val)
5766{
5767 remR3PhysWriteU64(addr, val);
5768}
5769#endif /* VBOX */
5770
5771#if !defined(CONFIG_USER_ONLY)
5772/* try to fill the TLB and raise an exception on error. If retaddr is
5773 NULL, it means that the function was called from C code (i.e. not
5774 from generated code or from helper.c) */
5775/* XXX: fix it to restore all registers */
5776void tlb_fill(target_ulong addr, int is_write, int mmu_idx, void *retaddr)
5777{
5778 TranslationBlock *tb;
5779 int ret;
5780 uintptr_t pc;
5781 CPUX86State *saved_env;
5782
5783 /* XXX: hack to restore env in all cases, even if not called from
5784 generated code */
5785 saved_env = env;
5786 env = cpu_single_env;
5787
5788 ret = cpu_x86_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
5789 if (ret) {
5790 if (retaddr) {
5791 /* now we have a real cpu fault */
5792 pc = (uintptr_t)retaddr;
5793 tb = tb_find_pc(pc);
5794 if (tb) {
5795 /* the PC is inside the translated code. It means that we have
5796 a virtual CPU fault */
5797 cpu_restore_state(tb, env, pc, NULL);
5798 }
5799 }
5800 raise_exception_err(env->exception_index, env->error_code);
5801 }
5802 env = saved_env;
5803}
5804#endif
5805
5806#ifdef VBOX
5807
5808/**
5809 * Correctly computes the eflags.
5810 * @returns eflags.
5811 * @param env1 CPU environment.
5812 */
5813uint32_t raw_compute_eflags(CPUX86State *env1)
5814{
5815 CPUX86State *savedenv = env;
5816 uint32_t efl;
5817 env = env1;
5818 efl = compute_eflags();
5819 env = savedenv;
5820 return efl;
5821}
5822
5823/**
5824 * Reads a byte from a virtual address in the guest memory area.
5825 * XXX: is it working for any addresses? swapped out pages?
5826 * @returns read data byte.
5827 * @param env1 CPU environment.
5828 * @param addr GC Virtual address.
5829 */
5830uint8_t read_byte(CPUX86State *env1, target_ulong addr)
5831{
5832 CPUX86State *savedenv = env;
5833 uint8_t u8;
5834 env = env1;
5835 u8 = ldub_kernel(addr);
5836 env = savedenv;
5837 return u8;
5838}
5839
5840/**
5841 * Reads a 16-bit word from a virtual address in the guest memory area.
5842 * XXX: is it working for any addresses? swapped out pages?
5843 * @returns read data word.
5844 * @param env1 CPU environment.
5845 * @param addr GC Virtual address.
5846 */
5847uint16_t read_word(CPUX86State *env1, target_ulong addr)
5848{
5849 CPUX86State *savedenv = env;
5850 uint16_t u16;
5851 env = env1;
5852 u16 = lduw_kernel(addr);
5853 env = savedenv;
5854 return u16;
5855}
5856
5857/**
5858 * Reads a 32-bit dword from a virtual address in the guest memory area.
5859 * XXX: is it working for any addresses? swapped out pages?
5860 * @returns read data dword.
5861 * @param env1 CPU environment.
5862 * @param addr GC Virtual address.
5863 */
5864uint32_t read_dword(CPUX86State *env1, target_ulong addr)
5865{
5866 CPUX86State *savedenv = env;
5867 uint32_t u32;
5868 env = env1;
5869 u32 = ldl_kernel(addr);
5870 env = savedenv;
5871 return u32;
5872}
5873
5874/**
5875 * Writes a byte to a virtual address in the guest memory area.
5876 * XXX: is it working for any addresses? swapped out pages?
5878 * @param env1 CPU environment.
5879 * @param addr GC Virtual address.
5880 * @param val byte value.
5881 */
5882void write_byte(CPUX86State *env1, target_ulong addr, uint8_t val)
5883{
5884 CPUX86State *savedenv = env;
5885 env = env1;
5886 stb(addr, val);
5887 env = savedenv;
5888}
5889
5890void write_word(CPUX86State *env1, target_ulong addr, uint16_t val)
5891{
5892 CPUX86State *savedenv = env;
5893 env = env1;
5894 stw(addr, val);
5895 env = savedenv;
5896}
5897
5898void write_dword(CPUX86State *env1, target_ulong addr, uint32_t val)
5899{
5900 CPUX86State *savedenv = env;
5901 env = env1;
5902 stl(addr, val);
5903 env = savedenv;
5904}
5905
5906/**
5907 * Correctly loads selector into segment register with updating internal
5908 * qemu data/caches.
5909 * @param env1 CPU environment.
5910 * @param seg_reg Segment register.
5911 * @param selector Selector to load.
5912 */
5913void sync_seg(CPUX86State *env1, int seg_reg, int selector)
5914{
5915 CPUX86State *savedenv = env;
5916#ifdef FORCE_SEGMENT_SYNC
5917 jmp_buf old_buf;
5918#endif
5919
5920 env = env1;
5921
5922 if ( env->eflags & X86_EFL_VM
5923 || !(env->cr[0] & X86_CR0_PE))
5924 {
5925 load_seg_vm(seg_reg, selector);
5926
5927 env = savedenv;
5928
5929 /* Successful sync. */
5930 Assert(env1->segs[seg_reg].newselector == 0);
5931 }
5932 else
5933 {
5934 /* For some reason it works even without saving/restoring the jump buffer; since this
5935 code is time critical, let's not do that. */
5936#ifdef FORCE_SEGMENT_SYNC
5937 memcpy(&old_buf, &env1->jmp_env, sizeof(old_buf));
5938#endif
5939 if (setjmp(env1->jmp_env) == 0)
5940 {
5941 if (seg_reg == R_CS)
5942 {
5943 uint32_t e1, e2;
5944 e1 = e2 = 0;
5945 load_segment(&e1, &e2, selector);
5946 cpu_x86_load_seg_cache(env, R_CS, selector,
5947 get_seg_base(e1, e2),
5948 get_seg_limit(e1, e2),
5949 e2);
5950 }
5951 else
5952 helper_load_seg(seg_reg, selector);
5953 /* We used to use tss_load_seg(seg_reg, selector); which, for some reason, ignored
5954 loading of 0 selectors and in turn led to subtle problems like #3588. */
5955
5956 env = savedenv;
5957
5958 /* Successful sync. */
5959 Assert(env1->segs[seg_reg].newselector == 0);
5960 }
5961 else
5962 {
5963 env = savedenv;
5964
5965 /* Postpone sync until the guest uses the selector. */
5966 env1->segs[seg_reg].selector = selector; /* hidden values are now incorrect, but will be resynced when this register is accessed. */
5967 env1->segs[seg_reg].newselector = selector;
5968 Log(("sync_seg: out of sync seg_reg=%d selector=%#x\n", seg_reg, selector));
5969 env1->exception_index = -1;
5970 env1->error_code = 0;
5971 env1->old_exception = -1;
5972 }
5973#ifdef FORCE_SEGMENT_SYNC
5974 memcpy(&env1->jmp_env, &old_buf, sizeof(old_buf));
5975#endif
5976 }
5977
5978}
5979
5980DECLINLINE(void) tb_reset_jump(TranslationBlock *tb, int n)
5981{
5982 tb_set_jmp_target(tb, n, (uintptr_t)(tb->tc_ptr + tb->tb_next_offset[n]));
5983}
5984
5985
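/* Translate exactly one guest instruction into a temporary TB and execute it, repeating while EIP stays unchanged (REP prefixes) and once more when interrupts are inhibited (e.g. after STI or MOV SS); the TB is invalidated and freed afterwards. */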
5986int emulate_single_instr(CPUX86State *env1)
5987{
5988 TranslationBlock *tb;
5989 TranslationBlock *current;
5990 int flags;
5991 uint8_t *tc_ptr;
5992 target_ulong old_eip;
5993
5994 /* ensures env is loaded! */
5995 CPUX86State *savedenv = env;
5996 env = env1;
5997
5998 RAWEx_ProfileStart(env, STATS_EMULATE_SINGLE_INSTR);
5999
6000 current = env->current_tb;
6001 env->current_tb = NULL;
6002 flags = env->hflags | (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
6003
6004 /*
6005 * Translate only one instruction.
6006 */
6007 ASMAtomicOrU32(&env->state, CPU_EMULATE_SINGLE_INSTR);
6008 tb = tb_gen_code(env, env->eip + env->segs[R_CS].base,
6009 env->segs[R_CS].base, flags, 0);
6010
6011 ASMAtomicAndU32(&env->state, ~CPU_EMULATE_SINGLE_INSTR);
6012
6013
6014 /* tb_link_phys: */
6015 tb->jmp_first = (TranslationBlock *)((intptr_t)tb | 2);
6016 tb->jmp_next[0] = NULL;
6017 tb->jmp_next[1] = NULL;
6018 Assert(tb->jmp_next[0] == NULL);
6019 Assert(tb->jmp_next[1] == NULL);
6020 if (tb->tb_next_offset[0] != 0xffff)
6021 tb_reset_jump(tb, 0);
6022 if (tb->tb_next_offset[1] != 0xffff)
6023 tb_reset_jump(tb, 1);
6024
6025 /*
6026 * Execute it using emulation
6027 */
6028 old_eip = env->eip;
6029 env->current_tb = tb;
6030
6031 /*
6032 * eip remains the same for repeated instructions; no idea why qemu doesn't do a jump inside the generated code.
6033 * Perhaps not a very safe hack.
6034 */
6035 while (old_eip == env->eip)
6036 {
6037 tc_ptr = tb->tc_ptr;
6038
6039#if defined(VBOX) && defined(GCC_WITH_BUGGY_REGPARM)
6040 int fake_ret;
6041 tcg_qemu_tb_exec(tc_ptr, fake_ret);
6042#else
6043 tcg_qemu_tb_exec(tc_ptr);
6044#endif
6045
6046 /*
6047 * Exit once we detect an external interrupt and interrupts are enabled
6048 */
6049 if ( (env->interrupt_request & (CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_EXTERNAL_TIMER))
6050 || ( (env->eflags & IF_MASK)
6051 && !(env->hflags & HF_INHIBIT_IRQ_MASK)
6052 && (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD) )
6053 )
6054 {
6055 break;
6056 }
6057 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_FLUSH_TLB) {
6058 tlb_flush(env, true);
6059 }
6060 }
6061 env->current_tb = current;
6062
6063 tb_phys_invalidate(tb, -1);
6064 tb_free(tb);
6065/*
6066 Assert(tb->tb_next_offset[0] == 0xffff);
6067 Assert(tb->tb_next_offset[1] == 0xffff);
6068 Assert(tb->tb_next[0] == 0xffff);
6069 Assert(tb->tb_next[1] == 0xffff);
6070 Assert(tb->jmp_next[0] == NULL);
6071 Assert(tb->jmp_next[1] == NULL);
6072 Assert(tb->jmp_first == NULL); */
6073
6074 RAWEx_ProfileStop(env, STATS_EMULATE_SINGLE_INSTR);
6075
6076 /*
6077 * Execute the next instruction when we encounter instruction fusing.
6078 */
6079 if (env->hflags & HF_INHIBIT_IRQ_MASK)
6080 {
6081 Log(("REM: Emulating next instruction due to instruction fusing (HF_INHIBIT_IRQ_MASK) at %RGv\n", env->eip));
6082 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
6083 emulate_single_instr(env);
6084 }
6085
6086 env = savedenv;
6087 return 0;
6088}
6089
6090/**
6091 * Correctly loads a new ldtr selector.
6092 *
6093 * @param env1 CPU environment.
6094 * @param selector Selector to load.
6095 */
6096void sync_ldtr(CPUX86State *env1, int selector)
6097{
6098 CPUX86State *saved_env = env;
6099 if (setjmp(env1->jmp_env) == 0)
6100 {
6101 env = env1;
6102 helper_lldt(selector);
6103 env = saved_env;
6104 }
6105 else
6106 {
6107 env = saved_env;
6108#ifdef VBOX_STRICT
6109 cpu_abort(env1, "sync_ldtr: selector=%#x\n", selector);
6110#endif
6111 }
6112}
6113
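/* Fetch SS:ESP for the given privilege level from the current TSS (16-bit or 32-bit format); returns 0 if the TSS is too small, 1 on success. */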
6114int get_ss_esp_from_tss_raw(CPUX86State *env1, uint32_t *ss_ptr,
6115 uint32_t *esp_ptr, int dpl)
6116{
6117 int type, index, shift;
6118
6119 CPUX86State *savedenv = env;
6120 env = env1;
6121
6122 if (!(env->tr.flags & DESC_P_MASK))
6123 cpu_abort(env, "invalid tss");
6124 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
6125 if ((type & 7) != 1)
6126 cpu_abort(env, "invalid tss type %d", type);
6127 shift = type >> 3;
6128 index = (dpl * 4 + 2) << shift;
6129 if (index + (4 << shift) - 1 > env->tr.limit)
6130 {
6131 env = savedenv;
6132 return 0;
6133 }
6134 //raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
6135
6136 if (shift == 0) {
6137 *esp_ptr = lduw_kernel(env->tr.base + index);
6138 *ss_ptr = lduw_kernel(env->tr.base + index + 2);
6139 } else {
6140 *esp_ptr = ldl_kernel(env->tr.base + index);
6141 *ss_ptr = lduw_kernel(env->tr.base + index + 4);
6142 }
6143
6144 env = savedenv;
6145 return 1;
6146}
6147
6148//*****************************************************************************
6149// Needs to be at the bottom of the file (overriding macros)
6150
6151static inline CPU86_LDouble helper_fldt_raw(uint8_t *ptr)
6152{
6153#ifdef USE_X86LDOUBLE
6154 CPU86_LDoubleU tmp;
6155 tmp.l.lower = *(uint64_t const *)ptr;
6156 tmp.l.upper = *(uint16_t const *)(ptr + 8);
6157 return tmp.d;
6158#else
6159# error "Busted FPU saving/restoring!"
6160 return *(CPU86_LDouble *)ptr;
6161#endif
6162}
6163
6164static inline void helper_fstt_raw(CPU86_LDouble f, uint8_t *ptr)
6165{
6166#ifdef USE_X86LDOUBLE
6167 CPU86_LDoubleU tmp;
6168 tmp.d = f;
6169 *(uint64_t *)(ptr + 0) = tmp.l.lower;
6170 *(uint16_t *)(ptr + 8) = tmp.l.upper;
6171 *(uint16_t *)(ptr + 10) = 0;
6172 *(uint32_t *)(ptr + 12) = 0;
6173 AssertCompile(sizeof(long double) > 8);
6174#else
6175# error "Busted FPU saving/restoring!"
6176 *(CPU86_LDouble *)ptr = f;
6177#endif
6178}
6179
6180#undef stw
6181#undef stl
6182#undef stq
6183#define stw(a,b) *(uint16_t *)(a) = (uint16_t)(b)
6184#define stl(a,b) *(uint32_t *)(a) = (uint32_t)(b)
6185#define stq(a,b) *(uint64_t *)(a) = (uint64_t)(b)
6186
6187//*****************************************************************************
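/* Dump the guest FPU (and, with FXSR, SSE) state to the raw memory block at ptr using the FXSAVE layout, falling back to the legacy FSAVE layout on CPUs without FXSR; the stw/stl/stq macros above were redefined to plain pointer accesses. */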
6188void restore_raw_fp_state(CPUX86State *env, uint8_t *ptr)
6189{
6190 int fpus, fptag, i, nb_xmm_regs;
6191 CPU86_LDouble tmp;
6192 uint8_t *addr;
6193 int data64 = !!(env->hflags & HF_LMA_MASK);
6194
6195 if (env->cpuid_features & CPUID_FXSR)
6196 {
6197 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
6198 fptag = 0;
6199 for(i = 0; i < 8; i++) {
6200 fptag |= (env->fptags[i] << i);
6201 }
6202 stw(ptr, env->fpuc);
6203 stw(ptr + 2, fpus);
6204 stw(ptr + 4, fptag ^ 0xff);
6205
6206 addr = ptr + 0x20;
6207 for(i = 0;i < 8; i++) {
6208 tmp = ST(i);
6209 helper_fstt_raw(tmp, addr);
6210 addr += 16;
6211 }
6212
6213 if (env->cr[4] & CR4_OSFXSR_MASK) {
6214 /* XXX: finish it */
6215 stl(ptr + 0x18, env->mxcsr); /* mxcsr */
6216 stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
6217 nb_xmm_regs = 8 << data64;
6218 addr = ptr + 0xa0;
6219 for(i = 0; i < nb_xmm_regs; i++) {
6220#if __GNUC__ < 4
6221 stq(addr, env->xmm_regs[i].XMM_Q(0));
6222 stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
6223#else /* VBOX + __GNUC__ >= 4: gcc 4.x compiler bug - it runs out of registers for the 64-bit value. */
6224 stl(addr, env->xmm_regs[i].XMM_L(0));
6225 stl(addr + 4, env->xmm_regs[i].XMM_L(1));
6226 stl(addr + 8, env->xmm_regs[i].XMM_L(2));
6227 stl(addr + 12, env->xmm_regs[i].XMM_L(3));
6228#endif
6229 addr += 16;
6230 }
6231 }
6232 }
6233 else
6234 {
6235 PX86FPUSTATE fp = (PX86FPUSTATE)ptr;
6236 int fptag;
6237
6238 fp->FCW = env->fpuc;
6239 fp->FSW = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
6240 fptag = 0;
6241 for (i=7; i>=0; i--) {
6242 fptag <<= 2;
6243 if (env->fptags[i]) {
6244 fptag |= 3;
6245 } else {
6246 /* the FPU automatically computes it */
6247 }
6248 }
6249 fp->FTW = fptag;
6250
6251 for(i = 0;i < 8; i++) {
6252 tmp = ST(i);
6253 helper_fstt_raw(tmp, &fp->regs[i].au8[0]);
6254 }
6255 }
6256}
6257
6258//*****************************************************************************
6259#undef lduw
6260#undef ldl
6261#undef ldq
6262#define lduw(a) *(uint16_t *)(a)
6263#define ldl(a) *(uint32_t *)(a)
6264#define ldq(a) *(uint64_t *)(a)
6265//*****************************************************************************
6266void save_raw_fp_state(CPUX86State *env, uint8_t *ptr)
6267{
6268 int i, fpus, fptag, nb_xmm_regs;
6269 CPU86_LDouble tmp;
6270 uint8_t *addr;
6271 int data64 = !!(env->hflags & HF_LMA_MASK); /* don't use HF_CS64_MASK here as cs hasn't been synced when this function is called. */
6272
6273 if (env->cpuid_features & CPUID_FXSR)
6274 {
6275 env->fpuc = lduw(ptr);
6276 fpus = lduw(ptr + 2);
6277 fptag = lduw(ptr + 4);
6278 env->fpstt = (fpus >> 11) & 7;
6279 env->fpus = fpus & ~0x3800;
6280 fptag ^= 0xff;
6281 for(i = 0;i < 8; i++) {
6282 env->fptags[i] = ((fptag >> i) & 1);
6283 }
6284
6285 addr = ptr + 0x20;
6286 for(i = 0;i < 8; i++) {
6287 tmp = helper_fldt_raw(addr);
6288 ST(i) = tmp;
6289 addr += 16;
6290 }
6291
6292 if (env->cr[4] & CR4_OSFXSR_MASK) {
6293 /* XXX: finish it, endianness */
6294 env->mxcsr = ldl(ptr + 0x18);
6295 //ldl(ptr + 0x1c);
6296 nb_xmm_regs = 8 << data64;
6297 addr = ptr + 0xa0;
6298 for(i = 0; i < nb_xmm_regs; i++) {
6299#if HC_ARCH_BITS == 32
6300 /* this is a workaround for http://gcc.gnu.org/bugzilla/show_bug.cgi?id=35135 */
6301 env->xmm_regs[i].XMM_L(0) = ldl(addr);
6302 env->xmm_regs[i].XMM_L(1) = ldl(addr + 4);
6303 env->xmm_regs[i].XMM_L(2) = ldl(addr + 8);
6304 env->xmm_regs[i].XMM_L(3) = ldl(addr + 12);
6305#else
6306 env->xmm_regs[i].XMM_Q(0) = ldq(addr);
6307 env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
6308#endif
6309 addr += 16;
6310 }
6311 }
6312 }
6313 else
6314 {
6315 PX86FPUSTATE fp = (PX86FPUSTATE)ptr;
6316 int fptag, j;
6317
6318 env->fpuc = fp->FCW;
6319 env->fpstt = (fp->FSW >> 11) & 7;
6320 env->fpus = fp->FSW & ~0x3800;
6321 fptag = fp->FTW;
6322 for(i = 0;i < 8; i++) {
6323 env->fptags[i] = ((fptag & 3) == 3);
6324 fptag >>= 2;
6325 }
6326 j = env->fpstt;
6327 for(i = 0;i < 8; i++) {
6328 tmp = helper_fldt_raw(&fp->regs[i].au8[0]);
6329 ST(i) = tmp;
6330 }
6331 }
6332}
6333//*****************************************************************************
6334//*****************************************************************************
6335
6336#endif /* VBOX */
6337
6338/* Secure Virtual Machine helpers */
6339
6340#if defined(CONFIG_USER_ONLY)
6341
6342void helper_vmrun(int aflag, int next_eip_addend)
6343{
6344}
6345void helper_vmmcall(void)
6346{
6347}
6348void helper_vmload(int aflag)
6349{
6350}
6351void helper_vmsave(int aflag)
6352{
6353}
6354void helper_stgi(void)
6355{
6356}
6357void helper_clgi(void)
6358{
6359}
6360void helper_skinit(void)
6361{
6362}
6363void helper_invlpga(int aflag)
6364{
6365}
6366void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
6367{
6368}
6369void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
6370{
6371}
6372
6373void helper_svm_check_io(uint32_t port, uint32_t param,
6374 uint32_t next_eip_addend)
6375{
6376}
6377#else
6378
6379static inline void svm_save_seg(target_phys_addr_t addr,
6380 const SegmentCache *sc)
6381{
6382 stw_phys(addr + offsetof(struct vmcb_seg, selector),
6383 sc->selector);
6384 stq_phys(addr + offsetof(struct vmcb_seg, base),
6385 sc->base);
6386 stl_phys(addr + offsetof(struct vmcb_seg, limit),
6387 sc->limit);
6388 stw_phys(addr + offsetof(struct vmcb_seg, attrib),
6389 ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
6390}
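/* Note: the VMCB stores segment attributes in a packed 12-bit form
   (descriptor byte 5 in bits 0..7, the flags nibble of byte 6 in bits
   8..11), while QEMU keeps them at bits 8..15 and 20..23 of sc->flags;
   the shift/mask pairs above and below convert between the two layouts. */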
6391
6392static inline void svm_load_seg(target_phys_addr_t addr, SegmentCache *sc)
6393{
6394 unsigned int flags;
6395
6396 sc->selector = lduw_phys(addr + offsetof(struct vmcb_seg, selector));
6397 sc->base = ldq_phys(addr + offsetof(struct vmcb_seg, base));
6398 sc->limit = ldl_phys(addr + offsetof(struct vmcb_seg, limit));
6399 flags = lduw_phys(addr + offsetof(struct vmcb_seg, attrib));
6400 sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
6401}
6402
6403static inline void svm_load_seg_cache(target_phys_addr_t addr,
6404 CPUState *env, int seg_reg)
6405{
6406 SegmentCache sc1, *sc = &sc1;
6407 svm_load_seg(addr, sc);
6408 cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
6409 sc->base, sc->limit, sc->flags);
6410}
6411
6412void helper_vmrun(int aflag, int next_eip_addend)
6413{
6414 target_ulong addr;
6415 uint32_t event_inj;
6416 uint32_t int_ctl;
6417
6418 helper_svm_check_intercept_param(SVM_EXIT_VMRUN, 0);
6419
6420 if (aflag == 2)
6421 addr = EAX;
6422 else
6423 addr = (uint32_t)EAX;
6424
6425 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmrun! " TARGET_FMT_lx "\n", addr);
6426
6427 env->vm_vmcb = addr;
6428
6429 /* save the current CPU state in the hsave page */
6430 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
6431 stl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
6432
6433 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base), env->idt.base);
6434 stl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
6435
6436 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
6437 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
6438 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
6439 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
6440 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
6441 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);
6442
6443 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
6444 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags), compute_eflags());
6445
6446 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.es),
6447 &env->segs[R_ES]);
6448 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.cs),
6449 &env->segs[R_CS]);
6450 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ss),
6451 &env->segs[R_SS]);
6452 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ds),
6453 &env->segs[R_DS]);
6454
6455 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip),
6456 EIP + next_eip_addend);
6457 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), ESP);
6458 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), EAX);
6459
6460 /* load the interception bitmaps so we do not need to access the
6461 vmcb in svm mode */
6462 env->intercept = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept));
6463 env->intercept_cr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_read));
6464 env->intercept_cr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_write));
6465 env->intercept_dr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_read));
6466 env->intercept_dr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_write));
6467 env->intercept_exceptions = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_exceptions));
6468
6469 /* enable intercepts */
6470 env->hflags |= HF_SVMI_MASK;
6471
6472 env->tsc_offset = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.tsc_offset));
6473
6474 env->gdt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base));
6475 env->gdt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit));
6476
6477 env->idt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base));
6478 env->idt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit));
6479
6480 /* clear exit_info_2 so we behave like the real hardware */
6481 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);
6482
6483 cpu_x86_update_cr0(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0)));
6484 cpu_x86_update_cr4(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4)));
6485 cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3)));
6486 env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2));
6487 int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
6488 env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
6489 if (int_ctl & V_INTR_MASKING_MASK) {
6490 env->v_tpr = int_ctl & V_TPR_MASK;
6491 env->hflags2 |= HF2_VINTR_MASK;
6492 if (env->eflags & IF_MASK)
6493 env->hflags2 |= HF2_HIF_MASK;
6494 }
6495
6496 cpu_load_efer(env,
6497 ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer)));
6498 env->eflags = 0;
6499 load_eflags(ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags)),
6500 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
6501 CC_OP = CC_OP_EFLAGS;
6502
6503 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.es),
6504 env, R_ES);
6505 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.cs),
6506 env, R_CS);
6507 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ss),
6508 env, R_SS);
6509 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ds),
6510 env, R_DS);
6511
6512 EIP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));
6513 env->eip = EIP;
6514 ESP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp));
6515 EAX = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax));
6516 env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7));
6517 env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6));
6518 cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl)));
6519
6520 /* FIXME: guest state consistency checks */
6521
6522 switch(ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
6523 case TLB_CONTROL_DO_NOTHING:
6524 break;
6525 case TLB_CONTROL_FLUSH_ALL_ASID:
6526 /* FIXME: this is not 100% correct but should work for now */
6527 tlb_flush(env, 1);
6528 break;
6529 }
6530
6531 env->hflags2 |= HF2_GIF_MASK;
6532
6533 if (int_ctl & V_IRQ_MASK) {
6534 env->interrupt_request |= CPU_INTERRUPT_VIRQ;
6535 }
6536
6537 /* maybe we need to inject an event */
6538 event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
6539 if (event_inj & SVM_EVTINJ_VALID) {
6540 uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
6541 uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
6542 uint32_t event_inj_err = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err));
6543
6544 qemu_log_mask(CPU_LOG_TB_IN_ASM, "Injecting(%#hx): ", valid_err);
6545 /* FIXME: need to implement valid_err */
6546 switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
6547 case SVM_EVTINJ_TYPE_INTR:
6548 env->exception_index = vector;
6549 env->error_code = event_inj_err;
6550 env->exception_is_int = 0;
6551 env->exception_next_eip = -1;
6552 qemu_log_mask(CPU_LOG_TB_IN_ASM, "INTR");
6553 /* XXX: is it always correct? */
6554 do_interrupt(vector, 0, 0, 0, 1);
6555 break;
6556 case SVM_EVTINJ_TYPE_NMI:
6557 env->exception_index = EXCP02_NMI;
6558 env->error_code = event_inj_err;
6559 env->exception_is_int = 0;
6560 env->exception_next_eip = EIP;
6561 qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI");
6562 cpu_loop_exit();
6563 break;
6564 case SVM_EVTINJ_TYPE_EXEPT:
6565 env->exception_index = vector;
6566 env->error_code = event_inj_err;
6567 env->exception_is_int = 0;
6568 env->exception_next_eip = -1;
6569 qemu_log_mask(CPU_LOG_TB_IN_ASM, "EXEPT");
6570 cpu_loop_exit();
6571 break;
6572 case SVM_EVTINJ_TYPE_SOFT:
6573 env->exception_index = vector;
6574 env->error_code = event_inj_err;
6575 env->exception_is_int = 1;
6576 env->exception_next_eip = EIP;
6577 qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT");
6578 cpu_loop_exit();
6579 break;
6580 }
6581 qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", env->exception_index, env->error_code);
6582 }
6583}
6584
6585void helper_vmmcall(void)
6586{
6587 helper_svm_check_intercept_param(SVM_EXIT_VMMCALL, 0);
6588 raise_exception(EXCP06_ILLOP);
6589}
6590
6591void helper_vmload(int aflag)
6592{
6593 target_ulong addr;
6594 helper_svm_check_intercept_param(SVM_EXIT_VMLOAD, 0);
6595
6596 if (aflag == 2)
6597 addr = EAX;
6598 else
6599 addr = (uint32_t)EAX;
6600
6601 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmload! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
6602 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
6603 env->segs[R_FS].base);
6604
6605 svm_load_seg_cache(addr + offsetof(struct vmcb, save.fs),
6606 env, R_FS);
6607 svm_load_seg_cache(addr + offsetof(struct vmcb, save.gs),
6608 env, R_GS);
6609 svm_load_seg(addr + offsetof(struct vmcb, save.tr),
6610 &env->tr);
6611 svm_load_seg(addr + offsetof(struct vmcb, save.ldtr),
6612 &env->ldt);
6613
6614#ifdef TARGET_X86_64
6615 env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base));
6616 env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar));
6617 env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar));
6618 env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask));
6619#endif
6620 env->star = ldq_phys(addr + offsetof(struct vmcb, save.star));
6621 env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs));
6622 env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_esp));
6623 env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_eip));
6624}
6625
6626void helper_vmsave(int aflag)
6627{
6628 target_ulong addr;
6629 helper_svm_check_intercept_param(SVM_EXIT_VMSAVE, 0);
6630
6631 if (aflag == 2)
6632 addr = EAX;
6633 else
6634 addr = (uint32_t)EAX;
6635
6636 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmsave! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
6637 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
6638 env->segs[R_FS].base);
6639
6640 svm_save_seg(addr + offsetof(struct vmcb, save.fs),
6641 &env->segs[R_FS]);
6642 svm_save_seg(addr + offsetof(struct vmcb, save.gs),
6643 &env->segs[R_GS]);
6644 svm_save_seg(addr + offsetof(struct vmcb, save.tr),
6645 &env->tr);
6646 svm_save_seg(addr + offsetof(struct vmcb, save.ldtr),
6647 &env->ldt);
6648
6649#ifdef TARGET_X86_64
6650 stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base), env->kernelgsbase);
6651 stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
6652 stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
6653 stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
6654#endif
6655 stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
6656 stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
6657 stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp), env->sysenter_esp);
6658 stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip), env->sysenter_eip);
6659}
6660
6661void helper_stgi(void)
6662{
6663 helper_svm_check_intercept_param(SVM_EXIT_STGI, 0);
6664 env->hflags2 |= HF2_GIF_MASK;
6665}
6666
6667void helper_clgi(void)
6668{
6669 helper_svm_check_intercept_param(SVM_EXIT_CLGI, 0);
6670 env->hflags2 &= ~HF2_GIF_MASK;
6671}
6672
6673void helper_skinit(void)
6674{
6675 helper_svm_check_intercept_param(SVM_EXIT_SKINIT, 0);
6676 /* XXX: not implemented */
6677 raise_exception(EXCP06_ILLOP);
6678}
6679
6680void helper_invlpga(int aflag)
6681{
6682 target_ulong addr;
6683 helper_svm_check_intercept_param(SVM_EXIT_INVLPGA, 0);
6684
6685 if (aflag == 2)
6686 addr = EAX;
6687 else
6688 addr = (uint32_t)EAX;
6689
6690 /* XXX: could use the ASID to decide whether the TLB flush is
6691 actually needed */
6692 tlb_flush_page(env, addr);
6693}
6694
6695void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
6696{
6697 if (likely(!(env->hflags & HF_SVMI_MASK)))
6698 return;
6699#ifndef VBOX
6700 switch(type) {
6701 case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
6702 if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
6703 helper_vmexit(type, param);
6704 }
6705 break;
6706 case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
6707 if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
6708 helper_vmexit(type, param);
6709 }
6710 break;
6711 case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
6712 if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
6713 helper_vmexit(type, param);
6714 }
6715 break;
6716 case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
6717 if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
6718 helper_vmexit(type, param);
6719 }
6720 break;
6721 case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
6722 if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
6723 helper_vmexit(type, param);
6724 }
6725 break;
6726 case SVM_EXIT_MSR:
6727 if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
6728 /* FIXME: this should be read in at vmrun (faster this way?) */
6729 uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.msrpm_base_pa));
6730 uint32_t t0, t1;
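 /* Note: the MSR permission map uses two bits per MSR (read and write
 intercept) in three 2K regions: MSRs 0x0..0x1fff at offset 0,
 0xc0000000..0xc0001fff at offset 0x800 and 0xc0010000..0xc0011fff at
 offset 0x1000.  t1 ends up as the byte offset and t0 as the bit within
 that byte; param (0 = read, 1 = write) selects which bit of the pair. */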
6731 switch((uint32_t)ECX) {
6732 case 0 ... 0x1fff:
6733 t0 = (ECX * 2) % 8;
6734 t1 = (ECX * 2) / 8;
6735 break;
6736 case 0xc0000000 ... 0xc0001fff:
6737 t0 = (8192 + ECX - 0xc0000000) * 2;
6738 t1 = (t0 / 8);
6739 t0 %= 8;
6740 break;
6741 case 0xc0010000 ... 0xc0011fff:
6742 t0 = (16384 + ECX - 0xc0010000) * 2;
6743 t1 = (t0 / 8);
6744 t0 %= 8;
6745 break;
6746 default:
6747 helper_vmexit(type, param);
6748 t0 = 0;
6749 t1 = 0;
6750 break;
6751 }
6752 if (ldub_phys(addr + t1) & ((1 << param) << t0))
6753 helper_vmexit(type, param);
6754 }
6755 break;
6756 default:
6757 if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
6758 helper_vmexit(type, param);
6759 }
6760 break;
6761 }
6762#else /* VBOX */
6763 AssertMsgFailed(("We shouldn't be here, HM supported differently!"));
6764#endif /* VBOX */
6765}
6766
6767void helper_svm_check_io(uint32_t port, uint32_t param,
6768 uint32_t next_eip_addend)
6769{
6770 if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
6771 /* FIXME: this should be read in at vmrun (faster this way?) */
6772 uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.iopm_base_pa));
6773 uint16_t mask = (1 << ((param >> 4) & 7)) - 1;
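 /* Note: the I/O permission map has one bit per port; bits 4..6 of param
 encode the access size in bytes, so mask covers every port the access
 touches and the 16-bit load lets the test span a byte boundary. */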
6774 if(lduw_phys(addr + port / 8) & (mask << (port & 7))) {
6775 /* next EIP */
6776 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
6777 env->eip + next_eip_addend);
6778 helper_vmexit(SVM_EXIT_IOIO, param | (port << 16));
6779 }
6780 }
6781}
6782
6783/* Note: currently only 32 bits of exit_code are used */
6784void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
6785{
6786 uint32_t int_ctl;
6787
6788 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016" PRIx64 ", " TARGET_FMT_lx ")!\n",
6789 exit_code, exit_info_1,
6790 ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2)),
6791 EIP);
6792
6793 if(env->hflags & HF_INHIBIT_IRQ_MASK) {
6794 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), SVM_INTERRUPT_SHADOW_MASK);
6795 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
6796 } else {
6797 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
6798 }
6799
6800 /* Save the VM state in the vmcb */
6801 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.es),
6802 &env->segs[R_ES]);
6803 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.cs),
6804 &env->segs[R_CS]);
6805 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ss),
6806 &env->segs[R_SS]);
6807 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ds),
6808 &env->segs[R_DS]);
6809
6810 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
6811 stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
6812
6813 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base), env->idt.base);
6814 stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
6815
6816 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
6817 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
6818 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
6819 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
6820 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);
6821
6822 int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
6823 int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
6824 int_ctl |= env->v_tpr & V_TPR_MASK;
6825 if (env->interrupt_request & CPU_INTERRUPT_VIRQ)
6826 int_ctl |= V_IRQ_MASK;
6827 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);
6828
6829 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags), compute_eflags());
6830 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip);
6831 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), ESP);
6832 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), EAX);
6833 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
6834 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
6835 stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl), env->hflags & HF_CPL_MASK);
6836
6837 /* Reload the host state from vm_hsave */
6838 env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
6839 env->hflags &= ~HF_SVMI_MASK;
6840 env->intercept = 0;
6841 env->intercept_exceptions = 0;
6842 env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
6843 env->tsc_offset = 0;
6844
6845 env->gdt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base));
6846 env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit));
6847
6848 env->idt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base));
6849 env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit));
6850
6851 cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0)) | CR0_PE_MASK);
6852 cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4)));
6853 cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3)));
6854 /* we need to set the efer after the crs so the hidden flags get
6855 set properly */
6856 cpu_load_efer(env,
6857 ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer)));
6858 env->eflags = 0;
6859 load_eflags(ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags)),
6860 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
6861 CC_OP = CC_OP_EFLAGS;
6862
6863 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.es),
6864 env, R_ES);
6865 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.cs),
6866 env, R_CS);
6867 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ss),
6868 env, R_SS);
6869 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ds),
6870 env, R_DS);
6871
6872 EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
6873 ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp));
6874 EAX = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax));
6875
6876 env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
6877 env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));
6878
6879 /* other setups */
6880 cpu_x86_set_cpl(env, 0);
6881 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code), exit_code);
6882 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1), exit_info_1);
6883
6884 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info),
6885 ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj)));
6886 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info_err),
6887 ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err)));
6888 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), 0);
6889
6890 env->hflags2 &= ~HF2_GIF_MASK;
6891 /* FIXME: Resets the current ASID register to zero (host ASID). */
6892
6893 /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */
6894
6895 /* Clears the TSC_OFFSET inside the processor. */
6896
6897 /* If the host is in PAE mode, the processor reloads the host's PDPEs
6898 from the page table indicated by the host's CR3. If the PDPEs contain
6899 illegal state, the processor causes a shutdown. */
6900
6901 /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
6902 env->cr[0] |= CR0_PE_MASK;
6903 env->eflags &= ~VM_MASK;
6904
6905 /* Disables all breakpoints in the host DR7 register. */
6906
6907 /* Checks the reloaded host state for consistency. */
6908
6909 /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
6910 host's code segment or non-canonical (in the case of long mode), a
6911 #GP fault is delivered inside the host. */
6912
6913 /* remove any pending exception */
6914 env->exception_index = -1;
6915 env->error_code = 0;
6916 env->old_exception = -1;
6917
6918 cpu_loop_exit();
6919}
6920
6921#endif
6922
6923/* MMX/SSE */
6924/* XXX: optimize by storing fpstt and fptags in the static cpu state */
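/* Note: env->fptags is an array of eight one-byte flags (1 = register
   empty), so the two 32-bit stores in each helper below update all eight
   tags at once: entering MMX marks every register valid, EMMS marks them
   all empty. */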
6925void helper_enter_mmx(void)
6926{
6927 env->fpstt = 0;
6928 *(uint32_t *)(env->fptags) = 0;
6929 *(uint32_t *)(env->fptags + 4) = 0;
6930}
6931
6932void helper_emms(void)
6933{
6934 /* set to empty state */
6935 *(uint32_t *)(env->fptags) = 0x01010101;
6936 *(uint32_t *)(env->fptags + 4) = 0x01010101;
6937}
6938
6939/* XXX: suppress */
6940void helper_movq(void *d, void *s)
6941{
6942 *(uint64_t *)d = *(uint64_t *)s;
6943}
6944
6945#define SHIFT 0
6946#include "ops_sse.h"
6947
6948#define SHIFT 1
6949#include "ops_sse.h"
6950
6951#define SHIFT 0
6952#include "helper_template.h"
6953#undef SHIFT
6954
6955#define SHIFT 1
6956#include "helper_template.h"
6957#undef SHIFT
6958
6959#define SHIFT 2
6960#include "helper_template.h"
6961#undef SHIFT
6962
6963#ifdef TARGET_X86_64
6964
6965#define SHIFT 3
6966#include "helper_template.h"
6967#undef SHIFT
6968
6969#endif
6970
6971/* bit operations */
6972target_ulong helper_bsf(target_ulong t0)
6973{
6974 int count;
6975 target_ulong res;
6976
6977 res = t0;
6978 count = 0;
6979 while ((res & 1) == 0) {
6980 count++;
6981 res >>= 1;
6982 }
6983 return count;
6984}
6985
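/* Note: with a non-zero wordsize this behaves like LZCNT on a wordsize-bit
   operand (returning wordsize when the operand is zero); with wordsize == 0
   it returns the bit index of the most significant set bit, which is how
   helper_bsr() below uses it. */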
6986target_ulong helper_lzcnt(target_ulong t0, int wordsize)
6987{
6988 int count;
6989 target_ulong res, mask;
6990
6991 if (wordsize > 0 && t0 == 0) {
6992 return wordsize;
6993 }
6994 res = t0;
6995 count = TARGET_LONG_BITS - 1;
6996 mask = (target_ulong)1 << (TARGET_LONG_BITS - 1);
6997 while ((res & mask) == 0) {
6998 count--;
6999 res <<= 1;
7000 }
7001 if (wordsize > 0) {
7002 return wordsize - 1 - count;
7003 }
7004 return count;
7005}
7006
7007target_ulong helper_bsr(target_ulong t0)
7008{
7009 return helper_lzcnt(t0, 0);
7010}
7011
7012static int compute_all_eflags(void)
7013{
7014 return CC_SRC;
7015}
7016
7017static int compute_c_eflags(void)
7018{
7019 return CC_SRC & CC_C;
7020}
7021
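/* Lazy condition-code evaluation: the translated code only records the last
   flag-affecting operation (CC_OP) together with its operands in
   CC_SRC/CC_DST; the helpers below materialize the full EFLAGS value, or
   just the carry flag, on demand. */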
7022uint32_t helper_cc_compute_all(int op)
7023{
7024 switch (op) {
7025 default: /* should never happen */ return 0;
7026
7027 case CC_OP_EFLAGS: return compute_all_eflags();
7028
7029 case CC_OP_MULB: return compute_all_mulb();
7030 case CC_OP_MULW: return compute_all_mulw();
7031 case CC_OP_MULL: return compute_all_mull();
7032
7033 case CC_OP_ADDB: return compute_all_addb();
7034 case CC_OP_ADDW: return compute_all_addw();
7035 case CC_OP_ADDL: return compute_all_addl();
7036
7037 case CC_OP_ADCB: return compute_all_adcb();
7038 case CC_OP_ADCW: return compute_all_adcw();
7039 case CC_OP_ADCL: return compute_all_adcl();
7040
7041 case CC_OP_SUBB: return compute_all_subb();
7042 case CC_OP_SUBW: return compute_all_subw();
7043 case CC_OP_SUBL: return compute_all_subl();
7044
7045 case CC_OP_SBBB: return compute_all_sbbb();
7046 case CC_OP_SBBW: return compute_all_sbbw();
7047 case CC_OP_SBBL: return compute_all_sbbl();
7048
7049 case CC_OP_LOGICB: return compute_all_logicb();
7050 case CC_OP_LOGICW: return compute_all_logicw();
7051 case CC_OP_LOGICL: return compute_all_logicl();
7052
7053 case CC_OP_INCB: return compute_all_incb();
7054 case CC_OP_INCW: return compute_all_incw();
7055 case CC_OP_INCL: return compute_all_incl();
7056
7057 case CC_OP_DECB: return compute_all_decb();
7058 case CC_OP_DECW: return compute_all_decw();
7059 case CC_OP_DECL: return compute_all_decl();
7060
7061 case CC_OP_SHLB: return compute_all_shlb();
7062 case CC_OP_SHLW: return compute_all_shlw();
7063 case CC_OP_SHLL: return compute_all_shll();
7064
7065 case CC_OP_SARB: return compute_all_sarb();
7066 case CC_OP_SARW: return compute_all_sarw();
7067 case CC_OP_SARL: return compute_all_sarl();
7068
7069#ifdef TARGET_X86_64
7070 case CC_OP_MULQ: return compute_all_mulq();
7071
7072 case CC_OP_ADDQ: return compute_all_addq();
7073
7074 case CC_OP_ADCQ: return compute_all_adcq();
7075
7076 case CC_OP_SUBQ: return compute_all_subq();
7077
7078 case CC_OP_SBBQ: return compute_all_sbbq();
7079
7080 case CC_OP_LOGICQ: return compute_all_logicq();
7081
7082 case CC_OP_INCQ: return compute_all_incq();
7083
7084 case CC_OP_DECQ: return compute_all_decq();
7085
7086 case CC_OP_SHLQ: return compute_all_shlq();
7087
7088 case CC_OP_SARQ: return compute_all_sarq();
7089#endif
7090 }
7091}
7092
7093uint32_t helper_cc_compute_c(int op)
7094{
7095 switch (op) {
7096 default: /* should never happen */ return 0;
7097
7098 case CC_OP_EFLAGS: return compute_c_eflags();
7099
7100 case CC_OP_MULB: return compute_c_mull();
7101 case CC_OP_MULW: return compute_c_mull();
7102 case CC_OP_MULL: return compute_c_mull();
7103
7104 case CC_OP_ADDB: return compute_c_addb();
7105 case CC_OP_ADDW: return compute_c_addw();
7106 case CC_OP_ADDL: return compute_c_addl();
7107
7108 case CC_OP_ADCB: return compute_c_adcb();
7109 case CC_OP_ADCW: return compute_c_adcw();
7110 case CC_OP_ADCL: return compute_c_adcl();
7111
7112 case CC_OP_SUBB: return compute_c_subb();
7113 case CC_OP_SUBW: return compute_c_subw();
7114 case CC_OP_SUBL: return compute_c_subl();
7115
7116 case CC_OP_SBBB: return compute_c_sbbb();
7117 case CC_OP_SBBW: return compute_c_sbbw();
7118 case CC_OP_SBBL: return compute_c_sbbl();
7119
7120 case CC_OP_LOGICB: return compute_c_logicb();
7121 case CC_OP_LOGICW: return compute_c_logicw();
7122 case CC_OP_LOGICL: return compute_c_logicl();
7123
7124 case CC_OP_INCB: return compute_c_incl();
7125 case CC_OP_INCW: return compute_c_incl();
7126 case CC_OP_INCL: return compute_c_incl();
7127
7128 case CC_OP_DECB: return compute_c_incl();
7129 case CC_OP_DECW: return compute_c_incl();
7130 case CC_OP_DECL: return compute_c_incl();
7131
7132 case CC_OP_SHLB: return compute_c_shlb();
7133 case CC_OP_SHLW: return compute_c_shlw();
7134 case CC_OP_SHLL: return compute_c_shll();
7135
7136 case CC_OP_SARB: return compute_c_sarl();
7137 case CC_OP_SARW: return compute_c_sarl();
7138 case CC_OP_SARL: return compute_c_sarl();
7139
7140#ifdef TARGET_X86_64
7141 case CC_OP_MULQ: return compute_c_mull();
7142
7143 case CC_OP_ADDQ: return compute_c_addq();
7144
7145 case CC_OP_ADCQ: return compute_c_adcq();
7146
7147 case CC_OP_SUBQ: return compute_c_subq();
7148
7149 case CC_OP_SBBQ: return compute_c_sbbq();
7150
7151 case CC_OP_LOGICQ: return compute_c_logicq();
7152
7153 case CC_OP_INCQ: return compute_c_incl();
7154
7155 case CC_OP_DECQ: return compute_c_incl();
7156
7157 case CC_OP_SHLQ: return compute_c_shlq();
7158
7159 case CC_OP_SARQ: return compute_c_sarl();
7160#endif
7161 }
7162}