VirtualBox

source: vbox/trunk/src/recompiler/target-i386/op_helper.c@ 45473

Last change on this file since 45473 was 45276, checked in by vboxsync, 11 years ago

Ring-1 compression patches, courtesy of trivirt AG:

  • main: diff to remove the hwvirt requirement for QNX
  • rem: diff for dealing with raw ring 0/1 selectors and general changes to allowed guest execution states
  • vmm: changes for using the guest's TSS selector index as our hypervisor TSS selector (makes str safe) (VBOX_WITH_SAFE_STR )
  • vmm: changes for dealing with guest ring 1 code (VBOX_WITH_RAW_RING1)
  • vmm: change to emulate smsw in RC/R0 (QNX uses this old style instruction a lot so going to qemu for emulation is very expensive)
  • vmm: change (hack) to kick out patm virtual handlers in case they conflict with guest GDT/TSS write monitors; we should allow multiple handlers per page, but that change would be rather invasive
  • Property svn:eol-style set to native
File size: 197.9 KB
Line 
1/*
2 * i386 helpers
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19
20/*
21 * Oracle LGPL Disclaimer: For the avoidance of doubt, except that if any license choice
22 * other than GPL or LGPL is available it will apply instead, Oracle elects to use only
23 * the Lesser General Public License version 2.1 (LGPLv2) at this time for any software where
24 * a choice of LGPL license versions is made available with the language indicating
25 * that LGPLv2 or any later version may be used, or where a choice of which version
26 * of the LGPL is applied is otherwise unspecified.
27 */
28
29#include "exec.h"
30#include "exec-all.h"
31#include "host-utils.h"
32#include "ioport.h"
33
34#ifdef VBOX
35# include "qemu-common.h"
36# include <math.h>
37# include "tcg.h"
38#endif /* VBOX */
39
40//#define DEBUG_PCALL
41
42
43#ifdef DEBUG_PCALL
44# define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__)
45# define LOG_PCALL_STATE(env) \
46 log_cpu_state_mask(CPU_LOG_PCALL, (env), X86_DUMP_CCOP)
47#else
48# define LOG_PCALL(...) do { } while (0)
49# define LOG_PCALL_STATE(env) do { } while (0)
50#endif
51
52
53#if 0
54#define raise_exception_err(a, b)\
55do {\
56 qemu_log("raise_exception line=%d\n", __LINE__);\
57 (raise_exception_err)(a, b);\
58} while (0)
59#endif
60
61static const uint8_t parity_table[256] = {
62 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
63 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
64 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
65 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
66 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
67 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
68 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
69 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
70 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
71 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
72 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
73 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
74 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
75 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
76 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
77 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
78 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
79 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
80 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
81 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
82 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
83 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
84 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
85 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
86 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
87 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
88 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
89 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
90 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
91 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
92 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
93 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
94};
95
96/* modulo 17 table */
97static const uint8_t rclw_table[32] = {
98 0, 1, 2, 3, 4, 5, 6, 7,
99 8, 9,10,11,12,13,14,15,
100 16, 0, 1, 2, 3, 4, 5, 6,
101 7, 8, 9,10,11,12,13,14,
102};
103
104/* modulo 9 table */
105static const uint8_t rclb_table[32] = {
106 0, 1, 2, 3, 4, 5, 6, 7,
107 8, 0, 1, 2, 3, 4, 5, 6,
108 7, 8, 0, 1, 2, 3, 4, 5,
109 6, 7, 8, 0, 1, 2, 3, 4,
110};
111
112static const CPU86_LDouble f15rk[7] =
113{
114 0.00000000000000000000L,
115 1.00000000000000000000L,
116 3.14159265358979323851L, /*pi*/
117 0.30102999566398119523L, /*lg2*/
118 0.69314718055994530943L, /*ln2*/
119 1.44269504088896340739L, /*l2e*/
120 3.32192809488736234781L, /*l2t*/
121};
122
123/* broken thread support */
124
125static spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;
126
127void helper_lock(void)
128{
129 spin_lock(&global_cpu_lock);
130}
131
132void helper_unlock(void)
133{
134 spin_unlock(&global_cpu_lock);
135}
136
137void helper_write_eflags(target_ulong t0, uint32_t update_mask)
138{
139 load_eflags(t0, update_mask);
140}
141
142target_ulong helper_read_eflags(void)
143{
144 uint32_t eflags;
145 eflags = helper_cc_compute_all(CC_OP);
146 eflags |= (DF & DF_MASK);
147 eflags |= env->eflags & ~(VM_MASK | RF_MASK);
148 return eflags;
149}
150
151#ifdef VBOX
152
153void helper_write_eflags_vme(target_ulong t0)
154{
155 unsigned int new_eflags = t0;
156
157 assert(env->eflags & (1<<VM_SHIFT));
158
159 /* if virtual interrupt pending and (virtual) interrupts will be enabled -> #GP */
160 /* if TF will be set -> #GP */
161 if ( ((new_eflags & IF_MASK) && (env->eflags & VIP_MASK))
162 || (new_eflags & TF_MASK)) {
163 raise_exception(EXCP0D_GPF);
164 } else {
165 load_eflags(new_eflags,
166 (TF_MASK | AC_MASK | ID_MASK | NT_MASK) & 0xffff);
167
168 if (new_eflags & IF_MASK) {
169 env->eflags |= VIF_MASK;
170 } else {
171 env->eflags &= ~VIF_MASK;
172 }
173 }
174}
175
176target_ulong helper_read_eflags_vme(void)
177{
178 uint32_t eflags;
179 eflags = helper_cc_compute_all(CC_OP);
180 eflags |= (DF & DF_MASK);
181 eflags |= env->eflags & ~(VM_MASK | RF_MASK);
182 if (env->eflags & VIF_MASK)
183 eflags |= IF_MASK;
184 else
185 eflags &= ~IF_MASK;
186
187 /* According to AMD manual, should be read with IOPL == 3 */
188 eflags |= (3 << IOPL_SHIFT);
189
190 /* We only use helper_read_eflags_vme() in 16-bits mode */
191 return eflags & 0xffff;
192}
193
194void helper_dump_state()
195{
196 LogRel(("CS:EIP=%08x:%08x, FLAGS=%08x\n", env->segs[R_CS].base, env->eip, env->eflags));
197 LogRel(("EAX=%08x\tECX=%08x\tEDX=%08x\tEBX=%08x\n",
198 (uint32_t)env->regs[R_EAX], (uint32_t)env->regs[R_ECX],
199 (uint32_t)env->regs[R_EDX], (uint32_t)env->regs[R_EBX]));
200 LogRel(("ESP=%08x\tEBP=%08x\tESI=%08x\tEDI=%08x\n",
201 (uint32_t)env->regs[R_ESP], (uint32_t)env->regs[R_EBP],
202 (uint32_t)env->regs[R_ESI], (uint32_t)env->regs[R_EDI]));
203}
204
205/**
206 * Updates e2 with the DESC_A_MASK, writes it to the descriptor table, and
207 * returns the updated e2.
208 *
209 * @returns e2 with A set.
210 * @param e2 The 2nd selector DWORD.
211 */
212static uint32_t set_segment_accessed(int selector, uint32_t e2)
213{
214 SegmentCache *dt = selector & X86_SEL_LDT ? &env->ldt : &env->gdt;
215 target_ulong ptr = dt->base + (selector & X86_SEL_MASK);
216
217 e2 |= DESC_A_MASK;
218 stl_kernel(ptr + 4, e2);
219 return e2;
220}
221
222#endif /* VBOX */
223
224/* return non zero if error */
225static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
226 int selector)
227{
228 SegmentCache *dt;
229 int index;
230 target_ulong ptr;
231
232#ifdef VBOX
233 /* Trying to load a selector with CPL=1? */
234 /* @todo this is a hack to correct the incorrect checking order for pending interrupts in the patm iret replacement code (corrected in the ring-1 version) */
235 /* @todo in theory the iret could fault and we'd still need this. */
236 if ((env->hflags & HF_CPL_MASK) == 0 && (selector & 3) == 1 && (env->state & CPU_RAW_RING0) && !EMIsRawRing1Enabled(env->pVM))
237 {
238 Log(("RPL 1 -> sel %04X -> %04X\n", selector, selector & 0xfffc));
239 selector = selector & 0xfffc;
240 }
241#endif /* VBOX */
242
243 if (selector & 0x4)
244 dt = &env->ldt;
245 else
246 dt = &env->gdt;
247 index = selector & ~7;
248 if ((index + 7) > dt->limit)
249 return -1;
250 ptr = dt->base + index;
251 *e1_ptr = ldl_kernel(ptr);
252 *e2_ptr = ldl_kernel(ptr + 4);
253 return 0;
254}
255
256static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
257{
258 unsigned int limit;
259 limit = (e1 & 0xffff) | (e2 & 0x000f0000);
260 if (e2 & DESC_G_MASK)
261 limit = (limit << 12) | 0xfff;
262 return limit;
263}
264
265static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
266{
267 return ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
268}
269
270static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
271{
272 sc->base = get_seg_base(e1, e2);
273 sc->limit = get_seg_limit(e1, e2);
274 sc->flags = e2;
275#ifdef VBOX
276 sc->newselector = 0;
277 sc->fVBoxFlags = CPUMSELREG_FLAGS_VALID;
278#endif
279}
280
281/* init the segment cache in vm86 mode. */
282static inline void load_seg_vm(int seg, int selector)
283{
284 selector &= 0xffff;
285#ifdef VBOX
286 /* flags must be 0xf3; expand-up read/write accessed data segment with DPL=3. (VT-x) */
287 unsigned flags = DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | DESC_A_MASK;
288 flags |= (3 << DESC_DPL_SHIFT);
289
290 cpu_x86_load_seg_cache(env, seg, selector,
291 (selector << 4), 0xffff, flags);
292#else /* VBOX */
293 cpu_x86_load_seg_cache(env, seg, selector,
294 (selector << 4), 0xffff, 0);
295#endif /* VBOX */
296}
297
298static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
299 uint32_t *esp_ptr, int dpl)
300{
301#ifndef VBOX
302 int type, index, shift;
303#else
304 unsigned int type, index, shift;
305#endif
306
307#if 0
308 {
309 int i;
310 printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
311 for(i=0;i<env->tr.limit;i++) {
312 printf("%02x ", env->tr.base[i]);
313 if ((i & 7) == 7) printf("\n");
314 }
315 printf("\n");
316 }
317#endif
318
319 if (!(env->tr.flags & DESC_P_MASK))
320 cpu_abort(env, "invalid tss");
321 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
322 if ((type & 7) != 1)
323 cpu_abort(env, "invalid tss type");
324 shift = type >> 3;
325 index = (dpl * 4 + 2) << shift;
326 if (index + (4 << shift) - 1 > env->tr.limit)
327 raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
328 if (shift == 0) {
329 *esp_ptr = lduw_kernel(env->tr.base + index);
330 *ss_ptr = lduw_kernel(env->tr.base + index + 2);
331 } else {
332 *esp_ptr = ldl_kernel(env->tr.base + index);
333 *ss_ptr = lduw_kernel(env->tr.base + index + 4);
334 }
335}
336
337/* XXX: merge with load_seg() */
338static void tss_load_seg(int seg_reg, int selector)
339{
340 uint32_t e1, e2;
341 int rpl, dpl, cpl;
342
343#ifdef VBOX
344 e1 = e2 = 0; /* gcc warning? */
345 cpl = env->hflags & HF_CPL_MASK;
346 /* Trying to load a selector with CPL=1? */
347 if (cpl == 0 && (selector & 3) == 1 && (env->state & CPU_RAW_RING0))
348 {
349 Log(("RPL 1 -> sel %04X -> %04X (tss_load_seg)\n", selector, selector & 0xfffc));
350 selector = selector & 0xfffc;
351 }
352#endif /* VBOX */
353
354 if ((selector & 0xfffc) != 0) {
355 if (load_segment(&e1, &e2, selector) != 0)
356 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
357 if (!(e2 & DESC_S_MASK))
358 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
359 rpl = selector & 3;
360 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
361 cpl = env->hflags & HF_CPL_MASK;
362 if (seg_reg == R_CS) {
363 if (!(e2 & DESC_CS_MASK))
364 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
365 /* XXX: is it correct ? */
366 if (dpl != rpl)
367 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
368 if ((e2 & DESC_C_MASK) && dpl > rpl)
369 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
370 } else if (seg_reg == R_SS) {
371 /* SS must be writable data */
372 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
373 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
374 if (dpl != cpl || dpl != rpl)
375 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
376 } else {
377 /* not readable code */
378 if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
379 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
380 /* if data or non conforming code, checks the rights */
381 if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
382 if (dpl < cpl || dpl < rpl)
383 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
384 }
385 }
386 if (!(e2 & DESC_P_MASK))
387 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
388 cpu_x86_load_seg_cache(env, seg_reg, selector,
389 get_seg_base(e1, e2),
390 get_seg_limit(e1, e2),
391 e2);
392 } else {
393 if (seg_reg == R_SS || seg_reg == R_CS)
394 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
395#ifdef VBOX
396# if 0 /** @todo now we ignore loading 0 selectors, need to check what is correct once */
397 cpu_x86_load_seg_cache(env, seg_reg, selector,
398 0, 0, 0);
399# endif
400#endif /* VBOX */
401 }
402}
403
404#define SWITCH_TSS_JMP 0
405#define SWITCH_TSS_IRET 1
406#define SWITCH_TSS_CALL 2
407
408/* XXX: restore CPU state in registers (PowerPC case) */
409static void switch_tss(int tss_selector,
410 uint32_t e1, uint32_t e2, int source,
411 uint32_t next_eip)
412{
413 int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
414 target_ulong tss_base;
415 uint32_t new_regs[8], new_segs[6];
416 uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
417 uint32_t old_eflags, eflags_mask;
418 SegmentCache *dt;
419#ifndef VBOX
420 int index;
421#else
422 unsigned int index;
423#endif
424 target_ulong ptr;
425
426 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
427 LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);
428
429 /* if task gate, we read the TSS segment and we load it */
430 if (type == 5) {
431 if (!(e2 & DESC_P_MASK))
432 raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
433 tss_selector = e1 >> 16;
434 if (tss_selector & 4)
435 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
436 if (load_segment(&e1, &e2, tss_selector) != 0)
437 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
438 if (e2 & DESC_S_MASK)
439 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
440 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
441 if ((type & 7) != 1)
442 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
443 }
444
445 if (!(e2 & DESC_P_MASK))
446 raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
447
448 if (type & 8)
449 tss_limit_max = 103;
450 else
451 tss_limit_max = 43;
452 tss_limit = get_seg_limit(e1, e2);
453 tss_base = get_seg_base(e1, e2);
454 if ((tss_selector & 4) != 0 ||
455 tss_limit < tss_limit_max)
456 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
457 old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
458 if (old_type & 8)
459 old_tss_limit_max = 103;
460 else
461 old_tss_limit_max = 43;
462
463 /* read all the registers from the new TSS */
464 if (type & 8) {
465 /* 32 bit */
466 new_cr3 = ldl_kernel(tss_base + 0x1c);
467 new_eip = ldl_kernel(tss_base + 0x20);
468 new_eflags = ldl_kernel(tss_base + 0x24);
469 for(i = 0; i < 8; i++)
470 new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
471 for(i = 0; i < 6; i++)
472 new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
473 new_ldt = lduw_kernel(tss_base + 0x60);
474 new_trap = ldl_kernel(tss_base + 0x64);
475 } else {
476 /* 16 bit */
477 new_cr3 = 0;
478 new_eip = lduw_kernel(tss_base + 0x0e);
479 new_eflags = lduw_kernel(tss_base + 0x10);
480 for(i = 0; i < 8; i++)
481 new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
482 for(i = 0; i < 4; i++)
483 new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
484 new_ldt = lduw_kernel(tss_base + 0x2a);
485 new_segs[R_FS] = 0;
486 new_segs[R_GS] = 0;
487 new_trap = 0;
488 }
489
490 /* NOTE: we must avoid memory exceptions during the task switch,
491 so we make dummy accesses before */
492 /* XXX: it can still fail in some cases, so a bigger hack is
493 necessary to valid the TLB after having done the accesses */
494
495 v1 = ldub_kernel(env->tr.base);
496 v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
497 stb_kernel(env->tr.base, v1);
498 stb_kernel(env->tr.base + old_tss_limit_max, v2);
499
500 /* clear busy bit (it is restartable) */
501 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
502 target_ulong ptr;
503 uint32_t e2;
504 ptr = env->gdt.base + (env->tr.selector & ~7);
505 e2 = ldl_kernel(ptr + 4);
506 e2 &= ~DESC_TSS_BUSY_MASK;
507 stl_kernel(ptr + 4, e2);
508 }
509 old_eflags = compute_eflags();
510 if (source == SWITCH_TSS_IRET)
511 old_eflags &= ~NT_MASK;
512
513 /* save the current state in the old TSS */
514 if (type & 8) {
515 /* 32 bit */
516 stl_kernel(env->tr.base + 0x20, next_eip);
517 stl_kernel(env->tr.base + 0x24, old_eflags);
518 stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
519 stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
520 stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
521 stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
522 stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
523 stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
524 stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
525 stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
526 for(i = 0; i < 6; i++)
527 stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
528#ifdef VBOX
529 /* Must store the ldt as it gets reloaded and might have been changed. */
530 stw_kernel(env->tr.base + 0x60, env->ldt.selector);
531#endif
532#if defined(VBOX) && defined(DEBUG)
533 printf("TSS 32 bits switch\n");
534 printf("Saving CS=%08X\n", env->segs[R_CS].selector);
535#endif
536 } else {
537 /* 16 bit */
538 stw_kernel(env->tr.base + 0x0e, next_eip);
539 stw_kernel(env->tr.base + 0x10, old_eflags);
540 stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
541 stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
542 stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
543 stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
544 stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
545 stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
546 stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
547 stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
548 for(i = 0; i < 4; i++)
549 stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
550#ifdef VBOX
551 /* Must store the ldt as it gets reloaded and might have been changed. */
552 stw_kernel(env->tr.base + 0x2a, env->ldt.selector);
553#endif
554 }
555
556 /* now if an exception occurs, it will occurs in the next task
557 context */
558
559 if (source == SWITCH_TSS_CALL) {
560 stw_kernel(tss_base, env->tr.selector);
561 new_eflags |= NT_MASK;
562 }
563
564 /* set busy bit */
565 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
566 target_ulong ptr;
567 uint32_t e2;
568 ptr = env->gdt.base + (tss_selector & ~7);
569 e2 = ldl_kernel(ptr + 4);
570 e2 |= DESC_TSS_BUSY_MASK;
571 stl_kernel(ptr + 4, e2);
572 }
573
574 /* set the new CPU state */
575 /* from this point, any exception which occurs can give problems */
576 env->cr[0] |= CR0_TS_MASK;
577 env->hflags |= HF_TS_MASK;
578 env->tr.selector = tss_selector;
579 env->tr.base = tss_base;
580 env->tr.limit = tss_limit;
581 env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;
582#ifdef VBOX
583 env->tr.fVBoxFlags = CPUMSELREG_FLAGS_VALID;
584 env->tr.newselector = 0;
585#endif
586
587 if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
588 cpu_x86_update_cr3(env, new_cr3);
589 }
590
591 /* load all registers without an exception, then reload them with
592 possible exception */
593 env->eip = new_eip;
594 eflags_mask = TF_MASK | AC_MASK | ID_MASK |
595 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
596 if (!(type & 8))
597 eflags_mask &= 0xffff;
598 load_eflags(new_eflags, eflags_mask);
599 /* XXX: what to do in 16 bit case ? */
600 EAX = new_regs[0];
601 ECX = new_regs[1];
602 EDX = new_regs[2];
603 EBX = new_regs[3];
604 ESP = new_regs[4];
605 EBP = new_regs[5];
606 ESI = new_regs[6];
607 EDI = new_regs[7];
608 if (new_eflags & VM_MASK) {
609 for(i = 0; i < 6; i++)
610 load_seg_vm(i, new_segs[i]);
611 /* in vm86, CPL is always 3 */
612 cpu_x86_set_cpl(env, 3);
613 } else {
614 /* CPL is set the RPL of CS */
615 cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
616 /* first just selectors as the rest may trigger exceptions */
617 for(i = 0; i < 6; i++)
618 cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
619 }
620
621 env->ldt.selector = new_ldt & ~4;
622 env->ldt.base = 0;
623 env->ldt.limit = 0;
624 env->ldt.flags = 0;
625#ifdef VBOX
626 env->ldt.fVBoxFlags = CPUMSELREG_FLAGS_VALID;
627 env->ldt.newselector = 0;
628#endif
629
630 /* load the LDT */
631 if (new_ldt & 4)
632 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
633
634 if ((new_ldt & 0xfffc) != 0) {
635 dt = &env->gdt;
636 index = new_ldt & ~7;
637 if ((index + 7) > dt->limit)
638 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
639 ptr = dt->base + index;
640 e1 = ldl_kernel(ptr);
641 e2 = ldl_kernel(ptr + 4);
642 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
643 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
644 if (!(e2 & DESC_P_MASK))
645 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
646 load_seg_cache_raw_dt(&env->ldt, e1, e2);
647 }
648
649 /* load the segments */
650 if (!(new_eflags & VM_MASK)) {
651 tss_load_seg(R_CS, new_segs[R_CS]);
652 tss_load_seg(R_SS, new_segs[R_SS]);
653 tss_load_seg(R_ES, new_segs[R_ES]);
654 tss_load_seg(R_DS, new_segs[R_DS]);
655 tss_load_seg(R_FS, new_segs[R_FS]);
656 tss_load_seg(R_GS, new_segs[R_GS]);
657 }
658
659 /* check that EIP is in the CS segment limits */
660 if (new_eip > env->segs[R_CS].limit) {
661 /* XXX: different exception if CALL ? */
662 raise_exception_err(EXCP0D_GPF, 0);
663 }
664
665#ifndef CONFIG_USER_ONLY
666 /* reset local breakpoints */
667 if (env->dr[7] & 0x55) {
668 for (i = 0; i < 4; i++) {
669 if (hw_breakpoint_enabled(env->dr[7], i) == 0x1)
670 hw_breakpoint_remove(env, i);
671 }
672 env->dr[7] &= ~0x55;
673 }
674#endif
675}
676
677/* check if Port I/O is allowed in TSS */
678static inline void check_io(int addr, int size)
679{
680#ifndef VBOX
681 int io_offset, val, mask;
682#else
683 int val, mask;
684 unsigned int io_offset;
685#endif /* VBOX */
686
687 /* TSS must be a valid 32 bit one */
688 if (!(env->tr.flags & DESC_P_MASK) ||
689 ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
690 env->tr.limit < 103)
691 goto fail;
692 io_offset = lduw_kernel(env->tr.base + 0x66);
693 io_offset += (addr >> 3);
694 /* Note: the check needs two bytes */
695 if ((io_offset + 1) > env->tr.limit)
696 goto fail;
697 val = lduw_kernel(env->tr.base + io_offset);
698 val >>= (addr & 7);
699 mask = (1 << size) - 1;
700 /* all bits must be zero to allow the I/O */
701 if ((val & mask) != 0) {
702 fail:
703 raise_exception_err(EXCP0D_GPF, 0);
704 }
705}
706
707#ifdef VBOX
708
709/* Keep in sync with gen_check_external_event() */
710void helper_check_external_event()
711{
712 if ( (env->interrupt_request & ( CPU_INTERRUPT_EXTERNAL_FLUSH_TLB
713 | CPU_INTERRUPT_EXTERNAL_EXIT
714 | CPU_INTERRUPT_EXTERNAL_TIMER
715 | CPU_INTERRUPT_EXTERNAL_DMA))
716 || ( (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD)
717 && (env->eflags & IF_MASK)
718 && !(env->hflags & HF_INHIBIT_IRQ_MASK) ) )
719 {
720 helper_external_event();
721 }
722
723}
724
725void helper_sync_seg(uint32_t reg)
726{
727 if (env->segs[reg].newselector)
728 sync_seg(env, reg, env->segs[reg].newselector);
729}
730
731#endif /* VBOX */
732
733void helper_check_iob(uint32_t t0)
734{
735 check_io(t0, 1);
736}
737
738void helper_check_iow(uint32_t t0)
739{
740 check_io(t0, 2);
741}
742
743void helper_check_iol(uint32_t t0)
744{
745 check_io(t0, 4);
746}
747
748void helper_outb(uint32_t port, uint32_t data)
749{
750#ifndef VBOX
751 cpu_outb(port, data & 0xff);
752#else
753 cpu_outb(env, port, data & 0xff);
754#endif
755}
756
757target_ulong helper_inb(uint32_t port)
758{
759#ifndef VBOX
760 return cpu_inb(port);
761#else
762 return cpu_inb(env, port);
763#endif
764}
765
766void helper_outw(uint32_t port, uint32_t data)
767{
768#ifndef VBOX
769 cpu_outw(port, data & 0xffff);
770#else
771 cpu_outw(env, port, data & 0xffff);
772#endif
773}
774
775target_ulong helper_inw(uint32_t port)
776{
777#ifndef VBOX
778 return cpu_inw(port);
779#else
780 return cpu_inw(env, port);
781#endif
782}
783
784void helper_outl(uint32_t port, uint32_t data)
785{
786#ifndef VBOX
787 cpu_outl(port, data);
788#else
789 cpu_outl(env, port, data);
790#endif
791}
792
793target_ulong helper_inl(uint32_t port)
794{
795#ifndef VBOX
796 return cpu_inl(port);
797#else
798 return cpu_inl(env, port);
799#endif
800}
801
802static inline unsigned int get_sp_mask(unsigned int e2)
803{
804 if (e2 & DESC_B_MASK)
805 return 0xffffffff;
806 else
807 return 0xffff;
808}
809
810static int exeption_has_error_code(int intno)
811{
812 switch(intno) {
813 case 8:
814 case 10:
815 case 11:
816 case 12:
817 case 13:
818 case 14:
819 case 17:
820 return 1;
821 }
822 return 0;
823}
824
825#ifdef TARGET_X86_64
826#define SET_ESP(val, sp_mask)\
827do {\
828 if ((sp_mask) == 0xffff)\
829 ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
830 else if ((sp_mask) == 0xffffffffLL)\
831 ESP = (uint32_t)(val);\
832 else\
833 ESP = (val);\
834} while (0)
835#else
836#define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
837#endif
838
839/* in 64-bit machines, this can overflow. So this segment addition macro
840 * can be used to trim the value to 32-bit whenever needed */
841#define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))
842
843/* XXX: add a is_user flag to have proper security support */
844#define PUSHW(ssp, sp, sp_mask, val)\
845{\
846 sp -= 2;\
847 stw_kernel((ssp) + (sp & (sp_mask)), (val));\
848}
849
850#define PUSHL(ssp, sp, sp_mask, val)\
851{\
852 sp -= 4;\
853 stl_kernel(SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val));\
854}
855
856#define POPW(ssp, sp, sp_mask, val)\
857{\
858 val = lduw_kernel((ssp) + (sp & (sp_mask)));\
859 sp += 2;\
860}
861
862#define POPL(ssp, sp, sp_mask, val)\
863{\
864 val = (uint32_t)ldl_kernel(SEG_ADDL(ssp, sp, sp_mask));\
865 sp += 4;\
866}
867
868/* protected mode interrupt */
869static void do_interrupt_protected(int intno, int is_int, int error_code,
870 unsigned int next_eip, int is_hw)
871{
872 SegmentCache *dt;
873 target_ulong ptr, ssp;
874 int type, dpl, selector, ss_dpl, cpl;
875 int has_error_code, new_stack, shift;
876 uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0;
877 uint32_t old_eip, sp_mask;
878
879#ifdef VBOX
880 if (remR3NotifyTrap(env, intno, error_code, next_eip) != VINF_SUCCESS)
881 cpu_loop_exit();
882#endif
883
884 has_error_code = 0;
885 if (!is_int && !is_hw)
886 has_error_code = exeption_has_error_code(intno);
887 if (is_int)
888 old_eip = next_eip;
889 else
890 old_eip = env->eip;
891
892 dt = &env->idt;
893#ifndef VBOX
894 if (intno * 8 + 7 > dt->limit)
895#else
896 if ((unsigned)intno * 8 + 7 > dt->limit)
897#endif
898 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
899 ptr = dt->base + intno * 8;
900 e1 = ldl_kernel(ptr);
901 e2 = ldl_kernel(ptr + 4);
902 /* check gate type */
903 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
904 switch(type) {
905 case 5: /* task gate */
906#ifdef VBOX
907 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
908 cpl = env->hflags & HF_CPL_MASK;
909 /* check privilege if software int */
910 if (is_int && dpl < cpl)
911 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
912#endif
913 /* must do that check here to return the correct error code */
914 if (!(e2 & DESC_P_MASK))
915 raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
916 switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
917 if (has_error_code) {
918 int type;
919 uint32_t mask;
920 /* push the error code */
921 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
922 shift = type >> 3;
923 if (env->segs[R_SS].flags & DESC_B_MASK)
924 mask = 0xffffffff;
925 else
926 mask = 0xffff;
927 esp = (ESP - (2 << shift)) & mask;
928 ssp = env->segs[R_SS].base + esp;
929 if (shift)
930 stl_kernel(ssp, error_code);
931 else
932 stw_kernel(ssp, error_code);
933 SET_ESP(esp, mask);
934 }
935 return;
936 case 6: /* 286 interrupt gate */
937 case 7: /* 286 trap gate */
938 case 14: /* 386 interrupt gate */
939 case 15: /* 386 trap gate */
940 break;
941 default:
942 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
943 break;
944 }
945 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
946 cpl = env->hflags & HF_CPL_MASK;
947 /* check privilege if software int */
948 if (is_int && dpl < cpl)
949 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
950 /* check valid bit */
951 if (!(e2 & DESC_P_MASK))
952 raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
953 selector = e1 >> 16;
954 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
955 if ((selector & 0xfffc) == 0)
956 raise_exception_err(EXCP0D_GPF, 0);
957
958 if (load_segment(&e1, &e2, selector) != 0)
959 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
960 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
961 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
962 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
963 if (dpl > cpl)
964 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
965 if (!(e2 & DESC_P_MASK))
966 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
967 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
968 /* to inner privilege */
969 get_ss_esp_from_tss(&ss, &esp, dpl);
970 if ((ss & 0xfffc) == 0)
971 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
972 if ((ss & 3) != dpl)
973 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
974 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
975 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
976 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
977 if (ss_dpl != dpl)
978 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
979 if (!(ss_e2 & DESC_S_MASK) ||
980 (ss_e2 & DESC_CS_MASK) ||
981 !(ss_e2 & DESC_W_MASK))
982 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
983 if (!(ss_e2 & DESC_P_MASK))
984#ifdef VBOX /* See page 3-477 of 253666.pdf */
985 raise_exception_err(EXCP0C_STACK, ss & 0xfffc);
986#else
987 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
988#endif
989 new_stack = 1;
990 sp_mask = get_sp_mask(ss_e2);
991 ssp = get_seg_base(ss_e1, ss_e2);
992#if defined(VBOX) && defined(DEBUG)
993 printf("new stack %04X:%08X gate dpl=%d\n", ss, esp, dpl);
994#endif
995 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
996 /* to same privilege */
997 if (env->eflags & VM_MASK)
998 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
999 new_stack = 0;
1000 sp_mask = get_sp_mask(env->segs[R_SS].flags);
1001 ssp = env->segs[R_SS].base;
1002 esp = ESP;
1003 dpl = cpl;
1004 } else {
1005 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1006 new_stack = 0; /* avoid warning */
1007 sp_mask = 0; /* avoid warning */
1008 ssp = 0; /* avoid warning */
1009 esp = 0; /* avoid warning */
1010 }
1011
1012 shift = type >> 3;
1013
1014#if 0
1015 /* XXX: check that enough room is available */
1016 push_size = 6 + (new_stack << 2) + (has_error_code << 1);
1017 if (env->eflags & VM_MASK)
1018 push_size += 8;
1019 push_size <<= shift;
1020#endif
1021 if (shift == 1) {
1022 if (new_stack) {
1023 if (env->eflags & VM_MASK) {
1024 PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
1025 PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
1026 PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
1027 PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
1028 }
1029 PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
1030 PUSHL(ssp, esp, sp_mask, ESP);
1031 }
1032 PUSHL(ssp, esp, sp_mask, compute_eflags());
1033 PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
1034 PUSHL(ssp, esp, sp_mask, old_eip);
1035 if (has_error_code) {
1036 PUSHL(ssp, esp, sp_mask, error_code);
1037 }
1038 } else {
1039 if (new_stack) {
1040 if (env->eflags & VM_MASK) {
1041 PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
1042 PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
1043 PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
1044 PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
1045 }
1046 PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
1047 PUSHW(ssp, esp, sp_mask, ESP);
1048 }
1049 PUSHW(ssp, esp, sp_mask, compute_eflags());
1050 PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
1051 PUSHW(ssp, esp, sp_mask, old_eip);
1052 if (has_error_code) {
1053 PUSHW(ssp, esp, sp_mask, error_code);
1054 }
1055 }
1056
1057 if (new_stack) {
1058 if (env->eflags & VM_MASK) {
1059 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
1060 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
1061 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
1062 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
1063 }
1064 ss = (ss & ~3) | dpl;
1065 cpu_x86_load_seg_cache(env, R_SS, ss,
1066 ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
1067 }
1068 SET_ESP(esp, sp_mask);
1069
1070 selector = (selector & ~3) | dpl;
1071 cpu_x86_load_seg_cache(env, R_CS, selector,
1072 get_seg_base(e1, e2),
1073 get_seg_limit(e1, e2),
1074 e2);
1075 cpu_x86_set_cpl(env, dpl);
1076 env->eip = offset;
1077
1078 /* interrupt gate clear IF mask */
1079 if ((type & 1) == 0) {
1080 env->eflags &= ~IF_MASK;
1081 }
1082#ifndef VBOX
1083 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
1084#else
1085 /*
1086 * We must clear VIP/VIF too on interrupt entry, as otherwise FreeBSD
1087 * gets confused by seemingly changed EFLAGS. See #3491 and
1088 * public bug #2341.
1089 */
1090 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK | VIF_MASK | VIP_MASK);
1091#endif
1092}
1093
1094#ifdef VBOX
1095
1096/* check if VME interrupt redirection is enabled in TSS */
1097DECLINLINE(bool) is_vme_irq_redirected(int intno)
1098{
1099 unsigned int io_offset, intredir_offset;
1100 unsigned char val, mask;
1101
1102 /* TSS must be a valid 32 bit one */
1103 if (!(env->tr.flags & DESC_P_MASK) ||
1104 ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
1105 env->tr.limit < 103)
1106 goto fail;
1107 io_offset = lduw_kernel(env->tr.base + 0x66);
1108 /* Make sure the io bitmap offset is valid; anything less than sizeof(VBOXTSS) means there's none. */
1109 if (io_offset < 0x68 + 0x20)
1110 io_offset = 0x68 + 0x20;
1111 /* the virtual interrupt redirection bitmap is located below the io bitmap */
1112 intredir_offset = io_offset - 0x20;
1113
1114 intredir_offset += (intno >> 3);
1115 if ((intredir_offset) > env->tr.limit)
1116 goto fail;
1117
1118 val = ldub_kernel(env->tr.base + intredir_offset);
1119 mask = 1 << (unsigned char)(intno & 7);
1120
1121 /* bit set means no redirection. */
1122 if ((val & mask) != 0) {
1123 return false;
1124 }
1125 return true;
1126
1127fail:
1128 raise_exception_err(EXCP0D_GPF, 0);
1129 return true;
1130}
1131
1132/* V86 mode software interrupt with CR4.VME=1 */
1133static void do_soft_interrupt_vme(int intno, int error_code, unsigned int next_eip)
1134{
1135 target_ulong ptr, ssp;
1136 int selector;
1137 uint32_t offset, esp;
1138 uint32_t old_cs, old_eflags;
1139 uint32_t iopl;
1140
1141 iopl = ((env->eflags >> IOPL_SHIFT) & 3);
1142
1143 if (!is_vme_irq_redirected(intno))
1144 {
1145 if (iopl == 3)
1146 {
1147 do_interrupt_protected(intno, 1, error_code, next_eip, 0);
1148 return;
1149 }
1150 else
1151 raise_exception_err(EXCP0D_GPF, 0);
1152 }
1153
1154 /* virtual mode idt is at linear address 0 */
1155 ptr = 0 + intno * 4;
1156 offset = lduw_kernel(ptr);
1157 selector = lduw_kernel(ptr + 2);
1158 esp = ESP;
1159 ssp = env->segs[R_SS].base;
1160 old_cs = env->segs[R_CS].selector;
1161
1162 old_eflags = compute_eflags();
1163 if (iopl < 3)
1164 {
1165 /* copy VIF into IF and set IOPL to 3 */
1166 if (env->eflags & VIF_MASK)
1167 old_eflags |= IF_MASK;
1168 else
1169 old_eflags &= ~IF_MASK;
1170
1171 old_eflags |= (3 << IOPL_SHIFT);
1172 }
1173
1174 /* XXX: use SS segment size ? */
1175 PUSHW(ssp, esp, 0xffff, old_eflags);
1176 PUSHW(ssp, esp, 0xffff, old_cs);
1177 PUSHW(ssp, esp, 0xffff, next_eip);
1178
1179 /* update processor state */
1180 ESP = (ESP & ~0xffff) | (esp & 0xffff);
1181 env->eip = offset;
1182 env->segs[R_CS].selector = selector;
1183 env->segs[R_CS].base = (selector << 4);
1184 env->eflags &= ~(TF_MASK | RF_MASK);
1185
1186 if (iopl < 3)
1187 env->eflags &= ~VIF_MASK;
1188 else
1189 env->eflags &= ~IF_MASK;
1190}
1191
1192#endif /* VBOX */
1193
1194#ifdef TARGET_X86_64
1195
1196#define PUSHQ(sp, val)\
1197{\
1198 sp -= 8;\
1199 stq_kernel(sp, (val));\
1200}
1201
1202#define POPQ(sp, val)\
1203{\
1204 val = ldq_kernel(sp);\
1205 sp += 8;\
1206}
1207
1208static inline target_ulong get_rsp_from_tss(int level)
1209{
1210 int index;
1211
1212#if 0
1213 printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
1214 env->tr.base, env->tr.limit);
1215#endif
1216
1217 if (!(env->tr.flags & DESC_P_MASK))
1218 cpu_abort(env, "invalid tss");
1219 index = 8 * level + 4;
1220 if ((index + 7) > env->tr.limit)
1221 raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
1222 return ldq_kernel(env->tr.base + index);
1223}
1224
1225/* 64 bit interrupt */
1226static void do_interrupt64(int intno, int is_int, int error_code,
1227 target_ulong next_eip, int is_hw)
1228{
1229 SegmentCache *dt;
1230 target_ulong ptr;
1231 int type, dpl, selector, cpl, ist;
1232 int has_error_code, new_stack;
1233 uint32_t e1, e2, e3, ss;
1234 target_ulong old_eip, esp, offset;
1235
1236#ifdef VBOX
1237 if (remR3NotifyTrap(env, intno, error_code, next_eip) != VINF_SUCCESS)
1238 cpu_loop_exit();
1239#endif
1240
1241 has_error_code = 0;
1242 if (!is_int && !is_hw)
1243 has_error_code = exeption_has_error_code(intno);
1244 if (is_int)
1245 old_eip = next_eip;
1246 else
1247 old_eip = env->eip;
1248
1249 dt = &env->idt;
1250 if (intno * 16 + 15 > dt->limit)
1251 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
1252 ptr = dt->base + intno * 16;
1253 e1 = ldl_kernel(ptr);
1254 e2 = ldl_kernel(ptr + 4);
1255 e3 = ldl_kernel(ptr + 8);
1256 /* check gate type */
1257 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
1258 switch(type) {
1259 case 14: /* 386 interrupt gate */
1260 case 15: /* 386 trap gate */
1261 break;
1262 default:
1263 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
1264 break;
1265 }
1266 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1267 cpl = env->hflags & HF_CPL_MASK;
1268 /* check privilege if software int */
1269 if (is_int && dpl < cpl)
1270 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
1271 /* check valid bit */
1272 if (!(e2 & DESC_P_MASK))
1273 raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2);
1274 selector = e1 >> 16;
1275 offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
1276 ist = e2 & 7;
1277 if ((selector & 0xfffc) == 0)
1278 raise_exception_err(EXCP0D_GPF, 0);
1279
1280 if (load_segment(&e1, &e2, selector) != 0)
1281 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1282 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
1283 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1284 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1285 if (dpl > cpl)
1286 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1287 if (!(e2 & DESC_P_MASK))
1288 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
1289 if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))
1290 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1291 if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
1292 /* to inner privilege */
1293 if (ist != 0)
1294 esp = get_rsp_from_tss(ist + 3);
1295 else
1296 esp = get_rsp_from_tss(dpl);
1297 esp &= ~0xfLL; /* align stack */
1298 ss = 0;
1299 new_stack = 1;
1300 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
1301 /* to same privilege */
1302 if (env->eflags & VM_MASK)
1303 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1304 new_stack = 0;
1305 if (ist != 0)
1306 esp = get_rsp_from_tss(ist + 3);
1307 else
1308 esp = ESP;
1309 esp &= ~0xfLL; /* align stack */
1310 dpl = cpl;
1311 } else {
1312 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1313 new_stack = 0; /* avoid warning */
1314 esp = 0; /* avoid warning */
1315 }
1316
1317 PUSHQ(esp, env->segs[R_SS].selector);
1318 PUSHQ(esp, ESP);
1319 PUSHQ(esp, compute_eflags());
1320 PUSHQ(esp, env->segs[R_CS].selector);
1321 PUSHQ(esp, old_eip);
1322 if (has_error_code) {
1323 PUSHQ(esp, error_code);
1324 }
1325
1326 if (new_stack) {
1327 ss = 0 | dpl;
1328 cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
1329 }
1330 ESP = esp;
1331
1332 selector = (selector & ~3) | dpl;
1333 cpu_x86_load_seg_cache(env, R_CS, selector,
1334 get_seg_base(e1, e2),
1335 get_seg_limit(e1, e2),
1336 e2);
1337 cpu_x86_set_cpl(env, dpl);
1338 env->eip = offset;
1339
1340 /* interrupt gate clear IF mask */
1341 if ((type & 1) == 0) {
1342 env->eflags &= ~IF_MASK;
1343 }
1344#ifndef VBOX
1345 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
1346#else /* VBOX */
1347 /*
1348 * We must clear VIP/VIF too on interrupt entry, as otherwise FreeBSD
1349 * gets confused by seemingly changed EFLAGS. See #3491 and
1350 * public bug #2341.
1351 */
1352 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK | VIF_MASK | VIP_MASK);
1353#endif /* VBOX */
1354}
1355#endif
1356
1357#ifdef TARGET_X86_64
1358#if defined(CONFIG_USER_ONLY)
1359void helper_syscall(int next_eip_addend)
1360{
1361 env->exception_index = EXCP_SYSCALL;
1362 env->exception_next_eip = env->eip + next_eip_addend;
1363 cpu_loop_exit();
1364}
1365#else
1366void helper_syscall(int next_eip_addend)
1367{
1368 int selector;
1369
1370 if (!(env->efer & MSR_EFER_SCE)) {
1371 raise_exception_err(EXCP06_ILLOP, 0);
1372 }
1373 selector = (env->star >> 32) & 0xffff;
1374 if (env->hflags & HF_LMA_MASK) {
1375 int code64;
1376
1377 ECX = env->eip + next_eip_addend;
1378 env->regs[11] = compute_eflags();
1379
1380 code64 = env->hflags & HF_CS64_MASK;
1381
1382 cpu_x86_set_cpl(env, 0);
1383 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1384 0, 0xffffffff,
1385 DESC_G_MASK | DESC_P_MASK |
1386 DESC_S_MASK |
1387 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
1388 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1389 0, 0xffffffff,
1390 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1391 DESC_S_MASK |
1392 DESC_W_MASK | DESC_A_MASK);
1393 env->eflags &= ~env->fmask;
1394 load_eflags(env->eflags, 0);
1395 if (code64)
1396 env->eip = env->lstar;
1397 else
1398 env->eip = env->cstar;
1399 } else {
1400 ECX = (uint32_t)(env->eip + next_eip_addend);
1401
1402 cpu_x86_set_cpl(env, 0);
1403 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1404 0, 0xffffffff,
1405 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1406 DESC_S_MASK |
1407 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1408 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1409 0, 0xffffffff,
1410 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1411 DESC_S_MASK |
1412 DESC_W_MASK | DESC_A_MASK);
1413 env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
1414 env->eip = (uint32_t)env->star;
1415 }
1416}
1417#endif
1418#endif
1419
1420#ifdef TARGET_X86_64
1421void helper_sysret(int dflag)
1422{
1423 int cpl, selector;
1424
1425 if (!(env->efer & MSR_EFER_SCE)) {
1426 raise_exception_err(EXCP06_ILLOP, 0);
1427 }
1428 cpl = env->hflags & HF_CPL_MASK;
1429 if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
1430 raise_exception_err(EXCP0D_GPF, 0);
1431 }
1432 selector = (env->star >> 48) & 0xffff;
1433 if (env->hflags & HF_LMA_MASK) {
1434 if (dflag == 2) {
1435 cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
1436 0, 0xffffffff,
1437 DESC_G_MASK | DESC_P_MASK |
1438 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1439 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
1440 DESC_L_MASK);
1441 env->eip = ECX;
1442 } else {
1443 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1444 0, 0xffffffff,
1445 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1446 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1447 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1448 env->eip = (uint32_t)ECX;
1449 }
1450 cpu_x86_load_seg_cache(env, R_SS, selector + 8,
1451 0, 0xffffffff,
1452 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1453 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1454 DESC_W_MASK | DESC_A_MASK);
1455 load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
1456 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
1457 cpu_x86_set_cpl(env, 3);
1458 } else {
1459 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1460 0, 0xffffffff,
1461 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1462 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1463 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1464 env->eip = (uint32_t)ECX;
1465 cpu_x86_load_seg_cache(env, R_SS, selector + 8,
1466 0, 0xffffffff,
1467 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1468 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1469 DESC_W_MASK | DESC_A_MASK);
1470 env->eflags |= IF_MASK;
1471 cpu_x86_set_cpl(env, 3);
1472 }
1473}
1474#endif
1475
1476#ifdef VBOX
1477
1478/**
1479 * Checks and processes external VMM events.
1480 * Called by op_check_external_event() when any of the flags is set and can be serviced.
1481 */
1482void helper_external_event(void)
1483{
1484# if defined(RT_OS_DARWIN) && defined(VBOX_STRICT)
1485 uintptr_t uSP;
1486# ifdef RT_ARCH_AMD64
1487 __asm__ __volatile__("movq %%rsp, %0" : "=r" (uSP));
1488# else
1489 __asm__ __volatile__("movl %%esp, %0" : "=r" (uSP));
1490# endif
1491 AssertMsg(!(uSP & 15), ("xSP=%#p\n", uSP));
1492# endif
1493 /* Keep in sync with flags checked by gen_check_external_event() */
1494 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD)
1495 {
1496 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1497 ~CPU_INTERRUPT_EXTERNAL_HARD);
1498 cpu_interrupt(env, CPU_INTERRUPT_HARD);
1499 }
1500 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_EXIT)
1501 {
1502 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1503 ~CPU_INTERRUPT_EXTERNAL_EXIT);
1504 cpu_exit(env);
1505 }
1506 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_DMA)
1507 {
1508 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1509 ~CPU_INTERRUPT_EXTERNAL_DMA);
1510 remR3DmaRun(env);
1511 }
1512 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_TIMER)
1513 {
1514 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1515 ~CPU_INTERRUPT_EXTERNAL_TIMER);
1516 remR3TimersRun(env);
1517 }
1518 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_FLUSH_TLB)
1519 {
1520 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1521 ~CPU_INTERRUPT_EXTERNAL_HARD);
1522 cpu_interrupt(env, CPU_INTERRUPT_HARD);
1523 }
1524}
1525
1526/* helper for recording call instruction addresses for later scanning */
1527void helper_record_call()
1528{
1529 if ( !(env->state & CPU_RAW_RING0)
1530 && (env->cr[0] & CR0_PG_MASK)
1531 && !(env->eflags & X86_EFL_IF))
1532 remR3RecordCall(env);
1533}
1534
1535#endif /* VBOX */
1536
1537/* real mode interrupt */
1538static void do_interrupt_real(int intno, int is_int, int error_code,
1539 unsigned int next_eip)
1540{
1541 SegmentCache *dt;
1542 target_ulong ptr, ssp;
1543 int selector;
1544 uint32_t offset, esp;
1545 uint32_t old_cs, old_eip;
1546
1547 /* real mode (simpler !) */
1548 dt = &env->idt;
1549#ifndef VBOX
1550 if (intno * 4 + 3 > dt->limit)
1551#else
1552 if ((unsigned)intno * 4 + 3 > dt->limit)
1553#endif
1554 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
1555 ptr = dt->base + intno * 4;
1556 offset = lduw_kernel(ptr);
1557 selector = lduw_kernel(ptr + 2);
1558 esp = ESP;
1559 ssp = env->segs[R_SS].base;
1560 if (is_int)
1561 old_eip = next_eip;
1562 else
1563 old_eip = env->eip;
1564 old_cs = env->segs[R_CS].selector;
1565 /* XXX: use SS segment size ? */
1566 PUSHW(ssp, esp, 0xffff, compute_eflags());
1567 PUSHW(ssp, esp, 0xffff, old_cs);
1568 PUSHW(ssp, esp, 0xffff, old_eip);
1569
1570 /* update processor state */
1571 ESP = (ESP & ~0xffff) | (esp & 0xffff);
1572 env->eip = offset;
1573 env->segs[R_CS].selector = selector;
1574 env->segs[R_CS].base = (selector << 4);
1575 env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
1576}
1577
1578/* fake user mode interrupt */
1579void do_interrupt_user(int intno, int is_int, int error_code,
1580 target_ulong next_eip)
1581{
1582 SegmentCache *dt;
1583 target_ulong ptr;
1584 int dpl, cpl, shift;
1585 uint32_t e2;
1586
1587 dt = &env->idt;
1588 if (env->hflags & HF_LMA_MASK) {
1589 shift = 4;
1590 } else {
1591 shift = 3;
1592 }
1593 ptr = dt->base + (intno << shift);
1594 e2 = ldl_kernel(ptr + 4);
1595
1596 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1597 cpl = env->hflags & HF_CPL_MASK;
1598 /* check privilege if software int */
1599 if (is_int && dpl < cpl)
1600 raise_exception_err(EXCP0D_GPF, (intno << shift) + 2);
1601
1602 /* Since we emulate only user space, we cannot do more than
1603 exiting the emulation with the suitable exception and error
1604 code */
1605 if (is_int)
1606 EIP = next_eip;
1607}
1608
1609#if !defined(CONFIG_USER_ONLY)
1610static void handle_even_inj(int intno, int is_int, int error_code,
1611 int is_hw, int rm)
1612{
1613 uint32_t event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
1614 if (!(event_inj & SVM_EVTINJ_VALID)) {
1615 int type;
1616 if (is_int)
1617 type = SVM_EVTINJ_TYPE_SOFT;
1618 else
1619 type = SVM_EVTINJ_TYPE_EXEPT;
1620 event_inj = intno | type | SVM_EVTINJ_VALID;
1621 if (!rm && exeption_has_error_code(intno)) {
1622 event_inj |= SVM_EVTINJ_VALID_ERR;
1623 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err), error_code);
1624 }
1625 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj);
1626 }
1627}
1628#endif
1629
1630/*
1631 * Begin execution of an interruption. is_int is TRUE if coming from
1632 * the int instruction. next_eip is the EIP value AFTER the interrupt
1633 * instruction. It is only relevant if is_int is TRUE.
1634 */
1635void do_interrupt(int intno, int is_int, int error_code,
1636 target_ulong next_eip, int is_hw)
1637{
1638 if (qemu_loglevel_mask(CPU_LOG_INT)) {
1639 if ((env->cr[0] & CR0_PE_MASK)) {
1640 static int count;
1641 qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
1642 count, intno, error_code, is_int,
1643 env->hflags & HF_CPL_MASK,
1644 env->segs[R_CS].selector, EIP,
1645 (int)env->segs[R_CS].base + EIP,
1646 env->segs[R_SS].selector, ESP);
1647 if (intno == 0x0e) {
1648 qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
1649 } else {
1650 qemu_log(" EAX=" TARGET_FMT_lx, EAX);
1651 }
1652 qemu_log("\n");
1653 log_cpu_state(env, X86_DUMP_CCOP);
1654#if 0
1655 {
1656 int i;
1657 uint8_t *ptr;
1658 qemu_log(" code=");
1659 ptr = env->segs[R_CS].base + env->eip;
1660 for(i = 0; i < 16; i++) {
1661 qemu_log(" %02x", ldub(ptr + i));
1662 }
1663 qemu_log("\n");
1664 }
1665#endif
1666 count++;
1667 }
1668 }
1669#ifdef VBOX
1670 if (RT_UNLIKELY(env->state & CPU_EMULATE_SINGLE_STEP)) {
1671 if (is_int) {
1672 RTLogPrintf("do_interrupt: %#04x err=%#x pc=%#RGv%s\n",
1673 intno, error_code, (RTGCPTR)env->eip, is_hw ? " hw" : "");
1674 } else {
1675 RTLogPrintf("do_interrupt: %#04x err=%#x pc=%#RGv next=%#RGv%s\n",
1676 intno, error_code, (RTGCPTR)env->eip, (RTGCPTR)next_eip, is_hw ? " hw" : "");
1677 }
1678 }
1679#endif
1680 if (env->cr[0] & CR0_PE_MASK) {
1681#if !defined(CONFIG_USER_ONLY)
1682 if (env->hflags & HF_SVMI_MASK)
1683 handle_even_inj(intno, is_int, error_code, is_hw, 0);
1684#endif
1685#ifdef TARGET_X86_64
1686 if (env->hflags & HF_LMA_MASK) {
1687 do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
1688 } else
1689#endif
1690 {
1691#ifdef VBOX
1692 /* int xx *, v86 code and VME enabled? */
1693 if ( (env->eflags & VM_MASK)
1694 && (env->cr[4] & CR4_VME_MASK)
1695 && is_int
1696 && !is_hw
1697 && env->eip + 1 != next_eip /* single byte int 3 goes straight to the protected mode handler */
1698 )
1699 do_soft_interrupt_vme(intno, error_code, next_eip);
1700 else
1701#endif /* VBOX */
1702 do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
1703 }
1704 } else {
1705#if !defined(CONFIG_USER_ONLY)
1706 if (env->hflags & HF_SVMI_MASK)
1707 handle_even_inj(intno, is_int, error_code, is_hw, 1);
1708#endif
1709 do_interrupt_real(intno, is_int, error_code, next_eip);
1710 }
1711
1712#if !defined(CONFIG_USER_ONLY)
1713 if (env->hflags & HF_SVMI_MASK) {
1714 uint32_t event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
1715 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj & ~SVM_EVTINJ_VALID);
1716 }
1717#endif
1718}
1719
1720/* This should come from sysemu.h - if we could include it here... */
1721void qemu_system_reset_request(void);
1722
1723/*
1724 * Check nested exceptions and change to double or triple fault if
1725 * needed. It should only be called, if this is not an interrupt.
1726 * Returns the new exception number.
1727 */
1728static int check_exception(int intno, int *error_code)
1729{
1730 int first_contributory = env->old_exception == 0 ||
1731 (env->old_exception >= 10 &&
1732 env->old_exception <= 13);
1733 int second_contributory = intno == 0 ||
1734 (intno >= 10 && intno <= 13);
1735
1736 qemu_log_mask(CPU_LOG_INT, "check_exception old: 0x%x new 0x%x\n",
1737 env->old_exception, intno);
1738
1739#if !defined(CONFIG_USER_ONLY)
1740 if (env->old_exception == EXCP08_DBLE) {
1741 if (env->hflags & HF_SVMI_MASK)
1742 helper_vmexit(SVM_EXIT_SHUTDOWN, 0); /* does not return */
1743
1744 qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");
1745
1746# ifndef VBOX
1747 qemu_system_reset_request();
1748# else
1749 remR3RaiseRC(env->pVM, VINF_EM_RESET); /** @todo test + improve tripple fault handling. */
1750# endif
1751 return EXCP_HLT;
1752 }
1753#endif
1754
1755 if ((first_contributory && second_contributory)
1756 || (env->old_exception == EXCP0E_PAGE &&
1757 (second_contributory || (intno == EXCP0E_PAGE)))) {
1758 intno = EXCP08_DBLE;
1759 *error_code = 0;
1760 }
1761
1762 if (second_contributory || (intno == EXCP0E_PAGE) ||
1763 (intno == EXCP08_DBLE))
1764 env->old_exception = intno;
1765
1766 return intno;
1767}
1768
1769/*
1770 * Signal an interruption. It is executed in the main CPU loop.
1771 * is_int is TRUE if coming from the int instruction. next_eip is the
1772 * EIP value AFTER the interrupt instruction. It is only relevant if
1773 * is_int is TRUE.
1774 */
1775static void QEMU_NORETURN raise_interrupt(int intno, int is_int, int error_code,
1776 int next_eip_addend)
1777{
1778#if defined(VBOX) && defined(DEBUG)
1779 Log2(("raise_interrupt: %x %x %x %RGv\n", intno, is_int, error_code, (RTGCPTR)env->eip + next_eip_addend));
1780#endif
1781 if (!is_int) {
1782 helper_svm_check_intercept_param(SVM_EXIT_EXCP_BASE + intno, error_code);
1783 intno = check_exception(intno, &error_code);
1784 } else {
1785 helper_svm_check_intercept_param(SVM_EXIT_SWINT, 0);
1786 }
1787
1788 env->exception_index = intno;
1789 env->error_code = error_code;
1790 env->exception_is_int = is_int;
1791 env->exception_next_eip = env->eip + next_eip_addend;
1792 cpu_loop_exit();
1793}
1794
1795/* shortcuts to generate exceptions */
1796
1797void raise_exception_err(int exception_index, int error_code)
1798{
1799 raise_interrupt(exception_index, 0, error_code, 0);
1800}
1801
1802void raise_exception(int exception_index)
1803{
1804 raise_interrupt(exception_index, 0, 0, 0);
1805}
1806
1807void raise_exception_env(int exception_index, CPUState *nenv)
1808{
1809 env = nenv;
1810 raise_exception(exception_index);
1811}
1812/* SMM support */
1813
1814#if defined(CONFIG_USER_ONLY)
1815
1816void do_smm_enter(void)
1817{
1818}
1819
1820void helper_rsm(void)
1821{
1822}
1823
1824#else
1825
1826#ifdef TARGET_X86_64
1827#define SMM_REVISION_ID 0x00020064
1828#else
1829#define SMM_REVISION_ID 0x00020000
1830#endif
1831
1832void do_smm_enter(void)
1833{
1834 target_ulong sm_state;
1835 SegmentCache *dt;
1836 int i, offset;
1837
1838 qemu_log_mask(CPU_LOG_INT, "SMM: enter\n");
1839 log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);
1840
1841 env->hflags |= HF_SMM_MASK;
1842 cpu_smm_update(env);
1843
1844 sm_state = env->smbase + 0x8000;
1845
1846#ifdef TARGET_X86_64
1847 for(i = 0; i < 6; i++) {
1848 dt = &env->segs[i];
1849 offset = 0x7e00 + i * 16;
1850 stw_phys(sm_state + offset, dt->selector);
1851 stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
1852 stl_phys(sm_state + offset + 4, dt->limit);
1853 stq_phys(sm_state + offset + 8, dt->base);
1854 }
1855
1856 stq_phys(sm_state + 0x7e68, env->gdt.base);
1857 stl_phys(sm_state + 0x7e64, env->gdt.limit);
1858
1859 stw_phys(sm_state + 0x7e70, env->ldt.selector);
1860 stq_phys(sm_state + 0x7e78, env->ldt.base);
1861 stl_phys(sm_state + 0x7e74, env->ldt.limit);
1862 stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);
1863
1864 stq_phys(sm_state + 0x7e88, env->idt.base);
1865 stl_phys(sm_state + 0x7e84, env->idt.limit);
1866
1867 stw_phys(sm_state + 0x7e90, env->tr.selector);
1868 stq_phys(sm_state + 0x7e98, env->tr.base);
1869 stl_phys(sm_state + 0x7e94, env->tr.limit);
1870 stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);
1871
1872 stq_phys(sm_state + 0x7ed0, env->efer);
1873
1874 stq_phys(sm_state + 0x7ff8, EAX);
1875 stq_phys(sm_state + 0x7ff0, ECX);
1876 stq_phys(sm_state + 0x7fe8, EDX);
1877 stq_phys(sm_state + 0x7fe0, EBX);
1878 stq_phys(sm_state + 0x7fd8, ESP);
1879 stq_phys(sm_state + 0x7fd0, EBP);
1880 stq_phys(sm_state + 0x7fc8, ESI);
1881 stq_phys(sm_state + 0x7fc0, EDI);
1882 for(i = 8; i < 16; i++)
1883 stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
1884 stq_phys(sm_state + 0x7f78, env->eip);
1885 stl_phys(sm_state + 0x7f70, compute_eflags());
1886 stl_phys(sm_state + 0x7f68, env->dr[6]);
1887 stl_phys(sm_state + 0x7f60, env->dr[7]);
1888
1889 stl_phys(sm_state + 0x7f48, env->cr[4]);
1890 stl_phys(sm_state + 0x7f50, env->cr[3]);
1891 stl_phys(sm_state + 0x7f58, env->cr[0]);
1892
1893 stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1894 stl_phys(sm_state + 0x7f00, env->smbase);
1895#else
1896 stl_phys(sm_state + 0x7ffc, env->cr[0]);
1897 stl_phys(sm_state + 0x7ff8, env->cr[3]);
1898 stl_phys(sm_state + 0x7ff4, compute_eflags());
1899 stl_phys(sm_state + 0x7ff0, env->eip);
1900 stl_phys(sm_state + 0x7fec, EDI);
1901 stl_phys(sm_state + 0x7fe8, ESI);
1902 stl_phys(sm_state + 0x7fe4, EBP);
1903 stl_phys(sm_state + 0x7fe0, ESP);
1904 stl_phys(sm_state + 0x7fdc, EBX);
1905 stl_phys(sm_state + 0x7fd8, EDX);
1906 stl_phys(sm_state + 0x7fd4, ECX);
1907 stl_phys(sm_state + 0x7fd0, EAX);
1908 stl_phys(sm_state + 0x7fcc, env->dr[6]);
1909 stl_phys(sm_state + 0x7fc8, env->dr[7]);
1910
1911 stl_phys(sm_state + 0x7fc4, env->tr.selector);
1912 stl_phys(sm_state + 0x7f64, env->tr.base);
1913 stl_phys(sm_state + 0x7f60, env->tr.limit);
1914 stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);
1915
1916 stl_phys(sm_state + 0x7fc0, env->ldt.selector);
1917 stl_phys(sm_state + 0x7f80, env->ldt.base);
1918 stl_phys(sm_state + 0x7f7c, env->ldt.limit);
1919 stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);
1920
1921 stl_phys(sm_state + 0x7f74, env->gdt.base);
1922 stl_phys(sm_state + 0x7f70, env->gdt.limit);
1923
1924 stl_phys(sm_state + 0x7f58, env->idt.base);
1925 stl_phys(sm_state + 0x7f54, env->idt.limit);
1926
1927 for(i = 0; i < 6; i++) {
1928 dt = &env->segs[i];
1929 if (i < 3)
1930 offset = 0x7f84 + i * 12;
1931 else
1932 offset = 0x7f2c + (i - 3) * 12;
1933 stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
1934 stl_phys(sm_state + offset + 8, dt->base);
1935 stl_phys(sm_state + offset + 4, dt->limit);
1936 stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
1937 }
1938 stl_phys(sm_state + 0x7f14, env->cr[4]);
1939
1940 stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1941 stl_phys(sm_state + 0x7ef8, env->smbase);
1942#endif
1943 /* init SMM cpu state */
1944
1945#ifdef TARGET_X86_64
1946 cpu_load_efer(env, 0);
1947#endif
1948 load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1949 env->eip = 0x00008000;
1950 cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
1951 0xffffffff, 0);
1952 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
1953 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
1954 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
1955 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
1956 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);
1957
1958 cpu_x86_update_cr0(env,
1959 env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK));
1960 cpu_x86_update_cr4(env, 0);
1961 env->dr[7] = 0x00000400;
1962 CC_OP = CC_OP_EFLAGS;
1963}
1964
1965void helper_rsm(void)
1966{
1967#ifdef VBOX
1968 cpu_abort(env, "helper_rsm");
1969#else /* !VBOX */
1970 target_ulong sm_state;
1971 int i, offset;
1972 uint32_t val;
1973
1974 sm_state = env->smbase + 0x8000;
1975#ifdef TARGET_X86_64
1976 cpu_load_efer(env, ldq_phys(sm_state + 0x7ed0));
1977
1978 for(i = 0; i < 6; i++) {
1979 offset = 0x7e00 + i * 16;
1980 cpu_x86_load_seg_cache(env, i,
1981 lduw_phys(sm_state + offset),
1982 ldq_phys(sm_state + offset + 8),
1983 ldl_phys(sm_state + offset + 4),
1984 (lduw_phys(sm_state + offset + 2) & 0xf0ff) << 8);
1985 }
1986
1987 env->gdt.base = ldq_phys(sm_state + 0x7e68);
1988 env->gdt.limit = ldl_phys(sm_state + 0x7e64);
1989
1990 env->ldt.selector = lduw_phys(sm_state + 0x7e70);
1991 env->ldt.base = ldq_phys(sm_state + 0x7e78);
1992 env->ldt.limit = ldl_phys(sm_state + 0x7e74);
1993 env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;
1994#ifdef VBOX
1995 env->ldt.fVBoxFlags = CPUMSELREG_FLAGS_VALID;
1996 env->ldt.newselector = 0;
1997#endif
1998
1999 env->idt.base = ldq_phys(sm_state + 0x7e88);
2000 env->idt.limit = ldl_phys(sm_state + 0x7e84);
2001
2002 env->tr.selector = lduw_phys(sm_state + 0x7e90);
2003 env->tr.base = ldq_phys(sm_state + 0x7e98);
2004 env->tr.limit = ldl_phys(sm_state + 0x7e94);
2005 env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;
2006#ifdef VBOX
2007 env->tr.fVBoxFlags = CPUMSELREG_FLAGS_VALID;
2008 env->tr.newselector = 0;
2009#endif
2010
2011 EAX = ldq_phys(sm_state + 0x7ff8);
2012 ECX = ldq_phys(sm_state + 0x7ff0);
2013 EDX = ldq_phys(sm_state + 0x7fe8);
2014 EBX = ldq_phys(sm_state + 0x7fe0);
2015 ESP = ldq_phys(sm_state + 0x7fd8);
2016 EBP = ldq_phys(sm_state + 0x7fd0);
2017 ESI = ldq_phys(sm_state + 0x7fc8);
2018 EDI = ldq_phys(sm_state + 0x7fc0);
2019 for(i = 8; i < 16; i++)
2020 env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
2021 env->eip = ldq_phys(sm_state + 0x7f78);
2022 load_eflags(ldl_phys(sm_state + 0x7f70),
2023 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
2024 env->dr[6] = ldl_phys(sm_state + 0x7f68);
2025 env->dr[7] = ldl_phys(sm_state + 0x7f60);
2026
2027 cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
2028 cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
2029 cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));
2030
2031 val = ldl_phys(sm_state + 0x7efc); /* revision ID */
2032 if (val & 0x20000) {
2033 env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
2034 }
2035#else
2036 cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
2037 cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
2038 load_eflags(ldl_phys(sm_state + 0x7ff4),
2039 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
2040 env->eip = ldl_phys(sm_state + 0x7ff0);
2041 EDI = ldl_phys(sm_state + 0x7fec);
2042 ESI = ldl_phys(sm_state + 0x7fe8);
2043 EBP = ldl_phys(sm_state + 0x7fe4);
2044 ESP = ldl_phys(sm_state + 0x7fe0);
2045 EBX = ldl_phys(sm_state + 0x7fdc);
2046 EDX = ldl_phys(sm_state + 0x7fd8);
2047 ECX = ldl_phys(sm_state + 0x7fd4);
2048 EAX = ldl_phys(sm_state + 0x7fd0);
2049 env->dr[6] = ldl_phys(sm_state + 0x7fcc);
2050 env->dr[7] = ldl_phys(sm_state + 0x7fc8);
2051
2052 env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
2053 env->tr.base = ldl_phys(sm_state + 0x7f64);
2054 env->tr.limit = ldl_phys(sm_state + 0x7f60);
2055 env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;
2056#ifdef VBOX
2057 env->tr.fVBoxFlags = CPUMSELREG_FLAGS_VALID;
2058 env->tr.newselector = 0;
2059#endif
2060
2061 env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
2062 env->ldt.base = ldl_phys(sm_state + 0x7f80);
2063 env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
2064 env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;
2065#ifdef VBOX
2066 env->ldt.fVBoxFlags = CPUMSELREG_FLAGS_VALID;
2067 env->ldt.newselector = 0;
2068#endif
2069
2070 env->gdt.base = ldl_phys(sm_state + 0x7f74);
2071 env->gdt.limit = ldl_phys(sm_state + 0x7f70);
2072
2073 env->idt.base = ldl_phys(sm_state + 0x7f58);
2074 env->idt.limit = ldl_phys(sm_state + 0x7f54);
2075
2076 for(i = 0; i < 6; i++) {
2077 if (i < 3)
2078 offset = 0x7f84 + i * 12;
2079 else
2080 offset = 0x7f2c + (i - 3) * 12;
2081 cpu_x86_load_seg_cache(env, i,
2082 ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
2083 ldl_phys(sm_state + offset + 8),
2084 ldl_phys(sm_state + offset + 4),
2085 (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
2086 }
2087 cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));
2088
2089 val = ldl_phys(sm_state + 0x7efc); /* revision ID */
2090 if (val & 0x20000) {
2091 env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
2092 }
2093#endif
2094 CC_OP = CC_OP_EFLAGS;
2095 env->hflags &= ~HF_SMM_MASK;
2096 cpu_smm_update(env);
2097
2098 qemu_log_mask(CPU_LOG_INT, "SMM: after RSM\n");
2099 log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);
2100#endif /* !VBOX */
2101}
2102
2103#endif /* !CONFIG_USER_ONLY */
2104
2105
2106/* division, flags are undefined */
2107
2108void helper_divb_AL(target_ulong t0)
2109{
2110 unsigned int num, den, q, r;
2111
2112 num = (EAX & 0xffff);
2113 den = (t0 & 0xff);
2114 if (den == 0) {
2115 raise_exception(EXCP00_DIVZ);
2116 }
2117 q = (num / den);
2118 if (q > 0xff)
2119 raise_exception(EXCP00_DIVZ);
2120 q &= 0xff;
2121 r = (num % den) & 0xff;
2122 EAX = (EAX & ~0xffff) | (r << 8) | q;
2123}
2124
2125void helper_idivb_AL(target_ulong t0)
2126{
2127 int num, den, q, r;
2128
2129 num = (int16_t)EAX;
2130 den = (int8_t)t0;
2131 if (den == 0) {
2132 raise_exception(EXCP00_DIVZ);
2133 }
2134 q = (num / den);
2135 if (q != (int8_t)q)
2136 raise_exception(EXCP00_DIVZ);
2137 q &= 0xff;
2138 r = (num % den) & 0xff;
2139 EAX = (EAX & ~0xffff) | (r << 8) | q;
2140}
2141
2142void helper_divw_AX(target_ulong t0)
2143{
2144 unsigned int num, den, q, r;
2145
2146 num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
2147 den = (t0 & 0xffff);
2148 if (den == 0) {
2149 raise_exception(EXCP00_DIVZ);
2150 }
2151 q = (num / den);
2152 if (q > 0xffff)
2153 raise_exception(EXCP00_DIVZ);
2154 q &= 0xffff;
2155 r = (num % den) & 0xffff;
2156 EAX = (EAX & ~0xffff) | q;
2157 EDX = (EDX & ~0xffff) | r;
2158}
2159
2160void helper_idivw_AX(target_ulong t0)
2161{
2162 int num, den, q, r;
2163
2164 num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
2165 den = (int16_t)t0;
2166 if (den == 0) {
2167 raise_exception(EXCP00_DIVZ);
2168 }
2169 q = (num / den);
2170 if (q != (int16_t)q)
2171 raise_exception(EXCP00_DIVZ);
2172 q &= 0xffff;
2173 r = (num % den) & 0xffff;
2174 EAX = (EAX & ~0xffff) | q;
2175 EDX = (EDX & ~0xffff) | r;
2176}
2177
2178void helper_divl_EAX(target_ulong t0)
2179{
2180 unsigned int den, r;
2181 uint64_t num, q;
2182
2183 num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
2184 den = t0;
2185 if (den == 0) {
2186 raise_exception(EXCP00_DIVZ);
2187 }
2188 q = (num / den);
2189 r = (num % den);
2190 if (q > 0xffffffff)
2191 raise_exception(EXCP00_DIVZ);
2192 EAX = (uint32_t)q;
2193 EDX = (uint32_t)r;
2194}
2195
2196void helper_idivl_EAX(target_ulong t0)
2197{
2198 int den, r;
2199 int64_t num, q;
2200
2201 num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
2202 den = t0;
2203 if (den == 0) {
2204 raise_exception(EXCP00_DIVZ);
2205 }
2206 q = (num / den);
2207 r = (num % den);
2208 if (q != (int32_t)q)
2209 raise_exception(EXCP00_DIVZ);
2210 EAX = (uint32_t)q;
2211 EDX = (uint32_t)r;
2212}
2213
2214/* bcd */
2215
2216/* XXX: exception */
2217void helper_aam(int base)
2218{
2219 int al, ah;
2220 al = EAX & 0xff;
2221 ah = al / base;
2222 al = al % base;
2223 EAX = (EAX & ~0xffff) | al | (ah << 8);
2224 CC_DST = al;
2225}
2226
2227void helper_aad(int base)
2228{
2229 int al, ah;
2230 al = EAX & 0xff;
2231 ah = (EAX >> 8) & 0xff;
2232 al = ((ah * base) + al) & 0xff;
2233 EAX = (EAX & ~0xffff) | al;
2234 CC_DST = al;
2235}
2236
2237void helper_aaa(void)
2238{
2239 int icarry;
2240 int al, ah, af;
2241 int eflags;
2242
2243 eflags = helper_cc_compute_all(CC_OP);
2244 af = eflags & CC_A;
2245 al = EAX & 0xff;
2246 ah = (EAX >> 8) & 0xff;
2247
2248 icarry = (al > 0xf9);
2249 if (((al & 0x0f) > 9 ) || af) {
2250 al = (al + 6) & 0x0f;
2251 ah = (ah + 1 + icarry) & 0xff;
2252 eflags |= CC_C | CC_A;
2253 } else {
2254 eflags &= ~(CC_C | CC_A);
2255 al &= 0x0f;
2256 }
2257 EAX = (EAX & ~0xffff) | al | (ah << 8);
2258 CC_SRC = eflags;
2259}
2260
2261void helper_aas(void)
2262{
2263 int icarry;
2264 int al, ah, af;
2265 int eflags;
2266
2267 eflags = helper_cc_compute_all(CC_OP);
2268 af = eflags & CC_A;
2269 al = EAX & 0xff;
2270 ah = (EAX >> 8) & 0xff;
2271
2272 icarry = (al < 6);
2273 if (((al & 0x0f) > 9 ) || af) {
2274 al = (al - 6) & 0x0f;
2275 ah = (ah - 1 - icarry) & 0xff;
2276 eflags |= CC_C | CC_A;
2277 } else {
2278 eflags &= ~(CC_C | CC_A);
2279 al &= 0x0f;
2280 }
2281 EAX = (EAX & ~0xffff) | al | (ah << 8);
2282 CC_SRC = eflags;
2283}
2284
2285void helper_daa(void)
2286{
2287 int al, af, cf;
2288 int eflags;
2289
2290 eflags = helper_cc_compute_all(CC_OP);
2291 cf = eflags & CC_C;
2292 af = eflags & CC_A;
2293 al = EAX & 0xff;
2294
2295 eflags = 0;
2296 if (((al & 0x0f) > 9 ) || af) {
2297 al = (al + 6) & 0xff;
2298 eflags |= CC_A;
2299 }
2300 if ((al > 0x9f) || cf) {
2301 al = (al + 0x60) & 0xff;
2302 eflags |= CC_C;
2303 }
2304 EAX = (EAX & ~0xff) | al;
2305 /* well, speed is not an issue here, so we compute the flags by hand */
2306 eflags |= (al == 0) << 6; /* zf */
2307 eflags |= parity_table[al]; /* pf */
2308 eflags |= (al & 0x80); /* sf */
2309 CC_SRC = eflags;
2310}
2311
2312void helper_das(void)
2313{
2314 int al, al1, af, cf;
2315 int eflags;
2316
2317 eflags = helper_cc_compute_all(CC_OP);
2318 cf = eflags & CC_C;
2319 af = eflags & CC_A;
2320 al = EAX & 0xff;
2321
2322 eflags = 0;
2323 al1 = al;
2324 if (((al & 0x0f) > 9 ) || af) {
2325 eflags |= CC_A;
2326 if (al < 6 || cf)
2327 eflags |= CC_C;
2328 al = (al - 6) & 0xff;
2329 }
2330 if ((al1 > 0x99) || cf) {
2331 al = (al - 0x60) & 0xff;
2332 eflags |= CC_C;
2333 }
2334 EAX = (EAX & ~0xff) | al;
2335 /* well, speed is not an issue here, so we compute the flags by hand */
2336 eflags |= (al == 0) << 6; /* zf */
2337 eflags |= parity_table[al]; /* pf */
2338 eflags |= (al & 0x80); /* sf */
2339 CC_SRC = eflags;
2340}
2341
2342void helper_into(int next_eip_addend)
2343{
2344 int eflags;
2345 eflags = helper_cc_compute_all(CC_OP);
2346 if (eflags & CC_O) {
2347 raise_interrupt(EXCP04_INTO, 1, 0, next_eip_addend);
2348 }
2349}
2350
2351void helper_cmpxchg8b(target_ulong a0)
2352{
2353 uint64_t d;
2354 int eflags;
2355
2356 eflags = helper_cc_compute_all(CC_OP);
2357 d = ldq(a0);
2358 if (d == (((uint64_t)EDX << 32) | (uint32_t)EAX)) {
2359 stq(a0, ((uint64_t)ECX << 32) | (uint32_t)EBX);
2360 eflags |= CC_Z;
2361 } else {
2362 /* always do the store */
2363 stq(a0, d);
2364 EDX = (uint32_t)(d >> 32);
2365 EAX = (uint32_t)d;
2366 eflags &= ~CC_Z;
2367 }
2368 CC_SRC = eflags;
2369}
2370
2371#ifdef TARGET_X86_64
2372void helper_cmpxchg16b(target_ulong a0)
2373{
2374 uint64_t d0, d1;
2375 int eflags;
2376
2377 if ((a0 & 0xf) != 0)
2378 raise_exception(EXCP0D_GPF);
2379 eflags = helper_cc_compute_all(CC_OP);
2380 d0 = ldq(a0);
2381 d1 = ldq(a0 + 8);
2382 if (d0 == EAX && d1 == EDX) {
2383 stq(a0, EBX);
2384 stq(a0 + 8, ECX);
2385 eflags |= CC_Z;
2386 } else {
2387 /* always do the store */
2388 stq(a0, d0);
2389 stq(a0 + 8, d1);
2390 EDX = d1;
2391 EAX = d0;
2392 eflags &= ~CC_Z;
2393 }
2394 CC_SRC = eflags;
2395}
2396#endif
2397
2398void helper_single_step(void)
2399{
2400#ifndef CONFIG_USER_ONLY
2401 check_hw_breakpoints(env, 1);
2402 env->dr[6] |= DR6_BS;
2403#endif
2404 raise_exception(EXCP01_DB);
2405}
2406
2407void helper_cpuid(void)
2408{
2409 uint32_t eax, ebx, ecx, edx;
2410
2411 helper_svm_check_intercept_param(SVM_EXIT_CPUID, 0);
2412
2413 cpu_x86_cpuid(env, (uint32_t)EAX, (uint32_t)ECX, &eax, &ebx, &ecx, &edx);
2414 EAX = eax;
2415 EBX = ebx;
2416 ECX = ecx;
2417 EDX = edx;
2418}
2419
2420void helper_enter_level(int level, int data32, target_ulong t1)
2421{
2422 target_ulong ssp;
2423 uint32_t esp_mask, esp, ebp;
2424
2425 esp_mask = get_sp_mask(env->segs[R_SS].flags);
2426 ssp = env->segs[R_SS].base;
2427 ebp = EBP;
2428 esp = ESP;
2429 if (data32) {
2430 /* 32 bit */
2431 esp -= 4;
2432 while (--level) {
2433 esp -= 4;
2434 ebp -= 4;
2435 stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
2436 }
2437 esp -= 4;
2438 stl(ssp + (esp & esp_mask), t1);
2439 } else {
2440 /* 16 bit */
2441 esp -= 2;
2442 while (--level) {
2443 esp -= 2;
2444 ebp -= 2;
2445 stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
2446 }
2447 esp -= 2;
2448 stw(ssp + (esp & esp_mask), t1);
2449 }
2450}
2451
2452#ifdef TARGET_X86_64
2453void helper_enter64_level(int level, int data64, target_ulong t1)
2454{
2455 target_ulong esp, ebp;
2456 ebp = EBP;
2457 esp = ESP;
2458
2459 if (data64) {
2460 /* 64 bit */
2461 esp -= 8;
2462 while (--level) {
2463 esp -= 8;
2464 ebp -= 8;
2465 stq(esp, ldq(ebp));
2466 }
2467 esp -= 8;
2468 stq(esp, t1);
2469 } else {
2470 /* 16 bit */
2471 esp -= 2;
2472 while (--level) {
2473 esp -= 2;
2474 ebp -= 2;
2475 stw(esp, lduw(ebp));
2476 }
2477 esp -= 2;
2478 stw(esp, t1);
2479 }
2480}
2481#endif
2482
2483void helper_lldt(int selector)
2484{
2485 SegmentCache *dt;
2486 uint32_t e1, e2;
2487#ifndef VBOX
2488 int index, entry_limit;
2489#else
2490 unsigned int index, entry_limit;
2491#endif
2492 target_ulong ptr;
2493
2494#ifdef VBOX
2495 Log(("helper_lldt_T0: old ldtr=%RTsel {.base=%RGv, .limit=%RGv} new=%RTsel\n",
2496 (RTSEL)env->ldt.selector, (RTGCPTR)env->ldt.base, (RTGCPTR)env->ldt.limit, (RTSEL)(selector & 0xffff)));
2497#endif
2498
2499 selector &= 0xffff;
2500 if ((selector & 0xfffc) == 0) {
2501 /* XXX: NULL selector case: invalid LDT */
2502 env->ldt.base = 0;
2503 env->ldt.limit = 0;
2504#ifdef VBOX
2505 env->ldt.fVBoxFlags = CPUMSELREG_FLAGS_VALID;
2506 env->ldt.newselector = 0;
2507#endif
2508 } else {
2509 if (selector & 0x4)
2510 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2511 dt = &env->gdt;
2512 index = selector & ~7;
2513#ifdef TARGET_X86_64
2514 if (env->hflags & HF_LMA_MASK)
2515 entry_limit = 15;
2516 else
2517#endif
2518 entry_limit = 7;
2519 if ((index + entry_limit) > dt->limit)
2520 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2521 ptr = dt->base + index;
2522 e1 = ldl_kernel(ptr);
2523 e2 = ldl_kernel(ptr + 4);
2524 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
2525 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2526 if (!(e2 & DESC_P_MASK))
2527 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2528#ifdef TARGET_X86_64
2529 if (env->hflags & HF_LMA_MASK) {
2530 uint32_t e3;
2531 e3 = ldl_kernel(ptr + 8);
2532 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2533 env->ldt.base |= (target_ulong)e3 << 32;
2534 } else
2535#endif
2536 {
2537 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2538 }
2539 }
2540 env->ldt.selector = selector;
2541#ifdef VBOX
2542 Log(("helper_lldt_T0: new ldtr=%RTsel {.base=%RGv, .limit=%RGv}\n",
2543 (RTSEL)env->ldt.selector, (RTGCPTR)env->ldt.base, (RTGCPTR)env->ldt.limit));
2544#endif
2545}
2546
2547void helper_ltr(int selector)
2548{
2549 SegmentCache *dt;
2550 uint32_t e1, e2;
2551#ifndef VBOX
2552 int index, type, entry_limit;
2553#else
2554 unsigned int index;
2555 int type, entry_limit;
2556#endif
2557 target_ulong ptr;
2558
2559#ifdef VBOX
2560 Log(("helper_ltr: pc=%RGv old tr=%RTsel {.base=%RGv, .limit=%RGv, .flags=%RX32} new=%RTsel\n",
2561 (RTGCPTR)env->eip, (RTSEL)env->tr.selector, (RTGCPTR)env->tr.base, (RTGCPTR)env->tr.limit,
2562 env->tr.flags, (RTSEL)(selector & 0xffff)));
2563 ASMAtomicOrS32((int32_t volatile *)&env->interrupt_request,
2564 CPU_INTERRUPT_EXTERNAL_EXIT);
2565#endif
2566 selector &= 0xffff;
2567 if ((selector & 0xfffc) == 0) {
2568 /* NULL selector case: invalid TR */
2569 env->tr.base = 0;
2570 env->tr.limit = 0;
2571 env->tr.flags = 0;
2572#ifdef VBOX
2573 env->tr.fVBoxFlags = CPUMSELREG_FLAGS_VALID;
2574 env->tr.newselector = 0;
2575#endif
2576 } else {
2577 if (selector & 0x4)
2578 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2579 dt = &env->gdt;
2580 index = selector & ~7;
2581#ifdef TARGET_X86_64
2582 if (env->hflags & HF_LMA_MASK)
2583 entry_limit = 15;
2584 else
2585#endif
2586 entry_limit = 7;
2587 if ((index + entry_limit) > dt->limit)
2588 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2589 ptr = dt->base + index;
2590 e1 = ldl_kernel(ptr);
2591 e2 = ldl_kernel(ptr + 4);
2592 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2593 if ((e2 & DESC_S_MASK) ||
2594 (type != 1 && type != 9))
2595 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2596 if (!(e2 & DESC_P_MASK))
2597 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2598#ifdef TARGET_X86_64
2599 if (env->hflags & HF_LMA_MASK) {
2600 uint32_t e3, e4;
2601 e3 = ldl_kernel(ptr + 8);
2602 e4 = ldl_kernel(ptr + 12);
2603 if ((e4 >> DESC_TYPE_SHIFT) & 0xf)
2604 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2605 load_seg_cache_raw_dt(&env->tr, e1, e2);
2606 env->tr.base |= (target_ulong)e3 << 32;
2607 } else
2608#endif
2609 {
2610 load_seg_cache_raw_dt(&env->tr, e1, e2);
2611 }
2612 e2 |= DESC_TSS_BUSY_MASK;
2613 stl_kernel(ptr + 4, e2);
2614 }
2615 env->tr.selector = selector;
2616#ifdef VBOX
2617 Log(("helper_ltr: new tr=%RTsel {.base=%RGv, .limit=%RGv, .flags=%RX32} new=%RTsel\n",
2618 (RTSEL)env->tr.selector, (RTGCPTR)env->tr.base, (RTGCPTR)env->tr.limit,
2619 env->tr.flags, (RTSEL)(selector & 0xffff)));
2620#endif
2621}
2622
2623/* only works if protected mode and not VM86. seg_reg must be != R_CS */
2624void helper_load_seg(int seg_reg, int selector)
2625{
2626 uint32_t e1, e2;
2627 int cpl, dpl, rpl;
2628 SegmentCache *dt;
2629#ifndef VBOX
2630 int index;
2631#else
2632 unsigned int index;
2633#endif
2634 target_ulong ptr;
2635
2636 selector &= 0xffff;
2637 cpl = env->hflags & HF_CPL_MASK;
2638#ifdef VBOX
2639
2640 /* Trying to load a selector with CPL=1? */
2641 if (cpl == 0 && (selector & 3) == 1 && (env->state & CPU_RAW_RING0))
2642 {
2643 Log(("RPL 1 -> sel %04X -> %04X (helper_load_seg)\n", selector, selector & 0xfffc));
2644 selector = selector & 0xfffc;
2645 }
2646#endif /* VBOX */
2647 if ((selector & 0xfffc) == 0) {
2648 /* null selector case */
2649 if (seg_reg == R_SS
2650#ifdef TARGET_X86_64
2651 && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
2652#endif
2653 )
2654 raise_exception_err(EXCP0D_GPF, 0);
2655 cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
2656 } else {
2657
2658 if (selector & 0x4)
2659 dt = &env->ldt;
2660 else
2661 dt = &env->gdt;
2662 index = selector & ~7;
2663 if ((index + 7) > dt->limit)
2664 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2665 ptr = dt->base + index;
2666 e1 = ldl_kernel(ptr);
2667 e2 = ldl_kernel(ptr + 4);
2668
2669 if (!(e2 & DESC_S_MASK))
2670 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2671 rpl = selector & 3;
2672 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2673 if (seg_reg == R_SS) {
2674 /* must be writable segment */
2675 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
2676 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2677 if (rpl != cpl || dpl != cpl)
2678 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2679 } else {
2680 /* must be readable segment */
2681 if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
2682 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2683
2684 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2685 /* if not conforming code, test rights */
2686 if (dpl < cpl || dpl < rpl)
2687 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2688 }
2689 }
2690
2691 if (!(e2 & DESC_P_MASK)) {
2692 if (seg_reg == R_SS)
2693 raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
2694 else
2695 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2696 }
2697
2698 /* set the access bit if not already set */
2699 if (!(e2 & DESC_A_MASK)) {
2700 e2 |= DESC_A_MASK;
2701 stl_kernel(ptr + 4, e2);
2702 }
2703
2704 cpu_x86_load_seg_cache(env, seg_reg, selector,
2705 get_seg_base(e1, e2),
2706 get_seg_limit(e1, e2),
2707 e2);
2708#if 0
2709 qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
2710 selector, (unsigned long)sc->base, sc->limit, sc->flags);
2711#endif
2712 }
2713}
2714
2715/* protected mode jump */
2716void helper_ljmp_protected(int new_cs, target_ulong new_eip,
2717 int next_eip_addend)
2718{
2719 int gate_cs, type;
2720 uint32_t e1, e2, cpl, dpl, rpl, limit;
2721 target_ulong next_eip;
2722
2723#ifdef VBOX /** @todo Why do we do this? */
2724 e1 = e2 = 0;
2725#endif
2726 if ((new_cs & 0xfffc) == 0)
2727 raise_exception_err(EXCP0D_GPF, 0);
2728 if (load_segment(&e1, &e2, new_cs) != 0)
2729 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2730 cpl = env->hflags & HF_CPL_MASK;
2731 if (e2 & DESC_S_MASK) {
2732 if (!(e2 & DESC_CS_MASK))
2733 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2734 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2735 if (e2 & DESC_C_MASK) {
2736 /* conforming code segment */
2737 if (dpl > cpl)
2738 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2739 } else {
2740 /* non conforming code segment */
2741 rpl = new_cs & 3;
2742 if (rpl > cpl)
2743 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2744 if (dpl != cpl)
2745 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2746 }
2747 if (!(e2 & DESC_P_MASK))
2748 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2749 limit = get_seg_limit(e1, e2);
2750 if (new_eip > limit &&
2751 !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK))
2752 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2753 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2754 get_seg_base(e1, e2), limit, e2);
2755 EIP = new_eip;
2756 } else {
2757 /* jump to call or task gate */
2758 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2759 rpl = new_cs & 3;
2760 cpl = env->hflags & HF_CPL_MASK;
2761 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2762 switch(type) {
2763 case 1: /* 286 TSS */
2764 case 9: /* 386 TSS */
2765 case 5: /* task gate */
2766 if (dpl < cpl || dpl < rpl)
2767 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2768 next_eip = env->eip + next_eip_addend;
2769 switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
2770 CC_OP = CC_OP_EFLAGS;
2771 break;
2772 case 4: /* 286 call gate */
2773 case 12: /* 386 call gate */
2774 if ((dpl < cpl) || (dpl < rpl))
2775 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2776 if (!(e2 & DESC_P_MASK))
2777 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2778 gate_cs = e1 >> 16;
2779 new_eip = (e1 & 0xffff);
2780 if (type == 12)
2781 new_eip |= (e2 & 0xffff0000);
2782 if (load_segment(&e1, &e2, gate_cs) != 0)
2783 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2784 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2785 /* must be code segment */
2786 if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
2787 (DESC_S_MASK | DESC_CS_MASK)))
2788 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2789 if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
2790 (!(e2 & DESC_C_MASK) && (dpl != cpl)))
2791 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2792 if (!(e2 & DESC_P_MASK))
2793#ifdef VBOX /* See page 3-514 of 253666.pdf */
2794 raise_exception_err(EXCP0B_NOSEG, gate_cs & 0xfffc);
2795#else
2796 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2797#endif
2798 limit = get_seg_limit(e1, e2);
2799 if (new_eip > limit)
2800 raise_exception_err(EXCP0D_GPF, 0);
2801 cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
2802 get_seg_base(e1, e2), limit, e2);
2803 EIP = new_eip;
2804 break;
2805 default:
2806 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2807 break;
2808 }
2809 }
2810}
2811
2812/* real mode call */
2813void helper_lcall_real(int new_cs, target_ulong new_eip1,
2814 int shift, int next_eip)
2815{
2816 int new_eip;
2817 uint32_t esp, esp_mask;
2818 target_ulong ssp;
2819
2820 new_eip = new_eip1;
2821 esp = ESP;
2822 esp_mask = get_sp_mask(env->segs[R_SS].flags);
2823 ssp = env->segs[R_SS].base;
2824 if (shift) {
2825 PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
2826 PUSHL(ssp, esp, esp_mask, next_eip);
2827 } else {
2828 PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
2829 PUSHW(ssp, esp, esp_mask, next_eip);
2830 }
2831
2832 SET_ESP(esp, esp_mask);
2833 env->eip = new_eip;
2834 env->segs[R_CS].selector = new_cs;
2835 env->segs[R_CS].base = (new_cs << 4);
2836}
2837
2838/* protected mode call */
2839void helper_lcall_protected(int new_cs, target_ulong new_eip,
2840 int shift, int next_eip_addend)
2841{
2842 int new_stack, i;
2843 uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
2844 uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, sp, type, ss_dpl, sp_mask;
2845 uint32_t val, limit, old_sp_mask;
2846 target_ulong ssp, old_ssp, next_eip;
2847
2848#ifdef VBOX /** @todo Why do we do this? */
2849 e1 = e2 = 0;
2850#endif
2851 next_eip = env->eip + next_eip_addend;
2852 LOG_PCALL("lcall %04x:%08x s=%d\n", new_cs, (uint32_t)new_eip, shift);
2853 LOG_PCALL_STATE(env);
2854 if ((new_cs & 0xfffc) == 0)
2855 raise_exception_err(EXCP0D_GPF, 0);
2856 if (load_segment(&e1, &e2, new_cs) != 0)
2857 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2858 cpl = env->hflags & HF_CPL_MASK;
2859 LOG_PCALL("desc=%08x:%08x\n", e1, e2);
2860 if (e2 & DESC_S_MASK) {
2861 if (!(e2 & DESC_CS_MASK))
2862 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2863 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2864 if (e2 & DESC_C_MASK) {
2865 /* conforming code segment */
2866 if (dpl > cpl)
2867 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2868 } else {
2869 /* non conforming code segment */
2870 rpl = new_cs & 3;
2871 if (rpl > cpl)
2872 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2873 if (dpl != cpl)
2874 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2875 }
2876 if (!(e2 & DESC_P_MASK))
2877 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2878
2879#ifdef TARGET_X86_64
2880 /* XXX: check 16/32 bit cases in long mode */
2881 if (shift == 2) {
2882 target_ulong rsp;
2883 /* 64 bit case */
2884 rsp = ESP;
2885 PUSHQ(rsp, env->segs[R_CS].selector);
2886 PUSHQ(rsp, next_eip);
2887 /* from this point, not restartable */
2888 ESP = rsp;
2889 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2890 get_seg_base(e1, e2),
2891 get_seg_limit(e1, e2), e2);
2892 EIP = new_eip;
2893 } else
2894#endif
2895 {
2896 sp = ESP;
2897 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2898 ssp = env->segs[R_SS].base;
2899 if (shift) {
2900 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2901 PUSHL(ssp, sp, sp_mask, next_eip);
2902 } else {
2903 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2904 PUSHW(ssp, sp, sp_mask, next_eip);
2905 }
2906
2907 limit = get_seg_limit(e1, e2);
2908 if (new_eip > limit)
2909 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2910 /* from this point, not restartable */
2911 SET_ESP(sp, sp_mask);
2912 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2913 get_seg_base(e1, e2), limit, e2);
2914 EIP = new_eip;
2915 }
2916 } else {
2917 /* check gate type */
2918 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
2919 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2920 rpl = new_cs & 3;
2921 switch(type) {
2922 case 1: /* available 286 TSS */
2923 case 9: /* available 386 TSS */
2924 case 5: /* task gate */
2925 if (dpl < cpl || dpl < rpl)
2926 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2927 switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
2928 CC_OP = CC_OP_EFLAGS;
2929 return;
2930 case 4: /* 286 call gate */
2931 case 12: /* 386 call gate */
2932 break;
2933 default:
2934 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2935 break;
2936 }
2937 shift = type >> 3;
2938
2939 if (dpl < cpl || dpl < rpl)
2940 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2941 /* check valid bit */
2942 if (!(e2 & DESC_P_MASK))
2943 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2944 selector = e1 >> 16;
2945 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
2946 param_count = e2 & 0x1f;
2947 if ((selector & 0xfffc) == 0)
2948 raise_exception_err(EXCP0D_GPF, 0);
2949
2950 if (load_segment(&e1, &e2, selector) != 0)
2951 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2952 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
2953 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2954 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2955 if (dpl > cpl)
2956 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2957 if (!(e2 & DESC_P_MASK))
2958 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2959
2960 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
2961 /* to inner privilege */
2962 get_ss_esp_from_tss(&ss, &sp, dpl);
2963 LOG_PCALL("new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n",
2964 ss, sp, param_count, ESP);
2965 if ((ss & 0xfffc) == 0)
2966 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2967 if ((ss & 3) != dpl)
2968 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2969 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
2970 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2971 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2972 if (ss_dpl != dpl)
2973 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2974 if (!(ss_e2 & DESC_S_MASK) ||
2975 (ss_e2 & DESC_CS_MASK) ||
2976 !(ss_e2 & DESC_W_MASK))
2977 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2978 if (!(ss_e2 & DESC_P_MASK))
2979#ifdef VBOX /* See page 3-99 of 253666.pdf */
2980 raise_exception_err(EXCP0C_STACK, ss & 0xfffc);
2981#else
2982 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2983#endif
2984
2985 // push_size = ((param_count * 2) + 8) << shift;
2986
2987 old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
2988 old_ssp = env->segs[R_SS].base;
2989
2990 sp_mask = get_sp_mask(ss_e2);
2991 ssp = get_seg_base(ss_e1, ss_e2);
2992 if (shift) {
2993 PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
2994 PUSHL(ssp, sp, sp_mask, ESP);
2995 for(i = param_count - 1; i >= 0; i--) {
2996 val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
2997 PUSHL(ssp, sp, sp_mask, val);
2998 }
2999 } else {
3000 PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
3001 PUSHW(ssp, sp, sp_mask, ESP);
3002 for(i = param_count - 1; i >= 0; i--) {
3003 val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
3004 PUSHW(ssp, sp, sp_mask, val);
3005 }
3006 }
3007 new_stack = 1;
3008 } else {
3009 /* to same privilege */
3010 sp = ESP;
3011 sp_mask = get_sp_mask(env->segs[R_SS].flags);
3012 ssp = env->segs[R_SS].base;
3013 // push_size = (4 << shift);
3014 new_stack = 0;
3015 }
3016
3017 if (shift) {
3018 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
3019 PUSHL(ssp, sp, sp_mask, next_eip);
3020 } else {
3021 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
3022 PUSHW(ssp, sp, sp_mask, next_eip);
3023 }
3024
3025 /* from this point, not restartable */
3026
3027 if (new_stack) {
3028 ss = (ss & ~3) | dpl;
3029 cpu_x86_load_seg_cache(env, R_SS, ss,
3030 ssp,
3031 get_seg_limit(ss_e1, ss_e2),
3032 ss_e2);
3033 }
3034
3035 selector = (selector & ~3) | dpl;
3036 cpu_x86_load_seg_cache(env, R_CS, selector,
3037 get_seg_base(e1, e2),
3038 get_seg_limit(e1, e2),
3039 e2);
3040 cpu_x86_set_cpl(env, dpl);
3041 SET_ESP(sp, sp_mask);
3042 EIP = offset;
3043 }
3044}
3045
3046/* real and vm86 mode iret */
3047void helper_iret_real(int shift)
3048{
3049 uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
3050 target_ulong ssp;
3051 int eflags_mask;
3052#ifdef VBOX
3053 bool fVME = false;
3054
3055 remR3TrapClear(env->pVM);
3056#endif /* VBOX */
3057
3058 sp_mask = 0xffff; /* XXXX: use SS segment size ? */
3059 sp = ESP;
3060 ssp = env->segs[R_SS].base;
3061 if (shift == 1) {
3062 /* 32 bits */
3063 POPL(ssp, sp, sp_mask, new_eip);
3064 POPL(ssp, sp, sp_mask, new_cs);
3065 new_cs &= 0xffff;
3066 POPL(ssp, sp, sp_mask, new_eflags);
3067 } else {
3068 /* 16 bits */
3069 POPW(ssp, sp, sp_mask, new_eip);
3070 POPW(ssp, sp, sp_mask, new_cs);
3071 POPW(ssp, sp, sp_mask, new_eflags);
3072 }
3073#ifdef VBOX
3074 if ( (env->eflags & VM_MASK)
3075 && ((env->eflags >> IOPL_SHIFT) & 3) != 3
3076 && (env->cr[4] & CR4_VME_MASK)) /* implied or else we would fault earlier */
3077 {
3078 fVME = true;
3079 /* if virtual interrupt pending and (virtual) interrupts will be enabled -> #GP */
3080 /* if TF will be set -> #GP */
3081 if ( ((new_eflags & IF_MASK) && (env->eflags & VIP_MASK))
3082 || (new_eflags & TF_MASK))
3083 raise_exception(EXCP0D_GPF);
3084 }
3085#endif /* VBOX */
3086 ESP = (ESP & ~sp_mask) | (sp & sp_mask);
3087 env->segs[R_CS].selector = new_cs;
3088 env->segs[R_CS].base = (new_cs << 4);
3089 env->eip = new_eip;
3090#ifdef VBOX
3091 if (fVME)
3092 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
3093 else
3094#endif
3095 if (env->eflags & VM_MASK)
3096 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
3097 else
3098 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
3099 if (shift == 0)
3100 eflags_mask &= 0xffff;
3101 load_eflags(new_eflags, eflags_mask);
3102 env->hflags2 &= ~HF2_NMI_MASK;
3103#ifdef VBOX
3104 if (fVME)
3105 {
3106 if (new_eflags & IF_MASK)
3107 env->eflags |= VIF_MASK;
3108 else
3109 env->eflags &= ~VIF_MASK;
3110 }
3111#endif /* VBOX */
3112}
3113
3114static inline void validate_seg(int seg_reg, int cpl)
3115{
3116 int dpl;
3117 uint32_t e2;
3118
3119 /* XXX: on x86_64, we do not want to nullify FS and GS because
3120 they may still contain a valid base. I would be interested to
3121 know how a real x86_64 CPU behaves */
3122 if ((seg_reg == R_FS || seg_reg == R_GS) &&
3123 (env->segs[seg_reg].selector & 0xfffc) == 0)
3124 return;
3125
3126 e2 = env->segs[seg_reg].flags;
3127 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3128 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
3129 /* data or non conforming code segment */
3130 if (dpl < cpl) {
3131 cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
3132 }
3133 }
3134}
3135
3136/* protected mode iret */
3137static inline void helper_ret_protected(int shift, int is_iret, int addend)
3138{
3139 uint32_t new_cs, new_eflags, new_ss;
3140 uint32_t new_es, new_ds, new_fs, new_gs;
3141 uint32_t e1, e2, ss_e1, ss_e2;
3142 int cpl, dpl, rpl, eflags_mask, iopl;
3143 target_ulong ssp, sp, new_eip, new_esp, sp_mask;
3144
3145#ifdef VBOX /** @todo Why do we do this? */
3146 ss_e1 = ss_e2 = e1 = e2 = 0;
3147#endif
3148
3149#ifdef TARGET_X86_64
3150 if (shift == 2)
3151 sp_mask = -1;
3152 else
3153#endif
3154 sp_mask = get_sp_mask(env->segs[R_SS].flags);
3155 sp = ESP;
3156 ssp = env->segs[R_SS].base;
3157 new_eflags = 0; /* avoid warning */
3158#ifdef TARGET_X86_64
3159 if (shift == 2) {
3160 POPQ(sp, new_eip);
3161 POPQ(sp, new_cs);
3162 new_cs &= 0xffff;
3163 if (is_iret) {
3164 POPQ(sp, new_eflags);
3165 }
3166 } else
3167#endif
3168 if (shift == 1) {
3169 /* 32 bits */
3170 POPL(ssp, sp, sp_mask, new_eip);
3171 POPL(ssp, sp, sp_mask, new_cs);
3172 new_cs &= 0xffff;
3173 if (is_iret) {
3174 POPL(ssp, sp, sp_mask, new_eflags);
3175#define LOG_GROUP LOG_GROUP_REM
3176#if defined(VBOX) && defined(DEBUG)
3177 Log(("iret: new CS %04X (old=%x)\n", new_cs, env->segs[R_CS].selector));
3178 Log(("iret: new EIP %08X\n", (uint32_t)new_eip));
3179 Log(("iret: new EFLAGS %08X\n", new_eflags));
3180 Log(("iret: EAX=%08x\n", (uint32_t)EAX));
3181#endif
3182 if (new_eflags & VM_MASK)
3183 goto return_to_vm86;
3184 }
3185#ifdef VBOX
3186 if ((new_cs & 0x3) == 1 && (env->state & CPU_RAW_RING0))
3187 {
3188# ifdef VBOX_WITH_RAW_RING1
3189 if ( !EMIsRawRing1Enabled(env->pVM)
3190 || env->segs[R_CS].selector == (new_cs & 0xfffc))
3191 {
3192 Log(("RPL 1 -> new_cs %04X -> %04X\n", new_cs, new_cs & 0xfffc));
3193 new_cs = new_cs & 0xfffc;
3194 }
3195 else
3196 {
3197 /* Ugly assumption: assume a genuine switch to ring-1. */
3198 Log(("Genuine switch to ring-1 (iret)\n"));
3199 }
3200# else
3201 Log(("RPL 1 -> new_cs %04X -> %04X\n", new_cs, new_cs & 0xfffc));
3202 new_cs = new_cs & 0xfffc;
3203# endif
3204 }
3205# ifdef VBOX_WITH_RAW_RING1
3206 else
3207 if ((new_cs & 0x3) == 2 && (env->state & CPU_RAW_RING0) && EMIsRawRing1Enabled(env->pVM))
3208 {
3209 Log(("RPL 2 -> new_cs %04X -> %04X\n", new_cs, (new_cs & 0xfffc) | 1));
3210 new_cs = (new_cs & 0xfffc) | 1;
3211 }
3212# endif
3213#endif
3214 } else {
3215 /* 16 bits */
3216 POPW(ssp, sp, sp_mask, new_eip);
3217 POPW(ssp, sp, sp_mask, new_cs);
3218 if (is_iret)
3219 POPW(ssp, sp, sp_mask, new_eflags);
3220 }
3221 LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
3222 new_cs, new_eip, shift, addend);
3223 LOG_PCALL_STATE(env);
3224 if ((new_cs & 0xfffc) == 0)
3225 {
3226#if defined(VBOX) && defined(DEBUG)
3227 Log(("new_cs & 0xfffc) == 0\n"));
3228#endif
3229 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3230 }
3231 if (load_segment(&e1, &e2, new_cs) != 0)
3232 {
3233#if defined(VBOX) && defined(DEBUG)
3234 Log(("load_segment failed\n"));
3235#endif
3236 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3237 }
3238 if (!(e2 & DESC_S_MASK) ||
3239 !(e2 & DESC_CS_MASK))
3240 {
3241#if defined(VBOX) && defined(DEBUG)
3242 Log(("e2 mask %08x\n", e2));
3243#endif
3244 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3245 }
3246 cpl = env->hflags & HF_CPL_MASK;
3247 rpl = new_cs & 3;
3248 if (rpl < cpl)
3249 {
3250#if defined(VBOX) && defined(DEBUG)
3251 Log(("rpl < cpl (%d vs %d)\n", rpl, cpl));
3252#endif
3253 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3254 }
3255 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3256
3257 if (e2 & DESC_C_MASK) {
3258 if (dpl > rpl)
3259 {
3260#if defined(VBOX) && defined(DEBUG)
3261 Log(("dpl > rpl (%d vs %d)\n", dpl, rpl));
3262#endif
3263 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3264 }
3265 } else {
3266 if (dpl != rpl)
3267 {
3268#if defined(VBOX) && defined(DEBUG)
3269 Log(("dpl != rpl (%d vs %d) e1=%x e2=%x\n", dpl, rpl, e1, e2));
3270#endif
3271 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3272 }
3273 }
3274 if (!(e2 & DESC_P_MASK))
3275 {
3276#if defined(VBOX) && defined(DEBUG)
3277 Log(("DESC_P_MASK e2=%08x\n", e2));
3278#endif
3279 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
3280 }
3281
3282 sp += addend;
3283 if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
3284 ((env->hflags & HF_CS64_MASK) && !is_iret))) {
3285 /* return to same privilege level */
3286#ifdef VBOX
3287 if (!(e2 & DESC_A_MASK))
3288 e2 = set_segment_accessed(new_cs, e2);
3289#endif
3290 cpu_x86_load_seg_cache(env, R_CS, new_cs,
3291 get_seg_base(e1, e2),
3292 get_seg_limit(e1, e2),
3293 e2);
3294 } else {
3295 /* return to different privilege level */
3296#ifdef TARGET_X86_64
3297 if (shift == 2) {
3298 POPQ(sp, new_esp);
3299 POPQ(sp, new_ss);
3300 new_ss &= 0xffff;
3301 } else
3302#endif
3303 if (shift == 1) {
3304 /* 32 bits */
3305 POPL(ssp, sp, sp_mask, new_esp);
3306 POPL(ssp, sp, sp_mask, new_ss);
3307 new_ss &= 0xffff;
3308 } else {
3309 /* 16 bits */
3310 POPW(ssp, sp, sp_mask, new_esp);
3311 POPW(ssp, sp, sp_mask, new_ss);
3312 }
3313 LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
3314 new_ss, new_esp);
3315 if ((new_ss & 0xfffc) == 0) {
3316#ifdef TARGET_X86_64
3317 /* NULL ss is allowed in long mode if cpl != 3*/
3318 /* XXX: test CS64 ? */
3319 if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
3320# ifdef VBOX
3321 if (!(e2 & DESC_A_MASK))
3322 e2 = set_segment_accessed(new_cs, e2);
3323# endif
3324 cpu_x86_load_seg_cache(env, R_SS, new_ss,
3325 0, 0xffffffff,
3326 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3327 DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
3328 DESC_W_MASK | DESC_A_MASK);
3329 ss_e2 = DESC_B_MASK; /* XXX: should not be needed ? */
3330 } else
3331#endif
3332 {
3333 raise_exception_err(EXCP0D_GPF, 0);
3334 }
3335 } else {
3336 if ((new_ss & 3) != rpl)
3337 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3338 if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
3339 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3340 if (!(ss_e2 & DESC_S_MASK) ||
3341 (ss_e2 & DESC_CS_MASK) ||
3342 !(ss_e2 & DESC_W_MASK))
3343 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3344 dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
3345 if (dpl != rpl)
3346 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3347 if (!(ss_e2 & DESC_P_MASK))
3348 raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
3349#ifdef VBOX
3350 if (!(e2 & DESC_A_MASK))
3351 e2 = set_segment_accessed(new_cs, e2);
3352 if (!(ss_e2 & DESC_A_MASK))
3353 ss_e2 = set_segment_accessed(new_ss, ss_e2);
3354#endif
3355 cpu_x86_load_seg_cache(env, R_SS, new_ss,
3356 get_seg_base(ss_e1, ss_e2),
3357 get_seg_limit(ss_e1, ss_e2),
3358 ss_e2);
3359 }
3360
3361 cpu_x86_load_seg_cache(env, R_CS, new_cs,
3362 get_seg_base(e1, e2),
3363 get_seg_limit(e1, e2),
3364 e2);
3365 cpu_x86_set_cpl(env, rpl);
3366 sp = new_esp;
3367#ifdef TARGET_X86_64
3368 if (env->hflags & HF_CS64_MASK)
3369 sp_mask = -1;
3370 else
3371#endif
3372 sp_mask = get_sp_mask(ss_e2);
3373
3374 /* validate data segments */
3375 validate_seg(R_ES, rpl);
3376 validate_seg(R_DS, rpl);
3377 validate_seg(R_FS, rpl);
3378 validate_seg(R_GS, rpl);
3379
3380 sp += addend;
3381 }
3382 SET_ESP(sp, sp_mask);
3383 env->eip = new_eip;
3384 if (is_iret) {
3385 /* NOTE: 'cpl' is the _old_ CPL */
3386 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
3387 if (cpl == 0)
3388#ifdef VBOX
3389 eflags_mask |= IOPL_MASK | VIF_MASK | VIP_MASK;
3390#else
3391 eflags_mask |= IOPL_MASK;
3392#endif
3393 iopl = (env->eflags >> IOPL_SHIFT) & 3;
3394 if (cpl <= iopl)
3395 eflags_mask |= IF_MASK;
3396 if (shift == 0)
3397 eflags_mask &= 0xffff;
3398 load_eflags(new_eflags, eflags_mask);
3399 }
3400 return;
3401
3402 return_to_vm86:
3403 POPL(ssp, sp, sp_mask, new_esp);
3404 POPL(ssp, sp, sp_mask, new_ss);
3405 POPL(ssp, sp, sp_mask, new_es);
3406 POPL(ssp, sp, sp_mask, new_ds);
3407 POPL(ssp, sp, sp_mask, new_fs);
3408 POPL(ssp, sp, sp_mask, new_gs);
3409
3410 /* modify processor state */
3411 load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
3412 IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
3413 load_seg_vm(R_CS, new_cs & 0xffff);
3414 cpu_x86_set_cpl(env, 3);
3415 load_seg_vm(R_SS, new_ss & 0xffff);
3416 load_seg_vm(R_ES, new_es & 0xffff);
3417 load_seg_vm(R_DS, new_ds & 0xffff);
3418 load_seg_vm(R_FS, new_fs & 0xffff);
3419 load_seg_vm(R_GS, new_gs & 0xffff);
3420
3421 env->eip = new_eip & 0xffff;
3422 ESP = new_esp;
3423}
3424
3425void helper_iret_protected(int shift, int next_eip)
3426{
3427 int tss_selector, type;
3428 uint32_t e1, e2;
3429
3430#ifdef VBOX
3431 e1 = e2 = 0; /** @todo Why do we do this? */
3432 remR3TrapClear(env->pVM);
3433#endif
3434
3435 /* specific case for TSS */
3436 if (env->eflags & NT_MASK) {
3437#ifdef TARGET_X86_64
3438 if (env->hflags & HF_LMA_MASK)
3439 raise_exception_err(EXCP0D_GPF, 0);
3440#endif
3441 tss_selector = lduw_kernel(env->tr.base + 0);
3442 if (tss_selector & 4)
3443 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
3444 if (load_segment(&e1, &e2, tss_selector) != 0)
3445 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
3446 type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
3447 /* NOTE: we check both segment and busy TSS */
3448 if (type != 3)
3449 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
3450 switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
3451 } else {
3452 helper_ret_protected(shift, 1, 0);
3453 }
3454 env->hflags2 &= ~HF2_NMI_MASK;
3455}
3456
3457void helper_lret_protected(int shift, int addend)
3458{
3459 helper_ret_protected(shift, 0, addend);
3460}
3461
3462void helper_sysenter(void)
3463{
3464 if (env->sysenter_cs == 0) {
3465 raise_exception_err(EXCP0D_GPF, 0);
3466 }
3467 env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
3468 cpu_x86_set_cpl(env, 0);
3469
3470#ifdef TARGET_X86_64
3471 if (env->hflags & HF_LMA_MASK) {
3472 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
3473 0, 0xffffffff,
3474 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3475 DESC_S_MASK |
3476 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
3477 } else
3478#endif
3479 {
3480 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
3481 0, 0xffffffff,
3482 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3483 DESC_S_MASK |
3484 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
3485 }
3486 cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
3487 0, 0xffffffff,
3488 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3489 DESC_S_MASK |
3490 DESC_W_MASK | DESC_A_MASK);
3491 ESP = env->sysenter_esp;
3492 EIP = env->sysenter_eip;
3493}
3494
3495void helper_sysexit(int dflag)
3496{
3497 int cpl;
3498
3499 cpl = env->hflags & HF_CPL_MASK;
3500 if (env->sysenter_cs == 0 || cpl != 0) {
3501 raise_exception_err(EXCP0D_GPF, 0);
3502 }
3503 cpu_x86_set_cpl(env, 3);
3504#ifdef TARGET_X86_64
3505 if (dflag == 2) {
3506 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) | 3,
3507 0, 0xffffffff,
3508 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3509 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3510 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
3511 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) | 3,
3512 0, 0xffffffff,
3513 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3514 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3515 DESC_W_MASK | DESC_A_MASK);
3516 } else
3517#endif
3518 {
3519 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
3520 0, 0xffffffff,
3521 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3522 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3523 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
3524 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
3525 0, 0xffffffff,
3526 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3527 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3528 DESC_W_MASK | DESC_A_MASK);
3529 }
3530 ESP = ECX;
3531 EIP = EDX;
3532}
3533
3534#if defined(CONFIG_USER_ONLY)
3535target_ulong helper_read_crN(int reg)
3536{
3537 return 0;
3538}
3539
3540void helper_write_crN(int reg, target_ulong t0)
3541{
3542}
3543
3544void helper_movl_drN_T0(int reg, target_ulong t0)
3545{
3546}
3547#else
3548target_ulong helper_read_crN(int reg)
3549{
3550 target_ulong val;
3551
3552 helper_svm_check_intercept_param(SVM_EXIT_READ_CR0 + reg, 0);
3553 switch(reg) {
3554 default:
3555 val = env->cr[reg];
3556 break;
3557 case 8:
3558 if (!(env->hflags2 & HF2_VINTR_MASK)) {
3559#ifndef VBOX
3560 val = cpu_get_apic_tpr(env->apic_state);
3561#else /* VBOX */
3562 val = cpu_get_apic_tpr(env);
3563#endif /* VBOX */
3564 } else {
3565 val = env->v_tpr;
3566 }
3567 break;
3568 }
3569 return val;
3570}
3571
3572void helper_write_crN(int reg, target_ulong t0)
3573{
3574 helper_svm_check_intercept_param(SVM_EXIT_WRITE_CR0 + reg, 0);
3575 switch(reg) {
3576 case 0:
3577 cpu_x86_update_cr0(env, t0);
3578 break;
3579 case 3:
3580 cpu_x86_update_cr3(env, t0);
3581 break;
3582 case 4:
3583 cpu_x86_update_cr4(env, t0);
3584 break;
3585 case 8:
3586 if (!(env->hflags2 & HF2_VINTR_MASK)) {
3587#ifndef VBOX
3588 cpu_set_apic_tpr(env->apic_state, t0);
3589#else /* VBOX */
3590 cpu_set_apic_tpr(env, t0);
3591#endif /* VBOX */
3592 }
3593 env->v_tpr = t0 & 0x0f;
3594 break;
3595 default:
3596 env->cr[reg] = t0;
3597 break;
3598 }
3599}
3600
3601void helper_movl_drN_T0(int reg, target_ulong t0)
3602{
3603 int i;
3604
3605 if (reg < 4) {
3606 hw_breakpoint_remove(env, reg);
3607 env->dr[reg] = t0;
3608 hw_breakpoint_insert(env, reg);
3609 } else if (reg == 7) {
3610 for (i = 0; i < 4; i++)
3611 hw_breakpoint_remove(env, i);
3612 env->dr[7] = t0;
3613 for (i = 0; i < 4; i++)
3614 hw_breakpoint_insert(env, i);
3615 } else
3616 env->dr[reg] = t0;
3617}
3618#endif
3619
3620void helper_lmsw(target_ulong t0)
3621{
3622 /* only 4 lower bits of CR0 are modified. PE cannot be set to zero
3623 if already set to one. */
3624 t0 = (env->cr[0] & ~0xe) | (t0 & 0xf);
3625 helper_write_crN(0, t0);
3626}
3627
3628void helper_clts(void)
3629{
3630 env->cr[0] &= ~CR0_TS_MASK;
3631 env->hflags &= ~HF_TS_MASK;
3632}
3633
3634void helper_invlpg(target_ulong addr)
3635{
3636 helper_svm_check_intercept_param(SVM_EXIT_INVLPG, 0);
3637 tlb_flush_page(env, addr);
3638}
3639
3640void helper_rdtsc(void)
3641{
3642 uint64_t val;
3643
3644 if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3645 raise_exception(EXCP0D_GPF);
3646 }
3647 helper_svm_check_intercept_param(SVM_EXIT_RDTSC, 0);
3648
3649 val = cpu_get_tsc(env) + env->tsc_offset;
3650 EAX = (uint32_t)(val);
3651 EDX = (uint32_t)(val >> 32);
3652}
3653
3654void helper_rdtscp(void)
3655{
3656 helper_rdtsc();
3657#ifndef VBOX
3658 ECX = (uint32_t)(env->tsc_aux);
3659#else /* VBOX */
3660 uint64_t val;
3661 if (cpu_rdmsr(env, MSR_K8_TSC_AUX, &val) == 0)
3662 ECX = (uint32_t)(val);
3663 else
3664 ECX = 0;
3665#endif /* VBOX */
3666}
3667
3668void helper_rdpmc(void)
3669{
3670#ifdef VBOX
3671 /* If X86_CR4_PCE is *not* set, then CPL must be zero. */
3672 if (!(env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3673 raise_exception(EXCP0D_GPF);
3674 }
3675 /* Just return zero here; rather tricky to properly emulate this, especially as the specs are a mess. */
3676 EAX = 0;
3677 EDX = 0;
3678#else /* !VBOX */
3679 if ((env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3680 raise_exception(EXCP0D_GPF);
3681 }
3682 helper_svm_check_intercept_param(SVM_EXIT_RDPMC, 0);
3683
3684 /* currently unimplemented */
3685 raise_exception_err(EXCP06_ILLOP, 0);
3686#endif /* !VBOX */
3687}
3688
3689#if defined(CONFIG_USER_ONLY)
3690void helper_wrmsr(void)
3691{
3692}
3693
3694void helper_rdmsr(void)
3695{
3696}
3697#else
3698void helper_wrmsr(void)
3699{
3700 uint64_t val;
3701
3702 helper_svm_check_intercept_param(SVM_EXIT_MSR, 1);
3703
3704 val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
3705
3706 switch((uint32_t)ECX) {
3707 case MSR_IA32_SYSENTER_CS:
3708 env->sysenter_cs = val & 0xffff;
3709 break;
3710 case MSR_IA32_SYSENTER_ESP:
3711 env->sysenter_esp = val;
3712 break;
3713 case MSR_IA32_SYSENTER_EIP:
3714 env->sysenter_eip = val;
3715 break;
3716 case MSR_IA32_APICBASE:
3717# ifndef VBOX /* The CPUMSetGuestMsr call below does this now. */
3718 cpu_set_apic_base(env->apic_state, val);
3719# endif
3720 break;
3721 case MSR_EFER:
3722 {
3723 uint64_t update_mask;
3724 update_mask = 0;
3725 if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL)
3726 update_mask |= MSR_EFER_SCE;
3727 if (env->cpuid_ext2_features & CPUID_EXT2_LM)
3728 update_mask |= MSR_EFER_LME;
3729 if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
3730 update_mask |= MSR_EFER_FFXSR;
3731 if (env->cpuid_ext2_features & CPUID_EXT2_NX)
3732 update_mask |= MSR_EFER_NXE;
3733 if (env->cpuid_ext3_features & CPUID_EXT3_SVM)
3734 update_mask |= MSR_EFER_SVME;
3735 if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
3736 update_mask |= MSR_EFER_FFXSR;
3737 cpu_load_efer(env, (env->efer & ~update_mask) |
3738 (val & update_mask));
3739 }
3740 break;
3741 case MSR_STAR:
3742 env->star = val;
3743 break;
3744 case MSR_PAT:
3745 env->pat = val;
3746 break;
3747 case MSR_VM_HSAVE_PA:
3748 env->vm_hsave = val;
3749 break;
3750#ifdef TARGET_X86_64
3751 case MSR_LSTAR:
3752 env->lstar = val;
3753 break;
3754 case MSR_CSTAR:
3755 env->cstar = val;
3756 break;
3757 case MSR_FMASK:
3758 env->fmask = val;
3759 break;
3760 case MSR_FSBASE:
3761 env->segs[R_FS].base = val;
3762 break;
3763 case MSR_GSBASE:
3764 env->segs[R_GS].base = val;
3765 break;
3766 case MSR_KERNELGSBASE:
3767 env->kernelgsbase = val;
3768 break;
3769#endif
3770# ifndef VBOX
3771 case MSR_MTRRphysBase(0):
3772 case MSR_MTRRphysBase(1):
3773 case MSR_MTRRphysBase(2):
3774 case MSR_MTRRphysBase(3):
3775 case MSR_MTRRphysBase(4):
3776 case MSR_MTRRphysBase(5):
3777 case MSR_MTRRphysBase(6):
3778 case MSR_MTRRphysBase(7):
3779 env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base = val;
3780 break;
3781 case MSR_MTRRphysMask(0):
3782 case MSR_MTRRphysMask(1):
3783 case MSR_MTRRphysMask(2):
3784 case MSR_MTRRphysMask(3):
3785 case MSR_MTRRphysMask(4):
3786 case MSR_MTRRphysMask(5):
3787 case MSR_MTRRphysMask(6):
3788 case MSR_MTRRphysMask(7):
3789 env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask = val;
3790 break;
3791 case MSR_MTRRfix64K_00000:
3792 env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix64K_00000] = val;
3793 break;
3794 case MSR_MTRRfix16K_80000:
3795 case MSR_MTRRfix16K_A0000:
3796 env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1] = val;
3797 break;
3798 case MSR_MTRRfix4K_C0000:
3799 case MSR_MTRRfix4K_C8000:
3800 case MSR_MTRRfix4K_D0000:
3801 case MSR_MTRRfix4K_D8000:
3802 case MSR_MTRRfix4K_E0000:
3803 case MSR_MTRRfix4K_E8000:
3804 case MSR_MTRRfix4K_F0000:
3805 case MSR_MTRRfix4K_F8000:
3806 env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3] = val;
3807 break;
3808 case MSR_MTRRdefType:
3809 env->mtrr_deftype = val;
3810 break;
3811 case MSR_MCG_STATUS:
3812 env->mcg_status = val;
3813 break;
3814 case MSR_MCG_CTL:
3815 if ((env->mcg_cap & MCG_CTL_P)
3816 && (val == 0 || val == ~(uint64_t)0))
3817 env->mcg_ctl = val;
3818 break;
3819 case MSR_TSC_AUX:
3820 env->tsc_aux = val;
3821 break;
3822# endif /* !VBOX */
3823 default:
3824# ifndef VBOX
3825 if ((uint32_t)ECX >= MSR_MC0_CTL
3826 && (uint32_t)ECX < MSR_MC0_CTL + (4 * env->mcg_cap & 0xff)) {
3827 uint32_t offset = (uint32_t)ECX - MSR_MC0_CTL;
3828 if ((offset & 0x3) != 0
3829 || (val == 0 || val == ~(uint64_t)0))
3830 env->mce_banks[offset] = val;
3831 break;
3832 }
3833 /* XXX: exception ? */
3834# endif
3835 break;
3836 }
3837
3838# ifdef VBOX
3839 /* call CPUM. */
3840 if (cpu_wrmsr(env, (uint32_t)ECX, val) != 0)
3841 {
3842 /** @todo be a brave man and raise a \#GP(0) here as we should... */
3843 }
3844# endif
3845}
3846
3847void helper_rdmsr(void)
3848{
3849 uint64_t val;
3850
3851 helper_svm_check_intercept_param(SVM_EXIT_MSR, 0);
3852
3853 switch((uint32_t)ECX) {
3854 case MSR_IA32_SYSENTER_CS:
3855 val = env->sysenter_cs;
3856 break;
3857 case MSR_IA32_SYSENTER_ESP:
3858 val = env->sysenter_esp;
3859 break;
3860 case MSR_IA32_SYSENTER_EIP:
3861 val = env->sysenter_eip;
3862 break;
3863 case MSR_IA32_APICBASE:
3864#ifndef VBOX
3865 val = cpu_get_apic_base(env->apic_state);
3866#else /* VBOX */
3867 val = cpu_get_apic_base(env);
3868#endif /* VBOX */
3869 break;
3870 case MSR_EFER:
3871 val = env->efer;
3872 break;
3873 case MSR_STAR:
3874 val = env->star;
3875 break;
3876 case MSR_PAT:
3877 val = env->pat;
3878 break;
3879 case MSR_VM_HSAVE_PA:
3880 val = env->vm_hsave;
3881 break;
3882# ifndef VBOX /* forward to CPUMQueryGuestMsr. */
3883 case MSR_IA32_PERF_STATUS:
3884 /* tsc_increment_by_tick */
3885 val = 1000ULL;
3886 /* CPU multiplier */
3887 val |= (((uint64_t)4ULL) << 40);
3888 break;
3889# endif /* !VBOX */
3890#ifdef TARGET_X86_64
3891 case MSR_LSTAR:
3892 val = env->lstar;
3893 break;
3894 case MSR_CSTAR:
3895 val = env->cstar;
3896 break;
3897 case MSR_FMASK:
3898 val = env->fmask;
3899 break;
3900 case MSR_FSBASE:
3901 val = env->segs[R_FS].base;
3902 break;
3903 case MSR_GSBASE:
3904 val = env->segs[R_GS].base;
3905 break;
3906 case MSR_KERNELGSBASE:
3907 val = env->kernelgsbase;
3908 break;
3909# ifndef VBOX
3910 case MSR_TSC_AUX:
3911 val = env->tsc_aux;
3912 break;
3913# endif /*!VBOX*/
3914#endif
3915# ifndef VBOX
3916 case MSR_MTRRphysBase(0):
3917 case MSR_MTRRphysBase(1):
3918 case MSR_MTRRphysBase(2):
3919 case MSR_MTRRphysBase(3):
3920 case MSR_MTRRphysBase(4):
3921 case MSR_MTRRphysBase(5):
3922 case MSR_MTRRphysBase(6):
3923 case MSR_MTRRphysBase(7):
3924 val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base;
3925 break;
3926 case MSR_MTRRphysMask(0):
3927 case MSR_MTRRphysMask(1):
3928 case MSR_MTRRphysMask(2):
3929 case MSR_MTRRphysMask(3):
3930 case MSR_MTRRphysMask(4):
3931 case MSR_MTRRphysMask(5):
3932 case MSR_MTRRphysMask(6):
3933 case MSR_MTRRphysMask(7):
3934 val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask;
3935 break;
3936 case MSR_MTRRfix64K_00000:
3937 val = env->mtrr_fixed[0];
3938 break;
3939 case MSR_MTRRfix16K_80000:
3940 case MSR_MTRRfix16K_A0000:
3941 val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1];
3942 break;
3943 case MSR_MTRRfix4K_C0000:
3944 case MSR_MTRRfix4K_C8000:
3945 case MSR_MTRRfix4K_D0000:
3946 case MSR_MTRRfix4K_D8000:
3947 case MSR_MTRRfix4K_E0000:
3948 case MSR_MTRRfix4K_E8000:
3949 case MSR_MTRRfix4K_F0000:
3950 case MSR_MTRRfix4K_F8000:
3951 val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3];
3952 break;
3953 case MSR_MTRRdefType:
3954 val = env->mtrr_deftype;
3955 break;
3956 case MSR_MTRRcap:
3957 if (env->cpuid_features & CPUID_MTRR)
3958 val = MSR_MTRRcap_VCNT | MSR_MTRRcap_FIXRANGE_SUPPORT | MSR_MTRRcap_WC_SUPPORTED;
3959 else
3960 /* XXX: exception ? */
3961 val = 0;
3962 break;
3963 case MSR_MCG_CAP:
3964 val = env->mcg_cap;
3965 break;
3966 case MSR_MCG_CTL:
3967 if (env->mcg_cap & MCG_CTL_P)
3968 val = env->mcg_ctl;
3969 else
3970 val = 0;
3971 break;
3972 case MSR_MCG_STATUS:
3973 val = env->mcg_status;
3974 break;
3975# endif /* !VBOX */
3976 default:
3977# ifndef VBOX
3978 if ((uint32_t)ECX >= MSR_MC0_CTL
3979 && (uint32_t)ECX < MSR_MC0_CTL + (4 * env->mcg_cap & 0xff)) {
3980 uint32_t offset = (uint32_t)ECX - MSR_MC0_CTL;
3981 val = env->mce_banks[offset];
3982 break;
3983 }
3984 /* XXX: exception ? */
3985 val = 0;
3986# else /* VBOX */
3987 if (cpu_rdmsr(env, (uint32_t)ECX, &val) != 0)
3988 {
3989 /** @todo be a brave man and raise a \#GP(0) here as we should... */
3990 val = 0;
3991 }
3992# endif /* VBOX */
3993 break;
3994 }
3995 EAX = (uint32_t)(val);
3996 EDX = (uint32_t)(val >> 32);
3997
3998# ifdef VBOX_STRICT
3999 if ((uint32_t)ECX != MSR_IA32_TSC) {
4000 if (cpu_rdmsr(env, (uint32_t)ECX, &val) != 0)
4001 val = 0;
4002 AssertMsg(val == RT_MAKE_U64(EAX, EDX), ("idMsr=%#x val=%#llx eax:edx=%#llx\n", (uint32_t)ECX, val, RT_MAKE_U64(EAX, EDX)));
4003 }
4004# endif
4005}
4006#endif
4007
4008target_ulong helper_lsl(target_ulong selector1)
4009{
4010 unsigned int limit;
4011 uint32_t e1, e2, eflags, selector;
4012 int rpl, dpl, cpl, type;
4013
4014 selector = selector1 & 0xffff;
4015 eflags = helper_cc_compute_all(CC_OP);
4016 if ((selector & 0xfffc) == 0)
4017 goto fail;
4018 if (load_segment(&e1, &e2, selector) != 0)
4019 goto fail;
4020 rpl = selector & 3;
4021 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
4022 cpl = env->hflags & HF_CPL_MASK;
4023 if (e2 & DESC_S_MASK) {
4024 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
4025 /* conforming */
4026 } else {
4027 if (dpl < cpl || dpl < rpl)
4028 goto fail;
4029 }
4030 } else {
4031 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
4032 switch(type) {
4033 case 1:
4034 case 2:
4035 case 3:
4036 case 9:
4037 case 11:
4038 break;
4039 default:
4040 goto fail;
4041 }
4042 if (dpl < cpl || dpl < rpl) {
4043 fail:
4044 CC_SRC = eflags & ~CC_Z;
4045 return 0;
4046 }
4047 }
4048 limit = get_seg_limit(e1, e2);
4049 CC_SRC = eflags | CC_Z;
4050 return limit;
4051}
4052
4053target_ulong helper_lar(target_ulong selector1)
4054{
4055 uint32_t e1, e2, eflags, selector;
4056 int rpl, dpl, cpl, type;
4057
4058 selector = selector1 & 0xffff;
4059 eflags = helper_cc_compute_all(CC_OP);
4060 if ((selector & 0xfffc) == 0)
4061 goto fail;
4062 if (load_segment(&e1, &e2, selector) != 0)
4063 goto fail;
4064 rpl = selector & 3;
4065 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
4066 cpl = env->hflags & HF_CPL_MASK;
4067 if (e2 & DESC_S_MASK) {
4068 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
4069 /* conforming */
4070 } else {
4071 if (dpl < cpl || dpl < rpl)
4072 goto fail;
4073 }
4074 } else {
4075 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
4076 switch(type) {
4077 case 1:
4078 case 2:
4079 case 3:
4080 case 4:
4081 case 5:
4082 case 9:
4083 case 11:
4084 case 12:
4085 break;
4086 default:
4087 goto fail;
4088 }
4089 if (dpl < cpl || dpl < rpl) {
4090 fail:
4091 CC_SRC = eflags & ~CC_Z;
4092 return 0;
4093 }
4094 }
4095 CC_SRC = eflags | CC_Z;
4096 return e2 & 0x00f0ff00;
4097}
4098
4099void helper_verr(target_ulong selector1)
4100{
4101 uint32_t e1, e2, eflags, selector;
4102 int rpl, dpl, cpl;
4103
4104 selector = selector1 & 0xffff;
4105 eflags = helper_cc_compute_all(CC_OP);
4106 if ((selector & 0xfffc) == 0)
4107 goto fail;
4108 if (load_segment(&e1, &e2, selector) != 0)
4109 goto fail;
4110 if (!(e2 & DESC_S_MASK))
4111 goto fail;
4112 rpl = selector & 3;
4113 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
4114 cpl = env->hflags & HF_CPL_MASK;
4115 if (e2 & DESC_CS_MASK) {
4116 if (!(e2 & DESC_R_MASK))
4117 goto fail;
4118 if (!(e2 & DESC_C_MASK)) {
4119 if (dpl < cpl || dpl < rpl)
4120 goto fail;
4121 }
4122 } else {
4123 if (dpl < cpl || dpl < rpl) {
4124 fail:
4125 CC_SRC = eflags & ~CC_Z;
4126 return;
4127 }
4128 }
4129 CC_SRC = eflags | CC_Z;
4130}
4131
4132void helper_verw(target_ulong selector1)
4133{
4134 uint32_t e1, e2, eflags, selector;
4135 int rpl, dpl, cpl;
4136
4137 selector = selector1 & 0xffff;
4138 eflags = helper_cc_compute_all(CC_OP);
4139 if ((selector & 0xfffc) == 0)
4140 goto fail;
4141 if (load_segment(&e1, &e2, selector) != 0)
4142 goto fail;
4143 if (!(e2 & DESC_S_MASK))
4144 goto fail;
4145 rpl = selector & 3;
4146 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
4147 cpl = env->hflags & HF_CPL_MASK;
4148 if (e2 & DESC_CS_MASK) {
4149 goto fail;
4150 } else {
4151 if (dpl < cpl || dpl < rpl)
4152 goto fail;
4153 if (!(e2 & DESC_W_MASK)) {
4154 fail:
4155 CC_SRC = eflags & ~CC_Z;
4156 return;
4157 }
4158 }
4159 CC_SRC = eflags | CC_Z;
4160}
4161
4162/* x87 FPU helpers */
4163
4164static void fpu_set_exception(int mask)
4165{
4166 env->fpus |= mask;
4167 if (env->fpus & (~env->fpuc & FPUC_EM))
4168 env->fpus |= FPUS_SE | FPUS_B;
4169}
4170
4171static inline CPU86_LDouble helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
4172{
4173 if (b == 0.0)
4174 fpu_set_exception(FPUS_ZE);
4175 return a / b;
4176}
4177
4178static void fpu_raise_exception(void)
4179{
4180 if (env->cr[0] & CR0_NE_MASK) {
4181 raise_exception(EXCP10_COPR);
4182 }
4183#if !defined(CONFIG_USER_ONLY)
4184 else {
4185 cpu_set_ferr(env);
4186 }
4187#endif
4188}
4189
4190void helper_flds_FT0(uint32_t val)
4191{
4192 union {
4193 float32 f;
4194 uint32_t i;
4195 } u;
4196 u.i = val;
4197 FT0 = float32_to_floatx(u.f, &env->fp_status);
4198}
4199
4200void helper_fldl_FT0(uint64_t val)
4201{
4202 union {
4203 float64 f;
4204 uint64_t i;
4205 } u;
4206 u.i = val;
4207 FT0 = float64_to_floatx(u.f, &env->fp_status);
4208}
4209
4210void helper_fildl_FT0(int32_t val)
4211{
4212 FT0 = int32_to_floatx(val, &env->fp_status);
4213}
4214
4215void helper_flds_ST0(uint32_t val)
4216{
4217 int new_fpstt;
4218 union {
4219 float32 f;
4220 uint32_t i;
4221 } u;
4222 new_fpstt = (env->fpstt - 1) & 7;
4223 u.i = val;
4224 env->fpregs[new_fpstt].d = float32_to_floatx(u.f, &env->fp_status);
4225 env->fpstt = new_fpstt;
4226 env->fptags[new_fpstt] = 0; /* validate stack entry */
4227}
4228
4229void helper_fldl_ST0(uint64_t val)
4230{
4231 int new_fpstt;
4232 union {
4233 float64 f;
4234 uint64_t i;
4235 } u;
4236 new_fpstt = (env->fpstt - 1) & 7;
4237 u.i = val;
4238 env->fpregs[new_fpstt].d = float64_to_floatx(u.f, &env->fp_status);
4239 env->fpstt = new_fpstt;
4240 env->fptags[new_fpstt] = 0; /* validate stack entry */
4241}
4242
4243void helper_fildl_ST0(int32_t val)
4244{
4245 int new_fpstt;
4246 new_fpstt = (env->fpstt - 1) & 7;
4247 env->fpregs[new_fpstt].d = int32_to_floatx(val, &env->fp_status);
4248 env->fpstt = new_fpstt;
4249 env->fptags[new_fpstt] = 0; /* validate stack entry */
4250}
4251
4252void helper_fildll_ST0(int64_t val)
4253{
4254 int new_fpstt;
4255 new_fpstt = (env->fpstt - 1) & 7;
4256 env->fpregs[new_fpstt].d = int64_to_floatx(val, &env->fp_status);
4257 env->fpstt = new_fpstt;
4258 env->fptags[new_fpstt] = 0; /* validate stack entry */
4259}
4260
4261#ifndef VBOX
4262uint32_t helper_fsts_ST0(void)
4263#else
4264RTCCUINTREG helper_fsts_ST0(void)
4265#endif
4266{
4267 union {
4268 float32 f;
4269 uint32_t i;
4270 } u;
4271 u.f = floatx_to_float32(ST0, &env->fp_status);
4272 return u.i;
4273}
4274
4275uint64_t helper_fstl_ST0(void)
4276{
4277 union {
4278 float64 f;
4279 uint64_t i;
4280 } u;
4281 u.f = floatx_to_float64(ST0, &env->fp_status);
4282 return u.i;
4283}
4284
4285#ifndef VBOX
4286int32_t helper_fist_ST0(void)
4287#else
4288RTCCINTREG helper_fist_ST0(void)
4289#endif
4290{
4291 int32_t val;
4292 val = floatx_to_int32(ST0, &env->fp_status);
4293 if (val != (int16_t)val)
4294 val = -32768;
4295 return val;
4296}
4297
4298#ifndef VBOX
4299int32_t helper_fistl_ST0(void)
4300#else
4301RTCCINTREG helper_fistl_ST0(void)
4302#endif
4303{
4304 int32_t val;
4305 val = floatx_to_int32(ST0, &env->fp_status);
4306 return val;
4307}
4308
4309int64_t helper_fistll_ST0(void)
4310{
4311 int64_t val;
4312 val = floatx_to_int64(ST0, &env->fp_status);
4313 return val;
4314}
4315
4316#ifndef VBOX
4317int32_t helper_fistt_ST0(void)
4318#else
4319RTCCINTREG helper_fistt_ST0(void)
4320#endif
4321{
4322 int32_t val;
4323 val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
4324 if (val != (int16_t)val)
4325 val = -32768;
4326 return val;
4327}
4328
4329#ifndef VBOX
4330int32_t helper_fisttl_ST0(void)
4331#else
4332RTCCINTREG helper_fisttl_ST0(void)
4333#endif
4334{
4335 int32_t val;
4336 val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
4337 return val;
4338}
4339
4340int64_t helper_fisttll_ST0(void)
4341{
4342 int64_t val;
4343 val = floatx_to_int64_round_to_zero(ST0, &env->fp_status);
4344 return val;
4345}
4346
4347void helper_fldt_ST0(target_ulong ptr)
4348{
4349 int new_fpstt;
4350 new_fpstt = (env->fpstt - 1) & 7;
4351 env->fpregs[new_fpstt].d = helper_fldt(ptr);
4352 env->fpstt = new_fpstt;
4353 env->fptags[new_fpstt] = 0; /* validate stack entry */
4354}
4355
4356void helper_fstt_ST0(target_ulong ptr)
4357{
4358 helper_fstt(ST0, ptr);
4359}
4360
4361void helper_fpush(void)
4362{
4363 fpush();
4364}
4365
4366void helper_fpop(void)
4367{
4368 fpop();
4369}
4370
4371void helper_fdecstp(void)
4372{
4373 env->fpstt = (env->fpstt - 1) & 7;
4374 env->fpus &= (~0x4700);
4375}
4376
4377void helper_fincstp(void)
4378{
4379 env->fpstt = (env->fpstt + 1) & 7;
4380 env->fpus &= (~0x4700);
4381}
4382
4383/* FPU move */
4384
4385void helper_ffree_STN(int st_index)
4386{
4387 env->fptags[(env->fpstt + st_index) & 7] = 1;
4388}
4389
4390void helper_fmov_ST0_FT0(void)
4391{
4392 ST0 = FT0;
4393}
4394
4395void helper_fmov_FT0_STN(int st_index)
4396{
4397 FT0 = ST(st_index);
4398}
4399
4400void helper_fmov_ST0_STN(int st_index)
4401{
4402 ST0 = ST(st_index);
4403}
4404
4405void helper_fmov_STN_ST0(int st_index)
4406{
4407 ST(st_index) = ST0;
4408}
4409
4410void helper_fxchg_ST0_STN(int st_index)
4411{
4412 CPU86_LDouble tmp;
4413 tmp = ST(st_index);
4414 ST(st_index) = ST0;
4415 ST0 = tmp;
4416}
4417
4418/* FPU operations */
4419
4420static const int fcom_ccval[4] = {0x0100, 0x4000, 0x0000, 0x4500};
4421
4422void helper_fcom_ST0_FT0(void)
4423{
4424 int ret;
4425
4426 ret = floatx_compare(ST0, FT0, &env->fp_status);
4427 env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
4428}
4429
4430void helper_fucom_ST0_FT0(void)
4431{
4432 int ret;
4433
4434 ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
4435 env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret+ 1];
4436}
4437
4438static const int fcomi_ccval[4] = {CC_C, CC_Z, 0, CC_Z | CC_P | CC_C};
4439
4440void helper_fcomi_ST0_FT0(void)
4441{
4442 int eflags;
4443 int ret;
4444
4445 ret = floatx_compare(ST0, FT0, &env->fp_status);
4446 eflags = helper_cc_compute_all(CC_OP);
4447 eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
4448 CC_SRC = eflags;
4449}
4450
4451void helper_fucomi_ST0_FT0(void)
4452{
4453 int eflags;
4454 int ret;
4455
4456 ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
4457 eflags = helper_cc_compute_all(CC_OP);
4458 eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
4459 CC_SRC = eflags;
4460}
4461
4462void helper_fadd_ST0_FT0(void)
4463{
4464 ST0 += FT0;
4465}
4466
4467void helper_fmul_ST0_FT0(void)
4468{
4469 ST0 *= FT0;
4470}
4471
4472void helper_fsub_ST0_FT0(void)
4473{
4474 ST0 -= FT0;
4475}
4476
4477void helper_fsubr_ST0_FT0(void)
4478{
4479 ST0 = FT0 - ST0;
4480}
4481
4482void helper_fdiv_ST0_FT0(void)
4483{
4484 ST0 = helper_fdiv(ST0, FT0);
4485}
4486
4487void helper_fdivr_ST0_FT0(void)
4488{
4489 ST0 = helper_fdiv(FT0, ST0);
4490}
4491
4492/* fp operations between STN and ST0 */
4493
4494void helper_fadd_STN_ST0(int st_index)
4495{
4496 ST(st_index) += ST0;
4497}
4498
4499void helper_fmul_STN_ST0(int st_index)
4500{
4501 ST(st_index) *= ST0;
4502}
4503
4504void helper_fsub_STN_ST0(int st_index)
4505{
4506 ST(st_index) -= ST0;
4507}
4508
4509void helper_fsubr_STN_ST0(int st_index)
4510{
4511 CPU86_LDouble *p;
4512 p = &ST(st_index);
4513 *p = ST0 - *p;
4514}
4515
4516void helper_fdiv_STN_ST0(int st_index)
4517{
4518 CPU86_LDouble *p;
4519 p = &ST(st_index);
4520 *p = helper_fdiv(*p, ST0);
4521}
4522
4523void helper_fdivr_STN_ST0(int st_index)
4524{
4525 CPU86_LDouble *p;
4526 p = &ST(st_index);
4527 *p = helper_fdiv(ST0, *p);
4528}
4529
4530/* misc FPU operations */
4531void helper_fchs_ST0(void)
4532{
4533 ST0 = floatx_chs(ST0);
4534}
4535
4536void helper_fabs_ST0(void)
4537{
4538 ST0 = floatx_abs(ST0);
4539}
4540
4541void helper_fld1_ST0(void)
4542{
4543 ST0 = f15rk[1];
4544}
4545
4546void helper_fldl2t_ST0(void)
4547{
4548 ST0 = f15rk[6];
4549}
4550
4551void helper_fldl2e_ST0(void)
4552{
4553 ST0 = f15rk[5];
4554}
4555
4556void helper_fldpi_ST0(void)
4557{
4558 ST0 = f15rk[2];
4559}
4560
4561void helper_fldlg2_ST0(void)
4562{
4563 ST0 = f15rk[3];
4564}
4565
4566void helper_fldln2_ST0(void)
4567{
4568 ST0 = f15rk[4];
4569}
4570
4571void helper_fldz_ST0(void)
4572{
4573 ST0 = f15rk[0];
4574}
4575
4576void helper_fldz_FT0(void)
4577{
4578 FT0 = f15rk[0];
4579}
4580
4581#ifndef VBOX
4582uint32_t helper_fnstsw(void)
4583#else
4584RTCCUINTREG helper_fnstsw(void)
4585#endif
4586{
4587 return (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
4588}
4589
4590#ifndef VBOX
4591uint32_t helper_fnstcw(void)
4592#else
4593RTCCUINTREG helper_fnstcw(void)
4594#endif
4595{
4596 return env->fpuc;
4597}
4598
4599static void update_fp_status(void)
4600{
4601 int rnd_type;
4602
4603 /* set rounding mode */
4604 switch(env->fpuc & RC_MASK) {
4605 default:
4606 case RC_NEAR:
4607 rnd_type = float_round_nearest_even;
4608 break;
4609 case RC_DOWN:
4610 rnd_type = float_round_down;
4611 break;
4612 case RC_UP:
4613 rnd_type = float_round_up;
4614 break;
4615 case RC_CHOP:
4616 rnd_type = float_round_to_zero;
4617 break;
4618 }
4619 set_float_rounding_mode(rnd_type, &env->fp_status);
4620#ifdef FLOATX80
4621 switch((env->fpuc >> 8) & 3) {
4622 case 0:
4623 rnd_type = 32;
4624 break;
4625 case 2:
4626 rnd_type = 64;
4627 break;
4628 case 3:
4629 default:
4630 rnd_type = 80;
4631 break;
4632 }
4633 set_floatx80_rounding_precision(rnd_type, &env->fp_status);
4634#endif
4635}
4636
4637void helper_fldcw(uint32_t val)
4638{
4639 env->fpuc = val;
4640 update_fp_status();
4641}
4642
4643void helper_fclex(void)
4644{
4645 env->fpus &= 0x7f00;
4646}
4647
4648void helper_fwait(void)
4649{
4650 if (env->fpus & FPUS_SE)
4651 fpu_raise_exception();
4652}
4653
4654void helper_fninit(void)
4655{
4656 env->fpus = 0;
4657 env->fpstt = 0;
4658 env->fpuc = 0x37f;
4659 env->fptags[0] = 1;
4660 env->fptags[1] = 1;
4661 env->fptags[2] = 1;
4662 env->fptags[3] = 1;
4663 env->fptags[4] = 1;
4664 env->fptags[5] = 1;
4665 env->fptags[6] = 1;
4666 env->fptags[7] = 1;
4667}
4668
4669/* BCD ops */
4670
4671void helper_fbld_ST0(target_ulong ptr)
4672{
4673 CPU86_LDouble tmp;
4674 uint64_t val;
4675 unsigned int v;
4676 int i;
4677
4678 val = 0;
4679 for(i = 8; i >= 0; i--) {
4680 v = ldub(ptr + i);
4681 val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
4682 }
4683 tmp = val;
4684 if (ldub(ptr + 9) & 0x80)
4685 tmp = -tmp;
4686 fpush();
4687 ST0 = tmp;
4688}
4689
4690void helper_fbst_ST0(target_ulong ptr)
4691{
4692 int v;
4693 target_ulong mem_ref, mem_end;
4694 int64_t val;
4695
4696 val = floatx_to_int64(ST0, &env->fp_status);
4697 mem_ref = ptr;
4698 mem_end = mem_ref + 9;
4699 if (val < 0) {
4700 stb(mem_end, 0x80);
4701 val = -val;
4702 } else {
4703 stb(mem_end, 0x00);
4704 }
4705 while (mem_ref < mem_end) {
4706 if (val == 0)
4707 break;
4708 v = val % 100;
4709 val = val / 100;
4710 v = ((v / 10) << 4) | (v % 10);
4711 stb(mem_ref++, v);
4712 }
4713 while (mem_ref < mem_end) {
4714 stb(mem_ref++, 0);
4715 }
4716}
4717
4718void helper_f2xm1(void)
4719{
4720 ST0 = pow(2.0,ST0) - 1.0;
4721}
4722
4723void helper_fyl2x(void)
4724{
4725 CPU86_LDouble fptemp;
4726
4727 fptemp = ST0;
4728 if (fptemp>0.0){
4729 fptemp = log(fptemp)/log(2.0); /* log2(ST) */
4730 ST1 *= fptemp;
4731 fpop();
4732 } else {
4733 env->fpus &= (~0x4700);
4734 env->fpus |= 0x400;
4735 }
4736}
4737
4738void helper_fptan(void)
4739{
4740 CPU86_LDouble fptemp;
4741
4742 fptemp = ST0;
4743 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4744 env->fpus |= 0x400;
4745 } else {
4746 ST0 = tan(fptemp);
4747 fpush();
4748 ST0 = 1.0;
4749 env->fpus &= (~0x400); /* C2 <-- 0 */
4750 /* the above code is for |arg| < 2**52 only */
4751 }
4752}
4753
4754void helper_fpatan(void)
4755{
4756 CPU86_LDouble fptemp, fpsrcop;
4757
4758 fpsrcop = ST1;
4759 fptemp = ST0;
4760 ST1 = atan2(fpsrcop,fptemp);
4761 fpop();
4762}
4763
4764void helper_fxtract(void)
4765{
4766 CPU86_LDoubleU temp;
4767 unsigned int expdif;
4768
4769 temp.d = ST0;
4770 expdif = EXPD(temp) - EXPBIAS;
4771 /*DP exponent bias*/
4772 ST0 = expdif;
4773 fpush();
4774 BIASEXPONENT(temp);
4775 ST0 = temp.d;
4776}
4777
4778void helper_fprem1(void)
4779{
4780 CPU86_LDouble dblq, fpsrcop, fptemp;
4781 CPU86_LDoubleU fpsrcop1, fptemp1;
4782 int expdif;
4783 signed long long int q;
4784
4785#ifndef VBOX /* Unfortunately, we cannot handle isinf/isnan easily in wrapper */
4786 if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
4787#else
4788 if ((ST0 != ST0) || (ST1 != ST1) || (ST1 == 0.0)) {
4789#endif
4790 ST0 = 0.0 / 0.0; /* NaN */
4791 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4792 return;
4793 }
4794
4795 fpsrcop = ST0;
4796 fptemp = ST1;
4797 fpsrcop1.d = fpsrcop;
4798 fptemp1.d = fptemp;
4799 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
4800
4801 if (expdif < 0) {
4802 /* optimisation? taken from the AMD docs */
4803 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4804 /* ST0 is unchanged */
4805 return;
4806 }
4807
4808 if (expdif < 53) {
4809 dblq = fpsrcop / fptemp;
4810 /* round dblq towards nearest integer */
4811 dblq = rint(dblq);
4812 ST0 = fpsrcop - fptemp * dblq;
4813
4814 /* convert dblq to q by truncating towards zero */
4815 if (dblq < 0.0)
4816 q = (signed long long int)(-dblq);
4817 else
4818 q = (signed long long int)dblq;
4819
4820 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4821 /* (C0,C3,C1) <-- (q2,q1,q0) */
4822 env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
4823 env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
4824 env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
4825 } else {
4826 env->fpus |= 0x400; /* C2 <-- 1 */
4827 fptemp = pow(2.0, expdif - 50);
4828 fpsrcop = (ST0 / ST1) / fptemp;
4829 /* fpsrcop = integer obtained by chopping */
4830 fpsrcop = (fpsrcop < 0.0) ?
4831 -(floor(fabs(fpsrcop))) : floor(fpsrcop);
4832 ST0 -= (ST1 * fpsrcop * fptemp);
4833 }
4834}
4835
4836void helper_fprem(void)
4837{
4838 CPU86_LDouble dblq, fpsrcop, fptemp;
4839 CPU86_LDoubleU fpsrcop1, fptemp1;
4840 int expdif;
4841 signed long long int q;
4842
4843#ifndef VBOX /* Unfortunately, we cannot easily handle isinf/isnan in wrapper */
4844 if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
4845#else
4846 if ((ST0 != ST0) || (ST1 != ST1) || (ST1 == 0.0)) {
4847#endif
4848 ST0 = 0.0 / 0.0; /* NaN */
4849 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4850 return;
4851 }
4852
4853 fpsrcop = (CPU86_LDouble)ST0;
4854 fptemp = (CPU86_LDouble)ST1;
4855 fpsrcop1.d = fpsrcop;
4856 fptemp1.d = fptemp;
4857 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
4858
4859 if (expdif < 0) {
4860 /* optimisation? taken from the AMD docs */
4861 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4862 /* ST0 is unchanged */
4863 return;
4864 }
4865
4866 if ( expdif < 53 ) {
4867 dblq = fpsrcop/*ST0*/ / fptemp/*ST1*/;
4868 /* round dblq towards zero */
4869 dblq = (dblq < 0.0) ? ceil(dblq) : floor(dblq);
4870 ST0 = fpsrcop/*ST0*/ - fptemp * dblq;
4871
4872 /* convert dblq to q by truncating towards zero */
4873 if (dblq < 0.0)
4874 q = (signed long long int)(-dblq);
4875 else
4876 q = (signed long long int)dblq;
4877
4878 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4879 /* (C0,C3,C1) <-- (q2,q1,q0) */
4880 env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
4881 env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
4882 env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
4883 } else {
4884 int N = 32 + (expdif % 32); /* as per AMD docs */
4885 env->fpus |= 0x400; /* C2 <-- 1 */
4886 fptemp = pow(2.0, (double)(expdif - N));
4887 fpsrcop = (ST0 / ST1) / fptemp;
4888 /* fpsrcop = integer obtained by chopping */
4889 fpsrcop = (fpsrcop < 0.0) ?
4890 -(floor(fabs(fpsrcop))) : floor(fpsrcop);
4891 ST0 -= (ST1 * fpsrcop * fptemp);
4892 }
4893}
4894
4895void helper_fyl2xp1(void)
4896{
4897 CPU86_LDouble fptemp;
4898
4899 fptemp = ST0;
4900 if ((fptemp+1.0)>0.0) {
4901 fptemp = log(fptemp+1.0) / log(2.0); /* log2(ST+1.0) */
4902 ST1 *= fptemp;
4903 fpop();
4904 } else {
4905 env->fpus &= (~0x4700);
4906 env->fpus |= 0x400;
4907 }
4908}
4909
4910void helper_fsqrt(void)
4911{
4912 CPU86_LDouble fptemp;
4913
4914 fptemp = ST0;
4915 if (fptemp<0.0) {
4916 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4917 env->fpus |= 0x400;
4918 }
4919 ST0 = sqrt(fptemp);
4920}
4921
4922void helper_fsincos(void)
4923{
4924 CPU86_LDouble fptemp;
4925
4926 fptemp = ST0;
4927 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4928 env->fpus |= 0x400;
4929 } else {
4930 ST0 = sin(fptemp);
4931 fpush();
4932 ST0 = cos(fptemp);
4933 env->fpus &= (~0x400); /* C2 <-- 0 */
4934 /* the above code is for |arg| < 2**63 only */
4935 }
4936}
4937
4938void helper_frndint(void)
4939{
4940 ST0 = floatx_round_to_int(ST0, &env->fp_status);
4941}
4942
4943void helper_fscale(void)
4944{
4945 ST0 = ldexp (ST0, (int)(ST1));
4946}
4947
4948void helper_fsin(void)
4949{
4950 CPU86_LDouble fptemp;
4951
4952 fptemp = ST0;
4953 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4954 env->fpus |= 0x400;
4955 } else {
4956 ST0 = sin(fptemp);
4957 env->fpus &= (~0x400); /* C2 <-- 0 */
4958 /* the above code is for |arg| < 2**53 only */
4959 }
4960}
4961
4962void helper_fcos(void)
4963{
4964 CPU86_LDouble fptemp;
4965
4966 fptemp = ST0;
4967 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4968 env->fpus |= 0x400;
4969 } else {
4970 ST0 = cos(fptemp);
4971 env->fpus &= (~0x400); /* C2 <-- 0 */
4972 /* the above code is for |arg5 < 2**63 only */
4973 }
4974}
4975
4976void helper_fxam_ST0(void)
4977{
4978 CPU86_LDoubleU temp;
4979 int expdif;
4980
4981 temp.d = ST0;
4982
4983 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4984 if (SIGND(temp))
4985 env->fpus |= 0x200; /* C1 <-- 1 */
4986
4987 /* XXX: test fptags too */
4988 expdif = EXPD(temp);
4989 if (expdif == MAXEXPD) {
4990#ifdef USE_X86LDOUBLE
4991 if (MANTD(temp) == 0x8000000000000000ULL)
4992#else
4993 if (MANTD(temp) == 0)
4994#endif
4995 env->fpus |= 0x500 /*Infinity*/;
4996 else
4997 env->fpus |= 0x100 /*NaN*/;
4998 } else if (expdif == 0) {
4999 if (MANTD(temp) == 0)
5000 env->fpus |= 0x4000 /*Zero*/;
5001 else
5002 env->fpus |= 0x4400 /*Denormal*/;
5003 } else {
5004 env->fpus |= 0x400;
5005 }
5006}
5007
5008void helper_fstenv(target_ulong ptr, int data32)
5009{
5010 int fpus, fptag, exp, i;
5011 uint64_t mant;
5012 CPU86_LDoubleU tmp;
5013
5014 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
5015 fptag = 0;
5016 for (i=7; i>=0; i--) {
5017 fptag <<= 2;
5018 if (env->fptags[i]) {
5019 fptag |= 3;
5020 } else {
5021 tmp.d = env->fpregs[i].d;
5022 exp = EXPD(tmp);
5023 mant = MANTD(tmp);
5024 if (exp == 0 && mant == 0) {
5025 /* zero */
5026 fptag |= 1;
5027 } else if (exp == 0 || exp == MAXEXPD
5028#ifdef USE_X86LDOUBLE
5029 || (mant & (1LL << 63)) == 0
5030#endif
5031 ) {
5032 /* NaNs, infinity, denormal */
5033 fptag |= 2;
5034 }
5035 }
5036 }
5037 if (data32) {
5038 /* 32 bit */
5039 stl(ptr, env->fpuc);
5040 stl(ptr + 4, fpus);
5041 stl(ptr + 8, fptag);
5042 stl(ptr + 12, 0); /* fpip */
5043 stl(ptr + 16, 0); /* fpcs */
5044 stl(ptr + 20, 0); /* fpoo */
5045 stl(ptr + 24, 0); /* fpos */
5046 } else {
5047 /* 16 bit */
5048 stw(ptr, env->fpuc);
5049 stw(ptr + 2, fpus);
5050 stw(ptr + 4, fptag);
5051 stw(ptr + 6, 0);
5052 stw(ptr + 8, 0);
5053 stw(ptr + 10, 0);
5054 stw(ptr + 12, 0);
5055 }
5056}
5057
5058void helper_fldenv(target_ulong ptr, int data32)
5059{
5060 int i, fpus, fptag;
5061
5062 if (data32) {
5063 env->fpuc = lduw(ptr);
5064 fpus = lduw(ptr + 4);
5065 fptag = lduw(ptr + 8);
5066 }
5067 else {
5068 env->fpuc = lduw(ptr);
5069 fpus = lduw(ptr + 2);
5070 fptag = lduw(ptr + 4);
5071 }
5072 env->fpstt = (fpus >> 11) & 7;
5073 env->fpus = fpus & ~0x3800;
5074 for(i = 0;i < 8; i++) {
5075 env->fptags[i] = ((fptag & 3) == 3);
5076 fptag >>= 2;
5077 }
5078}
5079
5080void helper_fsave(target_ulong ptr, int data32)
5081{
5082 CPU86_LDouble tmp;
5083 int i;
5084
5085 helper_fstenv(ptr, data32);
5086
5087 ptr += (14 << data32);
5088 for(i = 0;i < 8; i++) {
5089 tmp = ST(i);
5090 helper_fstt(tmp, ptr);
5091 ptr += 10;
5092 }
5093
5094 /* fninit */
5095 env->fpus = 0;
5096 env->fpstt = 0;
5097 env->fpuc = 0x37f;
5098 env->fptags[0] = 1;
5099 env->fptags[1] = 1;
5100 env->fptags[2] = 1;
5101 env->fptags[3] = 1;
5102 env->fptags[4] = 1;
5103 env->fptags[5] = 1;
5104 env->fptags[6] = 1;
5105 env->fptags[7] = 1;
5106}
5107
5108void helper_frstor(target_ulong ptr, int data32)
5109{
5110 CPU86_LDouble tmp;
5111 int i;
5112
5113 helper_fldenv(ptr, data32);
5114 ptr += (14 << data32);
5115
5116 for(i = 0;i < 8; i++) {
5117 tmp = helper_fldt(ptr);
5118 ST(i) = tmp;
5119 ptr += 10;
5120 }
5121}
5122
5123void helper_fxsave(target_ulong ptr, int data64)
5124{
5125 int fpus, fptag, i, nb_xmm_regs;
5126 CPU86_LDouble tmp;
5127 target_ulong addr;
5128
5129 /* The operand must be 16 byte aligned */
5130 if (ptr & 0xf) {
5131 raise_exception(EXCP0D_GPF);
5132 }
5133
5134 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
5135 fptag = 0;
5136 for(i = 0; i < 8; i++) {
5137 fptag |= (env->fptags[i] << i);
5138 }
5139 stw(ptr, env->fpuc);
5140 stw(ptr + 2, fpus);
5141 stw(ptr + 4, fptag ^ 0xff);
5142#ifdef TARGET_X86_64
5143 if (data64) {
5144 stq(ptr + 0x08, 0); /* rip */
5145 stq(ptr + 0x10, 0); /* rdp */
5146 } else
5147#endif
5148 {
5149 stl(ptr + 0x08, 0); /* eip */
5150 stl(ptr + 0x0c, 0); /* sel */
5151 stl(ptr + 0x10, 0); /* dp */
5152 stl(ptr + 0x14, 0); /* sel */
5153 }
5154
5155 addr = ptr + 0x20;
5156 for(i = 0;i < 8; i++) {
5157 tmp = ST(i);
5158 helper_fstt(tmp, addr);
5159 addr += 16;
5160 }
5161
5162 if (env->cr[4] & CR4_OSFXSR_MASK) {
5163 /* XXX: finish it */
5164 stl(ptr + 0x18, env->mxcsr); /* mxcsr */
5165 stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
5166 if (env->hflags & HF_CS64_MASK)
5167 nb_xmm_regs = 16;
5168 else
5169 nb_xmm_regs = 8;
5170 addr = ptr + 0xa0;
5171 /* Fast FXSAVE leaves out the XMM registers */
5172 if (!(env->efer & MSR_EFER_FFXSR)
5173 || (env->hflags & HF_CPL_MASK)
5174 || !(env->hflags & HF_LMA_MASK)) {
5175 for(i = 0; i < nb_xmm_regs; i++) {
5176 stq(addr, env->xmm_regs[i].XMM_Q(0));
5177 stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
5178 addr += 16;
5179 }
5180 }
5181 }
5182}
5183
5184void helper_fxrstor(target_ulong ptr, int data64)
5185{
5186 int i, fpus, fptag, nb_xmm_regs;
5187 CPU86_LDouble tmp;
5188 target_ulong addr;
5189
5190 /* The operand must be 16 byte aligned */
5191 if (ptr & 0xf) {
5192 raise_exception(EXCP0D_GPF);
5193 }
5194
5195 env->fpuc = lduw(ptr);
5196 fpus = lduw(ptr + 2);
5197 fptag = lduw(ptr + 4);
5198 env->fpstt = (fpus >> 11) & 7;
5199 env->fpus = fpus & ~0x3800;
5200 fptag ^= 0xff;
5201 for(i = 0;i < 8; i++) {
5202 env->fptags[i] = ((fptag >> i) & 1);
5203 }
5204
5205 addr = ptr + 0x20;
5206 for(i = 0;i < 8; i++) {
5207 tmp = helper_fldt(addr);
5208 ST(i) = tmp;
5209 addr += 16;
5210 }
5211
5212 if (env->cr[4] & CR4_OSFXSR_MASK) {
5213 /* XXX: finish it */
5214 env->mxcsr = ldl(ptr + 0x18);
5215 //ldl(ptr + 0x1c);
5216 if (env->hflags & HF_CS64_MASK)
5217 nb_xmm_regs = 16;
5218 else
5219 nb_xmm_regs = 8;
5220 addr = ptr + 0xa0;
5221 /* Fast FXRESTORE leaves out the XMM registers */
5222 if (!(env->efer & MSR_EFER_FFXSR)
5223 || (env->hflags & HF_CPL_MASK)
5224 || !(env->hflags & HF_LMA_MASK)) {
5225 for(i = 0; i < nb_xmm_regs; i++) {
5226#if !defined(VBOX) || __GNUC__ < 4
5227 env->xmm_regs[i].XMM_Q(0) = ldq(addr);
5228 env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
5229#else /* VBOX + __GNUC__ >= 4: gcc 4.x compiler bug - it runs out of registers for the 64-bit value. */
5230# if 1
5231 env->xmm_regs[i].XMM_L(0) = ldl(addr);
5232 env->xmm_regs[i].XMM_L(1) = ldl(addr + 4);
5233 env->xmm_regs[i].XMM_L(2) = ldl(addr + 8);
5234 env->xmm_regs[i].XMM_L(3) = ldl(addr + 12);
5235# else
5236 /* this works fine on Mac OS X, gcc 4.0.1 */
5237 uint64_t u64 = ldq(addr);
5238 env->xmm_regs[i].XMM_Q(0);
5239 u64 = ldq(addr + 4);
5240 env->xmm_regs[i].XMM_Q(1) = u64;
5241# endif
5242#endif
5243 addr += 16;
5244 }
5245 }
5246 }
5247}
5248
5249#ifndef USE_X86LDOUBLE
5250
5251void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
5252{
5253 CPU86_LDoubleU temp;
5254 int e;
5255
5256 temp.d = f;
5257 /* mantissa */
5258 *pmant = (MANTD(temp) << 11) | (1LL << 63);
5259 /* exponent + sign */
5260 e = EXPD(temp) - EXPBIAS + 16383;
5261 e |= SIGND(temp) >> 16;
5262 *pexp = e;
5263}
5264
5265CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
5266{
5267 CPU86_LDoubleU temp;
5268 int e;
5269 uint64_t ll;
5270
5271 /* XXX: handle overflow ? */
5272 e = (upper & 0x7fff) - 16383 + EXPBIAS; /* exponent */
5273 e |= (upper >> 4) & 0x800; /* sign */
5274 ll = (mant >> 11) & ((1LL << 52) - 1);
5275#ifdef __arm__
5276 temp.l.upper = (e << 20) | (ll >> 32);
5277 temp.l.lower = ll;
5278#else
5279 temp.ll = ll | ((uint64_t)e << 52);
5280#endif
5281 return temp.d;
5282}
5283
5284#else
5285
5286void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
5287{
5288 CPU86_LDoubleU temp;
5289
5290 temp.d = f;
5291 *pmant = temp.l.lower;
5292 *pexp = temp.l.upper;
5293}
5294
5295CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
5296{
5297 CPU86_LDoubleU temp;
5298
5299 temp.l.upper = upper;
5300 temp.l.lower = mant;
5301 return temp.d;
5302}
5303#endif
5304
5305#ifdef TARGET_X86_64
5306
5307//#define DEBUG_MULDIV
5308
5309static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
5310{
5311 *plow += a;
5312 /* carry test */
5313 if (*plow < a)
5314 (*phigh)++;
5315 *phigh += b;
5316}
5317
5318static void neg128(uint64_t *plow, uint64_t *phigh)
5319{
5320 *plow = ~ *plow;
5321 *phigh = ~ *phigh;
5322 add128(plow, phigh, 1, 0);
5323}
5324
5325/* return TRUE if overflow */
5326static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
5327{
5328 uint64_t q, r, a1, a0;
5329 int i, qb, ab;
5330
5331 a0 = *plow;
5332 a1 = *phigh;
5333 if (a1 == 0) {
5334 q = a0 / b;
5335 r = a0 % b;
5336 *plow = q;
5337 *phigh = r;
5338 } else {
5339 if (a1 >= b)
5340 return 1;
5341 /* XXX: use a better algorithm */
5342 for(i = 0; i < 64; i++) {
5343 ab = a1 >> 63;
5344 a1 = (a1 << 1) | (a0 >> 63);
5345 if (ab || a1 >= b) {
5346 a1 -= b;
5347 qb = 1;
5348 } else {
5349 qb = 0;
5350 }
5351 a0 = (a0 << 1) | qb;
5352 }
5353#if defined(DEBUG_MULDIV)
5354 printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64 ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n",
5355 *phigh, *plow, b, a0, a1);
5356#endif
5357 *plow = a0;
5358 *phigh = a1;
5359 }
5360 return 0;
5361}
5362
5363/* return TRUE if overflow */
5364static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
5365{
5366 int sa, sb;
5367 sa = ((int64_t)*phigh < 0);
5368 if (sa)
5369 neg128(plow, phigh);
5370 sb = (b < 0);
5371 if (sb)
5372 b = -b;
5373 if (div64(plow, phigh, b) != 0)
5374 return 1;
5375 if (sa ^ sb) {
5376 if (*plow > (1ULL << 63))
5377 return 1;
5378 *plow = - *plow;
5379 } else {
5380 if (*plow >= (1ULL << 63))
5381 return 1;
5382 }
5383 if (sa)
5384 *phigh = - *phigh;
5385 return 0;
5386}
5387
5388void helper_mulq_EAX_T0(target_ulong t0)
5389{
5390 uint64_t r0, r1;
5391
5392 mulu64(&r0, &r1, EAX, t0);
5393 EAX = r0;
5394 EDX = r1;
5395 CC_DST = r0;
5396 CC_SRC = r1;
5397}
5398
5399void helper_imulq_EAX_T0(target_ulong t0)
5400{
5401 uint64_t r0, r1;
5402
5403 muls64(&r0, &r1, EAX, t0);
5404 EAX = r0;
5405 EDX = r1;
5406 CC_DST = r0;
5407 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
5408}
5409
5410target_ulong helper_imulq_T0_T1(target_ulong t0, target_ulong t1)
5411{
5412 uint64_t r0, r1;
5413
5414 muls64(&r0, &r1, t0, t1);
5415 CC_DST = r0;
5416 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
5417 return r0;
5418}
5419
5420void helper_divq_EAX(target_ulong t0)
5421{
5422 uint64_t r0, r1;
5423 if (t0 == 0) {
5424 raise_exception(EXCP00_DIVZ);
5425 }
5426 r0 = EAX;
5427 r1 = EDX;
5428 if (div64(&r0, &r1, t0))
5429 raise_exception(EXCP00_DIVZ);
5430 EAX = r0;
5431 EDX = r1;
5432}
5433
5434void helper_idivq_EAX(target_ulong t0)
5435{
5436 uint64_t r0, r1;
5437 if (t0 == 0) {
5438 raise_exception(EXCP00_DIVZ);
5439 }
5440 r0 = EAX;
5441 r1 = EDX;
5442 if (idiv64(&r0, &r1, t0))
5443 raise_exception(EXCP00_DIVZ);
5444 EAX = r0;
5445 EDX = r1;
5446}
5447#endif
5448
5449static void do_hlt(void)
5450{
5451 env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
5452 env->halted = 1;
5453 env->exception_index = EXCP_HLT;
5454 cpu_loop_exit();
5455}
5456
5457void helper_hlt(int next_eip_addend)
5458{
5459 helper_svm_check_intercept_param(SVM_EXIT_HLT, 0);
5460 EIP += next_eip_addend;
5461
5462 do_hlt();
5463}
5464
5465void helper_monitor(target_ulong ptr)
5466{
5467#ifdef VBOX
5468 if ((uint32_t)ECX > 1)
5469 raise_exception(EXCP0D_GPF);
5470#else /* !VBOX */
5471 if ((uint32_t)ECX != 0)
5472 raise_exception(EXCP0D_GPF);
5473#endif /* !VBOX */
5474 /* XXX: store address ? */
5475 helper_svm_check_intercept_param(SVM_EXIT_MONITOR, 0);
5476}
5477
5478void helper_mwait(int next_eip_addend)
5479{
5480 if ((uint32_t)ECX != 0)
5481 raise_exception(EXCP0D_GPF);
5482#ifdef VBOX
5483 helper_hlt(next_eip_addend);
5484#else /* !VBOX */
5485 helper_svm_check_intercept_param(SVM_EXIT_MWAIT, 0);
5486 EIP += next_eip_addend;
5487
5488 /* XXX: not complete but not completely erroneous */
5489 if (env->cpu_index != 0 || env->next_cpu != NULL) {
5490 /* more than one CPU: do not sleep because another CPU may
5491 wake this one */
5492 } else {
5493 do_hlt();
5494 }
5495#endif /* !VBOX */
5496}
5497
5498void helper_debug(void)
5499{
5500 env->exception_index = EXCP_DEBUG;
5501 cpu_loop_exit();
5502}
5503
5504void helper_reset_rf(void)
5505{
5506 env->eflags &= ~RF_MASK;
5507}
5508
5509void helper_raise_interrupt(int intno, int next_eip_addend)
5510{
5511 raise_interrupt(intno, 1, 0, next_eip_addend);
5512}
5513
5514void helper_raise_exception(int exception_index)
5515{
5516 raise_exception(exception_index);
5517}
5518
5519void helper_cli(void)
5520{
5521 env->eflags &= ~IF_MASK;
5522}
5523
5524void helper_sti(void)
5525{
5526 env->eflags |= IF_MASK;
5527}
5528
5529#ifdef VBOX
5530void helper_cli_vme(void)
5531{
5532 env->eflags &= ~VIF_MASK;
5533}
5534
5535void helper_sti_vme(void)
5536{
5537 /* First check, then change eflags according to the AMD manual */
5538 if (env->eflags & VIP_MASK) {
5539 raise_exception(EXCP0D_GPF);
5540 }
5541 env->eflags |= VIF_MASK;
5542}
5543#endif /* VBOX */
5544
5545#if 0
5546/* vm86plus instructions */
5547void helper_cli_vm(void)
5548{
5549 env->eflags &= ~VIF_MASK;
5550}
5551
5552void helper_sti_vm(void)
5553{
5554 env->eflags |= VIF_MASK;
5555 if (env->eflags & VIP_MASK) {
5556 raise_exception(EXCP0D_GPF);
5557 }
5558}
5559#endif
5560
5561void helper_set_inhibit_irq(void)
5562{
5563 env->hflags |= HF_INHIBIT_IRQ_MASK;
5564}
5565
5566void helper_reset_inhibit_irq(void)
5567{
5568 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
5569}
5570
5571void helper_boundw(target_ulong a0, int v)
5572{
5573 int low, high;
5574 low = ldsw(a0);
5575 high = ldsw(a0 + 2);
5576 v = (int16_t)v;
5577 if (v < low || v > high) {
5578 raise_exception(EXCP05_BOUND);
5579 }
5580}
5581
5582void helper_boundl(target_ulong a0, int v)
5583{
5584 int low, high;
5585 low = ldl(a0);
5586 high = ldl(a0 + 4);
5587 if (v < low || v > high) {
5588 raise_exception(EXCP05_BOUND);
5589 }
5590}
5591
5592static float approx_rsqrt(float a)
5593{
5594 return 1.0 / sqrt(a);
5595}
5596
5597static float approx_rcp(float a)
5598{
5599 return 1.0 / a;
5600}
5601
5602#if !defined(CONFIG_USER_ONLY)
5603
5604#define MMUSUFFIX _mmu
5605
5606#define SHIFT 0
5607#include "softmmu_template.h"
5608
5609#define SHIFT 1
5610#include "softmmu_template.h"
5611
5612#define SHIFT 2
5613#include "softmmu_template.h"
5614
5615#define SHIFT 3
5616#include "softmmu_template.h"
5617
5618#endif
5619
5620#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
5621/* This code assumes real physical address always fit into host CPU reg,
5622 which is wrong in general, but true for our current use cases. */
5623RTCCUINTREG REGPARM __ldb_vbox_phys(RTCCUINTREG addr)
5624{
5625 return remR3PhysReadS8(addr);
5626}
5627RTCCUINTREG REGPARM __ldub_vbox_phys(RTCCUINTREG addr)
5628{
5629 return remR3PhysReadU8(addr);
5630}
5631void REGPARM __stb_vbox_phys(RTCCUINTREG addr, RTCCUINTREG val)
5632{
5633 remR3PhysWriteU8(addr, val);
5634}
5635RTCCUINTREG REGPARM __ldw_vbox_phys(RTCCUINTREG addr)
5636{
5637 return remR3PhysReadS16(addr);
5638}
5639RTCCUINTREG REGPARM __lduw_vbox_phys(RTCCUINTREG addr)
5640{
5641 return remR3PhysReadU16(addr);
5642}
5643void REGPARM __stw_vbox_phys(RTCCUINTREG addr, RTCCUINTREG val)
5644{
5645 remR3PhysWriteU16(addr, val);
5646}
5647RTCCUINTREG REGPARM __ldl_vbox_phys(RTCCUINTREG addr)
5648{
5649 return remR3PhysReadS32(addr);
5650}
5651RTCCUINTREG REGPARM __ldul_vbox_phys(RTCCUINTREG addr)
5652{
5653 return remR3PhysReadU32(addr);
5654}
5655void REGPARM __stl_vbox_phys(RTCCUINTREG addr, RTCCUINTREG val)
5656{
5657 remR3PhysWriteU32(addr, val);
5658}
5659uint64_t REGPARM __ldq_vbox_phys(RTCCUINTREG addr)
5660{
5661 return remR3PhysReadU64(addr);
5662}
5663void REGPARM __stq_vbox_phys(RTCCUINTREG addr, uint64_t val)
5664{
5665 remR3PhysWriteU64(addr, val);
5666}
5667#endif /* VBOX */
5668
5669#if !defined(CONFIG_USER_ONLY)
5670/* try to fill the TLB and return an exception if error. If retaddr is
5671 NULL, it means that the function was called in C code (i.e. not
5672 from generated code or from helper.c) */
5673/* XXX: fix it to restore all registers */
5674void tlb_fill(target_ulong addr, int is_write, int mmu_idx, void *retaddr)
5675{
5676 TranslationBlock *tb;
5677 int ret;
5678 uintptr_t pc;
5679 CPUX86State *saved_env;
5680
5681 /* XXX: hack to restore env in all cases, even if not called from
5682 generated code */
5683 saved_env = env;
5684 env = cpu_single_env;
5685
5686 ret = cpu_x86_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
5687 if (ret) {
5688 if (retaddr) {
5689 /* now we have a real cpu fault */
5690 pc = (uintptr_t)retaddr;
5691 tb = tb_find_pc(pc);
5692 if (tb) {
5693 /* the PC is inside the translated code. It means that we have
5694 a virtual CPU fault */
5695 cpu_restore_state(tb, env, pc, NULL);
5696 }
5697 }
5698 raise_exception_err(env->exception_index, env->error_code);
5699 }
5700 env = saved_env;
5701}
5702#endif
5703
5704#ifdef VBOX
5705
5706/**
5707 * Correctly computes the eflags.
5708 * @returns eflags.
5709 * @param env1 CPU environment.
5710 */
5711uint32_t raw_compute_eflags(CPUX86State *env1)
5712{
5713 CPUX86State *savedenv = env;
5714 uint32_t efl;
5715 env = env1;
5716 efl = compute_eflags();
5717 env = savedenv;
5718 return efl;
5719}
5720
5721/**
5722 * Reads byte from virtual address in guest memory area.
5723 * XXX: is it working for any addresses? swapped out pages?
5724 * @returns read data byte.
5725 * @param env1 CPU environment.
5726 * @param pvAddr GC Virtual address.
5727 */
5728uint8_t read_byte(CPUX86State *env1, target_ulong addr)
5729{
5730 CPUX86State *savedenv = env;
5731 uint8_t u8;
5732 env = env1;
5733 u8 = ldub_kernel(addr);
5734 env = savedenv;
5735 return u8;
5736}
5737
5738/**
5739 * Reads byte from virtual address in guest memory area.
5740 * XXX: is it working for any addresses? swapped out pages?
5741 * @returns read data byte.
5742 * @param env1 CPU environment.
5743 * @param pvAddr GC Virtual address.
5744 */
5745uint16_t read_word(CPUX86State *env1, target_ulong addr)
5746{
5747 CPUX86State *savedenv = env;
5748 uint16_t u16;
5749 env = env1;
5750 u16 = lduw_kernel(addr);
5751 env = savedenv;
5752 return u16;
5753}
5754
5755/**
5756 * Reads byte from virtual address in guest memory area.
5757 * XXX: is it working for any addresses? swapped out pages?
5758 * @returns read data byte.
5759 * @param env1 CPU environment.
5760 * @param pvAddr GC Virtual address.
5761 */
5762uint32_t read_dword(CPUX86State *env1, target_ulong addr)
5763{
5764 CPUX86State *savedenv = env;
5765 uint32_t u32;
5766 env = env1;
5767 u32 = ldl_kernel(addr);
5768 env = savedenv;
5769 return u32;
5770}
5771
5772/**
5773 * Writes byte to virtual address in guest memory area.
5774 * XXX: is it working for any addresses? swapped out pages?
5775 * @returns read data byte.
5776 * @param env1 CPU environment.
5777 * @param pvAddr GC Virtual address.
5778 * @param val byte value
5779 */
5780void write_byte(CPUX86State *env1, target_ulong addr, uint8_t val)
5781{
5782 CPUX86State *savedenv = env;
5783 env = env1;
5784 stb(addr, val);
5785 env = savedenv;
5786}
5787
5788void write_word(CPUX86State *env1, target_ulong addr, uint16_t val)
5789{
5790 CPUX86State *savedenv = env;
5791 env = env1;
5792 stw(addr, val);
5793 env = savedenv;
5794}
5795
5796void write_dword(CPUX86State *env1, target_ulong addr, uint32_t val)
5797{
5798 CPUX86State *savedenv = env;
5799 env = env1;
5800 stl(addr, val);
5801 env = savedenv;
5802}
5803
5804/**
5805 * Correctly loads selector into segment register with updating internal
5806 * qemu data/caches.
5807 * @param env1 CPU environment.
5808 * @param seg_reg Segment register.
5809 * @param selector Selector to load.
5810 */
5811void sync_seg(CPUX86State *env1, int seg_reg, int selector)
5812{
5813 CPUX86State *savedenv = env;
5814#ifdef FORCE_SEGMENT_SYNC
5815 jmp_buf old_buf;
5816#endif
5817
5818 env = env1;
5819
5820 if ( env->eflags & X86_EFL_VM
5821 || !(env->cr[0] & X86_CR0_PE))
5822 {
5823 load_seg_vm(seg_reg, selector);
5824
5825 env = savedenv;
5826
5827 /* Successful sync. */
5828 Assert(env1->segs[seg_reg].newselector == 0);
5829 }
5830 else
5831 {
5832 /* For some reasons, it works even w/o save/restore of the jump buffer, so as code is
5833 time critical - let's not do that */
5834#ifdef FORCE_SEGMENT_SYNC
5835 memcpy(&old_buf, &env1->jmp_env, sizeof(old_buf));
5836#endif
5837 if (setjmp(env1->jmp_env) == 0)
5838 {
5839 if (seg_reg == R_CS)
5840 {
5841 uint32_t e1, e2;
5842 e1 = e2 = 0;
5843 load_segment(&e1, &e2, selector);
5844 cpu_x86_load_seg_cache(env, R_CS, selector,
5845 get_seg_base(e1, e2),
5846 get_seg_limit(e1, e2),
5847 e2);
5848 }
5849 else
5850 helper_load_seg(seg_reg, selector);
5851 /* We used to use tss_load_seg(seg_reg, selector); which, for some reasons ignored
5852 loading 0 selectors, what, in order, lead to subtle problems like #3588 */
5853
5854 env = savedenv;
5855
5856 /* Successful sync. */
5857 Assert(env1->segs[seg_reg].newselector == 0);
5858 }
5859 else
5860 {
5861 env = savedenv;
5862
5863 /* Postpone sync until the guest uses the selector. */
5864 env1->segs[seg_reg].selector = selector; /* hidden values are now incorrect, but will be resynced when this register is accessed. */
5865 env1->segs[seg_reg].newselector = selector;
5866 Log(("sync_seg: out of sync seg_reg=%d selector=%#x\n", seg_reg, selector));
5867 env1->exception_index = -1;
5868 env1->error_code = 0;
5869 env1->old_exception = -1;
5870 }
5871#ifdef FORCE_SEGMENT_SYNC
5872 memcpy(&env1->jmp_env, &old_buf, sizeof(old_buf));
5873#endif
5874 }
5875
5876}
5877
5878DECLINLINE(void) tb_reset_jump(TranslationBlock *tb, int n)
5879{
5880 tb_set_jmp_target(tb, n, (uintptr_t)(tb->tc_ptr + tb->tb_next_offset[n]));
5881}
5882
5883
5884int emulate_single_instr(CPUX86State *env1)
5885{
5886 TranslationBlock *tb;
5887 TranslationBlock *current;
5888 int flags;
5889 uint8_t *tc_ptr;
5890 target_ulong old_eip;
5891
5892 /* ensures env is loaded! */
5893 CPUX86State *savedenv = env;
5894 env = env1;
5895
5896 RAWEx_ProfileStart(env, STATS_EMULATE_SINGLE_INSTR);
5897
5898 current = env->current_tb;
5899 env->current_tb = NULL;
5900 flags = env->hflags | (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
5901
5902 /*
5903 * Translate only one instruction.
5904 */
5905 ASMAtomicOrU32(&env->state, CPU_EMULATE_SINGLE_INSTR);
5906 tb = tb_gen_code(env, env->eip + env->segs[R_CS].base,
5907 env->segs[R_CS].base, flags, 0);
5908
5909 ASMAtomicAndU32(&env->state, ~CPU_EMULATE_SINGLE_INSTR);
5910
5911
5912 /* tb_link_phys: */
5913 tb->jmp_first = (TranslationBlock *)((intptr_t)tb | 2);
5914 tb->jmp_next[0] = NULL;
5915 tb->jmp_next[1] = NULL;
5916 Assert(tb->jmp_next[0] == NULL);
5917 Assert(tb->jmp_next[1] == NULL);
5918 if (tb->tb_next_offset[0] != 0xffff)
5919 tb_reset_jump(tb, 0);
5920 if (tb->tb_next_offset[1] != 0xffff)
5921 tb_reset_jump(tb, 1);
5922
5923 /*
5924 * Execute it using emulation
5925 */
5926 old_eip = env->eip;
5927 env->current_tb = tb;
5928
5929 /*
5930 * eip remains the same for repeated instructions; no idea why qemu doesn't do a jump inside the generated code
5931 * perhaps not a very safe hack
5932 */
5933 while (old_eip == env->eip)
5934 {
5935 tc_ptr = tb->tc_ptr;
5936
5937#if defined(VBOX) && defined(GCC_WITH_BUGGY_REGPARM)
5938 int fake_ret;
5939 tcg_qemu_tb_exec(tc_ptr, fake_ret);
5940#else
5941 tcg_qemu_tb_exec(tc_ptr);
5942#endif
5943
5944 /*
5945 * Exit once we detect an external interrupt and interrupts are enabled
5946 */
5947 if ( (env->interrupt_request & (CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_EXTERNAL_TIMER))
5948 || ( (env->eflags & IF_MASK)
5949 && !(env->hflags & HF_INHIBIT_IRQ_MASK)
5950 && (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD) )
5951 )
5952 {
5953 break;
5954 }
5955 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_FLUSH_TLB) {
5956 tlb_flush(env, true);
5957 }
5958 }
5959 env->current_tb = current;
5960
5961 tb_phys_invalidate(tb, -1);
5962 tb_free(tb);
5963/*
5964 Assert(tb->tb_next_offset[0] == 0xffff);
5965 Assert(tb->tb_next_offset[1] == 0xffff);
5966 Assert(tb->tb_next[0] == 0xffff);
5967 Assert(tb->tb_next[1] == 0xffff);
5968 Assert(tb->jmp_next[0] == NULL);
5969 Assert(tb->jmp_next[1] == NULL);
5970 Assert(tb->jmp_first == NULL); */
5971
5972 RAWEx_ProfileStop(env, STATS_EMULATE_SINGLE_INSTR);
5973
5974 /*
5975 * Execute the next instruction when we encounter instruction fusing.
5976 */
5977 if (env->hflags & HF_INHIBIT_IRQ_MASK)
5978 {
5979 Log(("REM: Emulating next instruction due to instruction fusing (HF_INHIBIT_IRQ_MASK) at %RGv\n", env->eip));
5980 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
5981 emulate_single_instr(env);
5982 }
5983
5984 env = savedenv;
5985 return 0;
5986}
5987
5988/**
5989 * Correctly loads a new ldtr selector.
5990 *
5991 * @param env1 CPU environment.
5992 * @param selector Selector to load.
5993 */
5994void sync_ldtr(CPUX86State *env1, int selector)
5995{
5996 CPUX86State *saved_env = env;
5997 if (setjmp(env1->jmp_env) == 0)
5998 {
5999 env = env1;
6000 helper_lldt(selector);
6001 env = saved_env;
6002 }
6003 else
6004 {
6005 env = saved_env;
6006#ifdef VBOX_STRICT
6007 cpu_abort(env1, "sync_ldtr: selector=%#x\n", selector);
6008#endif
6009 }
6010}
6011
6012int get_ss_esp_from_tss_raw(CPUX86State *env1, uint32_t *ss_ptr,
6013 uint32_t *esp_ptr, int dpl)
6014{
6015 int type, index, shift;
6016
6017 CPUX86State *savedenv = env;
6018 env = env1;
6019
6020 if (!(env->tr.flags & DESC_P_MASK))
6021 cpu_abort(env, "invalid tss");
6022 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
6023 if ((type & 7) != 1)
6024 cpu_abort(env, "invalid tss type %d", type);
6025 shift = type >> 3;
6026 index = (dpl * 4 + 2) << shift;
6027 if (index + (4 << shift) - 1 > env->tr.limit)
6028 {
6029 env = savedenv;
6030 return 0;
6031 }
6032 //raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
6033
6034 if (shift == 0) {
6035 *esp_ptr = lduw_kernel(env->tr.base + index);
6036 *ss_ptr = lduw_kernel(env->tr.base + index + 2);
6037 } else {
6038 *esp_ptr = ldl_kernel(env->tr.base + index);
6039 *ss_ptr = lduw_kernel(env->tr.base + index + 4);
6040 }
6041
6042 env = savedenv;
6043 return 1;
6044}
6045
6046//*****************************************************************************
6047// Needs to be at the bottom of the file (overriding macros)
6048
6049static inline CPU86_LDouble helper_fldt_raw(uint8_t *ptr)
6050{
6051#ifdef USE_X86LDOUBLE
6052 CPU86_LDoubleU tmp;
6053 tmp.l.lower = *(uint64_t const *)ptr;
6054 tmp.l.upper = *(uint16_t const *)(ptr + 8);
6055 return tmp.d;
6056#else
6057# error "Busted FPU saving/restoring!"
6058 return *(CPU86_LDouble *)ptr;
6059#endif
6060}
6061
6062static inline void helper_fstt_raw(CPU86_LDouble f, uint8_t *ptr)
6063{
6064#ifdef USE_X86LDOUBLE
6065 CPU86_LDoubleU tmp;
6066 tmp.d = f;
6067 *(uint64_t *)(ptr + 0) = tmp.l.lower;
6068 *(uint16_t *)(ptr + 8) = tmp.l.upper;
6069 *(uint16_t *)(ptr + 10) = 0;
6070 *(uint32_t *)(ptr + 12) = 0;
6071 AssertCompile(sizeof(long double) > 8);
6072#else
6073# error "Busted FPU saving/restoring!"
6074 *(CPU86_LDouble *)ptr = f;
6075#endif
6076}
6077
6078#undef stw
6079#undef stl
6080#undef stq
6081#define stw(a,b) *(uint16_t *)(a) = (uint16_t)(b)
6082#define stl(a,b) *(uint32_t *)(a) = (uint32_t)(b)
6083#define stq(a,b) *(uint64_t *)(a) = (uint64_t)(b)
6084
6085//*****************************************************************************
6086void restore_raw_fp_state(CPUX86State *env, uint8_t *ptr)
6087{
6088 int fpus, fptag, i, nb_xmm_regs;
6089 CPU86_LDouble tmp;
6090 uint8_t *addr;
6091 int data64 = !!(env->hflags & HF_LMA_MASK);
6092
6093 if (env->cpuid_features & CPUID_FXSR)
6094 {
6095 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
6096 fptag = 0;
6097 for(i = 0; i < 8; i++) {
6098 fptag |= (env->fptags[i] << i);
6099 }
6100 stw(ptr, env->fpuc);
6101 stw(ptr + 2, fpus);
6102 stw(ptr + 4, fptag ^ 0xff);
6103
6104 addr = ptr + 0x20;
6105 for(i = 0;i < 8; i++) {
6106 tmp = ST(i);
6107 helper_fstt_raw(tmp, addr);
6108 addr += 16;
6109 }
6110
6111 if (env->cr[4] & CR4_OSFXSR_MASK) {
6112 /* XXX: finish it */
6113 stl(ptr + 0x18, env->mxcsr); /* mxcsr */
6114 stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
6115 nb_xmm_regs = 8 << data64;
6116 addr = ptr + 0xa0;
6117 for(i = 0; i < nb_xmm_regs; i++) {
6118#if __GNUC__ < 4
6119 stq(addr, env->xmm_regs[i].XMM_Q(0));
6120 stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
6121#else /* VBOX + __GNUC__ >= 4: gcc 4.x compiler bug - it runs out of registers for the 64-bit value. */
6122 stl(addr, env->xmm_regs[i].XMM_L(0));
6123 stl(addr + 4, env->xmm_regs[i].XMM_L(1));
6124 stl(addr + 8, env->xmm_regs[i].XMM_L(2));
6125 stl(addr + 12, env->xmm_regs[i].XMM_L(3));
6126#endif
6127 addr += 16;
6128 }
6129 }
6130 }
6131 else
6132 {
6133 PX86FPUSTATE fp = (PX86FPUSTATE)ptr;
6134 int fptag;
6135
6136 fp->FCW = env->fpuc;
6137 fp->FSW = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
6138 fptag = 0;
6139 for (i=7; i>=0; i--) {
6140 fptag <<= 2;
6141 if (env->fptags[i]) {
6142 fptag |= 3;
6143 } else {
6144 /* the FPU automatically computes it */
6145 }
6146 }
6147 fp->FTW = fptag;
6148
6149 for(i = 0;i < 8; i++) {
6150 tmp = ST(i);
6151 helper_fstt_raw(tmp, &fp->regs[i].au8[0]);
6152 }
6153 }
6154}
6155
6156//*****************************************************************************
6157#undef lduw
6158#undef ldl
6159#undef ldq
6160#define lduw(a) *(uint16_t *)(a)
6161#define ldl(a) *(uint32_t *)(a)
6162#define ldq(a) *(uint64_t *)(a)
6163//*****************************************************************************
6164void save_raw_fp_state(CPUX86State *env, uint8_t *ptr)
6165{
6166 int i, fpus, fptag, nb_xmm_regs;
6167 CPU86_LDouble tmp;
6168 uint8_t *addr;
6169 int data64 = !!(env->hflags & HF_LMA_MASK); /* don't use HF_CS64_MASK here as cs hasn't been synced when this function is called. */
6170
6171 if (env->cpuid_features & CPUID_FXSR)
6172 {
6173 env->fpuc = lduw(ptr);
6174 fpus = lduw(ptr + 2);
6175 fptag = lduw(ptr + 4);
6176 env->fpstt = (fpus >> 11) & 7;
6177 env->fpus = fpus & ~0x3800;
6178 fptag ^= 0xff;
6179 for(i = 0;i < 8; i++) {
6180 env->fptags[i] = ((fptag >> i) & 1);
6181 }
6182
6183 addr = ptr + 0x20;
6184 for(i = 0;i < 8; i++) {
6185 tmp = helper_fldt_raw(addr);
6186 ST(i) = tmp;
6187 addr += 16;
6188 }
6189
6190 if (env->cr[4] & CR4_OSFXSR_MASK) {
6191 /* XXX: finish it, endianness */
6192 env->mxcsr = ldl(ptr + 0x18);
6193 //ldl(ptr + 0x1c);
6194 nb_xmm_regs = 8 << data64;
6195 addr = ptr + 0xa0;
6196 for(i = 0; i < nb_xmm_regs; i++) {
6197#if HC_ARCH_BITS == 32
6198 /* this is a workaround for http://gcc.gnu.org/bugzilla/show_bug.cgi?id=35135 */
6199 env->xmm_regs[i].XMM_L(0) = ldl(addr);
6200 env->xmm_regs[i].XMM_L(1) = ldl(addr + 4);
6201 env->xmm_regs[i].XMM_L(2) = ldl(addr + 8);
6202 env->xmm_regs[i].XMM_L(3) = ldl(addr + 12);
6203#else
6204 env->xmm_regs[i].XMM_Q(0) = ldq(addr);
6205 env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
6206#endif
6207 addr += 16;
6208 }
6209 }
6210 }
6211 else
6212 {
6213 PX86FPUSTATE fp = (PX86FPUSTATE)ptr;
6214 int fptag, j;
6215
6216 env->fpuc = fp->FCW;
6217 env->fpstt = (fp->FSW >> 11) & 7;
6218 env->fpus = fp->FSW & ~0x3800;
6219 fptag = fp->FTW;
6220 for(i = 0;i < 8; i++) {
6221 env->fptags[i] = ((fptag & 3) == 3);
6222 fptag >>= 2;
6223 }
6224 j = env->fpstt;
6225 for(i = 0;i < 8; i++) {
6226 tmp = helper_fldt_raw(&fp->regs[i].au8[0]);
6227 ST(i) = tmp;
6228 }
6229 }
6230}
6231//*****************************************************************************
6232//*****************************************************************************
6233
6234#endif /* VBOX */
6235
6236/* Secure Virtual Machine helpers */
6237
6238#if defined(CONFIG_USER_ONLY)
6239
6240void helper_vmrun(int aflag, int next_eip_addend)
6241{
6242}
6243void helper_vmmcall(void)
6244{
6245}
6246void helper_vmload(int aflag)
6247{
6248}
6249void helper_vmsave(int aflag)
6250{
6251}
6252void helper_stgi(void)
6253{
6254}
6255void helper_clgi(void)
6256{
6257}
6258void helper_skinit(void)
6259{
6260}
6261void helper_invlpga(int aflag)
6262{
6263}
6264void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
6265{
6266}
6267void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
6268{
6269}
6270
6271void helper_svm_check_io(uint32_t port, uint32_t param,
6272 uint32_t next_eip_addend)
6273{
6274}
6275#else
6276
6277static inline void svm_save_seg(target_phys_addr_t addr,
6278 const SegmentCache *sc)
6279{
6280 stw_phys(addr + offsetof(struct vmcb_seg, selector),
6281 sc->selector);
6282 stq_phys(addr + offsetof(struct vmcb_seg, base),
6283 sc->base);
6284 stl_phys(addr + offsetof(struct vmcb_seg, limit),
6285 sc->limit);
6286 stw_phys(addr + offsetof(struct vmcb_seg, attrib),
6287 ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
6288}
6289
6290static inline void svm_load_seg(target_phys_addr_t addr, SegmentCache *sc)
6291{
6292 unsigned int flags;
6293
6294 sc->selector = lduw_phys(addr + offsetof(struct vmcb_seg, selector));
6295 sc->base = ldq_phys(addr + offsetof(struct vmcb_seg, base));
6296 sc->limit = ldl_phys(addr + offsetof(struct vmcb_seg, limit));
6297 flags = lduw_phys(addr + offsetof(struct vmcb_seg, attrib));
6298 sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
6299}
6300
6301static inline void svm_load_seg_cache(target_phys_addr_t addr,
6302 CPUState *env, int seg_reg)
6303{
6304 SegmentCache sc1, *sc = &sc1;
6305 svm_load_seg(addr, sc);
6306 cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
6307 sc->base, sc->limit, sc->flags);
6308}
6309
6310void helper_vmrun(int aflag, int next_eip_addend)
6311{
6312 target_ulong addr;
6313 uint32_t event_inj;
6314 uint32_t int_ctl;
6315
6316 helper_svm_check_intercept_param(SVM_EXIT_VMRUN, 0);
6317
6318 if (aflag == 2)
6319 addr = EAX;
6320 else
6321 addr = (uint32_t)EAX;
6322
6323 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmrun! " TARGET_FMT_lx "\n", addr);
6324
6325 env->vm_vmcb = addr;
6326
6327 /* save the current CPU state in the hsave page */
6328 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
6329 stl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
6330
6331 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base), env->idt.base);
6332 stl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
6333
6334 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
6335 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
6336 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
6337 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
6338 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
6339 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);
6340
6341 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
6342 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags), compute_eflags());
6343
6344 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.es),
6345 &env->segs[R_ES]);
6346 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.cs),
6347 &env->segs[R_CS]);
6348 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ss),
6349 &env->segs[R_SS]);
6350 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ds),
6351 &env->segs[R_DS]);
6352
6353 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip),
6354 EIP + next_eip_addend);
6355 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), ESP);
6356 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), EAX);
6357
6358 /* load the interception bitmaps so we do not need to access the
6359 vmcb in svm mode */
6360 env->intercept = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept));
6361 env->intercept_cr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_read));
6362 env->intercept_cr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_write));
6363 env->intercept_dr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_read));
6364 env->intercept_dr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_write));
6365 env->intercept_exceptions = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_exceptions));
6366
6367 /* enable intercepts */
6368 env->hflags |= HF_SVMI_MASK;
6369
6370 env->tsc_offset = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.tsc_offset));
6371
6372 env->gdt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base));
6373 env->gdt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit));
6374
6375 env->idt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base));
6376 env->idt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit));
6377
6378 /* clear exit_info_2 so we behave like the real hardware */
6379 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);
6380
6381 cpu_x86_update_cr0(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0)));
6382 cpu_x86_update_cr4(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4)));
6383 cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3)));
6384 env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2));
6385 int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
6386 env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
6387 if (int_ctl & V_INTR_MASKING_MASK) {
6388 env->v_tpr = int_ctl & V_TPR_MASK;
6389 env->hflags2 |= HF2_VINTR_MASK;
6390 if (env->eflags & IF_MASK)
6391 env->hflags2 |= HF2_HIF_MASK;
6392 }
6393
6394 cpu_load_efer(env,
6395 ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer)));
6396 env->eflags = 0;
6397 load_eflags(ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags)),
6398 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
6399 CC_OP = CC_OP_EFLAGS;
6400
6401 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.es),
6402 env, R_ES);
6403 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.cs),
6404 env, R_CS);
6405 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ss),
6406 env, R_SS);
6407 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ds),
6408 env, R_DS);
6409
6410 EIP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));
6411 env->eip = EIP;
6412 ESP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp));
6413 EAX = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax));
6414 env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7));
6415 env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6));
6416 cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl)));
6417
6418 /* FIXME: guest state consistency checks */
6419
6420 switch(ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
6421 case TLB_CONTROL_DO_NOTHING:
6422 break;
6423 case TLB_CONTROL_FLUSH_ALL_ASID:
6424 /* FIXME: this is not 100% correct but should work for now */
6425 tlb_flush(env, 1);
6426 break;
6427 }
6428
6429 env->hflags2 |= HF2_GIF_MASK;
6430
6431 if (int_ctl & V_IRQ_MASK) {
6432 env->interrupt_request |= CPU_INTERRUPT_VIRQ;
6433 }
6434
6435 /* maybe we need to inject an event */
6436 event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
6437 if (event_inj & SVM_EVTINJ_VALID) {
6438 uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
6439 uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
6440 uint32_t event_inj_err = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err));
6441
6442 qemu_log_mask(CPU_LOG_TB_IN_ASM, "Injecting(%#hx): ", valid_err);
6443 /* FIXME: need to implement valid_err */
6444 switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
6445 case SVM_EVTINJ_TYPE_INTR:
6446 env->exception_index = vector;
6447 env->error_code = event_inj_err;
6448 env->exception_is_int = 0;
6449 env->exception_next_eip = -1;
6450 qemu_log_mask(CPU_LOG_TB_IN_ASM, "INTR");
6451 /* XXX: is it always correct ? */
6452 do_interrupt(vector, 0, 0, 0, 1);
6453 break;
6454 case SVM_EVTINJ_TYPE_NMI:
6455 env->exception_index = EXCP02_NMI;
6456 env->error_code = event_inj_err;
6457 env->exception_is_int = 0;
6458 env->exception_next_eip = EIP;
6459 qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI");
6460 cpu_loop_exit();
6461 break;
6462 case SVM_EVTINJ_TYPE_EXEPT:
6463 env->exception_index = vector;
6464 env->error_code = event_inj_err;
6465 env->exception_is_int = 0;
6466 env->exception_next_eip = -1;
6467 qemu_log_mask(CPU_LOG_TB_IN_ASM, "EXEPT");
6468 cpu_loop_exit();
6469 break;
6470 case SVM_EVTINJ_TYPE_SOFT:
6471 env->exception_index = vector;
6472 env->error_code = event_inj_err;
6473 env->exception_is_int = 1;
6474 env->exception_next_eip = EIP;
6475 qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT");
6476 cpu_loop_exit();
6477 break;
6478 }
6479 qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", env->exception_index, env->error_code);
6480 }
6481}
6482
6483void helper_vmmcall(void)
6484{
6485 helper_svm_check_intercept_param(SVM_EXIT_VMMCALL, 0);
6486 raise_exception(EXCP06_ILLOP);
6487}
6488
6489void helper_vmload(int aflag)
6490{
6491 target_ulong addr;
6492 helper_svm_check_intercept_param(SVM_EXIT_VMLOAD, 0);
6493
6494 if (aflag == 2)
6495 addr = EAX;
6496 else
6497 addr = (uint32_t)EAX;
6498
6499 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmload! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
6500 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
6501 env->segs[R_FS].base);
6502
6503 svm_load_seg_cache(addr + offsetof(struct vmcb, save.fs),
6504 env, R_FS);
6505 svm_load_seg_cache(addr + offsetof(struct vmcb, save.gs),
6506 env, R_GS);
6507 svm_load_seg(addr + offsetof(struct vmcb, save.tr),
6508 &env->tr);
6509 svm_load_seg(addr + offsetof(struct vmcb, save.ldtr),
6510 &env->ldt);
6511
6512#ifdef TARGET_X86_64
6513 env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base));
6514 env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar));
6515 env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar));
6516 env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask));
6517#endif
6518 env->star = ldq_phys(addr + offsetof(struct vmcb, save.star));
6519 env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs));
6520 env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_esp));
6521 env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_eip));
6522}
6523
6524void helper_vmsave(int aflag)
6525{
6526 target_ulong addr;
6527 helper_svm_check_intercept_param(SVM_EXIT_VMSAVE, 0);
6528
6529 if (aflag == 2)
6530 addr = EAX;
6531 else
6532 addr = (uint32_t)EAX;
6533
6534 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmsave! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
6535 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
6536 env->segs[R_FS].base);
6537
6538 svm_save_seg(addr + offsetof(struct vmcb, save.fs),
6539 &env->segs[R_FS]);
6540 svm_save_seg(addr + offsetof(struct vmcb, save.gs),
6541 &env->segs[R_GS]);
6542 svm_save_seg(addr + offsetof(struct vmcb, save.tr),
6543 &env->tr);
6544 svm_save_seg(addr + offsetof(struct vmcb, save.ldtr),
6545 &env->ldt);
6546
6547#ifdef TARGET_X86_64
6548 stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base), env->kernelgsbase);
6549 stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
6550 stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
6551 stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
6552#endif
6553 stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
6554 stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
6555 stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp), env->sysenter_esp);
6556 stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip), env->sysenter_eip);
6557}
6558
6559void helper_stgi(void)
6560{
6561 helper_svm_check_intercept_param(SVM_EXIT_STGI, 0);
6562 env->hflags2 |= HF2_GIF_MASK;
6563}
6564
6565void helper_clgi(void)
6566{
6567 helper_svm_check_intercept_param(SVM_EXIT_CLGI, 0);
6568 env->hflags2 &= ~HF2_GIF_MASK;
6569}
6570
6571void helper_skinit(void)
6572{
6573 helper_svm_check_intercept_param(SVM_EXIT_SKINIT, 0);
6574 /* XXX: not implemented */
6575 raise_exception(EXCP06_ILLOP);
6576}
6577
6578void helper_invlpga(int aflag)
6579{
6580 target_ulong addr;
6581 helper_svm_check_intercept_param(SVM_EXIT_INVLPGA, 0);
6582
6583 if (aflag == 2)
6584 addr = EAX;
6585 else
6586 addr = (uint32_t)EAX;
6587
6588 /* XXX: could use the ASID to see if it is needed to do the
6589 flush */
6590 tlb_flush_page(env, addr);
6591}
6592
6593void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
6594{
6595 if (likely(!(env->hflags & HF_SVMI_MASK)))
6596 return;
6597#ifndef VBOX
6598 switch(type) {
6599 case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
6600 if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
6601 helper_vmexit(type, param);
6602 }
6603 break;
6604 case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
6605 if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
6606 helper_vmexit(type, param);
6607 }
6608 break;
6609 case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
6610 if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
6611 helper_vmexit(type, param);
6612 }
6613 break;
6614 case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
6615 if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
6616 helper_vmexit(type, param);
6617 }
6618 break;
6619 case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
6620 if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
6621 helper_vmexit(type, param);
6622 }
6623 break;
6624 case SVM_EXIT_MSR:
6625 if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
6626 /* FIXME: this should be read in at vmrun (faster this way?) */
6627 uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.msrpm_base_pa));
6628 uint32_t t0, t1;
6629 switch((uint32_t)ECX) {
6630 case 0 ... 0x1fff:
6631 t0 = (ECX * 2) % 8;
6632 t1 = ECX / 8;
6633 break;
6634 case 0xc0000000 ... 0xc0001fff:
6635 t0 = (8192 + ECX - 0xc0000000) * 2;
6636 t1 = (t0 / 8);
6637 t0 %= 8;
6638 break;
6639 case 0xc0010000 ... 0xc0011fff:
6640 t0 = (16384 + ECX - 0xc0010000) * 2;
6641 t1 = (t0 / 8);
6642 t0 %= 8;
6643 break;
6644 default:
6645 helper_vmexit(type, param);
6646 t0 = 0;
6647 t1 = 0;
6648 break;
6649 }
6650 if (ldub_phys(addr + t1) & ((1 << param) << t0))
6651 helper_vmexit(type, param);
6652 }
6653 break;
6654 default:
6655 if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
6656 helper_vmexit(type, param);
6657 }
6658 break;
6659 }
6660#else /* VBOX */
6661 AssertMsgFailed(("We shouldn't be here, HM supported differently!"));
6662#endif /* VBOX */
6663}
6664
6665void helper_svm_check_io(uint32_t port, uint32_t param,
6666 uint32_t next_eip_addend)
6667{
6668 if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
6669 /* FIXME: this should be read in at vmrun (faster this way?) */
6670 uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.iopm_base_pa));
6671 uint16_t mask = (1 << ((param >> 4) & 7)) - 1;
6672 if(lduw_phys(addr + port / 8) & (mask << (port & 7))) {
6673 /* next EIP */
6674 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
6675 env->eip + next_eip_addend);
6676 helper_vmexit(SVM_EXIT_IOIO, param | (port << 16));
6677 }
6678 }
6679}
6680
6681/* Note: currently only 32 bits of exit_code are used */
6682void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
6683{
6684 uint32_t int_ctl;
6685
6686 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016" PRIx64 ", " TARGET_FMT_lx ")!\n",
6687 exit_code, exit_info_1,
6688 ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2)),
6689 EIP);
6690
6691 if(env->hflags & HF_INHIBIT_IRQ_MASK) {
6692 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), SVM_INTERRUPT_SHADOW_MASK);
6693 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
6694 } else {
6695 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
6696 }
6697
6698 /* Save the VM state in the vmcb */
6699 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.es),
6700 &env->segs[R_ES]);
6701 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.cs),
6702 &env->segs[R_CS]);
6703 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ss),
6704 &env->segs[R_SS]);
6705 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ds),
6706 &env->segs[R_DS]);
6707
6708 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
6709 stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
6710
6711 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base), env->idt.base);
6712 stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
6713
6714 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
6715 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
6716 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
6717 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
6718 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);
6719
6720 int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
6721 int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
6722 int_ctl |= env->v_tpr & V_TPR_MASK;
6723 if (env->interrupt_request & CPU_INTERRUPT_VIRQ)
6724 int_ctl |= V_IRQ_MASK;
6725 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);
6726
6727 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags), compute_eflags());
6728 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip);
6729 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), ESP);
6730 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), EAX);
6731 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
6732 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
6733 stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl), env->hflags & HF_CPL_MASK);
6734
6735 /* Reload the host state from vm_hsave */
6736 env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
6737 env->hflags &= ~HF_SVMI_MASK;
6738 env->intercept = 0;
6739 env->intercept_exceptions = 0;
6740 env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
6741 env->tsc_offset = 0;
6742
6743 env->gdt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base));
6744 env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit));
6745
6746 env->idt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base));
6747 env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit));
6748
6749 cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0)) | CR0_PE_MASK);
6750 cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4)));
6751 cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3)));
6752 /* we need to set the efer after the crs so the hidden flags get
6753 set properly */
6754 cpu_load_efer(env,
6755 ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer)));
6756 env->eflags = 0;
6757 load_eflags(ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags)),
6758 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
6759 CC_OP = CC_OP_EFLAGS;
6760
6761 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.es),
6762 env, R_ES);
6763 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.cs),
6764 env, R_CS);
6765 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ss),
6766 env, R_SS);
6767 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ds),
6768 env, R_DS);
6769
6770 EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
6771 ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp));
6772 EAX = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax));
6773
6774 env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
6775 env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));
6776
6777 /* other setups */
6778 cpu_x86_set_cpl(env, 0);
6779 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code), exit_code);
6780 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1), exit_info_1);
6781
6782 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info),
6783 ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj)));
6784 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info_err),
6785 ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err)));
6786 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), 0);
6787
6788 env->hflags2 &= ~HF2_GIF_MASK;
6789 /* FIXME: Resets the current ASID register to zero (host ASID). */
6790
6791 /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */
6792
6793 /* Clears the TSC_OFFSET inside the processor. */
6794
6795 /* If the host is in PAE mode, the processor reloads the host's PDPEs
6796 from the page table indicated the host's CR3. If the PDPEs contain
6797 illegal state, the processor causes a shutdown. */
6798
6799 /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
6800 env->cr[0] |= CR0_PE_MASK;
6801 env->eflags &= ~VM_MASK;
6802
6803 /* Disables all breakpoints in the host DR7 register. */
6804
6805 /* Checks the reloaded host state for consistency. */
6806
6807 /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
6808 host's code segment or non-canonical (in the case of long mode), a
6809 #GP fault is delivered inside the host.) */
6810
6811 /* remove any pending exception */
6812 env->exception_index = -1;
6813 env->error_code = 0;
6814 env->old_exception = -1;
6815
6816 cpu_loop_exit();
6817}
6818
6819#endif
6820
6821/* MMX/SSE */
6822/* XXX: optimize by storing fptt and fptags in the static cpu state */
6823void helper_enter_mmx(void)
6824{
6825 env->fpstt = 0;
6826 *(uint32_t *)(env->fptags) = 0;
6827 *(uint32_t *)(env->fptags + 4) = 0;
6828}
6829
6830void helper_emms(void)
6831{
6832 /* set to empty state */
6833 *(uint32_t *)(env->fptags) = 0x01010101;
6834 *(uint32_t *)(env->fptags + 4) = 0x01010101;
6835}
6836
6837/* XXX: suppress */
6838void helper_movq(void *d, void *s)
6839{
6840 *(uint64_t *)d = *(uint64_t *)s;
6841}
6842
6843#define SHIFT 0
6844#include "ops_sse.h"
6845
6846#define SHIFT 1
6847#include "ops_sse.h"
6848
6849#define SHIFT 0
6850#include "helper_template.h"
6851#undef SHIFT
6852
6853#define SHIFT 1
6854#include "helper_template.h"
6855#undef SHIFT
6856
6857#define SHIFT 2
6858#include "helper_template.h"
6859#undef SHIFT
6860
6861#ifdef TARGET_X86_64
6862
6863#define SHIFT 3
6864#include "helper_template.h"
6865#undef SHIFT
6866
6867#endif
6868
6869/* bit operations */
6870target_ulong helper_bsf(target_ulong t0)
6871{
6872 int count;
6873 target_ulong res;
6874
6875 res = t0;
6876 count = 0;
6877 while ((res & 1) == 0) {
6878 count++;
6879 res >>= 1;
6880 }
6881 return count;
6882}
6883
6884target_ulong helper_lzcnt(target_ulong t0, int wordsize)
6885{
6886 int count;
6887 target_ulong res, mask;
6888
6889 if (wordsize > 0 && t0 == 0) {
6890 return wordsize;
6891 }
6892 res = t0;
6893 count = TARGET_LONG_BITS - 1;
6894 mask = (target_ulong)1 << (TARGET_LONG_BITS - 1);
6895 while ((res & mask) == 0) {
6896 count--;
6897 res <<= 1;
6898 }
6899 if (wordsize > 0) {
6900 return wordsize - 1 - count;
6901 }
6902 return count;
6903}
6904
6905target_ulong helper_bsr(target_ulong t0)
6906{
6907 return helper_lzcnt(t0, 0);
6908}
6909
6910static int compute_all_eflags(void)
6911{
6912 return CC_SRC;
6913}
6914
6915static int compute_c_eflags(void)
6916{
6917 return CC_SRC & CC_C;
6918}
6919
6920uint32_t helper_cc_compute_all(int op)
6921{
6922 switch (op) {
6923 default: /* should never happen */ return 0;
6924
6925 case CC_OP_EFLAGS: return compute_all_eflags();
6926
6927 case CC_OP_MULB: return compute_all_mulb();
6928 case CC_OP_MULW: return compute_all_mulw();
6929 case CC_OP_MULL: return compute_all_mull();
6930
6931 case CC_OP_ADDB: return compute_all_addb();
6932 case CC_OP_ADDW: return compute_all_addw();
6933 case CC_OP_ADDL: return compute_all_addl();
6934
6935 case CC_OP_ADCB: return compute_all_adcb();
6936 case CC_OP_ADCW: return compute_all_adcw();
6937 case CC_OP_ADCL: return compute_all_adcl();
6938
6939 case CC_OP_SUBB: return compute_all_subb();
6940 case CC_OP_SUBW: return compute_all_subw();
6941 case CC_OP_SUBL: return compute_all_subl();
6942
6943 case CC_OP_SBBB: return compute_all_sbbb();
6944 case CC_OP_SBBW: return compute_all_sbbw();
6945 case CC_OP_SBBL: return compute_all_sbbl();
6946
6947 case CC_OP_LOGICB: return compute_all_logicb();
6948 case CC_OP_LOGICW: return compute_all_logicw();
6949 case CC_OP_LOGICL: return compute_all_logicl();
6950
6951 case CC_OP_INCB: return compute_all_incb();
6952 case CC_OP_INCW: return compute_all_incw();
6953 case CC_OP_INCL: return compute_all_incl();
6954
6955 case CC_OP_DECB: return compute_all_decb();
6956 case CC_OP_DECW: return compute_all_decw();
6957 case CC_OP_DECL: return compute_all_decl();
6958
6959 case CC_OP_SHLB: return compute_all_shlb();
6960 case CC_OP_SHLW: return compute_all_shlw();
6961 case CC_OP_SHLL: return compute_all_shll();
6962
6963 case CC_OP_SARB: return compute_all_sarb();
6964 case CC_OP_SARW: return compute_all_sarw();
6965 case CC_OP_SARL: return compute_all_sarl();
6966
6967#ifdef TARGET_X86_64
6968 case CC_OP_MULQ: return compute_all_mulq();
6969
6970 case CC_OP_ADDQ: return compute_all_addq();
6971
6972 case CC_OP_ADCQ: return compute_all_adcq();
6973
6974 case CC_OP_SUBQ: return compute_all_subq();
6975
6976 case CC_OP_SBBQ: return compute_all_sbbq();
6977
6978 case CC_OP_LOGICQ: return compute_all_logicq();
6979
6980 case CC_OP_INCQ: return compute_all_incq();
6981
6982 case CC_OP_DECQ: return compute_all_decq();
6983
6984 case CC_OP_SHLQ: return compute_all_shlq();
6985
6986 case CC_OP_SARQ: return compute_all_sarq();
6987#endif
6988 }
6989}
6990
6991uint32_t helper_cc_compute_c(int op)
6992{
6993 switch (op) {
6994 default: /* should never happen */ return 0;
6995
6996 case CC_OP_EFLAGS: return compute_c_eflags();
6997
6998 case CC_OP_MULB: return compute_c_mull();
6999 case CC_OP_MULW: return compute_c_mull();
7000 case CC_OP_MULL: return compute_c_mull();
7001
7002 case CC_OP_ADDB: return compute_c_addb();
7003 case CC_OP_ADDW: return compute_c_addw();
7004 case CC_OP_ADDL: return compute_c_addl();
7005
7006 case CC_OP_ADCB: return compute_c_adcb();
7007 case CC_OP_ADCW: return compute_c_adcw();
7008 case CC_OP_ADCL: return compute_c_adcl();
7009
7010 case CC_OP_SUBB: return compute_c_subb();
7011 case CC_OP_SUBW: return compute_c_subw();
7012 case CC_OP_SUBL: return compute_c_subl();
7013
7014 case CC_OP_SBBB: return compute_c_sbbb();
7015 case CC_OP_SBBW: return compute_c_sbbw();
7016 case CC_OP_SBBL: return compute_c_sbbl();
7017
7018 case CC_OP_LOGICB: return compute_c_logicb();
7019 case CC_OP_LOGICW: return compute_c_logicw();
7020 case CC_OP_LOGICL: return compute_c_logicl();
7021
7022 case CC_OP_INCB: return compute_c_incl();
7023 case CC_OP_INCW: return compute_c_incl();
7024 case CC_OP_INCL: return compute_c_incl();
7025
7026 case CC_OP_DECB: return compute_c_incl();
7027 case CC_OP_DECW: return compute_c_incl();
7028 case CC_OP_DECL: return compute_c_incl();
7029
7030 case CC_OP_SHLB: return compute_c_shlb();
7031 case CC_OP_SHLW: return compute_c_shlw();
7032 case CC_OP_SHLL: return compute_c_shll();
7033
7034 case CC_OP_SARB: return compute_c_sarl();
7035 case CC_OP_SARW: return compute_c_sarl();
7036 case CC_OP_SARL: return compute_c_sarl();
7037
7038#ifdef TARGET_X86_64
7039 case CC_OP_MULQ: return compute_c_mull();
7040
7041 case CC_OP_ADDQ: return compute_c_addq();
7042
7043 case CC_OP_ADCQ: return compute_c_adcq();
7044
7045 case CC_OP_SUBQ: return compute_c_subq();
7046
7047 case CC_OP_SBBQ: return compute_c_sbbq();
7048
7049 case CC_OP_LOGICQ: return compute_c_logicq();
7050
7051 case CC_OP_INCQ: return compute_c_incl();
7052
7053 case CC_OP_DECQ: return compute_c_incl();
7054
7055 case CC_OP_SHLQ: return compute_c_shlq();
7056
7057 case CC_OP_SARQ: return compute_c_sarl();
7058#endif
7059 }
7060}
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette