VirtualBox

source: vbox/trunk/src/recompiler/cpu-exec.c @ 36138

Last change on this file since 36138 was 36125, checked in by vboxsync, 14 years ago

recompiler: Removing traces of attempts at making the recompiler compile with the microsoft compiler. (untested)

  • Property svn:eol-style set to native
File size: 60.9 KB
/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

/*
 * Oracle LGPL Disclaimer: For the avoidance of doubt, except that if any license choice
 * other than GPL or LGPL is available it will apply instead, Oracle elects to use only
 * the Lesser General Public License version 2.1 (LGPLv2) at this time for any software where
 * a choice of LGPL license versions is made available with the language indicating
 * that LGPLv2 or any later version may be used, or where a choice of which version
 * of the LGPL is applied is otherwise unspecified.
 */

#include "config.h"
#define CPU_NO_GLOBAL_REGS
#include "exec.h"
#include "disas.h"
#include "tcg.h"

#if !defined(CONFIG_SOFTMMU)
#undef EAX
#undef ECX
#undef EDX
#undef EBX
#undef ESP
#undef EBP
#undef ESI
#undef EDI
#undef EIP
#include <signal.h>
#include <sys/ucontext.h>
#endif

#if defined(__sparc__) && !defined(HOST_SOLARIS)
// Work around ugly bugs in glibc that mangle global register contents
#undef env
#define env cpu_single_env
#endif

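/* Added note: set during code generation when existing TBs had to be
   invalidated (e.g. after a translation-buffer flush); the main loop checks
   it after tb_find_fast() and clears next_tb so that it never chains to a
   block that may have been freed. */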
int tb_invalidated_flag;

//#define DEBUG_EXEC
//#define DEBUG_SIGNAL


void cpu_loop_exit(void)
{
    /* NOTE: the registers at this point must be saved by hand because
       longjmp restores them */
    regs_to_env();
    longjmp(env->jmp_env, 1);
}

#if !(defined(TARGET_SPARC) || defined(TARGET_SH4) || defined(TARGET_M68K))
#define reg_T2
#endif

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    struct ucontext *uc = puc;
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
    }
#endif
    longjmp(env->jmp_env, 1);
}

/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb)
{
    unsigned long next_tb;
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long. */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    env->current_tb = tb;
    /* execute the generated code */
#if defined(VBOX) && defined(GCC_WITH_BUGGY_REGPARM)
    tcg_qemu_tb_exec(tb->tc_ptr, next_tb);
#else
    next_tb = tcg_qemu_tb_exec(tb->tc_ptr);
#endif

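    /* Added note: tcg_qemu_tb_exec() returns the address of the TB it exited
       from, with the low two bits used as a tag: 0/1 name the jump slot taken
       (used for TB chaining in cpu_exec below), while 2 means the instruction
       counter expired and the PC must be recovered from the TB. */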
    if ((next_tb & 3) == 2) {
        /* Restore PC. This may happen if an async event occurred before
           the TB started executing. */
        CPU_PC_FROM_TB(env, tb);
    }
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}

static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;

    tb_invalidated_flag = 0;

    regs_to_env(); /* XXX: do it just before cpu_gen_code() */

    /* find translated block using physical mappings */
    phys_pc = get_phys_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_phys_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}

static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    uint64_t flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
#if defined(TARGET_I386)
    flags = env->hflags;
    flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
    cs_base = env->segs[R_CS].base;
    pc = cs_base + env->eip;
#elif defined(TARGET_ARM)
    flags = env->thumb | (env->vfp.vec_len << 1)
            | (env->vfp.vec_stride << 4);
    if ((env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR)
        flags |= (1 << 6);
    if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30))
        flags |= (1 << 7);
    flags |= (env->condexec_bits << 8);
    cs_base = 0;
    pc = env->regs[15];
#elif defined(TARGET_SPARC)
#ifdef TARGET_SPARC64
    // AM . Combined FPU enable bits . PRIV . DMMU enabled . IMMU enabled
    flags = ((env->pstate & PS_AM) << 2)
            | (((env->pstate & PS_PEF) >> 1) | ((env->fprs & FPRS_FEF) << 2))
            | (env->pstate & PS_PRIV) | ((env->lsu & (DMMU_E | IMMU_E)) >> 2);
#else
    // FPU enable . Supervisor
    flags = (env->psref << 4) | env->psrs;
#endif
    cs_base = env->npc;
    pc = env->pc;
#elif defined(TARGET_PPC)
    flags = env->hflags;
    cs_base = 0;
    pc = env->nip;
#elif defined(TARGET_MIPS)
    flags = env->hflags & (MIPS_HFLAG_TMASK | MIPS_HFLAG_BMASK);
    cs_base = 0;
    pc = env->active_tc.PC;
#elif defined(TARGET_M68K)
    flags = (env->fpcr & M68K_FPCR_PREC)  /* Bit 6 */
            | (env->sr & SR_S)            /* Bit 13 */
            | ((env->macsr >> 4) & 0xf);  /* Bits 0-3 */
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_SH4)
    flags = (env->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL
                           | DELAY_SLOT_TRUE | DELAY_SLOT_CLEARME)) /* Bits 0- 3 */
            | (env->fpscr & (FPSCR_FR | FPSCR_SZ | FPSCR_PR))       /* Bits 19-21 */
            | (env->sr & (SR_MD | SR_RB));                          /* Bits 29-30 */
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_ALPHA)
    flags = env->ps;
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_CRIS)
    flags = env->pregs[PR_CCS] & (S_FLAG | P_FLAG | U_FLAG | X_FLAG);
    flags |= env->dslot;
    cs_base = 0;
    pc = env->pc;
#else
#error unsupported CPU
#endif
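    /* Added note: tb_jmp_cache is a small direct-mapped cache indexed by a
       hash of the virtual PC; on a miss or a stale entry we fall back to the
       physically indexed lookup in tb_find_slow(). */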
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(pc, cs_base, flags);
    }
    return tb;
}

/* main execution loop */

#ifdef VBOX

int cpu_exec(CPUState *env1)
{
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
    int ret = 0, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    unsigned long next_tb;

    cpu_single_env = env1;

    /* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
    env = env1;

    env_to_regs();
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
#ifndef VBOX /* VBOX: We need to raise traps and suchlike from the outside. */
    env->exception_index = -1;
#endif

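    /* Added note: cpu_loop_exit() (and the fault paths that go through
       cpu_resume_from_signal()) longjmp back to env->jmp_env, so after any
       exception execution resumes at the setjmp() below, with ret and
       env->exception_index describing what happened. */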
    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0)
        {
            env->current_tb = NULL;

            /*
             * Check for fatal errors first
             */
            if (env->interrupt_request & CPU_INTERRUPT_RC) {
                env->exception_index = EXCP_RC;
                ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_RC);
                ret = env->exception_index;
                cpu_loop_exit();
            }

            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                Assert(!env->user_mode_only);
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    break;
                } else {
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    RAWEx_ProfileStart(env, STATS_IRQ_HANDLING);
                    Log(("do_interrupt %d %d %RGv\n", env->exception_index, env->exception_is_int, (RTGCPTR)env->exception_next_eip));
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
                    RAWEx_ProfileStop(env, STATS_IRQ_HANDLING);
                }
                env->exception_index = -1;
            }

            next_tb = 0; /* force lookup of first TB */
            for(;;)
            {
                interrupt_request = env->interrupt_request;
#ifndef VBOX
                if (__builtin_expect(interrupt_request, 0))
#else
                if (RT_UNLIKELY(interrupt_request != 0))
#endif
                {
                    /** @todo: reconcile with what QEMU really does */

                    /* Single instruction exec request, we execute it and return (one way or the other).
                       The caller will always reschedule after doing this operation! */
                    if (interrupt_request & CPU_INTERRUPT_SINGLE_INSTR)
                    {
                        /* not in flight are we? (if we are, we trapped) */
                        if (!(env->interrupt_request & CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT))
                        {
                            ASMAtomicOrS32((int32_t volatile *)&env->interrupt_request, CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT);
                            env->exception_index = EXCP_SINGLE_INSTR;
                            if (emulate_single_instr(env) == -1)
                                AssertMsgFailed(("REM: emulate_single_instr failed for EIP=%RGv!!\n", (RTGCPTR)env->eip));

                            /* When we receive an external interrupt during execution of this single
                               instruction, we should stay here. We will leave when we're ready
                               for raw-mode or when interrupted by pending EMT requests. */
                            interrupt_request = env->interrupt_request; /* reload this! */
                            if (   !(interrupt_request & CPU_INTERRUPT_HARD)
                                || !(env->eflags & IF_MASK)
                                ||  (env->hflags & HF_INHIBIT_IRQ_MASK)
                                ||  (env->state & CPU_RAW_HWACC)
                               )
                            {
                                env->exception_index = ret = EXCP_SINGLE_INSTR;
                                cpu_loop_exit();
                            }
                        }
                        /* Clear CPU_INTERRUPT_SINGLE_INSTR and leave CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT set. */
                        ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_SINGLE_INSTR);
                    }

                    RAWEx_ProfileStart(env, STATS_IRQ_HANDLING);
                    if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                        !(env->hflags & HF_SMM_MASK)) {
                        env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                        do_smm_enter();
                        next_tb = 0;
                    }
                    else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                             (env->eflags & IF_MASK) &&
                             !(env->hflags & HF_INHIBIT_IRQ_MASK))
                    {
                        /* if hardware interrupt pending, we execute it */
                        int intno;
                        ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_HARD);
                        intno = cpu_get_pic_interrupt(env);
                        if (intno >= 0)
                        {
                            Log(("do_interrupt %d\n", intno));
                            do_interrupt(intno, 0, 0, 0, 1);
                        }
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB)
                    {
                        ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_EXITTB);
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                    RAWEx_ProfileStop(env, STATS_IRQ_HANDLING);
                    if (interrupt_request & CPU_INTERRUPT_EXIT)
                    {
                        env->exception_index = EXCP_INTERRUPT;
                        ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_EXIT);
                        ret = env->exception_index;
                        cpu_loop_exit();
                    }
                    if (interrupt_request & CPU_INTERRUPT_RC)
                    {
                        env->exception_index = EXCP_RC;
                        ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_RC);
                        ret = env->exception_index;
                        cpu_loop_exit();
                    }
                }

                /*
                 * Check if the CPU state allows us to execute the code in raw-mode.
                 */
                RAWEx_ProfileStart(env, STATS_RAW_CHECK);
                if (remR3CanExecuteRaw(env,
                                       env->eip + env->segs[R_CS].base,
                                       env->hflags | (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK)),
                                       &env->exception_index))
                {
                    RAWEx_ProfileStop(env, STATS_RAW_CHECK);
                    ret = env->exception_index;
                    cpu_loop_exit();
                }
                RAWEx_ProfileStop(env, STATS_RAW_CHECK);

                RAWEx_ProfileStart(env, STATS_TLB_LOOKUP);
                spin_lock(&tb_lock);
                tb = tb_find_fast();
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tb_invalidated_flag = 0;
                }

                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
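                /* Added note: tb_add_jump() patches jump slot (next_tb & 3)
                   of the TB we just left so that its generated code branches
                   straight to this TB, skipping this lookup next time. */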
                if (next_tb != 0
                    && !(tb->cflags & CF_RAW_MODE)
                    && tb->page_addr[1] == -1)
                {
                    tb_add_jump((TranslationBlock *)(long)(next_tb & ~3), next_tb & 3, tb);
                }
                spin_unlock(&tb_lock);
                RAWEx_ProfileStop(env, STATS_TLB_LOOKUP);

                env->current_tb = tb;
                while (env->current_tb) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
                    RAWEx_ProfileStart(env, STATS_QEMU_RUN_EMULATED_CODE);
#if defined(VBOX) && defined(GCC_WITH_BUGGY_REGPARM)
                    tcg_qemu_tb_exec(tc_ptr, next_tb);
#else
                    next_tb = tcg_qemu_tb_exec(tc_ptr);
#endif
                    RAWEx_ProfileStop(env, STATS_QEMU_RUN_EMULATED_CODE);
                    env->current_tb = NULL;
                    if ((next_tb & 3) == 2) {
                        /* Instruction counter expired. */
                        int insns_left;
                        tb = (TranslationBlock *)(long)(next_tb & ~3);
                        /* Restore PC. */
                        CPU_PC_FROM_TB(env, tb);
                        insns_left = env->icount_decr.u32;
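                        /* Added note: the decrementer holds at most 0xffff
                           instructions; the rest of the budget is parked in
                           icount_extra and moved back into
                           icount_decr.u16.low 0xffff at a time. */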
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution. */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions. */
                                cpu_exec_nocache(insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit();
                        }
                    }
                }

                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
#if defined(TARGET_I386) && !defined(CONFIG_SOFTMMU)
                if (env->hflags & HF_SOFTMMU_MASK) {
                    env->hflags &= ~HF_SOFTMMU_MASK;
                    /* do not allow linking to another block */
                    next_tb = 0;
                }
#endif
            } /* for(;;) */
        } else {
            env_to_regs();
        }
#ifdef VBOX_HIGH_RES_TIMERS_HACK
        /* NULL the current_tb here so cpu_interrupt() doesn't do anything
           unnecessary (like crashing during emulate single instruction).
           Note! Don't use env1->pVM here, the code wouldn't run with
                 gcc-4.4/amd64 anymore, see #3883. */
        env->current_tb = NULL;
        if (   !(env->interrupt_request & (  CPU_INTERRUPT_EXIT | CPU_INTERRUPT_DEBUG | CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_RC
                                           | CPU_INTERRUPT_SINGLE_INSTR | CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT))
            && (   (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_TIMER)
                || TMTimerPollBool(env->pVM, env->pVCpu)) ) {
            ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_EXTERNAL_TIMER);
            remR3ProfileStart(STATS_QEMU_RUN_TIMERS);
            TMR3TimerQueuesDo(env->pVM);
            remR3ProfileStop(STATS_QEMU_RUN_TIMERS);
        }
#endif
    } /* for(;;) */

#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
#else
#error unsupported target CPU
#endif
#include "hostregs_helper.h"
    return ret;
}

#else /* !VBOX */
int cpu_exec(CPUState *env1)
{
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    unsigned long next_tb;

    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;

    cpu_single_env = env1;

    /* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
    env = env1;

    env_to_regs();
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
            env->current_tb = NULL;
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    break;
                } else if (env->user_mode_only) {
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
                    /* successfully delivered */
                    env->old_exception = -1;
#endif
                    ret = env->exception_index;
                    break;
                } else {
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#endif
                }
                env->exception_index = -1;
            }
#ifdef USE_KQEMU
            if (kqemu_is_ok(env) && env->interrupt_request == 0) {
                int ret;
                env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                ret = kqemu_cpu_exec(env);
                /* put eflags in CPU temporary format */
                CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                DF = 1 - (2 * ((env->eflags >> 10) & 1));
                CC_OP = CC_OP_EFLAGS;
                env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                if (ret == 1) {
                    /* exception */
                    longjmp(env->jmp_env, 1);
                } else if (ret == 2) {
                    /* softmmu execution needed */
                } else {
                    if (env->interrupt_request != 0) {
                        /* hardware interrupt will be executed just after */
                    } else {
                        /* otherwise, we restart */
                        longjmp(env->jmp_env, 1);
                    }
                }
            }
#endif

            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (unlikely(interrupt_request) &&
                    likely(!(env->singlestep_enabled & SSTEP_NOIRQ))) {
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
                    if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            svm_check_intercept(SVM_EXIT_SMI);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter();
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            svm_check_intercept(SVM_EXIT_INTR);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            if (loglevel & CPU_LOG_TB_IN_ASM) {
                                fprintf(logfile, "Servicing hardware INT=0x%02x\n", intno);
                            }
                            do_interrupt(intno, 0, 0, 0, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            svm_check_intercept(SVM_EXIT_VINTR);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            if (loglevel & CPU_LOG_TB_IN_ASM)
                                fprintf(logfile, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt(intno, 0, 0, 0, 1);
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_ppc_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        !(env->CP0_Status & (1 << CP0St_EXL)) &&
                        !(env->CP0_Status & (1 << CP0St_ERL)) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psret != 0)) {
                        int pil = env->interrupt_index & 15;
                        int type = env->interrupt_index & 0xf0;

                        if (((type == TT_EXTINT) &&
                             (pil == 15 || pil > env->psrpil)) ||
                            type != TT_EXTINT) {
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                            env->exception_index = env->interrupt_index;
                            do_interrupt(env);
                            env->interrupt_index = 0;
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
                            cpu_check_irqs(env);
#endif
                            next_tb = 0;
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC. On real hardware the load causes the
                       return to occur. The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address. */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI
                        && (env->pregs[PR_CCS] & M_FLAG)) {
                        env->exception_index = EXCP_NMI;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point. Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled. */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_EXIT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
                        env->exception_index = EXCP_INTERRUPT;
                        cpu_loop_exit();
                    }
                }
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
                    regs_to_env();
#if defined(TARGET_I386)
                    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                    cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SPARC)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_PPC)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_MIPS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SH4)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_ALPHA)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_CRIS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#else
#error unsupported target CPU
#endif
                }
#endif
                spin_lock(&tb_lock);
                tb = tb_find_fast();
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tb_invalidated_flag = 0;
                }
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_EXEC)) {
                    fprintf(logfile, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                            (long)tb->tc_ptr, tb->pc,
                            lookup_symbol(tb->pc));
                }
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                {
                    if (next_tb != 0 &&
#ifdef USE_KQEMU
                        (env->kqemu_enabled != 2) &&
#endif
                        tb->page_addr[1] == -1) {
                        tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                    }
                }
                spin_unlock(&tb_lock);
                env->current_tb = tb;
                while (env->current_tb) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#undef env
                    env = cpu_single_env;
#define env cpu_single_env
#endif
                    next_tb = tcg_qemu_tb_exec(tc_ptr);
                    env->current_tb = NULL;
                    if ((next_tb & 3) == 2) {
                        /* Instruction counter expired. */
                        int insns_left;
                        tb = (TranslationBlock *)(long)(next_tb & ~3);
                        /* Restore PC. */
                        CPU_PC_FROM_TB(env, tb);
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution. */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions. */
                                cpu_exec_nocache(insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit();
                        }
                    }
                }
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
#if defined(USE_KQEMU)
#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
                if (kqemu_is_ok(env) &&
                    (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
                    cpu_loop_exit();
                }
#endif
            } /* for(;;) */
        } else {
            env_to_regs();
        }
    } /* for(;;) */


#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state? */
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* restore global registers */
#include "hostregs_helper.h"

    /* fail safe : never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}
#endif /* !VBOX */

/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it yields to MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}

#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        load_seg(seg_reg, selector);
    }
    env = saved_env;
}

void cpu_x86_fsave(CPUX86State *s, uint8_t *ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave((target_ulong)ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, uint8_t *ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor((target_ulong)ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 */

#if !defined(CONFIG_SOFTMMU)

#if defined(TARGET_I386)

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception, 0 otherwise. 'old_set' is the
   signal set which should be restored */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
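    /* Added note: a write fault on a page holding translated code means the
       guest modified code we translated; page_unprotect() re-enables writes
       and invalidates the stale TBs for that page. */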

    /* see if it is an MMU fault */
    ret = cpu_x86_handle_mmu_fault(env, address, is_write,
                                   ((env->hflags & HF_CPL_MASK) == 3), 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: EIP=0x%RGv CR2=0x%RGv error=0x%x\n",
               env->eip, env->cr[2], env->error_code);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        env->hflags |= HF_SOFTMMU_MASK;
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined(TARGET_ARM)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_arm_handle_mmu_fault(env, address, is_write, 1, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
}
#elif defined(TARGET_SPARC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_sparc_handle_mmu_fault(env, address, is_write, 1, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
}
#elif defined (TARGET_PPC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_ppc_handle_mmu_fault(env, address, is_write, msr_pr, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined(TARGET_M68K)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_m68k_handle_mmu_fault(env, address, is_write, 1, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#elif defined (TARGET_MIPS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_mips_handle_mmu_fault(env, address, is_write, 1, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined (TARGET_SH4)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_sh4_handle_mmu_fault(env, address, is_write, 1, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#else
#error unsupported target CPU
#endif

#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)   (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)  ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context) ((context)->uc_mcontext->es.err)
#else
# define EIP_sig(context)   ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)  ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context) ((context)->uc_mcontext.gregs[REG_ERR])
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = uc->uc_mcontext.gregs[REG_EIP];
    trapno = uc->uc_mcontext.gregs[REG_TRAPNO];
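    /* Added note: host trap 0xe is the x86 page fault; bit 1 of the pushed
       error code is set when the faulting access was a write, which is what
       the is_write argument below extracts. */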
#if defined(TARGET_I386) && defined(USE_CODE_COPY)
    if (trapno == 0x00 || trapno == 0x05) {
        /* send division by zero or bound exception */
        cpu_send_trap(pc, trapno, uc);
        return 1;
    } else
#endif
        return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                                 trapno == 0xe ?
                                 (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
                                 &uc->uc_sigmask, puc);
}

#elif defined(__x86_64__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;

    pc = uc->uc_mcontext.gregs[REG_RIP];
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             uc->uc_mcontext.gregs[REG_TRAPNO] == 0xe ?
                             (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}

#elif defined(__powerpc__)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)   ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)    REG_sig(gpr[reg_num], context)
# define IAR_sig(context)             REG_sig(nip, context)  /* Program counter */
# define MSR_sig(context)             REG_sig(msr, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)             REG_sig(ctr, context)  /* Count register */
# define XER_sig(context)             REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)              REG_sig(link, context) /* Link register */
# define CR_sig(context)              REG_sig(ccr, context)  /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)  (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)           (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)             REG_sig(dar, context)
# define DSISR_sig(context)           REG_sig(dsisr, context)
# define TRAP_sig(context)            REG_sig(trap, context)
#endif /* linux */

#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)      ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)   ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)    REG_sig(r##reg_num, context)
# define IAR_sig(context)             REG_sig(srr0, context) /* Program counter */
# define MSR_sig(context)             REG_sig(srr1, context) /* Machine State Register (Supervisor) */
# define CTR_sig(context)             REG_sig(ctr, context)  /* Count register */
# define XER_sig(context)             REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)              REG_sig(lr, context)   /* Link register */
# define CR_sig(context)              REG_sig(cr, context)   /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)  FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)           ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)             EXCEPREG_sig(dar, context)       /* Fault registers for coredump */
# define DSISR_sig(context)           EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)            EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: // stw
    case 0x0e: // stb
    case 0x0f: // stq_u
    case 0x24: // stf
    case 0x25: // stg
    case 0x26: // sts
    case 0x27: // stt
    case 0x2c: // stl
    case 0x2d: // stq
    case 0x2e: // stl_c
    case 0x2f: // stq_c
        is_write = 1;
    }

    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    unsigned long pc;
    int is_write;
    uint32_t insn;

    /* XXX: is there a standard glibc define ? */
    pc = regs[1];
    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
        switch((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x06: // sth
        case 0x04: // st
        case 0x07: // std
        case 0x24: // stf
        case 0x27: // stdf
        case 0x25: // stfsr
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}

#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[R15];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
    case SIGILL:
    case SIGFPE:
    case SIGSEGV:
    case SIGBUS:
    case SIGTRAP:
        if (info->si_code && (info->si_segvflags & __ISR_VALID))
            /* ISR.W (write-access) is bit 33: */
            is_write = (info->si_isr >> 33) & 1;
        break;

    default:
        break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */