VirtualBox

source: vbox/trunk/src/recompiler_new/cpu-exec.c@ 16894

Last change on this file since 16894 was 16455, checked in by vboxsync, 16 years ago

REM: segment forced sync, cleanups

  • Property svn:eol-style set to native
File size: 60.3 KB
1/*
2 * i386 emulator main execution loop
3 *
4 * Copyright (c) 2003-2005 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21/*
22 * Sun LGPL Disclaimer: For the avoidance of doubt, except that if any license choice
23 * other than GPL or LGPL is available it will apply instead, Sun elects to use only
24 * the Lesser General Public License version 2.1 (LGPLv2) at this time for any software where
25 * a choice of LGPL license versions is made available with the language indicating
26 * that LGPLv2 or any later version may be used, or where a choice of which version
27 * of the LGPL is applied is otherwise unspecified.
28 */
29#include "config.h"
30#define CPU_NO_GLOBAL_REGS
31#include "exec.h"
32#include "disas.h"
33#include "tcg.h"
34
35#if !defined(CONFIG_SOFTMMU)
36#undef EAX
37#undef ECX
38#undef EDX
39#undef EBX
40#undef ESP
41#undef EBP
42#undef ESI
43#undef EDI
44#undef EIP
45#include <signal.h>
46#include <sys/ucontext.h>
47#endif
48
49#if defined(__sparc__) && !defined(HOST_SOLARIS)
50// Work around ugly bugs in glibc that mangle global register contents
51#undef env
52#define env cpu_single_env
53#endif
54
55int tb_invalidated_flag;
56
57//#define DEBUG_EXEC
58//#define DEBUG_SIGNAL
59
60
61void cpu_loop_exit(void)
62{
63 /* NOTE: the registers at this point must be saved by hand because
64 longjmp restores them */
65 regs_to_env();
66 longjmp(env->jmp_env, 1);
67}
68
69#if !(defined(TARGET_SPARC) || defined(TARGET_SH4) || defined(TARGET_M68K))
70#define reg_T2
71#endif
72
73/* exit the current TB from a signal handler. The host registers are
74 restored in a state compatible with the CPU emulator
75 */
76void cpu_resume_from_signal(CPUState *env1, void *puc)
77{
78#if !defined(CONFIG_SOFTMMU)
79 struct ucontext *uc = puc;
80#endif
81
82 env = env1;
83
84 /* XXX: restore cpu registers saved in host registers */
85
86#if !defined(CONFIG_SOFTMMU)
87 if (puc) {
88 /* XXX: use siglongjmp ? */
89 sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
90 }
91#endif
92 longjmp(env->jmp_env, 1);
93}
94
95/* Execute the code without caching the generated code. An interpreter
96 could be used if available. */
97static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb)
98{
99 unsigned long next_tb;
100 TranslationBlock *tb;
101
102 /* Should never happen.
103 We only end up here when an existing TB is too long. */
104 if (max_cycles > CF_COUNT_MASK)
105 max_cycles = CF_COUNT_MASK;
106
107 tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
108 max_cycles);
109 env->current_tb = tb;
110 /* execute the generated code */
111#if defined(VBOX) && defined(GCC_WITH_BUGGY_REGPARM)
112 tcg_qemu_tb_exec(tb->tc_ptr, next_tb);
113#else
114 next_tb = tcg_qemu_tb_exec(tb->tc_ptr);
115#endif
116
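/* Sketch of the return-value convention as it reads from this loop:
   tcg_qemu_tb_exec hands back the address of the TB that just ran, with a
   small status tag in the two low bits (the pointer is recovered via
   next_tb & ~3). A tag of 2 means the block did not run to completion,
   e.g. the instruction counter expired or an asynchronous event arrived
   before the TB started, so the guest PC must be restored from the TB. */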
117 if ((next_tb & 3) == 2) {
118 /* Restore PC. This may happen if an async event occurs before
119 the TB starts executing. */
120 CPU_PC_FROM_TB(env, tb);
121 }
122 tb_phys_invalidate(tb, -1);
123 tb_free(tb);
124}
125
126static TranslationBlock *tb_find_slow(target_ulong pc,
127 target_ulong cs_base,
128 uint64_t flags)
129{
130 TranslationBlock *tb, **ptb1;
131 unsigned int h;
132 target_ulong phys_pc, phys_page1, phys_page2, virt_page2;
133
134 tb_invalidated_flag = 0;
135
136 regs_to_env(); /* XXX: do it just before cpu_gen_code() */
137
138 /* find translated block using physical mappings */
139 phys_pc = get_phys_addr_code(env, pc);
140 phys_page1 = phys_pc & TARGET_PAGE_MASK;
141 phys_page2 = -1;
142 h = tb_phys_hash_func(phys_pc);
143 ptb1 = &tb_phys_hash[h];
144 for(;;) {
145 tb = *ptb1;
146 if (!tb)
147 goto not_found;
148 if (tb->pc == pc &&
149 tb->page_addr[0] == phys_page1 &&
150 tb->cs_base == cs_base &&
151 tb->flags == flags) {
152 /* check next page if needed */
153 if (tb->page_addr[1] != -1) {
154 virt_page2 = (pc & TARGET_PAGE_MASK) +
155 TARGET_PAGE_SIZE;
156 phys_page2 = get_phys_addr_code(env, virt_page2);
157 if (tb->page_addr[1] == phys_page2)
158 goto found;
159 } else {
160 goto found;
161 }
162 }
163 ptb1 = &tb->phys_hash_next;
164 }
165 not_found:
166 /* if no translated code available, then translate it now */
167 tb = tb_gen_code(env, pc, cs_base, flags, 0);
168
169 found:
170 /* we add the TB in the virtual pc hash table */
171 env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
172 return tb;
173}
174
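/* TB lookup is two-level: tb_find_fast below first probes env->tb_jmp_cache,
   a small direct-mapped cache indexed by a hash of the virtual PC. On a miss,
   or when pc/cs_base/flags do not match, it falls back to tb_find_slow above,
   which hashes the physical PC, walks the tb_phys_hash chain, and finally
   translates a fresh block with tb_gen_code if nothing usable is found. */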
175#ifndef VBOX
176static inline TranslationBlock *tb_find_fast(void)
177#else
178DECLINLINE(TranslationBlock *) tb_find_fast(void)
179#endif
180{
181 TranslationBlock *tb;
182 target_ulong cs_base, pc;
183 uint64_t flags;
184
185 /* we record a subset of the CPU state. It will
186 always be the same before a given translated block
187 is executed. */
188#if defined(TARGET_I386)
189 flags = env->hflags;
190 flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
191 cs_base = env->segs[R_CS].base;
192 pc = cs_base + env->eip;
193#elif defined(TARGET_ARM)
194 flags = env->thumb | (env->vfp.vec_len << 1)
195 | (env->vfp.vec_stride << 4);
196 if ((env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR)
197 flags |= (1 << 6);
198 if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30))
199 flags |= (1 << 7);
200 flags |= (env->condexec_bits << 8);
201 cs_base = 0;
202 pc = env->regs[15];
203#elif defined(TARGET_SPARC)
204#ifdef TARGET_SPARC64
205 // AM . Combined FPU enable bits . PRIV . DMMU enabled . IMMU enabled
206 flags = ((env->pstate & PS_AM) << 2)
207 | (((env->pstate & PS_PEF) >> 1) | ((env->fprs & FPRS_FEF) << 2))
208 | (env->pstate & PS_PRIV) | ((env->lsu & (DMMU_E | IMMU_E)) >> 2);
209#else
210 // FPU enable . Supervisor
211 flags = (env->psref << 4) | env->psrs;
212#endif
213 cs_base = env->npc;
214 pc = env->pc;
215#elif defined(TARGET_PPC)
216 flags = env->hflags;
217 cs_base = 0;
218 pc = env->nip;
219#elif defined(TARGET_MIPS)
220 flags = env->hflags & (MIPS_HFLAG_TMASK | MIPS_HFLAG_BMASK);
221 cs_base = 0;
222 pc = env->active_tc.PC;
223#elif defined(TARGET_M68K)
224 flags = (env->fpcr & M68K_FPCR_PREC) /* Bit 6 */
225 | (env->sr & SR_S) /* Bit 13 */
226 | ((env->macsr >> 4) & 0xf); /* Bits 0-3 */
227 cs_base = 0;
228 pc = env->pc;
229#elif defined(TARGET_SH4)
230 flags = (env->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL
231 | DELAY_SLOT_TRUE | DELAY_SLOT_CLEARME)) /* Bits 0- 3 */
232 | (env->fpscr & (FPSCR_FR | FPSCR_SZ | FPSCR_PR)) /* Bits 19-21 */
233 | (env->sr & (SR_MD | SR_RB)); /* Bits 29-30 */
234 cs_base = 0;
235 pc = env->pc;
236#elif defined(TARGET_ALPHA)
237 flags = env->ps;
238 cs_base = 0;
239 pc = env->pc;
240#elif defined(TARGET_CRIS)
241 flags = env->pregs[PR_CCS] & (S_FLAG | P_FLAG | U_FLAG | X_FLAG);
242 flags |= env->dslot;
243 cs_base = 0;
244 pc = env->pc;
245#else
246#error unsupported CPU
247#endif
248 tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
249 if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
250 tb->flags != flags)) {
251 tb = tb_find_slow(pc, cs_base, flags);
252 }
253 return tb;
254}
255
256/* main execution loop */
257
258#ifdef VBOX
259
260int cpu_exec(CPUState *env1)
261{
262#define DECLARE_HOST_REGS 1
263#include "hostregs_helper.h"
264 int ret, interrupt_request;
265 TranslationBlock *tb;
266 uint8_t *tc_ptr;
267 unsigned long next_tb;
268
269 cpu_single_env = env1;
270
271 /* first we save global registers */
272#define SAVE_HOST_REGS 1
273#include "hostregs_helper.h"
274 env = env1;
275
276 env_to_regs();
277#if defined(TARGET_I386)
278 /* put eflags in CPU temporary format */
279 CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
280 DF = 1 - (2 * ((env->eflags >> 10) & 1));
281 CC_OP = CC_OP_EFLAGS;
282 env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
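 /* Illustration of the conversion above: EFLAGS bit 10 is DF, so
    DF = 1 - 2*((eflags >> 10) & 1) yields +1 for forward and -1 for backward
    string operations, letting the string helpers simply add DF to the index
    registers. The arithmetic flags are kept lazily in CC_SRC/CC_OP and are
    folded back into env->eflags when leaving cpu_exec. */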
283#elif defined(TARGET_SPARC)
284#elif defined(TARGET_M68K)
285 env->cc_op = CC_OP_FLAGS;
286 env->cc_dest = env->sr & 0xf;
287 env->cc_x = (env->sr >> 4) & 1;
288#elif defined(TARGET_ALPHA)
289#elif defined(TARGET_ARM)
290#elif defined(TARGET_PPC)
291#elif defined(TARGET_MIPS)
292#elif defined(TARGET_SH4)
293#elif defined(TARGET_CRIS)
294 /* XXXXX */
295#else
296#error unsupported target CPU
297#endif
298#ifndef VBOX /* VBOX: We need to raise traps and suchlike from the outside. */
299 env->exception_index = -1;
300#endif
301
302 /* prepare setjmp context for exception handling */
303 for(;;) {
304 if (setjmp(env->jmp_env) == 0)
305 {
306 env->current_tb = NULL;
307 VMMR3Unlock(env->pVM);
308 VMMR3Lock(env->pVM);
309
310 /*
311 * Check for fatal errors first
312 */
313 if (env->interrupt_request & CPU_INTERRUPT_RC) {
314 env->exception_index = EXCP_RC;
315 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_RC);
316 ret = env->exception_index;
317 cpu_loop_exit();
318 }
319
320 /* if an exception is pending, we execute it here */
321 if (env->exception_index >= 0) {
322 Assert(!env->user_mode_only);
323 if (env->exception_index >= EXCP_INTERRUPT) {
324 /* exit request from the cpu execution loop */
325 ret = env->exception_index;
326 break;
327 } else {
328 /* simulate a real cpu exception. On i386, it can
329 trigger new exceptions, but we do not handle
330 double or triple faults yet. */
331 RAWEx_ProfileStart(env, STATS_IRQ_HANDLING);
332 Log(("do_interrupt %d %d %RGv\n", env->exception_index, env->exception_is_int, env->exception_next_eip));
333 do_interrupt(env->exception_index,
334 env->exception_is_int,
335 env->error_code,
336 env->exception_next_eip, 0);
337 /* successfully delivered */
338 env->old_exception = -1;
339 RAWEx_ProfileStop(env, STATS_IRQ_HANDLING);
340 }
341 env->exception_index = -1;
342 }
343
344 next_tb = 0; /* force lookup of first TB */
345 for(;;)
346 {
347 interrupt_request = env->interrupt_request;
348#ifndef VBOX
349 if (__builtin_expect(interrupt_request, 0))
350#else
351 if (RT_UNLIKELY(interrupt_request != 0))
352#endif
353 {
354 /** @todo: reconcile with what QEMU really does */
355
356 /* Single instruction exec request, we execute it and return (one way or the other).
357 The caller will always reschedule after doing this operation! */
358 if (interrupt_request & CPU_INTERRUPT_SINGLE_INSTR)
359 {
360 /* not in flight are we? (if we are, we trapped) */
361 if (!(env->interrupt_request & CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT))
362 {
363 ASMAtomicOrS32((int32_t volatile *)&env->interrupt_request, CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT);
364 env->exception_index = EXCP_SINGLE_INSTR;
365 if (emulate_single_instr(env) == -1)
366 AssertMsgFailed(("REM: emulate_single_instr failed for EIP=%RGv!!\n", env->eip));
367
368 /* When we receive an external interrupt during execution of this single
369 instruction, we should stay here. We will leave when we're ready
370 for raw-mode or when interrupted by pending EMT requests. */
371 interrupt_request = env->interrupt_request; /* reload this! */
372 if ( !(interrupt_request & CPU_INTERRUPT_HARD)
373 || !(env->eflags & IF_MASK)
374 || (env->hflags & HF_INHIBIT_IRQ_MASK)
375 || (env->state & CPU_RAW_HWACC)
376 )
377 {
378 env->exception_index = ret = EXCP_SINGLE_INSTR;
379 cpu_loop_exit();
380 }
381 }
382 /* Clear CPU_INTERRUPT_SINGLE_INSTR and leave CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT set. */
383 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_SINGLE_INSTR);
384 }
385
386 RAWEx_ProfileStart(env, STATS_IRQ_HANDLING);
387 if ((interrupt_request & CPU_INTERRUPT_SMI) &&
388 !(env->hflags & HF_SMM_MASK)) {
389 env->interrupt_request &= ~CPU_INTERRUPT_SMI;
390 do_smm_enter();
391 next_tb = 0;
392 }
393 else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
394 (env->eflags & IF_MASK) &&
395 !(env->hflags & HF_INHIBIT_IRQ_MASK))
396 {
397 /* if hardware interrupt pending, we execute it */
398 int intno;
399 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_HARD);
400 intno = cpu_get_pic_interrupt(env);
401 if (intno >= 0)
402 {
403 Log(("do_interrupt %d\n", intno));
404 do_interrupt(intno, 0, 0, 0, 1);
405 }
406 /* ensure that no TB jump will be modified as
407 the program flow was changed */
408 next_tb = 0;
409 }
410 if (env->interrupt_request & CPU_INTERRUPT_EXITTB)
411 {
412 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_EXITTB);
413 /* ensure that no TB jump will be modified as
414 the program flow was changed */
415 next_tb = 0;
416 }
417 RAWEx_ProfileStop(env, STATS_IRQ_HANDLING);
418 if (interrupt_request & CPU_INTERRUPT_EXIT)
419 {
420 env->exception_index = EXCP_INTERRUPT;
421 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_EXIT);
422 ret = env->exception_index;
423 cpu_loop_exit();
424 }
425 if (interrupt_request & CPU_INTERRUPT_RC)
426 {
427 env->exception_index = EXCP_RC;
428 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_RC);
429 ret = env->exception_index;
430 cpu_loop_exit();
431 }
432 }
433
434 /*
435 * Check if the CPU state allows us to execute the code in raw-mode.
436 */
437 RAWEx_ProfileStart(env, STATS_RAW_CHECK);
438 if (remR3CanExecuteRaw(env,
439 env->eip + env->segs[R_CS].base,
440 env->hflags | (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK)),
441 &env->exception_index))
442 {
443 RAWEx_ProfileStop(env, STATS_RAW_CHECK);
444 ret = env->exception_index;
445 cpu_loop_exit();
446 }
447 RAWEx_ProfileStop(env, STATS_RAW_CHECK);
448
449 RAWEx_ProfileStart(env, STATS_TLB_LOOKUP);
450 spin_lock(&tb_lock);
451 tb = tb_find_fast();
452 /* Note: we do it here to avoid a gcc bug on Mac OS X when
453 doing it in tb_find_slow */
454 if (tb_invalidated_flag) {
455 /* as some TB could have been invalidated because
456 of memory exceptions while generating the code, we
457 must recompute the hash index here */
458 next_tb = 0;
459 tb_invalidated_flag = 0;
460 }
461
462 /* see if we can patch the calling TB. When the TB
463 spans two pages, we cannot safely do a direct
464 jump. */
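 /* Chaining sketch: next_tb still carries the pointer of the TB that just
    executed, with the index of the jump slot it exited through in its two
    low bits. tb_add_jump patches that slot so the old block branches
    straight into the freshly looked-up one, bypassing this dispatch loop on
    the next pass. Whenever control flow may have been redirected
    (interrupts, SMM entry, EXITTB requests), next_tb is reset to 0 above so
    no stale link is created. */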
465 if (next_tb != 0
466 && !(tb->cflags & CF_RAW_MODE)
467 && tb->page_addr[1] == -1)
468 {
469 tb_add_jump((TranslationBlock *)(long)(next_tb & ~3), next_tb & 3, tb);
470 }
471 spin_unlock(&tb_lock);
472 RAWEx_ProfileStop(env, STATS_TLB_LOOKUP);
473
474 env->current_tb = tb;
475 while (env->current_tb) {
476 tc_ptr = tb->tc_ptr;
477 /* execute the generated code */
478 RAWEx_ProfileStart(env, STATS_QEMU_RUN_EMULATED_CODE);
479#if defined(VBOX) && defined(GCC_WITH_BUGGY_REGPARM)
480 tcg_qemu_tb_exec(tc_ptr, next_tb);
481#else
482 next_tb = tcg_qemu_tb_exec(tc_ptr);
483#endif
484 RAWEx_ProfileStop(env, STATS_QEMU_RUN_EMULATED_CODE);
485 env->current_tb = NULL;
486 if ((next_tb & 3) == 2) {
487 /* Instruction counter expired. */
488 int insns_left;
489 tb = (TranslationBlock *)(long)(next_tb & ~3);
490 /* Restore PC. */
491 CPU_PC_FROM_TB(env, tb);
492 insns_left = env->icount_decr.u32;
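 /* The icount decrementer is only 16 bits wide (icount_decr.u16.low), so an
    instruction budget larger than 0xffff is parked in icount_extra and fed
    to the decrementer in 0xffff-sized chunks here. Once both are exhausted,
    any instructions still owed are executed uncached via cpu_exec_nocache
    and the loop exits with EXCP_INTERRUPT. */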
493 if (env->icount_extra && insns_left >= 0) {
494 /* Refill decrementer and continue execution. */
495 env->icount_extra += insns_left;
496 if (env->icount_extra > 0xffff) {
497 insns_left = 0xffff;
498 } else {
499 insns_left = env->icount_extra;
500 }
501 env->icount_extra -= insns_left;
502 env->icount_decr.u16.low = insns_left;
503 } else {
504 if (insns_left > 0) {
505 /* Execute remaining instructions. */
506 cpu_exec_nocache(insns_left, tb);
507 }
508 env->exception_index = EXCP_INTERRUPT;
509 next_tb = 0;
510 cpu_loop_exit();
511 }
512 }
513 }
514
515 /* reset soft MMU for next block (it can currently
516 only be set by a memory fault) */
517#if defined(TARGET_I386) && !defined(CONFIG_SOFTMMU)
518 if (env->hflags & HF_SOFTMMU_MASK) {
519 env->hflags &= ~HF_SOFTMMU_MASK;
520 /* do not allow linking to another block */
521 next_tb = 0;
522 }
523#endif
524 } /* for(;;) */
525 } else {
526 env_to_regs();
527 }
528#ifdef VBOX_HIGH_RES_TIMERS_HACK
529 /* NULL the current_tb here so cpu_interrupt() doesn't do
530 anything unnecessary (like crashing while emulating a single instruction). */
531 env->current_tb = NULL;
532 TMTimerPoll(env1->pVM);
533#endif
534 } /* for(;;) */
535
536#if defined(TARGET_I386)
537 /* restore flags in standard format */
538 env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
539#else
540#error unsupported target CPU
541#endif
542#include "hostregs_helper.h"
543 return ret;
544}
545
546#else /* !VBOX */
547int cpu_exec(CPUState *env1)
548{
549#define DECLARE_HOST_REGS 1
550#include "hostregs_helper.h"
551 int ret, interrupt_request;
552 TranslationBlock *tb;
553 uint8_t *tc_ptr;
554 unsigned long next_tb;
555
556 if (cpu_halted(env1) == EXCP_HALTED)
557 return EXCP_HALTED;
558
559 cpu_single_env = env1;
560
561 /* first we save global registers */
562#define SAVE_HOST_REGS 1
563#include "hostregs_helper.h"
564 env = env1;
565
566 env_to_regs();
567#if defined(TARGET_I386)
568 /* put eflags in CPU temporary format */
569 CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
570 DF = 1 - (2 * ((env->eflags >> 10) & 1));
571 CC_OP = CC_OP_EFLAGS;
572 env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
573#elif defined(TARGET_SPARC)
574#elif defined(TARGET_M68K)
575 env->cc_op = CC_OP_FLAGS;
576 env->cc_dest = env->sr & 0xf;
577 env->cc_x = (env->sr >> 4) & 1;
578#elif defined(TARGET_ALPHA)
579#elif defined(TARGET_ARM)
580#elif defined(TARGET_PPC)
581#elif defined(TARGET_MIPS)
582#elif defined(TARGET_SH4)
583#elif defined(TARGET_CRIS)
584 /* XXXXX */
585#else
586#error unsupported target CPU
587#endif
588 env->exception_index = -1;
589
590 /* prepare setjmp context for exception handling */
591 for(;;) {
592 if (setjmp(env->jmp_env) == 0) {
593 env->current_tb = NULL;
594 /* if an exception is pending, we execute it here */
595 if (env->exception_index >= 0) {
596 if (env->exception_index >= EXCP_INTERRUPT) {
597 /* exit request from the cpu execution loop */
598 ret = env->exception_index;
599 break;
600 } else if (env->user_mode_only) {
601 /* if user mode only, we simulate a fake exception
602 which will be handled outside the cpu execution
603 loop */
604#if defined(TARGET_I386)
605 do_interrupt_user(env->exception_index,
606 env->exception_is_int,
607 env->error_code,
608 env->exception_next_eip);
609 /* successfully delivered */
610 env->old_exception = -1;
611#endif
612 ret = env->exception_index;
613 break;
614 } else {
615#if defined(TARGET_I386)
616 /* simulate a real cpu exception. On i386, it can
617 trigger new exceptions, but we do not handle
618 double or triple faults yet. */
619 do_interrupt(env->exception_index,
620 env->exception_is_int,
621 env->error_code,
622 env->exception_next_eip, 0);
623 /* successfully delivered */
624 env->old_exception = -1;
625#elif defined(TARGET_PPC)
626 do_interrupt(env);
627#elif defined(TARGET_MIPS)
628 do_interrupt(env);
629#elif defined(TARGET_SPARC)
630 do_interrupt(env);
631#elif defined(TARGET_ARM)
632 do_interrupt(env);
633#elif defined(TARGET_SH4)
634 do_interrupt(env);
635#elif defined(TARGET_ALPHA)
636 do_interrupt(env);
637#elif defined(TARGET_CRIS)
638 do_interrupt(env);
639#elif defined(TARGET_M68K)
640 do_interrupt(0);
641#endif
642 }
643 env->exception_index = -1;
644 }
645#ifdef USE_KQEMU
646 if (kqemu_is_ok(env) && env->interrupt_request == 0) {
647 int ret;
648 env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
649 ret = kqemu_cpu_exec(env);
650 /* put eflags in CPU temporary format */
651 CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
652 DF = 1 - (2 * ((env->eflags >> 10) & 1));
653 CC_OP = CC_OP_EFLAGS;
654 env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
655 if (ret == 1) {
656 /* exception */
657 longjmp(env->jmp_env, 1);
658 } else if (ret == 2) {
659 /* softmmu execution needed */
660 } else {
661 if (env->interrupt_request != 0) {
662 /* hardware interrupt will be executed just after */
663 } else {
664 /* otherwise, we restart */
665 longjmp(env->jmp_env, 1);
666 }
667 }
668 }
669#endif
670
671 next_tb = 0; /* force lookup of first TB */
672 for(;;) {
673 interrupt_request = env->interrupt_request;
674 if (unlikely(interrupt_request) &&
675 likely(!(env->singlestep_enabled & SSTEP_NOIRQ))) {
676 if (interrupt_request & CPU_INTERRUPT_DEBUG) {
677 env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
678 env->exception_index = EXCP_DEBUG;
679 cpu_loop_exit();
680 }
681#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
682 defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS)
683 if (interrupt_request & CPU_INTERRUPT_HALT) {
684 env->interrupt_request &= ~CPU_INTERRUPT_HALT;
685 env->halted = 1;
686 env->exception_index = EXCP_HLT;
687 cpu_loop_exit();
688 }
689#endif
690#if defined(TARGET_I386)
691 if (env->hflags2 & HF2_GIF_MASK) {
692 if ((interrupt_request & CPU_INTERRUPT_SMI) &&
693 !(env->hflags & HF_SMM_MASK)) {
694 svm_check_intercept(SVM_EXIT_SMI);
695 env->interrupt_request &= ~CPU_INTERRUPT_SMI;
696 do_smm_enter();
697 next_tb = 0;
698 } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
699 !(env->hflags2 & HF2_NMI_MASK)) {
700 env->interrupt_request &= ~CPU_INTERRUPT_NMI;
701 env->hflags2 |= HF2_NMI_MASK;
702 do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
703 next_tb = 0;
704 } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
705 (((env->hflags2 & HF2_VINTR_MASK) &&
706 (env->hflags2 & HF2_HIF_MASK)) ||
707 (!(env->hflags2 & HF2_VINTR_MASK) &&
708 (env->eflags & IF_MASK &&
709 !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
710 int intno;
711 svm_check_intercept(SVM_EXIT_INTR);
712 env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
713 intno = cpu_get_pic_interrupt(env);
714 if (loglevel & CPU_LOG_TB_IN_ASM) {
715 fprintf(logfile, "Servicing hardware INT=0x%02x\n", intno);
716 }
717 do_interrupt(intno, 0, 0, 0, 1);
718 /* ensure that no TB jump will be modified as
719 the program flow was changed */
720 next_tb = 0;
721#if !defined(CONFIG_USER_ONLY)
722 } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
723 (env->eflags & IF_MASK) &&
724 !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
725 int intno;
726 /* FIXME: this should respect TPR */
727 svm_check_intercept(SVM_EXIT_VINTR);
728 env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
729 intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
730 if (loglevel & CPU_LOG_TB_IN_ASM)
731 fprintf(logfile, "Servicing virtual hardware INT=0x%02x\n", intno);
732 do_interrupt(intno, 0, 0, 0, 1);
733 next_tb = 0;
734#endif
735 }
736 }
737#elif defined(TARGET_PPC)
738#if 0
739 if ((interrupt_request & CPU_INTERRUPT_RESET)) {
740 cpu_ppc_reset(env);
741 }
742#endif
743 if (interrupt_request & CPU_INTERRUPT_HARD) {
744 ppc_hw_interrupt(env);
745 if (env->pending_interrupts == 0)
746 env->interrupt_request &= ~CPU_INTERRUPT_HARD;
747 next_tb = 0;
748 }
749#elif defined(TARGET_MIPS)
750 if ((interrupt_request & CPU_INTERRUPT_HARD) &&
751 (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
752 (env->CP0_Status & (1 << CP0St_IE)) &&
753 !(env->CP0_Status & (1 << CP0St_EXL)) &&
754 !(env->CP0_Status & (1 << CP0St_ERL)) &&
755 !(env->hflags & MIPS_HFLAG_DM)) {
756 /* Raise it */
757 env->exception_index = EXCP_EXT_INTERRUPT;
758 env->error_code = 0;
759 do_interrupt(env);
760 next_tb = 0;
761 }
762#elif defined(TARGET_SPARC)
763 if ((interrupt_request & CPU_INTERRUPT_HARD) &&
764 (env->psret != 0)) {
765 int pil = env->interrupt_index & 15;
766 int type = env->interrupt_index & 0xf0;
767
768 if (((type == TT_EXTINT) &&
769 (pil == 15 || pil > env->psrpil)) ||
770 type != TT_EXTINT) {
771 env->interrupt_request &= ~CPU_INTERRUPT_HARD;
772 env->exception_index = env->interrupt_index;
773 do_interrupt(env);
774 env->interrupt_index = 0;
775#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
776 cpu_check_irqs(env);
777#endif
778 next_tb = 0;
779 }
780 } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
781 //do_interrupt(0, 0, 0, 0, 0);
782 env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
783 }
784#elif defined(TARGET_ARM)
785 if (interrupt_request & CPU_INTERRUPT_FIQ
786 && !(env->uncached_cpsr & CPSR_F)) {
787 env->exception_index = EXCP_FIQ;
788 do_interrupt(env);
789 next_tb = 0;
790 }
791 /* ARMv7-M interrupt return works by loading a magic value
792 into the PC. On real hardware the load causes the
793 return to occur. The qemu implementation performs the
794 jump normally, then does the exception return when the
795 CPU tries to execute code at the magic address.
796 This will cause the magic PC value to be pushed to
797 the stack if an interrupt occurred at the wrong time.
798 We avoid this by disabling interrupts when
799 pc contains a magic address. */
800 if (interrupt_request & CPU_INTERRUPT_HARD
801 && ((IS_M(env) && env->regs[15] < 0xfffffff0)
802 || !(env->uncached_cpsr & CPSR_I))) {
803 env->exception_index = EXCP_IRQ;
804 do_interrupt(env);
805 next_tb = 0;
806 }
807#elif defined(TARGET_SH4)
808 if (interrupt_request & CPU_INTERRUPT_HARD) {
809 do_interrupt(env);
810 next_tb = 0;
811 }
812#elif defined(TARGET_ALPHA)
813 if (interrupt_request & CPU_INTERRUPT_HARD) {
814 do_interrupt(env);
815 next_tb = 0;
816 }
817#elif defined(TARGET_CRIS)
818 if (interrupt_request & CPU_INTERRUPT_HARD
819 && (env->pregs[PR_CCS] & I_FLAG)) {
820 env->exception_index = EXCP_IRQ;
821 do_interrupt(env);
822 next_tb = 0;
823 }
824 if (interrupt_request & CPU_INTERRUPT_NMI
825 && (env->pregs[PR_CCS] & M_FLAG)) {
826 env->exception_index = EXCP_NMI;
827 do_interrupt(env);
828 next_tb = 0;
829 }
830#elif defined(TARGET_M68K)
831 if (interrupt_request & CPU_INTERRUPT_HARD
832 && ((env->sr & SR_I) >> SR_I_SHIFT)
833 < env->pending_level) {
834 /* Real hardware gets the interrupt vector via an
835 IACK cycle at this point. Current emulated
836 hardware doesn't rely on this, so we
837 provide/save the vector when the interrupt is
838 first signalled. */
839 env->exception_index = env->pending_vector;
840 do_interrupt(1);
841 next_tb = 0;
842 }
843#endif
844 /* Don't use the cached interrupt_request value,
845 do_interrupt may have updated the EXITTB flag. */
846 if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
847 env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
848 /* ensure that no TB jump will be modified as
849 the program flow was changed */
850 next_tb = 0;
851 }
852 if (interrupt_request & CPU_INTERRUPT_EXIT) {
853 env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
854 env->exception_index = EXCP_INTERRUPT;
855 cpu_loop_exit();
856 }
857 }
858#ifdef DEBUG_EXEC
859 if ((loglevel & CPU_LOG_TB_CPU)) {
860 /* restore flags in standard format */
861 regs_to_env();
862#if defined(TARGET_I386)
863 env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
864 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
865 env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
866#elif defined(TARGET_ARM)
867 cpu_dump_state(env, logfile, fprintf, 0);
868#elif defined(TARGET_SPARC)
869 cpu_dump_state(env, logfile, fprintf, 0);
870#elif defined(TARGET_PPC)
871 cpu_dump_state(env, logfile, fprintf, 0);
872#elif defined(TARGET_M68K)
873 cpu_m68k_flush_flags(env, env->cc_op);
874 env->cc_op = CC_OP_FLAGS;
875 env->sr = (env->sr & 0xffe0)
876 | env->cc_dest | (env->cc_x << 4);
877 cpu_dump_state(env, logfile, fprintf, 0);
878#elif defined(TARGET_MIPS)
879 cpu_dump_state(env, logfile, fprintf, 0);
880#elif defined(TARGET_SH4)
881 cpu_dump_state(env, logfile, fprintf, 0);
882#elif defined(TARGET_ALPHA)
883 cpu_dump_state(env, logfile, fprintf, 0);
884#elif defined(TARGET_CRIS)
885 cpu_dump_state(env, logfile, fprintf, 0);
886#else
887#error unsupported target CPU
888#endif
889 }
890#endif
891 spin_lock(&tb_lock);
892 tb = tb_find_fast();
893 /* Note: we do it here to avoid a gcc bug on Mac OS X when
894 doing it in tb_find_slow */
895 if (tb_invalidated_flag) {
896 /* as some TB could have been invalidated because
897 of memory exceptions while generating the code, we
898 must recompute the hash index here */
899 next_tb = 0;
900 tb_invalidated_flag = 0;
901 }
902#ifdef DEBUG_EXEC
903 if ((loglevel & CPU_LOG_EXEC)) {
904 fprintf(logfile, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
905 (long)tb->tc_ptr, tb->pc,
906 lookup_symbol(tb->pc));
907 }
908#endif
909 /* see if we can patch the calling TB. When the TB
910 spans two pages, we cannot safely do a direct
911 jump. */
912 {
913 if (next_tb != 0 &&
914#ifdef USE_KQEMU
915 (env->kqemu_enabled != 2) &&
916#endif
917 tb->page_addr[1] == -1) {
918 tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
919 }
920 }
921 spin_unlock(&tb_lock);
922 env->current_tb = tb;
923 while (env->current_tb) {
924 tc_ptr = tb->tc_ptr;
925 /* execute the generated code */
926#if defined(__sparc__) && !defined(HOST_SOLARIS)
927#undef env
928 env = cpu_single_env;
929#define env cpu_single_env
930#endif
931 next_tb = tcg_qemu_tb_exec(tc_ptr);
932 env->current_tb = NULL;
933 if ((next_tb & 3) == 2) {
934 /* Instruction counter expired. */
935 int insns_left;
936 tb = (TranslationBlock *)(long)(next_tb & ~3);
937 /* Restore PC. */
938 CPU_PC_FROM_TB(env, tb);
939 insns_left = env->icount_decr.u32;
940 if (env->icount_extra && insns_left >= 0) {
941 /* Refill decrementer and continue execution. */
942 env->icount_extra += insns_left;
943 if (env->icount_extra > 0xffff) {
944 insns_left = 0xffff;
945 } else {
946 insns_left = env->icount_extra;
947 }
948 env->icount_extra -= insns_left;
949 env->icount_decr.u16.low = insns_left;
950 } else {
951 if (insns_left > 0) {
952 /* Execute remaining instructions. */
953 cpu_exec_nocache(insns_left, tb);
954 }
955 env->exception_index = EXCP_INTERRUPT;
956 next_tb = 0;
957 cpu_loop_exit();
958 }
959 }
960 }
961 /* reset soft MMU for next block (it can currently
962 only be set by a memory fault) */
963#if defined(USE_KQEMU)
964#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
965 if (kqemu_is_ok(env) &&
966 (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
967 cpu_loop_exit();
968 }
969#endif
970 } /* for(;;) */
971 } else {
972 env_to_regs();
973 }
974 } /* for(;;) */
975
976
977#if defined(TARGET_I386)
978 /* restore flags in standard format */
979 env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
980#elif defined(TARGET_ARM)
981 /* XXX: Save/restore host FPU exception state? */
982#elif defined(TARGET_SPARC)
983#elif defined(TARGET_PPC)
984#elif defined(TARGET_M68K)
985 cpu_m68k_flush_flags(env, env->cc_op);
986 env->cc_op = CC_OP_FLAGS;
987 env->sr = (env->sr & 0xffe0)
988 | env->cc_dest | (env->cc_x << 4);
989#elif defined(TARGET_MIPS)
990#elif defined(TARGET_SH4)
991#elif defined(TARGET_ALPHA)
992#elif defined(TARGET_CRIS)
993 /* XXXXX */
994#else
995#error unsupported target CPU
996#endif
997
998 /* restore global registers */
999#include "hostregs_helper.h"
1000
1001 /* fail-safe: never use cpu_single_env outside cpu_exec() */
1002 cpu_single_env = NULL;
1003 return ret;
1004}
1005#endif /* !VBOX */
1006
1007/* must only be called from the generated code as an exception can be
1008 generated */
1009void tb_invalidate_page_range(target_ulong start, target_ulong end)
1010{
1011 /* XXX: cannot enable it yet because it leads to an MMU exception
1012 where NIP != read address on PowerPC */
1013#if 0
1014 target_ulong phys_addr;
1015 phys_addr = get_phys_addr_code(env, start);
1016 tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
1017#endif
1018}
1019
1020#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)
1021
1022void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
1023{
1024 CPUX86State *saved_env;
1025
1026 saved_env = env;
1027 env = s;
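 /* In real mode or vm86 mode (CR0.PE clear or EFLAGS.VM set) a segment has
    no descriptor: its base is simply selector * 16 with a 64KiB (0xffff)
    limit, which is what the cache load below encodes. In protected mode the
    descriptor must be fetched and checked, hence the call to load_seg. */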
1028 if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
1029 selector &= 0xffff;
1030 cpu_x86_load_seg_cache(env, seg_reg, selector,
1031 (selector << 4), 0xffff, 0);
1032 } else {
1033 load_seg(seg_reg, selector);
1034 }
1035 env = saved_env;
1036}
1037
1038void cpu_x86_fsave(CPUX86State *s, uint8_t *ptr, int data32)
1039{
1040 CPUX86State *saved_env;
1041
1042 saved_env = env;
1043 env = s;
1044
1045 helper_fsave((target_ulong)ptr, data32);
1046
1047 env = saved_env;
1048}
1049
1050void cpu_x86_frstor(CPUX86State *s, uint8_t *ptr, int data32)
1051{
1052 CPUX86State *saved_env;
1053
1054 saved_env = env;
1055 env = s;
1056
1057 helper_frstor((target_ulong)ptr, data32);
1058
1059 env = saved_env;
1060}
1061
1062#endif /* TARGET_I386 */
1063
1064#if !defined(CONFIG_SOFTMMU)
1065
1066#if defined(TARGET_I386)
1067
1068/* 'pc' is the host PC at which the exception was raised. 'address' is
1069 the effective address of the memory exception. 'is_write' is 1 if a
1070 write caused the exception and 0 otherwise. 'old_set' is the
1071 signal set which should be restored */
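/* Rough flow, as far as it can be read from the body below: first try
   page_unprotect (the fault may just be the write protection used to detect
   self-modifying code), then ask the target MMU. A return of 0 means the
   fault does not belong to the emulated CPU; 1 means it was consumed, either
   silently, by raising a guest exception, or by re-entering the emulator
   through cpu_resume_from_signal. */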
1072static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
1073 int is_write, sigset_t *old_set,
1074 void *puc)
1075{
1076 TranslationBlock *tb;
1077 int ret;
1078
1079 if (cpu_single_env)
1080 env = cpu_single_env; /* XXX: find a correct solution for multithread */
1081#if defined(DEBUG_SIGNAL)
1082 qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1083 pc, address, is_write, *(unsigned long *)old_set);
1084#endif
1085 /* XXX: locking issue */
1086 if (is_write && page_unprotect(h2g(address), pc, puc)) {
1087 return 1;
1088 }
1089
1090 /* see if it is an MMU fault */
1091 ret = cpu_x86_handle_mmu_fault(env, address, is_write,
1092 ((env->hflags & HF_CPL_MASK) == 3), 0);
1093 if (ret < 0)
1094 return 0; /* not an MMU fault */
1095 if (ret == 0)
1096 return 1; /* the MMU fault was handled without causing real CPU fault */
1097 /* now we have a real cpu fault */
1098 tb = tb_find_pc(pc);
1099 if (tb) {
1100 /* the PC is inside the translated code. It means that we have
1101 a virtual CPU fault */
1102 cpu_restore_state(tb, env, pc, puc);
1103 }
1104 if (ret == 1) {
1105#if 0
1106 printf("PF exception: EIP=0x%RGv CR2=0x%RGv error=0x%x\n",
1107 env->eip, env->cr[2], env->error_code);
1108#endif
1109 /* we restore the process signal mask as the sigreturn should
1110 do it (XXX: use sigsetjmp) */
1111 sigprocmask(SIG_SETMASK, old_set, NULL);
1112 raise_exception_err(env->exception_index, env->error_code);
1113 } else {
1114 /* activate soft MMU for this block */
1115 env->hflags |= HF_SOFTMMU_MASK;
1116 cpu_resume_from_signal(env, puc);
1117 }
1118 /* never comes here */
1119 return 1;
1120}
1121
1122#elif defined(TARGET_ARM)
1123static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
1124 int is_write, sigset_t *old_set,
1125 void *puc)
1126{
1127 TranslationBlock *tb;
1128 int ret;
1129
1130 if (cpu_single_env)
1131 env = cpu_single_env; /* XXX: find a correct solution for multithread */
1132#if defined(DEBUG_SIGNAL)
1133 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1134 pc, address, is_write, *(unsigned long *)old_set);
1135#endif
1136 /* XXX: locking issue */
1137 if (is_write && page_unprotect(h2g(address), pc, puc)) {
1138 return 1;
1139 }
1140 /* see if it is an MMU fault */
1141 ret = cpu_arm_handle_mmu_fault(env, address, is_write, 1, 0);
1142 if (ret < 0)
1143 return 0; /* not an MMU fault */
1144 if (ret == 0)
1145 return 1; /* the MMU fault was handled without causing real CPU fault */
1146 /* now we have a real cpu fault */
1147 tb = tb_find_pc(pc);
1148 if (tb) {
1149 /* the PC is inside the translated code. It means that we have
1150 a virtual CPU fault */
1151 cpu_restore_state(tb, env, pc, puc);
1152 }
1153 /* we restore the process signal mask as the sigreturn should
1154 do it (XXX: use sigsetjmp) */
1155 sigprocmask(SIG_SETMASK, old_set, NULL);
1156 cpu_loop_exit();
1157}
1158#elif defined(TARGET_SPARC)
1159static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
1160 int is_write, sigset_t *old_set,
1161 void *puc)
1162{
1163 TranslationBlock *tb;
1164 int ret;
1165
1166 if (cpu_single_env)
1167 env = cpu_single_env; /* XXX: find a correct solution for multithread */
1168#if defined(DEBUG_SIGNAL)
1169 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1170 pc, address, is_write, *(unsigned long *)old_set);
1171#endif
1172 /* XXX: locking issue */
1173 if (is_write && page_unprotect(h2g(address), pc, puc)) {
1174 return 1;
1175 }
1176 /* see if it is an MMU fault */
1177 ret = cpu_sparc_handle_mmu_fault(env, address, is_write, 1, 0);
1178 if (ret < 0)
1179 return 0; /* not an MMU fault */
1180 if (ret == 0)
1181 return 1; /* the MMU fault was handled without causing real CPU fault */
1182 /* now we have a real cpu fault */
1183 tb = tb_find_pc(pc);
1184 if (tb) {
1185 /* the PC is inside the translated code. It means that we have
1186 a virtual CPU fault */
1187 cpu_restore_state(tb, env, pc, puc);
1188 }
1189 /* we restore the process signal mask as the sigreturn should
1190 do it (XXX: use sigsetjmp) */
1191 sigprocmask(SIG_SETMASK, old_set, NULL);
1192 cpu_loop_exit();
1193}
1194#elif defined (TARGET_PPC)
1195static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
1196 int is_write, sigset_t *old_set,
1197 void *puc)
1198{
1199 TranslationBlock *tb;
1200 int ret;
1201
1202 if (cpu_single_env)
1203 env = cpu_single_env; /* XXX: find a correct solution for multithread */
1204#if defined(DEBUG_SIGNAL)
1205 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1206 pc, address, is_write, *(unsigned long *)old_set);
1207#endif
1208 /* XXX: locking issue */
1209 if (is_write && page_unprotect(h2g(address), pc, puc)) {
1210 return 1;
1211 }
1212
1213 /* see if it is an MMU fault */
1214 ret = cpu_ppc_handle_mmu_fault(env, address, is_write, msr_pr, 0);
1215 if (ret < 0)
1216 return 0; /* not an MMU fault */
1217 if (ret == 0)
1218 return 1; /* the MMU fault was handled without causing real CPU fault */
1219
1220 /* now we have a real cpu fault */
1221 tb = tb_find_pc(pc);
1222 if (tb) {
1223 /* the PC is inside the translated code. It means that we have
1224 a virtual CPU fault */
1225 cpu_restore_state(tb, env, pc, puc);
1226 }
1227 if (ret == 1) {
1228#if 0
1229 printf("PF exception: NIP=0x%08x error=0x%x %p\n",
1230 env->nip, env->error_code, tb);
1231#endif
1232 /* we restore the process signal mask as the sigreturn should
1233 do it (XXX: use sigsetjmp) */
1234 sigprocmask(SIG_SETMASK, old_set, NULL);
1235 do_raise_exception_err(env->exception_index, env->error_code);
1236 } else {
1237 /* activate soft MMU for this block */
1238 cpu_resume_from_signal(env, puc);
1239 }
1240 /* never comes here */
1241 return 1;
1242}
1243
1244#elif defined(TARGET_M68K)
1245static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
1246 int is_write, sigset_t *old_set,
1247 void *puc)
1248{
1249 TranslationBlock *tb;
1250 int ret;
1251
1252 if (cpu_single_env)
1253 env = cpu_single_env; /* XXX: find a correct solution for multithread */
1254#if defined(DEBUG_SIGNAL)
1255 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1256 pc, address, is_write, *(unsigned long *)old_set);
1257#endif
1258 /* XXX: locking issue */
1259 if (is_write && page_unprotect(address, pc, puc)) {
1260 return 1;
1261 }
1262 /* see if it is an MMU fault */
1263 ret = cpu_m68k_handle_mmu_fault(env, address, is_write, 1, 0);
1264 if (ret < 0)
1265 return 0; /* not an MMU fault */
1266 if (ret == 0)
1267 return 1; /* the MMU fault was handled without causing real CPU fault */
1268 /* now we have a real cpu fault */
1269 tb = tb_find_pc(pc);
1270 if (tb) {
1271 /* the PC is inside the translated code. It means that we have
1272 a virtual CPU fault */
1273 cpu_restore_state(tb, env, pc, puc);
1274 }
1275 /* we restore the process signal mask as the sigreturn should
1276 do it (XXX: use sigsetjmp) */
1277 sigprocmask(SIG_SETMASK, old_set, NULL);
1278 cpu_loop_exit();
1279 /* never comes here */
1280 return 1;
1281}
1282
1283#elif defined (TARGET_MIPS)
1284static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
1285 int is_write, sigset_t *old_set,
1286 void *puc)
1287{
1288 TranslationBlock *tb;
1289 int ret;
1290
1291 if (cpu_single_env)
1292 env = cpu_single_env; /* XXX: find a correct solution for multithread */
1293#if defined(DEBUG_SIGNAL)
1294 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1295 pc, address, is_write, *(unsigned long *)old_set);
1296#endif
1297 /* XXX: locking issue */
1298 if (is_write && page_unprotect(h2g(address), pc, puc)) {
1299 return 1;
1300 }
1301
1302 /* see if it is an MMU fault */
1303 ret = cpu_mips_handle_mmu_fault(env, address, is_write, 1, 0);
1304 if (ret < 0)
1305 return 0; /* not an MMU fault */
1306 if (ret == 0)
1307 return 1; /* the MMU fault was handled without causing real CPU fault */
1308
1309 /* now we have a real cpu fault */
1310 tb = tb_find_pc(pc);
1311 if (tb) {
1312 /* the PC is inside the translated code. It means that we have
1313 a virtual CPU fault */
1314 cpu_restore_state(tb, env, pc, puc);
1315 }
1316 if (ret == 1) {
1317#if 0
1318 printf("PF exception: NIP=0x%08x error=0x%x %p\n",
1319 env->nip, env->error_code, tb);
1320#endif
1321 /* we restore the process signal mask as the sigreturn should
1322 do it (XXX: use sigsetjmp) */
1323 sigprocmask(SIG_SETMASK, old_set, NULL);
1324 do_raise_exception_err(env->exception_index, env->error_code);
1325 } else {
1326 /* activate soft MMU for this block */
1327 cpu_resume_from_signal(env, puc);
1328 }
1329 /* never comes here */
1330 return 1;
1331}
1332
1333#elif defined (TARGET_SH4)
1334static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
1335 int is_write, sigset_t *old_set,
1336 void *puc)
1337{
1338 TranslationBlock *tb;
1339 int ret;
1340
1341 if (cpu_single_env)
1342 env = cpu_single_env; /* XXX: find a correct solution for multithread */
1343#if defined(DEBUG_SIGNAL)
1344 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1345 pc, address, is_write, *(unsigned long *)old_set);
1346#endif
1347 /* XXX: locking issue */
1348 if (is_write && page_unprotect(h2g(address), pc, puc)) {
1349 return 1;
1350 }
1351
1352 /* see if it is an MMU fault */
1353 ret = cpu_sh4_handle_mmu_fault(env, address, is_write, 1, 0);
1354 if (ret < 0)
1355 return 0; /* not an MMU fault */
1356 if (ret == 0)
1357 return 1; /* the MMU fault was handled without causing real CPU fault */
1358
1359 /* now we have a real cpu fault */
1360 tb = tb_find_pc(pc);
1361 if (tb) {
1362 /* the PC is inside the translated code. It means that we have
1363 a virtual CPU fault */
1364 cpu_restore_state(tb, env, pc, puc);
1365 }
1366#if 0
1367 printf("PF exception: NIP=0x%08x error=0x%x %p\n",
1368 env->nip, env->error_code, tb);
1369#endif
1370 /* we restore the process signal mask as the sigreturn should
1371 do it (XXX: use sigsetjmp) */
1372 sigprocmask(SIG_SETMASK, old_set, NULL);
1373 cpu_loop_exit();
1374 /* never comes here */
1375 return 1;
1376}
1377#else
1378#error unsupported target CPU
1379#endif
1380
1381#if defined(__i386__)
1382
1383#if defined(__APPLE__)
1384# include <sys/ucontext.h>
1385
1386# define EIP_sig(context) (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
1387# define TRAP_sig(context) ((context)->uc_mcontext->es.trapno)
1388# define ERROR_sig(context) ((context)->uc_mcontext->es.err)
1389#else
1390# define EIP_sig(context) ((context)->uc_mcontext.gregs[REG_EIP])
1391# define TRAP_sig(context) ((context)->uc_mcontext.gregs[REG_TRAPNO])
1392# define ERROR_sig(context) ((context)->uc_mcontext.gregs[REG_ERR])
1393#endif
1394
1395int cpu_signal_handler(int host_signum, void *pinfo,
1396 void *puc)
1397{
1398 siginfo_t *info = pinfo;
1399 struct ucontext *uc = puc;
1400 unsigned long pc;
1401 int trapno;
1402
1403#ifndef REG_EIP
1404/* for glibc 2.1 */
1405#define REG_EIP EIP
1406#define REG_ERR ERR
1407#define REG_TRAPNO TRAPNO
1408#endif
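 /* trapno 0x0e is the x86 page-fault vector; bit 1 of the page-fault error
    code is set for write accesses, which is what (REG_ERR >> 1) & 1 below
    extracts to tell handle_cpu_signal whether this was a write fault. */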
1409 pc = uc->uc_mcontext.gregs[REG_EIP];
1410 trapno = uc->uc_mcontext.gregs[REG_TRAPNO];
1411#if defined(TARGET_I386) && defined(USE_CODE_COPY)
1412 if (trapno == 0x00 || trapno == 0x05) {
1413 /* send division by zero or bound exception */
1414 cpu_send_trap(pc, trapno, uc);
1415 return 1;
1416 } else
1417#endif
1418 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1419 trapno == 0xe ?
1420 (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
1421 &uc->uc_sigmask, puc);
1422}
1423
1424#elif defined(__x86_64__)
1425
1426int cpu_signal_handler(int host_signum, void *pinfo,
1427 void *puc)
1428{
1429 siginfo_t *info = pinfo;
1430 struct ucontext *uc = puc;
1431 unsigned long pc;
1432
1433 pc = uc->uc_mcontext.gregs[REG_RIP];
1434 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1435 uc->uc_mcontext.gregs[REG_TRAPNO] == 0xe ?
1436 (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
1437 &uc->uc_sigmask, puc);
1438}
1439
1440#elif defined(__powerpc__)
1441
1442/***********************************************************************
1443 * signal context platform-specific definitions
1444 * From Wine
1445 */
1446#ifdef linux
1447/* All Registers access - only for local access */
1448# define REG_sig(reg_name, context) ((context)->uc_mcontext.regs->reg_name)
1449/* Gpr Registers access */
1450# define GPR_sig(reg_num, context) REG_sig(gpr[reg_num], context)
1451# define IAR_sig(context) REG_sig(nip, context) /* Program counter */
1452# define MSR_sig(context) REG_sig(msr, context) /* Machine State Register (Supervisor) */
1453# define CTR_sig(context) REG_sig(ctr, context) /* Count register */
1454# define XER_sig(context) REG_sig(xer, context) /* User's integer exception register */
1455# define LR_sig(context) REG_sig(link, context) /* Link register */
1456# define CR_sig(context) REG_sig(ccr, context) /* Condition register */
1457/* Float Registers access */
1458# define FLOAT_sig(reg_num, context) (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
1459# define FPSCR_sig(context) (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
1460/* Exception Registers access */
1461# define DAR_sig(context) REG_sig(dar, context)
1462# define DSISR_sig(context) REG_sig(dsisr, context)
1463# define TRAP_sig(context) REG_sig(trap, context)
1464#endif /* linux */
1465
1466#ifdef __APPLE__
1467# include <sys/ucontext.h>
1468typedef struct ucontext SIGCONTEXT;
1469/* All Registers access - only for local access */
1470# define REG_sig(reg_name, context) ((context)->uc_mcontext->ss.reg_name)
1471# define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
1472# define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
1473# define VECREG_sig(reg_name, context) ((context)->uc_mcontext->vs.reg_name)
1474/* Gpr Registers access */
1475# define GPR_sig(reg_num, context) REG_sig(r##reg_num, context)
1476# define IAR_sig(context) REG_sig(srr0, context) /* Program counter */
1477# define MSR_sig(context) REG_sig(srr1, context) /* Machine State Register (Supervisor) */
1478# define CTR_sig(context) REG_sig(ctr, context)
1479# define XER_sig(context) REG_sig(xer, context) /* User's integer exception register */
1480# define LR_sig(context) REG_sig(lr, context) /* Link register */
1481# define CR_sig(context) REG_sig(cr, context) /* Condition register */
1482/* Float Registers access */
1483# define FLOAT_sig(reg_num, context) FLOATREG_sig(fpregs[reg_num], context)
1484# define FPSCR_sig(context) ((double)FLOATREG_sig(fpscr, context))
1485/* Exception Registers access */
1486# define DAR_sig(context) EXCEPREG_sig(dar, context) /* Fault registers for coredump */
1487# define DSISR_sig(context) EXCEPREG_sig(dsisr, context)
1488# define TRAP_sig(context) EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
1489#endif /* __APPLE__ */
1490
1491int cpu_signal_handler(int host_signum, void *pinfo,
1492 void *puc)
1493{
1494 siginfo_t *info = pinfo;
1495 struct ucontext *uc = puc;
1496 unsigned long pc;
1497 int is_write;
1498
1499 pc = IAR_sig(uc);
1500 is_write = 0;
1501#if 0
1502 /* ppc 4xx case */
1503 if (DSISR_sig(uc) & 0x00800000)
1504 is_write = 1;
1505#else
1506 if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
1507 is_write = 1;
1508#endif
1509 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1510 is_write, &uc->uc_sigmask, puc);
1511}
1512
1513#elif defined(__alpha__)
1514
1515int cpu_signal_handler(int host_signum, void *pinfo,
1516 void *puc)
1517{
1518 siginfo_t *info = pinfo;
1519 struct ucontext *uc = puc;
1520 uint32_t *pc = uc->uc_mcontext.sc_pc;
1521 uint32_t insn = *pc;
1522 int is_write = 0;
1523
1524 /* XXX: need kernel patch to get write flag faster */
1525 switch (insn >> 26) {
1526 case 0x0d: // stw
1527 case 0x0e: // stb
1528 case 0x0f: // stq_u
1529 case 0x24: // stf
1530 case 0x25: // stg
1531 case 0x26: // sts
1532 case 0x27: // stt
1533 case 0x2c: // stl
1534 case 0x2d: // stq
1535 case 0x2e: // stl_c
1536 case 0x2f: // stq_c
1537 is_write = 1;
1538 }
1539
1540 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1541 is_write, &uc->uc_sigmask, puc);
1542}
1543#elif defined(__sparc__)
1544
1545int cpu_signal_handler(int host_signum, void *pinfo,
1546 void *puc)
1547{
1548 siginfo_t *info = pinfo;
1549 uint32_t *regs = (uint32_t *)(info + 1);
1550 void *sigmask = (regs + 20);
1551 unsigned long pc;
1552 int is_write;
1553 uint32_t insn;
1554
1555 /* XXX: is there a standard glibc define ? */
1556 pc = regs[1];
1557 /* XXX: need kernel patch to get write flag faster */
1558 is_write = 0;
1559 insn = *(uint32_t *)pc;
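 /* SPARC format-3 (memory) instructions have op = 3 in bits 31:30 and the
    opcode proper in op3, bits 24:19; the cases below are the store variants,
    so matching one of them marks the fault as a write access. */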
1560 if ((insn >> 30) == 3) {
1561 switch((insn >> 19) & 0x3f) {
1562 case 0x05: // stb
1563 case 0x06: // sth
1564 case 0x04: // st
1565 case 0x07: // std
1566 case 0x24: // stf
1567 case 0x27: // stdf
1568 case 0x25: // stfsr
1569 is_write = 1;
1570 break;
1571 }
1572 }
1573 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1574 is_write, sigmask, NULL);
1575}
1576
1577#elif defined(__arm__)
1578
1579int cpu_signal_handler(int host_signum, void *pinfo,
1580 void *puc)
1581{
1582 siginfo_t *info = pinfo;
1583 struct ucontext *uc = puc;
1584 unsigned long pc;
1585 int is_write;
1586
1587 pc = uc->uc_mcontext.gregs[R15];
1588 /* XXX: compute is_write */
1589 is_write = 0;
1590 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1591 is_write,
1592 &uc->uc_sigmask, puc);
1593}
1594
1595#elif defined(__mc68000)
1596
1597int cpu_signal_handler(int host_signum, void *pinfo,
1598 void *puc)
1599{
1600 siginfo_t *info = pinfo;
1601 struct ucontext *uc = puc;
1602 unsigned long pc;
1603 int is_write;
1604
1605 pc = uc->uc_mcontext.gregs[16];
1606 /* XXX: compute is_write */
1607 is_write = 0;
1608 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1609 is_write,
1610 &uc->uc_sigmask, puc);
1611}
1612
1613#elif defined(__ia64)
1614
1615#ifndef __ISR_VALID
1616 /* This ought to be in <bits/siginfo.h>... */
1617# define __ISR_VALID 1
1618#endif
1619
1620int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
1621{
1622 siginfo_t *info = pinfo;
1623 struct ucontext *uc = puc;
1624 unsigned long ip;
1625 int is_write = 0;
1626
1627 ip = uc->uc_mcontext.sc_ip;
1628 switch (host_signum) {
1629 case SIGILL:
1630 case SIGFPE:
1631 case SIGSEGV:
1632 case SIGBUS:
1633 case SIGTRAP:
1634 if (info->si_code && (info->si_segvflags & __ISR_VALID))
1635 /* ISR.W (write-access) is bit 33: */
1636 is_write = (info->si_isr >> 33) & 1;
1637 break;
1638
1639 default:
1640 break;
1641 }
1642 return handle_cpu_signal(ip, (unsigned long)info->si_addr,
1643 is_write,
1644 &uc->uc_sigmask, puc);
1645}
1646
1647#elif defined(__s390__)
1648
1649int cpu_signal_handler(int host_signum, void *pinfo,
1650 void *puc)
1651{
1652 siginfo_t *info = pinfo;
1653 struct ucontext *uc = puc;
1654 unsigned long pc;
1655 int is_write;
1656
1657 pc = uc->uc_mcontext.psw.addr;
1658 /* XXX: compute is_write */
1659 is_write = 0;
1660 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1661 is_write,
1662 &uc->uc_sigmask, puc);
1663}
1664
1665#else
1666
1667#error host CPU specific signal handler needed
1668
1669#endif
1670
1671#endif /* !defined(CONFIG_SOFTMMU) */