VirtualBox

source: vbox/trunk/src/recompiler/cpu-exec.c@ 36314

Last change on this file since 36314 was 36175, checked in by vboxsync, 14 years ago

rem: Synced up to v0.11.1 (35bfc7324e2e6946c4113ada5db30553a1a7c40b) from git://git.savannah.nongnu.org/qemu.git.

1/*
2 * i386 emulator main execution loop
3 *
4 * Copyright (c) 2003-2005 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19
20/*
21 * Oracle LGPL Disclaimer: For the avoidance of doubt, except that if any license choice
22 * other than GPL or LGPL is available it will apply instead, Oracle elects to use only
23 * the Lesser General Public License version 2.1 (LGPLv2) at this time for any software where
24 * a choice of LGPL license versions is made available with the language indicating
25 * that LGPLv2 or any later version may be used, or where a choice of which version
26 * of the LGPL is applied is otherwise unspecified.
27 */
28
29#include "config.h"
30#include "exec.h"
31#include "disas.h"
32#include "tcg.h"
33#include "kvm.h"
34
35#if !defined(CONFIG_SOFTMMU)
36#undef EAX
37#undef ECX
38#undef EDX
39#undef EBX
40#undef ESP
41#undef EBP
42#undef ESI
43#undef EDI
44#undef EIP
45#include <signal.h>
46#ifdef __linux__
47#include <sys/ucontext.h>
48#endif
49#endif
50
51#if defined(__sparc__) && !defined(HOST_SOLARIS)
52// Work around ugly bugs in glibc that mangle global register contents
53#undef env
54#define env cpu_single_env
55#endif
56
57int tb_invalidated_flag;
58
59//#define DEBUG_EXEC
60//#define DEBUG_SIGNAL
61
62int qemu_cpu_has_work(CPUState *env)
63{
64 return cpu_has_work(env);
65}
66
67void cpu_loop_exit(void)
68{
69 /* NOTE: the registers at this point must be saved by hand because
70 longjmp restores them */
71 regs_to_env();
72 longjmp(env->jmp_env, 1);
73}
74
75/* exit the current TB from a signal handler. The host registers are
76 restored in a state compatible with the CPU emulator
77 */
78void cpu_resume_from_signal(CPUState *env1, void *puc)
79{
80#if !defined(CONFIG_SOFTMMU)
81#ifdef __linux__
82 struct ucontext *uc = puc;
83#elif defined(__OpenBSD__)
84 struct sigcontext *uc = puc;
85#endif
86#endif
87
88 env = env1;
89
90 /* XXX: restore cpu registers saved in host registers */
91
92#if !defined(CONFIG_SOFTMMU)
93 if (puc) {
94 /* XXX: use siglongjmp ? */
95#ifdef __linux__
96 sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
97#elif defined(__OpenBSD__)
98 sigprocmask(SIG_SETMASK, &uc->sc_mask, NULL);
99#endif
100 }
101#endif
102 env->exception_index = -1;
103 longjmp(env->jmp_env, 1);
104}
105
106/* Execute the code without caching the generated code. An interpreter
107 could be used if available. */
108static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb)
109{
110 unsigned long next_tb;
111 TranslationBlock *tb;
112
113 /* Should never happen.
114 We only end up here when an existing TB is too long. */
115 if (max_cycles > CF_COUNT_MASK)
116 max_cycles = CF_COUNT_MASK;
117
118 tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
119 max_cycles);
120 env->current_tb = tb;
121 /* execute the generated code */
122#if defined(VBOX) && defined(GCC_WITH_BUGGY_REGPARM)
123 tcg_qemu_tb_exec(tb->tc_ptr, next_tb);
124#else
125 next_tb = tcg_qemu_tb_exec(tb->tc_ptr);
126#endif
127
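    /* tcg_qemu_tb_exec() returns the pointer of the last executed TB with exit
       information in the two low bits: 0/1 name the jump slot used to leave
       that TB (cf. tb_add_jump() further down), while 2 means the TB stopped
       early, e.g. the instruction counter expired or an async event arrived
       before it started executing. */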
128 if ((next_tb & 3) == 2) {
129 /* Restore PC. This may happen if an async event occurs before
130 the TB starts executing. */
131 cpu_pc_from_tb(env, tb);
132 }
133 tb_phys_invalidate(tb, -1);
134 tb_free(tb);
135}
136
137static TranslationBlock *tb_find_slow(target_ulong pc,
138 target_ulong cs_base,
139 uint64_t flags)
140{
141 TranslationBlock *tb, **ptb1;
142 unsigned int h;
143 target_ulong phys_pc, phys_page1, phys_page2, virt_page2;
144
145 tb_invalidated_flag = 0;
146
147 regs_to_env(); /* XXX: do it just before cpu_gen_code() */
148
149 /* find translated block using physical mappings */
150 phys_pc = get_phys_addr_code(env, pc);
151 phys_page1 = phys_pc & TARGET_PAGE_MASK;
152 phys_page2 = -1;
153 h = tb_phys_hash_func(phys_pc);
154 ptb1 = &tb_phys_hash[h];
155 for(;;) {
156 tb = *ptb1;
157 if (!tb)
158 goto not_found;
159 if (tb->pc == pc &&
160 tb->page_addr[0] == phys_page1 &&
161 tb->cs_base == cs_base &&
162 tb->flags == flags) {
163 /* check next page if needed */
164 if (tb->page_addr[1] != -1) {
165 virt_page2 = (pc & TARGET_PAGE_MASK) +
166 TARGET_PAGE_SIZE;
167 phys_page2 = get_phys_addr_code(env, virt_page2);
168 if (tb->page_addr[1] == phys_page2)
169 goto found;
170 } else {
171 goto found;
172 }
173 }
174 ptb1 = &tb->phys_hash_next;
175 }
176 not_found:
177 /* if no translated code available, then translate it now */
178 tb = tb_gen_code(env, pc, cs_base, flags, 0);
179
180 found:
181 /* we add the TB to the virtual pc hash table */
182 env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
183 return tb;
184}
185
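/* TB lookup is two-level: tb_find_fast() first probes env->tb_jmp_cache,
   indexed by the virtual PC, and only falls back to tb_find_slow(), which
   walks the physical-address hash chain (tb_phys_hash) and generates a new
   TB when nothing matches pc/cs_base/flags. */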
186static inline TranslationBlock *tb_find_fast(void)
187{
188 TranslationBlock *tb;
189 target_ulong cs_base, pc;
190 int flags;
191
192 /* we record a subset of the CPU state. It will
193 always be the same before a given translated block
194 is executed. */
195 cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
196 tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
197 if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
198 tb->flags != flags)) {
199 tb = tb_find_slow(pc, cs_base, flags);
200 }
201 return tb;
202}
203
204static CPUDebugExcpHandler *debug_excp_handler;
205
206CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
207{
208 CPUDebugExcpHandler *old_handler = debug_excp_handler;
209
210 debug_excp_handler = handler;
211 return old_handler;
212}
213
214static void cpu_handle_debug_exception(CPUState *env)
215{
216 CPUWatchpoint *wp;
217
218 if (!env->watchpoint_hit)
219 TAILQ_FOREACH(wp, &env->watchpoints, entry)
220 wp->flags &= ~BP_WATCHPOINT_HIT;
221
222 if (debug_excp_handler)
223 debug_excp_handler(env);
224}
225
226/* main execution loop */
227
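/* Rough structure of cpu_exec(): a setjmp() context is set up so that
   cpu_loop_exit() can unwind back to it from helpers or generated code; the
   outer for(;;) delivers any pending exception, while the inner for(;;)
   services interrupt requests, looks up (or generates) the next
   TranslationBlock and runs it through tcg_qemu_tb_exec(). */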
228#ifdef VBOX
229
230int cpu_exec(CPUState *env1)
231{
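    /* hostregs_helper.h is meant to be included repeatedly: with
       DECLARE_HOST_REGS it declares storage for the host globals, with
       SAVE_HOST_REGS it saves them, and the bare include at the end of
       cpu_exec() restores them. */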
232#define DECLARE_HOST_REGS 1
233#include "hostregs_helper.h"
234 int ret = 0, interrupt_request;
235 TranslationBlock *tb;
236 uint8_t *tc_ptr;
237 unsigned long next_tb;
238
239 cpu_single_env = env1;
240
241 /* first we save global registers */
242#define SAVE_HOST_REGS 1
243#include "hostregs_helper.h"
244 env = env1;
245
246 env_to_regs();
247#if defined(TARGET_I386)
248 /* put eflags in CPU temporary format */
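    /* CC_SRC/CC_OP implement the lazy flag scheme: with CC_OP_EFLAGS the
       arithmetic flags sit directly in CC_SRC.  DF is kept as +1/-1 rather
       than as EFLAGS bit 10, hence the 1 - 2*((eflags >> 10) & 1) mapping. */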
249 CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
250 DF = 1 - (2 * ((env->eflags >> 10) & 1));
251 CC_OP = CC_OP_EFLAGS;
252 env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
253#elif defined(TARGET_SPARC)
254#elif defined(TARGET_M68K)
255 env->cc_op = CC_OP_FLAGS;
256 env->cc_dest = env->sr & 0xf;
257 env->cc_x = (env->sr >> 4) & 1;
258#elif defined(TARGET_ALPHA)
259#elif defined(TARGET_ARM)
260#elif defined(TARGET_PPC)
261#elif defined(TARGET_MIPS)
262#elif defined(TARGET_SH4)
263#elif defined(TARGET_CRIS)
264 /* XXXXX */
265#else
266#error unsupported target CPU
267#endif
268#ifndef VBOX /* VBOX: We need to raise traps and suchlike from the outside. */
269 env->exception_index = -1;
270#endif
271
272 /* prepare setjmp context for exception handling */
273 for(;;) {
274 if (setjmp(env->jmp_env) == 0)
275 {
276 env->current_tb = NULL;
277
278 /*
279 * Check for fatal errors first
280 */
281 if (env->interrupt_request & CPU_INTERRUPT_RC) {
282 env->exception_index = EXCP_RC;
283 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_RC);
284 ret = env->exception_index;
285 cpu_loop_exit();
286 }
287
288 /* if an exception is pending, we execute it here */
289 if (env->exception_index >= 0) {
290 if (env->exception_index >= EXCP_INTERRUPT) {
291 /* exit request from the cpu execution loop */
292 ret = env->exception_index;
293 if (ret == EXCP_DEBUG)
294 cpu_handle_debug_exception(env);
295 break;
296 } else {
297 /* simulate a real cpu exception. On i386, it can
298 trigger new exceptions, but we do not handle
299 double or triple faults yet. */
300 RAWEx_ProfileStart(env, STATS_IRQ_HANDLING);
301 Log(("do_interrupt %d %d %RGv\n", env->exception_index, env->exception_is_int, (RTGCPTR)env->exception_next_eip));
302 do_interrupt(env->exception_index,
303 env->exception_is_int,
304 env->error_code,
305 env->exception_next_eip, 0);
306 /* successfully delivered */
307 env->old_exception = -1;
308 RAWEx_ProfileStop(env, STATS_IRQ_HANDLING);
309 }
310 env->exception_index = -1;
311 }
312
313 next_tb = 0; /* force lookup of first TB */
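    /* next_tb == 0 means "do not chain": whenever program flow may have
       changed (interrupt delivered, TBs invalidated, ...) the previous TB
       must not be patched to jump directly into the next one, so direct
       block chaining is suppressed for one iteration. */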
314 for(;;)
315 {
316 interrupt_request = env->interrupt_request;
317 if (unlikely(interrupt_request)) {
318 if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
319 /* Mask out external interrupts for this step. */
320 interrupt_request &= ~(CPU_INTERRUPT_HARD |
321 CPU_INTERRUPT_FIQ |
322 CPU_INTERRUPT_SMI |
323 CPU_INTERRUPT_NMI);
324 }
325 if (interrupt_request & CPU_INTERRUPT_DEBUG) {
326 env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
327 env->exception_index = EXCP_DEBUG;
328 cpu_loop_exit();
329 }
330 /** @todo: reconcile with what QEMU really does */
331
332 /* Single instruction exec request: we execute it and return (one way or the other).
333 The caller will always reschedule after doing this operation! */
334 if (interrupt_request & CPU_INTERRUPT_SINGLE_INSTR)
335 {
336 /* not in flight are we? (if we are, we trapped) */
337 if (!(env->interrupt_request & CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT))
338 {
339 ASMAtomicOrS32((int32_t volatile *)&env->interrupt_request, CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT);
340 env->exception_index = EXCP_SINGLE_INSTR;
341 if (emulate_single_instr(env) == -1)
342 AssertMsgFailed(("REM: emulate_single_instr failed for EIP=%RGv!!\n", (RTGCPTR)env->eip));
343
344 /* When we receive an external interrupt during execution of this single
345 instruction, we should stay here. We will leave when we're ready
346 for raw-mode or when interrupted by pending EMT requests. */
347 interrupt_request = env->interrupt_request; /* reload this! */
348 if ( !(interrupt_request & CPU_INTERRUPT_HARD)
349 || !(env->eflags & IF_MASK)
350 || (env->hflags & HF_INHIBIT_IRQ_MASK)
351 || (env->state & CPU_RAW_HWACC)
352 )
353 {
354 env->exception_index = ret = EXCP_SINGLE_INSTR;
355 cpu_loop_exit();
356 }
357 }
358 /* Clear CPU_INTERRUPT_SINGLE_INSTR and leave CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT set. */
359 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_SINGLE_INSTR);
360 }
361
362 RAWEx_ProfileStart(env, STATS_IRQ_HANDLING);
363 if ((interrupt_request & CPU_INTERRUPT_SMI) &&
364 !(env->hflags & HF_SMM_MASK)) {
365 env->interrupt_request &= ~CPU_INTERRUPT_SMI;
366 do_smm_enter();
367 next_tb = 0;
368 }
369 else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
370 (env->eflags & IF_MASK) &&
371 !(env->hflags & HF_INHIBIT_IRQ_MASK))
372 {
373 /* if hardware interrupt pending, we execute it */
374 int intno;
375 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_HARD);
376 intno = cpu_get_pic_interrupt(env);
377 if (intno >= 0)
378 {
379 Log(("do_interrupt %d\n", intno));
380 do_interrupt(intno, 0, 0, 0, 1);
381 }
382 /* ensure that no TB jump will be modified as
383 the program flow was changed */
384 next_tb = 0;
385 }
386 if (env->interrupt_request & CPU_INTERRUPT_EXITTB)
387 {
388 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_EXITTB);
389 /* ensure that no TB jump will be modified as
390 the program flow was changed */
391 next_tb = 0;
392 }
393 RAWEx_ProfileStop(env, STATS_IRQ_HANDLING);
394 if (interrupt_request & CPU_INTERRUPT_RC)
395 {
396 env->exception_index = EXCP_RC;
397 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_RC);
398 ret = env->exception_index;
399 cpu_loop_exit();
400 }
401 }
402 if (unlikely(env->exit_request)) {
403 env->exit_request = 0;
404 env->exception_index = EXCP_INTERRUPT;
405 cpu_loop_exit();
406 }
407
408 /*
409 * Check if the CPU state allows us to execute the code in raw-mode.
410 */
411 RAWEx_ProfileStart(env, STATS_RAW_CHECK);
412 if (remR3CanExecuteRaw(env,
413 env->eip + env->segs[R_CS].base,
414 env->hflags | (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK)),
415 &env->exception_index))
416 {
417 RAWEx_ProfileStop(env, STATS_RAW_CHECK);
418 ret = env->exception_index;
419 cpu_loop_exit();
420 }
421 RAWEx_ProfileStop(env, STATS_RAW_CHECK);
422
423 RAWEx_ProfileStart(env, STATS_TLB_LOOKUP);
424 spin_lock(&tb_lock);
425 tb = tb_find_fast();
426 /* Note: we do it here to avoid a gcc bug on Mac OS X when
427 doing it in tb_find_slow */
428 if (tb_invalidated_flag) {
429 /* as some TB could have been invalidated because
430 of memory exceptions while generating the code, we
431 must recompute the hash index here */
432 next_tb = 0;
433 tb_invalidated_flag = 0;
434 }
435
436 /* see if we can patch the calling TB. When the TB
437 spans two pages, we cannot safely do a direct
438 jump. */
439 if (next_tb != 0
440 && !(tb->cflags & CF_RAW_MODE)
441 && tb->page_addr[1] == -1)
442 {
443 tb_add_jump((TranslationBlock *)(long)(next_tb & ~3), next_tb & 3, tb);
444 }
445 spin_unlock(&tb_lock);
446 RAWEx_ProfileStop(env, STATS_TLB_LOOKUP);
447
448 env->current_tb = tb;
449
450 /* cpu_interrupt might be called while translating the
451 TB, but before it is linked into a potentially
452 infinite loop and becomes env->current_tb. Avoid
453 starting execution if there is a pending interrupt. */
454 if (unlikely (env->exit_request))
455 env->current_tb = NULL;
456
457 while (env->current_tb) {
458 tc_ptr = tb->tc_ptr;
459 /* execute the generated code */
460 RAWEx_ProfileStart(env, STATS_QEMU_RUN_EMULATED_CODE);
461#if defined(VBOX) && defined(GCC_WITH_BUGGY_REGPARM)
462 tcg_qemu_tb_exec(tc_ptr, next_tb);
463#else
464 next_tb = tcg_qemu_tb_exec(tc_ptr);
465#endif
466 RAWEx_ProfileStop(env, STATS_QEMU_RUN_EMULATED_CODE);
467 env->current_tb = NULL;
468 if ((next_tb & 3) == 2) {
469 /* Instruction counter expired. */
470 int insns_left;
471 tb = (TranslationBlock *)(long)(next_tb & ~3);
472 /* Restore PC. */
473 cpu_pc_from_tb(env, tb);
474 insns_left = env->icount_decr.u32;
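    /* icount_decr.u16.low is only 16 bits wide, so a refill can schedule at
       most 0xffff instructions at a time; whatever does not fit is parked in
       icount_extra and handed over in later refills. */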
475 if (env->icount_extra && insns_left >= 0) {
476 /* Refill decrementer and continue execution. */
477 env->icount_extra += insns_left;
478 if (env->icount_extra > 0xffff) {
479 insns_left = 0xffff;
480 } else {
481 insns_left = env->icount_extra;
482 }
483 env->icount_extra -= insns_left;
484 env->icount_decr.u16.low = insns_left;
485 } else {
486 if (insns_left > 0) {
487 /* Execute remaining instructions. */
488 cpu_exec_nocache(insns_left, tb);
489 }
490 env->exception_index = EXCP_INTERRUPT;
491 next_tb = 0;
492 cpu_loop_exit();
493 }
494 }
495 }
496
497 /* reset soft MMU for next block (it can currently
498 only be set by a memory fault) */
499#if defined(TARGET_I386) && !defined(CONFIG_SOFTMMU)
500 if (env->hflags & HF_SOFTMMU_MASK) {
501 env->hflags &= ~HF_SOFTMMU_MASK;
502 /* do not allow linking to another block */
503 next_tb = 0;
504 }
505#endif
506 } /* for(;;) */
507 } else {
508 env_to_regs();
509 }
510#ifdef VBOX_HIGH_RES_TIMERS_HACK
511 /* NULL the current_tb here so cpu_interrupt() doesn't do anything
512 unnecessary (like crashing during single-instruction emulation).
513 Note! Don't use env1->pVM here, the code wouldn't run with
514 gcc-4.4/amd64 anymore, see #3883. */
515 env->current_tb = NULL;
516 if ( !(env->interrupt_request & ( CPU_INTERRUPT_DEBUG | CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_RC
517 | CPU_INTERRUPT_SINGLE_INSTR | CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT))
518 && ( (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_TIMER)
519 || TMTimerPollBool(env->pVM, env->pVCpu)) ) {
520 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_EXTERNAL_TIMER);
521 remR3ProfileStart(STATS_QEMU_RUN_TIMERS);
522 TMR3TimerQueuesDo(env->pVM);
523 remR3ProfileStop(STATS_QEMU_RUN_TIMERS);
524 }
525#endif
526 } /* for(;;) */
527
528#if defined(TARGET_I386)
529 /* restore flags in standard format */
530 env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
531#else
532#error unsupported target CPU
533#endif
534#include "hostregs_helper.h"
535 return ret;
536}
537
538#else /* !VBOX */
539int cpu_exec(CPUState *env1)
540{
541#define DECLARE_HOST_REGS 1
542#include "hostregs_helper.h"
543 int ret, interrupt_request;
544 TranslationBlock *tb;
545 uint8_t *tc_ptr;
546 unsigned long next_tb;
547
548 if (cpu_halted(env1) == EXCP_HALTED)
549 return EXCP_HALTED;
550
551 cpu_single_env = env1;
552
553 /* first we save global registers */
554#define SAVE_HOST_REGS 1
555#include "hostregs_helper.h"
556 env = env1;
557
558 env_to_regs();
559#if defined(TARGET_I386)
560 /* put eflags in CPU temporary format */
561 CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
562 DF = 1 - (2 * ((env->eflags >> 10) & 1));
563 CC_OP = CC_OP_EFLAGS;
564 env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
565#elif defined(TARGET_SPARC)
566#elif defined(TARGET_M68K)
567 env->cc_op = CC_OP_FLAGS;
568 env->cc_dest = env->sr & 0xf;
569 env->cc_x = (env->sr >> 4) & 1;
570#elif defined(TARGET_ALPHA)
571#elif defined(TARGET_ARM)
572#elif defined(TARGET_PPC)
573#elif defined(TARGET_MICROBLAZE)
574#elif defined(TARGET_MIPS)
575#elif defined(TARGET_SH4)
576#elif defined(TARGET_CRIS)
577 /* XXXXX */
578#else
579#error unsupported target CPU
580#endif
581 env->exception_index = -1;
582
583 /* prepare setjmp context for exception handling */
584 for(;;) {
585 if (setjmp(env->jmp_env) == 0) {
586#if defined(__sparc__) && !defined(HOST_SOLARIS)
587#undef env
588 env = cpu_single_env;
589#define env cpu_single_env
590#endif
591 env->current_tb = NULL;
592 /* if an exception is pending, we execute it here */
593 if (env->exception_index >= 0) {
594 if (env->exception_index >= EXCP_INTERRUPT) {
595 /* exit request from the cpu execution loop */
596 ret = env->exception_index;
597 if (ret == EXCP_DEBUG)
598 cpu_handle_debug_exception(env);
599 break;
600 } else {
601#if defined(CONFIG_USER_ONLY)
602 /* if user mode only, we simulate a fake exception
603 which will be handled outside the cpu execution
604 loop */
605#if defined(TARGET_I386)
606 do_interrupt_user(env->exception_index,
607 env->exception_is_int,
608 env->error_code,
609 env->exception_next_eip);
610 /* successfully delivered */
611 env->old_exception = -1;
612#endif
613 ret = env->exception_index;
614 break;
615#else
616#if defined(TARGET_I386)
617 /* simulate a real cpu exception. On i386, it can
618 trigger new exceptions, but we do not handle
619 double or triple faults yet. */
620 do_interrupt(env->exception_index,
621 env->exception_is_int,
622 env->error_code,
623 env->exception_next_eip, 0);
624 /* successfully delivered */
625 env->old_exception = -1;
626#elif defined(TARGET_PPC)
627 do_interrupt(env);
628#elif defined(TARGET_MICROBLAZE)
629 do_interrupt(env);
630#elif defined(TARGET_MIPS)
631 do_interrupt(env);
632#elif defined(TARGET_SPARC)
633 do_interrupt(env);
634#elif defined(TARGET_ARM)
635 do_interrupt(env);
636#elif defined(TARGET_SH4)
637 do_interrupt(env);
638#elif defined(TARGET_ALPHA)
639 do_interrupt(env);
640#elif defined(TARGET_CRIS)
641 do_interrupt(env);
642#elif defined(TARGET_M68K)
643 do_interrupt(0);
644#endif
645#endif
646 }
647 env->exception_index = -1;
648 }
649#ifdef CONFIG_KQEMU
650 if (kqemu_is_ok(env) && env->interrupt_request == 0 && env->exit_request == 0) {
651 int ret;
652 env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
653 ret = kqemu_cpu_exec(env);
654 /* put eflags in CPU temporary format */
655 CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
656 DF = 1 - (2 * ((env->eflags >> 10) & 1));
657 CC_OP = CC_OP_EFLAGS;
658 env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
659 if (ret == 1) {
660 /* exception */
661 longjmp(env->jmp_env, 1);
662 } else if (ret == 2) {
663 /* softmmu execution needed */
664 } else {
665 if (env->interrupt_request != 0 || env->exit_request != 0) {
666 /* hardware interrupt will be executed just after */
667 } else {
668 /* otherwise, we restart */
669 longjmp(env->jmp_env, 1);
670 }
671 }
672 }
673#endif
674
675 if (kvm_enabled()) {
676 kvm_cpu_exec(env);
677 longjmp(env->jmp_env, 1);
678 }
679
680 next_tb = 0; /* force lookup of first TB */
681 for(;;) {
682 interrupt_request = env->interrupt_request;
683 if (unlikely(interrupt_request)) {
684 if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
685 /* Mask out external interrupts for this step. */
686 interrupt_request &= ~(CPU_INTERRUPT_HARD |
687 CPU_INTERRUPT_FIQ |
688 CPU_INTERRUPT_SMI |
689 CPU_INTERRUPT_NMI);
690 }
691 if (interrupt_request & CPU_INTERRUPT_DEBUG) {
692 env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
693 env->exception_index = EXCP_DEBUG;
694 cpu_loop_exit();
695 }
696#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
697 defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
698 defined(TARGET_MICROBLAZE)
699 if (interrupt_request & CPU_INTERRUPT_HALT) {
700 env->interrupt_request &= ~CPU_INTERRUPT_HALT;
701 env->halted = 1;
702 env->exception_index = EXCP_HLT;
703 cpu_loop_exit();
704 }
705#endif
706#if defined(TARGET_I386)
707 if (interrupt_request & CPU_INTERRUPT_INIT) {
708 svm_check_intercept(SVM_EXIT_INIT);
709 do_cpu_init(env);
710 env->exception_index = EXCP_HALTED;
711 cpu_loop_exit();
712 } else if (interrupt_request & CPU_INTERRUPT_SIPI) {
713 do_cpu_sipi(env);
714 } else if (env->hflags2 & HF2_GIF_MASK) {
715 if ((interrupt_request & CPU_INTERRUPT_SMI) &&
716 !(env->hflags & HF_SMM_MASK)) {
717 svm_check_intercept(SVM_EXIT_SMI);
718 env->interrupt_request &= ~CPU_INTERRUPT_SMI;
719 do_smm_enter();
720 next_tb = 0;
721 } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
722 !(env->hflags2 & HF2_NMI_MASK)) {
723 env->interrupt_request &= ~CPU_INTERRUPT_NMI;
724 env->hflags2 |= HF2_NMI_MASK;
725 do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
726 next_tb = 0;
727 } else if (interrupt_request & CPU_INTERRUPT_MCE) {
728 env->interrupt_request &= ~CPU_INTERRUPT_MCE;
729 do_interrupt(EXCP12_MCHK, 0, 0, 0, 0);
730 next_tb = 0;
731 } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
732 (((env->hflags2 & HF2_VINTR_MASK) &&
733 (env->hflags2 & HF2_HIF_MASK)) ||
734 (!(env->hflags2 & HF2_VINTR_MASK) &&
735 (env->eflags & IF_MASK &&
736 !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
737 int intno;
738 svm_check_intercept(SVM_EXIT_INTR);
739 env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
740 intno = cpu_get_pic_interrupt(env);
741 qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
742#if defined(__sparc__) && !defined(HOST_SOLARIS)
743#undef env
744 env = cpu_single_env;
745#define env cpu_single_env
746#endif
747 do_interrupt(intno, 0, 0, 0, 1);
748 /* ensure that no TB jump will be modified as
749 the program flow was changed */
750 next_tb = 0;
751#if !defined(CONFIG_USER_ONLY)
752 } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
753 (env->eflags & IF_MASK) &&
754 !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
755 int intno;
756 /* FIXME: this should respect TPR */
757 svm_check_intercept(SVM_EXIT_VINTR);
758 intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
759 qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
760 do_interrupt(intno, 0, 0, 0, 1);
761 env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
762 next_tb = 0;
763#endif
764 }
765 }
766#elif defined(TARGET_PPC)
767#if 0
768 if ((interrupt_request & CPU_INTERRUPT_RESET)) {
769 cpu_ppc_reset(env);
770 }
771#endif
772 if (interrupt_request & CPU_INTERRUPT_HARD) {
773 ppc_hw_interrupt(env);
774 if (env->pending_interrupts == 0)
775 env->interrupt_request &= ~CPU_INTERRUPT_HARD;
776 next_tb = 0;
777 }
778#elif defined(TARGET_MICROBLAZE)
779 if ((interrupt_request & CPU_INTERRUPT_HARD)
780 && (env->sregs[SR_MSR] & MSR_IE)
781 && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
782 && !(env->iflags & (D_FLAG | IMM_FLAG))) {
783 env->exception_index = EXCP_IRQ;
784 do_interrupt(env);
785 next_tb = 0;
786 }
787#elif defined(TARGET_MIPS)
788 if ((interrupt_request & CPU_INTERRUPT_HARD) &&
789 (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
790 (env->CP0_Status & (1 << CP0St_IE)) &&
791 !(env->CP0_Status & (1 << CP0St_EXL)) &&
792 !(env->CP0_Status & (1 << CP0St_ERL)) &&
793 !(env->hflags & MIPS_HFLAG_DM)) {
794 /* Raise it */
795 env->exception_index = EXCP_EXT_INTERRUPT;
796 env->error_code = 0;
797 do_interrupt(env);
798 next_tb = 0;
799 }
800#elif defined(TARGET_SPARC)
801 if ((interrupt_request & CPU_INTERRUPT_HARD) &&
802 cpu_interrupts_enabled(env)) {
803 int pil = env->interrupt_index & 15;
804 int type = env->interrupt_index & 0xf0;
805
806 if (((type == TT_EXTINT) &&
807 (pil == 15 || pil > env->psrpil)) ||
808 type != TT_EXTINT) {
809 env->interrupt_request &= ~CPU_INTERRUPT_HARD;
810 env->exception_index = env->interrupt_index;
811 do_interrupt(env);
812 env->interrupt_index = 0;
813#if !defined(CONFIG_USER_ONLY)
814 cpu_check_irqs(env);
815#endif
816 next_tb = 0;
817 }
818 } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
819 //do_interrupt(0, 0, 0, 0, 0);
820 env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
821 }
822#elif defined(TARGET_ARM)
823 if (interrupt_request & CPU_INTERRUPT_FIQ
824 && !(env->uncached_cpsr & CPSR_F)) {
825 env->exception_index = EXCP_FIQ;
826 do_interrupt(env);
827 next_tb = 0;
828 }
829 /* ARMv7-M interrupt return works by loading a magic value
830 into the PC. On real hardware the load causes the
831 return to occur. The qemu implementation performs the
832 jump normally, then does the exception return when the
833 CPU tries to execute code at the magic address.
834 This will cause the magic PC value to be pushed to
835 the stack if an interrupt occurred at the wrong time.
836 We avoid this by disabling interrupts when
837 pc contains a magic address. */
838 if (interrupt_request & CPU_INTERRUPT_HARD
839 && ((IS_M(env) && env->regs[15] < 0xfffffff0)
840 || !(env->uncached_cpsr & CPSR_I))) {
841 env->exception_index = EXCP_IRQ;
842 do_interrupt(env);
843 next_tb = 0;
844 }
845#elif defined(TARGET_SH4)
846 if (interrupt_request & CPU_INTERRUPT_HARD) {
847 do_interrupt(env);
848 next_tb = 0;
849 }
850#elif defined(TARGET_ALPHA)
851 if (interrupt_request & CPU_INTERRUPT_HARD) {
852 do_interrupt(env);
853 next_tb = 0;
854 }
855#elif defined(TARGET_CRIS)
856 if (interrupt_request & CPU_INTERRUPT_HARD
857 && (env->pregs[PR_CCS] & I_FLAG)) {
858 env->exception_index = EXCP_IRQ;
859 do_interrupt(env);
860 next_tb = 0;
861 }
862 if (interrupt_request & CPU_INTERRUPT_NMI
863 && (env->pregs[PR_CCS] & M_FLAG)) {
864 env->exception_index = EXCP_NMI;
865 do_interrupt(env);
866 next_tb = 0;
867 }
868#elif defined(TARGET_M68K)
869 if (interrupt_request & CPU_INTERRUPT_HARD
870 && ((env->sr & SR_I) >> SR_I_SHIFT)
871 < env->pending_level) {
872 /* Real hardware gets the interrupt vector via an
873 IACK cycle at this point. Current emulated
874 hardware doesn't rely on this, so we
875 provide/save the vector when the interrupt is
876 first signalled. */
877 env->exception_index = env->pending_vector;
878 do_interrupt(1);
879 next_tb = 0;
880 }
881#endif
882 /* Don't use the cached interrupt_request value,
883 do_interrupt may have updated the EXITTB flag. */
884 if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
885 env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
886 /* ensure that no TB jump will be modified as
887 the program flow was changed */
888 next_tb = 0;
889 }
890 }
891 if (unlikely(env->exit_request)) {
892 env->exit_request = 0;
893 env->exception_index = EXCP_INTERRUPT;
894 cpu_loop_exit();
895 }
896#ifdef DEBUG_EXEC
897 if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
898 /* restore flags in standard format */
899 regs_to_env();
900#if defined(TARGET_I386)
901 env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
902 log_cpu_state(env, X86_DUMP_CCOP);
903 env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
904#elif defined(TARGET_ARM)
905 log_cpu_state(env, 0);
906#elif defined(TARGET_SPARC)
907 log_cpu_state(env, 0);
908#elif defined(TARGET_PPC)
909 log_cpu_state(env, 0);
910#elif defined(TARGET_M68K)
911 cpu_m68k_flush_flags(env, env->cc_op);
912 env->cc_op = CC_OP_FLAGS;
913 env->sr = (env->sr & 0xffe0)
914 | env->cc_dest | (env->cc_x << 4);
915 log_cpu_state(env, 0);
916#elif defined(TARGET_MICROBLAZE)
917 log_cpu_state(env, 0);
918#elif defined(TARGET_MIPS)
919 log_cpu_state(env, 0);
920#elif defined(TARGET_SH4)
921 log_cpu_state(env, 0);
922#elif defined(TARGET_ALPHA)
923 log_cpu_state(env, 0);
924#elif defined(TARGET_CRIS)
925 log_cpu_state(env, 0);
926#else
927#error unsupported target CPU
928#endif
929 }
930#endif
931 spin_lock(&tb_lock);
932 tb = tb_find_fast();
933 /* Note: we do it here to avoid a gcc bug on Mac OS X when
934 doing it in tb_find_slow */
935 if (tb_invalidated_flag) {
936 /* as some TB could have been invalidated because
937 of memory exceptions while generating the code, we
938 must recompute the hash index here */
939 next_tb = 0;
940 tb_invalidated_flag = 0;
941 }
942#ifdef DEBUG_EXEC
943 qemu_log_mask(CPU_LOG_EXEC, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
944 (long)tb->tc_ptr, tb->pc,
945 lookup_symbol(tb->pc));
946#endif
947 /* see if we can patch the calling TB. When the TB
948 spans two pages, we cannot safely do a direct
949 jump. */
950 {
951 if (next_tb != 0 &&
952#ifdef CONFIG_KQEMU
953 (env->kqemu_enabled != 2) &&
954#endif
955 tb->page_addr[1] == -1) {
956 tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
957 }
958 }
959 spin_unlock(&tb_lock);
960 env->current_tb = tb;
961
962 /* cpu_interrupt might be called while translating the
963 TB, but before it is linked into a potentially
964 infinite loop and becomes env->current_tb. Avoid
965 starting execution if there is a pending interrupt. */
966 if (unlikely (env->exit_request))
967 env->current_tb = NULL;
968
969 while (env->current_tb) {
970 tc_ptr = tb->tc_ptr;
971 /* execute the generated code */
972#if defined(__sparc__) && !defined(HOST_SOLARIS)
973#undef env
974 env = cpu_single_env;
975#define env cpu_single_env
976#endif
977 next_tb = tcg_qemu_tb_exec(tc_ptr);
978 env->current_tb = NULL;
979 if ((next_tb & 3) == 2) {
980 /* Instruction counter expired. */
981 int insns_left;
982 tb = (TranslationBlock *)(long)(next_tb & ~3);
983 /* Restore PC. */
984 cpu_pc_from_tb(env, tb);
985 insns_left = env->icount_decr.u32;
986 if (env->icount_extra && insns_left >= 0) {
987 /* Refill decrementer and continue execution. */
988 env->icount_extra += insns_left;
989 if (env->icount_extra > 0xffff) {
990 insns_left = 0xffff;
991 } else {
992 insns_left = env->icount_extra;
993 }
994 env->icount_extra -= insns_left;
995 env->icount_decr.u16.low = insns_left;
996 } else {
997 if (insns_left > 0) {
998 /* Execute remaining instructions. */
999 cpu_exec_nocache(insns_left, tb);
1000 }
1001 env->exception_index = EXCP_INTERRUPT;
1002 next_tb = 0;
1003 cpu_loop_exit();
1004 }
1005 }
1006 }
1007 /* reset soft MMU for next block (it can currently
1008 only be set by a memory fault) */
1009#if defined(CONFIG_KQEMU)
1010#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
1011 if (kqemu_is_ok(env) &&
1012 (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
1013 cpu_loop_exit();
1014 }
1015#endif
1016 } /* for(;;) */
1017 } else {
1018 env_to_regs();
1019 }
1020 } /* for(;;) */
1021
1022
1023#if defined(TARGET_I386)
1024 /* restore flags in standard format */
1025 env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
1026#elif defined(TARGET_ARM)
1027 /* XXX: Save/restore host fpu exception state? */
1028#elif defined(TARGET_SPARC)
1029#elif defined(TARGET_PPC)
1030#elif defined(TARGET_M68K)
1031 cpu_m68k_flush_flags(env, env->cc_op);
1032 env->cc_op = CC_OP_FLAGS;
1033 env->sr = (env->sr & 0xffe0)
1034 | env->cc_dest | (env->cc_x << 4);
1035#elif defined(TARGET_MICROBLAZE)
1036#elif defined(TARGET_MIPS)
1037#elif defined(TARGET_SH4)
1038#elif defined(TARGET_ALPHA)
1039#elif defined(TARGET_CRIS)
1040 /* XXXXX */
1041#else
1042#error unsupported target CPU
1043#endif
1044
1045 /* restore global registers */
1046#include "hostregs_helper.h"
1047
1048 /* fail safe : never use cpu_single_env outside cpu_exec() */
1049 cpu_single_env = NULL;
1050 return ret;
1051}
1052
1053#endif /* !VBOX */
1054
1055/* must only be called from the generated code as an exception can be
1056 generated */
1057void tb_invalidate_page_range(target_ulong start, target_ulong end)
1058{
1059 /* XXX: cannot enable it yet because it leads to MMU exceptions
1060 where NIP != read address on PowerPC */
1061#if 0
1062 target_ulong phys_addr;
1063 phys_addr = get_phys_addr_code(env, start);
1064 tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
1065#endif
1066}
1067
1068#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)
1069
1070void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
1071{
1072 CPUX86State *saved_env;
1073
1074 saved_env = env;
1075 env = s;
1076 if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
1077 selector &= 0xffff;
1078 cpu_x86_load_seg_cache(env, seg_reg, selector,
1079 (selector << 4), 0xffff, 0);
1080 } else {
1081 helper_load_seg(seg_reg, selector);
1082 }
1083 env = saved_env;
1084}
1085
1086void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
1087{
1088 CPUX86State *saved_env;
1089
1090 saved_env = env;
1091 env = s;
1092
1093 helper_fsave(ptr, data32);
1094
1095 env = saved_env;
1096}
1097
1098void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
1099{
1100 CPUX86State *saved_env;
1101
1102 saved_env = env;
1103 env = s;
1104
1105 helper_frstor(ptr, data32);
1106
1107 env = saved_env;
1108}
1109
1110#endif /* TARGET_I386 */
1111
1112#if !defined(CONFIG_SOFTMMU)
1113
1114#if defined(TARGET_I386)
1115
1116/* 'pc' is the host PC at which the exception was raised. 'address' is
1117 the effective address of the memory exception. 'is_write' is 1 if a
1118 write caused the exception and 0 otherwise. 'old_set' is the
1119 signal set which should be restored */
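/* Returns 1 when the fault was handled here (either the soft MMU serviced it
   or a guest exception was raised), and 0 when the access was not an MMU
   fault at all and must be handled by the caller. */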
1120static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
1121 int is_write, sigset_t *old_set,
1122 void *puc)
1123{
1124 TranslationBlock *tb;
1125 int ret;
1126
1127 if (cpu_single_env)
1128 env = cpu_single_env; /* XXX: find a correct solution for multithread */
1129#if defined(DEBUG_SIGNAL)
1130 qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1131 pc, address, is_write, *(unsigned long *)old_set);
1132#endif
1133 /* XXX: locking issue */
1134 if (is_write && page_unprotect(h2g(address), pc, puc)) {
1135 return 1;
1136 }
1137
1138 /* see if it is an MMU fault */
1139 ret = cpu_x86_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
1140 if (ret < 0)
1141 return 0; /* not an MMU fault */
1142 if (ret == 0)
1143 return 1; /* the MMU fault was handled without causing real CPU fault */
1144 /* now we have a real cpu fault */
1145 tb = tb_find_pc(pc);
1146 if (tb) {
1147 /* the PC is inside the translated code. It means that we have
1148 a virtual CPU fault */
1149 cpu_restore_state(tb, env, pc, puc);
1150 }
1151 if (ret == 1) {
1152#if 0
1153 printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
1154 env->eip, env->cr[2], env->error_code);
1155#endif
1156 /* we restore the process signal mask as the sigreturn should
1157 do it (XXX: use sigsetjmp) */
1158 sigprocmask(SIG_SETMASK, old_set, NULL);
1159 raise_exception_err(env->exception_index, env->error_code);
1160 } else {
1161 /* activate soft MMU for this block */
1162 env->hflags |= HF_SOFTMMU_MASK;
1163 cpu_resume_from_signal(env, puc);
1164 }
1165 /* never comes here */
1166 return 1;
1167}
1168
1169#elif defined(TARGET_ARM)
1170static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
1171 int is_write, sigset_t *old_set,
1172 void *puc)
1173{
1174 TranslationBlock *tb;
1175 int ret;
1176
1177 if (cpu_single_env)
1178 env = cpu_single_env; /* XXX: find a correct solution for multithread */
1179#if defined(DEBUG_SIGNAL)
1180 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1181 pc, address, is_write, *(unsigned long *)old_set);
1182#endif
1183 /* XXX: locking issue */
1184 if (is_write && page_unprotect(h2g(address), pc, puc)) {
1185 return 1;
1186 }
1187 /* see if it is an MMU fault */
1188 ret = cpu_arm_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
1189 if (ret < 0)
1190 return 0; /* not an MMU fault */
1191 if (ret == 0)
1192 return 1; /* the MMU fault was handled without causing real CPU fault */
1193 /* now we have a real cpu fault */
1194 tb = tb_find_pc(pc);
1195 if (tb) {
1196 /* the PC is inside the translated code. It means that we have
1197 a virtual CPU fault */
1198 cpu_restore_state(tb, env, pc, puc);
1199 }
1200 /* we restore the process signal mask as the sigreturn should
1201 do it (XXX: use sigsetjmp) */
1202 sigprocmask(SIG_SETMASK, old_set, NULL);
1203 cpu_loop_exit();
1204 /* never comes here */
1205 return 1;
1206}
1207#elif defined(TARGET_SPARC)
1208static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
1209 int is_write, sigset_t *old_set,
1210 void *puc)
1211{
1212 TranslationBlock *tb;
1213 int ret;
1214
1215 if (cpu_single_env)
1216 env = cpu_single_env; /* XXX: find a correct solution for multithread */
1217#if defined(DEBUG_SIGNAL)
1218 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1219 pc, address, is_write, *(unsigned long *)old_set);
1220#endif
1221 /* XXX: locking issue */
1222 if (is_write && page_unprotect(h2g(address), pc, puc)) {
1223 return 1;
1224 }
1225 /* see if it is an MMU fault */
1226 ret = cpu_sparc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
1227 if (ret < 0)
1228 return 0; /* not an MMU fault */
1229 if (ret == 0)
1230 return 1; /* the MMU fault was handled without causing real CPU fault */
1231 /* now we have a real cpu fault */
1232 tb = tb_find_pc(pc);
1233 if (tb) {
1234 /* the PC is inside the translated code. It means that we have
1235 a virtual CPU fault */
1236 cpu_restore_state(tb, env, pc, puc);
1237 }
1238 /* we restore the process signal mask as the sigreturn should
1239 do it (XXX: use sigsetjmp) */
1240 sigprocmask(SIG_SETMASK, old_set, NULL);
1241 cpu_loop_exit();
1242 /* never comes here */
1243 return 1;
1244}
1245#elif defined (TARGET_PPC)
1246static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
1247 int is_write, sigset_t *old_set,
1248 void *puc)
1249{
1250 TranslationBlock *tb;
1251 int ret;
1252
1253 if (cpu_single_env)
1254 env = cpu_single_env; /* XXX: find a correct solution for multithread */
1255#if defined(DEBUG_SIGNAL)
1256 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1257 pc, address, is_write, *(unsigned long *)old_set);
1258#endif
1259 /* XXX: locking issue */
1260 if (is_write && page_unprotect(h2g(address), pc, puc)) {
1261 return 1;
1262 }
1263
1264 /* see if it is an MMU fault */
1265 ret = cpu_ppc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
1266 if (ret < 0)
1267 return 0; /* not an MMU fault */
1268 if (ret == 0)
1269 return 1; /* the MMU fault was handled without causing real CPU fault */
1270
1271 /* now we have a real cpu fault */
1272 tb = tb_find_pc(pc);
1273 if (tb) {
1274 /* the PC is inside the translated code. It means that we have
1275 a virtual CPU fault */
1276 cpu_restore_state(tb, env, pc, puc);
1277 }
1278 if (ret == 1) {
1279#if 0
1280 printf("PF exception: NIP=0x%08x error=0x%x %p\n",
1281 env->nip, env->error_code, tb);
1282#endif
1283 /* we restore the process signal mask as the sigreturn should
1284 do it (XXX: use sigsetjmp) */
1285 sigprocmask(SIG_SETMASK, old_set, NULL);
1286 cpu_loop_exit();
1287 } else {
1288 /* activate soft MMU for this block */
1289 cpu_resume_from_signal(env, puc);
1290 }
1291 /* never comes here */
1292 return 1;
1293}
1294
1295#elif defined(TARGET_M68K)
1296static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
1297 int is_write, sigset_t *old_set,
1298 void *puc)
1299{
1300 TranslationBlock *tb;
1301 int ret;
1302
1303 if (cpu_single_env)
1304 env = cpu_single_env; /* XXX: find a correct solution for multithread */
1305#if defined(DEBUG_SIGNAL)
1306 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1307 pc, address, is_write, *(unsigned long *)old_set);
1308#endif
1309 /* XXX: locking issue */
1310 if (is_write && page_unprotect(address, pc, puc)) {
1311 return 1;
1312 }
1313 /* see if it is an MMU fault */
1314 ret = cpu_m68k_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
1315 if (ret < 0)
1316 return 0; /* not an MMU fault */
1317 if (ret == 0)
1318 return 1; /* the MMU fault was handled without causing real CPU fault */
1319 /* now we have a real cpu fault */
1320 tb = tb_find_pc(pc);
1321 if (tb) {
1322 /* the PC is inside the translated code. It means that we have
1323 a virtual CPU fault */
1324 cpu_restore_state(tb, env, pc, puc);
1325 }
1326 /* we restore the process signal mask as the sigreturn should
1327 do it (XXX: use sigsetjmp) */
1328 sigprocmask(SIG_SETMASK, old_set, NULL);
1329 cpu_loop_exit();
1330 /* never comes here */
1331 return 1;
1332}
1333
1334#elif defined (TARGET_MIPS)
1335static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
1336 int is_write, sigset_t *old_set,
1337 void *puc)
1338{
1339 TranslationBlock *tb;
1340 int ret;
1341
1342 if (cpu_single_env)
1343 env = cpu_single_env; /* XXX: find a correct solution for multithread */
1344#if defined(DEBUG_SIGNAL)
1345 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1346 pc, address, is_write, *(unsigned long *)old_set);
1347#endif
1348 /* XXX: locking issue */
1349 if (is_write && page_unprotect(h2g(address), pc, puc)) {
1350 return 1;
1351 }
1352
1353 /* see if it is an MMU fault */
1354 ret = cpu_mips_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
1355 if (ret < 0)
1356 return 0; /* not an MMU fault */
1357 if (ret == 0)
1358 return 1; /* the MMU fault was handled without causing real CPU fault */
1359
1360 /* now we have a real cpu fault */
1361 tb = tb_find_pc(pc);
1362 if (tb) {
1363 /* the PC is inside the translated code. It means that we have
1364 a virtual CPU fault */
1365 cpu_restore_state(tb, env, pc, puc);
1366 }
1367 if (ret == 1) {
1368#if 0
1369 printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
1370 env->PC, env->error_code, tb);
1371#endif
1372 /* we restore the process signal mask as the sigreturn should
1373 do it (XXX: use sigsetjmp) */
1374 sigprocmask(SIG_SETMASK, old_set, NULL);
1375 cpu_loop_exit();
1376 } else {
1377 /* activate soft MMU for this block */
1378 cpu_resume_from_signal(env, puc);
1379 }
1380 /* never comes here */
1381 return 1;
1382}
1383
1384#elif defined (TARGET_MICROBLAZE)
1385static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
1386 int is_write, sigset_t *old_set,
1387 void *puc)
1388{
1389 TranslationBlock *tb;
1390 int ret;
1391
1392 if (cpu_single_env)
1393 env = cpu_single_env; /* XXX: find a correct solution for multithread */
1394#if defined(DEBUG_SIGNAL)
1395 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1396 pc, address, is_write, *(unsigned long *)old_set);
1397#endif
1398 /* XXX: locking issue */
1399 if (is_write && page_unprotect(h2g(address), pc, puc)) {
1400 return 1;
1401 }
1402
1403 /* see if it is an MMU fault */
1404 ret = cpu_mb_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
1405 if (ret < 0)
1406 return 0; /* not an MMU fault */
1407 if (ret == 0)
1408 return 1; /* the MMU fault was handled without causing real CPU fault */
1409
1410 /* now we have a real cpu fault */
1411 tb = tb_find_pc(pc);
1412 if (tb) {
1413 /* the PC is inside the translated code. It means that we have
1414 a virtual CPU fault */
1415 cpu_restore_state(tb, env, pc, puc);
1416 }
1417 if (ret == 1) {
1418#if 0
1419 printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
1420 env->PC, env->error_code, tb);
1421#endif
1422 /* we restore the process signal mask as the sigreturn should
1423 do it (XXX: use sigsetjmp) */
1424 sigprocmask(SIG_SETMASK, old_set, NULL);
1425 cpu_loop_exit();
1426 } else {
1427 /* activate soft MMU for this block */
1428 cpu_resume_from_signal(env, puc);
1429 }
1430 /* never comes here */
1431 return 1;
1432}
1433
1434#elif defined (TARGET_SH4)
1435static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
1436 int is_write, sigset_t *old_set,
1437 void *puc)
1438{
1439 TranslationBlock *tb;
1440 int ret;
1441
1442 if (cpu_single_env)
1443 env = cpu_single_env; /* XXX: find a correct solution for multithread */
1444#if defined(DEBUG_SIGNAL)
1445 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1446 pc, address, is_write, *(unsigned long *)old_set);
1447#endif
1448 /* XXX: locking issue */
1449 if (is_write && page_unprotect(h2g(address), pc, puc)) {
1450 return 1;
1451 }
1452
1453 /* see if it is an MMU fault */
1454 ret = cpu_sh4_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
1455 if (ret < 0)
1456 return 0; /* not an MMU fault */
1457 if (ret == 0)
1458 return 1; /* the MMU fault was handled without causing real CPU fault */
1459
1460 /* now we have a real cpu fault */
1461 tb = tb_find_pc(pc);
1462 if (tb) {
1463 /* the PC is inside the translated code. It means that we have
1464 a virtual CPU fault */
1465 cpu_restore_state(tb, env, pc, puc);
1466 }
1467#if 0
1468 printf("PF exception: NIP=0x%08x error=0x%x %p\n",
1469 env->nip, env->error_code, tb);
1470#endif
1471 /* we restore the process signal mask as the sigreturn should
1472 do it (XXX: use sigsetjmp) */
1473 sigprocmask(SIG_SETMASK, old_set, NULL);
1474 cpu_loop_exit();
1475 /* never comes here */
1476 return 1;
1477}
1478
1479#elif defined (TARGET_ALPHA)
1480static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
1481 int is_write, sigset_t *old_set,
1482 void *puc)
1483{
1484 TranslationBlock *tb;
1485 int ret;
1486
1487 if (cpu_single_env)
1488 env = cpu_single_env; /* XXX: find a correct solution for multithread */
1489#if defined(DEBUG_SIGNAL)
1490 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1491 pc, address, is_write, *(unsigned long *)old_set);
1492#endif
1493 /* XXX: locking issue */
1494 if (is_write && page_unprotect(h2g(address), pc, puc)) {
1495 return 1;
1496 }
1497
1498 /* see if it is an MMU fault */
1499 ret = cpu_alpha_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
1500 if (ret < 0)
1501 return 0; /* not an MMU fault */
1502 if (ret == 0)
1503 return 1; /* the MMU fault was handled without causing real CPU fault */
1504
1505 /* now we have a real cpu fault */
1506 tb = tb_find_pc(pc);
1507 if (tb) {
1508 /* the PC is inside the translated code. It means that we have
1509 a virtual CPU fault */
1510 cpu_restore_state(tb, env, pc, puc);
1511 }
1512#if 0
1513 printf("PF exception: NIP=0x%08x error=0x%x %p\n",
1514 env->nip, env->error_code, tb);
1515#endif
1516 /* we restore the process signal mask as the sigreturn should
1517 do it (XXX: use sigsetjmp) */
1518 sigprocmask(SIG_SETMASK, old_set, NULL);
1519 cpu_loop_exit();
1520 /* never comes here */
1521 return 1;
1522}
1523#elif defined (TARGET_CRIS)
1524static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
1525 int is_write, sigset_t *old_set,
1526 void *puc)
1527{
1528 TranslationBlock *tb;
1529 int ret;
1530
1531 if (cpu_single_env)
1532 env = cpu_single_env; /* XXX: find a correct solution for multithread */
1533#if defined(DEBUG_SIGNAL)
1534 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1535 pc, address, is_write, *(unsigned long *)old_set);
1536#endif
1537 /* XXX: locking issue */
1538 if (is_write && page_unprotect(h2g(address), pc, puc)) {
1539 return 1;
1540 }
1541
1542 /* see if it is an MMU fault */
1543 ret = cpu_cris_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
1544 if (ret < 0)
1545 return 0; /* not an MMU fault */
1546 if (ret == 0)
1547 return 1; /* the MMU fault was handled without causing real CPU fault */
1548
1549 /* now we have a real cpu fault */
1550 tb = tb_find_pc(pc);
1551 if (tb) {
1552 /* the PC is inside the translated code. It means that we have
1553 a virtual CPU fault */
1554 cpu_restore_state(tb, env, pc, puc);
1555 }
1556 /* we restore the process signal mask as the sigreturn should
1557 do it (XXX: use sigsetjmp) */
1558 sigprocmask(SIG_SETMASK, old_set, NULL);
1559 cpu_loop_exit();
1560 /* never comes here */
1561 return 1;
1562}
1563
1564#else
1565#error unsupported target CPU
1566#endif
1567
1568#if defined(__i386__)
1569
1570#if defined(__APPLE__)
1571# include <sys/ucontext.h>
1572
1573# define EIP_sig(context) (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
1574# define TRAP_sig(context) ((context)->uc_mcontext->es.trapno)
1575# define ERROR_sig(context) ((context)->uc_mcontext->es.err)
1576# define MASK_sig(context) ((context)->uc_sigmask)
1577#elif defined(__OpenBSD__)
1578# define EIP_sig(context) ((context)->sc_eip)
1579# define TRAP_sig(context) ((context)->sc_trapno)
1580# define ERROR_sig(context) ((context)->sc_err)
1581# define MASK_sig(context) ((context)->sc_mask)
1582#else
1583# define EIP_sig(context) ((context)->uc_mcontext.gregs[REG_EIP])
1584# define TRAP_sig(context) ((context)->uc_mcontext.gregs[REG_TRAPNO])
1585# define ERROR_sig(context) ((context)->uc_mcontext.gregs[REG_ERR])
1586# define MASK_sig(context) ((context)->uc_sigmask)
1587#endif
1588
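/* The *_sig() macros above hide the per-OS signal-context layout so that
   cpu_signal_handler() can extract the faulting PC, trap number, error code
   and saved signal mask in a uniform way before calling handle_cpu_signal(). */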
1589int cpu_signal_handler(int host_signum, void *pinfo,
1590 void *puc)
1591{
1592 siginfo_t *info = pinfo;
1593#if defined(__OpenBSD__)
1594 struct sigcontext *uc = puc;
1595#else
1596 struct ucontext *uc = puc;
1597#endif
1598 unsigned long pc;
1599 int trapno;
1600
1601#ifndef REG_EIP
1602/* for glibc 2.1 */
1603#define REG_EIP EIP
1604#define REG_ERR ERR
1605#define REG_TRAPNO TRAPNO
1606#endif
1607 pc = EIP_sig(uc);
1608 trapno = TRAP_sig(uc);
1609 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1610 trapno == 0xe ?
1611 (ERROR_sig(uc) >> 1) & 1 : 0,
1612 &MASK_sig(uc), puc);
1613}
1614
1615#elif defined(__x86_64__)
1616
1617#ifdef __NetBSD__
1618#define PC_sig(context) _UC_MACHINE_PC(context)
1619#define TRAP_sig(context) ((context)->uc_mcontext.__gregs[_REG_TRAPNO])
1620#define ERROR_sig(context) ((context)->uc_mcontext.__gregs[_REG_ERR])
1621#define MASK_sig(context) ((context)->uc_sigmask)
1622#elif defined(__OpenBSD__)
1623#define PC_sig(context) ((context)->sc_rip)
1624#define TRAP_sig(context) ((context)->sc_trapno)
1625#define ERROR_sig(context) ((context)->sc_err)
1626#define MASK_sig(context) ((context)->sc_mask)
1627#else
1628#define PC_sig(context) ((context)->uc_mcontext.gregs[REG_RIP])
1629#define TRAP_sig(context) ((context)->uc_mcontext.gregs[REG_TRAPNO])
1630#define ERROR_sig(context) ((context)->uc_mcontext.gregs[REG_ERR])
1631#define MASK_sig(context) ((context)->uc_sigmask)
1632#endif
1633
1634int cpu_signal_handler(int host_signum, void *pinfo,
1635 void *puc)
1636{
1637 siginfo_t *info = pinfo;
1638 unsigned long pc;
1639#ifdef __NetBSD__
1640 ucontext_t *uc = puc;
1641#elif defined(__OpenBSD__)
1642 struct sigcontext *uc = puc;
1643#else
1644 struct ucontext *uc = puc;
1645#endif
1646
1647 pc = PC_sig(uc);
1648 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1649 TRAP_sig(uc) == 0xe ?
1650 (ERROR_sig(uc) >> 1) & 1 : 0,
1651 &MASK_sig(uc), puc);
1652}
1653
1654#elif defined(_ARCH_PPC)
1655
1656/***********************************************************************
1657 * signal context platform-specific definitions
1658 * From Wine
1659 */
1660#ifdef linux
1661/* All Registers access - only for local access */
1662# define REG_sig(reg_name, context) ((context)->uc_mcontext.regs->reg_name)
1663/* Gpr Registers access */
1664# define GPR_sig(reg_num, context) REG_sig(gpr[reg_num], context)
1665# define IAR_sig(context) REG_sig(nip, context) /* Program counter */
1666# define MSR_sig(context) REG_sig(msr, context) /* Machine State Register (Supervisor) */
1667# define CTR_sig(context) REG_sig(ctr, context) /* Count register */
1668# define XER_sig(context) REG_sig(xer, context) /* User's integer exception register */
1669# define LR_sig(context) REG_sig(link, context) /* Link register */
1670# define CR_sig(context) REG_sig(ccr, context) /* Condition register */
1671/* Float Registers access */
1672# define FLOAT_sig(reg_num, context) (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
1673# define FPSCR_sig(context) (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
1674/* Exception Registers access */
1675# define DAR_sig(context) REG_sig(dar, context)
1676# define DSISR_sig(context) REG_sig(dsisr, context)
1677# define TRAP_sig(context) REG_sig(trap, context)
1678#endif /* linux */
1679
1680#ifdef __APPLE__
1681# include <sys/ucontext.h>
1682typedef struct ucontext SIGCONTEXT;
1683/* All Registers access - only for local access */
1684# define REG_sig(reg_name, context) ((context)->uc_mcontext->ss.reg_name)
1685# define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
1686# define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
1687# define VECREG_sig(reg_name, context) ((context)->uc_mcontext->vs.reg_name)
1688/* Gpr Registers access */
1689# define GPR_sig(reg_num, context) REG_sig(r##reg_num, context)
1690# define IAR_sig(context) REG_sig(srr0, context) /* Program counter */
1691# define MSR_sig(context) REG_sig(srr1, context) /* Machine State Register (Supervisor) */
1692# define CTR_sig(context) REG_sig(ctr, context)
1693# define XER_sig(context) REG_sig(xer, context) /* User's integer exception register */
1694# define LR_sig(context) REG_sig(lr, context) /* Link register */
1695# define CR_sig(context) REG_sig(cr, context) /* Condition register */
1696/* Float Registers access */
1697# define FLOAT_sig(reg_num, context) FLOATREG_sig(fpregs[reg_num], context)
1698# define FPSCR_sig(context) ((double)FLOATREG_sig(fpscr, context))
1699/* Exception Registers access */
1700# define DAR_sig(context) EXCEPREG_sig(dar, context) /* Fault registers for coredump */
1701# define DSISR_sig(context) EXCEPREG_sig(dsisr, context)
1702# define TRAP_sig(context) EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
1703#endif /* __APPLE__ */
1704
1705int cpu_signal_handler(int host_signum, void *pinfo,
1706 void *puc)
1707{
1708 siginfo_t *info = pinfo;
1709 struct ucontext *uc = puc;
1710 unsigned long pc;
1711 int is_write;
1712
1713 pc = IAR_sig(uc);
1714 is_write = 0;
1715#if 0
1716 /* ppc 4xx case */
1717 if (DSISR_sig(uc) & 0x00800000)
1718 is_write = 1;
1719#else
1720 if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
1721 is_write = 1;
1722#endif
1723 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1724 is_write, &uc->uc_sigmask, puc);
1725}
1726
1727#elif defined(__alpha__)
1728
1729int cpu_signal_handler(int host_signum, void *pinfo,
1730 void *puc)
1731{
1732 siginfo_t *info = pinfo;
1733 struct ucontext *uc = puc;
1734 uint32_t *pc = uc->uc_mcontext.sc_pc;
1735 uint32_t insn = *pc;
1736 int is_write = 0;
1737
1738 /* XXX: need kernel patch to get write flag faster */
1739 switch (insn >> 26) {
1740 case 0x0d: // stw
1741 case 0x0e: // stb
1742 case 0x0f: // stq_u
1743 case 0x24: // stf
1744 case 0x25: // stg
1745 case 0x26: // sts
1746 case 0x27: // stt
1747 case 0x2c: // stl
1748 case 0x2d: // stq
1749 case 0x2e: // stl_c
1750 case 0x2f: // stq_c
1751 is_write = 1;
1752 }
1753
1754 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1755 is_write, &uc->uc_sigmask, puc);
1756}
1757#elif defined(__sparc__)
1758
1759int cpu_signal_handler(int host_signum, void *pinfo,
1760 void *puc)
1761{
1762 siginfo_t *info = pinfo;
1763 int is_write;
1764 uint32_t insn;
1765#if !defined(__arch64__) || defined(HOST_SOLARIS)
1766 uint32_t *regs = (uint32_t *)(info + 1);
1767 void *sigmask = (regs + 20);
1768 /* XXX: is there a standard glibc define ? */
1769 unsigned long pc = regs[1];
1770#else
1771#ifdef __linux__
1772 struct sigcontext *sc = puc;
1773 unsigned long pc = sc->sigc_regs.tpc;
1774 void *sigmask = (void *)sc->sigc_mask;
1775#elif defined(__OpenBSD__)
1776 struct sigcontext *uc = puc;
1777 unsigned long pc = uc->sc_pc;
1778 void *sigmask = (void *)(long)uc->sc_mask;
1779#endif
1780#endif
1781
1782 /* XXX: need kernel patch to get write flag faster */
1783 is_write = 0;
1784 insn = *(uint32_t *)pc;
1785 if ((insn >> 30) == 3) {
1786 switch((insn >> 19) & 0x3f) {
1787 case 0x05: // stb
1788 case 0x15: // stba
1789 case 0x06: // sth
1790 case 0x16: // stha
1791 case 0x04: // st
1792 case 0x14: // sta
1793 case 0x07: // std
1794 case 0x17: // stda
1795 case 0x0e: // stx
1796 case 0x1e: // stxa
1797 case 0x24: // stf
1798 case 0x34: // stfa
1799 case 0x27: // stdf
1800 case 0x37: // stdfa
1801 case 0x26: // stqf
1802 case 0x36: // stqfa
1803 case 0x25: // stfsr
1804 case 0x3c: // casa
1805 case 0x3e: // casxa
1806 is_write = 1;
1807 break;
1808 }
1809 }
1810 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1811 is_write, sigmask, NULL);
1812}
1813
1814#elif defined(__arm__)
1815
1816int cpu_signal_handler(int host_signum, void *pinfo,
1817 void *puc)
1818{
1819 siginfo_t *info = pinfo;
1820 struct ucontext *uc = puc;
1821 unsigned long pc;
1822 int is_write;
1823
1824#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
1825 pc = uc->uc_mcontext.gregs[R15];
1826#else
1827 pc = uc->uc_mcontext.arm_pc;
1828#endif
1829 /* XXX: compute is_write */
1830 is_write = 0;
1831 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1832 is_write,
1833 &uc->uc_sigmask, puc);
1834}
1835
1836#elif defined(__mc68000)
1837
1838int cpu_signal_handler(int host_signum, void *pinfo,
1839 void *puc)
1840{
1841 siginfo_t *info = pinfo;
1842 struct ucontext *uc = puc;
1843 unsigned long pc;
1844 int is_write;
1845
1846 pc = uc->uc_mcontext.gregs[16];
1847 /* XXX: compute is_write */
1848 is_write = 0;
1849 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1850 is_write,
1851 &uc->uc_sigmask, puc);
1852}
1853
1854#elif defined(__ia64)
1855
1856#ifndef __ISR_VALID
1857 /* This ought to be in <bits/siginfo.h>... */
1858# define __ISR_VALID 1
1859#endif
1860
1861int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
1862{
1863 siginfo_t *info = pinfo;
1864 struct ucontext *uc = puc;
1865 unsigned long ip;
1866 int is_write = 0;
1867
1868 ip = uc->uc_mcontext.sc_ip;
1869 switch (host_signum) {
1870 case SIGILL:
1871 case SIGFPE:
1872 case SIGSEGV:
1873 case SIGBUS:
1874 case SIGTRAP:
1875 if (info->si_code && (info->si_segvflags & __ISR_VALID))
1876 /* ISR.W (write-access) is bit 33: */
1877 is_write = (info->si_isr >> 33) & 1;
1878 break;
1879
1880 default:
1881 break;
1882 }
1883 return handle_cpu_signal(ip, (unsigned long)info->si_addr,
1884 is_write,
1885 &uc->uc_sigmask, puc);
1886}
1887
1888#elif defined(__s390__)
1889
1890int cpu_signal_handler(int host_signum, void *pinfo,
1891 void *puc)
1892{
1893 siginfo_t *info = pinfo;
1894 struct ucontext *uc = puc;
1895 unsigned long pc;
1896 int is_write;
1897
1898 pc = uc->uc_mcontext.psw.addr;
1899 /* XXX: compute is_write */
1900 is_write = 0;
1901 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1902 is_write, &uc->uc_sigmask, puc);
1903}
1904
1905#elif defined(__mips__)
1906
1907int cpu_signal_handler(int host_signum, void *pinfo,
1908 void *puc)
1909{
1910 siginfo_t *info = pinfo;
1911 struct ucontext *uc = puc;
1912 greg_t pc = uc->uc_mcontext.pc;
1913 int is_write;
1914
1915 /* XXX: compute is_write */
1916 is_write = 0;
1917 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1918 is_write, &uc->uc_sigmask, puc);
1919}
1920
1921#elif defined(__hppa__)
1922
1923int cpu_signal_handler(int host_signum, void *pinfo,
1924 void *puc)
1925{
1926 struct siginfo *info = pinfo;
1927 struct ucontext *uc = puc;
1928 unsigned long pc;
1929 int is_write;
1930
1931 pc = uc->uc_mcontext.sc_iaoq[0];
1932 /* FIXME: compute is_write */
1933 is_write = 0;
1934 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1935 is_write,
1936 &uc->uc_sigmask, puc);
1937}
1938
1939#else
1940
1941#error host CPU specific signal handler needed
1942
1943#endif
1944
1945#endif /* !defined(CONFIG_SOFTMMU) */