VirtualBox

source: vbox/trunk/src/recompiler/cpu-exec.c@76559

Last change on this file since 76559 was 72493, checked in by vboxsync, 6 years ago

IEM,REM,++: Removed code related to IEM_VERIFICATION_MODE and friends because it (1) adds additional complexity and mess, (2) suffers bit rot as it's infrequently used, and (3) prevents using pVCpu->cpum.GstCtx directly.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 52.6 KB
/*
 * i386 emulator main execution loop
 *
 * Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

/*
 * Oracle LGPL Disclaimer: For the avoidance of doubt, except that if any license choice
 * other than GPL or LGPL is available it will apply instead, Oracle elects to use only
 * the Lesser General Public License version 2.1 (LGPLv2) at this time for any software where
 * a choice of LGPL license versions is made available with the language indicating
 * that LGPLv2 or any later version may be used, or where a choice of which version
 * of the LGPL is applied is otherwise unspecified.
 */

#include "config.h"
#include "exec.h"
#include "disas.h"
#include "tcg.h"
#include "kvm.h"
#include "qemu-barrier.h"

#if !defined(CONFIG_SOFTMMU)
#undef EAX
#undef ECX
#undef EDX
#undef EBX
#undef ESP
#undef EBP
#undef ESI
#undef EDI
#undef EIP
#include <signal.h>
#ifdef __linux__
#include <sys/ucontext.h>
#endif
#endif

#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
// Work around ugly bugs in glibc that mangle global register contents
#undef env
#define env cpu_single_env
#endif

int tb_invalidated_flag;

//#define CONFIG_DEBUG_EXEC
//#define DEBUG_SIGNAL

int qemu_cpu_has_work(CPUState *env)
{
    return cpu_has_work(env);
}

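/* Abort execution of the current TB and longjmp back to the setjmp() in
   cpu_exec(); state is communicated through env (e.g. exception_index). */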
void cpu_loop_exit(void)
{
    env->current_tb = NULL;
    longjmp(env->jmp_env, 1);
}

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
#ifdef __linux__
    struct ucontext *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#endif
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
#ifdef __linux__
#ifdef __ia64
        sigprocmask(SIG_SETMASK, (sigset_t *)&uc->uc_sigmask, NULL);
#else
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
#endif
#elif defined(__OpenBSD__)
        sigprocmask(SIG_SETMASK, &uc->sc_mask, NULL);
#endif
    }
#endif
    env->exception_index = -1;
    longjmp(env->jmp_env, 1);
}

/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb)
{
    uintptr_t next_tb;
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long. */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    env->current_tb = tb;
    /* execute the generated code */
#if defined(VBOX) && defined(GCC_WITH_BUGGY_REGPARM)
    tcg_qemu_tb_exec(tb->tc_ptr, next_tb);
#else
    next_tb = tcg_qemu_tb_exec(tb->tc_ptr);
#endif
    env->current_tb = NULL;

    if ((next_tb & 3) == 2) {
        /* Restore PC. This may happen if an async event occurs before
           the TB starts executing. */
        cpu_pc_from_tb(env, tb);
    }
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}

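/* Slow path of the TB lookup: hash the physical PC and walk the physical
   hash chain matching pc, cs_base and flags (and, for blocks spanning a
   page boundary, the second physical page); generate a new TB on a miss. */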
static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    tb_page_addr_t phys_pc, phys_page1, phys_page2;
    target_ulong virt_page2;

    tb_invalidated_flag = 0;

    /* find translated block using physical mappings */
    phys_pc = get_page_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_page_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}

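/* Fast path of the TB lookup: probe the per-CPU virtual-PC jump cache
   first and fall back to tb_find_slow() on a miss. */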
static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(pc, cs_base, flags);
    }
    return tb;
}

static CPUDebugExcpHandler *debug_excp_handler;

CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    CPUDebugExcpHandler *old_handler = debug_excp_handler;

    debug_excp_handler = handler;
    return old_handler;
}

static void cpu_handle_debug_exception(CPUState *env)
{
    CPUWatchpoint *wp;

    if (!env->watchpoint_hit)
        QTAILQ_FOREACH(wp, &env->watchpoints, entry)
            wp->flags &= ~BP_WATCHPOINT_HIT;

    if (debug_excp_handler)
        debug_excp_handler(env);
}

/* main execution loop */

volatile sig_atomic_t exit_request;

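/* Main execution entry point: runs translated blocks for env1 until an
   exception or an exit request terminates the loop. The outer for(;;)
   pairs with the setjmp() below; cpu_loop_exit() longjmps back to it. */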
int cpu_exec(CPUState *env1)
{
    volatile host_reg_t saved_env_reg;
    int ret VBOX_ONLY(= 0), interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    uintptr_t next_tb;

# ifndef VBOX
    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;
# endif /* !VBOX */

    cpu_single_env = env1;

    /* the access to env below is actually saving the global register's
       value, so that files not including target-xyz/exec.h are free to
       use it. */
    QEMU_BUILD_BUG_ON (sizeof (saved_env_reg) != sizeof (env));
    saved_env_reg = (host_reg_t) env;
    barrier();
    env = env1;

    if (unlikely(exit_request)) {
        env->exit_request = 1;
    }

#if defined(TARGET_I386)
    if (!kvm_enabled()) {
        /* put eflags in CPU temporary format */
        CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
        DF = 1 - (2 * ((env->eflags >> 10) & 1));
        CC_OP = CC_OP_EFLAGS;
        env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    }
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
#ifndef VBOX /* VBOX: We need to raise traps and suchlike from the outside. */
    env->exception_index = -1;
#endif /* !VBOX */

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
#undef env
            env = cpu_single_env;
#define env cpu_single_env
#endif
#ifdef VBOX
            env->current_tb = NULL; /* probably not needed, but whatever... */

            /*
             * Check for fatal errors first
             */
            if (env->interrupt_request & CPU_INTERRUPT_RC) {
                env->exception_index = EXCP_RC;
                ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_RC);
                ret = env->exception_index;
                cpu_loop_exit();
            }
#endif

            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
#ifdef VBOX /* because of the above stuff */
                    env->exception_index = -1;
#endif
                    if (ret == EXCP_DEBUG)
                        cpu_handle_debug_exception(env);
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
                    /* successfully delivered */
                    env->old_exception = -1;
#endif
                    ret = env->exception_index;
                    break;
#else
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
# ifdef VBOX
                    RAWEx_ProfileStart(env, STATS_IRQ_HANDLING);
                    Log(("do_interrupt: vec=%#x int=%d pc=%04x:%RGv\n", env->exception_index, env->exception_is_int,
                         env->segs[R_CS].selector, (RTGCPTR)env->exception_next_eip));
# endif /* VBOX */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int && env->exception_is_int != EXCEPTION_IS_INT_VALUE_HARDWARE_IRQ,
                                 env->error_code,
                                 env->exception_next_eip,
                                 env->exception_is_int == EXCEPTION_IS_INT_VALUE_HARDWARE_IRQ);
                    /* successfully delivered */
                    env->old_exception = -1;
# ifdef VBOX
                    RAWEx_ProfileStop(env, STATS_IRQ_HANDLING);
# endif /* VBOX */
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MICROBLAZE)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#endif
                    env->exception_index = -1;
#endif
                }
            }

# ifndef VBOX
            if (kvm_enabled()) {
                kvm_cpu_exec(env);
                longjmp(env->jmp_env, 1);
            }
# endif /* !VBOX */

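            /* next_tb carries the previously executed TB so it can be
               chained to the next one; its low two bits encode the jump
               slot taken (see tb_add_jump below), and the value 2 also
               flags an instruction-count exit. Zero forces a fresh lookup
               with no chaining. */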
            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                               CPU_INTERRUPT_FIQ |
                                               CPU_INTERRUPT_SMI |
                                               CPU_INTERRUPT_NMI);
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
# ifdef VBOX
                    /* Memory registration may post a tlb flush request, process it ASAP. */
                    if (interrupt_request & (CPU_INTERRUPT_EXTERNAL_FLUSH_TLB)) {
                        tlb_flush(env, true); /* (clears the flush flag) */
                    }

                    /* Single instruction exec request; we execute it and return
                       (one way or the other). The caller will always reschedule
                       after this operation! */
                    if (interrupt_request & CPU_INTERRUPT_SINGLE_INSTR)
                    {
                        /* not in flight are we? (if we are, we trapped) */
                        if (!(env->interrupt_request & CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT))
                        {
                            ASMAtomicOrS32((int32_t volatile *)&env->interrupt_request, CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT);
                            env->exception_index = EXCP_SINGLE_INSTR;
                            if (emulate_single_instr(env) == -1)
                                AssertMsgFailed(("REM: emulate_single_instr failed for EIP=%RGv!!\n", (RTGCPTR)env->eip));

                            /* When we receive an external interrupt during execution of this single
                               instruction, then we should stay here. We will leave when we're ready
                               for raw-mode or when interrupted by pending EMT requests. */
                            interrupt_request = env->interrupt_request; /* reload this! */
                            if (   !(interrupt_request & CPU_INTERRUPT_HARD)
                                || !(env->eflags & IF_MASK)
                                ||  (env->hflags & HF_INHIBIT_IRQ_MASK)
                                ||  (env->state & CPU_RAW_HM)
                               )
                            {
                                env->exception_index = ret = EXCP_SINGLE_INSTR;
                                cpu_loop_exit();
                            }
                        }
                        /* Clear CPU_INTERRUPT_SINGLE_INSTR and leave CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT set. */
                        ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_SINGLE_INSTR);
                    }
# endif /* VBOX */

# ifndef VBOX /** @todo reconcile our code with the following... */
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                        svm_check_intercept(SVM_EXIT_INIT);
                        do_cpu_init(env);
                        env->exception_index = EXCP_HALTED;
                        cpu_loop_exit();
                    } else if (interrupt_request & CPU_INTERRUPT_SIPI) {
                        do_cpu_sipi(env);
                    } else if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            svm_check_intercept(SVM_EXIT_SMI);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter();
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
                            next_tb = 0;
                        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
                            env->interrupt_request &= ~CPU_INTERRUPT_MCE;
                            do_interrupt(EXCP12_MCHK, 0, 0, 0, 0);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            svm_check_intercept(SVM_EXIT_INTR);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
#undef env
                            env = cpu_single_env;
#define env cpu_single_env
#endif
                            do_interrupt(intno, 0, 0, 0, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            svm_check_intercept(SVM_EXIT_VINTR);
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt(intno, 0, 0, 0, 1);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
# else /* VBOX */
                    RAWEx_ProfileStart(env, STATS_IRQ_HANDLING);
                    if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                        !(env->hflags & HF_SMM_MASK)) {
                        env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                        do_smm_enter();
                        next_tb = 0;
                    }
                    else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                             (env->eflags & IF_MASK) &&
                             !(env->hflags & HF_INHIBIT_IRQ_MASK))
                    {
                        /* if hardware interrupt pending, we execute it */
                        int intno;
                        ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_HARD);
                        intno = cpu_get_pic_interrupt(env);
                        if (intno >= 0)
                        {
                            Log(("do_interrupt %d\n", intno));
                            do_interrupt(intno, 0, 0, 0, 1);
                        }
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
# endif /* VBOX */
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        !(env->CP0_Status & (1 << CP0St_EXL)) &&
                        !(env->CP0_Status & (1 << CP0St_ERL)) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        if (cpu_interrupts_enabled(env) &&
                            env->interrupt_index > 0) {
                            int pil = env->interrupt_index & 0xf;
                            int type = env->interrupt_index & 0xf0;

                            if (((type == TT_EXTINT) &&
                                 cpu_pil_allowed(env, pil)) ||
                                type != TT_EXTINT) {
                                env->exception_index = env->interrupt_index;
                                do_interrupt(env);
                                next_tb = 0;
                            }
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC. On real hardware the load causes the
                       return to occur. The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address. */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)
                        && !env->locked_irq) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI
                        && (env->pregs[PR_CCS] & M_FLAG)) {
                        env->exception_index = EXCP_NMI;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point. Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled. */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
#ifndef VBOX
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
#else /* VBOX */
                        ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_EXITTB);
#endif /* VBOX */
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
#ifdef VBOX
                    RAWEx_ProfileStop(env, STATS_IRQ_HANDLING);
                    if (interrupt_request & CPU_INTERRUPT_RC) {
                        env->exception_index = EXCP_RC;
                        ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_RC);
                        ret = env->exception_index;
                        cpu_loop_exit();
                    }
                    if (interrupt_request & (CPU_INTERRUPT_EXTERNAL_EXIT)) {
                        ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~(CPU_INTERRUPT_EXTERNAL_EXIT));
                        env->exit_request = 1;
                    }
#endif
                }
                if (unlikely(env->exit_request)) {
                    env->exit_request = 0;
                    env->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit();
                }

#ifdef VBOX
                /*
                 * Check if the CPU state allows us to execute the code in raw-mode.
                 */
                RAWEx_ProfileStart(env, STATS_RAW_CHECK);
                if (remR3CanExecuteRaw(env,
                                       env->eip + env->segs[R_CS].base,
                                       env->hflags | (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK)),
                                       &env->exception_index))
                {
                    RAWEx_ProfileStop(env, STATS_RAW_CHECK);
                    ret = env->exception_index;
                    cpu_loop_exit();
                }
                RAWEx_ProfileStop(env, STATS_RAW_CHECK);
#endif /* VBOX */

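                /* Optional per-block dump of the CPU state, gated on the
                   CPU_LOG_TB_CPU log mask. */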
#if defined(DEBUG_DISAS) || defined(CONFIG_DEBUG_EXEC)
                if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
#if defined(TARGET_I386)
                    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
                    log_cpu_state(env, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    log_cpu_state(env, 0);
#else
                    log_cpu_state(env, 0);
#endif
                }
#endif /* DEBUG_DISAS || CONFIG_DEBUG_EXEC */
#ifdef VBOX
                RAWEx_ProfileStart(env, STATS_TLB_LOOKUP);
#endif /*VBOX*/
                spin_lock(&tb_lock);
                tb = tb_find_fast();
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tb_invalidated_flag = 0;
                }
#ifdef CONFIG_DEBUG_EXEC
                qemu_log_mask(CPU_LOG_EXEC, "Trace %p [" TARGET_FMT_lx "] %s\n",
                              (void *)tb->tc_ptr, tb->pc,
                              lookup_symbol(tb->pc));
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
#ifndef VBOX
                if (next_tb != 0 && tb->page_addr[1] == -1) {
#else /* VBOX */
                if (next_tb != 0 && !(tb->cflags & CF_RAW_MODE) && tb->page_addr[1] == -1) {
#endif /* VBOX */
                    tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                }
                spin_unlock(&tb_lock);
#ifdef VBOX
                RAWEx_ProfileStop(env, STATS_TLB_LOOKUP);
#endif

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                env->current_tb = tb;
                barrier();
                if (likely(!env->exit_request)) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
#ifdef VBOX
                    RAWEx_ProfileStart(env, STATS_QEMU_RUN_EMULATED_CODE);
#endif
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
#undef env
                    env = cpu_single_env;
#define env cpu_single_env
#endif
                    Log5(("REM: tb=%p tc_ptr=%p %04x:%08RGv\n", tb, tc_ptr, env->segs[R_CS].selector, (RTGCPTR)env->eip));
#if defined(VBOX) && defined(GCC_WITH_BUGGY_REGPARM)
                    tcg_qemu_tb_exec(tc_ptr, next_tb);
#else
                    next_tb = tcg_qemu_tb_exec(tc_ptr);
#endif
                    if (next_tb)
                        Log5(("REM: next_tb=%p %04x:%08RGv\n", next_tb, env->segs[R_CS].selector, (RTGCPTR)env->eip));
#ifdef VBOX
                    RAWEx_ProfileStop(env, STATS_QEMU_RUN_EMULATED_CODE);
#endif
                    if ((next_tb & 3) == 2) {
                        /* Instruction counter expired. */
                        int insns_left;
                        tb = (TranslationBlock *)(uintptr_t)(next_tb & ~3);
                        /* Restore PC. */
                        cpu_pc_from_tb(env, tb);
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution. */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions. */
                                cpu_exec_nocache(insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit();
                        }
                    }
                }
                env->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* for(;;) */
        }
#ifdef VBOX_HIGH_RES_TIMERS_HACK
        /* NULL the current_tb here so cpu_interrupt() doesn't do anything
           unnecessary (like crashing during single instruction emulation).
           Note! Don't use env1->pVM here, the code wouldn't run with
           gcc-4.4/amd64 anymore, see #3883. */
        env->current_tb = NULL;
        if (    !(env->interrupt_request & ( CPU_INTERRUPT_DEBUG | CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_RC
                                           | CPU_INTERRUPT_SINGLE_INSTR | CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT))
            &&  (   (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_TIMER)
                 || TMTimerPollBool(env->pVM, env->pVCpu)) ) {
            ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_EXTERNAL_TIMER);
            remR3ProfileStart(STATS_QEMU_RUN_TIMERS);
            TMR3TimerQueuesDo(env->pVM);
            remR3ProfileStop(STATS_QEMU_RUN_TIMERS);
        }
#endif
    } /* for(;;) */


#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state? */
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* restore global registers */
    barrier();
    env = (void *) saved_env_reg;

# ifndef VBOX /* we might be using it elsewhere; we only have one. */
    /* fail safe : never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
# endif
    return ret;
}

/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it yields to MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}

#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

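/* User-mode helpers callable from outside cpu_exec(): each temporarily
   installs the caller's CPUX86State as the global env pointer. */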
void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        helper_load_seg(seg_reg, selector);
    }
    env = saved_env;
}

void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave(ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor(ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 */

#if !defined(CONFIG_SOFTMMU)

#if defined(TARGET_I386)
#define EXCEPTION_ACTION raise_exception_err(env->exception_index, env->error_code)
#else
#define EXCEPTION_ACTION cpu_loop_exit()
#endif

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and otherwise 0. 'old_set' is the
   signal set which should be restored */
static inline int handle_cpu_signal(uintptr_t pc, uintptr_t address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }

    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    EXCEPTION_ACTION;

    /* never comes here */
    return 1;
}

#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)   (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)    ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context)   ((context)->uc_mcontext->es.err)
# define MASK_sig(context)    ((context)->uc_sigmask)
#elif defined (__NetBSD__)
# include <ucontext.h>

# define EIP_sig(context)     ((context)->uc_mcontext.__gregs[_REG_EIP])
# define TRAP_sig(context)    ((context)->uc_mcontext.__gregs[_REG_TRAPNO])
# define ERROR_sig(context)   ((context)->uc_mcontext.__gregs[_REG_ERR])
# define MASK_sig(context)    ((context)->uc_sigmask)
#elif defined (__FreeBSD__) || defined(__DragonFly__)
# include <ucontext.h>

# define EIP_sig(context)   (*((unsigned long*)&(context)->uc_mcontext.mc_eip))
# define TRAP_sig(context)    ((context)->uc_mcontext.mc_trapno)
# define ERROR_sig(context)   ((context)->uc_mcontext.mc_err)
# define MASK_sig(context)    ((context)->uc_sigmask)
#elif defined(__OpenBSD__)
# define EIP_sig(context)     ((context)->sc_eip)
# define TRAP_sig(context)    ((context)->sc_trapno)
# define ERROR_sig(context)   ((context)->sc_err)
# define MASK_sig(context)    ((context)->sc_mask)
#else
# define EIP_sig(context)     ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)    ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context)   ((context)->uc_mcontext.gregs[REG_ERR])
# define MASK_sig(context)    ((context)->uc_sigmask)
#endif

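/* Host SIGSEGV glue for 32-bit x86: extract the faulting PC from the
   signal context and, for page faults (trap 0xe), derive the write flag
   from bit 1 of the page-fault error code. */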
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
#if defined(__NetBSD__) || defined (__FreeBSD__) || defined(__DragonFly__)
    ucontext_t *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#else
    struct ucontext *uc = puc;
#endif
    uintptr_t pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
    return handle_cpu_signal(pc, (uintptr_t)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &MASK_sig(uc), puc);
}

#elif defined(__x86_64__)

#ifdef __NetBSD__
#define PC_sig(context)       _UC_MACHINE_PC(context)
#define TRAP_sig(context)     ((context)->uc_mcontext.__gregs[_REG_TRAPNO])
#define ERROR_sig(context)    ((context)->uc_mcontext.__gregs[_REG_ERR])
#define MASK_sig(context)     ((context)->uc_sigmask)
#elif defined(__OpenBSD__)
#define PC_sig(context)       ((context)->sc_rip)
#define TRAP_sig(context)     ((context)->sc_trapno)
#define ERROR_sig(context)    ((context)->sc_err)
#define MASK_sig(context)     ((context)->sc_mask)
#elif defined (__FreeBSD__) || defined(__DragonFly__)
#include <ucontext.h>

#define PC_sig(context)    (*((unsigned long*)&(context)->uc_mcontext.mc_rip))
#define TRAP_sig(context)     ((context)->uc_mcontext.mc_trapno)
#define ERROR_sig(context)    ((context)->uc_mcontext.mc_err)
#define MASK_sig(context)     ((context)->uc_sigmask)
#else
#define PC_sig(context)       ((context)->uc_mcontext.gregs[REG_RIP])
#define TRAP_sig(context)     ((context)->uc_mcontext.gregs[REG_TRAPNO])
#define ERROR_sig(context)    ((context)->uc_mcontext.gregs[REG_ERR])
#define MASK_sig(context)     ((context)->uc_sigmask)
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    uintptr_t pc;
#if defined(__NetBSD__) || defined (__FreeBSD__) || defined(__DragonFly__)
    ucontext_t *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#else
    struct ucontext *uc = puc;
#endif

    pc = PC_sig(uc);
    return handle_cpu_signal(pc, (uintptr_t)info->si_addr,
                             TRAP_sig(uc) == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &MASK_sig(uc), puc);
}

#elif defined(_ARCH_PPC)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)    ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)     REG_sig(gpr[reg_num], context)
# define IAR_sig(context)              REG_sig(nip, context)  /* Program counter */
# define MSR_sig(context)              REG_sig(msr, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)              REG_sig(ctr, context)  /* Count register */
# define XER_sig(context)              REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)               REG_sig(link, context) /* Link register */
# define CR_sig(context)               REG_sig(ccr, context)  /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)   (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)            (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)              REG_sig(dar, context)
# define DSISR_sig(context)            REG_sig(dsisr, context)
# define TRAP_sig(context)             REG_sig(trap, context)
#endif /* linux */

#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <ucontext.h>
# define IAR_sig(context)              ((context)->uc_mcontext.mc_srr0)
# define MSR_sig(context)              ((context)->uc_mcontext.mc_srr1)
# define CTR_sig(context)              ((context)->uc_mcontext.mc_ctr)
# define XER_sig(context)              ((context)->uc_mcontext.mc_xer)
# define LR_sig(context)               ((context)->uc_mcontext.mc_lr)
# define CR_sig(context)               ((context)->uc_mcontext.mc_cr)
/* Exception Registers access */
# define DAR_sig(context)              ((context)->uc_mcontext.mc_dar)
# define DSISR_sig(context)            ((context)->uc_mcontext.mc_dsisr)
# define TRAP_sig(context)             ((context)->uc_mcontext.mc_exc)
#endif /* __FreeBSD__|| __FreeBSD_kernel__ */

#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)      ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)   ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)       REG_sig(r##reg_num, context)
# define IAR_sig(context)                REG_sig(srr0, context) /* Program counter */
# define MSR_sig(context)                REG_sig(srr1, context) /* Machine State Register (Supervisor) */
# define CTR_sig(context)                REG_sig(ctr, context)
# define XER_sig(context)                REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)                 REG_sig(lr, context)   /* Link register */
# define CR_sig(context)                 REG_sig(cr, context)   /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)     FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)              ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)                EXCEPREG_sig(dar, context)       /* Fault registers for coredump */
# define DSISR_sig(context)              EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)               EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
    ucontext_t *uc = puc;
#else
    struct ucontext *uc = puc;
#endif
    uintptr_t pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (uintptr_t)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: // stw
    case 0x0e: // stb
    case 0x0f: // stq_u
    case 0x24: // stf
    case 0x25: // stg
    case 0x26: // sts
    case 0x27: // stt
    case 0x2c: // stl
    case 0x2d: // stq
    case 0x2e: // stl_c
    case 0x2f: // stq_c
        is_write = 1;
    }

    return handle_cpu_signal(pc, (uintptr_t)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    int is_write;
    uint32_t insn;
#if !defined(__arch64__) || defined(CONFIG_SOLARIS)
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    /* XXX: is there a standard glibc define ? */
    uintptr_t pc = regs[1];
#else
#ifdef __linux__
    struct sigcontext *sc = puc;
    uintptr_t pc = sc->sigc_regs.tpc;
    void *sigmask = (void *)sc->sigc_mask;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
    uintptr_t pc = uc->sc_pc;
    void *sigmask = (void *)(uintptr_t)uc->sc_mask;
#endif
#endif

    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
        switch((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x15: // stba
        case 0x06: // sth
        case 0x16: // stha
        case 0x04: // st
        case 0x14: // sta
        case 0x07: // std
        case 0x17: // stda
        case 0x0e: // stx
        case 0x1e: // stxa
        case 0x24: // stf
        case 0x34: // stfa
        case 0x27: // stdf
        case 0x37: // stdfa
        case 0x26: // stqf
        case 0x36: // stqfa
        case 0x25: // stfsr
        case 0x3c: // casa
        case 0x3e: // casxa
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (uintptr_t)info->si_addr,
                             is_write, sigmask, NULL);
}

#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uintptr_t pc;
    int is_write;

#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
    pc = uc->uc_mcontext.gregs[R15];
#else
    pc = uc->uc_mcontext.arm_pc;
#endif
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (uintptr_t)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uintptr_t pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (uintptr_t)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uintptr_t ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
    case SIGILL:
    case SIGFPE:
    case SIGSEGV:
    case SIGBUS:
    case SIGTRAP:
        if (info->si_code && (info->si_segvflags & __ISR_VALID))
            /* ISR.W (write-access) is bit 33: */
            is_write = (info->si_isr >> 33) & 1;
        break;

    default:
        break;
    }
    return handle_cpu_signal(ip, (uintptr_t)info->si_addr,
                             is_write,
                             (sigset_t *)&uc->uc_sigmask, puc);
}

#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uintptr_t pc;
    uint16_t *pinsn;
    int is_write = 0;

    pc = uc->uc_mcontext.psw.addr;

    /* ??? On linux, the non-rt signal handler has 4 (!) arguments instead
       of the normal 2 arguments. The 3rd argument contains the "int_code"
       from the hardware which does in fact contain the is_write value.
       The rt signal handler, as far as I can tell, does not give this value
       at all. Not that we could get to it from here even if it were. */
    /* ??? This is not even close to complete, since it ignores all
       of the read-modify-write instructions. */
    pinsn = (uint16_t *)pc;
    switch (pinsn[0] >> 8) {
    case 0x50: /* ST */
    case 0x42: /* STC */
    case 0x40: /* STH */
        is_write = 1;
        break;
    case 0xc4: /* RIL format insns */
        switch (pinsn[0] & 0xf) {
        case 0xf: /* STRL */
        case 0xb: /* STGRL */
        case 0x7: /* STHRL */
            is_write = 1;
        }
        break;
    case 0xe3: /* RXY format insns */
        switch (pinsn[2] & 0xff) {
        case 0x50: /* STY */
        case 0x24: /* STG */
        case 0x72: /* STCY */
        case 0x70: /* STHY */
        case 0x8e: /* STPQ */
        case 0x3f: /* STRVH */
        case 0x3e: /* STRV */
        case 0x2f: /* STRVG */
            is_write = 1;
        }
        break;
    }
    return handle_cpu_signal(pc, (uintptr_t)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__mips__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (uintptr_t)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__hppa__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    struct siginfo *info = pinfo;
    struct ucontext *uc = puc;
    uintptr_t pc = uc->uc_mcontext.sc_iaoq[0];
    uint32_t insn = *(uint32_t *)pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster. */
    switch (insn >> 26) {
    case 0x1a: /* STW */
    case 0x19: /* STH */
    case 0x18: /* STB */
    case 0x1b: /* STWM */
        is_write = 1;
        break;

    case 0x09: /* CSTWX, FSTWX, FSTWS */
    case 0x0b: /* CSTDX, FSTDX, FSTDS */
        /* Distinguish from coprocessor load ... */
        is_write = (insn >> 9) & 1;
        break;

    case 0x03:
        switch ((insn >> 6) & 15) {
        case 0xa: /* STWS */
        case 0x9: /* STHS */
        case 0x8: /* STBS */
        case 0xe: /* STWAS */
        case 0xc: /* STBYS */
            is_write = 1;
        }
        break;
    }

    return handle_cpu_signal(pc, (uintptr_t)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */