VirtualBox

source: vbox/trunk/src/recompiler_new/cpu-exec.c @ 13382

Last change on this file since 13382 was 13382, checked in by vboxsync, 16 years ago

more MSVC-related stuff

  • Property svn:eol-style set to native
File size: 60.1 KB
1/*
2 * i386 emulator main execution loop
3 *
4 * Copyright (c) 2003-2005 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21/*
22 * Sun LGPL Disclaimer: For the avoidance of doubt, except that if any license choice
23 * other than GPL or LGPL is available it will apply instead, Sun elects to use only
24 * the Lesser General Public License version 2.1 (LGPLv2) at this time for any software where
25 * a choice of LGPL license versions is made available with the language indicating
26 * that LGPLv2 or any later version may be used, or where a choice of which version
27 * of the LGPL is applied is otherwise unspecified.
28 */
29#include "config.h"
30#define CPU_NO_GLOBAL_REGS
31#include "exec.h"
32#include "disas.h"
33#include "tcg.h"
34
35#if !defined(CONFIG_SOFTMMU)
36#undef EAX
37#undef ECX
38#undef EDX
39#undef EBX
40#undef ESP
41#undef EBP
42#undef ESI
43#undef EDI
44#undef EIP
45#include <signal.h>
46#include <sys/ucontext.h>
47#endif
48
49#if defined(__sparc__) && !defined(HOST_SOLARIS)
50// Work around ugly bugs in glibc that mangle global register contents
51#undef env
52#define env cpu_single_env
53#endif
54
55int tb_invalidated_flag;
56
57//#define DEBUG_EXEC
58//#define DEBUG_SIGNAL
59
60
61void cpu_loop_exit(void)
62{
63 /* NOTE: the registers at this point must be saved by hand because
64 longjmp restores them */
65 regs_to_env();
66 longjmp(env->jmp_env, 1);
67}
68
69#if !(defined(TARGET_SPARC) || defined(TARGET_SH4) || defined(TARGET_M68K))
70#define reg_T2
71#endif
72
73/* exit the current TB from a signal handler. The host registers are
74 restored in a state compatible with the CPU emulator
75 */
76void cpu_resume_from_signal(CPUState *env1, void *puc)
77{
78#if !defined(CONFIG_SOFTMMU)
79 struct ucontext *uc = puc;
80#endif
81
82 env = env1;
83
84 /* XXX: restore cpu registers saved in host registers */
85
86#if !defined(CONFIG_SOFTMMU)
87 if (puc) {
88 /* XXX: use siglongjmp ? */
89 sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
90 }
91#endif
92 longjmp(env->jmp_env, 1);
93}
94
95/* Execute the code without caching the generated code. An interpreter
96 could be used if available. */
97static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb)
98{
99 unsigned long next_tb;
100 TranslationBlock *tb;
101
102 /* Should never happen.
103 We only end up here when an existing TB is too long. */
104 if (max_cycles > CF_COUNT_MASK)
105 max_cycles = CF_COUNT_MASK;
106
107 tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
108 max_cycles);
109 env->current_tb = tb;
110 /* execute the generated code */
111 next_tb = tcg_qemu_tb_exec(tb->tc_ptr);
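 /* The value returned by tcg_qemu_tb_exec() carries the address of the last
    executed TB in its upper bits, with a two-bit status in the low bits;
    the value 2 means the instruction counter expired before the block completed. */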
112
113 if ((next_tb & 3) == 2) {
114 /* Restore PC. This may happen if an async event occurs before
115 the TB starts executing. */
116 CPU_PC_FROM_TB(env, tb);
117 }
118 tb_phys_invalidate(tb, -1);
119 tb_free(tb);
120}
121
122static TranslationBlock *tb_find_slow(target_ulong pc,
123 target_ulong cs_base,
124 uint64_t flags)
125{
126 TranslationBlock *tb, **ptb1;
127 unsigned int h;
128 target_ulong phys_pc, phys_page1, phys_page2, virt_page2;
129
130 tb_invalidated_flag = 0;
131
132 regs_to_env(); /* XXX: do it just before cpu_gen_code() */
133
134 /* find translated block using physical mappings */
135 phys_pc = get_phys_addr_code(env, pc);
136 phys_page1 = phys_pc & TARGET_PAGE_MASK;
137 phys_page2 = -1;
138 h = tb_phys_hash_func(phys_pc);
139 ptb1 = &tb_phys_hash[h];
140 for(;;) {
141 tb = *ptb1;
142 if (!tb)
143 goto not_found;
144 if (tb->pc == pc &&
145 tb->page_addr[0] == phys_page1 &&
146 tb->cs_base == cs_base &&
147 tb->flags == flags) {
148 /* check next page if needed */
149 if (tb->page_addr[1] != -1) {
150 virt_page2 = (pc & TARGET_PAGE_MASK) +
151 TARGET_PAGE_SIZE;
152 phys_page2 = get_phys_addr_code(env, virt_page2);
153 if (tb->page_addr[1] == phys_page2)
154 goto found;
155 } else {
156 goto found;
157 }
158 }
159 ptb1 = &tb->phys_hash_next;
160 }
161 not_found:
162 /* if no translated code available, then translate it now */
163 tb = tb_gen_code(env, pc, cs_base, flags, 0);
164
165 found:
166 /* we add the TB to the virtual pc hash table */
167 env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
168 return tb;
169}
170
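/* TB lookup: tb_find_fast() first probes the virtual-PC indexed tb_jmp_cache;
   on a miss, or if pc/cs_base/flags do not match, it falls back to
   tb_find_slow(), which searches the physical-address hash chain and
   translates a new block when none is found. */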
171#ifndef VBOX
172static inline TranslationBlock *tb_find_fast(void)
173#else
174DECLINLINE(TranslationBlock *) tb_find_fast(void)
175#endif
176{
177 TranslationBlock *tb;
178 target_ulong cs_base, pc;
179 uint64_t flags;
180
181 /* we record a subset of the CPU state. It will
182 always be the same before a given translated block
183 is executed. */
184#if defined(TARGET_I386)
185 flags = env->hflags;
186 flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
187 cs_base = env->segs[R_CS].base;
188 pc = cs_base + env->eip;
189#elif defined(TARGET_ARM)
190 flags = env->thumb | (env->vfp.vec_len << 1)
191 | (env->vfp.vec_stride << 4);
192 if ((env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR)
193 flags |= (1 << 6);
194 if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30))
195 flags |= (1 << 7);
196 flags |= (env->condexec_bits << 8);
197 cs_base = 0;
198 pc = env->regs[15];
199#elif defined(TARGET_SPARC)
200#ifdef TARGET_SPARC64
201 // AM . Combined FPU enable bits . PRIV . DMMU enabled . IMMU enabled
202 flags = ((env->pstate & PS_AM) << 2)
203 | (((env->pstate & PS_PEF) >> 1) | ((env->fprs & FPRS_FEF) << 2))
204 | (env->pstate & PS_PRIV) | ((env->lsu & (DMMU_E | IMMU_E)) >> 2);
205#else
206 // FPU enable . Supervisor
207 flags = (env->psref << 4) | env->psrs;
208#endif
209 cs_base = env->npc;
210 pc = env->pc;
211#elif defined(TARGET_PPC)
212 flags = env->hflags;
213 cs_base = 0;
214 pc = env->nip;
215#elif defined(TARGET_MIPS)
216 flags = env->hflags & (MIPS_HFLAG_TMASK | MIPS_HFLAG_BMASK);
217 cs_base = 0;
218 pc = env->active_tc.PC;
219#elif defined(TARGET_M68K)
220 flags = (env->fpcr & M68K_FPCR_PREC) /* Bit 6 */
221 | (env->sr & SR_S) /* Bit 13 */
222 | ((env->macsr >> 4) & 0xf); /* Bits 0-3 */
223 cs_base = 0;
224 pc = env->pc;
225#elif defined(TARGET_SH4)
226 flags = (env->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL
227 | DELAY_SLOT_TRUE | DELAY_SLOT_CLEARME)) /* Bits 0- 3 */
228 | (env->fpscr & (FPSCR_FR | FPSCR_SZ | FPSCR_PR)) /* Bits 19-21 */
229 | (env->sr & (SR_MD | SR_RB)); /* Bits 29-30 */
230 cs_base = 0;
231 pc = env->pc;
232#elif defined(TARGET_ALPHA)
233 flags = env->ps;
234 cs_base = 0;
235 pc = env->pc;
236#elif defined(TARGET_CRIS)
237 flags = env->pregs[PR_CCS] & (S_FLAG | P_FLAG | U_FLAG | X_FLAG);
238 flags |= env->dslot;
239 cs_base = 0;
240 pc = env->pc;
241#else
242#error unsupported CPU
243#endif
244 tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
245 if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
246 tb->flags != flags)) {
247 tb = tb_find_slow(pc, cs_base, flags);
248 }
249 return tb;
250}
251
252/* main execution loop */
253
254#ifdef VBOX
255
256int cpu_exec(CPUState *env1)
257{
258#define DECLARE_HOST_REGS 1
259#include "hostregs_helper.h"
260 int ret, interrupt_request;
261 TranslationBlock *tb;
262 uint8_t *tc_ptr;
263 unsigned long next_tb;
264
265 if (cpu_halted(env1) == EXCP_HALTED)
266 return EXCP_HALTED;
267
268 cpu_single_env = env1;
269
270 /* first we save global registers */
271#define SAVE_HOST_REGS 1
272#include "hostregs_helper.h"
273 env = env1;
274
275 env_to_regs();
276#if defined(TARGET_I386)
277 /* put eflags in CPU temporary format */
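 /* Condition codes are evaluated lazily: CC_SRC and CC_OP record enough state
    to recompute the arithmetic flags on demand (cc_table[CC_OP].compute_all()
    below rebuilds them), and DF is cached as +1/-1 for string instructions. */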
278 CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
279 DF = 1 - (2 * ((env->eflags >> 10) & 1));
280 CC_OP = CC_OP_EFLAGS;
281 env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
282#elif defined(TARGET_SPARC)
283#elif defined(TARGET_M68K)
284 env->cc_op = CC_OP_FLAGS;
285 env->cc_dest = env->sr & 0xf;
286 env->cc_x = (env->sr >> 4) & 1;
287#elif defined(TARGET_ALPHA)
288#elif defined(TARGET_ARM)
289#elif defined(TARGET_PPC)
290#elif defined(TARGET_MIPS)
291#elif defined(TARGET_SH4)
292#elif defined(TARGET_CRIS)
293 /* XXXXX */
294#else
295#error unsupported target CPU
296#endif
297#ifndef VBOX /* VBOX: We need to raise traps and suchlike from the outside. */
298 env->exception_index = -1;
299#endif
300
301 /* prepare setjmp context for exception handling */
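 /* cpu_loop_exit() longjmp()s back to this setjmp; the outer loop then either
    re-enters translated-code execution or, when exception_index requests an
    exit, breaks out and returns to the caller. */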
302 for(;;) {
303 if (setjmp(env->jmp_env) == 0)
304 {
305 env->current_tb = NULL;
306 VMMR3Unlock(env->pVM);
307 VMMR3Lock(env->pVM);
308
309 /*
310 * Check for fatal errors first
311 */
312 if (env->interrupt_request & CPU_INTERRUPT_RC) {
313 env->exception_index = EXCP_RC;
314 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_RC);
315 ret = env->exception_index;
316 cpu_loop_exit();
317 }
318
319 /* if an exception is pending, we execute it here */
320 if (env->exception_index >= 0) {
321 Assert(!env->user_mode_only);
322 if (env->exception_index >= EXCP_INTERRUPT) {
323 /* exit request from the cpu execution loop */
324 ret = env->exception_index;
325 break;
326 } else {
327 /* simulate a real cpu exception. On i386, it can
328 trigger new exceptions, but we do not handle
329 double or triple faults yet. */
330 RAWEx_ProfileStart(env, STATS_IRQ_HANDLING);
331 Log(("do_interrupt %d %d %VGv\n", env->exception_index, env->exception_is_int, env->exception_next_eip));
332 do_interrupt(env->exception_index,
333 env->exception_is_int,
334 env->error_code,
335 env->exception_next_eip, 0);
336 /* successfully delivered */
337 env->old_exception = -1;
338 RAWEx_ProfileStop(env, STATS_IRQ_HANDLING);
339 }
340 env->exception_index = -1;
341 }
342
343 next_tb = 0; /* force lookup of first TB */
344 for(;;)
345 {
346 interrupt_request = env->interrupt_request;
347 if (__builtin_expect(interrupt_request, 0))
348 {
349 /** @todo: reconcile with what QEMU really does */
350
351 /* Single instruction exec request: we execute it and return (one way or the other).
352 The caller will always reschedule after doing this operation! */
353 if (interrupt_request & CPU_INTERRUPT_SINGLE_INSTR)
354 {
355 /* not in flight are we? (if we are, we trapped) */
356 if (!(env->interrupt_request & CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT))
357 {
358 ASMAtomicOrS32((int32_t volatile *)&env->interrupt_request, CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT);
359 env->exception_index = EXCP_SINGLE_INSTR;
360 if (emulate_single_instr(env) == -1)
361 AssertMsgFailed(("REM: emulate_single_instr failed for EIP=%VGv!!\n", env->eip));
362
363 /* When we receive an external interrupt during execution of this single
364 instruction, we should stay here. We will leave when we're ready
365 for raw-mode or when interrupted by pending EMT requests. */
366 interrupt_request = env->interrupt_request; /* reload this! */
367 if ( !(interrupt_request & CPU_INTERRUPT_HARD)
368 || !(env->eflags & IF_MASK)
369 || (env->hflags & HF_INHIBIT_IRQ_MASK)
370 || (env->state & CPU_RAW_HWACC)
371 )
372 {
373 env->exception_index = ret = EXCP_SINGLE_INSTR;
374 cpu_loop_exit();
375 }
376 }
377 /* Clear CPU_INTERRUPT_SINGLE_INSTR and leave CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT set. */
378 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_SINGLE_INSTR);
379 }
380
381 RAWEx_ProfileStart(env, STATS_IRQ_HANDLING);
382 if ((interrupt_request & CPU_INTERRUPT_SMI) &&
383 !(env->hflags & HF_SMM_MASK)) {
384 env->interrupt_request &= ~CPU_INTERRUPT_SMI;
385 do_smm_enter();
386 next_tb = 0;
387 }
388 else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
389 (env->eflags & IF_MASK) &&
390 !(env->hflags & HF_INHIBIT_IRQ_MASK))
391 {
392 /* if hardware interrupt pending, we execute it */
393 int intno;
394 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_HARD);
395 intno = cpu_get_pic_interrupt(env);
396 if (intno >= 0)
397 {
398 Log(("do_interrupt %d\n", intno));
399 do_interrupt(intno, 0, 0, 0, 1);
400 }
401 /* ensure that no TB jump will be modified as
402 the program flow was changed */
403 next_tb = 0;
404 }
405 if (env->interrupt_request & CPU_INTERRUPT_EXITTB)
406 {
407 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_EXITTB);
408 /* ensure that no TB jump will be modified as
409 the program flow was changed */
410 next_tb = 0;
411 }
412 RAWEx_ProfileStop(env, STATS_IRQ_HANDLING);
413 if (interrupt_request & CPU_INTERRUPT_EXIT)
414 {
415 env->exception_index = EXCP_INTERRUPT;
416 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_EXIT);
417 ret = env->exception_index;
418 cpu_loop_exit();
419 }
420 if (interrupt_request & CPU_INTERRUPT_RC)
421 {
422 env->exception_index = EXCP_RC;
423 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_RC);
424 ret = env->exception_index;
425 cpu_loop_exit();
426 }
427 }
428
429 /*
430 * Check whether the CPU state allows us to execute the code in raw-mode.
431 */
432 RAWEx_ProfileStart(env, STATS_RAW_CHECK);
433 if (remR3CanExecuteRaw(env,
434 env->eip + env->segs[R_CS].base,
435 env->hflags | (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK)),
436 &env->exception_index))
437 {
438 RAWEx_ProfileStop(env, STATS_RAW_CHECK);
439 ret = env->exception_index;
440 cpu_loop_exit();
441 }
442 RAWEx_ProfileStop(env, STATS_RAW_CHECK);
443
444 RAWEx_ProfileStart(env, STATS_TLB_LOOKUP);
445 spin_lock(&tb_lock);
446 tb = tb_find_fast();
447 /* Note: we do it here to avoid a gcc bug on Mac OS X when
448 doing it in tb_find_slow */
449 if (tb_invalidated_flag) {
450 /* as some TB could have been invalidated because
451 of memory exceptions while generating the code, we
452 must recompute the hash index here */
453 next_tb = 0;
454 tb_invalidated_flag = 0;
455 }
456
457 /* see if we can patch the calling TB. When the TB
458 spans two pages, we cannot safely do a direct
459 jump. */
460 if (next_tb != 0
461 && !(tb->cflags & CF_RAW_MODE)
462 && tb->page_addr[1] == -1)
463 {
464 tb_add_jump((TranslationBlock *)(long)(next_tb & ~3), next_tb & 3, tb);
465 }
466 spin_unlock(&tb_lock);
467 RAWEx_ProfileStop(env, STATS_TLB_LOOKUP);
468
469 env->current_tb = tb;
470 while (env->current_tb) {
471 tc_ptr = tb->tc_ptr;
472 /* execute the generated code */
473 RAWEx_ProfileStart(env, STATS_QEMU_RUN_EMULATED_CODE);
474 next_tb = tcg_qemu_tb_exec(tc_ptr);
475 RAWEx_ProfileStop(env, STATS_QEMU_RUN_EMULATED_CODE);
476 env->current_tb = NULL;
477 if ((next_tb & 3) == 2) {
478 /* Instruction counter expired. */
479 int insns_left;
480 tb = (TranslationBlock *)(long)(next_tb & ~3);
481 /* Restore PC. */
482 CPU_PC_FROM_TB(env, tb);
483 insns_left = env->icount_decr.u32;
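 /* The 16-bit decrementer is refilled from icount_extra in chunks of at most
    0xffff instructions; once both are exhausted, any remaining instructions
    are run uncached and the loop exits with EXCP_INTERRUPT. */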
484 if (env->icount_extra && insns_left >= 0) {
485 /* Refill decrementer and continue execution. */
486 env->icount_extra += insns_left;
487 if (env->icount_extra > 0xffff) {
488 insns_left = 0xffff;
489 } else {
490 insns_left = env->icount_extra;
491 }
492 env->icount_extra -= insns_left;
493 env->icount_decr.u16.low = insns_left;
494 } else {
495 if (insns_left > 0) {
496 /* Execute remaining instructions. */
497 cpu_exec_nocache(insns_left, tb);
498 }
499 env->exception_index = EXCP_INTERRUPT;
500 next_tb = 0;
501 cpu_loop_exit();
502 }
503 }
504 }
505
506 /* reset soft MMU for next block (it can currently
507 only be set by a memory fault) */
508#if defined(TARGET_I386) && !defined(CONFIG_SOFTMMU)
509 if (env->hflags & HF_SOFTMMU_MASK) {
510 env->hflags &= ~HF_SOFTMMU_MASK;
511 /* do not allow linking to another block */
512 next_tb = 0;
513 }
514#endif
515 } /* for(;;) */
516 } else {
517 env_to_regs();
518 }
519#ifdef VBOX_HIGH_RES_TIMERS_HACK
520 /* NULL the current_tb here so cpu_interrupt() doesn't do
521 anything unnecessary (like crashing during single-instruction emulation). */
522 env->current_tb = NULL;
523 TMTimerPoll(env1->pVM);
524#endif
525 } /* for(;;) */
526
527#if defined(TARGET_I386)
528 /* restore flags in standard format */
529 env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
530#else
531#error unsupported target CPU
532#endif
533#include "hostregs_helper.h"
534 return ret;
535}
536
537#else /* !VBOX */
538int cpu_exec(CPUState *env1)
539{
540#define DECLARE_HOST_REGS 1
541#include "hostregs_helper.h"
542 int ret, interrupt_request;
543 TranslationBlock *tb;
544 uint8_t *tc_ptr;
545 unsigned long next_tb;
546
547 if (cpu_halted(env1) == EXCP_HALTED)
548 return EXCP_HALTED;
549
550 cpu_single_env = env1;
551
552 /* first we save global registers */
553#define SAVE_HOST_REGS 1
554#include "hostregs_helper.h"
555 env = env1;
556
557 env_to_regs();
558#if defined(TARGET_I386)
559 /* put eflags in CPU temporary format */
560 CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
561 DF = 1 - (2 * ((env->eflags >> 10) & 1));
562 CC_OP = CC_OP_EFLAGS;
563 env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
564#elif defined(TARGET_SPARC)
565#elif defined(TARGET_M68K)
566 env->cc_op = CC_OP_FLAGS;
567 env->cc_dest = env->sr & 0xf;
568 env->cc_x = (env->sr >> 4) & 1;
569#elif defined(TARGET_ALPHA)
570#elif defined(TARGET_ARM)
571#elif defined(TARGET_PPC)
572#elif defined(TARGET_MIPS)
573#elif defined(TARGET_SH4)
574#elif defined(TARGET_CRIS)
575 /* XXXXX */
576#else
577#error unsupported target CPU
578#endif
579 env->exception_index = -1;
580
581 /* prepare setjmp context for exception handling */
582 for(;;) {
583 if (setjmp(env->jmp_env) == 0) {
584 env->current_tb = NULL;
585 /* if an exception is pending, we execute it here */
586 if (env->exception_index >= 0) {
587 if (env->exception_index >= EXCP_INTERRUPT) {
588 /* exit request from the cpu execution loop */
589 ret = env->exception_index;
590 break;
591 } else if (env->user_mode_only) {
592 /* if user mode only, we simulate a fake exception
593 which will be handled outside the cpu execution
594 loop */
595#if defined(TARGET_I386)
596 do_interrupt_user(env->exception_index,
597 env->exception_is_int,
598 env->error_code,
599 env->exception_next_eip);
600 /* successfully delivered */
601 env->old_exception = -1;
602#endif
603 ret = env->exception_index;
604 break;
605 } else {
606#if defined(TARGET_I386)
607 /* simulate a real cpu exception. On i386, it can
608 trigger new exceptions, but we do not handle
609 double or triple faults yet. */
610 do_interrupt(env->exception_index,
611 env->exception_is_int,
612 env->error_code,
613 env->exception_next_eip, 0);
614 /* successfully delivered */
615 env->old_exception = -1;
616#elif defined(TARGET_PPC)
617 do_interrupt(env);
618#elif defined(TARGET_MIPS)
619 do_interrupt(env);
620#elif defined(TARGET_SPARC)
621 do_interrupt(env);
622#elif defined(TARGET_ARM)
623 do_interrupt(env);
624#elif defined(TARGET_SH4)
625 do_interrupt(env);
626#elif defined(TARGET_ALPHA)
627 do_interrupt(env);
628#elif defined(TARGET_CRIS)
629 do_interrupt(env);
630#elif defined(TARGET_M68K)
631 do_interrupt(0);
632#endif
633 }
634 env->exception_index = -1;
635 }
636#ifdef USE_KQEMU
637 if (kqemu_is_ok(env) && env->interrupt_request == 0) {
638 int ret;
639 env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
640 ret = kqemu_cpu_exec(env);
641 /* put eflags in CPU temporary format */
642 CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
643 DF = 1 - (2 * ((env->eflags >> 10) & 1));
644 CC_OP = CC_OP_EFLAGS;
645 env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
646 if (ret == 1) {
647 /* exception */
648 longjmp(env->jmp_env, 1);
649 } else if (ret == 2) {
650 /* softmmu execution needed */
651 } else {
652 if (env->interrupt_request != 0) {
653 /* hardware interrupt will be executed just after */
654 } else {
655 /* otherwise, we restart */
656 longjmp(env->jmp_env, 1);
657 }
658 }
659 }
660#endif
661
662 next_tb = 0; /* force lookup of first TB */
663 for(;;) {
664 interrupt_request = env->interrupt_request;
665 if (unlikely(interrupt_request) &&
666 likely(!(env->singlestep_enabled & SSTEP_NOIRQ))) {
667 if (interrupt_request & CPU_INTERRUPT_DEBUG) {
668 env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
669 env->exception_index = EXCP_DEBUG;
670 cpu_loop_exit();
671 }
672#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
673 defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS)
674 if (interrupt_request & CPU_INTERRUPT_HALT) {
675 env->interrupt_request &= ~CPU_INTERRUPT_HALT;
676 env->halted = 1;
677 env->exception_index = EXCP_HLT;
678 cpu_loop_exit();
679 }
680#endif
681#if defined(TARGET_I386)
682 if (env->hflags2 & HF2_GIF_MASK) {
683 if ((interrupt_request & CPU_INTERRUPT_SMI) &&
684 !(env->hflags & HF_SMM_MASK)) {
685 svm_check_intercept(SVM_EXIT_SMI);
686 env->interrupt_request &= ~CPU_INTERRUPT_SMI;
687 do_smm_enter();
688 next_tb = 0;
689 } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
690 !(env->hflags2 & HF2_NMI_MASK)) {
691 env->interrupt_request &= ~CPU_INTERRUPT_NMI;
692 env->hflags2 |= HF2_NMI_MASK;
693 do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
694 next_tb = 0;
695 } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
696 (((env->hflags2 & HF2_VINTR_MASK) &&
697 (env->hflags2 & HF2_HIF_MASK)) ||
698 (!(env->hflags2 & HF2_VINTR_MASK) &&
699 (env->eflags & IF_MASK &&
700 !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
701 int intno;
702 svm_check_intercept(SVM_EXIT_INTR);
703 env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
704 intno = cpu_get_pic_interrupt(env);
705 if (loglevel & CPU_LOG_TB_IN_ASM) {
706 fprintf(logfile, "Servicing hardware INT=0x%02x\n", intno);
707 }
708 do_interrupt(intno, 0, 0, 0, 1);
709 /* ensure that no TB jump will be modified as
710 the program flow was changed */
711 next_tb = 0;
712#if !defined(CONFIG_USER_ONLY)
713 } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
714 (env->eflags & IF_MASK) &&
715 !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
716 int intno;
717 /* FIXME: this should respect TPR */
718 svm_check_intercept(SVM_EXIT_VINTR);
719 env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
720 intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
721 if (loglevel & CPU_LOG_TB_IN_ASM)
722 fprintf(logfile, "Servicing virtual hardware INT=0x%02x\n", intno);
723 do_interrupt(intno, 0, 0, 0, 1);
724 next_tb = 0;
725#endif
726 }
727 }
728#elif defined(TARGET_PPC)
729#if 0
730 if ((interrupt_request & CPU_INTERRUPT_RESET)) {
731 cpu_ppc_reset(env);
732 }
733#endif
734 if (interrupt_request & CPU_INTERRUPT_HARD) {
735 ppc_hw_interrupt(env);
736 if (env->pending_interrupts == 0)
737 env->interrupt_request &= ~CPU_INTERRUPT_HARD;
738 next_tb = 0;
739 }
740#elif defined(TARGET_MIPS)
741 if ((interrupt_request & CPU_INTERRUPT_HARD) &&
742 (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
743 (env->CP0_Status & (1 << CP0St_IE)) &&
744 !(env->CP0_Status & (1 << CP0St_EXL)) &&
745 !(env->CP0_Status & (1 << CP0St_ERL)) &&
746 !(env->hflags & MIPS_HFLAG_DM)) {
747 /* Raise it */
748 env->exception_index = EXCP_EXT_INTERRUPT;
749 env->error_code = 0;
750 do_interrupt(env);
751 next_tb = 0;
752 }
753#elif defined(TARGET_SPARC)
754 if ((interrupt_request & CPU_INTERRUPT_HARD) &&
755 (env->psret != 0)) {
756 int pil = env->interrupt_index & 15;
757 int type = env->interrupt_index & 0xf0;
758
759 if (((type == TT_EXTINT) &&
760 (pil == 15 || pil > env->psrpil)) ||
761 type != TT_EXTINT) {
762 env->interrupt_request &= ~CPU_INTERRUPT_HARD;
763 env->exception_index = env->interrupt_index;
764 do_interrupt(env);
765 env->interrupt_index = 0;
766#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
767 cpu_check_irqs(env);
768#endif
769 next_tb = 0;
770 }
771 } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
772 //do_interrupt(0, 0, 0, 0, 0);
773 env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
774 }
775#elif defined(TARGET_ARM)
776 if (interrupt_request & CPU_INTERRUPT_FIQ
777 && !(env->uncached_cpsr & CPSR_F)) {
778 env->exception_index = EXCP_FIQ;
779 do_interrupt(env);
780 next_tb = 0;
781 }
782 /* ARMv7-M interrupt return works by loading a magic value
783 into the PC. On real hardware the load causes the
784 return to occur. The qemu implementation performs the
785 jump normally, then does the exception return when the
786 CPU tries to execute code at the magic address.
787 This will cause the magic PC value to be pushed to
788 the stack if an interrupt occurred at the wrong time.
789 We avoid this by disabling interrupts when
790 pc contains a magic address. */
791 if (interrupt_request & CPU_INTERRUPT_HARD
792 && ((IS_M(env) && env->regs[15] < 0xfffffff0)
793 || !(env->uncached_cpsr & CPSR_I))) {
794 env->exception_index = EXCP_IRQ;
795 do_interrupt(env);
796 next_tb = 0;
797 }
798#elif defined(TARGET_SH4)
799 if (interrupt_request & CPU_INTERRUPT_HARD) {
800 do_interrupt(env);
801 next_tb = 0;
802 }
803#elif defined(TARGET_ALPHA)
804 if (interrupt_request & CPU_INTERRUPT_HARD) {
805 do_interrupt(env);
806 next_tb = 0;
807 }
808#elif defined(TARGET_CRIS)
809 if (interrupt_request & CPU_INTERRUPT_HARD
810 && (env->pregs[PR_CCS] & I_FLAG)) {
811 env->exception_index = EXCP_IRQ;
812 do_interrupt(env);
813 next_tb = 0;
814 }
815 if (interrupt_request & CPU_INTERRUPT_NMI
816 && (env->pregs[PR_CCS] & M_FLAG)) {
817 env->exception_index = EXCP_NMI;
818 do_interrupt(env);
819 next_tb = 0;
820 }
821#elif defined(TARGET_M68K)
822 if (interrupt_request & CPU_INTERRUPT_HARD
823 && ((env->sr & SR_I) >> SR_I_SHIFT)
824 < env->pending_level) {
825 /* Real hardware gets the interrupt vector via an
826 IACK cycle at this point. Current emulated
827 hardware doesn't rely on this, so we
828 provide/save the vector when the interrupt is
829 first signalled. */
830 env->exception_index = env->pending_vector;
831 do_interrupt(1);
832 next_tb = 0;
833 }
834#endif
835 /* Don't use the cached interrupt_request value,
836 do_interrupt may have updated the EXITTB flag. */
837 if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
838 env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
839 /* ensure that no TB jump will be modified as
840 the program flow was changed */
841 next_tb = 0;
842 }
843 if (interrupt_request & CPU_INTERRUPT_EXIT) {
844 env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
845 env->exception_index = EXCP_INTERRUPT;
846 cpu_loop_exit();
847 }
848 }
849#ifdef DEBUG_EXEC
850 if ((loglevel & CPU_LOG_TB_CPU)) {
851 /* restore flags in standard format */
852 regs_to_env();
853#if defined(TARGET_I386)
854 env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
855 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
856 env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
857#elif defined(TARGET_ARM)
858 cpu_dump_state(env, logfile, fprintf, 0);
859#elif defined(TARGET_SPARC)
860 cpu_dump_state(env, logfile, fprintf, 0);
861#elif defined(TARGET_PPC)
862 cpu_dump_state(env, logfile, fprintf, 0);
863#elif defined(TARGET_M68K)
864 cpu_m68k_flush_flags(env, env->cc_op);
865 env->cc_op = CC_OP_FLAGS;
866 env->sr = (env->sr & 0xffe0)
867 | env->cc_dest | (env->cc_x << 4);
868 cpu_dump_state(env, logfile, fprintf, 0);
869#elif defined(TARGET_MIPS)
870 cpu_dump_state(env, logfile, fprintf, 0);
871#elif defined(TARGET_SH4)
872 cpu_dump_state(env, logfile, fprintf, 0);
873#elif defined(TARGET_ALPHA)
874 cpu_dump_state(env, logfile, fprintf, 0);
875#elif defined(TARGET_CRIS)
876 cpu_dump_state(env, logfile, fprintf, 0);
877#else
878#error unsupported target CPU
879#endif
880 }
881#endif
882 spin_lock(&tb_lock);
883 tb = tb_find_fast();
884 /* Note: we do it here to avoid a gcc bug on Mac OS X when
885 doing it in tb_find_slow */
886 if (tb_invalidated_flag) {
887 /* as some TB could have been invalidated because
888 of memory exceptions while generating the code, we
889 must recompute the hash index here */
890 next_tb = 0;
891 tb_invalidated_flag = 0;
892 }
893#ifdef DEBUG_EXEC
894 if ((loglevel & CPU_LOG_EXEC)) {
895 fprintf(logfile, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
896 (long)tb->tc_ptr, tb->pc,
897 lookup_symbol(tb->pc));
898 }
899#endif
900 /* see if we can patch the calling TB. When the TB
901 spans two pages, we cannot safely do a direct
902 jump. */
903 {
904 if (next_tb != 0 &&
905#ifdef USE_KQEMU
906 (env->kqemu_enabled != 2) &&
907#endif
908 tb->page_addr[1] == -1) {
909 tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
910 }
911 }
912 spin_unlock(&tb_lock);
913 env->current_tb = tb;
914 while (env->current_tb) {
915 tc_ptr = tb->tc_ptr;
916 /* execute the generated code */
917#if defined(__sparc__) && !defined(HOST_SOLARIS)
918#undef env
919 env = cpu_single_env;
920#define env cpu_single_env
921#endif
922 next_tb = tcg_qemu_tb_exec(tc_ptr);
923 env->current_tb = NULL;
924 if ((next_tb & 3) == 2) {
925 /* Instruction counter expired. */
926 int insns_left;
927 tb = (TranslationBlock *)(long)(next_tb & ~3);
928 /* Restore PC. */
929 CPU_PC_FROM_TB(env, tb);
930 insns_left = env->icount_decr.u32;
931 if (env->icount_extra && insns_left >= 0) {
932 /* Refill decrementer and continue execution. */
933 env->icount_extra += insns_left;
934 if (env->icount_extra > 0xffff) {
935 insns_left = 0xffff;
936 } else {
937 insns_left = env->icount_extra;
938 }
939 env->icount_extra -= insns_left;
940 env->icount_decr.u16.low = insns_left;
941 } else {
942 if (insns_left > 0) {
943 /* Execute remaining instructions. */
944 cpu_exec_nocache(insns_left, tb);
945 }
946 env->exception_index = EXCP_INTERRUPT;
947 next_tb = 0;
948 cpu_loop_exit();
949 }
950 }
951 }
952 /* reset soft MMU for next block (it can currently
953 only be set by a memory fault) */
954#if defined(USE_KQEMU)
955#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
956 if (kqemu_is_ok(env) &&
957 (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
958 cpu_loop_exit();
959 }
960#endif
961 } /* for(;;) */
962 } else {
963 env_to_regs();
964 }
965 } /* for(;;) */
966
967
968#if defined(TARGET_I386)
969 /* restore flags in standard format */
970 env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
971#elif defined(TARGET_ARM)
972 /* XXX: Save/restore host fpu exception state? */
973#elif defined(TARGET_SPARC)
974#elif defined(TARGET_PPC)
975#elif defined(TARGET_M68K)
976 cpu_m68k_flush_flags(env, env->cc_op);
977 env->cc_op = CC_OP_FLAGS;
978 env->sr = (env->sr & 0xffe0)
979 | env->cc_dest | (env->cc_x << 4);
980#elif defined(TARGET_MIPS)
981#elif defined(TARGET_SH4)
982#elif defined(TARGET_ALPHA)
983#elif defined(TARGET_CRIS)
984 /* XXXXX */
985#else
986#error unsupported target CPU
987#endif
988
989 /* restore global registers */
990#include "hostregs_helper.h"
991
992 /* fail safe : never use cpu_single_env outside cpu_exec() */
993 cpu_single_env = NULL;
994 return ret;
995}
996#endif /* !VBOX */
997
998/* must only be called from the generated code as an exception can be
999 generated */
1000void tb_invalidate_page_range(target_ulong start, target_ulong end)
1001{
1002 /* XXX: cannot enable it yet because it leads to an MMU exception
1003 where NIP != read address on PowerPC */
1004#if 0
1005 target_ulong phys_addr;
1006 phys_addr = get_phys_addr_code(env, start);
1007 tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
1008#endif
1009}
1010
1011#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)
1012
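/* User-mode helper: in real or VM86 mode the segment base is simply
   selector << 4 with a 64 KiB limit; in protected mode the descriptor is
   loaded through load_seg(). */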
1013void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
1014{
1015 CPUX86State *saved_env;
1016
1017 saved_env = env;
1018 env = s;
1019 if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
1020 selector &= 0xffff;
1021 cpu_x86_load_seg_cache(env, seg_reg, selector,
1022 (selector << 4), 0xffff, 0);
1023 } else {
1024 load_seg(seg_reg, selector);
1025 }
1026 env = saved_env;
1027}
1028
1029void cpu_x86_fsave(CPUX86State *s, uint8_t *ptr, int data32)
1030{
1031 CPUX86State *saved_env;
1032
1033 saved_env = env;
1034 env = s;
1035
1036 helper_fsave((target_ulong)ptr, data32);
1037
1038 env = saved_env;
1039}
1040
1041void cpu_x86_frstor(CPUX86State *s, uint8_t *ptr, int data32)
1042{
1043 CPUX86State *saved_env;
1044
1045 saved_env = env;
1046 env = s;
1047
1048 helper_frstor((target_ulong)ptr, data32);
1049
1050 env = saved_env;
1051}
1052
1053#endif /* TARGET_I386 */
1054
1055#if !defined(CONFIG_SOFTMMU)
1056
1057#if defined(TARGET_I386)
1058
1059/* 'pc' is the host PC at which the exception was raised. 'address' is
1060 the effective address of the memory exception. 'is_write' is 1 if a
1061 write caused the exception and 0 otherwise. 'old_set' is the
1062 signal set which should be restored */
1063static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
1064 int is_write, sigset_t *old_set,
1065 void *puc)
1066{
1067 TranslationBlock *tb;
1068 int ret;
1069
1070 if (cpu_single_env)
1071 env = cpu_single_env; /* XXX: find a correct solution for multithread */
1072#if defined(DEBUG_SIGNAL)
1073 qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1074 pc, address, is_write, *(unsigned long *)old_set);
1075#endif
1076 /* XXX: locking issue */
1077 if (is_write && page_unprotect(h2g(address), pc, puc)) {
1078 return 1;
1079 }
1080
1081 /* see if it is an MMU fault */
1082 ret = cpu_x86_handle_mmu_fault(env, address, is_write,
1083 ((env->hflags & HF_CPL_MASK) == 3), 0);
1084 if (ret < 0)
1085 return 0; /* not an MMU fault */
1086 if (ret == 0)
1087 return 1; /* the MMU fault was handled without causing real CPU fault */
1088 /* now we have a real cpu fault */
1089 tb = tb_find_pc(pc);
1090 if (tb) {
1091 /* the PC is inside the translated code. It means that we have
1092 a virtual CPU fault */
1093 cpu_restore_state(tb, env, pc, puc);
1094 }
1095 if (ret == 1) {
1096#if 0
1097 printf("PF exception: EIP=0x%VGv CR2=0x%VGv error=0x%x\n",
1098 env->eip, env->cr[2], env->error_code);
1099#endif
1100 /* we restore the process signal mask as the sigreturn should
1101 do it (XXX: use sigsetjmp) */
1102 sigprocmask(SIG_SETMASK, old_set, NULL);
1103 raise_exception_err(env->exception_index, env->error_code);
1104 } else {
1105 /* activate soft MMU for this block */
1106 env->hflags |= HF_SOFTMMU_MASK;
1107 cpu_resume_from_signal(env, puc);
1108 }
1109 /* never comes here */
1110 return 1;
1111}
1112
1113#elif defined(TARGET_ARM)
1114static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
1115 int is_write, sigset_t *old_set,
1116 void *puc)
1117{
1118 TranslationBlock *tb;
1119 int ret;
1120
1121 if (cpu_single_env)
1122 env = cpu_single_env; /* XXX: find a correct solution for multithread */
1123#if defined(DEBUG_SIGNAL)
1124 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1125 pc, address, is_write, *(unsigned long *)old_set);
1126#endif
1127 /* XXX: locking issue */
1128 if (is_write && page_unprotect(h2g(address), pc, puc)) {
1129 return 1;
1130 }
1131 /* see if it is an MMU fault */
1132 ret = cpu_arm_handle_mmu_fault(env, address, is_write, 1, 0);
1133 if (ret < 0)
1134 return 0; /* not an MMU fault */
1135 if (ret == 0)
1136 return 1; /* the MMU fault was handled without causing real CPU fault */
1137 /* now we have a real cpu fault */
1138 tb = tb_find_pc(pc);
1139 if (tb) {
1140 /* the PC is inside the translated code. It means that we have
1141 a virtual CPU fault */
1142 cpu_restore_state(tb, env, pc, puc);
1143 }
1144 /* we restore the process signal mask as the sigreturn should
1145 do it (XXX: use sigsetjmp) */
1146 sigprocmask(SIG_SETMASK, old_set, NULL);
1147 cpu_loop_exit();
1148}
1149#elif defined(TARGET_SPARC)
1150static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
1151 int is_write, sigset_t *old_set,
1152 void *puc)
1153{
1154 TranslationBlock *tb;
1155 int ret;
1156
1157 if (cpu_single_env)
1158 env = cpu_single_env; /* XXX: find a correct solution for multithread */
1159#if defined(DEBUG_SIGNAL)
1160 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1161 pc, address, is_write, *(unsigned long *)old_set);
1162#endif
1163 /* XXX: locking issue */
1164 if (is_write && page_unprotect(h2g(address), pc, puc)) {
1165 return 1;
1166 }
1167 /* see if it is an MMU fault */
1168 ret = cpu_sparc_handle_mmu_fault(env, address, is_write, 1, 0);
1169 if (ret < 0)
1170 return 0; /* not an MMU fault */
1171 if (ret == 0)
1172 return 1; /* the MMU fault was handled without causing real CPU fault */
1173 /* now we have a real cpu fault */
1174 tb = tb_find_pc(pc);
1175 if (tb) {
1176 /* the PC is inside the translated code. It means that we have
1177 a virtual CPU fault */
1178 cpu_restore_state(tb, env, pc, puc);
1179 }
1180 /* we restore the process signal mask as the sigreturn should
1181 do it (XXX: use sigsetjmp) */
1182 sigprocmask(SIG_SETMASK, old_set, NULL);
1183 cpu_loop_exit();
1184}
1185#elif defined (TARGET_PPC)
1186static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
1187 int is_write, sigset_t *old_set,
1188 void *puc)
1189{
1190 TranslationBlock *tb;
1191 int ret;
1192
1193 if (cpu_single_env)
1194 env = cpu_single_env; /* XXX: find a correct solution for multithread */
1195#if defined(DEBUG_SIGNAL)
1196 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1197 pc, address, is_write, *(unsigned long *)old_set);
1198#endif
1199 /* XXX: locking issue */
1200 if (is_write && page_unprotect(h2g(address), pc, puc)) {
1201 return 1;
1202 }
1203
1204 /* see if it is an MMU fault */
1205 ret = cpu_ppc_handle_mmu_fault(env, address, is_write, msr_pr, 0);
1206 if (ret < 0)
1207 return 0; /* not an MMU fault */
1208 if (ret == 0)
1209 return 1; /* the MMU fault was handled without causing real CPU fault */
1210
1211 /* now we have a real cpu fault */
1212 tb = tb_find_pc(pc);
1213 if (tb) {
1214 /* the PC is inside the translated code. It means that we have
1215 a virtual CPU fault */
1216 cpu_restore_state(tb, env, pc, puc);
1217 }
1218 if (ret == 1) {
1219#if 0
1220 printf("PF exception: NIP=0x%08x error=0x%x %p\n",
1221 env->nip, env->error_code, tb);
1222#endif
1223 /* we restore the process signal mask as the sigreturn should
1224 do it (XXX: use sigsetjmp) */
1225 sigprocmask(SIG_SETMASK, old_set, NULL);
1226 do_raise_exception_err(env->exception_index, env->error_code);
1227 } else {
1228 /* activate soft MMU for this block */
1229 cpu_resume_from_signal(env, puc);
1230 }
1231 /* never comes here */
1232 return 1;
1233}
1234
1235#elif defined(TARGET_M68K)
1236static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
1237 int is_write, sigset_t *old_set,
1238 void *puc)
1239{
1240 TranslationBlock *tb;
1241 int ret;
1242
1243 if (cpu_single_env)
1244 env = cpu_single_env; /* XXX: find a correct solution for multithread */
1245#if defined(DEBUG_SIGNAL)
1246 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1247 pc, address, is_write, *(unsigned long *)old_set);
1248#endif
1249 /* XXX: locking issue */
1250 if (is_write && page_unprotect(address, pc, puc)) {
1251 return 1;
1252 }
1253 /* see if it is an MMU fault */
1254 ret = cpu_m68k_handle_mmu_fault(env, address, is_write, 1, 0);
1255 if (ret < 0)
1256 return 0; /* not an MMU fault */
1257 if (ret == 0)
1258 return 1; /* the MMU fault was handled without causing real CPU fault */
1259 /* now we have a real cpu fault */
1260 tb = tb_find_pc(pc);
1261 if (tb) {
1262 /* the PC is inside the translated code. It means that we have
1263 a virtual CPU fault */
1264 cpu_restore_state(tb, env, pc, puc);
1265 }
1266 /* we restore the process signal mask as the sigreturn should
1267 do it (XXX: use sigsetjmp) */
1268 sigprocmask(SIG_SETMASK, old_set, NULL);
1269 cpu_loop_exit();
1270 /* never comes here */
1271 return 1;
1272}
1273
1274#elif defined (TARGET_MIPS)
1275static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
1276 int is_write, sigset_t *old_set,
1277 void *puc)
1278{
1279 TranslationBlock *tb;
1280 int ret;
1281
1282 if (cpu_single_env)
1283 env = cpu_single_env; /* XXX: find a correct solution for multithread */
1284#if defined(DEBUG_SIGNAL)
1285 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1286 pc, address, is_write, *(unsigned long *)old_set);
1287#endif
1288 /* XXX: locking issue */
1289 if (is_write && page_unprotect(h2g(address), pc, puc)) {
1290 return 1;
1291 }
1292
1293 /* see if it is an MMU fault */
1294 ret = cpu_mips_handle_mmu_fault(env, address, is_write, 1, 0);
1295 if (ret < 0)
1296 return 0; /* not an MMU fault */
1297 if (ret == 0)
1298 return 1; /* the MMU fault was handled without causing real CPU fault */
1299
1300 /* now we have a real cpu fault */
1301 tb = tb_find_pc(pc);
1302 if (tb) {
1303 /* the PC is inside the translated code. It means that we have
1304 a virtual CPU fault */
1305 cpu_restore_state(tb, env, pc, puc);
1306 }
1307 if (ret == 1) {
1308#if 0
1309 printf("PF exception: NIP=0x%08x error=0x%x %p\n",
1310 env->nip, env->error_code, tb);
1311#endif
1312 /* we restore the process signal mask as the sigreturn should
1313 do it (XXX: use sigsetjmp) */
1314 sigprocmask(SIG_SETMASK, old_set, NULL);
1315 do_raise_exception_err(env->exception_index, env->error_code);
1316 } else {
1317 /* activate soft MMU for this block */
1318 cpu_resume_from_signal(env, puc);
1319 }
1320 /* never comes here */
1321 return 1;
1322}
1323
1324#elif defined (TARGET_SH4)
1325static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
1326 int is_write, sigset_t *old_set,
1327 void *puc)
1328{
1329 TranslationBlock *tb;
1330 int ret;
1331
1332 if (cpu_single_env)
1333 env = cpu_single_env; /* XXX: find a correct solution for multithread */
1334#if defined(DEBUG_SIGNAL)
1335 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1336 pc, address, is_write, *(unsigned long *)old_set);
1337#endif
1338 /* XXX: locking issue */
1339 if (is_write && page_unprotect(h2g(address), pc, puc)) {
1340 return 1;
1341 }
1342
1343 /* see if it is an MMU fault */
1344 ret = cpu_sh4_handle_mmu_fault(env, address, is_write, 1, 0);
1345 if (ret < 0)
1346 return 0; /* not an MMU fault */
1347 if (ret == 0)
1348 return 1; /* the MMU fault was handled without causing real CPU fault */
1349
1350 /* now we have a real cpu fault */
1351 tb = tb_find_pc(pc);
1352 if (tb) {
1353 /* the PC is inside the translated code. It means that we have
1354 a virtual CPU fault */
1355 cpu_restore_state(tb, env, pc, puc);
1356 }
1357#if 0
1358 printf("PF exception: NIP=0x%08x error=0x%x %p\n",
1359 env->nip, env->error_code, tb);
1360#endif
1361 /* we restore the process signal mask as the sigreturn should
1362 do it (XXX: use sigsetjmp) */
1363 sigprocmask(SIG_SETMASK, old_set, NULL);
1364 cpu_loop_exit();
1365 /* never comes here */
1366 return 1;
1367}
1368#else
1369#error unsupported target CPU
1370#endif
1371
1372#if defined(__i386__)
1373
1374#if defined(__APPLE__)
1375# include <sys/ucontext.h>
1376
1377# define EIP_sig(context) (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
1378# define TRAP_sig(context) ((context)->uc_mcontext->es.trapno)
1379# define ERROR_sig(context) ((context)->uc_mcontext->es.err)
1380#else
1381# define EIP_sig(context) ((context)->uc_mcontext.gregs[REG_EIP])
1382# define TRAP_sig(context) ((context)->uc_mcontext.gregs[REG_TRAPNO])
1383# define ERROR_sig(context) ((context)->uc_mcontext.gregs[REG_ERR])
1384#endif
1385
1386int cpu_signal_handler(int host_signum, void *pinfo,
1387 void *puc)
1388{
1389 siginfo_t *info = pinfo;
1390 struct ucontext *uc = puc;
1391 unsigned long pc;
1392 int trapno;
1393
1394#ifndef REG_EIP
1395/* for glibc 2.1 */
1396#define REG_EIP EIP
1397#define REG_ERR ERR
1398#define REG_TRAPNO TRAPNO
1399#endif
1400 pc = uc->uc_mcontext.gregs[REG_EIP];
1401 trapno = uc->uc_mcontext.gregs[REG_TRAPNO];
1402#if defined(TARGET_I386) && defined(USE_CODE_COPY)
1403 if (trapno == 0x00 || trapno == 0x05) {
1404 /* send division by zero or bound exception */
1405 cpu_send_trap(pc, trapno, uc);
1406 return 1;
1407 } else
1408#endif
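 /* Trap 0xe is a page fault; bit 1 of its error code is set for write accesses. */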
1409 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1410 trapno == 0xe ?
1411 (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
1412 &uc->uc_sigmask, puc);
1413}
1414
1415#elif defined(__x86_64__)
1416
1417int cpu_signal_handler(int host_signum, void *pinfo,
1418 void *puc)
1419{
1420 siginfo_t *info = pinfo;
1421 struct ucontext *uc = puc;
1422 unsigned long pc;
1423
1424 pc = uc->uc_mcontext.gregs[REG_RIP];
1425 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1426 uc->uc_mcontext.gregs[REG_TRAPNO] == 0xe ?
1427 (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
1428 &uc->uc_sigmask, puc);
1429}
1430
1431#elif defined(__powerpc__)
1432
1433/***********************************************************************
1434 * signal context platform-specific definitions
1435 * From Wine
1436 */
1437#ifdef linux
1438/* All Registers access - only for local access */
1439# define REG_sig(reg_name, context) ((context)->uc_mcontext.regs->reg_name)
1440/* Gpr Registers access */
1441# define GPR_sig(reg_num, context) REG_sig(gpr[reg_num], context)
1442# define IAR_sig(context) REG_sig(nip, context) /* Program counter */
1443# define MSR_sig(context) REG_sig(msr, context) /* Machine State Register (Supervisor) */
1444# define CTR_sig(context) REG_sig(ctr, context) /* Count register */
1445# define XER_sig(context) REG_sig(xer, context) /* User's integer exception register */
1446# define LR_sig(context) REG_sig(link, context) /* Link register */
1447# define CR_sig(context) REG_sig(ccr, context) /* Condition register */
1448/* Float Registers access */
1449# define FLOAT_sig(reg_num, context) (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
1450# define FPSCR_sig(context) (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
1451/* Exception Registers access */
1452# define DAR_sig(context) REG_sig(dar, context)
1453# define DSISR_sig(context) REG_sig(dsisr, context)
1454# define TRAP_sig(context) REG_sig(trap, context)
1455#endif /* linux */
1456
1457#ifdef __APPLE__
1458# include <sys/ucontext.h>
1459typedef struct ucontext SIGCONTEXT;
1460/* All Registers access - only for local access */
1461# define REG_sig(reg_name, context) ((context)->uc_mcontext->ss.reg_name)
1462# define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
1463# define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
1464# define VECREG_sig(reg_name, context) ((context)->uc_mcontext->vs.reg_name)
1465/* Gpr Registers access */
1466# define GPR_sig(reg_num, context) REG_sig(r##reg_num, context)
1467# define IAR_sig(context) REG_sig(srr0, context) /* Program counter */
1468# define MSR_sig(context) REG_sig(srr1, context) /* Machine State Register (Supervisor) */
1469# define CTR_sig(context) REG_sig(ctr, context)
1470 # define XER_sig(context) REG_sig(xer, context) /* User's integer exception register */
1471 # define LR_sig(context) REG_sig(lr, context) /* Link register */
1472# define CR_sig(context) REG_sig(cr, context) /* Condition register */
1473/* Float Registers access */
1474# define FLOAT_sig(reg_num, context) FLOATREG_sig(fpregs[reg_num], context)
1475# define FPSCR_sig(context) ((double)FLOATREG_sig(fpscr, context))
1476/* Exception Registers access */
1477# define DAR_sig(context) EXCEPREG_sig(dar, context) /* Fault registers for coredump */
1478# define DSISR_sig(context) EXCEPREG_sig(dsisr, context)
1479# define TRAP_sig(context) EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
1480#endif /* __APPLE__ */
1481
1482int cpu_signal_handler(int host_signum, void *pinfo,
1483 void *puc)
1484{
1485 siginfo_t *info = pinfo;
1486 struct ucontext *uc = puc;
1487 unsigned long pc;
1488 int is_write;
1489
1490 pc = IAR_sig(uc);
1491 is_write = 0;
1492#if 0
1493 /* ppc 4xx case */
1494 if (DSISR_sig(uc) & 0x00800000)
1495 is_write = 1;
1496#else
1497 if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
1498 is_write = 1;
1499#endif
1500 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1501 is_write, &uc->uc_sigmask, puc);
1502}
1503
1504#elif defined(__alpha__)
1505
1506int cpu_signal_handler(int host_signum, void *pinfo,
1507 void *puc)
1508{
1509 siginfo_t *info = pinfo;
1510 struct ucontext *uc = puc;
1511 uint32_t *pc = uc->uc_mcontext.sc_pc;
1512 uint32_t insn = *pc;
1513 int is_write = 0;
1514
1515 /* XXX: need kernel patch to get write flag faster */
1516 switch (insn >> 26) {
1517 case 0x0d: // stw
1518 case 0x0e: // stb
1519 case 0x0f: // stq_u
1520 case 0x24: // stf
1521 case 0x25: // stg
1522 case 0x26: // sts
1523 case 0x27: // stt
1524 case 0x2c: // stl
1525 case 0x2d: // stq
1526 case 0x2e: // stl_c
1527 case 0x2f: // stq_c
1528 is_write = 1;
1529 }
1530
1531 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1532 is_write, &uc->uc_sigmask, puc);
1533}
1534#elif defined(__sparc__)
1535
1536int cpu_signal_handler(int host_signum, void *pinfo,
1537 void *puc)
1538{
1539 siginfo_t *info = pinfo;
1540 uint32_t *regs = (uint32_t *)(info + 1);
1541 void *sigmask = (regs + 20);
1542 unsigned long pc;
1543 int is_write;
1544 uint32_t insn;
1545
1546 /* XXX: is there a standard glibc define ? */
1547 pc = regs[1];
1548 /* XXX: need kernel patch to get write flag faster */
1549 is_write = 0;
1550 insn = *(uint32_t *)pc;
1551 if ((insn >> 30) == 3) {
1552 switch((insn >> 19) & 0x3f) {
1553 case 0x05: // stb
1554 case 0x06: // sth
1555 case 0x04: // st
1556 case 0x07: // std
1557 case 0x24: // stf
1558 case 0x27: // stdf
1559 case 0x25: // stfsr
1560 is_write = 1;
1561 break;
1562 }
1563 }
1564 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1565 is_write, sigmask, NULL);
1566}
1567
1568#elif defined(__arm__)
1569
1570int cpu_signal_handler(int host_signum, void *pinfo,
1571 void *puc)
1572{
1573 siginfo_t *info = pinfo;
1574 struct ucontext *uc = puc;
1575 unsigned long pc;
1576 int is_write;
1577
1578 pc = uc->uc_mcontext.gregs[R15];
1579 /* XXX: compute is_write */
1580 is_write = 0;
1581 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1582 is_write,
1583 &uc->uc_sigmask, puc);
1584}
1585
1586#elif defined(__mc68000)
1587
1588int cpu_signal_handler(int host_signum, void *pinfo,
1589 void *puc)
1590{
1591 siginfo_t *info = pinfo;
1592 struct ucontext *uc = puc;
1593 unsigned long pc;
1594 int is_write;
1595
1596 pc = uc->uc_mcontext.gregs[16];
1597 /* XXX: compute is_write */
1598 is_write = 0;
1599 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1600 is_write,
1601 &uc->uc_sigmask, puc);
1602}
1603
1604#elif defined(__ia64)
1605
1606#ifndef __ISR_VALID
1607 /* This ought to be in <bits/siginfo.h>... */
1608# define __ISR_VALID 1
1609#endif
1610
1611int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
1612{
1613 siginfo_t *info = pinfo;
1614 struct ucontext *uc = puc;
1615 unsigned long ip;
1616 int is_write = 0;
1617
1618 ip = uc->uc_mcontext.sc_ip;
1619 switch (host_signum) {
1620 case SIGILL:
1621 case SIGFPE:
1622 case SIGSEGV:
1623 case SIGBUS:
1624 case SIGTRAP:
1625 if (info->si_code && (info->si_segvflags & __ISR_VALID))
1626 /* ISR.W (write-access) is bit 33: */
1627 is_write = (info->si_isr >> 33) & 1;
1628 break;
1629
1630 default:
1631 break;
1632 }
1633 return handle_cpu_signal(ip, (unsigned long)info->si_addr,
1634 is_write,
1635 &uc->uc_sigmask, puc);
1636}
1637
1638#elif defined(__s390__)
1639
1640int cpu_signal_handler(int host_signum, void *pinfo,
1641 void *puc)
1642{
1643 siginfo_t *info = pinfo;
1644 struct ucontext *uc = puc;
1645 unsigned long pc;
1646 int is_write;
1647
1648 pc = uc->uc_mcontext.psw.addr;
1649 /* XXX: compute is_write */
1650 is_write = 0;
1651 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1652 is_write,
1653 &uc->uc_sigmask, puc);
1654}
1655
1656#else
1657
1658#error host CPU specific signal handler needed
1659
1660#endif
1661
1662#endif /* !defined(CONFIG_SOFTMMU) */