VirtualBox

source: vbox/trunk/src/recompiler/new/exec-all.h@535

Last change on this file since 535 was 182, checked in by vboxsync, 18 years ago

gcc 3.3.3 didn't like my references to the two label pointers. Also shut up two warnings and added an #error for amd64+darwin.

  • Property svn:eol-style set to native
File size: 19.4 KB
/*
 * internal execution defines for qemu
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

/* allow seeing translation results - the slowdown should be negligible, so we leave it */
#ifndef VBOX
#define DEBUG_DISAS
#endif

#ifdef VBOX
# include <VBox/tm.h>
# ifndef LOG_GROUP
#  define LOG_GROUP LOG_GROUP_REM
# endif
# include <VBox/log.h>
# include "REMInternal.h"
# include <VBox/vm.h>
#endif /* VBOX */

#ifndef glue
#define xglue(x, y) x ## y
#define glue(x, y) xglue(x, y)
#define stringify(s) tostring(s)
#define tostring(s) #s
#endif
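
/* For example, glue(ldl, MEMSUFFIX) pastes to ldl_code when MEMSUFFIX is
   defined to _code, and stringify(DATA_SIZE) yields the string "1" once
   DATA_SIZE has been defined to 1; the extra xglue/tostring level is what
   forces the arguments to be macro-expanded before pasting/stringifying. */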

#if __GNUC__ < 3
#define __builtin_expect(x, n) (x)
#endif

#ifdef __i386__
#define REGPARM(n) __attribute((regparm(n)))
#else
#define REGPARM(n)
#endif

/* is_jmp field values */
#define DISAS_NEXT    0 /* next instruction can be analyzed */
#define DISAS_JUMP    1 /* only pc was modified dynamically */
#define DISAS_UPDATE  2 /* cpu state was modified dynamically */
#define DISAS_TB_JUMP 3 /* only pc was modified statically */

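/* Sketch of how these values are consumed (based on the per-target
   translators, e.g. target-i386/translate.c): the decode loop in
   gen_intermediate_code() keeps translating guest instructions while the
   translator's DisasContext.is_jmp stays DISAS_NEXT, and closes the
   translation block as soon as one of the other values is set. */
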
struct TranslationBlock;

/* XXX: make safe guess about sizes */
#define MAX_OP_PER_INSTR 32
#define OPC_BUF_SIZE 512
#define OPC_MAX_SIZE (OPC_BUF_SIZE - MAX_OP_PER_INSTR)

#define OPPARAM_BUF_SIZE (OPC_BUF_SIZE * 3)

extern uint16_t gen_opc_buf[OPC_BUF_SIZE];
extern uint32_t gen_opparam_buf[OPPARAM_BUF_SIZE];
extern long gen_labels[OPC_BUF_SIZE];
extern int nb_gen_labels;
extern target_ulong gen_opc_pc[OPC_BUF_SIZE];
extern target_ulong gen_opc_npc[OPC_BUF_SIZE];
extern uint8_t gen_opc_cc_op[OPC_BUF_SIZE];
extern uint8_t gen_opc_instr_start[OPC_BUF_SIZE];
extern target_ulong gen_opc_jump_pc[2];
extern uint32_t gen_opc_hflags[OPC_BUF_SIZE];

typedef void (GenOpFunc)(void);
typedef void (GenOpFunc1)(long);
typedef void (GenOpFunc2)(long, long);
typedef void (GenOpFunc3)(long, long, long);

#if defined(TARGET_I386)

void optimize_flags_init(void);

#endif

extern FILE *logfile;
extern int loglevel;

int gen_intermediate_code(CPUState *env, struct TranslationBlock *tb);
int gen_intermediate_code_pc(CPUState *env, struct TranslationBlock *tb);
void dump_ops(const uint16_t *opc_buf, const uint32_t *opparam_buf);
int cpu_gen_code(CPUState *env, struct TranslationBlock *tb,
                 int max_code_size, int *gen_code_size_ptr);
int cpu_restore_state(struct TranslationBlock *tb,
                      CPUState *env, unsigned long searched_pc,
                      void *puc);
int cpu_gen_code_copy(CPUState *env, struct TranslationBlock *tb,
                      int max_code_size, int *gen_code_size_ptr);
int cpu_restore_state_copy(struct TranslationBlock *tb,
                           CPUState *env, unsigned long searched_pc,
                           void *puc);
void cpu_resume_from_signal(CPUState *env1, void *puc);
void cpu_exec_init(CPUState *env);
int page_unprotect(target_ulong address, unsigned long pc, void *puc);
void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
                                   int is_cpu_write_access);
void tb_invalidate_page_range(target_ulong start, target_ulong end);
void tlb_flush_page(CPUState *env, target_ulong addr);
void tlb_flush(CPUState *env, int flush_global);
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int is_user, int is_softmmu);
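
/* The wrapper below is a convenience for callers that do not track execute
   permission separately: any readable page is also mapped executable before
   the entry is handed to tlb_set_page_exec(). */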
static inline int tlb_set_page(CPUState *env, target_ulong vaddr,
                               target_phys_addr_t paddr, int prot,
                               int is_user, int is_softmmu)
{
    if (prot & PAGE_READ)
        prot |= PAGE_EXEC;
    return tlb_set_page_exec(env, vaddr, paddr, prot, is_user, is_softmmu);
}

#define CODE_GEN_MAX_SIZE 65536
#define CODE_GEN_ALIGN 16 /* must be >= the size of an icache line */

#define CODE_GEN_PHYS_HASH_BITS 15
#define CODE_GEN_PHYS_HASH_SIZE (1 << CODE_GEN_PHYS_HASH_BITS)

/* maximum total translated code allocated */

/* NOTE: the translated code area cannot be too big because on some
   archs the range of "fast" function calls is limited. Here is a
   summary of the ranges:

   i386  : signed 32 bits
   arm   : signed 26 bits
   ppc   : signed 24 bits
   sparc : signed 32 bits
   alpha : signed 23 bits
*/
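
/* Rough arithmetic behind the sizes chosen below, reading the figures above
   as signed byte-offset widths: 23 bits reach +/-4 MB (hence the 2 MB alpha
   buffer), 24 bits reach +/-8 MB (enough for the 6 MB ppc buffer), and the
   16 MB default relies on a full signed 32-bit range. */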

#if defined(__alpha__)
#define CODE_GEN_BUFFER_SIZE (2 * 1024 * 1024)
#elif defined(__ia64)
#define CODE_GEN_BUFFER_SIZE (4 * 1024 * 1024) /* range of addl */
#elif defined(__powerpc__)
#define CODE_GEN_BUFFER_SIZE (6 * 1024 * 1024)
#else
#define CODE_GEN_BUFFER_SIZE (16 * 1024 * 1024)
#endif

//#define CODE_GEN_BUFFER_SIZE (128 * 1024)

/* estimated block size for TB allocation */
/* XXX: use a per code average code fragment size and modulate it
   according to the host CPU */
#if defined(CONFIG_SOFTMMU)
#define CODE_GEN_AVG_BLOCK_SIZE 128
#else
#define CODE_GEN_AVG_BLOCK_SIZE 64
#endif

#define CODE_GEN_MAX_BLOCKS (CODE_GEN_BUFFER_SIZE / CODE_GEN_AVG_BLOCK_SIZE)

#if defined(__powerpc__)
#define USE_DIRECT_JUMP
#endif
#if defined(__i386__) && !defined(_WIN32)
#define USE_DIRECT_JUMP
#endif
#ifdef VBOX /* bird: not safe in next step because of threading & cpu_interrupt. */
#undef USE_DIRECT_JUMP
#endif /* VBOX */

typedef struct TranslationBlock {
    target_ulong pc;      /* simulated PC corresponding to this block (EIP + CS base) */
    target_ulong cs_base; /* CS base for this block */
    unsigned int flags;   /* flags defining in which context the code was generated */
    uint16_t size;        /* size of target code for this block (1 <=
                             size <= TARGET_PAGE_SIZE) */
    uint16_t cflags;      /* compile flags */
#define CF_CODE_COPY   0x0001 /* block was generated in code copy mode */
#define CF_TB_FP_USED  0x0002 /* fp ops are used in the TB */
#define CF_FP_USED     0x0004 /* fp ops are used in the TB or in a chained TB */
#define CF_SINGLE_INSN 0x0008 /* compile only a single instruction */
#ifdef VBOX
#define CF_RAW_MODE    0x0010 /* block was generated in raw mode */
#endif

    uint8_t *tc_ptr;      /* pointer to the translated code */
    /* next matching tb for physical address. */
    struct TranslationBlock *phys_hash_next;
    /* first and second physical page containing code. The lower bit
       of the pointer tells the index in page_next[] */
    struct TranslationBlock *page_next[2];
    target_ulong page_addr[2];

    /* the following data are used to directly call another TB from
       the code of this one. */
    uint16_t tb_next_offset[2]; /* offset of original jump target */
#ifdef USE_DIRECT_JUMP
    uint16_t tb_jmp_offset[4];  /* offset of jump instruction */
#else
# if defined(VBOX) && defined(__DARWIN__) && defined(__AMD64__)
#  error "First 4GB aren't reachable. jmp dword [tb_next] won't work."
# endif
    uint32_t tb_next[2];        /* address of jump generated code */
#endif
    /* list of TBs jumping to this one. This is a circular list using
       the two least significant bits of the pointers to tell what is
       the next pointer: 0 = jmp_next[0], 1 = jmp_next[1], 2 =
       jmp_first */
    struct TranslationBlock *jmp_next[2];
    struct TranslationBlock *jmp_first;
} TranslationBlock;

static inline unsigned int tb_jmp_cache_hash_page(target_ulong pc)
{
    target_ulong tmp;
    tmp = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS));
    return (tmp >> TB_JMP_PAGE_BITS) & TB_JMP_PAGE_MASK;
}

static inline unsigned int tb_jmp_cache_hash_func(target_ulong pc)
{
    target_ulong tmp;
    tmp = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS));
    return (((tmp >> TB_JMP_PAGE_BITS) & TB_JMP_PAGE_MASK) |
            (tmp & TB_JMP_ADDR_MASK));
}
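
/* Typical lookup sketch (assuming the tb_jmp_cache[] array that cpu-defs.h
   places in the CPU state): the execution loop probes this per-CPU cache
   first and only falls back to the physical hash table on a miss, e.g.
       TranslationBlock *tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
       if (!tb || tb->pc != pc)
           tb = ...slow lookup via tb_phys_hash[]...;
*/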

static inline unsigned int tb_phys_hash_func(unsigned long pc)
{
    return pc & (CODE_GEN_PHYS_HASH_SIZE - 1);
}

TranslationBlock *tb_alloc(target_ulong pc);
void tb_flush(CPUState *env);
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2);

extern TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];

extern uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE];
extern uint8_t *code_gen_ptr;

#if defined(USE_DIRECT_JUMP)

#if defined(__powerpc__)
static inline void tb_set_jmp_target1(unsigned long jmp_addr, unsigned long addr)
{
    uint32_t val, *ptr;

    /* patch the branch destination */
    ptr = (uint32_t *)jmp_addr;
    val = *ptr;
    val = (val & ~0x03fffffc) | ((addr - jmp_addr) & 0x03fffffc);
    *ptr = val;
    /* flush icache */
    asm volatile ("dcbst 0,%0" : : "r"(ptr) : "memory");
    asm volatile ("sync" : : : "memory");
    asm volatile ("icbi 0,%0" : : "r"(ptr) : "memory");
    asm volatile ("sync" : : : "memory");
    asm volatile ("isync" : : : "memory");
}
#elif defined(__i386__)
static inline void tb_set_jmp_target1(unsigned long jmp_addr, unsigned long addr)
{
    /* patch the branch destination */
    *(uint32_t *)jmp_addr = addr - (jmp_addr + 4);
    /* no need to flush icache explicitly */
}
#endif

static inline void tb_set_jmp_target(TranslationBlock *tb,
                                     int n, unsigned long addr)
{
    unsigned long offset;

    offset = tb->tb_jmp_offset[n];
    tb_set_jmp_target1((unsigned long)(tb->tc_ptr + offset), addr);
    offset = tb->tb_jmp_offset[n + 2];
    if (offset != 0xffff)
        tb_set_jmp_target1((unsigned long)(tb->tc_ptr + offset), addr);
}

#else

/* set the jump target */
static inline void tb_set_jmp_target(TranslationBlock *tb,
                                     int n, unsigned long addr)
{
    tb->tb_next[n] = addr;
}

#endif

static inline void tb_add_jump(TranslationBlock *tb, int n,
                               TranslationBlock *tb_next)
{
    /* NOTE: this test is only needed for thread safety */
    if (!tb->jmp_next[n]) {
        /* patch the native jump address */
        tb_set_jmp_target(tb, n, (unsigned long)tb_next->tc_ptr);

        /* add in TB jmp circular list */
        tb->jmp_next[n] = tb_next->jmp_first;
        tb_next->jmp_first = (TranslationBlock *)((long)(tb) | (n));
    }
}
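
/* Sketch of how the tagged pointers above are meant to be walked (the real
   list manipulation lives in exec.c): the two low bits of each link say which
   jmp_next[] slot of the pointed-to TB continues the chain, and the value 2
   marks the list head stored in jmp_first:
       TranslationBlock *ptb = tb_next->jmp_first;
       while (((long)ptb & 3) != 2) {
           int slot = (long)ptb & 3;
           ptb = (TranslationBlock *)((long)ptb & ~3);
           ... visit ptb, which jumps to tb_next through jmp_next[slot] ...
           ptb = ptb->jmp_next[slot];
       }
*/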

TranslationBlock *tb_find_pc(unsigned long pc_ptr);

#ifndef offsetof
#define offsetof(type, field) ((size_t) &((type *)0)->field)
#endif

#if defined(_WIN32)
#define ASM_DATA_SECTION ".section \".data\"\n"
#define ASM_PREVIOUS_SECTION ".section .text\n"
#elif defined(__APPLE__)
#define ASM_DATA_SECTION ".data\n"
#define ASM_PREVIOUS_SECTION ".text\n"
#else
#define ASM_DATA_SECTION ".section \".data\"\n"
#define ASM_PREVIOUS_SECTION ".previous\n"
#endif

#define ASM_OP_LABEL_NAME(n, opname) \
    ASM_NAME(__op_label) #n "." ASM_NAME(opname)

#if defined(__powerpc__)

/* we patch the jump instruction directly */
#define GOTO_TB(opname, tbparam, n)\
do {\
    asm volatile (ASM_DATA_SECTION\
                  ASM_OP_LABEL_NAME(n, opname) ":\n"\
                  ".long 1f\n"\
                  ASM_PREVIOUS_SECTION \
                  "b " ASM_NAME(__op_jmp) #n "\n"\
                  "1:\n");\
} while (0)

#elif defined(__i386__) && defined(USE_DIRECT_JUMP)

/* we patch the jump instruction directly */
#define GOTO_TB(opname, tbparam, n)\
do {\
    asm volatile (".section .data\n"\
                  ASM_OP_LABEL_NAME(n, opname) ":\n"\
                  ".long 1f\n"\
                  ASM_PREVIOUS_SECTION \
                  "jmp " ASM_NAME(__op_jmp) #n "\n"\
                  "1:\n");\
} while (0)

#else

/* jump to next block operations (more portable code, does not need
   cache flushing, but slower because of indirect jump) */
# ifdef VBOX /* bird: GCC4 (and MinGW 3.4.x?) will remove the two unused static
                variables. I've added a dummy __asm__ statement which references
                the two variables to prevent this. */
#  if __GNUC__ >= 4
#   define GOTO_TB(opname, tbparam, n)\
    do {\
        static void __attribute__((unused)) *dummy ## n = &&dummy_label ## n;\
        static void __attribute__((unused)) *__op_label ## n \
            __asm__(ASM_OP_LABEL_NAME(n, opname)) = &&label ## n;\
        __asm__ ("" : : "m" (__op_label ## n), "m" (dummy ## n));\
        goto *(void *)(uintptr_t)(((TranslationBlock *)tbparam)->tb_next[n]);\
    label ## n: ;\
    dummy_label ## n: ;\
    } while (0)
#  else
#   define GOTO_TB(opname, tbparam, n)\
    do {\
        static void __attribute__((unused)) *dummy ## n = &&dummy_label ## n;\
        static void __attribute__((unused)) *__op_label ## n \
            __asm__(ASM_OP_LABEL_NAME(n, opname)) = &&label ## n;\
        goto *(void *)(uintptr_t)(((TranslationBlock *)tbparam)->tb_next[n]);\
    label ## n: ;\
    dummy_label ## n: ;\
    } while (0)
#  endif
# else /* !VBOX */
#define GOTO_TB(opname, tbparam, n)\
do {\
    static void __attribute__((unused)) *dummy ## n = &&dummy_label ## n;\
    static void __attribute__((unused)) *__op_label ## n \
        __asm__(ASM_OP_LABEL_NAME(n, opname)) = &&label ## n;\
    goto *(void *)(((TranslationBlock *)tbparam)->tb_next[n]);\
label ## n: ;\
dummy_label ## n: ;\
} while (0)
# endif /* !VBOX */

#endif

extern CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
extern CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
extern void *io_mem_opaque[IO_MEM_NB_ENTRIES];

#ifdef __powerpc__
static inline int testandset (int *p)
{
    int ret;
    __asm__ __volatile__ (
                          "0:    lwarx %0,0,%1\n"
                          "      xor. %0,%3,%0\n"
                          "      bne 1f\n"
                          "      stwcx. %2,0,%1\n"
                          "      bne- 0b\n"
                          "1:    "
                          : "=&r" (ret)
                          : "r" (p), "r" (1), "r" (0)
                          : "cr0", "memory");
    return ret;
}
#endif

#ifdef __i386__
static inline int testandset (int *p)
{
    long int readval = 0;

    __asm__ __volatile__ ("lock; cmpxchgl %2, %0"
                          : "+m" (*p), "+a" (readval)
                          : "r" (1)
                          : "cc");
    return readval;
}
#endif

#ifdef __x86_64__
static inline int testandset (int *p)
{
    long int readval = 0;

    __asm__ __volatile__ ("lock; cmpxchgl %2, %0"
                          : "+m" (*p), "+a" (readval)
                          : "r" (1)
                          : "cc");
    return readval;
}
#endif
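
/* How the two x86 variants above work: with eax preloaded to 0, the locked
   cmpxchg compares *p with 0 and stores 1 only if the lock was free; either
   way eax ends up holding the previous value of *p, so a zero return means
   "lock acquired" and a non-zero return means it was already taken. */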

#ifdef __s390__
static inline int testandset (int *p)
{
    int ret;

    __asm__ __volatile__ ("0: cs    %0,%1,0(%2)\n"
                          "   jl    0b"
                          : "=&d" (ret)
                          : "r" (1), "a" (p), "0" (*p)
                          : "cc", "memory" );
    return ret;
}
#endif

#ifdef __alpha__
static inline int testandset (int *p)
{
    int ret;
    unsigned long one;

    __asm__ __volatile__ ("0: mov 1,%2\n"
                          "   ldl_l %0,%1\n"
                          "   stl_c %2,%1\n"
                          "   beq %2,1f\n"
                          ".subsection 2\n"
                          "1: br 0b\n"
                          ".previous"
                          : "=r" (ret), "=m" (*p), "=r" (one)
                          : "m" (*p));
    return ret;
}
#endif

#ifdef __sparc__
static inline int testandset (int *p)
{
    int ret;

    __asm__ __volatile__("ldstub [%1], %0"
                         : "=r" (ret)
                         : "r" (p)
                         : "memory");

    return (ret ? 1 : 0);
}
#endif

#ifdef __arm__
static inline int testandset (int *spinlock)
{
    register unsigned int ret;
    __asm__ __volatile__("swp %0, %1, [%2]"
                         : "=r"(ret)
                         : "0"(1), "r"(spinlock));

    return ret;
}
#endif

#ifdef __mc68000
static inline int testandset (int *p)
{
    char ret;
    __asm__ __volatile__("tas %1; sne %0"
                         : "=r" (ret)
                         : "m" (p)
                         : "cc","memory");
    return ret;
}
#endif

#ifdef __ia64
#include <ia64intrin.h>

static inline int testandset (int *p)
{
    return __sync_lock_test_and_set (p, 1);
}
#endif

typedef int spinlock_t;

#define SPIN_LOCK_UNLOCKED 0

#if defined(CONFIG_USER_ONLY)
static inline void spin_lock(spinlock_t *lock)
{
    while (testandset(lock));
}

static inline void spin_unlock(spinlock_t *lock)
{
    *lock = 0;
}

static inline int spin_trylock(spinlock_t *lock)
{
    return !testandset(lock);
}
#else
static inline void spin_lock(spinlock_t *lock)
{
}

static inline void spin_unlock(spinlock_t *lock)
{
}

static inline int spin_trylock(spinlock_t *lock)
{
    return 1;
}
#endif

extern spinlock_t tb_lock;
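
/* Typical usage sketch: callers that create or patch TranslationBlocks
   bracket the work with this lock, e.g.
       spin_lock(&tb_lock);
       ...allocate/link/patch a TB...
       spin_unlock(&tb_lock);
   In the system-emulation build the functions above compile to no-ops, so the
   lock only takes effect for CONFIG_USER_ONLY. */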

extern int tb_invalidated_flag;

#if !defined(CONFIG_USER_ONLY)

void tlb_fill(target_ulong addr, int is_write, int is_user,
              void *retaddr);

#define ACCESS_TYPE 3
#define MEMSUFFIX _code
#define env cpu_single_env

#define DATA_SIZE 1
#include "softmmu_header.h"

#define DATA_SIZE 2
#include "softmmu_header.h"

#define DATA_SIZE 4
#include "softmmu_header.h"

#define DATA_SIZE 8
#include "softmmu_header.h"

#undef ACCESS_TYPE
#undef MEMSUFFIX
#undef env
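
/* Each inclusion of softmmu_header.h above instantiates the guest-memory
   accessors for one operand size with the _code suffix; read together with
   the defines above, this is what makes ldub_code(), lduw_code(), ldl_code()
   and ldq_code() available for instruction fetches such as the one in
   get_phys_addr_code() below. */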

#endif

#if defined(CONFIG_USER_ONLY)
static inline target_ulong get_phys_addr_code(CPUState *env, target_ulong addr)
{
    return addr;
}
#else
# ifdef VBOX
target_ulong remR3PhysGetPhysicalAddressCode(CPUState *env, target_ulong addr, CPUTLBEntry *pTLBEntry);
# endif
/* NOTE: this function can trigger an exception */
/* NOTE2: the returned address is not exactly the physical address: it
   is the offset relative to phys_ram_base */
static inline target_ulong get_phys_addr_code(CPUState *env, target_ulong addr)
{
    int is_user, index, pd;

    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
#if defined(TARGET_I386)
    is_user = ((env->hflags & HF_CPL_MASK) == 3);
#elif defined (TARGET_PPC)
    is_user = msr_pr;
#elif defined (TARGET_MIPS)
    is_user = ((env->hflags & MIPS_HFLAG_MODE) == MIPS_HFLAG_UM);
#elif defined (TARGET_SPARC)
    is_user = (env->psrs == 0);
#elif defined (TARGET_ARM)
    is_user = ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR);
#elif defined (TARGET_SH4)
    is_user = ((env->sr & SR_MD) == 0);
#else
#error unimplemented CPU
#endif
    if (__builtin_expect(env->tlb_table[is_user][index].addr_code !=
                         (addr & TARGET_PAGE_MASK), 0)) {
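        /* code TLB miss: the dummy byte fetch below goes through the softmmu
           slow path, which fills addr_code and addend for this page. */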
        ldub_code(addr);
    }
    pd = env->tlb_table[is_user][index].addr_code & ~TARGET_PAGE_MASK;
    if (pd > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
#ifdef VBOX
        /* deal with non-MMIO access handlers. */
        return remR3PhysGetPhysicalAddressCode(env, addr, &env->tlb_table[is_user][index]);
#else
        cpu_abort(env, "Trying to execute code outside RAM or ROM at 0x%08lx\n", addr);
#endif
    }
    return addr + env->tlb_table[is_user][index].addend - (unsigned long)phys_ram_base;
}
#endif


#ifdef USE_KQEMU
#define KQEMU_MODIFY_PAGE_MASK (0xff & ~(VGA_DIRTY_FLAG | CODE_DIRTY_FLAG))

int kqemu_init(CPUState *env);
int kqemu_cpu_exec(CPUState *env);
void kqemu_flush_page(CPUState *env, target_ulong addr);
void kqemu_flush(CPUState *env, int global);
void kqemu_set_notdirty(CPUState *env, ram_addr_t ram_addr);
void kqemu_modify_page(CPUState *env, ram_addr_t ram_addr);
void kqemu_cpu_interrupt(CPUState *env);
void kqemu_record_dump(void);

static inline int kqemu_is_ok(CPUState *env)
{
    return(env->kqemu_enabled &&
           (env->cr[0] & CR0_PE_MASK) &&
           !(env->hflags & HF_INHIBIT_IRQ_MASK) &&
           (env->eflags & IF_MASK) &&
           !(env->eflags & VM_MASK) &&
           (env->kqemu_enabled == 2 ||
            ((env->hflags & HF_CPL_MASK) == 3 &&
             (env->eflags & IOPL_MASK) != IOPL_MASK)));
}

#endif