VirtualBox

source: vbox/trunk/src/recompiler/cpu-all.h @ 55

Last change on this file: revision 55, checked in by vboxsync, 18 years ago

RAM size should be an *unsigned* int

/*
 * defines common to all virtual CPUs
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#ifndef CPU_ALL_H
#define CPU_ALL_H

#if defined(__arm__) || defined(__sparc__)
#define WORDS_ALIGNED
#endif

/* some important defines:
 *
 * WORDS_ALIGNED : if defined, the host cpu can only make word aligned
 * memory accesses.
 *
 * WORDS_BIGENDIAN : if defined, the host cpu is big endian and
 * otherwise little endian.
 *
 * (TARGET_WORDS_ALIGNED : same for target cpu (not supported yet))
 *
 * TARGET_WORDS_BIGENDIAN : same for target cpu
 */

#include "bswap.h"

#if defined(WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
#define BSWAP_NEEDED
#endif

#ifdef BSWAP_NEEDED

static inline uint16_t tswap16(uint16_t s)
{
    return bswap16(s);
}

static inline uint32_t tswap32(uint32_t s)
{
    return bswap32(s);
}

static inline uint64_t tswap64(uint64_t s)
{
    return bswap64(s);
}

static inline void tswap16s(uint16_t *s)
{
    *s = bswap16(*s);
}

static inline void tswap32s(uint32_t *s)
{
    *s = bswap32(*s);
}

static inline void tswap64s(uint64_t *s)
{
    *s = bswap64(*s);
}

#else

static inline uint16_t tswap16(uint16_t s)
{
    return s;
}

static inline uint32_t tswap32(uint32_t s)
{
    return s;
}

static inline uint64_t tswap64(uint64_t s)
{
    return s;
}

static inline void tswap16s(uint16_t *s)
{
}

static inline void tswap32s(uint32_t *s)
{
}

static inline void tswap64s(uint64_t *s)
{
}

#endif

#if TARGET_LONG_SIZE == 4
#define tswapl(s) tswap32(s)
#define tswapls(s) tswap32s((uint32_t *)(s))
#else
#define tswapl(s) tswap64(s)
#define tswapls(s) tswap64s((uint64_t *)(s))
#endif
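
/* Illustrative usage sketch (not part of the original header): the tswap*
 * helpers convert between host and target byte order, and compile to no-ops
 * when both sides share the same endianness:
 *
 *     uint32_t     val = tswap32(raw_val);   // byte-swapped only if needed
 *     target_ulong reg = tswapl(raw_reg);    // 32 or 64 bit, per TARGET_LONG_SIZE
 */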

/* NOTE: the arm FPA is awkward: the two 32 bit words of a double are
   stored in big endian order! */
typedef union {
    double d;
#if defined(WORDS_BIGENDIAN) || (defined(__arm__) && !defined(__VFP_FP__))
    struct {
        uint32_t upper;
        uint32_t lower;
    } l;
#else
    struct {
        uint32_t lower;
        uint32_t upper;
    } l;
#endif
    uint64_t ll;
} CPU_DoubleU;
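
/* Illustrative sketch (not part of the original header): CPU_DoubleU lets the
 * accessors below reinterpret a double as its raw 64 bit pattern or as two
 * 32 bit halves, with the struct field order chosen to match the host layout:
 *
 *     CPU_DoubleU u;
 *     u.d = 1.0;
 *     // u.ll is the IEEE-754 bit pattern; u.l.upper is the sign/exponent half
 *     // regardless of host word order
 *     assert(u.l.upper == 0x3ff00000 && u.l.lower == 0);
 */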

/* CPU memory access without any memory or io remapping */

/*
 * the generic syntax for the memory accesses is:
 *
 * load:  ld{type}{sign}{size}{endian}_{access_type}(ptr)
 *
 * store: st{type}{size}{endian}_{access_type}(ptr, val)
 *
 * type is:
 * (empty): integer access
 *   f    : float access
 *
 * sign is:
 * (empty): for floats or 32 bit size
 *   u    : unsigned
 *   s    : signed
 *
 * size is:
 *   b: 8 bits
 *   w: 16 bits
 *   l: 32 bits
 *   q: 64 bits
 *
 * endian is:
 * (empty): target cpu endianness or 8 bit access
 *   r    : reversed target cpu endianness (not implemented yet)
 *   be   : big endian (not implemented yet)
 *   le   : little endian (not implemented yet)
 *
 * access_type is:
 *   raw    : host memory access
 *   user   : user mode access using soft MMU
 *   kernel : kernel mode access using soft MMU
 */
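
/* Worked example of the naming scheme above (illustrative only):
 *
 *     ldub_raw(p)    - load, integer, unsigned, byte, raw host memory access
 *     ldsw_raw(p)    - load, integer, signed, 16 bit word, raw host memory access
 *     stl_raw(p, v)  - store, integer, 32 bit long, raw host memory access
 *     ldfq_kernel(p) - load, float, 64 bit, kernel mode access using the soft MMU
 */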
#ifdef VBOX

#if !defined(REMR3PHYSREADWRITE_DEFINED)
#define REMR3PHYSREADWRITE_DEFINED
/* Header sharing between vbox & qemu is rather ugly. */
void remR3PhysReadBytes(uint8_t *pbSrcPhys, void *pvDst, unsigned cb);
uint8_t remR3PhysReadUByte(uint8_t *pbSrcPhys);
int8_t remR3PhysReadSByte(uint8_t *pbSrcPhys);
uint16_t remR3PhysReadUWord(uint8_t *pbSrcPhys);
int16_t remR3PhysReadSWord(uint8_t *pbSrcPhys);
uint32_t remR3PhysReadULong(uint8_t *pbSrcPhys);
int32_t remR3PhysReadSLong(uint8_t *pbSrcPhys);
void remR3PhysWriteBytes(uint8_t *pbDstPhys, const void *pvSrc, unsigned cb);
void remR3PhysWriteByte(uint8_t *pbDstPhys, uint8_t val);
void remR3PhysWriteWord(uint8_t *pbDstPhys, uint16_t val);
void remR3PhysWriteDword(uint8_t *pbDstPhys, uint32_t val);
void *remR3GCPhys2HCVirt(void *env, target_ulong addr);
target_ulong remR3HCVirt2GCPhys(void *env, void *addr);
void remR3GrowDynRange(unsigned long physaddr);
#endif

static inline int ldub_p(void *ptr)
{
    return remR3PhysReadUByte(ptr);
}

static inline int ldsb_p(void *ptr)
{
    return remR3PhysReadSByte(ptr);
}

static inline void stb_p(void *ptr, int v)
{
    remR3PhysWriteByte(ptr, v);
}

#else
static inline int ldub_p(void *ptr)
{
    return *(uint8_t *)ptr;
}

static inline int ldsb_p(void *ptr)
{
    return *(int8_t *)ptr;
}

static inline void stb_p(void *ptr, int v)
{
    *(uint8_t *)ptr = v;
}
#endif

/* NOTE: on arm, putting 2 in /proc/sys/debug/alignment so that the
   kernel handles unaligned load/stores may give better results, but
   it is a system wide setting: bad */
#if !defined(TARGET_WORDS_BIGENDIAN) && (defined(WORDS_BIGENDIAN) || defined(WORDS_ALIGNED))
/* conservative code for little endian unaligned accesses */
static inline int lduw_p(void *ptr)
{
#ifdef __powerpc__
    int val;
    __asm__ __volatile__ ("lhbrx %0,0,%1" : "=r" (val) : "r" (ptr));
    return val;
#else
    uint8_t *p = ptr;
    return p[0] | (p[1] << 8);
#endif
}

static inline int ldsw_p(void *ptr)
{
#ifdef __powerpc__
    int val;
    __asm__ __volatile__ ("lhbrx %0,0,%1" : "=r" (val) : "r" (ptr));
    return (int16_t)val;
#else
    uint8_t *p = ptr;
    return (int16_t)(p[0] | (p[1] << 8));
#endif
}

static inline int ldl_p(void *ptr)
{
#ifdef __powerpc__
    int val;
    __asm__ __volatile__ ("lwbrx %0,0,%1" : "=r" (val) : "r" (ptr));
    return val;
#else
    uint8_t *p = ptr;
    return p[0] | (p[1] << 8) | (p[2] << 16) | (p[3] << 24);
#endif
}

static inline uint64_t ldq_p(void *ptr)
{
    uint8_t *p = ptr;
    uint32_t v1, v2;
    v1 = ldl_p(p);
    v2 = ldl_p(p + 4);
    return v1 | ((uint64_t)v2 << 32);
}

static inline void stw_p(void *ptr, int v)
{
#ifdef __powerpc__
    __asm__ __volatile__ ("sthbrx %1,0,%2" : "=m" (*(uint16_t *)ptr) : "r" (v), "r" (ptr));
#else
    uint8_t *p = ptr;
    p[0] = v;
    p[1] = v >> 8;
#endif
}

static inline void stl_p(void *ptr, int v)
{
#ifdef __powerpc__
    __asm__ __volatile__ ("stwbrx %1,0,%2" : "=m" (*(uint32_t *)ptr) : "r" (v), "r" (ptr));
#else
    uint8_t *p = ptr;
    p[0] = v;
    p[1] = v >> 8;
    p[2] = v >> 16;
    p[3] = v >> 24;
#endif
}

static inline void stq_p(void *ptr, uint64_t v)
{
    uint8_t *p = ptr;
    stl_p(p, (uint32_t)v);
    stl_p(p + 4, v >> 32);
}

/* float access */

static inline float ldfl_p(void *ptr)
{
    union {
        float f;
        uint32_t i;
    } u;
    u.i = ldl_p(ptr);
    return u.f;
}

static inline void stfl_p(void *ptr, float v)
{
    union {
        float f;
        uint32_t i;
    } u;
    u.f = v;
    stl_p(ptr, u.i);
}

static inline double ldfq_p(void *ptr)
{
    CPU_DoubleU u;
    u.l.lower = ldl_p(ptr);
    u.l.upper = ldl_p(ptr + 4);
    return u.d;
}

static inline void stfq_p(void *ptr, double v)
{
    CPU_DoubleU u;
    u.d = v;
    stl_p(ptr, u.l.lower);
    stl_p(ptr + 4, u.l.upper);
}

#elif defined(TARGET_WORDS_BIGENDIAN) && (!defined(WORDS_BIGENDIAN) || defined(WORDS_ALIGNED))
static inline int lduw_p(void *ptr)
{
#if defined(__i386__)
    int val;
    asm volatile ("movzwl %1, %0\n"
                  "xchgb %b0, %h0\n"
                  : "=q" (val)
                  : "m" (*(uint16_t *)ptr));
    return val;
#else
    uint8_t *b = (uint8_t *) ptr;
    return ((b[0] << 8) | b[1]);
#endif
}

static inline int ldsw_p(void *ptr)
{
#if defined(__i386__)
    int val;
    asm volatile ("movzwl %1, %0\n"
                  "xchgb %b0, %h0\n"
                  : "=q" (val)
                  : "m" (*(uint16_t *)ptr));
    return (int16_t)val;
#else
    uint8_t *b = (uint8_t *) ptr;
    return (int16_t)((b[0] << 8) | b[1]);
#endif
}

static inline int ldl_p(void *ptr)
{
#if defined(__i386__) || defined(__x86_64__)
    int val;
    asm volatile ("movl %1, %0\n"
                  "bswap %0\n"
                  : "=r" (val)
                  : "m" (*(uint32_t *)ptr));
    return val;
#else
    uint8_t *b = (uint8_t *) ptr;
    return (b[0] << 24) | (b[1] << 16) | (b[2] << 8) | b[3];
#endif
}

static inline uint64_t ldq_p(void *ptr)
{
    uint32_t a, b;
    a = ldl_p(ptr);
    b = ldl_p(ptr + 4);
    return (((uint64_t)a << 32) | b);
}

static inline void stw_p(void *ptr, int v)
{
#if defined(__i386__)
    asm volatile ("xchgb %b0, %h0\n"
                  "movw %w0, %1\n"
                  : "=q" (v)
                  : "m" (*(uint16_t *)ptr), "0" (v));
#else
    uint8_t *d = (uint8_t *) ptr;
    d[0] = v >> 8;
    d[1] = v;
#endif
}

static inline void stl_p(void *ptr, int v)
{
#if defined(__i386__) || defined(__x86_64__)
    asm volatile ("bswap %0\n"
                  "movl %0, %1\n"
                  : "=r" (v)
                  : "m" (*(uint32_t *)ptr), "0" (v));
#else
    uint8_t *d = (uint8_t *) ptr;
    d[0] = v >> 24;
    d[1] = v >> 16;
    d[2] = v >> 8;
    d[3] = v;
#endif
}

static inline void stq_p(void *ptr, uint64_t v)
{
    stl_p(ptr, v >> 32);
    stl_p(ptr + 4, v);
}

/* float access */

static inline float ldfl_p(void *ptr)
{
    union {
        float f;
        uint32_t i;
    } u;
    u.i = ldl_p(ptr);
    return u.f;
}

static inline void stfl_p(void *ptr, float v)
{
    union {
        float f;
        uint32_t i;
    } u;
    u.f = v;
    stl_p(ptr, u.i);
}

static inline double ldfq_p(void *ptr)
{
    CPU_DoubleU u;
    u.l.upper = ldl_p(ptr);
    u.l.lower = ldl_p(ptr + 4);
    return u.d;
}

static inline void stfq_p(void *ptr, double v)
{
    CPU_DoubleU u;
    u.d = v;
    stl_p(ptr, u.l.upper);
    stl_p(ptr + 4, u.l.lower);
}

#else

#ifdef VBOX
static inline int lduw_p(void *ptr)
{
    return remR3PhysReadUWord(ptr);
}

static inline int ldsw_p(void *ptr)
{
    return remR3PhysReadSWord(ptr);
}

static inline int ldl_p(void *ptr)
{
    return remR3PhysReadULong(ptr);
}

static inline uint64_t ldq_p(void *ptr)
{
    uint64_t val;

    remR3PhysReadBytes(ptr, &val, sizeof(val));
    return val;
}

static inline void stw_p(void *ptr, int v)
{
    remR3PhysWriteWord(ptr, (uint16_t)v);
}

static inline void stl_p(void *ptr, int v)
{
    remR3PhysWriteDword(ptr, (uint32_t)v);
}

static inline void stq_p(void *ptr, uint64_t v)
{
    remR3PhysWriteBytes(ptr, &v, sizeof(v));
}

/* float access */

static inline float ldfl_p(void *ptr)
{
    float val;

    remR3PhysReadBytes(ptr, &val, sizeof(val));
    return val;
}

static inline double ldfq_p(void *ptr)
{
    double val;

    remR3PhysReadBytes(ptr, &val, sizeof(val));
    return val;
}

static inline void stfl_p(void *ptr, float v)
{
    remR3PhysWriteBytes(ptr, &v, sizeof(v));
}

static inline void stfq_p(void *ptr, double v)
{
    remR3PhysWriteBytes(ptr, &v, sizeof(v));
}
#else
static inline int lduw_p(void *ptr)
{
    return *(uint16_t *)ptr;
}

static inline int ldsw_p(void *ptr)
{
    return *(int16_t *)ptr;
}

static inline int ldl_p(void *ptr)
{
    return *(uint32_t *)ptr;
}

static inline uint64_t ldq_p(void *ptr)
{
    return *(uint64_t *)ptr;
}

static inline void stw_p(void *ptr, int v)
{
    *(uint16_t *)ptr = v;
}

static inline void stl_p(void *ptr, int v)
{
    *(uint32_t *)ptr = v;
}

static inline void stq_p(void *ptr, uint64_t v)
{
    *(uint64_t *)ptr = v;
}

/* float access */

static inline float ldfl_p(void *ptr)
{
    return *(float *)ptr;
}

static inline double ldfq_p(void *ptr)
{
    return *(double *)ptr;
}

static inline void stfl_p(void *ptr, float v)
{
    *(float *)ptr = v;
}

static inline void stfq_p(void *ptr, double v)
{
    *(double *)ptr = v;
}
#endif /* VBOX */

#endif

/* MMU memory access macros */

/* NOTE: we use double casts if pointers and target_ulong have
   different sizes */
#define ldub_raw(p) ldub_p((uint8_t *)(long)(p))
#define ldsb_raw(p) ldsb_p((uint8_t *)(long)(p))
#define lduw_raw(p) lduw_p((uint8_t *)(long)(p))
#define ldsw_raw(p) ldsw_p((uint8_t *)(long)(p))
#define ldl_raw(p) ldl_p((uint8_t *)(long)(p))
#define ldq_raw(p) ldq_p((uint8_t *)(long)(p))
#define ldfl_raw(p) ldfl_p((uint8_t *)(long)(p))
#define ldfq_raw(p) ldfq_p((uint8_t *)(long)(p))
#define stb_raw(p, v) stb_p((uint8_t *)(long)(p), v)
#define stw_raw(p, v) stw_p((uint8_t *)(long)(p), v)
#define stl_raw(p, v) stl_p((uint8_t *)(long)(p), v)
#define stq_raw(p, v) stq_p((uint8_t *)(long)(p), v)
#define stfl_raw(p, v) stfl_p((uint8_t *)(long)(p), v)
#define stfq_raw(p, v) stfq_p((uint8_t *)(long)(p), v)
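
/* Illustrative note (not part of the original header): the intermediate cast
 * through (long) keeps the compiler quiet when target_ulong is narrower than a
 * host pointer, e.g. a 32 bit guest address on a 64 bit host:
 *
 *     target_ulong gaddr = 0x1000;   // 32 bit guest value
 *     int v = ldl_raw(gaddr);        // expands to ldl_p((uint8_t *)(long)(gaddr))
 */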


#if defined(CONFIG_USER_ONLY)

/* if user mode, no other memory access functions */
#define ldub(p) ldub_raw(p)
#define ldsb(p) ldsb_raw(p)
#define lduw(p) lduw_raw(p)
#define ldsw(p) ldsw_raw(p)
#define ldl(p) ldl_raw(p)
#define ldq(p) ldq_raw(p)
#define ldfl(p) ldfl_raw(p)
#define ldfq(p) ldfq_raw(p)
#define stb(p, v) stb_raw(p, v)
#define stw(p, v) stw_raw(p, v)
#define stl(p, v) stl_raw(p, v)
#define stq(p, v) stq_raw(p, v)
#define stfl(p, v) stfl_raw(p, v)
#define stfq(p, v) stfq_raw(p, v)

#define ldub_code(p) ldub_raw(p)
#define ldsb_code(p) ldsb_raw(p)
#define lduw_code(p) lduw_raw(p)
#define ldsw_code(p) ldsw_raw(p)
#define ldl_code(p) ldl_raw(p)

#define ldub_kernel(p) ldub_raw(p)
#define ldsb_kernel(p) ldsb_raw(p)
#define lduw_kernel(p) lduw_raw(p)
#define ldsw_kernel(p) ldsw_raw(p)
#define ldl_kernel(p) ldl_raw(p)
#define ldfl_kernel(p) ldfl_raw(p)
#define ldfq_kernel(p) ldfq_raw(p)
#define stb_kernel(p, v) stb_raw(p, v)
#define stw_kernel(p, v) stw_raw(p, v)
#define stl_kernel(p, v) stl_raw(p, v)
#define stq_kernel(p, v) stq_raw(p, v)
#define stfl_kernel(p, v) stfl_raw(p, v)
#define stfq_kernel(p, v) stfq_raw(p, v)

#endif /* defined(CONFIG_USER_ONLY) */

/* page related stuff */

#define TARGET_PAGE_SIZE (1 << TARGET_PAGE_BITS)
#define TARGET_PAGE_MASK ~(TARGET_PAGE_SIZE - 1)
#define TARGET_PAGE_ALIGN(addr) (((addr) + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK)

extern unsigned long qemu_real_host_page_size;
extern unsigned long qemu_host_page_bits;
extern unsigned long qemu_host_page_size;
extern unsigned long qemu_host_page_mask;

#define HOST_PAGE_ALIGN(addr) (((addr) + qemu_host_page_size - 1) & qemu_host_page_mask)
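
/* Worked example (illustrative only), assuming TARGET_PAGE_BITS == 12 so that
 * TARGET_PAGE_SIZE == 0x1000 and TARGET_PAGE_MASK == 0xfffff000:
 *
 *     TARGET_PAGE_ALIGN(0x12345) == (0x12345 + 0xfff) & 0xfffff000 == 0x13000
 *     0x12345 & TARGET_PAGE_MASK == 0x12000   // start of the containing page
 */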

/* same as PROT_xxx */
#define PAGE_READ      0x0001
#define PAGE_WRITE     0x0002
#define PAGE_EXEC      0x0004
#define PAGE_BITS      (PAGE_READ | PAGE_WRITE | PAGE_EXEC)
#define PAGE_VALID     0x0008
/* original state of the write flag (used when tracking self-modifying
   code) */
#define PAGE_WRITE_ORG 0x0010

void page_dump(FILE *f);
int page_get_flags(unsigned long address);
void page_set_flags(unsigned long start, unsigned long end, int flags);
void page_unprotect_range(uint8_t *data, unsigned long data_size);

#define SINGLE_CPU_DEFINES
#ifdef SINGLE_CPU_DEFINES

#if defined(TARGET_I386)

#define CPUState CPUX86State
#define cpu_init cpu_x86_init
#define cpu_exec cpu_x86_exec
#define cpu_gen_code cpu_x86_gen_code
#define cpu_signal_handler cpu_x86_signal_handler

#elif defined(TARGET_ARM)

#define CPUState CPUARMState
#define cpu_init cpu_arm_init
#define cpu_exec cpu_arm_exec
#define cpu_gen_code cpu_arm_gen_code
#define cpu_signal_handler cpu_arm_signal_handler

#elif defined(TARGET_SPARC)

#define CPUState CPUSPARCState
#define cpu_init cpu_sparc_init
#define cpu_exec cpu_sparc_exec
#define cpu_gen_code cpu_sparc_gen_code
#define cpu_signal_handler cpu_sparc_signal_handler

#elif defined(TARGET_PPC)

#define CPUState CPUPPCState
#define cpu_init cpu_ppc_init
#define cpu_exec cpu_ppc_exec
#define cpu_gen_code cpu_ppc_gen_code
#define cpu_signal_handler cpu_ppc_signal_handler

#else

#error unsupported target CPU

#endif

#endif /* SINGLE_CPU_DEFINES */

void cpu_dump_state(CPUState *env, FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
                    int flags);

void cpu_abort(CPUState *env, const char *fmt, ...);
extern CPUState *cpu_single_env;
extern int code_copy_enabled;

#define CPU_INTERRUPT_EXIT   0x01 /* wants exit from main loop */
#define CPU_INTERRUPT_HARD   0x02 /* hardware interrupt pending */
#define CPU_INTERRUPT_EXITTB 0x04 /* exit the current TB (use for x86 a20 case) */
#define CPU_INTERRUPT_TIMER  0x08 /* internal timer exception pending */
#ifdef VBOX
/** Executes a single instruction. cpu_exec() will normally return EXCP_SINGLE_INSTR. */
#define CPU_INTERRUPT_SINGLE_INSTR            0x0040
/** Executing a CPU_INTERRUPT_SINGLE_INSTR request, quit the cpu_loop. (for exceptions and suchlike) */
#define CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT  0x0080
/** VM execution was interrupted by VMR3Reset, VMR3Suspend or VMR3PowerOff. */
#define CPU_INTERRUPT_RC                      0x0100
/** Exit current TB to process an external exit request (also in op.c!!) */
#define CPU_INTERRUPT_EXTERNAL_EXIT           0x0200
/** Exit current TB to process an external hard interrupt request (also in op.c!!) */
#define CPU_INTERRUPT_EXTERNAL_HARD           0x0400
/** Exit current TB to process an external timer request (also in op.c!!) */
#define CPU_INTERRUPT_EXTERNAL_TIMER          0x0800
/** Exit current TB to process an external DMA request (also in op.c!!) */
#define CPU_INTERRUPT_EXTERNAL_DMA            0x1000
#endif /* VBOX */
void cpu_interrupt(CPUState *s, int mask);
void cpu_reset_interrupt(CPUState *env, int mask);
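
/* Illustrative usage sketch (not part of the original header): device emulation
 * code asks the CPU loop to service an interrupt by setting one of the flags
 * above, and clears it again once the request has been handled:
 *
 *     cpu_interrupt(cpu_single_env, CPU_INTERRUPT_HARD);
 *     ...
 *     cpu_reset_interrupt(cpu_single_env, CPU_INTERRUPT_HARD);
 */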

int cpu_breakpoint_insert(CPUState *env, target_ulong pc);
int cpu_breakpoint_remove(CPUState *env, target_ulong pc);
void cpu_single_step(CPUState *env, int enabled);
void cpu_reset(CPUState *s);

/* Return the physical page corresponding to a virtual one. Use it
   only for debugging because no protection checks are done. Return -1
   if no page found. */
target_ulong cpu_get_phys_page_debug(CPUState *env, target_ulong addr);

#define CPU_LOG_TB_OUT_ASM (1 << 0)
#define CPU_LOG_TB_IN_ASM  (1 << 1)
#define CPU_LOG_TB_OP      (1 << 2)
#define CPU_LOG_TB_OP_OPT  (1 << 3)
#define CPU_LOG_INT        (1 << 4)
#define CPU_LOG_EXEC       (1 << 5)
#define CPU_LOG_PCALL      (1 << 6)
#define CPU_LOG_IOPORT     (1 << 7)
#define CPU_LOG_TB_CPU     (1 << 8)

/* define log items */
typedef struct CPULogItem {
    int mask;
    const char *name;
    const char *help;
} CPULogItem;

extern CPULogItem cpu_log_items[];

void cpu_set_log(int log_flags);
void cpu_set_log_filename(const char *filename);
int cpu_str_to_log_mask(const char *str);
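
/* Illustrative usage sketch (not part of the original header): the CPU_LOG_*
 * values form a bitmask, so categories can be combined directly, or built from
 * a name string whose valid entries are whatever cpu_log_items[] defines:
 *
 *     cpu_set_log(CPU_LOG_INT | CPU_LOG_EXEC);
 *     cpu_set_log(cpu_str_to_log_mask(str));   // str: user-supplied category list
 */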

/* IO ports API */

/* NOTE: as these functions may even be used when there is an isa
   bridge on non x86 targets, we always define them. */
#ifndef NO_CPU_IO_DEFS
void cpu_outb(CPUState *env, int addr, int val);
void cpu_outw(CPUState *env, int addr, int val);
void cpu_outl(CPUState *env, int addr, int val);
int cpu_inb(CPUState *env, int addr);
int cpu_inw(CPUState *env, int addr);
int cpu_inl(CPUState *env, int addr);
#endif

/* memory API */
#ifdef VBOX
extern uint32_t phys_ram_size;
#else /* !VBOX */
extern int phys_ram_fd;
extern int phys_ram_size;
extern uint8_t *phys_ram_base;
#endif /* !VBOX */
extern uint8_t *phys_ram_dirty;

/* physical memory access */
#define IO_MEM_NB_ENTRIES  256
#define TLB_INVALID_MASK   (1 << 3)
#define IO_MEM_SHIFT       4

#define IO_MEM_RAM         (0 << IO_MEM_SHIFT) /* hardcoded offset */
#define IO_MEM_ROM         (1 << IO_MEM_SHIFT) /* hardcoded offset */
#define IO_MEM_UNASSIGNED  (2 << IO_MEM_SHIFT)
#define IO_MEM_CODE        (3 << IO_MEM_SHIFT) /* used internally, never use directly */
#define IO_MEM_NOTDIRTY    (4 << IO_MEM_SHIFT) /* used internally, never use directly */
#ifdef VBOX
#define IO_MEM_RAM_MISSING (5 << IO_MEM_SHIFT) /* used internally, never use directly */
#endif

typedef void CPUWriteMemoryFunc(void *opaque, target_phys_addr_t addr, uint32_t value);
typedef uint32_t CPUReadMemoryFunc(void *opaque, target_phys_addr_t addr);

void cpu_register_physical_memory(target_phys_addr_t start_addr,
                                  unsigned long size,
                                  unsigned long phys_offset);
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write,
                           void *opaque);
CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index);
CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index);
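
/* Illustrative sketch (not part of the original header) of how a device model
 * could hook MMIO handlers into the tables above. The mydev_* callbacks are
 * hypothetical, and the tables are assumed here to hold one handler per access
 * size (byte, word, long); the value returned by cpu_register_io_memory() is
 * then used as the phys_offset of the region:
 *
 *     static CPUReadMemoryFunc  *mydev_read[3]  = { mydev_readb,  mydev_readw,  mydev_readl  };
 *     static CPUWriteMemoryFunc *mydev_write[3] = { mydev_writeb, mydev_writew, mydev_writel };
 *
 *     int mmio = cpu_register_io_memory(0, mydev_read, mydev_write, opaque);
 *     cpu_register_physical_memory(0xe0000000, 0x1000, mmio);
 */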

void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write);
static inline void cpu_physical_memory_read(target_phys_addr_t addr,
                                            uint8_t *buf, int len)
{
    cpu_physical_memory_rw(addr, buf, len, 0);
}
static inline void cpu_physical_memory_write(target_phys_addr_t addr,
                                             const uint8_t *buf, int len)
{
    cpu_physical_memory_rw(addr, (uint8_t *)buf, len, 1);
}
uint32_t ldl_phys(target_phys_addr_t addr);
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val);
void stl_phys(target_phys_addr_t addr, uint32_t val);

int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write);

/* read dirty bit (return 0 or 1) */
static inline int cpu_physical_memory_is_dirty(target_ulong addr)
{
    return phys_ram_dirty[addr >> TARGET_PAGE_BITS];
}

static inline void cpu_physical_memory_set_dirty(target_ulong addr)
{
    phys_ram_dirty[addr >> TARGET_PAGE_BITS] = 1;
}

void cpu_physical_memory_reset_dirty(target_ulong start, target_ulong end);
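
/* Illustrative note (not part of the original header): dirty state is tracked
 * with one entry per target page, so all addresses within a page share it.
 * Assuming TARGET_PAGE_BITS == 12:
 *
 *     cpu_physical_memory_set_dirty(0x12345);   // marks page index 0x12
 *     cpu_physical_memory_is_dirty(0x12000);    // same page, also reports dirty
 */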

void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...));


#ifdef VBOX
void tb_invalidate_virt(CPUState *env, uint32_t eip);
#endif

#endif /* CPU_ALL_H */