VirtualBox

source: vbox/trunk/src/recompiler/cpu-all.h@5926

Last change on this file since 5926 was 4535, checked in by vboxsync, 17 years ago

Switched to reading and writing through PGM (like we already did for 64 bits hosts)

  • Property svn:eol-style set to native
File size: 30.0 KB
/*
 * defines common to all virtual CPUs
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#ifndef CPU_ALL_H
#define CPU_ALL_H

#ifdef VBOX
# ifndef LOG_GROUP
#  include <VBox/log.h>
#  define LOG_GROUP LOG_GROUP_REM
# endif
# include <VBox/pgm.h> /* PGM_DYNAMIC_RAM_ALLOC */
#endif

#if defined(__arm__) || defined(__sparc__)
#define WORDS_ALIGNED
#endif

/* some important defines:
 *
 * WORDS_ALIGNED : if defined, the host cpu can only make word aligned
 * memory accesses.
 *
 * WORDS_BIGENDIAN : if defined, the host cpu is big endian and
 * otherwise little endian.
 *
 * (TARGET_WORDS_ALIGNED : same for target cpu (not supported yet))
 *
 * TARGET_WORDS_BIGENDIAN : same for target cpu
 */

#include "bswap.h"

#if defined(WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
#define BSWAP_NEEDED
#endif

#ifdef BSWAP_NEEDED

static inline uint16_t tswap16(uint16_t s)
{
    return bswap16(s);
}

static inline uint32_t tswap32(uint32_t s)
{
    return bswap32(s);
}

static inline uint64_t tswap64(uint64_t s)
{
    return bswap64(s);
}

static inline void tswap16s(uint16_t *s)
{
    *s = bswap16(*s);
}

static inline void tswap32s(uint32_t *s)
{
    *s = bswap32(*s);
}

static inline void tswap64s(uint64_t *s)
{
    *s = bswap64(*s);
}

#else

static inline uint16_t tswap16(uint16_t s)
{
    return s;
}

static inline uint32_t tswap32(uint32_t s)
{
    return s;
}

static inline uint64_t tswap64(uint64_t s)
{
    return s;
}

static inline void tswap16s(uint16_t *s)
{
}

static inline void tswap32s(uint32_t *s)
{
}

static inline void tswap64s(uint64_t *s)
{
}

#endif

#if TARGET_LONG_SIZE == 4
#define tswapl(s) tswap32(s)
#define tswapls(s) tswap32s((uint32_t *)(s))
#define bswaptls(s) bswap32s(s)
#else
#define tswapl(s) tswap64(s)
#define tswapls(s) tswap64s((uint64_t *)(s))
#define bswaptls(s) bswap64s(s)
#endif

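/* Usage sketch (illustrative): tswap32() byteswaps only when host and target
 * endianness differ, so a value in target byte order can be converted to a
 * host integer unconditionally:
 *
 *     uint32_t guest_val = ...;                // value in target byte order
 *     uint32_t host_val  = tswap32(guest_val);
 *
 * On a little-endian host running a little-endian target this compiles to a
 * plain move; with mismatched endianness it becomes a byte swap. */
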
/* NOTE: the ARM FPA format is awkward: the two 32-bit halves of a double
   are stored in big-endian order! */
typedef union {
    float64 d;
#if defined(WORDS_BIGENDIAN) \
    || (defined(__arm__) && !defined(__VFP_FP__) && !defined(CONFIG_SOFTFLOAT))
    struct {
        uint32_t upper;
        uint32_t lower;
    } l;
#else
    struct {
        uint32_t lower;
        uint32_t upper;
    } l;
#endif
    uint64_t ll;
} CPU_DoubleU;

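/* Usage sketch (illustrative): CPU_DoubleU lets the 32-bit halves of a
 * float64 be addressed explicitly, independent of host layout:
 *
 *     CPU_DoubleU u;
 *     u.d = some_float64;        // "some_float64" is any float64 value
 *     uint32_t hi = u.l.upper;   // most significant 32 bits
 *     uint32_t lo = u.l.lower;   // least significant 32 bits
 */
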
/* CPU memory access without any memory or io remapping */

/*
 * the generic syntax for the memory accesses is:
 *
 * load:  ld{type}{sign}{size}{endian}_{access_type}(ptr)
 *
 * store: st{type}{size}{endian}_{access_type}(ptr, val)
 *
 * type is:
 * (empty): integer access
 * f      : float access
 *
 * sign is:
 * (empty): for floats or 32 bit size
 * u      : unsigned
 * s      : signed
 *
 * size is:
 * b: 8 bits
 * w: 16 bits
 * l: 32 bits
 * q: 64 bits
 *
 * endian is:
 * (empty): target cpu endianness or 8 bit access
 * r      : reversed target cpu endianness (not implemented yet)
 * be     : big endian (not implemented yet)
 * le     : little endian (not implemented yet)
 *
 * access_type is:
 * raw    : host memory access
 * user   : user mode access using soft MMU
 * kernel : kernel mode access using soft MMU
 */
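/* Decoding example (illustrative): ldsw_le_p(p) is a load ("ld") of a signed
 * ("s") 16-bit word ("w") in little-endian order ("le") through a raw host
 * pointer ("_p"); stfq_be_p(p, v) stores ("st") a 64-bit ("q") float ("f")
 * in big-endian order ("be") through a raw host pointer. */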
#ifdef VBOX

void remR3PhysRead(RTGCPHYS SrcGCPhys, void *pvDst, unsigned cb);
uint8_t remR3PhysReadU8(RTGCPHYS SrcGCPhys);
int8_t remR3PhysReadS8(RTGCPHYS SrcGCPhys);
uint16_t remR3PhysReadU16(RTGCPHYS SrcGCPhys);
int16_t remR3PhysReadS16(RTGCPHYS SrcGCPhys);
uint32_t remR3PhysReadU32(RTGCPHYS SrcGCPhys);
int32_t remR3PhysReadS32(RTGCPHYS SrcGCPhys);
uint64_t remR3PhysReadU64(RTGCPHYS SrcGCPhys);
int64_t remR3PhysReadS64(RTGCPHYS SrcGCPhys);
void remR3PhysWrite(RTGCPHYS DstGCPhys, const void *pvSrc, unsigned cb);
void remR3PhysWriteU8(RTGCPHYS DstGCPhys, uint8_t val);
void remR3PhysWriteU16(RTGCPHYS DstGCPhys, uint16_t val);
void remR3PhysWriteU32(RTGCPHYS DstGCPhys, uint32_t val);
void remR3PhysWriteU64(RTGCPHYS DstGCPhys, uint64_t val);

#ifdef PGM_DYNAMIC_RAM_ALLOC
void remR3GrowDynRange(unsigned long physaddr);
#endif
#if 0 /*defined(RT_ARCH_AMD64) && defined(VBOX_STRICT)*/
# define VBOX_CHECK_ADDR(ptr) do { if ((uintptr_t)(ptr) >= _4G) __asm__("int3"); } while (0)
#else
# define VBOX_CHECK_ADDR(ptr) do { } while (0)
#endif

static inline int ldub_p(void *ptr)
{
    VBOX_CHECK_ADDR(ptr);
    return remR3PhysReadU8((uintptr_t)ptr);
}

static inline int ldsb_p(void *ptr)
{
    VBOX_CHECK_ADDR(ptr);
    return remR3PhysReadS8((uintptr_t)ptr);
}

static inline void stb_p(void *ptr, int v)
{
    VBOX_CHECK_ADDR(ptr);
    remR3PhysWriteU8((uintptr_t)ptr, v);
}

static inline int lduw_le_p(void *ptr)
{
    VBOX_CHECK_ADDR(ptr);
    return remR3PhysReadU16((uintptr_t)ptr);
}

static inline int ldsw_le_p(void *ptr)
{
    VBOX_CHECK_ADDR(ptr);
    return remR3PhysReadS16((uintptr_t)ptr);
}

static inline void stw_le_p(void *ptr, int v)
{
    VBOX_CHECK_ADDR(ptr);
    remR3PhysWriteU16((uintptr_t)ptr, v);
}

static inline int ldl_le_p(void *ptr)
{
    VBOX_CHECK_ADDR(ptr);
    return remR3PhysReadU32((uintptr_t)ptr);
}

static inline void stl_le_p(void *ptr, int v)
{
    VBOX_CHECK_ADDR(ptr);
    remR3PhysWriteU32((uintptr_t)ptr, v);
}

static inline void stq_le_p(void *ptr, uint64_t v)
{
    VBOX_CHECK_ADDR(ptr);
    remR3PhysWriteU64((uintptr_t)ptr, v);
}

static inline uint64_t ldq_le_p(void *ptr)
{
    VBOX_CHECK_ADDR(ptr);
    return remR3PhysReadU64((uintptr_t)ptr);
}

#undef VBOX_CHECK_ADDR

/* float access */

static inline float32 ldfl_le_p(void *ptr)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.i = ldl_le_p(ptr);
    return u.f;
}

static inline void stfl_le_p(void *ptr, float32 v)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.f = v;
    stl_le_p(ptr, u.i);
}

static inline float64 ldfq_le_p(void *ptr)
{
    CPU_DoubleU u;
    u.l.lower = ldl_le_p(ptr);
    u.l.upper = ldl_le_p(ptr + 4);
    return u.d;
}

static inline void stfq_le_p(void *ptr, float64 v)
{
    CPU_DoubleU u;
    u.d = v;
    stl_le_p(ptr, u.l.lower);
    stl_le_p(ptr + 4, u.l.upper);
}

#else /* !VBOX */

static inline int ldub_p(void *ptr)
{
    return *(uint8_t *)ptr;
}

static inline int ldsb_p(void *ptr)
{
    return *(int8_t *)ptr;
}

static inline void stb_p(void *ptr, int v)
{
    *(uint8_t *)ptr = v;
}

/* NOTE: on arm, writing 2 to /proc/sys/debug/alignment so that the kernel
   handles unaligned load/stores may give better results, but it is a
   system-wide setting: bad */
#if defined(WORDS_BIGENDIAN) || defined(WORDS_ALIGNED)

/* conservative code for little endian unaligned accesses */
static inline int lduw_le_p(void *ptr)
{
#ifdef __powerpc__
    int val;
    __asm__ __volatile__ ("lhbrx %0,0,%1" : "=r" (val) : "r" (ptr));
    return val;
#else
    uint8_t *p = ptr;
    return p[0] | (p[1] << 8);
#endif
}

static inline int ldsw_le_p(void *ptr)
{
#ifdef __powerpc__
    int val;
    __asm__ __volatile__ ("lhbrx %0,0,%1" : "=r" (val) : "r" (ptr));
    return (int16_t)val;
#else
    uint8_t *p = ptr;
    return (int16_t)(p[0] | (p[1] << 8));
#endif
}

static inline int ldl_le_p(void *ptr)
{
#ifdef __powerpc__
    int val;
    __asm__ __volatile__ ("lwbrx %0,0,%1" : "=r" (val) : "r" (ptr));
    return val;
#else
    uint8_t *p = ptr;
    return p[0] | (p[1] << 8) | (p[2] << 16) | (p[3] << 24);
#endif
}

static inline uint64_t ldq_le_p(void *ptr)
{
    uint8_t *p = ptr;
    uint32_t v1, v2;
    v1 = ldl_le_p(p);
    v2 = ldl_le_p(p + 4);
    return v1 | ((uint64_t)v2 << 32);
}

static inline void stw_le_p(void *ptr, int v)
{
#ifdef __powerpc__
    __asm__ __volatile__ ("sthbrx %1,0,%2" : "=m" (*(uint16_t *)ptr) : "r" (v), "r" (ptr));
#else
    uint8_t *p = ptr;
    p[0] = v;
    p[1] = v >> 8;
#endif
}

static inline void stl_le_p(void *ptr, int v)
{
#ifdef __powerpc__
    __asm__ __volatile__ ("stwbrx %1,0,%2" : "=m" (*(uint32_t *)ptr) : "r" (v), "r" (ptr));
#else
    uint8_t *p = ptr;
    p[0] = v;
    p[1] = v >> 8;
    p[2] = v >> 16;
    p[3] = v >> 24;
#endif
}

static inline void stq_le_p(void *ptr, uint64_t v)
{
    uint8_t *p = ptr;
    stl_le_p(p, (uint32_t)v);
    stl_le_p(p + 4, v >> 32);
}

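/* Worked example (illustrative): with buf = { 0x78, 0x56, 0x34, 0x12 },
 * ldl_le_p(buf) assembles 0x78 | (0x56 << 8) | (0x34 << 16) | (0x12 << 24)
 * = 0x12345678 on any host, since it reads byte by byte instead of relying
 * on the host's native load width and order. */
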
/* float access */

static inline float32 ldfl_le_p(void *ptr)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.i = ldl_le_p(ptr);
    return u.f;
}

static inline void stfl_le_p(void *ptr, float32 v)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.f = v;
    stl_le_p(ptr, u.i);
}

static inline float64 ldfq_le_p(void *ptr)
{
    CPU_DoubleU u;
    u.l.lower = ldl_le_p(ptr);
    u.l.upper = ldl_le_p(ptr + 4);
    return u.d;
}

static inline void stfq_le_p(void *ptr, float64 v)
{
    CPU_DoubleU u;
    u.d = v;
    stl_le_p(ptr, u.l.lower);
    stl_le_p(ptr + 4, u.l.upper);
}

#else

static inline int lduw_le_p(void *ptr)
{
    return *(uint16_t *)ptr;
}

static inline int ldsw_le_p(void *ptr)
{
    return *(int16_t *)ptr;
}

static inline int ldl_le_p(void *ptr)
{
    return *(uint32_t *)ptr;
}

static inline uint64_t ldq_le_p(void *ptr)
{
    return *(uint64_t *)ptr;
}

static inline void stw_le_p(void *ptr, int v)
{
    *(uint16_t *)ptr = v;
}

static inline void stl_le_p(void *ptr, int v)
{
    *(uint32_t *)ptr = v;
}

static inline void stq_le_p(void *ptr, uint64_t v)
{
    *(uint64_t *)ptr = v;
}

/* float access */

static inline float32 ldfl_le_p(void *ptr)
{
    return *(float32 *)ptr;
}

static inline float64 ldfq_le_p(void *ptr)
{
    return *(float64 *)ptr;
}

static inline void stfl_le_p(void *ptr, float32 v)
{
    *(float32 *)ptr = v;
}

static inline void stfq_le_p(void *ptr, float64 v)
{
    *(float64 *)ptr = v;
}
#endif
#endif /* !VBOX */

#if !defined(WORDS_BIGENDIAN) || defined(WORDS_ALIGNED)

static inline int lduw_be_p(void *ptr)
{
#if defined(__i386__)
    int val;
    asm volatile ("movzwl %1, %0\n"
                  "xchgb %b0, %h0\n"
                  : "=q" (val)
                  : "m" (*(uint16_t *)ptr));
    return val;
#else
    uint8_t *b = (uint8_t *) ptr;
    return ((b[0] << 8) | b[1]);
#endif
}

static inline int ldsw_be_p(void *ptr)
{
#if defined(__i386__)
    int val;
    asm volatile ("movzwl %1, %0\n"
                  "xchgb %b0, %h0\n"
                  : "=q" (val)
                  : "m" (*(uint16_t *)ptr));
    return (int16_t)val;
#else
    uint8_t *b = (uint8_t *) ptr;
    return (int16_t)((b[0] << 8) | b[1]);
#endif
}

static inline int ldl_be_p(void *ptr)
{
#if defined(__i386__) || defined(__x86_64__)
    int val;
    asm volatile ("movl %1, %0\n"
                  "bswap %0\n"
                  : "=r" (val)
                  : "m" (*(uint32_t *)ptr));
    return val;
#else
    uint8_t *b = (uint8_t *) ptr;
    return (b[0] << 24) | (b[1] << 16) | (b[2] << 8) | b[3];
#endif
}

static inline uint64_t ldq_be_p(void *ptr)
{
    uint32_t a, b;
    a = ldl_be_p(ptr);
    b = ldl_be_p(ptr + 4);
    return (((uint64_t)a << 32) | b);
}

static inline void stw_be_p(void *ptr, int v)
{
#if defined(__i386__)
    asm volatile ("xchgb %b0, %h0\n"
                  "movw %w0, %1\n"
                  : "=q" (v)
                  : "m" (*(uint16_t *)ptr), "0" (v));
#else
    uint8_t *d = (uint8_t *) ptr;
    d[0] = v >> 8;
    d[1] = v;
#endif
}

static inline void stl_be_p(void *ptr, int v)
{
#if defined(__i386__) || defined(__x86_64__)
    asm volatile ("bswap %0\n"
                  "movl %0, %1\n"
                  : "=r" (v)
                  : "m" (*(uint32_t *)ptr), "0" (v));
#else
    uint8_t *d = (uint8_t *) ptr;
    d[0] = v >> 24;
    d[1] = v >> 16;
    d[2] = v >> 8;
    d[3] = v;
#endif
}

static inline void stq_be_p(void *ptr, uint64_t v)
{
    stl_be_p(ptr, v >> 32);
    stl_be_p(ptr + 4, v);
}

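/* Round-trip sketch (illustrative): stl_be_p(buf, 0x12345678) lays the bytes
 * out as 0x12 0x34 0x56 0x78, and ldl_be_p(buf) reassembles 0x12345678, so a
 * store followed by a load of the same width and endianness is the identity,
 * whatever the host byte order. */
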
/* float access */

static inline float32 ldfl_be_p(void *ptr)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.i = ldl_be_p(ptr);
    return u.f;
}

static inline void stfl_be_p(void *ptr, float32 v)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.f = v;
    stl_be_p(ptr, u.i);
}

static inline float64 ldfq_be_p(void *ptr)
{
    CPU_DoubleU u;
    u.l.upper = ldl_be_p(ptr);
    u.l.lower = ldl_be_p(ptr + 4);
    return u.d;
}

static inline void stfq_be_p(void *ptr, float64 v)
{
    CPU_DoubleU u;
    u.d = v;
    stl_be_p(ptr, u.l.upper);
    stl_be_p(ptr + 4, u.l.lower);
}

#else

static inline int lduw_be_p(void *ptr)
{
    return *(uint16_t *)ptr;
}

static inline int ldsw_be_p(void *ptr)
{
    return *(int16_t *)ptr;
}

static inline int ldl_be_p(void *ptr)
{
    return *(uint32_t *)ptr;
}

static inline uint64_t ldq_be_p(void *ptr)
{
    return *(uint64_t *)ptr;
}

static inline void stw_be_p(void *ptr, int v)
{
    *(uint16_t *)ptr = v;
}

static inline void stl_be_p(void *ptr, int v)
{
    *(uint32_t *)ptr = v;
}

static inline void stq_be_p(void *ptr, uint64_t v)
{
    *(uint64_t *)ptr = v;
}

/* float access */

static inline float32 ldfl_be_p(void *ptr)
{
    return *(float32 *)ptr;
}

static inline float64 ldfq_be_p(void *ptr)
{
    return *(float64 *)ptr;
}

static inline void stfl_be_p(void *ptr, float32 v)
{
    *(float32 *)ptr = v;
}

static inline void stfq_be_p(void *ptr, float64 v)
{
    *(float64 *)ptr = v;
}

#endif

/* target CPU memory access functions */
#if defined(TARGET_WORDS_BIGENDIAN)
#define lduw_p(p) lduw_be_p(p)
#define ldsw_p(p) ldsw_be_p(p)
#define ldl_p(p) ldl_be_p(p)
#define ldq_p(p) ldq_be_p(p)
#define ldfl_p(p) ldfl_be_p(p)
#define ldfq_p(p) ldfq_be_p(p)
#define stw_p(p, v) stw_be_p(p, v)
#define stl_p(p, v) stl_be_p(p, v)
#define stq_p(p, v) stq_be_p(p, v)
#define stfl_p(p, v) stfl_be_p(p, v)
#define stfq_p(p, v) stfq_be_p(p, v)
#else
#define lduw_p(p) lduw_le_p(p)
#define ldsw_p(p) ldsw_le_p(p)
#define ldl_p(p) ldl_le_p(p)
#define ldq_p(p) ldq_le_p(p)
#define ldfl_p(p) ldfl_le_p(p)
#define ldfq_p(p) ldfq_le_p(p)
#define stw_p(p, v) stw_le_p(p, v)
#define stl_p(p, v) stl_le_p(p, v)
#define stq_p(p, v) stq_le_p(p, v)
#define stfl_p(p, v) stfl_le_p(p, v)
#define stfq_p(p, v) stfq_le_p(p, v)
#endif

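/* Dispatch example (illustrative): for a little-endian target such as i386,
 * TARGET_WORDS_BIGENDIAN is undefined, so ldl_p(p) expands to ldl_le_p(p);
 * for a big-endian target such as sparc it expands to ldl_be_p(p). Code
 * above this layer can use ldl_p() without caring about either byte order. */
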
/* MMU memory access macros */

#if defined(CONFIG_USER_ONLY)
/* On some host systems the guest address space is reserved on the host.
 * This allows the guest address space to be offset to a convenient location.
 */
//#define GUEST_BASE 0x20000000
#define GUEST_BASE 0

/* All direct uses of g2h and h2g need to go away for usermode softmmu. */
#define g2h(x) ((void *)((unsigned long)(x) + GUEST_BASE))
#define h2g(x) ((target_ulong)(x - GUEST_BASE))

#define saddr(x) g2h(x)
#define laddr(x) g2h(x)

#else /* !CONFIG_USER_ONLY */
/* NOTE: we use double casts if pointers and target_ulong have
   different sizes */
#define saddr(x) (uint8_t *)(long)(x)
#define laddr(x) (uint8_t *)(long)(x)
#endif

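/* Translation sketch (illustrative): with a non-zero base, say
 * GUEST_BASE = 0x20000000, g2h(0x1000) yields the host pointer
 * (void *)0x20001000 and h2g() inverts the mapping; with the default
 * GUEST_BASE of 0 both macros are effectively casts. */
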
#define ldub_raw(p) ldub_p(laddr((p)))
#define ldsb_raw(p) ldsb_p(laddr((p)))
#define lduw_raw(p) lduw_p(laddr((p)))
#define ldsw_raw(p) ldsw_p(laddr((p)))
#define ldl_raw(p) ldl_p(laddr((p)))
#define ldq_raw(p) ldq_p(laddr((p)))
#define ldfl_raw(p) ldfl_p(laddr((p)))
#define ldfq_raw(p) ldfq_p(laddr((p)))
#define stb_raw(p, v) stb_p(saddr((p)), v)
#define stw_raw(p, v) stw_p(saddr((p)), v)
#define stl_raw(p, v) stl_p(saddr((p)), v)
#define stq_raw(p, v) stq_p(saddr((p)), v)
#define stfl_raw(p, v) stfl_p(saddr((p)), v)
#define stfq_raw(p, v) stfq_p(saddr((p)), v)


#if defined(CONFIG_USER_ONLY)

/* if user mode, no other memory access functions */
#define ldub(p) ldub_raw(p)
#define ldsb(p) ldsb_raw(p)
#define lduw(p) lduw_raw(p)
#define ldsw(p) ldsw_raw(p)
#define ldl(p) ldl_raw(p)
#define ldq(p) ldq_raw(p)
#define ldfl(p) ldfl_raw(p)
#define ldfq(p) ldfq_raw(p)
#define stb(p, v) stb_raw(p, v)
#define stw(p, v) stw_raw(p, v)
#define stl(p, v) stl_raw(p, v)
#define stq(p, v) stq_raw(p, v)
#define stfl(p, v) stfl_raw(p, v)
#define stfq(p, v) stfq_raw(p, v)

#define ldub_code(p) ldub_raw(p)
#define ldsb_code(p) ldsb_raw(p)
#define lduw_code(p) lduw_raw(p)
#define ldsw_code(p) ldsw_raw(p)
#define ldl_code(p) ldl_raw(p)

#define ldub_kernel(p) ldub_raw(p)
#define ldsb_kernel(p) ldsb_raw(p)
#define lduw_kernel(p) lduw_raw(p)
#define ldsw_kernel(p) ldsw_raw(p)
#define ldl_kernel(p) ldl_raw(p)
#define ldfl_kernel(p) ldfl_raw(p)
#define ldfq_kernel(p) ldfq_raw(p)
#define stb_kernel(p, v) stb_raw(p, v)
#define stw_kernel(p, v) stw_raw(p, v)
#define stl_kernel(p, v) stl_raw(p, v)
#define stq_kernel(p, v) stq_raw(p, v)
#define stfl_kernel(p, v) stfl_raw(p, v)
#define stfq_kernel(p, v) stfq_raw(p, v)

#endif /* defined(CONFIG_USER_ONLY) */

/* page related stuff */

#define TARGET_PAGE_SIZE (1 << TARGET_PAGE_BITS)
#define TARGET_PAGE_MASK ~(TARGET_PAGE_SIZE - 1)
#define TARGET_PAGE_ALIGN(addr) (((addr) + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK)

/* ??? These should be the larger of unsigned long and target_ulong. */
extern unsigned long qemu_real_host_page_size;
extern unsigned long qemu_host_page_bits;
extern unsigned long qemu_host_page_size;
extern unsigned long qemu_host_page_mask;

#define HOST_PAGE_ALIGN(addr) (((addr) + qemu_host_page_size - 1) & qemu_host_page_mask)

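/* Worked example (illustrative): with TARGET_PAGE_BITS = 12,
 * TARGET_PAGE_SIZE is 4096 and TARGET_PAGE_MASK is ~0xfff, so
 * TARGET_PAGE_ALIGN(0x1234) = (0x1234 + 0xfff) & ~0xfff = 0x2000: the
 * address is rounded up to the next page boundary, while already-aligned
 * addresses come back unchanged. */
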
/* same as PROT_xxx */
#define PAGE_READ      0x0001
#define PAGE_WRITE     0x0002
#define PAGE_EXEC      0x0004
#define PAGE_BITS      (PAGE_READ | PAGE_WRITE | PAGE_EXEC)
#define PAGE_VALID     0x0008
/* original state of the write flag (used when tracking self-modifying
   code) */
#define PAGE_WRITE_ORG 0x0010

void page_dump(FILE *f);
int page_get_flags(target_ulong address);
void page_set_flags(target_ulong start, target_ulong end, int flags);
void page_unprotect_range(target_ulong data, target_ulong data_size);
#define SINGLE_CPU_DEFINES
#ifdef SINGLE_CPU_DEFINES

#if defined(TARGET_I386)

#define CPUState CPUX86State
#define cpu_init cpu_x86_init
#define cpu_exec cpu_x86_exec
#define cpu_gen_code cpu_x86_gen_code
#define cpu_signal_handler cpu_x86_signal_handler

#elif defined(TARGET_ARM)

#define CPUState CPUARMState
#define cpu_init cpu_arm_init
#define cpu_exec cpu_arm_exec
#define cpu_gen_code cpu_arm_gen_code
#define cpu_signal_handler cpu_arm_signal_handler

#elif defined(TARGET_SPARC)

#define CPUState CPUSPARCState
#define cpu_init cpu_sparc_init
#define cpu_exec cpu_sparc_exec
#define cpu_gen_code cpu_sparc_gen_code
#define cpu_signal_handler cpu_sparc_signal_handler

#elif defined(TARGET_PPC)

#define CPUState CPUPPCState
#define cpu_init cpu_ppc_init
#define cpu_exec cpu_ppc_exec
#define cpu_gen_code cpu_ppc_gen_code
#define cpu_signal_handler cpu_ppc_signal_handler

#elif defined(TARGET_M68K)
#define CPUState CPUM68KState
#define cpu_init cpu_m68k_init
#define cpu_exec cpu_m68k_exec
#define cpu_gen_code cpu_m68k_gen_code
#define cpu_signal_handler cpu_m68k_signal_handler

#elif defined(TARGET_MIPS)
#define CPUState CPUMIPSState
#define cpu_init cpu_mips_init
#define cpu_exec cpu_mips_exec
#define cpu_gen_code cpu_mips_gen_code
#define cpu_signal_handler cpu_mips_signal_handler

#elif defined(TARGET_SH4)
#define CPUState CPUSH4State
#define cpu_init cpu_sh4_init
#define cpu_exec cpu_sh4_exec
#define cpu_gen_code cpu_sh4_gen_code
#define cpu_signal_handler cpu_sh4_signal_handler

#else

#error unsupported target CPU

#endif

#endif /* SINGLE_CPU_DEFINES */

void cpu_dump_state(CPUState *env, FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
                    int flags);

void cpu_abort(CPUState *env, const char *fmt, ...);
extern CPUState *first_cpu;
extern CPUState *cpu_single_env;
extern int code_copy_enabled;

#define CPU_INTERRUPT_EXIT   0x01 /* wants exit from main loop */
#define CPU_INTERRUPT_HARD   0x02 /* hardware interrupt pending */
#define CPU_INTERRUPT_EXITTB 0x04 /* exit the current TB (use for x86 a20 case) */
#define CPU_INTERRUPT_TIMER  0x08 /* internal timer exception pending */
#define CPU_INTERRUPT_FIQ    0x10 /* Fast interrupt pending. */
#define CPU_INTERRUPT_HALT   0x20 /* CPU halt wanted */
#define CPU_INTERRUPT_SMI    0x40 /* (x86 only) SMI interrupt pending */

#ifdef VBOX
/** Executes a single instruction. cpu_exec() will normally return EXCP_SINGLE_INSTR. */
#define CPU_INTERRUPT_SINGLE_INSTR 0x0200
/** Executing a CPU_INTERRUPT_SINGLE_INSTR request, quit the cpu_loop. (for exceptions and suchlike) */
#define CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT 0x0400
/** VM execution was interrupted by VMR3Reset, VMR3Suspend or VMR3PowerOff. */
#define CPU_INTERRUPT_RC 0x0800
/** Exit current TB to process an external exit request (also in op.c!!) */
#define CPU_INTERRUPT_EXTERNAL_EXIT 0x1000
/** Exit current TB to process an external hardware interrupt request (also in op.c!!) */
#define CPU_INTERRUPT_EXTERNAL_HARD 0x2000
/** Exit current TB to process an external timer request (also in op.c!!) */
#define CPU_INTERRUPT_EXTERNAL_TIMER 0x4000
/** Exit current TB to process an external DMA request (also in op.c!!) */
#define CPU_INTERRUPT_EXTERNAL_DMA 0x8000
#endif /* VBOX */
void cpu_interrupt(CPUState *s, int mask);
void cpu_reset_interrupt(CPUState *env, int mask);

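/* Usage sketch (illustrative): callers request asynchronous events by OR-ing
 * a flag into the CPU's pending-interrupt mask, e.g.
 *
 *     cpu_interrupt(env, CPU_INTERRUPT_EXIT);   // ask cpu_exec() to return
 *
 * and withdraw a pending request with cpu_reset_interrupt(env, mask). */
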
int cpu_breakpoint_insert(CPUState *env, target_ulong pc);
int cpu_breakpoint_remove(CPUState *env, target_ulong pc);
void cpu_single_step(CPUState *env, int enabled);
void cpu_reset(CPUState *s);

/* Return the physical page corresponding to a virtual one. Use it
   only for debugging because no protection checks are done. Return -1
   if no page found. */
target_ulong cpu_get_phys_page_debug(CPUState *env, target_ulong addr);

#define CPU_LOG_TB_OUT_ASM (1 << 0)
#define CPU_LOG_TB_IN_ASM  (1 << 1)
#define CPU_LOG_TB_OP      (1 << 2)
#define CPU_LOG_TB_OP_OPT  (1 << 3)
#define CPU_LOG_INT        (1 << 4)
#define CPU_LOG_EXEC       (1 << 5)
#define CPU_LOG_PCALL      (1 << 6)
#define CPU_LOG_IOPORT     (1 << 7)
#define CPU_LOG_TB_CPU     (1 << 8)

/* define log items */
typedef struct CPULogItem {
    int mask;
    const char *name;
    const char *help;
} CPULogItem;

extern CPULogItem cpu_log_items[];

void cpu_set_log(int log_flags);
void cpu_set_log_filename(const char *filename);
int cpu_str_to_log_mask(const char *str);

/* IO ports API */

/* NOTE: as these functions may even be used when there is an ISA
   bridge on non-x86 targets, we always define them */
#ifndef NO_CPU_IO_DEFS
void cpu_outb(CPUState *env, int addr, int val);
void cpu_outw(CPUState *env, int addr, int val);
void cpu_outl(CPUState *env, int addr, int val);
int cpu_inb(CPUState *env, int addr);
int cpu_inw(CPUState *env, int addr);
int cpu_inl(CPUState *env, int addr);
#endif

/* memory API */

#ifndef VBOX
extern int phys_ram_size;
extern int phys_ram_fd;
#else /* VBOX */
extern RTGCPHYS phys_ram_size;
/** This is required for bounds checking the phys_ram_dirty accesses. */
extern uint32_t phys_ram_dirty_size;
#endif /* VBOX */
#if !defined(VBOX) || !(defined(PGM_DYNAMIC_RAM_ALLOC) || defined(REM_PHYS_ADDR_IN_TLB))
extern uint8_t *phys_ram_base;
#endif
extern uint8_t *phys_ram_dirty;

/* physical memory access */
#define TLB_INVALID_MASK (1 << 3)
#define IO_MEM_SHIFT 4
#define IO_MEM_NB_ENTRIES (1 << (TARGET_PAGE_BITS - IO_MEM_SHIFT))

#define IO_MEM_RAM         (0 << IO_MEM_SHIFT) /* hardcoded offset */
#define IO_MEM_ROM         (1 << IO_MEM_SHIFT) /* hardcoded offset */
#define IO_MEM_UNASSIGNED  (2 << IO_MEM_SHIFT)
#define IO_MEM_NOTDIRTY    (4 << IO_MEM_SHIFT) /* used internally, never use directly */
#if defined(VBOX) && defined(PGM_DYNAMIC_RAM_ALLOC)
#define IO_MEM_RAM_MISSING (5 << IO_MEM_SHIFT) /* used internally, never use directly */
#endif
/* acts like a ROM when read and like a device when written. As an
   exception, the write memory callback gets the ram offset instead of
   the physical address */
#define IO_MEM_ROMD (1)

typedef void CPUWriteMemoryFunc(void *opaque, target_phys_addr_t addr, uint32_t value);
typedef uint32_t CPUReadMemoryFunc(void *opaque, target_phys_addr_t addr);

void cpu_register_physical_memory(target_phys_addr_t start_addr,
                                  unsigned long size,
                                  unsigned long phys_offset);
uint32_t cpu_get_physical_page_desc(target_phys_addr_t addr);
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write,
                           void *opaque);
CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index);
CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index);

void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write);
static inline void cpu_physical_memory_read(target_phys_addr_t addr,
                                            uint8_t *buf, int len)
{
    cpu_physical_memory_rw(addr, buf, len, 0);
}
static inline void cpu_physical_memory_write(target_phys_addr_t addr,
                                             const uint8_t *buf, int len)
{
    cpu_physical_memory_rw(addr, (uint8_t *)buf, len, 1);
}
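/* Usage sketch (illustrative): copying 16 bytes of guest-physical memory
 * into a host buffer, then patching one byte back:
 *
 *     uint8_t buf[16];
 *     cpu_physical_memory_read(gphys, buf, sizeof(buf));
 *     buf[0] = 0x90;
 *     cpu_physical_memory_write(gphys, buf, 1);
 *
 * where "gphys" stands for any valid target_phys_addr_t. */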
uint32_t ldub_phys(target_phys_addr_t addr);
uint32_t lduw_phys(target_phys_addr_t addr);
uint32_t ldl_phys(target_phys_addr_t addr);
uint64_t ldq_phys(target_phys_addr_t addr);
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val);
void stb_phys(target_phys_addr_t addr, uint32_t val);
void stw_phys(target_phys_addr_t addr, uint32_t val);
void stl_phys(target_phys_addr_t addr, uint32_t val);
void stq_phys(target_phys_addr_t addr, uint64_t val);

void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len);
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write);

#define VGA_DIRTY_FLAG  0x01
#define CODE_DIRTY_FLAG 0x02

/* read dirty bit (return 0 or 1) */
static inline int cpu_physical_memory_is_dirty(ram_addr_t addr)
{
#ifdef VBOX
    if (RT_UNLIKELY((addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
    {
        Log(("cpu_physical_memory_is_dirty: %VGp\n", (RTGCPHYS)addr));
        /*AssertMsgFailed(("cpu_physical_memory_is_dirty: %VGp\n", (RTGCPHYS)addr));*/
        return 0;
    }
#endif
    return phys_ram_dirty[addr >> TARGET_PAGE_BITS] == 0xff;
}

static inline int cpu_physical_memory_get_dirty(ram_addr_t addr,
                                                int dirty_flags)
{
#ifdef VBOX
    if (RT_UNLIKELY((addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
    {
        Log(("cpu_physical_memory_get_dirty: %VGp\n", (RTGCPHYS)addr));
        /*AssertMsgFailed(("cpu_physical_memory_get_dirty: %VGp\n", (RTGCPHYS)addr));*/
        return 0xff & dirty_flags; /** @todo I don't think this is the right thing to return, fix! */
    }
#endif
    return phys_ram_dirty[addr >> TARGET_PAGE_BITS] & dirty_flags;
}

static inline void cpu_physical_memory_set_dirty(ram_addr_t addr)
{
#ifdef VBOX
    if (RT_UNLIKELY((addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
    {
        Log(("cpu_physical_memory_set_dirty: %VGp\n", (RTGCPHYS)addr));
        /*AssertMsgFailed(("cpu_physical_memory_set_dirty: %VGp\n", (RTGCPHYS)addr));*/
        return;
    }
#endif
    phys_ram_dirty[addr >> TARGET_PAGE_BITS] = 0xff;
}

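/* Usage sketch (illustrative): a display device can poll the VGA dirty bit
 * to decide whether a framebuffer page needs redrawing, then clear it:
 *
 *     if (cpu_physical_memory_get_dirty(page_addr, VGA_DIRTY_FLAG)) {
 *         redraw_page(page_addr);              // hypothetical helper
 *         cpu_physical_memory_reset_dirty(page_addr,
 *                                         page_addr + TARGET_PAGE_SIZE,
 *                                         VGA_DIRTY_FLAG);
 *     }
 */
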
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags);
void cpu_tlb_update_dirty(CPUState *env);

void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...));

/*******************************************/
/* host CPU ticks (if available) */

#if defined(__powerpc__)

static inline uint32_t get_tbl(void)
{
    uint32_t tbl;
    asm volatile("mftb %0" : "=r" (tbl));
    return tbl;
}

static inline uint32_t get_tbu(void)
{
    uint32_t tbu;
    asm volatile("mftbu %0" : "=r" (tbu));
    return tbu;
}

static inline int64_t cpu_get_real_ticks(void)
{
    uint32_t l, h, h1;
    /* NOTE: we test if wrapping has occurred */
    do {
        h = get_tbu();
        l = get_tbl();
        h1 = get_tbu();
    } while (h != h1);
    return ((int64_t)h << 32) | l;
}

#elif defined(__i386__)

static inline int64_t cpu_get_real_ticks(void)
{
    int64_t val;
    asm volatile ("rdtsc" : "=A" (val));
    return val;
}

#elif defined(__x86_64__)

static inline int64_t cpu_get_real_ticks(void)
{
    uint32_t low, high;
    int64_t val;
    asm volatile("rdtsc" : "=a" (low), "=d" (high));
    val = high;
    val <<= 32;
    val |= low;
    return val;
}

#elif defined(__ia64)

static inline int64_t cpu_get_real_ticks(void)
{
    int64_t val;
    asm volatile ("mov %0 = ar.itc" : "=r"(val) :: "memory");
    return val;
}

#elif defined(__s390__)

static inline int64_t cpu_get_real_ticks(void)
{
    int64_t val;
    asm volatile("stck 0(%1)" : "=m" (val) : "a" (&val) : "cc");
    return val;
}

#elif defined(__sparc_v9__)

static inline int64_t cpu_get_real_ticks (void)
{
#if defined(_LP64)
    uint64_t rval;
    asm volatile("rd %%tick,%0" : "=r"(rval));
    return rval;
#else
    union {
        uint64_t i64;
        struct {
            uint32_t high;
            uint32_t low;
        } i32;
    } rval;
    asm volatile("rd %%tick,%1; srlx %1,32,%0"
                 : "=r"(rval.i32.high), "=r"(rval.i32.low));
    return rval.i64;
#endif
}
#else
/* The host CPU doesn't have an easily accessible cycle counter.
   Just return a monotonically increasing value. This will be totally wrong,
   but hopefully better than nothing. */
static inline int64_t cpu_get_real_ticks (void)
{
    static int64_t ticks = 0;
    return ticks++;
}
#endif

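/* Timing sketch (illustrative): cpu_get_real_ticks() returns a raw counter
 * value, so intervals are measured by differencing two reads:
 *
 *     int64_t t0 = cpu_get_real_ticks();
 *     do_work();                               // hypothetical workload
 *     int64_t cycles = cpu_get_real_ticks() - t0;
 *
 * The result is in host CPU cycles (or timebase ticks on PowerPC), not in
 * wall-clock units. */
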
/* profiling */
#ifdef CONFIG_PROFILER
static inline int64_t profile_getclock(void)
{
    return cpu_get_real_ticks();
}

extern int64_t kqemu_time, kqemu_time_start;
extern int64_t qemu_time, qemu_time_start;
extern int64_t tlb_flush_time;
extern int64_t kqemu_exec_count;
extern int64_t dev_time;
extern int64_t kqemu_ret_int_count;
extern int64_t kqemu_ret_excp_count;
extern int64_t kqemu_ret_intr_count;

#endif

#ifdef VBOX
void tb_invalidate_virt(CPUState *env, uint32_t eip);
#endif /* VBOX */

#endif /* CPU_ALL_H */