VirtualBox

source: vbox/trunk/src/recompiler_new/cpu-all.h@14277

Last change on this file since 14277 was 14277, checked in by vboxsync, 16 years ago

Implemented support for virtual addresses in the TLB; this improves performance
greatly, but it is not yet fully functional because it breaks some sync checks,
so it is disabled. To enable it, comment out
$(REM_MOD)_DEFS += REM_PHYS_ADDR_IN_TLB in Makefile.kmk.

  • Property svn:eol-style set to native
File size: 36.4 KB
/*
 * defines common to all virtual CPUs
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

/*
 * Sun LGPL Disclaimer: For the avoidance of doubt, except that if any license choice
 * other than GPL or LGPL is available it will apply instead, Sun elects to use only
 * the Lesser General Public License version 2.1 (LGPLv2) at this time for any software where
 * a choice of LGPL license versions is made available with the language indicating
 * that LGPLv2 or any later version may be used, or where a choice of which version
 * of the LGPL is applied is otherwise unspecified.
 */
#ifndef CPU_ALL_H
#define CPU_ALL_H

#ifdef VBOX
# ifndef LOG_GROUP
#  define LOG_GROUP LOG_GROUP_REM
# endif
# include <VBox/log.h>
# include <VBox/pgm.h> /* PGM_DYNAMIC_RAM_ALLOC */
#endif

#if defined(__arm__) || defined(__sparc__)
#define WORDS_ALIGNED
#endif

/* some important defines:
 *
 * WORDS_ALIGNED : if defined, the host cpu can only make word aligned
 * memory accesses.
 *
 * WORDS_BIGENDIAN : if defined, the host cpu is big endian and
 * otherwise little endian.
 *
 * (TARGET_WORDS_ALIGNED : same for target cpu (not supported yet))
 *
 * TARGET_WORDS_BIGENDIAN : same for target cpu
 */

#include "bswap.h"

#if defined(WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
#define BSWAP_NEEDED
#endif

#ifdef BSWAP_NEEDED

static inline uint16_t tswap16(uint16_t s)
{
    return bswap16(s);
}

static inline uint32_t tswap32(uint32_t s)
{
    return bswap32(s);
}

static inline uint64_t tswap64(uint64_t s)
{
    return bswap64(s);
}

static inline void tswap16s(uint16_t *s)
{
    *s = bswap16(*s);
}

static inline void tswap32s(uint32_t *s)
{
    *s = bswap32(*s);
}

static inline void tswap64s(uint64_t *s)
{
    *s = bswap64(*s);
}

#else

#ifndef VBOX
static inline uint16_t tswap16(uint16_t s)
#else
DECLINLINE(uint16_t) tswap16(uint16_t s)
#endif
{
    return s;
}

#ifndef VBOX
static inline uint32_t tswap32(uint32_t s)
#else
DECLINLINE(uint32_t) tswap32(uint32_t s)
#endif
{
    return s;
}

#ifndef VBOX
static inline uint64_t tswap64(uint64_t s)
#else
DECLINLINE(uint64_t) tswap64(uint64_t s)
#endif
{
    return s;
}

#ifndef VBOX
static inline void tswap16s(uint16_t *s)
#else
DECLINLINE(void) tswap16s(uint16_t *s)
#endif
{
}

#ifndef VBOX
static inline void tswap32s(uint32_t *s)
#else
DECLINLINE(void) tswap32s(uint32_t *s)
#endif
{
}

#ifndef VBOX
static inline void tswap64s(uint64_t *s)
#else
DECLINLINE(void) tswap64s(uint64_t *s)
#endif
{
}

#endif

#if TARGET_LONG_SIZE == 4
#define tswapl(s) tswap32(s)
#define tswapls(s) tswap32s((uint32_t *)(s))
#define bswaptls(s) bswap32s(s)
#else
#define tswapl(s) tswap64(s)
#define tswapls(s) tswap64s((uint64_t *)(s))
#define bswaptls(s) bswap64s(s)
#endif
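
/* Illustrative example (editor's sketch, not part of the original header):
 * with a little-endian host and TARGET_WORDS_BIGENDIAN set, BSWAP_NEEDED is
 * defined and the tswap helpers byte-swap, e.g.:
 *
 *     uint32_t guest = tswap32(0x12345678);   // -> 0x78563412
 *     uint16_t g16   = tswap16(0xBEEF);       // -> 0xEFBE
 *
 * When host and target endianness match, the same calls compile to identity
 * functions, so generic code can use tswap*() unconditionally.
 */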

typedef union {
    float32 f;
    uint32_t l;
} CPU_FloatU;

/* NOTE: arm FPA is horrible as double 32 bit words are stored in big
   endian! */
typedef union {
    float64 d;
#if defined(WORDS_BIGENDIAN) \
    || (defined(__arm__) && !defined(__VFP_FP__) && !defined(CONFIG_SOFTFLOAT))
    struct {
        uint32_t upper;
        uint32_t lower;
    } l;
#else
    struct {
        uint32_t lower;
        uint32_t upper;
    } l;
#endif
    uint64_t ll;
} CPU_DoubleU;
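
/* Illustrative example (editor's sketch, not part of the original header):
 * CPU_DoubleU gives word-level access to an IEEE-754 double:
 *
 *     CPU_DoubleU u;
 *     u.d = 1.0;  // bit pattern 0x3FF0000000000000
 *     // u.ll      == 0x3FF0000000000000ULL
 *     // u.l.upper == 0x3FF00000, u.l.lower == 0x00000000
 *
 * The #if above selects the struct layout so that .upper/.lower name the
 * high and low 32-bit words correctly regardless of host byte order.
 */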

#ifdef TARGET_SPARC
typedef union {
    float128 q;
#if defined(WORDS_BIGENDIAN) \
    || (defined(__arm__) && !defined(__VFP_FP__) && !defined(CONFIG_SOFTFLOAT))
    struct {
        uint32_t upmost;
        uint32_t upper;
        uint32_t lower;
        uint32_t lowest;
    } l;
    struct {
        uint64_t upper;
        uint64_t lower;
    } ll;
#else
    struct {
        uint32_t lowest;
        uint32_t lower;
        uint32_t upper;
        uint32_t upmost;
    } l;
    struct {
        uint64_t lower;
        uint64_t upper;
    } ll;
#endif
} CPU_QuadU;
#endif

/* CPU memory access without any memory or io remapping */

/*
 * the generic syntax for the memory accesses is:
 *
 * load: ld{type}{sign}{size}{endian}_{access_type}(ptr)
 *
 * store: st{type}{size}{endian}_{access_type}(ptr, val)
 *
 * type is:
 * (empty): integer access
 * f      : float access
 *
 * sign is:
 * (empty): for floats or 32 bit size
 * u      : unsigned
 * s      : signed
 *
 * size is:
 * b: 8 bits
 * w: 16 bits
 * l: 32 bits
 * q: 64 bits
 *
 * endian is:
 * (empty): target cpu endianness or 8 bit access
 * r      : reversed target cpu endianness (not implemented yet)
 * be     : big endian (not implemented yet)
 * le     : little endian (not implemented yet)
 *
 * access_type is:
 * raw    : host memory access
 * user   : user mode access using soft MMU
 * kernel : kernel mode access using soft MMU
 */
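
/* Illustrative decodings of the naming scheme (editor's note, not part of
 * the original header):
 *
 *     ldub_raw(p)       load unsigned 8-bit, host memory access
 *     ldsw_le_p(p)      load signed 16-bit, explicit little endian, host pointer
 *     ldq_be_p(p)       load 64-bit, explicit big endian, host pointer
 *     stfl_p(p, v)      store 32-bit float, target cpu endianness
 *     stl_kernel(p, v)  store 32-bit, kernel mode soft-MMU access
 */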

#ifdef VBOX
#ifndef VBOX_WITH_NEW_PHYS_CODE
void remR3GrowDynRange(unsigned long physaddr);
#endif

void remR3PhysRead(RTGCPHYS SrcGCPhys, void *pvDst, unsigned cb);
uint8_t remR3PhysReadU8(RTGCPHYS SrcGCPhys);
int8_t remR3PhysReadS8(RTGCPHYS SrcGCPhys);
uint16_t remR3PhysReadU16(RTGCPHYS SrcGCPhys);
int16_t remR3PhysReadS16(RTGCPHYS SrcGCPhys);
uint32_t remR3PhysReadU32(RTGCPHYS SrcGCPhys);
int32_t remR3PhysReadS32(RTGCPHYS SrcGCPhys);
uint64_t remR3PhysReadU64(RTGCPHYS SrcGCPhys);
int64_t remR3PhysReadS64(RTGCPHYS SrcGCPhys);
void remR3PhysWrite(RTGCPHYS DstGCPhys, const void *pvSrc, unsigned cb);
void remR3PhysWriteU8(RTGCPHYS DstGCPhys, uint8_t val);
void remR3PhysWriteU16(RTGCPHYS DstGCPhys, uint16_t val);
void remR3PhysWriteU32(RTGCPHYS DstGCPhys, uint32_t val);
void remR3PhysWriteU64(RTGCPHYS DstGCPhys, uint64_t val);

#ifndef REM_PHYS_ADDR_IN_TLB
target_ulong remR3HCVirt2GCPhys(CPUState *env1, void *addr);
void* remR3GCPhys2HCVirt(CPUState *env1, target_ulong physAddr);
#endif

#endif /* VBOX */

#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)

DECLINLINE(int) ldub_p(void *ptr)
{
    VBOX_CHECK_ADDR(ptr);
    return remR3PhysReadU8((uintptr_t)ptr);
}

DECLINLINE(int) ldsb_p(void *ptr)
{
    VBOX_CHECK_ADDR(ptr);
    return remR3PhysReadS8((uintptr_t)ptr);
}

DECLINLINE(void) stb_p(void *ptr, int v)
{
    VBOX_CHECK_ADDR(ptr);
    remR3PhysWriteU8((uintptr_t)ptr, v);
}

DECLINLINE(int) lduw_le_p(void *ptr)
{
    VBOX_CHECK_ADDR(ptr);
    return remR3PhysReadU16((uintptr_t)ptr);
}

DECLINLINE(int) ldsw_le_p(void *ptr)
{
    VBOX_CHECK_ADDR(ptr);
    return remR3PhysReadS16((uintptr_t)ptr);
}

DECLINLINE(void) stw_le_p(void *ptr, int v)
{
    VBOX_CHECK_ADDR(ptr);
    remR3PhysWriteU16((uintptr_t)ptr, v);
}

DECLINLINE(int) ldl_le_p(void *ptr)
{
    VBOX_CHECK_ADDR(ptr);
    return remR3PhysReadU32((uintptr_t)ptr);
}

DECLINLINE(void) stl_le_p(void *ptr, int v)
{
    VBOX_CHECK_ADDR(ptr);
    remR3PhysWriteU32((uintptr_t)ptr, v);
}

DECLINLINE(void) stq_le_p(void *ptr, uint64_t v)
{
    VBOX_CHECK_ADDR(ptr);
    remR3PhysWriteU64((uintptr_t)ptr, v);
}

DECLINLINE(uint64_t) ldq_le_p(void *ptr)
{
    VBOX_CHECK_ADDR(ptr);
    return remR3PhysReadU64((uintptr_t)ptr);
}

#undef VBOX_CHECK_ADDR

/* float access */

DECLINLINE(float32) ldfl_le_p(void *ptr)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.i = ldl_le_p(ptr);
    return u.f;
}

DECLINLINE(void) stfl_le_p(void *ptr, float32 v)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.f = v;
    stl_le_p(ptr, u.i);
}

DECLINLINE(float64) ldfq_le_p(void *ptr)
{
    CPU_DoubleU u;
    u.l.lower = ldl_le_p(ptr);
    u.l.upper = ldl_le_p((uint8_t*)ptr + 4);
    return u.d;
}

DECLINLINE(void) stfq_le_p(void *ptr, float64 v)
{
    CPU_DoubleU u;
    u.d = v;
    stl_le_p(ptr, u.l.lower);
    stl_le_p((uint8_t*)ptr + 4, u.l.upper);
}

#else /* !(VBOX && REM_PHYS_ADDR_IN_TLB) */

static inline int ldub_p(void *ptr)
{
    return *(uint8_t *)ptr;
}

static inline int ldsb_p(void *ptr)
{
    return *(int8_t *)ptr;
}

static inline void stb_p(void *ptr, int v)
{
    *(uint8_t *)ptr = v;
}

/* NOTE: on arm, putting 2 in /proc/sys/debug/alignment so that the
   kernel handles unaligned load/stores may give better results, but
   it is a system-wide setting: bad */
#if defined(WORDS_BIGENDIAN) || defined(WORDS_ALIGNED)

/* conservative code for little endian unaligned accesses */
static inline int lduw_le_p(void *ptr)
{
#ifdef __powerpc__
    int val;
    __asm__ __volatile__ ("lhbrx %0,0,%1" : "=r" (val) : "r" (ptr));
    return val;
#else
    uint8_t *p = ptr;
    return p[0] | (p[1] << 8);
#endif
}

static inline int ldsw_le_p(void *ptr)
{
#ifdef __powerpc__
    int val;
    __asm__ __volatile__ ("lhbrx %0,0,%1" : "=r" (val) : "r" (ptr));
    return (int16_t)val;
#else
    uint8_t *p = ptr;
    return (int16_t)(p[0] | (p[1] << 8));
#endif
}

static inline int ldl_le_p(void *ptr)
{
#ifdef __powerpc__
    int val;
    __asm__ __volatile__ ("lwbrx %0,0,%1" : "=r" (val) : "r" (ptr));
    return val;
#else
    uint8_t *p = ptr;
    return p[0] | (p[1] << 8) | (p[2] << 16) | (p[3] << 24);
#endif
}
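
/* Illustrative example (editor's sketch, not part of the original header):
 * composing the value from individual bytes makes the result independent of
 * host endianness and alignment. Given the bytes
 *
 *     uint8_t buf[4] = { 0x78, 0x56, 0x34, 0x12 };
 *
 * ldl_le_p(buf) returns 0x12345678 on any host, because
 * 0x78 | (0x56 << 8) | (0x34 << 16) | (0x12 << 24) == 0x12345678.
 * The powerpc path gets the same effect in one byte-reversed load (lwbrx).
 */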

static inline uint64_t ldq_le_p(void *ptr)
{
    uint8_t *p = ptr;
    uint32_t v1, v2;
    v1 = ldl_le_p(p);
    v2 = ldl_le_p(p + 4);
    return v1 | ((uint64_t)v2 << 32);
}

static inline void stw_le_p(void *ptr, int v)
{
#ifdef __powerpc__
    __asm__ __volatile__ ("sthbrx %1,0,%2" : "=m" (*(uint16_t *)ptr) : "r" (v), "r" (ptr));
#else
    uint8_t *p = ptr;
    p[0] = v;
    p[1] = v >> 8;
#endif
}

static inline void stl_le_p(void *ptr, int v)
{
#ifdef __powerpc__
    __asm__ __volatile__ ("stwbrx %1,0,%2" : "=m" (*(uint32_t *)ptr) : "r" (v), "r" (ptr));
#else
    uint8_t *p = ptr;
    p[0] = v;
    p[1] = v >> 8;
    p[2] = v >> 16;
    p[3] = v >> 24;
#endif
}

static inline void stq_le_p(void *ptr, uint64_t v)
{
    uint8_t *p = ptr;
    stl_le_p(p, (uint32_t)v);
    stl_le_p(p + 4, v >> 32);
}

/* float access */

static inline float32 ldfl_le_p(void *ptr)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.i = ldl_le_p(ptr);
    return u.f;
}

static inline void stfl_le_p(void *ptr, float32 v)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.f = v;
    stl_le_p(ptr, u.i);
}

static inline float64 ldfq_le_p(void *ptr)
{
    CPU_DoubleU u;
    u.l.lower = ldl_le_p(ptr);
    u.l.upper = ldl_le_p(ptr + 4);
    return u.d;
}

static inline void stfq_le_p(void *ptr, float64 v)
{
    CPU_DoubleU u;
    u.d = v;
    stl_le_p(ptr, u.l.lower);
    stl_le_p(ptr + 4, u.l.upper);
}
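
/* Editor's note (illustrative, not part of the original header): the float
 * helpers above deliberately round-trip through a union instead of casting
 * the pointer, so the integer load/store paths, which already cope with
 * alignment and byte order, also move the float bit pattern, e.g.:
 *
 *     stfl_le_p(buf, 1.0f);        // 1.0f is 0x3F800000 -> bytes 00 00 80 3F
 *     float32 f = ldfl_le_p(buf);  // f == 1.0f on any host
 */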

#else

static inline int lduw_le_p(void *ptr)
{
    return *(uint16_t *)ptr;
}

static inline int ldsw_le_p(void *ptr)
{
    return *(int16_t *)ptr;
}

static inline int ldl_le_p(void *ptr)
{
    return *(uint32_t *)ptr;
}

static inline uint64_t ldq_le_p(void *ptr)
{
    return *(uint64_t *)ptr;
}

static inline void stw_le_p(void *ptr, int v)
{
    *(uint16_t *)ptr = v;
}

static inline void stl_le_p(void *ptr, int v)
{
    *(uint32_t *)ptr = v;
}

static inline void stq_le_p(void *ptr, uint64_t v)
{
    *(uint64_t *)ptr = v;
}

/* float access */

static inline float32 ldfl_le_p(void *ptr)
{
    return *(float32 *)ptr;
}

static inline float64 ldfq_le_p(void *ptr)
{
    return *(float64 *)ptr;
}

static inline void stfl_le_p(void *ptr, float32 v)
{
    *(float32 *)ptr = v;
}

static inline void stfq_le_p(void *ptr, float64 v)
{
    *(float64 *)ptr = v;
}
#endif
#endif /* !(VBOX && REM_PHYS_ADDR_IN_TLB) */

#if !defined(WORDS_BIGENDIAN) || defined(WORDS_ALIGNED)

#ifndef VBOX
static inline int lduw_be_p(void *ptr)
{
#if defined(__i386__)
    int val;
    asm volatile ("movzwl %1, %0\n"
                  "xchgb %b0, %h0\n"
                  : "=q" (val)
                  : "m" (*(uint16_t *)ptr));
    return val;
#else
    uint8_t *b = (uint8_t *) ptr;
    return ((b[0] << 8) | b[1]);
#endif
}
#else /* VBOX */
DECLINLINE(int) lduw_be_p(void *ptr)
{
#if defined(__i386__) && !defined(_MSC_VER)
    int val;
    asm volatile ("movzwl %1, %0\n"
                  "xchgb %b0, %h0\n"
                  : "=q" (val)
                  : "m" (*(uint16_t *)ptr));
    return val;
#else
    uint8_t *b = (uint8_t *) ptr;
    return ((b[0] << 8) | b[1]);
#endif
}
#endif

#ifndef VBOX
static inline int ldsw_be_p(void *ptr)
{
#if defined(__i386__)
    int val;
    asm volatile ("movzwl %1, %0\n"
                  "xchgb %b0, %h0\n"
                  : "=q" (val)
                  : "m" (*(uint16_t *)ptr));
    return (int16_t)val;
#else
    uint8_t *b = (uint8_t *) ptr;
    return (int16_t)((b[0] << 8) | b[1]);
#endif
}
#else
DECLINLINE(int) ldsw_be_p(void *ptr)
{
#if defined(__i386__) && !defined(_MSC_VER)
    int val;
    asm volatile ("movzwl %1, %0\n"
                  "xchgb %b0, %h0\n"
                  : "=q" (val)
                  : "m" (*(uint16_t *)ptr));
    return (int16_t)val;
#else
    uint8_t *b = (uint8_t *) ptr;
    return (int16_t)((b[0] << 8) | b[1]);
#endif
}
#endif

#ifndef VBOX
static inline int ldl_be_p(void *ptr)
{
#if defined(__i386__) || defined(__x86_64__)
    int val;
    asm volatile ("movl %1, %0\n"
                  "bswap %0\n"
                  : "=r" (val)
                  : "m" (*(uint32_t *)ptr));
    return val;
#else
    uint8_t *b = (uint8_t *) ptr;
    return (b[0] << 24) | (b[1] << 16) | (b[2] << 8) | b[3];
#endif
}
#else
DECLINLINE(int) ldl_be_p(void *ptr)
{
#if (defined(__i386__) || defined(__x86_64__)) && !defined(_MSC_VER)
    int val;
    asm volatile ("movl %1, %0\n"
                  "bswap %0\n"
                  : "=r" (val)
                  : "m" (*(uint32_t *)ptr));
    return val;
#else
    uint8_t *b = (uint8_t *) ptr;
    return (b[0] << 24) | (b[1] << 16) | (b[2] << 8) | b[3];
#endif
}
#endif

#ifndef VBOX
static inline uint64_t ldq_be_p(void *ptr)
#else
DECLINLINE(uint64_t) ldq_be_p(void *ptr)
#endif
{
    uint32_t a, b;
    a = ldl_be_p(ptr);
    b = ldl_be_p((uint8_t*)ptr+4);
    return (((uint64_t)a<<32)|b);
}

#ifndef VBOX
static inline void stw_be_p(void *ptr, int v)
{
#if defined(__i386__)
    asm volatile ("xchgb %b0, %h0\n"
                  "movw %w0, %1\n"
                  : "=q" (v)
                  : "m" (*(uint16_t *)ptr), "0" (v));
#else
    uint8_t *d = (uint8_t *) ptr;
    d[0] = v >> 8;
    d[1] = v;
#endif
}
#else
DECLINLINE(void) stw_be_p(void *ptr, int v)
{
#if defined(__i386__) && !defined(_MSC_VER)
    asm volatile ("xchgb %b0, %h0\n"
                  "movw %w0, %1\n"
                  : "=q" (v)
                  : "m" (*(uint16_t *)ptr), "0" (v));
#else
    uint8_t *d = (uint8_t *) ptr;
    d[0] = v >> 8;
    d[1] = v;
#endif
}

#endif /* VBOX */

#ifndef VBOX
static inline void stl_be_p(void *ptr, int v)
{
#if defined(__i386__) || defined(__x86_64__)
    asm volatile ("bswap %0\n"
                  "movl %0, %1\n"
                  : "=r" (v)
                  : "m" (*(uint32_t *)ptr), "0" (v));
#else
    uint8_t *d = (uint8_t *) ptr;
    d[0] = v >> 24;
    d[1] = v >> 16;
    d[2] = v >> 8;
    d[3] = v;
#endif
}
#else
DECLINLINE(void) stl_be_p(void *ptr, int v)
{
#if !defined(_MSC_VER) && (defined(__i386__) || defined(__x86_64__))
    asm volatile ("bswap %0\n"
                  "movl %0, %1\n"
                  : "=r" (v)
                  : "m" (*(uint32_t *)ptr), "0" (v));
#else
    uint8_t *d = (uint8_t *) ptr;
    d[0] = v >> 24;
    d[1] = v >> 16;
    d[2] = v >> 8;
    d[3] = v;
#endif
}
#endif /* VBOX */

#ifndef VBOX
static inline void stq_be_p(void *ptr, uint64_t v)
#else
DECLINLINE(void) stq_be_p(void *ptr, uint64_t v)
#endif
{
    stl_be_p(ptr, v >> 32);
    stl_be_p((uint8_t*)ptr + 4, v);
}

/* float access */
#ifndef VBOX
static inline float32 ldfl_be_p(void *ptr)
#else
DECLINLINE(float32) ldfl_be_p(void *ptr)
#endif
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.i = ldl_be_p(ptr);
    return u.f;
}

#ifndef VBOX
static inline void stfl_be_p(void *ptr, float32 v)
#else
DECLINLINE(void) stfl_be_p(void *ptr, float32 v)
#endif
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.f = v;
    stl_be_p(ptr, u.i);
}

#ifndef VBOX
static inline float64 ldfq_be_p(void *ptr)
#else
DECLINLINE(float64) ldfq_be_p(void *ptr)
#endif
{
    CPU_DoubleU u;
    u.l.upper = ldl_be_p(ptr);
    u.l.lower = ldl_be_p((uint8_t*)ptr + 4);
    return u.d;
}

#ifndef VBOX
static inline void stfq_be_p(void *ptr, float64 v)
#else
DECLINLINE(void) stfq_be_p(void *ptr, float64 v)
#endif
{
    CPU_DoubleU u;
    u.d = v;
    stl_be_p(ptr, u.l.upper);
    stl_be_p((uint8_t*)ptr + 4, u.l.lower);
}

#else

static inline int lduw_be_p(void *ptr)
{
    return *(uint16_t *)ptr;
}

static inline int ldsw_be_p(void *ptr)
{
    return *(int16_t *)ptr;
}

static inline int ldl_be_p(void *ptr)
{
    return *(uint32_t *)ptr;
}

static inline uint64_t ldq_be_p(void *ptr)
{
    return *(uint64_t *)ptr;
}

static inline void stw_be_p(void *ptr, int v)
{
    *(uint16_t *)ptr = v;
}

static inline void stl_be_p(void *ptr, int v)
{
    *(uint32_t *)ptr = v;
}

static inline void stq_be_p(void *ptr, uint64_t v)
{
    *(uint64_t *)ptr = v;
}

/* float access */

static inline float32 ldfl_be_p(void *ptr)
{
    return *(float32 *)ptr;
}

static inline float64 ldfq_be_p(void *ptr)
{
    return *(float64 *)ptr;
}

static inline void stfl_be_p(void *ptr, float32 v)
{
    *(float32 *)ptr = v;
}

static inline void stfq_be_p(void *ptr, float64 v)
{
    *(float64 *)ptr = v;
}

#endif

/* target CPU memory access functions */
#if defined(TARGET_WORDS_BIGENDIAN)
#define lduw_p(p) lduw_be_p(p)
#define ldsw_p(p) ldsw_be_p(p)
#define ldl_p(p) ldl_be_p(p)
#define ldq_p(p) ldq_be_p(p)
#define ldfl_p(p) ldfl_be_p(p)
#define ldfq_p(p) ldfq_be_p(p)
#define stw_p(p, v) stw_be_p(p, v)
#define stl_p(p, v) stl_be_p(p, v)
#define stq_p(p, v) stq_be_p(p, v)
#define stfl_p(p, v) stfl_be_p(p, v)
#define stfq_p(p, v) stfq_be_p(p, v)
#else
#define lduw_p(p) lduw_le_p(p)
#define ldsw_p(p) ldsw_le_p(p)
#define ldl_p(p) ldl_le_p(p)
#define ldq_p(p) ldq_le_p(p)
#define ldfl_p(p) ldfl_le_p(p)
#define ldfq_p(p) ldfq_le_p(p)
#define stw_p(p, v) stw_le_p(p, v)
#define stl_p(p, v) stl_le_p(p, v)
#define stq_p(p, v) stq_le_p(p, v)
#define stfl_p(p, v) stfl_le_p(p, v)
#define stfq_p(p, v) stfq_le_p(p, v)
#endif
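
/* Illustrative example (editor's sketch, not part of the original header):
 * for an x86 target (TARGET_WORDS_BIGENDIAN undefined), ldl_p(p) expands to
 * ldl_le_p(p); for a big-endian target such as ppc it expands to ldl_be_p(p).
 * Generic code can therefore use the endian-neutral names throughout:
 *
 *     uint32_t insn = ldl_p(code_ptr);   // target byte order, on any host
 */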

/* MMU memory access macros */

#if defined(CONFIG_USER_ONLY)
/* On some host systems the guest address space is reserved on the host.
 * This allows the guest address space to be offset to a convenient location.
 */
//#define GUEST_BASE 0x20000000
#define GUEST_BASE 0

/* All direct uses of g2h and h2g need to go away for usermode softmmu. */
#define g2h(x) ((void *)((unsigned long)(x) + GUEST_BASE))
#define h2g(x) ((target_ulong)(x - GUEST_BASE))
#define saddr(x) g2h(x)
#define laddr(x) g2h(x)
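
/* Illustrative example (editor's sketch, not part of the original header):
 * with GUEST_BASE set to 0x20000000, guest address 0x1000 would map to host
 * pointer (void *)0x20001000 via g2h(), and h2g() inverts that:
 *
 *     void *hp = g2h(0x1000);      // host pointer 0x20001000
 *     target_ulong ga = h2g(hp);   // back to guest address 0x1000
 *
 * With the default GUEST_BASE of 0, both macros are identity mappings.
 */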

#else /* !CONFIG_USER_ONLY */
/* NOTE: we use double casts if pointers and target_ulong have
   different sizes */
#define saddr(x) (uint8_t *)(long)(x)
#define laddr(x) (uint8_t *)(long)(x)
#endif

#define ldub_raw(p) ldub_p(laddr((p)))
#define ldsb_raw(p) ldsb_p(laddr((p)))
#define lduw_raw(p) lduw_p(laddr((p)))
#define ldsw_raw(p) ldsw_p(laddr((p)))
#define ldl_raw(p) ldl_p(laddr((p)))
#define ldq_raw(p) ldq_p(laddr((p)))
#define ldfl_raw(p) ldfl_p(laddr((p)))
#define ldfq_raw(p) ldfq_p(laddr((p)))
#define stb_raw(p, v) stb_p(saddr((p)), v)
#define stw_raw(p, v) stw_p(saddr((p)), v)
#define stl_raw(p, v) stl_p(saddr((p)), v)
#define stq_raw(p, v) stq_p(saddr((p)), v)
#define stfl_raw(p, v) stfl_p(saddr((p)), v)
#define stfq_raw(p, v) stfq_p(saddr((p)), v)


#if defined(CONFIG_USER_ONLY)

/* if user mode, no other memory access functions */
#define ldub(p) ldub_raw(p)
#define ldsb(p) ldsb_raw(p)
#define lduw(p) lduw_raw(p)
#define ldsw(p) ldsw_raw(p)
#define ldl(p) ldl_raw(p)
#define ldq(p) ldq_raw(p)
#define ldfl(p) ldfl_raw(p)
#define ldfq(p) ldfq_raw(p)
#define stb(p, v) stb_raw(p, v)
#define stw(p, v) stw_raw(p, v)
#define stl(p, v) stl_raw(p, v)
#define stq(p, v) stq_raw(p, v)
#define stfl(p, v) stfl_raw(p, v)
#define stfq(p, v) stfq_raw(p, v)

#define ldub_code(p) ldub_raw(p)
#define ldsb_code(p) ldsb_raw(p)
#define lduw_code(p) lduw_raw(p)
#define ldsw_code(p) ldsw_raw(p)
#define ldl_code(p) ldl_raw(p)

#define ldub_kernel(p) ldub_raw(p)
#define ldsb_kernel(p) ldsb_raw(p)
#define lduw_kernel(p) lduw_raw(p)
#define ldsw_kernel(p) ldsw_raw(p)
#define ldl_kernel(p) ldl_raw(p)
#define ldfl_kernel(p) ldfl_raw(p)
#define ldfq_kernel(p) ldfq_raw(p)
#define stb_kernel(p, v) stb_raw(p, v)
#define stw_kernel(p, v) stw_raw(p, v)
#define stl_kernel(p, v) stl_raw(p, v)
#define stq_kernel(p, v) stq_raw(p, v)
#define stfl_kernel(p, v) stfl_raw(p, v)
#define stfq_kernel(p, v) stfq_raw(p, v)

#endif /* defined(CONFIG_USER_ONLY) */

/* page related stuff */

#define TARGET_PAGE_SIZE (1 << TARGET_PAGE_BITS)
#define TARGET_PAGE_MASK ~(TARGET_PAGE_SIZE - 1)
#define TARGET_PAGE_ALIGN(addr) (((addr) + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK)
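
/* Worked example (editor's sketch, not part of the original header):
 * with TARGET_PAGE_BITS == 12 (4 KiB pages, the usual x86 case):
 *
 *     TARGET_PAGE_SIZE          == 0x1000
 *     TARGET_PAGE_MASK          == ~0xFFF
 *     TARGET_PAGE_ALIGN(0x1234) == (0x1234 + 0xFFF) & ~0xFFF == 0x2000
 *     TARGET_PAGE_ALIGN(0x2000) == 0x2000   // already aligned, unchanged
 */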

/* ??? These should be the larger of unsigned long and target_ulong. */
extern unsigned long qemu_real_host_page_size;
extern unsigned long qemu_host_page_bits;
extern unsigned long qemu_host_page_size;
extern unsigned long qemu_host_page_mask;

#define HOST_PAGE_ALIGN(addr) (((addr) + qemu_host_page_size - 1) & qemu_host_page_mask)

/* same as PROT_xxx */
#define PAGE_READ 0x0001
#define PAGE_WRITE 0x0002
#define PAGE_EXEC 0x0004
#define PAGE_BITS (PAGE_READ | PAGE_WRITE | PAGE_EXEC)
#define PAGE_VALID 0x0008
/* original state of the write flag (used when tracking self-modifying
   code) */
#define PAGE_WRITE_ORG 0x0010
#define PAGE_RESERVED 0x0020

void page_dump(FILE *f);
int page_get_flags(target_ulong address);
void page_set_flags(target_ulong start, target_ulong end, int flags);
int page_check_range(target_ulong start, target_ulong len, int flags);
void page_unprotect_range(target_ulong data, target_ulong data_size);

#define SINGLE_CPU_DEFINES
#ifdef SINGLE_CPU_DEFINES

#if defined(TARGET_I386)

#define CPUState CPUX86State
#define cpu_init cpu_x86_init
#define cpu_exec cpu_x86_exec
#define cpu_gen_code cpu_x86_gen_code
#define cpu_signal_handler cpu_x86_signal_handler

#elif defined(TARGET_ARM)

#define CPUState CPUARMState
#define cpu_init cpu_arm_init
#define cpu_exec cpu_arm_exec
#define cpu_gen_code cpu_arm_gen_code
#define cpu_signal_handler cpu_arm_signal_handler

#elif defined(TARGET_SPARC)

#define CPUState CPUSPARCState
#define cpu_init cpu_sparc_init
#define cpu_exec cpu_sparc_exec
#define cpu_gen_code cpu_sparc_gen_code
#define cpu_signal_handler cpu_sparc_signal_handler

#elif defined(TARGET_PPC)

#define CPUState CPUPPCState
#define cpu_init cpu_ppc_init
#define cpu_exec cpu_ppc_exec
#define cpu_gen_code cpu_ppc_gen_code
#define cpu_signal_handler cpu_ppc_signal_handler

#elif defined(TARGET_M68K)
#define CPUState CPUM68KState
#define cpu_init cpu_m68k_init
#define cpu_exec cpu_m68k_exec
#define cpu_gen_code cpu_m68k_gen_code
#define cpu_signal_handler cpu_m68k_signal_handler

#elif defined(TARGET_MIPS)
#define CPUState CPUMIPSState
#define cpu_init cpu_mips_init
#define cpu_exec cpu_mips_exec
#define cpu_gen_code cpu_mips_gen_code
#define cpu_signal_handler cpu_mips_signal_handler

#elif defined(TARGET_SH4)
#define CPUState CPUSH4State
#define cpu_init cpu_sh4_init
#define cpu_exec cpu_sh4_exec
#define cpu_gen_code cpu_sh4_gen_code
#define cpu_signal_handler cpu_sh4_signal_handler

#else

#error unsupported target CPU

#endif

#endif /* SINGLE_CPU_DEFINES */

void cpu_dump_state(CPUState *env, FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
                    int flags);

DECLNORETURN(void) cpu_abort(CPUState *env, const char *fmt, ...);
extern CPUState *first_cpu;
extern CPUState *cpu_single_env;
extern int64_t qemu_icount;
extern int use_icount;

#define CPU_INTERRUPT_EXIT 0x01 /* wants exit from main loop */
#define CPU_INTERRUPT_HARD 0x02 /* hardware interrupt pending */
#define CPU_INTERRUPT_EXITTB 0x04 /* exit the current TB (use for x86 a20 case) */
#define CPU_INTERRUPT_TIMER 0x08 /* internal timer exception pending */
#define CPU_INTERRUPT_FIQ 0x10 /* Fast interrupt pending. */
#define CPU_INTERRUPT_HALT 0x20 /* CPU halt wanted */
#define CPU_INTERRUPT_SMI 0x40 /* (x86 only) SMI interrupt pending */
#define CPU_INTERRUPT_DEBUG 0x80 /* Debug event occurred. */
#define CPU_INTERRUPT_VIRQ 0x100 /* virtual interrupt pending. */
#define CPU_INTERRUPT_NMI 0x200 /* NMI pending. */

#ifdef VBOX
/** Executes a single instruction. cpu_exec() will normally return EXCP_SINGLE_INSTR. */
#define CPU_INTERRUPT_SINGLE_INSTR 0x0400
/** Executing a CPU_INTERRUPT_SINGLE_INSTR request, quit the cpu_loop. (for exceptions and suchlike) */
#define CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT 0x0800
/** VM execution was interrupted by VMR3Reset, VMR3Suspend or VMR3PowerOff. */
#define CPU_INTERRUPT_RC 0x1000
/** Exit current TB to process an external interrupt request (also in op.c!!) */
#define CPU_INTERRUPT_EXTERNAL_EXIT 0x2000
/** Exit current TB to process an external interrupt request (also in op.c!!) */
#define CPU_INTERRUPT_EXTERNAL_HARD 0x4000
/** Exit current TB to process an external interrupt request (also in op.c!!) */
#define CPU_INTERRUPT_EXTERNAL_TIMER 0x8000
/** Exit current TB to process an external interrupt request (also in op.c!!) */
#define CPU_INTERRUPT_EXTERNAL_DMA 0x10000
#endif /* VBOX */
void cpu_interrupt(CPUState *s, int mask);
void cpu_reset_interrupt(CPUState *env, int mask);

int cpu_watchpoint_insert(CPUState *env, target_ulong addr, int type);
int cpu_watchpoint_remove(CPUState *env, target_ulong addr);
void cpu_watchpoint_remove_all(CPUState *env);
int cpu_breakpoint_insert(CPUState *env, target_ulong pc);
int cpu_breakpoint_remove(CPUState *env, target_ulong pc);
void cpu_breakpoint_remove_all(CPUState *env);

#define SSTEP_ENABLE 0x1 /* Enable simulated HW single stepping */
#define SSTEP_NOIRQ 0x2 /* Do not use IRQ while single stepping */
#define SSTEP_NOTIMER 0x4 /* Do not use timers while single stepping */

void cpu_single_step(CPUState *env, int enabled);
void cpu_reset(CPUState *s);

/* Return the physical page corresponding to a virtual one. Use it
   only for debugging because no protection checks are done. Return -1
   if no page found. */
target_ulong cpu_get_phys_page_debug(CPUState *env, target_ulong addr);

#define CPU_LOG_TB_OUT_ASM (1 << 0)
#define CPU_LOG_TB_IN_ASM (1 << 1)
#define CPU_LOG_TB_OP (1 << 2)
#define CPU_LOG_TB_OP_OPT (1 << 3)
#define CPU_LOG_INT (1 << 4)
#define CPU_LOG_EXEC (1 << 5)
#define CPU_LOG_PCALL (1 << 6)
#define CPU_LOG_IOPORT (1 << 7)
#define CPU_LOG_TB_CPU (1 << 8)

/* define log items */
typedef struct CPULogItem {
    int mask;
    const char *name;
    const char *help;
} CPULogItem;

extern CPULogItem cpu_log_items[];

void cpu_set_log(int log_flags);
void cpu_set_log_filename(const char *filename);
int cpu_str_to_log_mask(const char *str);

/* IO ports API */

/* NOTE: as these functions may even be used when there is an ISA
   bridge on non-x86 targets, we always define them */
#ifndef NO_CPU_IO_DEFS
void cpu_outb(CPUState *env, int addr, int val);
void cpu_outw(CPUState *env, int addr, int val);
void cpu_outl(CPUState *env, int addr, int val);
int cpu_inb(CPUState *env, int addr);
int cpu_inw(CPUState *env, int addr);
int cpu_inl(CPUState *env, int addr);
#endif

/* address in the RAM (different from a physical address) */
#ifdef USE_KQEMU
typedef uint32_t ram_addr_t;
#else
typedef unsigned long ram_addr_t;
#endif

/* memory API */

#ifndef VBOX
extern int phys_ram_size;
extern int phys_ram_fd;
#else /* VBOX */
extern RTGCPHYS phys_ram_size;
/** This is required for bounds checking the phys_ram_dirty accesses. */
extern uint32_t phys_ram_dirty_size;
#endif /* VBOX */
#if !defined(VBOX)
extern uint8_t *phys_ram_base;
#endif
extern uint8_t *phys_ram_dirty;

/* physical memory access */

/* MMIO pages are identified by a combination of an IO device index and
   3 flags. The ROMD code stores the page ram offset in the iotlb entry,
   so only a limited number of ids are available. */

#define IO_MEM_SHIFT 3
#define IO_MEM_NB_ENTRIES (1 << (TARGET_PAGE_BITS - IO_MEM_SHIFT))

#define IO_MEM_RAM (0 << IO_MEM_SHIFT) /* hardcoded offset */
#define IO_MEM_ROM (1 << IO_MEM_SHIFT) /* hardcoded offset */
#define IO_MEM_UNASSIGNED (2 << IO_MEM_SHIFT)
#define IO_MEM_NOTDIRTY (3 << IO_MEM_SHIFT)
#if defined(VBOX) && !defined(VBOX_WITH_NEW_PHYS_CODE)
#define IO_MEM_RAM_MISSING (5 << IO_MEM_SHIFT) /* used internally, never use directly */
#endif

/* Acts like a ROM when read and like a device when written. */
#define IO_MEM_ROMD (1)
#define IO_MEM_SUBPAGE (2)
#define IO_MEM_SUBWIDTH (4)

/* Flags stored in the low bits of the TLB virtual address. These are
   defined so that fast path ram access is all zeros. */
/* Zero if TLB entry is valid. */
#define TLB_INVALID_MASK (1 << 3)
/* Set if TLB entry references a clean RAM page. The iotlb entry will
   contain the page physical address. */
#define TLB_NOTDIRTY (1 << 4)
/* Set if TLB entry is an IO callback. */
#define TLB_MMIO (1 << 5)
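
/* Editor's note (illustrative, not part of the original header): page
 * addresses stored in the TLB are TARGET_PAGE_SIZE-aligned, so their low
 * TARGET_PAGE_BITS bits are free to carry these flags. For ordinary RAM all
 * flag bits are zero, which keeps the fast-path compare ("is this the
 * expected page, with no special handling?") a single masked equality test;
 * any set flag diverts to the slow path (dirty tracking or MMIO callback).
 */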

typedef void CPUWriteMemoryFunc(void *opaque, target_phys_addr_t addr, uint32_t value);
typedef uint32_t CPUReadMemoryFunc(void *opaque, target_phys_addr_t addr);

void cpu_register_physical_memory(target_phys_addr_t start_addr,
                                  ram_addr_t size,
                                  ram_addr_t phys_offset);
uint32_t cpu_get_physical_page_desc(target_phys_addr_t addr);
ram_addr_t qemu_ram_alloc(ram_addr_t);
void qemu_ram_free(ram_addr_t addr);
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write,
                           void *opaque);
CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index);
CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index);

void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write);
#ifndef VBOX
static inline void cpu_physical_memory_read(target_phys_addr_t addr,
                                            uint8_t *buf, int len)
#else
DECLINLINE(void) cpu_physical_memory_read(target_phys_addr_t addr,
                                          uint8_t *buf, int len)
#endif
{
    cpu_physical_memory_rw(addr, buf, len, 0);
}
#ifndef VBOX
static inline void cpu_physical_memory_write(target_phys_addr_t addr,
                                             const uint8_t *buf, int len)
#else
DECLINLINE(void) cpu_physical_memory_write(target_phys_addr_t addr,
                                           const uint8_t *buf, int len)
#endif
{
    cpu_physical_memory_rw(addr, (uint8_t *)buf, len, 1);
}
uint32_t ldub_phys(target_phys_addr_t addr);
uint32_t lduw_phys(target_phys_addr_t addr);
uint32_t ldl_phys(target_phys_addr_t addr);
uint64_t ldq_phys(target_phys_addr_t addr);
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val);
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val);
void stb_phys(target_phys_addr_t addr, uint32_t val);
void stw_phys(target_phys_addr_t addr, uint32_t val);
void stl_phys(target_phys_addr_t addr, uint32_t val);
void stq_phys(target_phys_addr_t addr, uint64_t val);

void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len);
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write);

#define VGA_DIRTY_FLAG 0x01
#define CODE_DIRTY_FLAG 0x02
#define KQEMU_DIRTY_FLAG 0x04
#define MIGRATION_DIRTY_FLAG 0x08
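
/* Editor's note (illustrative, not part of the original header): each guest
 * page has one byte in phys_ram_dirty, with one flag bit per client (VGA
 * refresh, self-modifying-code detection, kqemu, migration). A write sets
 * the whole byte to 0xff ("dirty for everyone"); each client then clears
 * only its own flag via cpu_physical_memory_reset_dirty(), e.g.:
 *
 *     if (cpu_physical_memory_get_dirty(addr, VGA_DIRTY_FLAG)) {
 *         // redraw this page, then clear just the VGA flag
 *         cpu_physical_memory_reset_dirty(addr, addr + TARGET_PAGE_SIZE,
 *                                         VGA_DIRTY_FLAG);
 *     }
 */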

/* read dirty bit (return 0 or 1) */
#ifndef VBOX
static inline int cpu_physical_memory_is_dirty(ram_addr_t addr)
{
    return phys_ram_dirty[addr >> TARGET_PAGE_BITS] == 0xff;
}
#else
DECLINLINE(int) cpu_physical_memory_is_dirty(ram_addr_t addr)
{
    if (RT_UNLIKELY((addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
    {
        Log(("cpu_physical_memory_is_dirty: %RGp\n", (RTGCPHYS)addr));
        /*AssertMsgFailed(("cpu_physical_memory_is_dirty: %RGp\n", (RTGCPHYS)addr));*/
        return 0;
    }
    return phys_ram_dirty[addr >> TARGET_PAGE_BITS] == 0xff;
}
#endif

#ifndef VBOX
static inline int cpu_physical_memory_get_dirty(ram_addr_t addr,
                                                int dirty_flags)
{
    return phys_ram_dirty[addr >> TARGET_PAGE_BITS] & dirty_flags;
}
#else
DECLINLINE(int) cpu_physical_memory_get_dirty(ram_addr_t addr,
                                              int dirty_flags)
{
    if (RT_UNLIKELY((addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
    {
        Log(("cpu_physical_memory_get_dirty: %RGp\n", (RTGCPHYS)addr));
        /*AssertMsgFailed(("cpu_physical_memory_get_dirty: %RGp\n", (RTGCPHYS)addr));*/
        return 0xff & dirty_flags; /** @todo I don't think this is the right thing to return, fix! */
    }
    return phys_ram_dirty[addr >> TARGET_PAGE_BITS] & dirty_flags;
}
#endif

#ifndef VBOX
static inline void cpu_physical_memory_set_dirty(ram_addr_t addr)
{
    phys_ram_dirty[addr >> TARGET_PAGE_BITS] = 0xff;
}
#else
DECLINLINE(void) cpu_physical_memory_set_dirty(ram_addr_t addr)
{
    if (RT_UNLIKELY((addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
    {
        Log(("cpu_physical_memory_set_dirty: %RGp\n", (RTGCPHYS)addr));
        /*AssertMsgFailed(("cpu_physical_memory_set_dirty: %RGp\n", (RTGCPHYS)addr));*/
        return;
    }
    phys_ram_dirty[addr >> TARGET_PAGE_BITS] = 0xff;
}
#endif

void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags);
void cpu_tlb_update_dirty(CPUState *env);

int cpu_physical_memory_set_dirty_tracking(int enable);

int cpu_physical_memory_get_dirty_tracking(void);

void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...));

/*******************************************/
/* host CPU ticks (if available) */

#ifdef VBOX

DECLINLINE(int64_t) cpu_get_real_ticks(void)
{
    return ASMReadTSC();
}

#elif defined(__powerpc__)

static inline uint32_t get_tbl(void)
{
    uint32_t tbl;
    asm volatile("mftb %0" : "=r" (tbl));
    return tbl;
}

static inline uint32_t get_tbu(void)
{
    uint32_t tbl;
    asm volatile("mftbu %0" : "=r" (tbl));
    return tbl;
}

static inline int64_t cpu_get_real_ticks(void)
{
    uint32_t l, h, h1;
    /* NOTE: we test if wrapping has occurred */
    do {
        h = get_tbu();
        l = get_tbl();
        h1 = get_tbu();
    } while (h != h1);
    return ((int64_t)h << 32) | l;
}

#elif defined(__i386__)

static inline int64_t cpu_get_real_ticks(void)
{
    int64_t val;
    asm volatile ("rdtsc" : "=A" (val));
    return val;
}

#elif defined(__x86_64__)

static inline int64_t cpu_get_real_ticks(void)
{
    uint32_t low, high;
    int64_t val;
    asm volatile("rdtsc" : "=a" (low), "=d" (high));
    val = high;
    val <<= 32;
    val |= low;
    return val;
}

#elif defined(__ia64)

static inline int64_t cpu_get_real_ticks(void)
{
    int64_t val;
    asm volatile ("mov %0 = ar.itc" : "=r"(val) :: "memory");
    return val;
}

#elif defined(__s390__)

static inline int64_t cpu_get_real_ticks(void)
{
    int64_t val;
    asm volatile("stck 0(%1)" : "=m" (val) : "a" (&val) : "cc");
    return val;
}

#elif defined(__sparc_v9__)

static inline int64_t cpu_get_real_ticks (void)
{
#if defined(_LP64)
    uint64_t rval;
    asm volatile("rd %%tick,%0" : "=r"(rval));
    return rval;
#else
    union {
        uint64_t i64;
        struct {
            uint32_t high;
            uint32_t low;
        } i32;
    } rval;
    asm volatile("rd %%tick,%1; srlx %1,32,%0"
                 : "=r"(rval.i32.high), "=r"(rval.i32.low));
    return rval.i64;
#endif
}
#else
/* The host CPU doesn't have an easily accessible cycle counter.
   Just return a monotonically increasing value. This will be totally wrong,
   but hopefully better than nothing. */
static inline int64_t cpu_get_real_ticks (void)
{
    static int64_t ticks = 0;
    return ticks++;
}
#endif

/* profiling */
#ifdef CONFIG_PROFILER
static inline int64_t profile_getclock(void)
{
    return cpu_get_real_ticks();
}

extern int64_t kqemu_time, kqemu_time_start;
extern int64_t qemu_time, qemu_time_start;
extern int64_t tlb_flush_time;
extern int64_t kqemu_exec_count;
extern int64_t dev_time;
extern int64_t kqemu_ret_int_count;
extern int64_t kqemu_ret_excp_count;
extern int64_t kqemu_ret_intr_count;

#endif

#ifdef VBOX
void tb_invalidate_virt(CPUState *env, uint32_t eip);
#endif /* VBOX */

#endif /* CPU_ALL_H */