VirtualBox

source: vbox/trunk/src/recompiler_new/exec.c@ 13384

Last change on this file since 13384 was 13382, checked in by vboxsync, 16 years ago

more MSVC-related stuff

  • Property svn:eol-style set to native
File size: 93.0 KB
1/*
2 * virtual page mapping and translated block handling
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21/*
22 * Sun LGPL Disclaimer: For the avoidance of doubt, except that if any license choice
23 * other than GPL or LGPL is available it will apply instead, Sun elects to use only
24 * the Lesser General Public License version 2.1 (LGPLv2) at this time for any software where
25 * a choice of LGPL license versions is made available with the language indicating
26 * that LGPLv2 or any later version may be used, or where a choice of which version
27 * of the LGPL is applied is otherwise unspecified.
28 */
29#include "config.h"
30#ifndef VBOX
31#ifdef _WIN32
32#include <windows.h>
33#else
34#include <sys/types.h>
35#include <sys/mman.h>
36#endif
37#include <stdlib.h>
38#include <stdio.h>
39#include <stdarg.h>
40#include <string.h>
41#include <errno.h>
42#include <unistd.h>
43#include <inttypes.h>
44#else /* VBOX */
45# include <stdlib.h>
46# include <stdio.h>
47# include <iprt/alloc.h>
48# include <iprt/string.h>
49# include <iprt/param.h>
50# include <VBox/pgm.h> /* PGM_DYNAMIC_RAM_ALLOC */
51#endif /* VBOX */
52
53#include "cpu.h"
54#include "exec-all.h"
55#if defined(CONFIG_USER_ONLY)
56#include <qemu.h>
57#endif
58
59//#define DEBUG_TB_INVALIDATE
60//#define DEBUG_FLUSH
61//#define DEBUG_TLB
62//#define DEBUG_UNASSIGNED
63
64/* make various TB consistency checks */
65//#define DEBUG_TB_CHECK
66//#define DEBUG_TLB_CHECK
67
68#if !defined(CONFIG_USER_ONLY)
69/* TB consistency checks only implemented for usermode emulation. */
70#undef DEBUG_TB_CHECK
71#endif
72
73#define SMC_BITMAP_USE_THRESHOLD 10
74
75#define MMAP_AREA_START 0x00000000
76#define MMAP_AREA_END 0xa8000000
77
78#if defined(TARGET_SPARC64)
79#define TARGET_PHYS_ADDR_SPACE_BITS 41
80#elif defined(TARGET_SPARC)
81#define TARGET_PHYS_ADDR_SPACE_BITS 36
82#elif defined(TARGET_ALPHA)
83#define TARGET_PHYS_ADDR_SPACE_BITS 42
84#define TARGET_VIRT_ADDR_SPACE_BITS 42
85#elif defined(TARGET_PPC64)
86#define TARGET_PHYS_ADDR_SPACE_BITS 42
87#elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
88#define TARGET_PHYS_ADDR_SPACE_BITS 42
89#elif defined(TARGET_I386) && !defined(USE_KQEMU)
90#define TARGET_PHYS_ADDR_SPACE_BITS 36
91#else
92/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
93#define TARGET_PHYS_ADDR_SPACE_BITS 32
94#endif
95
96static TranslationBlock *tbs;
97int code_gen_max_blocks;
98TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
99static int nb_tbs;
100/* any access to the tbs or the page table must use this lock */
101spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
102
103#ifndef VBOX
104#if defined(__arm__) || defined(__sparc_v9__)
105/* The prologue must be reachable with a direct jump. ARM and Sparc64
106 have limited branch ranges (possibly also PPC) so place it in a
107 section close to the code segment. */
108#define code_gen_section \
109 __attribute__((__section__(".gen_code"))) \
110 __attribute__((aligned (32)))
111#else
112#define code_gen_section \
113 __attribute__((aligned (32)))
114#endif
115uint8_t code_gen_prologue[1024] code_gen_section;
116
117#else /* VBOX */
118ALIGNED_MEMBER(uint8_t, code_gen_prologue[1024], 32);
119#endif /* VBOX */
120
121static uint8_t *code_gen_buffer;
122static unsigned long code_gen_buffer_size;
123/* threshold to flush the translated code buffer */
124static unsigned long code_gen_buffer_max_size;
125uint8_t *code_gen_ptr;
126
127#ifndef VBOX
128#if !defined(CONFIG_USER_ONLY)
129ram_addr_t phys_ram_size;
130int phys_ram_fd;
131uint8_t *phys_ram_base;
132uint8_t *phys_ram_dirty;
133static int in_migration;
134static ram_addr_t phys_ram_alloc_offset = 0;
135#endif
136#else /* VBOX */
137RTGCPHYS phys_ram_size;
138/* we have memory ranges (the high PC-BIOS mapping) which
139 cause some pages to fall outside the dirty map here. */
140uint32_t phys_ram_dirty_size;
141#endif /* VBOX */
142#if !defined(VBOX)
143uint8_t *phys_ram_base;
144#endif
145uint8_t *phys_ram_dirty;
146
147CPUState *first_cpu;
148/* current CPU in the current thread. It is only valid inside
149 cpu_exec() */
150CPUState *cpu_single_env;
151/* 0 = Do not count executed instructions.
152 1 = Precise instruction counting.
153 2 = Adaptive rate instruction counting. */
154int use_icount = 0;
155/* Current instruction counter. While executing translated code this may
156 include some instructions that have not yet been executed. */
157int64_t qemu_icount;
158
159typedef struct PageDesc {
160 /* list of TBs intersecting this ram page */
161 TranslationBlock *first_tb;
162 /* in order to optimize self modifying code, we count the number
163 of lookups we do to a given page to use a bitmap */
164 unsigned int code_write_count;
165 uint8_t *code_bitmap;
166#if defined(CONFIG_USER_ONLY)
167 unsigned long flags;
168#endif
169} PageDesc;
170
171typedef struct PhysPageDesc {
172 /* offset in host memory of the page + io_index in the low 12 bits */
173 ram_addr_t phys_offset;
174} PhysPageDesc;
175
176#define L2_BITS 10
177#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
178/* XXX: this is a temporary hack for alpha target.
179 * In the future, this is to be replaced by a multi-level table
180 * to actually be able to handle the complete 64-bit address space.
181 */
182#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
183#else
184#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
185#endif
186
187#define L1_SIZE (1 << L1_BITS)
188#define L2_SIZE (1 << L2_BITS)
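/* A target page index therefore splits into l1 = index >> L2_BITS and
   l2 = index & (L2_SIZE - 1), which is how page_find_alloc() walks the map. */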
189
190static void io_mem_init(void);
191
192unsigned long qemu_real_host_page_size;
193unsigned long qemu_host_page_bits;
194unsigned long qemu_host_page_size;
195unsigned long qemu_host_page_mask;
196
197/* XXX: for system emulation, it could just be an array */
198static PageDesc *l1_map[L1_SIZE];
199static PhysPageDesc **l1_phys_map;
200
201#if !defined(CONFIG_USER_ONLY)
202static void io_mem_init(void);
203
204/* io memory support */
205CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
206CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
207void *io_mem_opaque[IO_MEM_NB_ENTRIES];
208static int io_mem_nb;
209static int io_mem_watch;
210#endif
211
212#ifndef VBOX
213/* log support */
214static const char *logfilename = "/tmp/qemu.log";
215#endif /* !VBOX */
216FILE *logfile;
217int loglevel;
218#ifndef VBOX
219static int log_append = 0;
220#endif
221
222/* statistics */
223static int tlb_flush_count;
224static int tb_flush_count;
225#ifndef VBOX
226static int tb_phys_invalidate_count;
227#endif /* !VBOX */
228
229#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
230typedef struct subpage_t {
231 target_phys_addr_t base;
232 CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
233 CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
234 void *opaque[TARGET_PAGE_SIZE][2][4];
235} subpage_t;
236
237
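/* Make the host pages covering [addr, addr + size) readable, writable and
   executable; used for the code generation buffer and prologue. */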
238#ifndef VBOX
239#ifdef _WIN32
240static void map_exec(void *addr, long size)
241{
242 DWORD old_protect;
243 VirtualProtect(addr, size,
244 PAGE_EXECUTE_READWRITE, &old_protect);
245
246}
247#else
248static void map_exec(void *addr, long size)
249{
250 unsigned long start, end, page_size;
251
252 page_size = getpagesize();
253 start = (unsigned long)addr;
254 start &= ~(page_size - 1);
255
256 end = (unsigned long)addr + size;
257 end += page_size - 1;
258 end &= ~(page_size - 1);
259
260 mprotect((void *)start, end - start,
261 PROT_READ | PROT_WRITE | PROT_EXEC);
262}
263#endif
264#else // VBOX
265static void map_exec(void *addr, long size)
266{
267 RTMemProtect(addr, size,
268 RTMEM_PROT_EXEC | RTMEM_PROT_READ | RTMEM_PROT_WRITE);
269}
270#endif
271
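/* Determine the host page size/bits/mask and allocate + clear the first level
   of the physical page map; in non-VBox user mode, regions already mapped by
   the host (from /proc/self/maps) are marked PAGE_RESERVED. */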
272static void page_init(void)
273{
274 /* NOTE: we can always suppose that qemu_host_page_size >=
275 TARGET_PAGE_SIZE */
276#ifdef VBOX
277 RTMemProtect(code_gen_buffer, sizeof(code_gen_buffer),
278 RTMEM_PROT_EXEC | RTMEM_PROT_READ | RTMEM_PROT_WRITE);
279 qemu_real_host_page_size = PAGE_SIZE;
280#else /* !VBOX */
281#ifdef _WIN32
282 {
283 SYSTEM_INFO system_info;
284 DWORD old_protect;
285
286 GetSystemInfo(&system_info);
287 qemu_real_host_page_size = system_info.dwPageSize;
288 }
289#else
290 qemu_real_host_page_size = getpagesize();
291#endif
292#endif /* !VBOX */
293
294 if (qemu_host_page_size == 0)
295 qemu_host_page_size = qemu_real_host_page_size;
296 if (qemu_host_page_size < TARGET_PAGE_SIZE)
297 qemu_host_page_size = TARGET_PAGE_SIZE;
298 qemu_host_page_bits = 0;
299 while ((1 << qemu_host_page_bits) < qemu_host_page_size)
300 qemu_host_page_bits++;
301 qemu_host_page_mask = ~(qemu_host_page_size - 1);
302 l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
303 memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
304#ifdef VBOX
305 /* We use other means to set the reserved bit on our pages */
306#else
307#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
308 {
309 long long startaddr, endaddr;
310 FILE *f;
311 int n;
312
313 mmap_lock();
314 last_brk = (unsigned long)sbrk(0);
315 f = fopen("/proc/self/maps", "r");
316 if (f) {
317 do {
318 n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
319 if (n == 2) {
320 startaddr = MIN(startaddr,
321 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
322 endaddr = MIN(endaddr,
323 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
324 page_set_flags(startaddr & TARGET_PAGE_MASK,
325 TARGET_PAGE_ALIGN(endaddr),
326 PAGE_RESERVED);
327 }
328 } while (!feof(f));
329 fclose(f);
330 }
331 mmap_unlock();
332 }
333#endif
334#endif
335}
336
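/* Return the first-level slot for the given target page index, or NULL when
   the index lies outside the representable range (targets wider than 32 bits). */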
337#ifndef VBOX
338static inline PageDesc **page_l1_map(target_ulong index)
339#else
340DECLINLINE(PageDesc **) page_l1_map(target_ulong index)
341#endif
342{
343#if TARGET_LONG_BITS > 32
344 /* Host memory outside guest VM. For 32-bit targets we have already
345 excluded high addresses. */
346 if (index > ((target_ulong)L2_SIZE * L1_SIZE))
347 return NULL;
348#endif
349 return &l1_map[index >> L2_BITS];
350}
351
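/* Look up the PageDesc for a target page index, allocating the second-level
   table on demand. */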
352#ifndef VBOX
353static inline PageDesc *page_find_alloc(target_ulong index)
354#else
355DECLINLINE(PageDesc *) page_find_alloc(target_ulong index)
356#endif
357{
358 PageDesc **lp, *p;
359 lp = page_l1_map(index);
360 if (!lp)
361 return NULL;
362
363 p = *lp;
364 if (!p) {
365 /* allocate if not found */
366#if defined(CONFIG_USER_ONLY)
367 unsigned long addr;
368 size_t len = sizeof(PageDesc) * L2_SIZE;
369 /* Don't use qemu_malloc because it may recurse. */
370 p = mmap(0, len, PROT_READ | PROT_WRITE,
371 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
372 *lp = p;
373 addr = h2g(p);
374 if (addr == (target_ulong)addr) {
375 page_set_flags(addr & TARGET_PAGE_MASK,
376 TARGET_PAGE_ALIGN(addr + len),
377 PAGE_RESERVED);
378 }
379#else
380 p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
381 *lp = p;
382#endif
383 }
384 return p + (index & (L2_SIZE - 1));
385}
386
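/* Like page_find_alloc(), but never allocates; returns NULL when the page has
   no descriptor yet. */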
387#ifndef VBOX
388static inline PageDesc *page_find(target_ulong index)
389#else
390DECLINLINE(PageDesc *) page_find(target_ulong index)
391#endif
392{
393 PageDesc **lp, *p;
394 lp = page_l1_map(index);
395 if (!lp)
396 return NULL;
397
398 p = *lp;
399 if (!p)
400 return 0;
401 return p + (index & (L2_SIZE - 1));
402}
403
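/* Look up the PhysPageDesc for a physical page index. With 'alloc' set,
   intermediate tables and the leaf are created on demand and initialised to
   IO_MEM_UNASSIGNED; on VBox (old phys code) still-missing dynamic RAM ranges
   are grown via remR3GrowDynRange() before returning. */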
404static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
405{
406 void **lp, **p;
407 PhysPageDesc *pd;
408
409 p = (void **)l1_phys_map;
410#if TARGET_PHYS_ADDR_SPACE_BITS > 32
411
412#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
413#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
414#endif
415 lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
416 p = *lp;
417 if (!p) {
418 /* allocate if not found */
419 if (!alloc)
420 return NULL;
421 p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
422 memset(p, 0, sizeof(void *) * L1_SIZE);
423 *lp = p;
424 }
425#endif
426 lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
427 pd = *lp;
428 if (!pd) {
429 int i;
430 /* allocate if not found */
431 if (!alloc)
432 return NULL;
433 pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
434 *lp = pd;
435 for (i = 0; i < L2_SIZE; i++)
436 pd[i].phys_offset = IO_MEM_UNASSIGNED;
437 }
438#if defined(VBOX) && !defined(VBOX_WITH_NEW_PHYS_CODE)
439 pd = ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
440 if (RT_UNLIKELY((pd->phys_offset & ~TARGET_PAGE_MASK) == IO_MEM_RAM_MISSING))
441 remR3GrowDynRange(pd->phys_offset & TARGET_PAGE_MASK);
442 return pd;
443#else
444 return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
445#endif
446}
447
448#ifndef VBOX
449static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
450#else
451DECLINLINE(PhysPageDesc *) phys_page_find(target_phys_addr_t index)
452#endif
453{
454 return phys_page_find_alloc(index, 0);
455}
456
457#if !defined(CONFIG_USER_ONLY)
458static void tlb_protect_code(ram_addr_t ram_addr);
459static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
460 target_ulong vaddr);
461#define mmap_lock() do { } while(0)
462#define mmap_unlock() do { } while(0)
463#endif
464
465#ifdef VBOX
466/** @todo nike: isn't 32M too much ? */
467#endif
468#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
469
470#if defined(CONFIG_USER_ONLY)
471/* Currently it is not recommended to allocate big chunks of data in
472 user mode. It will change when a dedicated libc is used */
473#define USE_STATIC_CODE_GEN_BUFFER
474#endif
475
476/* VBox allocates codegen buffer dynamically */
477#ifndef VBOX
478#ifdef USE_STATIC_CODE_GEN_BUFFER
479static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
480#endif
481#endif
482
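/* Allocate the translated-code buffer: the static buffer, an RTMemExecAlloc()
   block (VBox), or an executable mmap()/malloc() region placed low enough for
   direct branches on x86_64/sparc64; then size the TB array to match. */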
483static void code_gen_alloc(unsigned long tb_size)
484{
485#ifdef USE_STATIC_CODE_GEN_BUFFER
486 code_gen_buffer = static_code_gen_buffer;
487 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
488 map_exec(code_gen_buffer, code_gen_buffer_size);
489#else
490 code_gen_buffer_size = tb_size;
491 if (code_gen_buffer_size == 0) {
492#if defined(CONFIG_USER_ONLY)
493 /* in user mode, phys_ram_size is not meaningful */
494 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
495#else
496 /* XXX: needs adjustments */
497 code_gen_buffer_size = (unsigned long)(phys_ram_size / 4);
498#endif
499 }
500 if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
501 code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
502 /* The code gen buffer location may have constraints depending on
503 the host cpu and OS */
504#ifdef VBOX
505 code_gen_buffer = RTMemExecAlloc(code_gen_buffer_size);
506
507 if (!code_gen_buffer) {
508 LogRel(("REM: failed allocate codegen buffer %lld\n",
509 code_gen_buffer_size));
510 return;
511 }
512#else //!VBOX
513#if defined(__linux__)
514 {
515 int flags;
516 void *start = NULL;
517
518 flags = MAP_PRIVATE | MAP_ANONYMOUS;
519#if defined(__x86_64__)
520 flags |= MAP_32BIT;
521 /* Cannot map more than that */
522 if (code_gen_buffer_size > (800 * 1024 * 1024))
523 code_gen_buffer_size = (800 * 1024 * 1024);
524#elif defined(__sparc_v9__)
525 // Map the buffer below 2G, so we can use direct calls and branches
526 flags |= MAP_FIXED;
527 start = (void *) 0x60000000UL;
528 if (code_gen_buffer_size > (512 * 1024 * 1024))
529 code_gen_buffer_size = (512 * 1024 * 1024);
530#endif
531 code_gen_buffer = mmap(start, code_gen_buffer_size,
532 PROT_WRITE | PROT_READ | PROT_EXEC,
533 flags, -1, 0);
534 if (code_gen_buffer == MAP_FAILED) {
535 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
536 exit(1);
537 }
538 }
539#elif defined(__FreeBSD__)
540 {
541 int flags;
542 void *addr = NULL;
543 flags = MAP_PRIVATE | MAP_ANONYMOUS;
544#if defined(__x86_64__)
545 /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
546 * 0x40000000 is free */
547 flags |= MAP_FIXED;
548 addr = (void *)0x40000000;
549 /* Cannot map more than that */
550 if (code_gen_buffer_size > (800 * 1024 * 1024))
551 code_gen_buffer_size = (800 * 1024 * 1024);
552#endif
553 code_gen_buffer = mmap(addr, code_gen_buffer_size,
554 PROT_WRITE | PROT_READ | PROT_EXEC,
555 flags, -1, 0);
556 if (code_gen_buffer == MAP_FAILED) {
557 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
558 exit(1);
559 }
560 }
561#else
562 code_gen_buffer = qemu_malloc(code_gen_buffer_size);
563 if (!code_gen_buffer) {
564 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
565 exit(1);
566 }
567 map_exec(code_gen_buffer, code_gen_buffer_size);
568#endif
569#endif // VBOX
570#endif /* !USE_STATIC_CODE_GEN_BUFFER */
571 map_exec(code_gen_prologue, sizeof(code_gen_prologue));
572 code_gen_buffer_max_size = code_gen_buffer_size -
573 code_gen_max_block_size();
574 code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
575 tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
576}
577
578/* Must be called before using the QEMU cpus. 'tb_size' is the size
579 (in bytes) allocated to the translation buffer. Zero means default
580 size. */
581void cpu_exec_init_all(unsigned long tb_size)
582{
583 cpu_gen_init();
584 code_gen_alloc(tb_size);
585 code_gen_ptr = code_gen_buffer;
586 page_init();
587#if !defined(CONFIG_USER_ONLY)
588 io_mem_init();
589#endif
590}
591
592#ifndef VBOX
593#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
594
595#define CPU_COMMON_SAVE_VERSION 1
596
597static void cpu_common_save(QEMUFile *f, void *opaque)
598{
599 CPUState *env = opaque;
600
601 qemu_put_be32s(f, &env->halted);
602 qemu_put_be32s(f, &env->interrupt_request);
603}
604
605static int cpu_common_load(QEMUFile *f, void *opaque, int version_id)
606{
607 CPUState *env = opaque;
608
609 if (version_id != CPU_COMMON_SAVE_VERSION)
610 return -EINVAL;
611
612 qemu_get_be32s(f, &env->halted);
613 qemu_get_be32s(f, &env->interrupt_request);
614 tlb_flush(env, 1);
615
616 return 0;
617}
618#endif
619#endif //!VBOX
620
621void cpu_exec_init(CPUState *env)
622{
623 CPUState **penv;
624 int cpu_index;
625
626 env->next_cpu = NULL;
627 penv = &first_cpu;
628 cpu_index = 0;
629 while (*penv != NULL) {
630 penv = (CPUState **)&(*penv)->next_cpu;
631 cpu_index++;
632 }
633 env->cpu_index = cpu_index;
634 env->nb_watchpoints = 0;
635 *penv = env;
636#ifndef VBOX
637#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
638 register_savevm("cpu_common", cpu_index, CPU_COMMON_SAVE_VERSION,
639 cpu_common_save, cpu_common_load, env);
640 register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
641 cpu_save, cpu_load, env);
642#endif
643#endif // !VBOX
644}
645
646#ifndef VBOX
647static inline void invalidate_page_bitmap(PageDesc *p)
648#else
649DECLINLINE(void) invalidate_page_bitmap(PageDesc *p)
650#endif
651{
652 if (p->code_bitmap) {
653 qemu_free(p->code_bitmap);
654 p->code_bitmap = NULL;
655 }
656 p->code_write_count = 0;
657}
658
659/* set to NULL all the 'first_tb' fields in all PageDescs */
660static void page_flush_tb(void)
661{
662 int i, j;
663 PageDesc *p;
664
665 for(i = 0; i < L1_SIZE; i++) {
666 p = l1_map[i];
667 if (p) {
668 for(j = 0; j < L2_SIZE; j++) {
669 p->first_tb = NULL;
670 invalidate_page_bitmap(p);
671 p++;
672 }
673 }
674 }
675}
676
677/* flush all the translation blocks */
678/* XXX: tb_flush is currently not thread safe */
679void tb_flush(CPUState *env1)
680{
681 CPUState *env;
682#if defined(DEBUG_FLUSH)
683 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
684 (unsigned long)(code_gen_ptr - code_gen_buffer),
685 nb_tbs, nb_tbs > 0 ?
686 ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
687#endif
688 if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
689 cpu_abort(env1, "Internal error: code buffer overflow\n");
690
691 nb_tbs = 0;
692
693 for(env = first_cpu; env != NULL; env = env->next_cpu) {
694 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
695 }
696
697 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
698 page_flush_tb();
699
700 code_gen_ptr = code_gen_buffer;
701 /* XXX: flush processor icache at this point if cache flush is
702 expensive */
703 tb_flush_count++;
704}
705
706#ifdef DEBUG_TB_CHECK
707static void tb_invalidate_check(target_ulong address)
708{
709 TranslationBlock *tb;
710 int i;
711 address &= TARGET_PAGE_MASK;
712 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
713 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
714 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
715 address >= tb->pc + tb->size)) {
716 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
717 address, (long)tb->pc, tb->size);
718 }
719 }
720 }
721}
722
723/* verify that all the pages have correct rights for code */
724static void tb_page_check(void)
725{
726 TranslationBlock *tb;
727 int i, flags1, flags2;
728
729 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
730 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
731 flags1 = page_get_flags(tb->pc);
732 flags2 = page_get_flags(tb->pc + tb->size - 1);
733 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
734 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
735 (long)tb->pc, tb->size, flags1, flags2);
736 }
737 }
738 }
739}
740
741static void tb_jmp_check(TranslationBlock *tb)
742{
743 TranslationBlock *tb1;
744 unsigned int n1;
745
746 /* suppress any remaining jumps to this TB */
747 tb1 = tb->jmp_first;
748 for(;;) {
749 n1 = (long)tb1 & 3;
750 tb1 = (TranslationBlock *)((long)tb1 & ~3);
751 if (n1 == 2)
752 break;
753 tb1 = tb1->jmp_next[n1];
754 }
755 /* check end of list */
756 if (tb1 != tb) {
757 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
758 }
759}
760#endif // DEBUG_TB_CHECK
761
762/* invalidate one TB */
763#ifndef VBOX
764static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
765 int next_offset)
766#else
767DECLINLINE(void) tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
768 int next_offset)
769#endif
770{
771 TranslationBlock *tb1;
772 for(;;) {
773 tb1 = *ptb;
774 if (tb1 == tb) {
775 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
776 break;
777 }
778 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
779 }
780}
781
782#ifndef VBOX
783static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
784#else
785DECLINLINE(void) tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
786#endif
787{
788 TranslationBlock *tb1;
789 unsigned int n1;
790
791 for(;;) {
792 tb1 = *ptb;
793 n1 = (long)tb1 & 3;
794 tb1 = (TranslationBlock *)((long)tb1 & ~3);
795 if (tb1 == tb) {
796 *ptb = tb1->page_next[n1];
797 break;
798 }
799 ptb = &tb1->page_next[n1];
800 }
801}
802
803#ifndef VBOX
804static inline void tb_jmp_remove(TranslationBlock *tb, int n)
805#else
806DECLINLINE(void) tb_jmp_remove(TranslationBlock *tb, int n)
807#endif
808{
809 TranslationBlock *tb1, **ptb;
810 unsigned int n1;
811
812 ptb = &tb->jmp_next[n];
813 tb1 = *ptb;
814 if (tb1) {
815 /* find tb(n) in circular list */
816 for(;;) {
817 tb1 = *ptb;
818 n1 = (long)tb1 & 3;
819 tb1 = (TranslationBlock *)((long)tb1 & ~3);
820 if (n1 == n && tb1 == tb)
821 break;
822 if (n1 == 2) {
823 ptb = &tb1->jmp_first;
824 } else {
825 ptb = &tb1->jmp_next[n1];
826 }
827 }
828 /* now we can suppress tb(n) from the list */
829 *ptb = tb->jmp_next[n];
830
831 tb->jmp_next[n] = NULL;
832 }
833}
834
835/* reset the jump entry 'n' of a TB so that it is not chained to
836 another TB */
837#ifndef VBOX
838static inline void tb_reset_jump(TranslationBlock *tb, int n)
839#else
840DECLINLINE(void) tb_reset_jump(TranslationBlock *tb, int n)
841#endif
842{
843 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
844}
845
846void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
847{
848 CPUState *env;
849 PageDesc *p;
850 unsigned int h, n1;
851 target_phys_addr_t phys_pc;
852 TranslationBlock *tb1, *tb2;
853
854 /* remove the TB from the hash list */
855 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
856 h = tb_phys_hash_func(phys_pc);
857 tb_remove(&tb_phys_hash[h], tb,
858 offsetof(TranslationBlock, phys_hash_next));
859
860 /* remove the TB from the page list */
861 if (tb->page_addr[0] != page_addr) {
862 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
863 tb_page_remove(&p->first_tb, tb);
864 invalidate_page_bitmap(p);
865 }
866 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
867 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
868 tb_page_remove(&p->first_tb, tb);
869 invalidate_page_bitmap(p);
870 }
871
872 tb_invalidated_flag = 1;
873
874 /* remove the TB from the hash list */
875 h = tb_jmp_cache_hash_func(tb->pc);
876 for(env = first_cpu; env != NULL; env = env->next_cpu) {
877 if (env->tb_jmp_cache[h] == tb)
878 env->tb_jmp_cache[h] = NULL;
879 }
880
881 /* suppress this TB from the two jump lists */
882 tb_jmp_remove(tb, 0);
883 tb_jmp_remove(tb, 1);
884
885 /* suppress any remaining jumps to this TB */
886 tb1 = tb->jmp_first;
887 for(;;) {
888 n1 = (long)tb1 & 3;
889 if (n1 == 2)
890 break;
891 tb1 = (TranslationBlock *)((long)tb1 & ~3);
892 tb2 = tb1->jmp_next[n1];
893 tb_reset_jump(tb1, n1);
894 tb1->jmp_next[n1] = NULL;
895 tb1 = tb2;
896 }
897 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
898
899#ifndef VBOX
900 tb_phys_invalidate_count++;
901#endif
902}
903
904
905#ifdef VBOX
906void tb_invalidate_virt(CPUState *env, uint32_t eip)
907{
908# if 1
909 tb_flush(env);
910# else
911 uint8_t *cs_base, *pc;
912 unsigned int flags, h, phys_pc;
913 TranslationBlock *tb, **ptb;
914
915 flags = env->hflags;
916 flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
917 cs_base = env->segs[R_CS].base;
918 pc = cs_base + eip;
919
920 tb = tb_find(&ptb, (unsigned long)pc, (unsigned long)cs_base,
921 flags);
922
923 if(tb)
924 {
925# ifdef DEBUG
926 printf("invalidating TB (%08X) at %08X\n", tb, eip);
927# endif
928 tb_invalidate(tb);
929 //Note: this will leak TBs, but the whole cache will be flushed
930 // when it happens too often
931 tb->pc = 0;
932 tb->cs_base = 0;
933 tb->flags = 0;
934 }
935# endif
936}
937
938# ifdef VBOX_STRICT
939/**
940 * Gets the page offset.
941 */
942unsigned long get_phys_page_offset(target_ulong addr)
943{
944 PhysPageDesc *p = phys_page_find(addr >> TARGET_PAGE_BITS);
945 return p ? p->phys_offset : 0;
946}
947# endif /* VBOX_STRICT */
948#endif /* VBOX */
949
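/* Set 'len' bits starting at bit 'start' in the byte bitmap 'tab', handling
   partial leading and trailing bytes. */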
950#ifndef VBOX
951static inline void set_bits(uint8_t *tab, int start, int len)
952#else
953DECLINLINE(void) set_bits(uint8_t *tab, int start, int len)
954#endif
955{
956 int end, mask, end1;
957
958 end = start + len;
959 tab += start >> 3;
960 mask = 0xff << (start & 7);
961 if ((start & ~7) == (end & ~7)) {
962 if (start < end) {
963 mask &= ~(0xff << (end & 7));
964 *tab |= mask;
965 }
966 } else {
967 *tab++ |= mask;
968 start = (start + 8) & ~7;
969 end1 = end & ~7;
970 while (start < end1) {
971 *tab++ = 0xff;
972 start += 8;
973 }
974 if (start < end) {
975 mask = ~(0xff << (end & 7));
976 *tab |= mask;
977 }
978 }
979}
980
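/* Build the page's code bitmap: one bit per byte of the page that is covered
   by a translated block, used to cheaply filter self-modifying-code writes. */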
981static void build_page_bitmap(PageDesc *p)
982{
983 int n, tb_start, tb_end;
984 TranslationBlock *tb;
985
986 p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
987 if (!p->code_bitmap)
988 return;
989 memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);
990
991 tb = p->first_tb;
992 while (tb != NULL) {
993 n = (long)tb & 3;
994 tb = (TranslationBlock *)((long)tb & ~3);
995 /* NOTE: this is subtle as a TB may span two physical pages */
996 if (n == 0) {
997 /* NOTE: tb_end may be after the end of the page, but
998 it is not a problem */
999 tb_start = tb->pc & ~TARGET_PAGE_MASK;
1000 tb_end = tb_start + tb->size;
1001 if (tb_end > TARGET_PAGE_SIZE)
1002 tb_end = TARGET_PAGE_SIZE;
1003 } else {
1004 tb_start = 0;
1005 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1006 }
1007 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
1008 tb = tb->page_next[n];
1009 }
1010}
1011
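/* Translate a new TB for (pc, cs_base, flags), flushing the whole TB cache if
   allocation fails, and link it into the physical page tables (spanning a
   second page if the code crosses a page boundary). */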
1012TranslationBlock *tb_gen_code(CPUState *env,
1013 target_ulong pc, target_ulong cs_base,
1014 int flags, int cflags)
1015{
1016 TranslationBlock *tb;
1017 uint8_t *tc_ptr;
1018 target_ulong phys_pc, phys_page2, virt_page2;
1019 int code_gen_size;
1020
1021 phys_pc = get_phys_addr_code(env, pc);
1022 tb = tb_alloc(pc);
1023 if (!tb) {
1024 /* flush must be done */
1025 tb_flush(env);
1026 /* cannot fail at this point */
1027 tb = tb_alloc(pc);
1028 /* Don't forget to invalidate previous TB info. */
1029 tb_invalidated_flag = 1;
1030 }
1031 tc_ptr = code_gen_ptr;
1032 tb->tc_ptr = tc_ptr;
1033 tb->cs_base = cs_base;
1034 tb->flags = flags;
1035 tb->cflags = cflags;
1036 cpu_gen_code(env, tb, &code_gen_size);
1037 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
1038
1039 /* check next page if needed */
1040 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
1041 phys_page2 = -1;
1042 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
1043 phys_page2 = get_phys_addr_code(env, virt_page2);
1044 }
1045 tb_link_phys(tb, phys_pc, phys_page2);
1046 return tb;
1047}
1048
1049/* invalidate all TBs which intersect with the target physical page
1050 starting in range [start;end[. NOTE: start and end must refer to
1051 the same physical page. 'is_cpu_write_access' should be true if called
1052 from a real cpu write access: the virtual CPU will exit the current
1053 TB if code is modified inside this TB. */
1054void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
1055 int is_cpu_write_access)
1056{
1057 int n, current_tb_modified, current_tb_not_found, current_flags;
1058 CPUState *env = cpu_single_env;
1059 PageDesc *p;
1060 TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
1061 target_ulong tb_start, tb_end;
1062 target_ulong current_pc, current_cs_base;
1063
1064 p = page_find(start >> TARGET_PAGE_BITS);
1065 if (!p)
1066 return;
1067 if (!p->code_bitmap &&
1068 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
1069 is_cpu_write_access) {
1070 /* build code bitmap */
1071 build_page_bitmap(p);
1072 }
1073
1074 /* we remove all the TBs in the range [start, end[ */
1075 /* XXX: see if in some cases it could be faster to invalidate all the code */
1076 current_tb_not_found = is_cpu_write_access;
1077 current_tb_modified = 0;
1078 current_tb = NULL; /* avoid warning */
1079 current_pc = 0; /* avoid warning */
1080 current_cs_base = 0; /* avoid warning */
1081 current_flags = 0; /* avoid warning */
1082 tb = p->first_tb;
1083 while (tb != NULL) {
1084 n = (long)tb & 3;
1085 tb = (TranslationBlock *)((long)tb & ~3);
1086 tb_next = tb->page_next[n];
1087 /* NOTE: this is subtle as a TB may span two physical pages */
1088 if (n == 0) {
1089 /* NOTE: tb_end may be after the end of the page, but
1090 it is not a problem */
1091 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
1092 tb_end = tb_start + tb->size;
1093 } else {
1094 tb_start = tb->page_addr[1];
1095 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1096 }
1097 if (!(tb_end <= start || tb_start >= end)) {
1098#ifdef TARGET_HAS_PRECISE_SMC
1099 if (current_tb_not_found) {
1100 current_tb_not_found = 0;
1101 current_tb = NULL;
1102 if (env->mem_io_pc) {
1103 /* now we have a real cpu fault */
1104 current_tb = tb_find_pc(env->mem_io_pc);
1105 }
1106 }
1107 if (current_tb == tb &&
1108 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1109 /* If we are modifying the current TB, we must stop
1110 its execution. We could be more precise by checking
1111 that the modification is after the current PC, but it
1112 would require a specialized function to partially
1113 restore the CPU state */
1114
1115 current_tb_modified = 1;
1116 cpu_restore_state(current_tb, env,
1117 env->mem_io_pc, NULL);
1118#if defined(TARGET_I386)
1119 current_flags = env->hflags;
1120 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
1121 current_cs_base = (target_ulong)env->segs[R_CS].base;
1122 current_pc = current_cs_base + env->eip;
1123#else
1124#error unsupported CPU
1125#endif
1126 }
1127#endif /* TARGET_HAS_PRECISE_SMC */
1128 /* we need to do that to handle the case where a signal
1129 occurs while doing tb_phys_invalidate() */
1130 saved_tb = NULL;
1131 if (env) {
1132 saved_tb = env->current_tb;
1133 env->current_tb = NULL;
1134 }
1135 tb_phys_invalidate(tb, -1);
1136 if (env) {
1137 env->current_tb = saved_tb;
1138 if (env->interrupt_request && env->current_tb)
1139 cpu_interrupt(env, env->interrupt_request);
1140 }
1141 }
1142 tb = tb_next;
1143 }
1144#if !defined(CONFIG_USER_ONLY)
1145 /* if no code remaining, no need to continue to use slow writes */
1146 if (!p->first_tb) {
1147 invalidate_page_bitmap(p);
1148 if (is_cpu_write_access) {
1149 tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
1150 }
1151 }
1152#endif
1153#ifdef TARGET_HAS_PRECISE_SMC
1154 if (current_tb_modified) {
1155 /* we generate a block containing just the instruction
1156 modifying the memory. It will ensure that it cannot modify
1157 itself */
1158 env->current_tb = NULL;
1159 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1160 cpu_resume_from_signal(env, NULL);
1161 }
1162#endif
1163}
1164
1165
1166/* len must be <= 8 and start must be a multiple of len */
1167#ifndef VBOX
1168static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
1169#else
1170DECLINLINE(void) tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
1171#endif
1172{
1173 PageDesc *p;
1174 int offset, b;
1175#if 0
1176 if (1) {
1177 if (loglevel) {
1178 fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1179 cpu_single_env->mem_io_vaddr, len,
1180 cpu_single_env->eip,
1181 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
1182 }
1183 }
1184#endif
1185 p = page_find(start >> TARGET_PAGE_BITS);
1186 if (!p)
1187 return;
1188 if (p->code_bitmap) {
1189 offset = start & ~TARGET_PAGE_MASK;
1190 b = p->code_bitmap[offset >> 3] >> (offset & 7);
1191 if (b & ((1 << len) - 1))
1192 goto do_invalidate;
1193 } else {
1194 do_invalidate:
1195 tb_invalidate_phys_page_range(start, start + len, 1);
1196 }
1197}
1198
1199
1200#if !defined(CONFIG_SOFTMMU)
1201static void tb_invalidate_phys_page(target_phys_addr_t addr,
1202 unsigned long pc, void *puc)
1203{
1204 int n, current_flags, current_tb_modified;
1205 target_ulong current_pc, current_cs_base;
1206 PageDesc *p;
1207 TranslationBlock *tb, *current_tb;
1208#ifdef TARGET_HAS_PRECISE_SMC
1209 CPUState *env = cpu_single_env;
1210#endif
1211
1212 addr &= TARGET_PAGE_MASK;
1213 p = page_find(addr >> TARGET_PAGE_BITS);
1214 if (!p)
1215 return;
1216 tb = p->first_tb;
1217 current_tb_modified = 0;
1218 current_tb = NULL;
1219 current_pc = 0; /* avoid warning */
1220 current_cs_base = 0; /* avoid warning */
1221 current_flags = 0; /* avoid warning */
1222#ifdef TARGET_HAS_PRECISE_SMC
1223 if (tb && pc != 0) {
1224 current_tb = tb_find_pc(pc);
1225 }
1226#endif
1227 while (tb != NULL) {
1228 n = (long)tb & 3;
1229 tb = (TranslationBlock *)((long)tb & ~3);
1230#ifdef TARGET_HAS_PRECISE_SMC
1231 if (current_tb == tb &&
1232 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1233 /* If we are modifying the current TB, we must stop
1234 its execution. We could be more precise by checking
1235 that the modification is after the current PC, but it
1236 would require a specialized function to partially
1237 restore the CPU state */
1238
1239 current_tb_modified = 1;
1240 cpu_restore_state(current_tb, env, pc, puc);
1241#if defined(TARGET_I386)
1242 current_flags = env->hflags;
1243 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
1244 current_cs_base = (target_ulong)env->segs[R_CS].base;
1245 current_pc = current_cs_base + env->eip;
1246#else
1247#error unsupported CPU
1248#endif
1249 }
1250#endif /* TARGET_HAS_PRECISE_SMC */
1251 tb_phys_invalidate(tb, addr);
1252 tb = tb->page_next[n];
1253 }
1254 p->first_tb = NULL;
1255#ifdef TARGET_HAS_PRECISE_SMC
1256 if (current_tb_modified) {
1257 /* we generate a block containing just the instruction
1258 modifying the memory. It will ensure that it cannot modify
1259 itself */
1260 env->current_tb = NULL;
1261 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1262 cpu_resume_from_signal(env, puc);
1263 }
1264#endif
1265}
1266#endif
1267
1268/* add the tb in the target page and protect it if necessary */
1269#ifndef VBOX
1270static inline void tb_alloc_page(TranslationBlock *tb,
1271 unsigned int n, target_ulong page_addr)
1272#else
1273DECLINLINE(void) tb_alloc_page(TranslationBlock *tb,
1274 unsigned int n, target_ulong page_addr)
1275#endif
1276{
1277 PageDesc *p;
1278 TranslationBlock *last_first_tb;
1279
1280 tb->page_addr[n] = page_addr;
1281 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
1282 tb->page_next[n] = p->first_tb;
1283 last_first_tb = p->first_tb;
1284 p->first_tb = (TranslationBlock *)((long)tb | n);
1285 invalidate_page_bitmap(p);
1286
1287#if defined(TARGET_HAS_SMC) || 1
1288
1289#if defined(CONFIG_USER_ONLY)
1290 if (p->flags & PAGE_WRITE) {
1291 target_ulong addr;
1292 PageDesc *p2;
1293 int prot;
1294
1295 /* force the host page as non writable (writes will have a
1296 page fault + mprotect overhead) */
1297 page_addr &= qemu_host_page_mask;
1298 prot = 0;
1299 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1300 addr += TARGET_PAGE_SIZE) {
1301
1302 p2 = page_find (addr >> TARGET_PAGE_BITS);
1303 if (!p2)
1304 continue;
1305 prot |= p2->flags;
1306 p2->flags &= ~PAGE_WRITE;
1307 page_get_flags(addr);
1308 }
1309 mprotect(g2h(page_addr), qemu_host_page_size,
1310 (prot & PAGE_BITS) & ~PAGE_WRITE);
1311#ifdef DEBUG_TB_INVALIDATE
1312 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
1313 page_addr);
1314#endif
1315 }
1316#else
1317 /* if some code is already present, then the pages are already
1318 protected. So we handle the case where only the first TB is
1319 allocated in a physical page */
1320 if (!last_first_tb) {
1321 tlb_protect_code(page_addr);
1322 }
1323#endif
1324
1325#endif /* TARGET_HAS_SMC */
1326}
1327
1328/* Allocate a new translation block. Flush the translation buffer if
1329 too many translation blocks or too much generated code. */
1330TranslationBlock *tb_alloc(target_ulong pc)
1331{
1332 TranslationBlock *tb;
1333
1334 if (nb_tbs >= code_gen_max_blocks ||
1335 (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
1336 return NULL;
1337 tb = &tbs[nb_tbs++];
1338 tb->pc = pc;
1339 tb->cflags = 0;
1340 return tb;
1341}
1342
1343void tb_free(TranslationBlock *tb)
1344{
1345 /* In practice this is mostly used for single-use temporary TBs.
1346 Ignore the hard cases and just back up if this TB happens to
1347 be the last one generated. */
1348 if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
1349 code_gen_ptr = tb->tc_ptr;
1350 nb_tbs--;
1351 }
1352}
1353
1354/* add a new TB and link it to the physical page tables. phys_page2 is
1355 (-1) to indicate that only one page contains the TB. */
1356void tb_link_phys(TranslationBlock *tb,
1357 target_ulong phys_pc, target_ulong phys_page2)
1358{
1359 unsigned int h;
1360 TranslationBlock **ptb;
1361
1362 /* Grab the mmap lock to stop another thread invalidating this TB
1363 before we are done. */
1364 mmap_lock();
1365 /* add in the physical hash table */
1366 h = tb_phys_hash_func(phys_pc);
1367 ptb = &tb_phys_hash[h];
1368 tb->phys_hash_next = *ptb;
1369 *ptb = tb;
1370
1371 /* add in the page list */
1372 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1373 if (phys_page2 != -1)
1374 tb_alloc_page(tb, 1, phys_page2);
1375 else
1376 tb->page_addr[1] = -1;
1377
1378 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1379 tb->jmp_next[0] = NULL;
1380 tb->jmp_next[1] = NULL;
1381
1382 /* init original jump addresses */
1383 if (tb->tb_next_offset[0] != 0xffff)
1384 tb_reset_jump(tb, 0);
1385 if (tb->tb_next_offset[1] != 0xffff)
1386 tb_reset_jump(tb, 1);
1387
1388#ifdef DEBUG_TB_CHECK
1389 tb_page_check();
1390#endif
1391 mmap_unlock();
1392}
1393
1394/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1395 tb[1].tc_ptr. Return NULL if not found */
1396TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1397{
1398 int m_min, m_max, m;
1399 unsigned long v;
1400 TranslationBlock *tb;
1401
1402 if (nb_tbs <= 0)
1403 return NULL;
1404 if (tc_ptr < (unsigned long)code_gen_buffer ||
1405 tc_ptr >= (unsigned long)code_gen_ptr)
1406 return NULL;
1407 /* binary search (cf Knuth) */
1408 m_min = 0;
1409 m_max = nb_tbs - 1;
1410 while (m_min <= m_max) {
1411 m = (m_min + m_max) >> 1;
1412 tb = &tbs[m];
1413 v = (unsigned long)tb->tc_ptr;
1414 if (v == tc_ptr)
1415 return tb;
1416 else if (tc_ptr < v) {
1417 m_max = m - 1;
1418 } else {
1419 m_min = m + 1;
1420 }
1421 }
1422 return &tbs[m_max];
1423}
1424
1425static void tb_reset_jump_recursive(TranslationBlock *tb);
1426
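/* Unchain jump 'n' of 'tb': locate the TB it currently jumps to, remove 'tb'
   from that TB's incoming-jump list, patch the generated code back to its
   fall-through target and recurse into the target TB. */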
1427#ifndef VBOX
1428static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1429#else
1430DECLINLINE(void) tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1431#endif
1432{
1433 TranslationBlock *tb1, *tb_next, **ptb;
1434 unsigned int n1;
1435
1436 tb1 = tb->jmp_next[n];
1437 if (tb1 != NULL) {
1438 /* find head of list */
1439 for(;;) {
1440 n1 = (long)tb1 & 3;
1441 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1442 if (n1 == 2)
1443 break;
1444 tb1 = tb1->jmp_next[n1];
1445 }
1446 /* we are now sure that tb jumps to tb1 */
1447 tb_next = tb1;
1448
1449 /* remove tb from the jmp_first list */
1450 ptb = &tb_next->jmp_first;
1451 for(;;) {
1452 tb1 = *ptb;
1453 n1 = (long)tb1 & 3;
1454 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1455 if (n1 == n && tb1 == tb)
1456 break;
1457 ptb = &tb1->jmp_next[n1];
1458 }
1459 *ptb = tb->jmp_next[n];
1460 tb->jmp_next[n] = NULL;
1461
1462 /* suppress the jump to next tb in generated code */
1463 tb_reset_jump(tb, n);
1464
1465 /* suppress jumps in the tb on which we could have jumped */
1466 tb_reset_jump_recursive(tb_next);
1467 }
1468}
1469
1470static void tb_reset_jump_recursive(TranslationBlock *tb)
1471{
1472 tb_reset_jump_recursive2(tb, 0);
1473 tb_reset_jump_recursive2(tb, 1);
1474}
1475
1476#if defined(TARGET_HAS_ICE)
1477static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1478{
1479 target_ulong addr, pd;
1480 ram_addr_t ram_addr;
1481 PhysPageDesc *p;
1482
1483 addr = cpu_get_phys_page_debug(env, pc);
1484 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1485 if (!p) {
1486 pd = IO_MEM_UNASSIGNED;
1487 } else {
1488 pd = p->phys_offset;
1489 }
1490 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1491 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1492}
1493#endif
1494
1495/* Add a watchpoint. */
1496int cpu_watchpoint_insert(CPUState *env, target_ulong addr, int type)
1497{
1498 int i;
1499
1500 for (i = 0; i < env->nb_watchpoints; i++) {
1501 if (addr == env->watchpoint[i].vaddr)
1502 return 0;
1503 }
1504 if (env->nb_watchpoints >= MAX_WATCHPOINTS)
1505 return -1;
1506
1507 i = env->nb_watchpoints++;
1508 env->watchpoint[i].vaddr = addr;
1509 env->watchpoint[i].type = type;
1510 tlb_flush_page(env, addr);
1511 /* FIXME: This flush is needed because of the hack to make memory ops
1512 terminate the TB. It can be removed once the proper IO trap and
1513 re-execute bits are in. */
1514 tb_flush(env);
1515 return i;
1516}
1517
1518/* Remove a watchpoint. */
1519int cpu_watchpoint_remove(CPUState *env, target_ulong addr)
1520{
1521 int i;
1522
1523 for (i = 0; i < env->nb_watchpoints; i++) {
1524 if (addr == env->watchpoint[i].vaddr) {
1525 env->nb_watchpoints--;
1526 env->watchpoint[i] = env->watchpoint[env->nb_watchpoints];
1527 tlb_flush_page(env, addr);
1528 return 0;
1529 }
1530 }
1531 return -1;
1532}
1533
1534/* Remove all watchpoints. */
1535void cpu_watchpoint_remove_all(CPUState *env) {
1536 int i;
1537
1538 for (i = 0; i < env->nb_watchpoints; i++) {
1539 tlb_flush_page(env, env->watchpoint[i].vaddr);
1540 }
1541 env->nb_watchpoints = 0;
1542}
1543
1544/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
1545 breakpoint is reached */
1546int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
1547{
1548#if defined(TARGET_HAS_ICE)
1549 int i;
1550
1551 for(i = 0; i < env->nb_breakpoints; i++) {
1552 if (env->breakpoints[i] == pc)
1553 return 0;
1554 }
1555
1556 if (env->nb_breakpoints >= MAX_BREAKPOINTS)
1557 return -1;
1558 env->breakpoints[env->nb_breakpoints++] = pc;
1559
1560 breakpoint_invalidate(env, pc);
1561 return 0;
1562#else
1563 return -1;
1564#endif
1565}
1566
1567/* remove all breakpoints */
1568void cpu_breakpoint_remove_all(CPUState *env) {
1569#if defined(TARGET_HAS_ICE)
1570 int i;
1571 for(i = 0; i < env->nb_breakpoints; i++) {
1572 breakpoint_invalidate(env, env->breakpoints[i]);
1573 }
1574 env->nb_breakpoints = 0;
1575#endif
1576}
1577
1578/* remove a breakpoint */
1579int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
1580{
1581#if defined(TARGET_HAS_ICE)
1582 int i;
1583 for(i = 0; i < env->nb_breakpoints; i++) {
1584 if (env->breakpoints[i] == pc)
1585 goto found;
1586 }
1587 return -1;
1588 found:
1589 env->nb_breakpoints--;
1590 if (i < env->nb_breakpoints)
1591 env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];
1592
1593 breakpoint_invalidate(env, pc);
1594 return 0;
1595#else
1596 return -1;
1597#endif
1598}
1599
1600/* enable or disable single step mode. EXCP_DEBUG is returned by the
1601 CPU loop after each instruction */
1602void cpu_single_step(CPUState *env, int enabled)
1603{
1604#if defined(TARGET_HAS_ICE)
1605 if (env->singlestep_enabled != enabled) {
1606 env->singlestep_enabled = enabled;
1607 /* must flush all the translated code to avoid inconsistencies */
1608 /* XXX: only flush what is necessary */
1609 tb_flush(env);
1610 }
1611#endif
1612}
1613
1614#ifndef VBOX
1615/* enable or disable low levels log */
1616void cpu_set_log(int log_flags)
1617{
1618 loglevel = log_flags;
1619 if (loglevel && !logfile) {
1620 logfile = fopen(logfilename, "w");
1621 if (!logfile) {
1622 perror(logfilename);
1623 _exit(1);
1624 }
1625#if !defined(CONFIG_SOFTMMU)
1626 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1627 {
1628 static uint8_t logfile_buf[4096];
1629 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1630 }
1631#else
1632 setvbuf(logfile, NULL, _IOLBF, 0);
1633#endif
1634 }
1635}
1636
1637void cpu_set_log_filename(const char *filename)
1638{
1639 logfilename = strdup(filename);
1640}
1641#endif /* !VBOX */
1642
1643/* mask must never be zero, except for A20 change call */
1644void cpu_interrupt(CPUState *env, int mask)
1645{
1646#if !defined(USE_NPTL)
1647 TranslationBlock *tb;
1648 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
1649#endif
1650 int old_mask;
1651
1652 old_mask = env->interrupt_request;
1653#ifdef VBOX
1654 VM_ASSERT_EMT(env->pVM);
1655 ASMAtomicOrS32((int32_t volatile *)&env->interrupt_request, mask);
1656#else /* !VBOX */
1657 /* FIXME: This is probably not threadsafe. A different thread could
1658 be in the middle of a read-modify-write operation. */
1659 env->interrupt_request |= mask;
1660#endif /* !VBOX */
1661#if defined(USE_NPTL)
1662 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1663 problem and hope the cpu will stop of its own accord. For userspace
1664 emulation this often isn't actually as bad as it sounds. Often
1665 signals are used primarily to interrupt blocking syscalls. */
1666#else
1667 if (use_icount) {
1668 env->icount_decr.u16.high = 0xffff;
1669#ifndef CONFIG_USER_ONLY
1670 /* CPU_INTERRUPT_EXIT isn't a real interrupt. It just means
1671 an async event happened and we need to process it. */
1672 if (!can_do_io(env)
1673 && (mask & ~(old_mask | CPU_INTERRUPT_EXIT)) != 0) {
1674 cpu_abort(env, "Raised interrupt while not in I/O function");
1675 }
1676#endif
1677 } else {
1678 tb = env->current_tb;
1679 /* if the cpu is currently executing code, we must unlink it and
1680 all the potentially executing TB */
1681 if (tb && !testandset(&interrupt_lock)) {
1682 env->current_tb = NULL;
1683 tb_reset_jump_recursive(tb);
1684 resetlock(&interrupt_lock);
1685 }
1686 }
1687#endif
1688}
1689
1690void cpu_reset_interrupt(CPUState *env, int mask)
1691{
1692#ifdef VBOX
1693 /*
1694 * Note: the current implementation can be executed by another thread without problems; make sure this remains true
1695 * for future changes!
1696 */
1697 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~mask);
1698#else /* !VBOX */
1699 env->interrupt_request &= ~mask;
1700#endif /* !VBOX */
1701}
1702
1703#ifndef VBOX
1704CPULogItem cpu_log_items[] = {
1705 { CPU_LOG_TB_OUT_ASM, "out_asm",
1706 "show generated host assembly code for each compiled TB" },
1707 { CPU_LOG_TB_IN_ASM, "in_asm",
1708 "show target assembly code for each compiled TB" },
1709 { CPU_LOG_TB_OP, "op",
1710 "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
1711#ifdef TARGET_I386
1712 { CPU_LOG_TB_OP_OPT, "op_opt",
1713 "show micro ops after optimization for each compiled TB" },
1714#endif
1715 { CPU_LOG_INT, "int",
1716 "show interrupts/exceptions in short format" },
1717 { CPU_LOG_EXEC, "exec",
1718 "show trace before each executed TB (lots of logs)" },
1719 { CPU_LOG_TB_CPU, "cpu",
1720 "show CPU state before bloc translation" },
1721#ifdef TARGET_I386
1722 { CPU_LOG_PCALL, "pcall",
1723 "show protected mode far calls/returns/exceptions" },
1724#endif
1725#ifdef DEBUG_IOPORT
1726 { CPU_LOG_IOPORT, "ioport",
1727 "show all i/o ports accesses" },
1728#endif
1729 { 0, NULL, NULL },
1730};
1731
1732static int cmp1(const char *s1, int n, const char *s2)
1733{
1734 if (strlen(s2) != n)
1735 return 0;
1736 return memcmp(s1, s2, n) == 0;
1737}
1738
1739/* takes a comma-separated list of log masks. Returns 0 on error. */
1740int cpu_str_to_log_mask(const char *str)
1741{
1742 CPULogItem *item;
1743 int mask;
1744 const char *p, *p1;
1745
1746 p = str;
1747 mask = 0;
1748 for(;;) {
1749 p1 = strchr(p, ',');
1750 if (!p1)
1751 p1 = p + strlen(p);
1752 if(cmp1(p,p1-p,"all")) {
1753 for(item = cpu_log_items; item->mask != 0; item++) {
1754 mask |= item->mask;
1755 }
1756 } else {
1757 for(item = cpu_log_items; item->mask != 0; item++) {
1758 if (cmp1(p, p1 - p, item->name))
1759 goto found;
1760 }
1761 return 0;
1762 }
1763 found:
1764 mask |= item->mask;
1765 if (*p1 != ',')
1766 break;
1767 p = p1 + 1;
1768 }
1769 return mask;
1770}
1771#endif /* !VBOX */
1772
1773#ifndef VBOX /* VBOX: we have our own routine. */
1774void cpu_abort(CPUState *env, const char *fmt, ...)
1775{
1776 va_list ap;
1777
1778 va_start(ap, fmt);
1779 fprintf(stderr, "qemu: fatal: ");
1780 vfprintf(stderr, fmt, ap);
1781 fprintf(stderr, "\n");
1782#ifdef TARGET_I386
1783 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1784#else
1785 cpu_dump_state(env, stderr, fprintf, 0);
1786#endif
1787 va_end(ap);
1788 abort();
1789}
1790#endif /* !VBOX */
1791
1792#ifndef VBOX
1793CPUState *cpu_copy(CPUState *env)
1794{
1795 CPUState *new_env = cpu_init(env->cpu_model_str);
1796 /* preserve chaining and index */
1797 CPUState *next_cpu = new_env->next_cpu;
1798 int cpu_index = new_env->cpu_index;
1799 memcpy(new_env, env, sizeof(CPUState));
1800 new_env->next_cpu = next_cpu;
1801 new_env->cpu_index = cpu_index;
1802 return new_env;
1803}
1804#endif
1805
1806#if !defined(CONFIG_USER_ONLY)
1807
1808#ifndef VBOX
1809static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1810#else
1811DECLINLINE(void) tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1812#endif
1813{
1814 unsigned int i;
1815
1816 /* Discard jump cache entries for any tb which might potentially
1817 overlap the flushed page. */
1818 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1819 memset (&env->tb_jmp_cache[i], 0,
1820 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1821
1822 i = tb_jmp_cache_hash_page(addr);
1823 memset (&env->tb_jmp_cache[i], 0,
1824 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1825
1826#ifdef VBOX
1827 /* inform raw mode about TLB page flush */
1828 remR3FlushPage(env, addr);
1829#endif /* VBOX */
1830}
1831
1832/* NOTE: if flush_global is true, also flush global entries (not
1833 implemented yet) */
1834void tlb_flush(CPUState *env, int flush_global)
1835{
1836 int i;
1837
1838#if defined(DEBUG_TLB)
1839 printf("tlb_flush:\n");
1840#endif
1841 /* must reset current TB so that interrupts cannot modify the
1842 links while we are modifying them */
1843 env->current_tb = NULL;
1844
1845 for(i = 0; i < CPU_TLB_SIZE; i++) {
1846 env->tlb_table[0][i].addr_read = -1;
1847 env->tlb_table[0][i].addr_write = -1;
1848 env->tlb_table[0][i].addr_code = -1;
1849 env->tlb_table[1][i].addr_read = -1;
1850 env->tlb_table[1][i].addr_write = -1;
1851 env->tlb_table[1][i].addr_code = -1;
1852#if (NB_MMU_MODES >= 3)
1853 env->tlb_table[2][i].addr_read = -1;
1854 env->tlb_table[2][i].addr_write = -1;
1855 env->tlb_table[2][i].addr_code = -1;
1856#if (NB_MMU_MODES == 4)
1857 env->tlb_table[3][i].addr_read = -1;
1858 env->tlb_table[3][i].addr_write = -1;
1859 env->tlb_table[3][i].addr_code = -1;
1860#endif
1861#endif
1862 }
1863
1864 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1865
1866#ifdef VBOX
1867 /* inform raw mode about TLB flush */
1868 remR3FlushTLB(env, flush_global);
1869#endif
1870#ifdef USE_KQEMU
1871 if (env->kqemu_enabled) {
1872 kqemu_flush(env, flush_global);
1873 }
1874#endif
1875 tlb_flush_count++;
1876}
1877
1878#ifndef VBOX
1879static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1880#else
1881DECLINLINE(void) tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1882#endif
1883{
1884 if (addr == (tlb_entry->addr_read &
1885 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1886 addr == (tlb_entry->addr_write &
1887 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1888 addr == (tlb_entry->addr_code &
1889 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1890 tlb_entry->addr_read = -1;
1891 tlb_entry->addr_write = -1;
1892 tlb_entry->addr_code = -1;
1893 }
1894}
1895
1896void tlb_flush_page(CPUState *env, target_ulong addr)
1897{
1898 int i;
1899
1900#if defined(DEBUG_TLB)
1901 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1902#endif
1903 /* must reset current TB so that interrupts cannot modify the
1904 links while we are modifying them */
1905 env->current_tb = NULL;
1906
1907 addr &= TARGET_PAGE_MASK;
1908 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1909 tlb_flush_entry(&env->tlb_table[0][i], addr);
1910 tlb_flush_entry(&env->tlb_table[1][i], addr);
1911#if (NB_MMU_MODES >= 3)
1912 tlb_flush_entry(&env->tlb_table[2][i], addr);
1913#if (NB_MMU_MODES == 4)
1914 tlb_flush_entry(&env->tlb_table[3][i], addr);
1915#endif
1916#endif
1917
1918 tlb_flush_jmp_cache(env, addr);
1919
1920#ifdef USE_KQEMU
1921 if (env->kqemu_enabled) {
1922 kqemu_flush_page(env, addr);
1923 }
1924#endif
1925}
1926
1927/* update the TLBs so that writes to code in the virtual page 'addr'
1928 can be detected */
1929static void tlb_protect_code(ram_addr_t ram_addr)
1930{
1931 cpu_physical_memory_reset_dirty(ram_addr,
1932 ram_addr + TARGET_PAGE_SIZE,
1933 CODE_DIRTY_FLAG);
1934#if defined(VBOX) && defined(REM_MONITOR_CODE_PAGES)
1935 /** @todo Retest this? This function has changed... */
1936 remR3ProtectCode(cpu_single_env, ram_addr);
1937#endif
1938}
1939
1940/* update the TLB so that writes in physical page 'phys_addr' are no longer
1941 tested for self modifying code */
1942static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1943 target_ulong vaddr)
1944{
1945#ifdef VBOX
1946 if (RT_LIKELY((ram_addr >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
1947#endif
1948 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1949}
1950
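/* If this TLB entry maps RAM inside [start, start + length), redirect its
   write path to IO_MEM_NOTDIRTY so the next write goes through dirty tracking. */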
1951#ifndef VBOX
1952static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1953 unsigned long start, unsigned long length)
1954#else
1955DECLINLINE(void) tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1956 unsigned long start, unsigned long length)
1957#endif
1958{
1959 unsigned long addr;
1960 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1961 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1962 if ((addr - start) < length) {
1963 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
1964 }
1965 }
1966}
1967
1968void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1969 int dirty_flags)
1970{
1971 CPUState *env;
1972 unsigned long length, start1;
1973 int i, mask, len;
1974 uint8_t *p;
1975
1976 start &= TARGET_PAGE_MASK;
1977 end = TARGET_PAGE_ALIGN(end);
1978
1979 length = end - start;
1980 if (length == 0)
1981 return;
1982 len = length >> TARGET_PAGE_BITS;
1983#ifdef USE_KQEMU
1984 /* XXX: should not depend on cpu context */
1985 env = first_cpu;
1986 if (env->kqemu_enabled) {
1987 ram_addr_t addr;
1988 addr = start;
1989 for(i = 0; i < len; i++) {
1990 kqemu_set_notdirty(env, addr);
1991 addr += TARGET_PAGE_SIZE;
1992 }
1993 }
1994#endif
1995 mask = ~dirty_flags;
1996 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1997#ifdef VBOX
1998 if (RT_LIKELY((start >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
1999#endif
2000 for(i = 0; i < len; i++)
2001 p[i] &= mask;
2002
2003 /* we modify the TLB cache so that the dirty bit will be set again
2004 when accessing the range */
2005#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
2006 start1 = start;
2007#elif !defined(VBOX)
2008 start1 = start + (unsigned long)phys_ram_base;
2009#else
2010 start1 = (unsigned long)remR3GCPhys2HCVirt(first_cpu, start);
2011#endif
2012 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2013 for(i = 0; i < CPU_TLB_SIZE; i++)
2014 tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
2015 for(i = 0; i < CPU_TLB_SIZE; i++)
2016 tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
2017#if (NB_MMU_MODES >= 3)
2018 for(i = 0; i < CPU_TLB_SIZE; i++)
2019 tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
2020#if (NB_MMU_MODES == 4)
2021 for(i = 0; i < CPU_TLB_SIZE; i++)
2022 tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
2023#endif
2024#endif
2025 }
2026}
2027
2028#ifndef VBOX
2029int cpu_physical_memory_set_dirty_tracking(int enable)
2030{
2031 in_migration = enable;
2032 return 0;
2033}
2034
2035int cpu_physical_memory_get_dirty_tracking(void)
2036{
2037 return in_migration;
2038}
2039#endif
2040
2041#ifndef VBOX
2042static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
2043#else
2044DECLINLINE(void) tlb_update_dirty(CPUTLBEntry *tlb_entry)
2045#endif
2046{
2047 ram_addr_t ram_addr;
2048
2049 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
2050 /* RAM case */
2051#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
2052 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
2053#elif !defined(VBOX)
2054 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
2055 tlb_entry->addend - (unsigned long)phys_ram_base;
2056#else
2057 ram_addr = remR3HCVirt2GCPhys(first_cpu, (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend);
2058#endif
2059 if (!cpu_physical_memory_is_dirty(ram_addr)) {
2060 tlb_entry->addr_write |= IO_MEM_NOTDIRTY;
2061 }
2062 }
2063}
2064
2065/* update the TLB according to the current state of the dirty bits */
2066void cpu_tlb_update_dirty(CPUState *env)
2067{
2068 int i;
2069 for(i = 0; i < CPU_TLB_SIZE; i++)
2070 tlb_update_dirty(&env->tlb_table[0][i]);
2071 for(i = 0; i < CPU_TLB_SIZE; i++)
2072 tlb_update_dirty(&env->tlb_table[1][i]);
2073#if (NB_MMU_MODES >= 3)
2074 for(i = 0; i < CPU_TLB_SIZE; i++)
2075 tlb_update_dirty(&env->tlb_table[2][i]);
2076#if (NB_MMU_MODES == 4)
2077 for(i = 0; i < CPU_TLB_SIZE; i++)
2078 tlb_update_dirty(&env->tlb_table[3][i]);
2079#endif
2080#endif
2081}
2082
2083#ifndef VBOX
2084static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
2085#else
2086DECLINLINE(void) tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
2087#endif
2088{
2089 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
2090 tlb_entry->addr_write = vaddr;
2091}
2092
2093
2094/* update the TLB corresponding to virtual page vaddr and phys addr
2095 addr so that it is no longer dirty */
2096#ifndef VBOX
2097static inline void tlb_set_dirty(CPUState *env,
2098 unsigned long addr, target_ulong vaddr)
2099#else
2100DECLINLINE(void) tlb_set_dirty(CPUState *env,
2101 unsigned long addr, target_ulong vaddr)
2102#endif
2103{
2104 int i;
2105
2106 addr &= TARGET_PAGE_MASK;
2107 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2108 tlb_set_dirty1(&env->tlb_table[0][i], addr);
2109 tlb_set_dirty1(&env->tlb_table[1][i], addr);
2110#if (NB_MMU_MODES >= 3)
2111 tlb_set_dirty1(&env->tlb_table[2][i], vaddr);
2112#if (NB_MMU_MODES == 4)
2113 tlb_set_dirty1(&env->tlb_table[3][i], vaddr);
2114#endif
2115#endif
2116}
2117
2118/* add a new TLB entry. At most one entry for a given virtual address
2119 is permitted. Return 0 if OK or 2 if the page could not be mapped
2120 (can only happen in non-SOFTMMU mode for I/O pages or pages
2121 conflicting with the host address space). */
2122int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
2123 target_phys_addr_t paddr, int prot,
2124 int mmu_idx, int is_softmmu)
2125{
2126 PhysPageDesc *p;
2127 unsigned long pd;
2128 unsigned int index;
2129 target_ulong address;
2130 target_ulong code_address;
2131 target_phys_addr_t addend;
2132 int ret;
2133 CPUTLBEntry *te;
2134 int i;
2135 target_phys_addr_t iotlb;
2136
2137 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
2138 if (!p) {
2139 pd = IO_MEM_UNASSIGNED;
2140 } else {
2141 pd = p->phys_offset;
2142 }
2143#if defined(DEBUG_TLB)
2144 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
2145 vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
2146#endif
2147
2148 ret = 0;
2149 address = vaddr;
2150 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
2151 /* IO memory case (romd handled later) */
2152 address |= TLB_MMIO;
2153 }
2154#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
2155 addend = pd & TARGET_PAGE_MASK;
2156#elif !defined(VBOX)
2157 addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
2158#else
2159 addend = (unsigned long)remR3GCPhys2HCVirt(env, pd & TARGET_PAGE_MASK);
2160#endif
2161 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
2162 /* Normal RAM. */
2163 iotlb = pd & TARGET_PAGE_MASK;
2164 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
2165 iotlb |= IO_MEM_NOTDIRTY;
2166 else
2167 iotlb |= IO_MEM_ROM;
2168 } else {
2169 /* IO handlers are currently passed a physical address.
2170 It would be nice to pass an offset from the base address
2171 of that region. This would avoid having to special case RAM,
2172 and avoid full address decoding in every device.
2173 We can't use the high bits of pd for this because
2174 IO_MEM_ROMD uses these as a ram address. */
2175 iotlb = (pd & ~TARGET_PAGE_MASK) + paddr;
2176 }
2177
2178 code_address = address;
2179 /* Make accesses to pages with watchpoints go via the
2180 watchpoint trap routines. */
2181 for (i = 0; i < env->nb_watchpoints; i++) {
2182 if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {
2183 iotlb = io_mem_watch + paddr;
2184 /* TODO: The memory case can be optimized by not trapping
2185 reads of pages with a write breakpoint. */
2186 address |= TLB_MMIO;
2187 }
2188 }
2189
2190 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2191 env->iotlb[mmu_idx][index] = iotlb - vaddr;
2192 te = &env->tlb_table[mmu_idx][index];
2193 te->addend = addend - vaddr;
2194 if (prot & PAGE_READ) {
2195 te->addr_read = address;
2196 } else {
2197 te->addr_read = -1;
2198 }
2199
2200 if (prot & PAGE_EXEC) {
2201 te->addr_code = code_address;
2202 } else {
2203 te->addr_code = -1;
2204 }
2205 if (prot & PAGE_WRITE) {
2206 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
2207 (pd & IO_MEM_ROMD)) {
2208 /* Write access calls the I/O callback. */
2209 te->addr_write = address | TLB_MMIO;
2210 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
2211 !cpu_physical_memory_is_dirty(pd)) {
2212 te->addr_write = address | TLB_NOTDIRTY;
2213 } else {
2214 te->addr_write = address;
2215 }
2216 } else {
2217 te->addr_write = -1;
2218 }
2219#ifdef VBOX
2220 /* inform raw mode about TLB page change */
2221 remR3FlushPage(env, vaddr);
2222#endif
2223 return ret;
2224}
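/* Usage sketch (illustrative, not from this file): the per-target MMU fault
   handler is expected to call tlb_set_page_exec() once it has resolved the
   faulting address; 'vaddr', 'paddr', 'mmu_idx' and the protection mask below
   stand for whatever that handler computed from the guest page tables.

       int ret = tlb_set_page_exec(env, vaddr & TARGET_PAGE_MASK,
                                   paddr & TARGET_PAGE_MASK,
                                   PAGE_READ | PAGE_WRITE | PAGE_EXEC,
                                   mmu_idx, 1);
*/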
2225
2226/* called from signal handler: invalidate the code and unprotect the
2227 page. Return TRUE if the fault was successfully handled. */
2228int page_unprotect(target_ulong addr, unsigned long pc, void *puc)
2229{
2230#if !defined(CONFIG_SOFTMMU)
2231 VirtPageDesc *vp;
2232
2233#if defined(DEBUG_TLB)
2234 printf("page_unprotect: addr=0x%08x\n", addr);
2235#endif
2236 addr &= TARGET_PAGE_MASK;
2237
2238 /* if it is not mapped, no need to worry here */
2239 if (addr >= MMAP_AREA_END)
2240 return 0;
2241 vp = virt_page_find(addr >> TARGET_PAGE_BITS);
2242 if (!vp)
2243 return 0;
2244 /* NOTE: in this case, validate_tag is _not_ tested as it
2245 validates only the code TLB */
2246 if (vp->valid_tag != virt_valid_tag)
2247 return 0;
2248 if (!(vp->prot & PAGE_WRITE))
2249 return 0;
2250#if defined(DEBUG_TLB)
2251 printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
2252 addr, vp->phys_addr, vp->prot);
2253#endif
2254 if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
2255 cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
2256 (unsigned long)addr, vp->prot);
2257 /* set the dirty bit */
2258 phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
2259 /* flush the code inside */
2260 tb_invalidate_phys_page(vp->phys_addr, pc, puc);
2261 return 1;
2262#elif defined(VBOX)
2263 addr &= TARGET_PAGE_MASK;
2264
2265 /* if it is not mapped, no need to worry here */
2266 if (addr >= MMAP_AREA_END)
2267 return 0;
2268 return 1;
2269#else
2270 return 0;
2271#endif
2272}
2273
2274#else
2275
2276void tlb_flush(CPUState *env, int flush_global)
2277{
2278}
2279
2280void tlb_flush_page(CPUState *env, target_ulong addr)
2281{
2282}
2283
2284int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
2285 target_phys_addr_t paddr, int prot,
2286 int is_user, int is_softmmu)
2287{
2288 return 0;
2289}
2290
2291#ifndef VBOX
2292/* dump memory mappings */
2293void page_dump(FILE *f)
2294{
2295 unsigned long start, end;
2296 int i, j, prot, prot1;
2297 PageDesc *p;
2298
2299 fprintf(f, "%-8s %-8s %-8s %s\n",
2300 "start", "end", "size", "prot");
2301 start = -1;
2302 end = -1;
2303 prot = 0;
2304 for(i = 0; i <= L1_SIZE; i++) {
2305 if (i < L1_SIZE)
2306 p = l1_map[i];
2307 else
2308 p = NULL;
2309 for(j = 0;j < L2_SIZE; j++) {
2310 if (!p)
2311 prot1 = 0;
2312 else
2313 prot1 = p[j].flags;
2314 if (prot1 != prot) {
2315 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
2316 if (start != -1) {
2317 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
2318 start, end, end - start,
2319 prot & PAGE_READ ? 'r' : '-',
2320 prot & PAGE_WRITE ? 'w' : '-',
2321 prot & PAGE_EXEC ? 'x' : '-');
2322 }
2323 if (prot1 != 0)
2324 start = end;
2325 else
2326 start = -1;
2327 prot = prot1;
2328 }
2329 if (!p)
2330 break;
2331 }
2332 }
2333}
2334#endif /* !VBOX */
2335
2336int page_get_flags(target_ulong address)
2337{
2338 PageDesc *p;
2339
2340 p = page_find(address >> TARGET_PAGE_BITS);
2341 if (!p)
2342 return 0;
2343 return p->flags;
2344}
2345
2346/* modify the flags of a page and invalidate the code if
2347 necessary. The flag PAGE_WRITE_ORG is set automatically
2348 depending on PAGE_WRITE */
2349void page_set_flags(target_ulong start, target_ulong end, int flags)
2350{
2351 PageDesc *p;
2352 target_ulong addr;
2353
2354 start = start & TARGET_PAGE_MASK;
2355 end = TARGET_PAGE_ALIGN(end);
2356 if (flags & PAGE_WRITE)
2357 flags |= PAGE_WRITE_ORG;
2358#ifdef VBOX
2359 AssertMsgFailed(("We shouldn't be here, and if we should, we must have an env to do the proper locking!\n"));
2360#endif
2361 spin_lock(&tb_lock);
2362 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2363 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
2364 /* if the write protection is set, then we invalidate the code
2365 inside */
2366 if (!(p->flags & PAGE_WRITE) &&
2367 (flags & PAGE_WRITE) &&
2368 p->first_tb) {
2369 tb_invalidate_phys_page(addr, 0, NULL);
2370 }
2371 p->flags = flags;
2372 }
2373 spin_unlock(&tb_lock);
2374}
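/* Usage sketch (illustrative): user-mode emulation of mmap()-like syscalls is
   expected to record new guest mappings this way; 'start' and 'len' are
   hypothetical, PAGE_VALID marks the range as mapped.

       page_set_flags(start, start + len,
                      PAGE_VALID | PAGE_READ | PAGE_WRITE);
       if (page_get_flags(start) & PAGE_WRITE) {
           // range is now writable from the guest's point of view
       }
*/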
2375
2376/* called from signal handler: invalidate the code and unprotect the
2377 page. Return TRUE if the fault was successfully handled. */
2378int page_unprotect(target_ulong address, unsigned long pc, void *puc)
2379{
2380 unsigned int page_index, prot, pindex;
2381 PageDesc *p, *p1;
2382 target_ulong host_start, host_end, addr;
2383
2384 host_start = address & qemu_host_page_mask;
2385 page_index = host_start >> TARGET_PAGE_BITS;
2386 p1 = page_find(page_index);
2387 if (!p1)
2388 return 0;
2389 host_end = host_start + qemu_host_page_size;
2390 p = p1;
2391 prot = 0;
2392 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
2393 prot |= p->flags;
2394 p++;
2395 }
2396 /* if the page was really writable, then we change its
2397 protection back to writable */
2398 if (prot & PAGE_WRITE_ORG) {
2399 pindex = (address - host_start) >> TARGET_PAGE_BITS;
2400 if (!(p1[pindex].flags & PAGE_WRITE)) {
2401 mprotect((void *)g2h(host_start), qemu_host_page_size,
2402 (prot & PAGE_BITS) | PAGE_WRITE);
2403 p1[pindex].flags |= PAGE_WRITE;
2404 /* and since the content will be modified, we must invalidate
2405 the corresponding translated code. */
2406 tb_invalidate_phys_page(address, pc, puc);
2407#ifdef DEBUG_TB_CHECK
2408 tb_invalidate_check(address);
2409#endif
2410 return 1;
2411 }
2412 }
2413 return 0;
2414}
2415
2416/* call this function when system calls directly modify a memory area */
2417/* ??? This should be redundant now we have lock_user. */
2418void page_unprotect_range(target_ulong data, target_ulong data_size)
2419{
2420 target_ulong start, end, addr;
2421
2422 start = data;
2423 end = start + data_size;
2424 start &= TARGET_PAGE_MASK;
2425 end = TARGET_PAGE_ALIGN(end);
2426 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2427 page_unprotect(addr, 0, NULL);
2428 }
2429}
2430
2431static inline void tlb_set_dirty(CPUState *env,
2432 unsigned long addr, target_ulong vaddr)
2433{
2434}
2435#endif /* defined(CONFIG_USER_ONLY) */
2436
2437/* register physical memory. 'size' must be a multiple of the target
2438 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2439 io memory page */
2440void cpu_register_physical_memory(target_phys_addr_t start_addr,
2441 unsigned long size,
2442 unsigned long phys_offset)
2443{
2444 target_phys_addr_t addr, end_addr;
2445 PhysPageDesc *p;
2446 CPUState *env;
2447
2448 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
2449 end_addr = start_addr + size;
2450 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
2451 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2452 p->phys_offset = phys_offset;
2453#if !defined(VBOX) || defined(VBOX_WITH_NEW_PHYS_CODE)
2454 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2455 (phys_offset & IO_MEM_ROMD))
2456#else
2457 if ( (phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM
2458 || (phys_offset & IO_MEM_ROMD)
2459 || (phys_offset & ~TARGET_PAGE_MASK) == IO_MEM_RAM_MISSING)
2460#endif
2461
2462 phys_offset += TARGET_PAGE_SIZE;
2463 }
2464
2465 /* since each CPU stores ram addresses in its TLB cache, we must
2466 reset the modified entries */
2467 /* XXX: slow ! */
2468 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2469 tlb_flush(env, 1);
2470 }
2471}
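/* Usage sketch (illustrative, 'ram_size', 'ram_offset' and 'mmio_io_index'
   are hypothetical): RAM is registered with a page-aligned phys_offset (the
   low bits being IO_MEM_RAM), while an I/O region is registered with the
   value returned by cpu_register_io_memory().

       cpu_register_physical_memory(0x00000000, ram_size,
                                    ram_offset | IO_MEM_RAM);
       cpu_register_physical_memory(mmio_base, TARGET_PAGE_SIZE,
                                    mmio_io_index);
*/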
2472
2473/* XXX: temporary until new memory mapping API */
2474uint32_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2475{
2476 PhysPageDesc *p;
2477
2478 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2479 if (!p)
2480 return IO_MEM_UNASSIGNED;
2481 return p->phys_offset;
2482}
2483
2484static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
2485{
2486#ifdef DEBUG_UNASSIGNED
2487 printf("Unassigned mem read 0x%08x\n", (int)addr);
2488#endif
2489 return 0;
2490}
2491
2492static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2493{
2494#ifdef DEBUG_UNASSIGNED
2495 printf("Unassigned mem write 0x%08x = 0x%x\n", (int)addr, val);
2496#endif
2497}
2498
2499static CPUReadMemoryFunc *unassigned_mem_read[3] = {
2500 unassigned_mem_readb,
2501 unassigned_mem_readb,
2502 unassigned_mem_readb,
2503};
2504
2505static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
2506 unassigned_mem_writeb,
2507 unassigned_mem_writeb,
2508 unassigned_mem_writeb,
2509};
2510
2511static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2512{
2513 unsigned long ram_addr;
2514 int dirty_flags;
2515#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
2516 ram_addr = addr;
2517#elif !defined(VBOX)
2518 ram_addr = addr - (unsigned long)phys_ram_base;
2519#else
2520 ram_addr = remR3HCVirt2GCPhys(first_cpu, (void *)addr);
2521#endif
2522#ifdef VBOX
2523 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
2524 dirty_flags = 0xff;
2525 else
2526#endif /* VBOX */
2527 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2528 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2529#if !defined(CONFIG_USER_ONLY)
2530 tb_invalidate_phys_page_fast(ram_addr, 1);
2531# ifdef VBOX
2532 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
2533 dirty_flags = 0xff;
2534 else
2535# endif /* VBOX */
2536 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2537#endif
2538 }
2539 stb_p((uint8_t *)(long)addr, val);
2540#ifdef USE_KQEMU
2541 if (cpu_single_env->kqemu_enabled &&
2542 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2543 kqemu_modify_page(cpu_single_env, ram_addr);
2544#endif
2545 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2546#ifdef VBOX
2547 if (RT_LIKELY((ram_addr >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
2548#endif /* VBOX */
2549 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2550 /* we remove the notdirty callback only if the code has been
2551 flushed */
2552 if (dirty_flags == 0xff)
2553 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_io_vaddr);
2554}
2555
2556static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2557{
2558 unsigned long ram_addr;
2559 int dirty_flags;
2560#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
2561 ram_addr = addr;
2562#elif !defined(VBOX)
2563 ram_addr = addr - (unsigned long)phys_ram_base;
2564#else
2565 ram_addr = remR3HCVirt2GCPhys(first_cpu, (void *)addr);
2566#endif
2567#ifdef VBOX
2568 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
2569 dirty_flags = 0xff;
2570 else
2571#endif /* VBOX */
2572 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2573 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2574#if !defined(CONFIG_USER_ONLY)
2575 tb_invalidate_phys_page_fast(ram_addr, 2);
2576# ifdef VBOX
2577 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
2578 dirty_flags = 0xff;
2579 else
2580# endif /* VBOX */
2581 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2582#endif
2583 }
2584 stw_p((uint8_t *)(long)addr, val);
2585#ifdef USE_KQEMU
2586 if (cpu_single_env->kqemu_enabled &&
2587 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2588 kqemu_modify_page(cpu_single_env, ram_addr);
2589#endif
2590 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2591#ifdef VBOX
2592 if (RT_LIKELY((ram_addr >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
2593#endif
2594 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2595 /* we remove the notdirty callback only if the code has been
2596 flushed */
2597 if (dirty_flags == 0xff)
2598 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_io_vaddr);
2599}
2600
2601static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2602{
2603 unsigned long ram_addr;
2604 int dirty_flags;
2605#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
2606 ram_addr = addr;
2607#elif !defined(VBOX)
2608 ram_addr = addr - (unsigned long)phys_ram_base;
2609#else
2610 ram_addr = remR3HCVirt2GCPhys(first_cpu, (void *)addr);
2611#endif
2612#ifdef VBOX
2613 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
2614 dirty_flags = 0xff;
2615 else
2616#endif /* VBOX */
2617 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2618 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2619#if !defined(CONFIG_USER_ONLY)
2620 tb_invalidate_phys_page_fast(ram_addr, 4);
2621# ifdef VBOX
2622 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
2623 dirty_flags = 0xff;
2624 else
2625# endif /* VBOX */
2626 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2627#endif
2628 }
2629 stl_p((uint8_t *)(long)addr, val);
2630#ifdef USE_KQEMU
2631 if (cpu_single_env->kqemu_enabled &&
2632 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2633 kqemu_modify_page(cpu_single_env, ram_addr);
2634#endif
2635 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2636#ifdef VBOX
2637 if (RT_LIKELY((ram_addr >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
2638#endif
2639 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2640 /* we remove the notdirty callback only if the code has been
2641 flushed */
2642 if (dirty_flags == 0xff)
2643 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_io_vaddr);
2644}
2645
2646static CPUReadMemoryFunc *error_mem_read[3] = {
2647 NULL, /* never used */
2648 NULL, /* never used */
2649 NULL, /* never used */
2650};
2651
2652static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2653 notdirty_mem_writeb,
2654 notdirty_mem_writew,
2655 notdirty_mem_writel,
2656};
2657
2658static void io_mem_init(void)
2659{
2660 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
2661 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
2662 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
2663#if defined(VBOX) && !defined(VBOX_WITH_NEW_PHYS_CODE)
2664 cpu_register_io_memory(IO_MEM_RAM_MISSING >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
2665 io_mem_nb = 6;
2666#else
2667 io_mem_nb = 5;
2668#endif
2669
2670#ifndef VBOX /* VBOX: we do this later when the RAM is allocated. */
2671 /* alloc dirty bits array */
2672 phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
2673 memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
2674#endif /* !VBOX */
2675}
2676
2677/* mem_read and mem_write are arrays of functions used to access
2678 bytes (index 0), words (index 1) and dwords (index 2). All three
2679 functions must be supplied. If io_index is greater than zero, the
2680 corresponding io zone is modified. If it is zero or negative, a new
2681 io zone is allocated. The return value can be used with
2682 cpu_register_physical_memory(). (-1) is returned on error. */
2683int cpu_register_io_memory(int io_index,
2684 CPUReadMemoryFunc **mem_read,
2685 CPUWriteMemoryFunc **mem_write,
2686 void *opaque)
2687{
2688 int i;
2689
2690 if (io_index <= 0) {
2691 if (io_mem_nb >= IO_MEM_NB_ENTRIES)
2692 return -1;
2693 io_index = io_mem_nb++;
2694 } else {
2695 if (io_index >= IO_MEM_NB_ENTRIES)
2696 return -1;
2697 }
2698
2699 for(i = 0;i < 3; i++) {
2700 io_mem_read[io_index][i] = mem_read[i];
2701 io_mem_write[io_index][i] = mem_write[i];
2702 }
2703 io_mem_opaque[io_index] = opaque;
2704 return io_index << IO_MEM_SHIFT;
2705}
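/* Usage sketch (illustrative, the mydev_* names and 'mmio_base' are
   hypothetical): a device model supplies one read and one write handler per
   access size and then maps the returned token with
   cpu_register_physical_memory().

       static CPUReadMemoryFunc  *mydev_read[3]  = {
           mydev_readb, mydev_readw, mydev_readl,
       };
       static CPUWriteMemoryFunc *mydev_write[3] = {
           mydev_writeb, mydev_writew, mydev_writel,
       };

       int io_index = cpu_register_io_memory(0, mydev_read, mydev_write,
                                             mydev_state);
       cpu_register_physical_memory(mmio_base, TARGET_PAGE_SIZE, io_index);
*/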
2706
2707CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
2708{
2709 return io_mem_write[io_index >> IO_MEM_SHIFT];
2710}
2711
2712CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
2713{
2714 return io_mem_read[io_index >> IO_MEM_SHIFT];
2715}
2716
2717/* physical memory access (slow version, mainly for debug) */
2718#if defined(CONFIG_USER_ONLY)
2719void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2720 int len, int is_write)
2721{
2722 int l, flags;
2723 target_ulong page;
2724 void * p;
2725
2726 while (len > 0) {
2727 page = addr & TARGET_PAGE_MASK;
2728 l = (page + TARGET_PAGE_SIZE) - addr;
2729 if (l > len)
2730 l = len;
2731 flags = page_get_flags(page);
2732 if (!(flags & PAGE_VALID))
2733 return;
2734 if (is_write) {
2735 if (!(flags & PAGE_WRITE))
2736 return;
2737 p = lock_user(addr, len, 0);
2738 memcpy(p, buf, len);
2739 unlock_user(p, addr, len);
2740 } else {
2741 if (!(flags & PAGE_READ))
2742 return;
2743 p = lock_user(addr, len, 1);
2744 memcpy(buf, p, len);
2745 unlock_user(p, addr, 0);
2746 }
2747 len -= l;
2748 buf += l;
2749 addr += l;
2750 }
2751}
2752
2753#else
2754void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2755 int len, int is_write)
2756{
2757 int l, io_index;
2758 uint8_t *ptr;
2759 uint32_t val;
2760 target_phys_addr_t page;
2761 unsigned long pd;
2762 PhysPageDesc *p;
2763
2764 while (len > 0) {
2765 page = addr & TARGET_PAGE_MASK;
2766 l = (page + TARGET_PAGE_SIZE) - addr;
2767 if (l > len)
2768 l = len;
2769 p = phys_page_find(page >> TARGET_PAGE_BITS);
2770 if (!p) {
2771 pd = IO_MEM_UNASSIGNED;
2772 } else {
2773 pd = p->phys_offset;
2774 }
2775
2776 if (is_write) {
2777 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2778 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2779 /* XXX: could force cpu_single_env to NULL to avoid
2780 potential bugs */
2781 if (l >= 4 && ((addr & 3) == 0)) {
2782 /* 32 bit write access */
2783#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
2784 val = ldl_p(buf);
2785#else
2786 val = *(const uint32_t *)buf;
2787#endif
2788 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2789 l = 4;
2790 } else if (l >= 2 && ((addr & 1) == 0)) {
2791 /* 16 bit write access */
2792#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
2793 val = lduw_p(buf);
2794#else
2795 val = *(const uint16_t *)buf;
2796#endif
2797 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
2798 l = 2;
2799 } else {
2800 /* 8 bit write access */
2801#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
2802 val = ldub_p(buf);
2803#else
2804 val = *(const uint8_t *)buf;
2805#endif
2806 io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
2807 l = 1;
2808 }
2809 } else {
2810 unsigned long addr1;
2811 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2812 /* RAM case */
2813#ifdef VBOX
2814 remR3PhysWrite(addr1, buf, l); NOREF(ptr);
2815#else
2816 ptr = phys_ram_base + addr1;
2817 memcpy(ptr, buf, l);
2818#endif
2819 if (!cpu_physical_memory_is_dirty(addr1)) {
2820 /* invalidate code */
2821 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
2822 /* set dirty bit */
2823#ifdef VBOX
2824 if (RT_LIKELY((addr1 >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
2825#endif
2826 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2827 (0xff & ~CODE_DIRTY_FLAG);
2828 }
2829 }
2830 } else {
2831 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2832 !(pd & IO_MEM_ROMD)) {
2833 /* I/O case */
2834 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2835 if (l >= 4 && ((addr & 3) == 0)) {
2836 /* 32 bit read access */
2837 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2838#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
2839 stl_p(buf, val);
2840#else
2841 *(uint32_t *)buf = val;
2842#endif
2843 l = 4;
2844 } else if (l >= 2 && ((addr & 1) == 0)) {
2845 /* 16 bit read access */
2846 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
2847#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
2848 stw_p(buf, val);
2849#else
2850 *(uint16_t *)buf = val;
2851#endif
2852 l = 2;
2853 } else {
2854 /* 8 bit read access */
2855 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
2856#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
2857 stb_p(buf, val);
2858#else
2859 *(uint8_t *)buf = val;
2860#endif
2861 l = 1;
2862 }
2863 } else {
2864 /* RAM case */
2865#ifdef VBOX
2866 remR3PhysRead((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK), buf, l); NOREF(ptr);
2867#else
2868 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2869 (addr & ~TARGET_PAGE_MASK);
2870 memcpy(buf, ptr, l);
2871#endif
2872 }
2873 }
2874 len -= l;
2875 buf += l;
2876 addr += l;
2877 }
2878}
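/* Usage sketch (illustrative, 'gpa' is a hypothetical guest physical
   address): callers normally go through the cpu_physical_memory_read() and
   cpu_physical_memory_write() helpers used elsewhere in this file, which
   call cpu_physical_memory_rw() with is_write set to 0 or 1.

       uint8_t buf[64];
       cpu_physical_memory_read(gpa, buf, sizeof(buf));
       cpu_physical_memory_write(gpa, buf, sizeof(buf));
*/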
2879
2880#ifndef VBOX
2881/* used for ROM loading : can write in RAM and ROM */
2882void cpu_physical_memory_write_rom(target_phys_addr_t addr,
2883 const uint8_t *buf, int len)
2884{
2885 int l;
2886 uint8_t *ptr;
2887 target_phys_addr_t page;
2888 unsigned long pd;
2889 PhysPageDesc *p;
2890
2891 while (len > 0) {
2892 page = addr & TARGET_PAGE_MASK;
2893 l = (page + TARGET_PAGE_SIZE) - addr;
2894 if (l > len)
2895 l = len;
2896 p = phys_page_find(page >> TARGET_PAGE_BITS);
2897 if (!p) {
2898 pd = IO_MEM_UNASSIGNED;
2899 } else {
2900 pd = p->phys_offset;
2901 }
2902
2903 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
2904 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
2905 !(pd & IO_MEM_ROMD)) {
2906 /* do nothing */
2907 } else {
2908 unsigned long addr1;
2909 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2910 /* ROM/RAM case */
2911 ptr = phys_ram_base + addr1;
2912 memcpy(ptr, buf, l);
2913 }
2914 len -= l;
2915 buf += l;
2916 addr += l;
2917 }
2918}
2919#endif /* !VBOX */
2920
2921
2922/* warning: addr must be aligned */
2923uint32_t ldl_phys(target_phys_addr_t addr)
2924{
2925 int io_index;
2926 uint8_t *ptr;
2927 uint32_t val;
2928 unsigned long pd;
2929 PhysPageDesc *p;
2930
2931 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2932 if (!p) {
2933 pd = IO_MEM_UNASSIGNED;
2934 } else {
2935 pd = p->phys_offset;
2936 }
2937
2938 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2939 !(pd & IO_MEM_ROMD)) {
2940 /* I/O case */
2941 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2942 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2943 } else {
2944 /* RAM case */
2945#ifndef VBOX
2946 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2947 (addr & ~TARGET_PAGE_MASK);
2948 val = ldl_p(ptr);
2949#else
2950 val = remR3PhysReadU32((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK)); NOREF(ptr);
2951#endif
2952 }
2953 return val;
2954}
2955
2956/* warning: addr must be aligned */
2957uint64_t ldq_phys(target_phys_addr_t addr)
2958{
2959 int io_index;
2960 uint8_t *ptr;
2961 uint64_t val;
2962 unsigned long pd;
2963 PhysPageDesc *p;
2964
2965 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2966 if (!p) {
2967 pd = IO_MEM_UNASSIGNED;
2968 } else {
2969 pd = p->phys_offset;
2970 }
2971
2972 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2973 !(pd & IO_MEM_ROMD)) {
2974 /* I/O case */
2975 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2976#ifdef TARGET_WORDS_BIGENDIAN
2977 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
2978 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
2979#else
2980 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2981 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
2982#endif
2983 } else {
2984 /* RAM case */
2985#ifndef VBOX
2986 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2987 (addr & ~TARGET_PAGE_MASK);
2988 val = ldq_p(ptr);
2989#else
2990 val = remR3PhysReadU64((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK)); NOREF(ptr);
2991#endif
2992 }
2993 return val;
2994}
2995
2996/* XXX: optimize */
2997uint32_t ldub_phys(target_phys_addr_t addr)
2998{
2999 uint8_t val;
3000 cpu_physical_memory_read(addr, &val, 1);
3001 return val;
3002}
3003
3004/* XXX: optimize */
3005uint32_t lduw_phys(target_phys_addr_t addr)
3006{
3007 uint16_t val;
3008 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
3009 return tswap16(val);
3010}
3011
3012/* warning: addr must be aligned. The ram page is not marked dirty
3013 and the code inside is not invalidated. This is useful if the dirty
3014 bits are used to track modified PTEs */
3015void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
3016{
3017 int io_index;
3018 uint8_t *ptr;
3019 unsigned long pd;
3020 PhysPageDesc *p;
3021
3022 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3023 if (!p) {
3024 pd = IO_MEM_UNASSIGNED;
3025 } else {
3026 pd = p->phys_offset;
3027 }
3028
3029 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3030 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3031 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3032 } else {
3033#ifndef VBOX
3034 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3035 (addr & ~TARGET_PAGE_MASK);
3036 stl_p(ptr, val);
3037#else
3038 remR3PhysWriteU32((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK), val); NOREF(ptr);
3039#endif
3040 }
3041}
3042
3043/* warning: addr must be aligned */
3044void stl_phys(target_phys_addr_t addr, uint32_t val)
3045{
3046 int io_index;
3047 uint8_t *ptr;
3048 unsigned long pd;
3049 PhysPageDesc *p;
3050
3051 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3052 if (!p) {
3053 pd = IO_MEM_UNASSIGNED;
3054 } else {
3055 pd = p->phys_offset;
3056 }
3057
3058 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3059 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3060 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3061 } else {
3062 unsigned long addr1;
3063 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3064 /* RAM case */
3065#ifndef VBOX
3066 ptr = phys_ram_base + addr1;
3067 stl_p(ptr, val);
3068#else
3069 remR3PhysWriteU32((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK), val); NOREF(ptr);
3070#endif
3071 if (!cpu_physical_memory_is_dirty(addr1)) {
3072 /* invalidate code */
3073 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3074 /* set dirty bit */
3075#ifdef VBOX
3076 if (RT_LIKELY((addr1 >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
3077#endif
3078 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3079 (0xff & ~CODE_DIRTY_FLAG);
3080 }
3081 }
3082}
3083
3084/* XXX: optimize */
3085void stb_phys(target_phys_addr_t addr, uint32_t val)
3086{
3087 uint8_t v = val;
3088 cpu_physical_memory_write(addr, &v, 1);
3089}
3090
3091/* XXX: optimize */
3092void stw_phys(target_phys_addr_t addr, uint32_t val)
3093{
3094 uint16_t v = tswap16(val);
3095 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
3096}
3097
3098/* XXX: optimize */
3099void stq_phys(target_phys_addr_t addr, uint64_t val)
3100{
3101 val = tswap64(val);
3102 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
3103}
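/* Usage sketch (illustrative, 'gpa' and 'pte_gpa' are hypothetical guest
   physical addresses): the helpers above are the usual way for device and
   MMU code to touch aligned quantities in guest physical memory.

       uint32_t w = ldl_phys(gpa);       // 32-bit read
       stl_phys(gpa, w | 1);             // 32-bit write with dirty tracking
       stl_phys_notdirty(pte_gpa, w);    // write that leaves the dirty bits alone
*/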
3104
3105#endif
3106
3107#ifndef VBOX
3108/* virtual memory access for debug */
3109int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3110 uint8_t *buf, int len, int is_write)
3111{
3112 int l;
3113 target_ulong page, phys_addr;
3114
3115 while (len > 0) {
3116 page = addr & TARGET_PAGE_MASK;
3117 phys_addr = cpu_get_phys_page_debug(env, page);
3118 /* if no physical page mapped, return an error */
3119 if (phys_addr == -1)
3120 return -1;
3121 l = (page + TARGET_PAGE_SIZE) - addr;
3122 if (l > len)
3123 l = len;
3124 cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
3125 buf, l, is_write);
3126 len -= l;
3127 buf += l;
3128 addr += l;
3129 }
3130 return 0;
3131}
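/* Usage sketch (illustrative, 'guest_pc' is hypothetical): debugger-style
   code such as the gdb stub reads guest virtual memory this way; is_write
   selects the transfer direction.

       uint8_t insn[16];
       if (cpu_memory_rw_debug(env, guest_pc, insn, sizeof(insn), 0) == 0) {
           // the bytes at guest_pc are now in insn[]
       }
*/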
3132
3133void dump_exec_info(FILE *f,
3134 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
3135{
3136 int i, target_code_size, max_target_code_size;
3137 int direct_jmp_count, direct_jmp2_count, cross_page;
3138 TranslationBlock *tb;
3139
3140 target_code_size = 0;
3141 max_target_code_size = 0;
3142 cross_page = 0;
3143 direct_jmp_count = 0;
3144 direct_jmp2_count = 0;
3145 for(i = 0; i < nb_tbs; i++) {
3146 tb = &tbs[i];
3147 target_code_size += tb->size;
3148 if (tb->size > max_target_code_size)
3149 max_target_code_size = tb->size;
3150 if (tb->page_addr[1] != -1)
3151 cross_page++;
3152 if (tb->tb_next_offset[0] != 0xffff) {
3153 direct_jmp_count++;
3154 if (tb->tb_next_offset[1] != 0xffff) {
3155 direct_jmp2_count++;
3156 }
3157 }
3158 }
3159 /* XXX: avoid using doubles ? */
3160 cpu_fprintf(f, "TB count %d\n", nb_tbs);
3161 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
3162 nb_tbs ? target_code_size / nb_tbs : 0,
3163 max_target_code_size);
3164 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
3165 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
3166 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
3167 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
3168 cross_page,
3169 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
3170 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
3171 direct_jmp_count,
3172 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
3173 direct_jmp2_count,
3174 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
3175 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
3176 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
3177 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
3178}
3179#endif /* !VBOX */
3180
3181#if !defined(CONFIG_USER_ONLY)
3182
3183#define MMUSUFFIX _cmmu
3184#define GETPC() NULL
3185#define env cpu_single_env
3186#define SOFTMMU_CODE_ACCESS
3187
3188#define SHIFT 0
3189#include "softmmu_template.h"
3190
3191#define SHIFT 1
3192#include "softmmu_template.h"
3193
3194#define SHIFT 2
3195#include "softmmu_template.h"
3196
3197#define SHIFT 3
3198#include "softmmu_template.h"
3199
3200#undef env
3201
3202#endif