VirtualBox

source: vbox/trunk/src/recompiler/new/exec.c @ 509

Last change on this file since 509 was 104, checked in by vboxsync, 18 years ago

phys_ram_size should be RTGCPHYS.

  • Property svn:eol-style set to native
File size: 75.9 KB
1/*
2 * virtual page mapping and translated block handling
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20#include "config.h"
21#ifndef VBOX
22#ifdef _WIN32
23#include <windows.h>
24#else
25#include <sys/types.h>
26#include <sys/mman.h>
27#endif
28#include <stdlib.h>
29#include <stdio.h>
30#include <stdarg.h>
31#include <string.h>
32#include <errno.h>
33#include <unistd.h>
34#include <inttypes.h>
35#else /* VBOX */
36# include <stdlib.h>
37# include <stdio.h>
38# include <inttypes.h>
39# include <iprt/alloc.h>
40# include <iprt/string.h>
41# include <iprt/param.h>
42#endif /* VBOX */
43
44#include "cpu.h"
45#include "exec-all.h"
46#if defined(CONFIG_USER_ONLY)
47#include <qemu.h>
48#endif
49
50//#define DEBUG_TB_INVALIDATE
51//#define DEBUG_FLUSH
52//#define DEBUG_TLB
53//#define DEBUG_UNASSIGNED
54
55/* make various TB consistency checks */
56//#define DEBUG_TB_CHECK
57//#define DEBUG_TLB_CHECK
58
59#if !defined(CONFIG_USER_ONLY)
60/* TB consistency checks only implemented for usermode emulation. */
61#undef DEBUG_TB_CHECK
62#endif
63
64/* threshold to flush the translated code buffer */
65#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)
66
67#define SMC_BITMAP_USE_THRESHOLD 10
68
69#define MMAP_AREA_START 0x00000000
70#define MMAP_AREA_END 0xa8000000
71
72#if defined(TARGET_SPARC64)
73#define TARGET_PHYS_ADDR_SPACE_BITS 41
74#elif defined(TARGET_PPC64)
75#define TARGET_PHYS_ADDR_SPACE_BITS 42
76#else
77/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
78#define TARGET_PHYS_ADDR_SPACE_BITS 32
79#endif
80
81TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
82TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
83int nb_tbs;
84/* any access to the tbs or the page table must use this lock */
85spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
86
87uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE]
88#if defined(__MINGW32__)
89 __attribute__((aligned (16)));
90#else
91 __attribute__((aligned (32)));
92#endif
93uint8_t *code_gen_ptr;
94
95#ifndef VBOX
96int phys_ram_size;
97int phys_ram_fd;
98#endif /* !VBOX */
99RTGCPHYS phys_ram_size;
100uint8_t *phys_ram_base;
101uint8_t *phys_ram_dirty;
102#ifdef VBOX
103/* we have memory ranges (the high PC-BIOS mapping) which
104 cause some pages to fall outside the dirty map here. */
105uint32_t phys_ram_dirty_size;
106#endif /* VBOX */
107
108CPUState *first_cpu;
109/* current CPU in the current thread. It is only valid inside
110 cpu_exec() */
111CPUState *cpu_single_env;
112
113typedef struct PageDesc {
114 /* list of TBs intersecting this ram page */
115 TranslationBlock *first_tb;
116 /* in order to optimize self modifying code, we count the number
117 of lookups we do to a given page to use a bitmap */
118 unsigned int code_write_count;
119 uint8_t *code_bitmap;
120#if defined(CONFIG_USER_ONLY)
121 unsigned long flags;
122#endif
123} PageDesc;
124
125typedef struct PhysPageDesc {
126 /* offset in host memory of the page + io_index in the low 12 bits */
127 uint32_t phys_offset;
128} PhysPageDesc;
129
130#define L2_BITS 10
131#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
132
133#define L1_SIZE (1 << L1_BITS)
134#define L2_SIZE (1 << L2_BITS)
135
136static void io_mem_init(void);
137
138unsigned long qemu_real_host_page_size;
139unsigned long qemu_host_page_bits;
140unsigned long qemu_host_page_size;
141unsigned long qemu_host_page_mask;
142
143/* XXX: for system emulation, it could just be an array */
144static PageDesc *l1_map[L1_SIZE];
145PhysPageDesc **l1_phys_map;
146
147/* io memory support */
148CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
149CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
150void *io_mem_opaque[IO_MEM_NB_ENTRIES];
151static int io_mem_nb;
152
153#ifndef VBOX
154/* log support */
155char *logfilename = "/tmp/qemu.log";
156#endif /* !VBOX */
157FILE *logfile;
158int loglevel;
159
160/* statistics */
161static int tlb_flush_count;
162static int tb_flush_count;
163#ifndef VBOX
164static int tb_phys_invalidate_count;
165#endif /* !VBOX */
166
167static void page_init(void)
168{
169 /* NOTE: we can always suppose that qemu_host_page_size >=
170 TARGET_PAGE_SIZE */
171#ifdef VBOX
172 RTMemProtect(code_gen_buffer, sizeof(code_gen_buffer),
173 RTMEM_PROT_EXEC | RTMEM_PROT_READ | RTMEM_PROT_WRITE);
174 qemu_real_host_page_size = PAGE_SIZE;
175#else /* !VBOX */
176#ifdef _WIN32
177 {
178 SYSTEM_INFO system_info;
179 DWORD old_protect;
180
181 GetSystemInfo(&system_info);
182 qemu_real_host_page_size = system_info.dwPageSize;
183
184 VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer),
185 PAGE_EXECUTE_READWRITE, &old_protect);
186 }
187#else
188 qemu_real_host_page_size = getpagesize();
189 {
190 unsigned long start, end;
191
192 start = (unsigned long)code_gen_buffer;
193 start &= ~(qemu_real_host_page_size - 1);
194
195 end = (unsigned long)code_gen_buffer + sizeof(code_gen_buffer);
196 end += qemu_real_host_page_size - 1;
197 end &= ~(qemu_real_host_page_size - 1);
198
199 mprotect((void *)start, end - start,
200 PROT_READ | PROT_WRITE | PROT_EXEC);
201 }
202#endif
203#endif /* !VBOX */
204
205 if (qemu_host_page_size == 0)
206 qemu_host_page_size = qemu_real_host_page_size;
207 if (qemu_host_page_size < TARGET_PAGE_SIZE)
208 qemu_host_page_size = TARGET_PAGE_SIZE;
209 qemu_host_page_bits = 0;
210 while ((1 << qemu_host_page_bits) < qemu_host_page_size)
211 qemu_host_page_bits++;
212 qemu_host_page_mask = ~(qemu_host_page_size - 1);
213 l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
214 memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
215}
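/* [Editor's illustration -- not part of the original file.]
 * Worked example of the rounding above: with a 4096-byte host page,
 * qemu_host_page_bits ends up as 12 and qemu_host_page_mask as
 * ~(4096 - 1) == 0xfffff000, so "addr & qemu_host_page_mask" rounds an
 * address down to the start of its host page.
 */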
216
217static inline PageDesc *page_find_alloc(unsigned int index)
218{
219 PageDesc **lp, *p;
220
221 lp = &l1_map[index >> L2_BITS];
222 p = *lp;
223 if (!p) {
224 /* allocate if not found */
225 p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
226 memset(p, 0, sizeof(PageDesc) * L2_SIZE);
227 *lp = p;
228 }
229 return p + (index & (L2_SIZE - 1));
230}
231
232static inline PageDesc *page_find(unsigned int index)
233{
234 PageDesc *p;
235
236 p = l1_map[index >> L2_BITS];
237 if (!p)
238 return 0;
239 return p + (index & (L2_SIZE - 1));
240}
241
242static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
243{
244 void **lp, **p;
245 PhysPageDesc *pd;
246
247 p = (void **)l1_phys_map;
248#if TARGET_PHYS_ADDR_SPACE_BITS > 32
249
250#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
251#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
252#endif
253 lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
254 p = *lp;
255 if (!p) {
256 /* allocate if not found */
257 if (!alloc)
258 return NULL;
259 p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
260 memset(p, 0, sizeof(void *) * L1_SIZE);
261 *lp = p;
262 }
263#endif
264 lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
265 pd = *lp;
266 if (!pd) {
267 int i;
268 /* allocate if not found */
269 if (!alloc)
270 return NULL;
271 pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
272 *lp = pd;
273 for (i = 0; i < L2_SIZE; i++)
274 pd[i].phys_offset = IO_MEM_UNASSIGNED;
275 }
276 return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
277}
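/* [Editor's illustration -- not part of the original file.]
 * Each PhysPageDesc.phys_offset packs two things: the page-aligned offset
 * of the page within phys_ram_base in the high bits, and an io_index
 * (IO_MEM_RAM, IO_MEM_ROM, IO_MEM_UNASSIGNED, ...) in the low bits.
 * A minimal sketch of how callers such as tlb_set_page_exec() pull the
 * two halves apart (EX_PAGE_MASK stands in for TARGET_PAGE_MASK, assuming
 * 4 KB pages; the helper itself is hypothetical):
 */
#if 0 /* example only, never compiled */
#include <stdint.h>

#define EX_PAGE_MASK (~0xfffu)            /* assumed TARGET_PAGE_MASK for 4 KB pages */

static void ex_decode_phys_offset(uint32_t pd,
                                  uint32_t *ram_offset, uint32_t *io_index)
{
    *ram_offset = pd & EX_PAGE_MASK;      /* where the page lives in guest RAM */
    *io_index   = pd & ~EX_PAGE_MASK;     /* which io_mem_read/write table to use */
}
#endif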
278
279static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
280{
281 return phys_page_find_alloc(index, 0);
282}
283
284#if !defined(CONFIG_USER_ONLY)
285static void tlb_protect_code(ram_addr_t ram_addr);
286static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
287 target_ulong vaddr);
288#endif
289
290void cpu_exec_init(CPUState *env)
291{
292 CPUState **penv;
293 int cpu_index;
294
295 if (!code_gen_ptr) {
296 code_gen_ptr = code_gen_buffer;
297 page_init();
298 io_mem_init();
299 }
300 env->next_cpu = NULL;
301 penv = &first_cpu;
302 cpu_index = 0;
303 while (*penv != NULL) {
304 penv = (CPUState **)&(*penv)->next_cpu;
305 cpu_index++;
306 }
307 env->cpu_index = cpu_index;
308 *penv = env;
309}
310
311static inline void invalidate_page_bitmap(PageDesc *p)
312{
313 if (p->code_bitmap) {
314 qemu_free(p->code_bitmap);
315 p->code_bitmap = NULL;
316 }
317 p->code_write_count = 0;
318}
319
320/* set to NULL all the 'first_tb' fields in all PageDescs */
321static void page_flush_tb(void)
322{
323 int i, j;
324 PageDesc *p;
325
326 for(i = 0; i < L1_SIZE; i++) {
327 p = l1_map[i];
328 if (p) {
329 for(j = 0; j < L2_SIZE; j++) {
330 p->first_tb = NULL;
331 invalidate_page_bitmap(p);
332 p++;
333 }
334 }
335 }
336}
337
338/* flush all the translation blocks */
339/* XXX: tb_flush is currently not thread safe */
340void tb_flush(CPUState *env1)
341{
342 CPUState *env;
343#if defined(DEBUG_FLUSH)
344 printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n",
345 code_gen_ptr - code_gen_buffer,
346 nb_tbs,
347 nb_tbs > 0 ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0);
348#endif
349 nb_tbs = 0;
350
351 for(env = first_cpu; env != NULL; env = env->next_cpu) {
352 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
353 }
354
355 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
356 page_flush_tb();
357
358 code_gen_ptr = code_gen_buffer;
359 /* XXX: flush processor icache at this point if cache flush is
360 expensive */
361 tb_flush_count++;
362}
363
364#ifdef DEBUG_TB_CHECK
365
366static void tb_invalidate_check(unsigned long address)
367{
368 TranslationBlock *tb;
369 int i;
370 address &= TARGET_PAGE_MASK;
371 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
372 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
373 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
374 address >= tb->pc + tb->size)) {
375 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
376 address, (long)tb->pc, tb->size);
377 }
378 }
379 }
380}
381
382/* verify that all the pages have correct rights for code */
383static void tb_page_check(void)
384{
385 TranslationBlock *tb;
386 int i, flags1, flags2;
387
388 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
389 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
390 flags1 = page_get_flags(tb->pc);
391 flags2 = page_get_flags(tb->pc + tb->size - 1);
392 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
393 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
394 (long)tb->pc, tb->size, flags1, flags2);
395 }
396 }
397 }
398}
399
400void tb_jmp_check(TranslationBlock *tb)
401{
402 TranslationBlock *tb1;
403 unsigned int n1;
404
405 /* suppress any remaining jumps to this TB */
406 tb1 = tb->jmp_first;
407 for(;;) {
408 n1 = (long)tb1 & 3;
409 tb1 = (TranslationBlock *)((long)tb1 & ~3);
410 if (n1 == 2)
411 break;
412 tb1 = tb1->jmp_next[n1];
413 }
414 /* check end of list */
415 if (tb1 != tb) {
416 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
417 }
418}
419
420#endif
421
422/* invalidate one TB */
423static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
424 int next_offset)
425{
426 TranslationBlock *tb1;
427 for(;;) {
428 tb1 = *ptb;
429 if (tb1 == tb) {
430 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
431 break;
432 }
433 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
434 }
435}
436
437static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
438{
439 TranslationBlock *tb1;
440 unsigned int n1;
441
442 for(;;) {
443 tb1 = *ptb;
444 n1 = (long)tb1 & 3;
445 tb1 = (TranslationBlock *)((long)tb1 & ~3);
446 if (tb1 == tb) {
447 *ptb = tb1->page_next[n1];
448 break;
449 }
450 ptb = &tb1->page_next[n1];
451 }
452}
453
454static inline void tb_jmp_remove(TranslationBlock *tb, int n)
455{
456 TranslationBlock *tb1, **ptb;
457 unsigned int n1;
458
459 ptb = &tb->jmp_next[n];
460 tb1 = *ptb;
461 if (tb1) {
462 /* find tb(n) in circular list */
463 for(;;) {
464 tb1 = *ptb;
465 n1 = (long)tb1 & 3;
466 tb1 = (TranslationBlock *)((long)tb1 & ~3);
467 if (n1 == n && tb1 == tb)
468 break;
469 if (n1 == 2) {
470 ptb = &tb1->jmp_first;
471 } else {
472 ptb = &tb1->jmp_next[n1];
473 }
474 }
475 /* now we can suppress tb(n) from the list */
476 *ptb = tb->jmp_next[n];
477
478 tb->jmp_next[n] = NULL;
479 }
480}
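/* [Editor's illustration -- not part of the original file.]
 * The page_next[] and jmp_next[]/jmp_first lists above store a small tag
 * in the two low bits of each TranslationBlock pointer (TBs are at least
 * 4-byte aligned): 0 or 1 selects which of the TB's two pages / two jump
 * slots the link belongs to, and the value 2 marks the end of the
 * circular jump list.  A minimal sketch of the pack/unpack idiom:
 */
#if 0 /* example only, never compiled */
static void *ex_tag_ptr(void *tb, unsigned int n)      /* n is 0, 1 or 2 */
{
    return (void *)((long)tb | n);
}

static void ex_untag_ptr(void *tagged, void **tb, unsigned int *n)
{
    *n  = (unsigned int)((long)tagged & 3);
    *tb = (void *)((long)tagged & ~3L);
}
#endif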
481
482/* reset the jump entry 'n' of a TB so that it is not chained to
483 another TB */
484static inline void tb_reset_jump(TranslationBlock *tb, int n)
485{
486 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
487}
488
489static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
490{
491 CPUState *env;
492 PageDesc *p;
493 unsigned int h, n1;
494 target_ulong phys_pc;
495 TranslationBlock *tb1, *tb2;
496
497 /* remove the TB from the hash list */
498 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
499 h = tb_phys_hash_func(phys_pc);
500 tb_remove(&tb_phys_hash[h], tb,
501 offsetof(TranslationBlock, phys_hash_next));
502
503 /* remove the TB from the page list */
504 if (tb->page_addr[0] != page_addr) {
505 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
506 tb_page_remove(&p->first_tb, tb);
507 invalidate_page_bitmap(p);
508 }
509 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
510 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
511 tb_page_remove(&p->first_tb, tb);
512 invalidate_page_bitmap(p);
513 }
514
515 tb_invalidated_flag = 1;
516
517 /* remove the TB from the hash list */
518 h = tb_jmp_cache_hash_func(tb->pc);
519 for(env = first_cpu; env != NULL; env = env->next_cpu) {
520 if (env->tb_jmp_cache[h] == tb)
521 env->tb_jmp_cache[h] = NULL;
522 }
523
524 /* suppress this TB from the two jump lists */
525 tb_jmp_remove(tb, 0);
526 tb_jmp_remove(tb, 1);
527
528 /* suppress any remaining jumps to this TB */
529 tb1 = tb->jmp_first;
530 for(;;) {
531 n1 = (long)tb1 & 3;
532 if (n1 == 2)
533 break;
534 tb1 = (TranslationBlock *)((long)tb1 & ~3);
535 tb2 = tb1->jmp_next[n1];
536 tb_reset_jump(tb1, n1);
537 tb1->jmp_next[n1] = NULL;
538 tb1 = tb2;
539 }
540 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
541
542#ifndef VBOX
543 tb_phys_invalidate_count++;
544#endif /* !VBOX */
545}
546
547#ifdef VBOX
548void tb_invalidate_virt(CPUState *env, uint32_t eip)
549{
550# if 1
551 tb_flush(env);
552# else
553 uint8_t *cs_base, *pc;
554 unsigned int flags, h, phys_pc;
555 TranslationBlock *tb, **ptb;
556
557 flags = env->hflags;
558 flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
559 cs_base = env->segs[R_CS].base;
560 pc = cs_base + eip;
561
562 tb = tb_find(&ptb, (unsigned long)pc, (unsigned long)cs_base,
563 flags);
564
565 if(tb)
566 {
567# ifdef DEBUG
568 printf("invalidating TB (%08X) at %08X\n", tb, eip);
569# endif
570 tb_invalidate(tb);
571 //Note: this will leak TBs, but the whole cache will be flushed
572 // when it happens too often
573 tb->pc = 0;
574 tb->cs_base = 0;
575 tb->flags = 0;
576 }
577# endif
578}
579
580# ifdef VBOX_STRICT
581/**
582 * Gets the page offset.
583 */
584unsigned long get_phys_page_offset(target_ulong addr)
585{
586 PhysPageDesc *p = phys_page_find(addr >> TARGET_PAGE_BITS);
587 return p ? p->phys_offset : 0;
588}
589# endif /* VBOX_STRICT */
590#endif /* VBOX */
591
592static inline void set_bits(uint8_t *tab, int start, int len)
593{
594 int end, mask, end1;
595
596 end = start + len;
597 tab += start >> 3;
598 mask = 0xff << (start & 7);
599 if ((start & ~7) == (end & ~7)) {
600 if (start < end) {
601 mask &= ~(0xff << (end & 7));
602 *tab |= mask;
603 }
604 } else {
605 *tab++ |= mask;
606 start = (start + 8) & ~7;
607 end1 = end & ~7;
608 while (start < end1) {
609 *tab++ = 0xff;
610 start += 8;
611 }
612 if (start < end) {
613 mask = ~(0xff << (end & 7));
614 *tab |= mask;
615 }
616 }
617}
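/* [Editor's illustration -- not part of the original file.]
 * Worked example for set_bits(): set_bits(tab, 5, 10) marks bits 5..14.
 * The run crosses a byte boundary, so the first step ORs 0xe0 into tab[0]
 * (bits 5..7) and the tail ORs 0x7f into tab[1] (bits 8..14).  A tiny
 * self-check against the function above:
 */
#if 0 /* example only, never compiled */
#include <assert.h>
#include <stdint.h>
#include <string.h>

static void ex_check_set_bits(void)
{
    uint8_t tab[2];
    memset(tab, 0, sizeof(tab));
    set_bits(tab, 5, 10);
    assert(tab[0] == 0xe0 && tab[1] == 0x7f);
}
#endif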
618
619static void build_page_bitmap(PageDesc *p)
620{
621 int n, tb_start, tb_end;
622 TranslationBlock *tb;
623
624 p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
625 if (!p->code_bitmap)
626 return;
627 memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);
628
629 tb = p->first_tb;
630 while (tb != NULL) {
631 n = (long)tb & 3;
632 tb = (TranslationBlock *)((long)tb & ~3);
633 /* NOTE: this is subtle as a TB may span two physical pages */
634 if (n == 0) {
635 /* NOTE: tb_end may be after the end of the page, but
636 it is not a problem */
637 tb_start = tb->pc & ~TARGET_PAGE_MASK;
638 tb_end = tb_start + tb->size;
639 if (tb_end > TARGET_PAGE_SIZE)
640 tb_end = TARGET_PAGE_SIZE;
641 } else {
642 tb_start = 0;
643 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
644 }
645 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
646 tb = tb->page_next[n];
647 }
648}
649
650#ifdef TARGET_HAS_PRECISE_SMC
651
652static void tb_gen_code(CPUState *env,
653 target_ulong pc, target_ulong cs_base, int flags,
654 int cflags)
655{
656 TranslationBlock *tb;
657 uint8_t *tc_ptr;
658 target_ulong phys_pc, phys_page2, virt_page2;
659 int code_gen_size;
660
661 phys_pc = get_phys_addr_code(env, pc);
662 tb = tb_alloc(pc);
663 if (!tb) {
664 /* flush must be done */
665 tb_flush(env);
666 /* cannot fail at this point */
667 tb = tb_alloc(pc);
668 }
669 tc_ptr = code_gen_ptr;
670 tb->tc_ptr = tc_ptr;
671 tb->cs_base = cs_base;
672 tb->flags = flags;
673 tb->cflags = cflags;
674 cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
675 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
676
677 /* check next page if needed */
678 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
679 phys_page2 = -1;
680 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
681 phys_page2 = get_phys_addr_code(env, virt_page2);
682 }
683 tb_link_phys(tb, phys_pc, phys_page2);
684}
685#endif
686
687/* invalidate all TBs which intersect with the target physical page
688 starting in range [start;end[. NOTE: start and end must refer to
689 the same physical page. 'is_cpu_write_access' should be true if called
690 from a real cpu write access: the virtual CPU will exit the current
691 TB if code is modified inside this TB. */
692void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
693 int is_cpu_write_access)
694{
695 int n, current_tb_modified, current_tb_not_found, current_flags;
696 CPUState *env = cpu_single_env;
697 PageDesc *p;
698 TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
699 target_ulong tb_start, tb_end;
700 target_ulong current_pc, current_cs_base;
701
702 p = page_find(start >> TARGET_PAGE_BITS);
703 if (!p)
704 return;
705 if (!p->code_bitmap &&
706 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
707 is_cpu_write_access) {
708 /* build code bitmap */
709 build_page_bitmap(p);
710 }
711
712 /* we remove all the TBs in the range [start, end[ */
713 /* XXX: see if in some cases it could be faster to invalidate all the code */
714 current_tb_not_found = is_cpu_write_access;
715 current_tb_modified = 0;
716 current_tb = NULL; /* avoid warning */
717 current_pc = 0; /* avoid warning */
718 current_cs_base = 0; /* avoid warning */
719 current_flags = 0; /* avoid warning */
720 tb = p->first_tb;
721 while (tb != NULL) {
722 n = (long)tb & 3;
723 tb = (TranslationBlock *)((long)tb & ~3);
724 tb_next = tb->page_next[n];
725 /* NOTE: this is subtle as a TB may span two physical pages */
726 if (n == 0) {
727 /* NOTE: tb_end may be after the end of the page, but
728 it is not a problem */
729 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
730 tb_end = tb_start + tb->size;
731 } else {
732 tb_start = tb->page_addr[1];
733 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
734 }
735 if (!(tb_end <= start || tb_start >= end)) {
736#ifdef TARGET_HAS_PRECISE_SMC
737 if (current_tb_not_found) {
738 current_tb_not_found = 0;
739 current_tb = NULL;
740 if (env->mem_write_pc) {
741 /* now we have a real cpu fault */
742 current_tb = tb_find_pc(env->mem_write_pc);
743 }
744 }
745 if (current_tb == tb &&
746 !(current_tb->cflags & CF_SINGLE_INSN)) {
747 /* If we are modifying the current TB, we must stop
748 its execution. We could be more precise by checking
749 that the modification is after the current PC, but it
750 would require a specialized function to partially
751 restore the CPU state */
752
753 current_tb_modified = 1;
754 cpu_restore_state(current_tb, env,
755 env->mem_write_pc, NULL);
756#if defined(TARGET_I386)
757 current_flags = env->hflags;
758 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
759 current_cs_base = (target_ulong)env->segs[R_CS].base;
760 current_pc = current_cs_base + env->eip;
761#else
762#error unsupported CPU
763#endif
764 }
765#endif /* TARGET_HAS_PRECISE_SMC */
766 /* we need to do that to handle the case where a signal
767 occurs while doing tb_phys_invalidate() */
768 saved_tb = NULL;
769 if (env) {
770 saved_tb = env->current_tb;
771 env->current_tb = NULL;
772 }
773 tb_phys_invalidate(tb, -1);
774 if (env) {
775 env->current_tb = saved_tb;
776 if (env->interrupt_request && env->current_tb)
777 cpu_interrupt(env, env->interrupt_request);
778 }
779 }
780 tb = tb_next;
781 }
782#if !defined(CONFIG_USER_ONLY)
783 /* if no code remaining, no need to continue to use slow writes */
784 if (!p->first_tb) {
785 invalidate_page_bitmap(p);
786 if (is_cpu_write_access) {
787 tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
788 }
789 }
790#endif
791#ifdef TARGET_HAS_PRECISE_SMC
792 if (current_tb_modified) {
793 /* we generate a block containing just the instruction
794 modifying the memory. It will ensure that it cannot modify
795 itself */
796 env->current_tb = NULL;
797 tb_gen_code(env, current_pc, current_cs_base, current_flags,
798 CF_SINGLE_INSN);
799 cpu_resume_from_signal(env, NULL);
800 }
801#endif
802}
803
804/* len must be <= 8 and start must be a multiple of len */
805static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
806{
807 PageDesc *p;
808 int offset, b;
809#if 0
810 if (1) {
811 if (loglevel) {
812 fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
813 cpu_single_env->mem_write_vaddr, len,
814 cpu_single_env->eip,
815 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
816 }
817 }
818#endif
819 p = page_find(start >> TARGET_PAGE_BITS);
820 if (!p)
821 return;
822 if (p->code_bitmap) {
823 offset = start & ~TARGET_PAGE_MASK;
824 b = p->code_bitmap[offset >> 3] >> (offset & 7);
825 if (b & ((1 << len) - 1))
826 goto do_invalidate;
827 } else {
828 do_invalidate:
829 tb_invalidate_phys_page_range(start, start + len, 1);
830 }
831}
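/* [Editor's illustration -- not part of the original file.]
 * Worked example for the bitmap test above: for a 2-byte write at page
 * offset 0x105, offset >> 3 == 0x20 and offset & 7 == 5, so the code
 * shifts code_bitmap[0x20] right by 5 and tests the low len == 2 bits,
 * i.e. exactly bits 5 and 6 of that byte -- one bitmap bit per byte of
 * the page, as filled in by build_page_bitmap()/set_bits().
 */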
832
833#if !defined(CONFIG_SOFTMMU)
834static void tb_invalidate_phys_page(target_ulong addr,
835 unsigned long pc, void *puc)
836{
837 int n, current_flags, current_tb_modified;
838 target_ulong current_pc, current_cs_base;
839 PageDesc *p;
840 TranslationBlock *tb, *current_tb;
841#ifdef TARGET_HAS_PRECISE_SMC
842 CPUState *env = cpu_single_env;
843#endif
844
845 addr &= TARGET_PAGE_MASK;
846 p = page_find(addr >> TARGET_PAGE_BITS);
847 if (!p)
848 return;
849 tb = p->first_tb;
850 current_tb_modified = 0;
851 current_tb = NULL;
852 current_pc = 0; /* avoid warning */
853 current_cs_base = 0; /* avoid warning */
854 current_flags = 0; /* avoid warning */
855#ifdef TARGET_HAS_PRECISE_SMC
856 if (tb && pc != 0) {
857 current_tb = tb_find_pc(pc);
858 }
859#endif
860 while (tb != NULL) {
861 n = (long)tb & 3;
862 tb = (TranslationBlock *)((long)tb & ~3);
863#ifdef TARGET_HAS_PRECISE_SMC
864 if (current_tb == tb &&
865 !(current_tb->cflags & CF_SINGLE_INSN)) {
866 /* If we are modifying the current TB, we must stop
867 its execution. We could be more precise by checking
868 that the modification is after the current PC, but it
869 would require a specialized function to partially
870 restore the CPU state */
871
872 current_tb_modified = 1;
873 cpu_restore_state(current_tb, env, pc, puc);
874#if defined(TARGET_I386)
875 current_flags = env->hflags;
876 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
877 current_cs_base = (target_ulong)env->segs[R_CS].base;
878 current_pc = current_cs_base + env->eip;
879#else
880#error unsupported CPU
881#endif
882 }
883#endif /* TARGET_HAS_PRECISE_SMC */
884 tb_phys_invalidate(tb, addr);
885 tb = tb->page_next[n];
886 }
887 p->first_tb = NULL;
888#ifdef TARGET_HAS_PRECISE_SMC
889 if (current_tb_modified) {
890 /* we generate a block containing just the instruction
891 modifying the memory. It will ensure that it cannot modify
892 itself */
893 env->current_tb = NULL;
894 tb_gen_code(env, current_pc, current_cs_base, current_flags,
895 CF_SINGLE_INSN);
896 cpu_resume_from_signal(env, puc);
897 }
898#endif
899}
900#endif
901
902/* add the tb in the target page and protect it if necessary */
903static inline void tb_alloc_page(TranslationBlock *tb,
904 unsigned int n, target_ulong page_addr)
905{
906 PageDesc *p;
907 TranslationBlock *last_first_tb;
908
909 tb->page_addr[n] = page_addr;
910 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
911 tb->page_next[n] = p->first_tb;
912 last_first_tb = p->first_tb;
913 p->first_tb = (TranslationBlock *)((long)tb | n);
914 invalidate_page_bitmap(p);
915
916#if defined(TARGET_HAS_SMC) || 1
917
918#if defined(CONFIG_USER_ONLY)
919 if (p->flags & PAGE_WRITE) {
920 target_ulong addr;
921 PageDesc *p2;
922 int prot;
923
924 /* force the host page as non writable (writes will have a
925 page fault + mprotect overhead) */
926 page_addr &= qemu_host_page_mask;
927 prot = 0;
928 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
929 addr += TARGET_PAGE_SIZE) {
930
931 p2 = page_find (addr >> TARGET_PAGE_BITS);
932 if (!p2)
933 continue;
934 prot |= p2->flags;
935 p2->flags &= ~PAGE_WRITE;
936 page_get_flags(addr);
937 }
938 mprotect(g2h(page_addr), qemu_host_page_size,
939 (prot & PAGE_BITS) & ~PAGE_WRITE);
940#ifdef DEBUG_TB_INVALIDATE
941 printf("protecting code page: 0x%08lx\n",
942 page_addr);
943#endif
944 }
945#else
946 /* if some code is already present, then the pages are already
947 protected. So we handle the case where only the first TB is
948 allocated in a physical page */
949 if (!last_first_tb) {
950 tlb_protect_code(page_addr);
951 }
952#endif
953
954#endif /* TARGET_HAS_SMC */
955}
956
957/* Allocate a new translation block. Flush the translation buffer if
958 too many translation blocks or too much generated code. */
959TranslationBlock *tb_alloc(target_ulong pc)
960{
961 TranslationBlock *tb;
962
963 if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
964 (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
965 return NULL;
966 tb = &tbs[nb_tbs++];
967 tb->pc = pc;
968 tb->cflags = 0;
969 return tb;
970}
971
972/* add a new TB and link it to the physical page tables. phys_page2 is
973 (-1) to indicate that only one page contains the TB. */
974void tb_link_phys(TranslationBlock *tb,
975 target_ulong phys_pc, target_ulong phys_page2)
976{
977 unsigned int h;
978 TranslationBlock **ptb;
979
980 /* add in the physical hash table */
981 h = tb_phys_hash_func(phys_pc);
982 ptb = &tb_phys_hash[h];
983 tb->phys_hash_next = *ptb;
984 *ptb = tb;
985
986 /* add in the page list */
987 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
988 if (phys_page2 != -1)
989 tb_alloc_page(tb, 1, phys_page2);
990 else
991 tb->page_addr[1] = -1;
992
993 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
994 tb->jmp_next[0] = NULL;
995 tb->jmp_next[1] = NULL;
996#ifdef USE_CODE_COPY
997 tb->cflags &= ~CF_FP_USED;
998 if (tb->cflags & CF_TB_FP_USED)
999 tb->cflags |= CF_FP_USED;
1000#endif
1001
1002 /* init original jump addresses */
1003 if (tb->tb_next_offset[0] != 0xffff)
1004 tb_reset_jump(tb, 0);
1005 if (tb->tb_next_offset[1] != 0xffff)
1006 tb_reset_jump(tb, 1);
1007
1008#ifdef DEBUG_TB_CHECK
1009 tb_page_check();
1010#endif
1011}
1012
1013/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1014 tb[1].tc_ptr. Return NULL if not found */
1015TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1016{
1017 int m_min, m_max, m;
1018 unsigned long v;
1019 TranslationBlock *tb;
1020
1021 if (nb_tbs <= 0)
1022 return NULL;
1023 if (tc_ptr < (unsigned long)code_gen_buffer ||
1024 tc_ptr >= (unsigned long)code_gen_ptr)
1025 return NULL;
1026 /* binary search (cf Knuth) */
1027 m_min = 0;
1028 m_max = nb_tbs - 1;
1029 while (m_min <= m_max) {
1030 m = (m_min + m_max) >> 1;
1031 tb = &tbs[m];
1032 v = (unsigned long)tb->tc_ptr;
1033 if (v == tc_ptr)
1034 return tb;
1035 else if (tc_ptr < v) {
1036 m_max = m - 1;
1037 } else {
1038 m_min = m + 1;
1039 }
1040 }
1041 return &tbs[m_max];
1042}
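/* [Editor's illustration -- not part of the original file.]
 * tb_find_pc() answers "which TB's generated code contains this host PC?".
 * Because tbs[] is filled in code_gen_buffer order, the tc_ptr values are
 * sorted, and when the loop exits without an exact hit, tbs[m_max] is the
 * last block starting at or before tc_ptr, i.e. the enclosing one.
 * A minimal standalone model of that invariant:
 */
#if 0 /* example only, never compiled */
#include <assert.h>

static int ex_find_leq(const unsigned long *starts, int n, unsigned long pc)
{
    int lo = 0, hi = n - 1, mid;
    while (lo <= hi) {
        mid = (lo + hi) >> 1;
        if (starts[mid] == pc)
            return mid;
        if (pc < starts[mid])
            hi = mid - 1;
        else
            lo = mid + 1;
    }
    return hi;                              /* index of the last start <= pc */
}

static void ex_check_find_leq(void)
{
    unsigned long starts[] = { 100, 200, 300 };
    assert(ex_find_leq(starts, 3, 250) == 1);   /* pc 250 falls inside block 1 */
}
#endif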
1043
1044static void tb_reset_jump_recursive(TranslationBlock *tb);
1045
1046static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1047{
1048 TranslationBlock *tb1, *tb_next, **ptb;
1049 unsigned int n1;
1050
1051 tb1 = tb->jmp_next[n];
1052 if (tb1 != NULL) {
1053 /* find head of list */
1054 for(;;) {
1055 n1 = (long)tb1 & 3;
1056 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1057 if (n1 == 2)
1058 break;
1059 tb1 = tb1->jmp_next[n1];
1060 }
1061 /* we are now sure that tb jumps to tb1 */
1062 tb_next = tb1;
1063
1064 /* remove tb from the jmp_first list */
1065 ptb = &tb_next->jmp_first;
1066 for(;;) {
1067 tb1 = *ptb;
1068 n1 = (long)tb1 & 3;
1069 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1070 if (n1 == n && tb1 == tb)
1071 break;
1072 ptb = &tb1->jmp_next[n1];
1073 }
1074 *ptb = tb->jmp_next[n];
1075 tb->jmp_next[n] = NULL;
1076
1077 /* suppress the jump to next tb in generated code */
1078 tb_reset_jump(tb, n);
1079
1080 /* suppress jumps in the tb on which we could have jumped */
1081 tb_reset_jump_recursive(tb_next);
1082 }
1083}
1084
1085static void tb_reset_jump_recursive(TranslationBlock *tb)
1086{
1087 tb_reset_jump_recursive2(tb, 0);
1088 tb_reset_jump_recursive2(tb, 1);
1089}
1090
1091#if defined(TARGET_HAS_ICE)
1092static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1093{
1094 target_ulong addr, pd;
1095 ram_addr_t ram_addr;
1096 PhysPageDesc *p;
1097
1098 addr = cpu_get_phys_page_debug(env, pc);
1099 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1100 if (!p) {
1101 pd = IO_MEM_UNASSIGNED;
1102 } else {
1103 pd = p->phys_offset;
1104 }
1105 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1106 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1107}
1108#endif
1109
1110/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
1111 breakpoint is reached */
1112int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
1113{
1114#if defined(TARGET_HAS_ICE)
1115 int i;
1116
1117 for(i = 0; i < env->nb_breakpoints; i++) {
1118 if (env->breakpoints[i] == pc)
1119 return 0;
1120 }
1121
1122 if (env->nb_breakpoints >= MAX_BREAKPOINTS)
1123 return -1;
1124 env->breakpoints[env->nb_breakpoints++] = pc;
1125
1126 breakpoint_invalidate(env, pc);
1127 return 0;
1128#else
1129 return -1;
1130#endif
1131}
1132
1133/* remove a breakpoint */
1134int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
1135{
1136#if defined(TARGET_HAS_ICE)
1137 int i;
1138 for(i = 0; i < env->nb_breakpoints; i++) {
1139 if (env->breakpoints[i] == pc)
1140 goto found;
1141 }
1142 return -1;
1143 found:
1144 env->nb_breakpoints--;
1145 if (i < env->nb_breakpoints)
1146 env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];
1147
1148 breakpoint_invalidate(env, pc);
1149 return 0;
1150#else
1151 return -1;
1152#endif
1153}
1154
1155/* enable or disable single step mode. EXCP_DEBUG is returned by the
1156 CPU loop after each instruction */
1157void cpu_single_step(CPUState *env, int enabled)
1158{
1159#if defined(TARGET_HAS_ICE)
1160 if (env->singlestep_enabled != enabled) {
1161 env->singlestep_enabled = enabled;
1162 /* must flush all the translated code to avoid inconsistencies */
1163 /* XXX: only flush what is necessary */
1164 tb_flush(env);
1165 }
1166#endif
1167}
1168
1169#ifndef VBOX
1170/* enable or disable low-level logging */
1171void cpu_set_log(int log_flags)
1172{
1173 loglevel = log_flags;
1174 if (loglevel && !logfile) {
1175 logfile = fopen(logfilename, "w");
1176 if (!logfile) {
1177 perror(logfilename);
1178 _exit(1);
1179 }
1180#if !defined(CONFIG_SOFTMMU)
1181 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1182 {
1183 static uint8_t logfile_buf[4096];
1184 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1185 }
1186#else
1187 setvbuf(logfile, NULL, _IOLBF, 0);
1188#endif
1189 }
1190}
1191
1192void cpu_set_log_filename(const char *filename)
1193{
1194 logfilename = strdup(filename);
1195}
1196#endif /* !VBOX */
1197
1198/* mask must never be zero, except for A20 change call */
1199void cpu_interrupt(CPUState *env, int mask)
1200{
1201 TranslationBlock *tb;
1202 static int interrupt_lock;
1203
1204#ifdef VBOX
1205 VM_ASSERT_EMT(env->pVM);
1206 ASMAtomicOrS32(&env->interrupt_request, mask);
1207#else /* !VBOX */
1208 env->interrupt_request |= mask;
1209#endif /* !VBOX */
1210 /* if the cpu is currently executing code, we must unlink it and
1211 all the potentially executing TB */
1212 tb = env->current_tb;
1213 if (tb && !testandset(&interrupt_lock)) {
1214 env->current_tb = NULL;
1215 tb_reset_jump_recursive(tb);
1216 interrupt_lock = 0;
1217 }
1218}
1219
1220void cpu_reset_interrupt(CPUState *env, int mask)
1221{
1222#ifdef VBOX
1223 /*
1224 * Note: the current implementation can be executed by another thread without problems; make sure this remains true
1225 * for future changes!
1226 */
1227 ASMAtomicAndS32(&env->interrupt_request, ~mask);
1228#else /* !VBOX */
1229 env->interrupt_request &= ~mask;
1230#endif /* !VBOX */
1231}
1232
1233#ifndef VBOX
1234CPULogItem cpu_log_items[] = {
1235 { CPU_LOG_TB_OUT_ASM, "out_asm",
1236 "show generated host assembly code for each compiled TB" },
1237 { CPU_LOG_TB_IN_ASM, "in_asm",
1238 "show target assembly code for each compiled TB" },
1239 { CPU_LOG_TB_OP, "op",
1240 "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
1241#ifdef TARGET_I386
1242 { CPU_LOG_TB_OP_OPT, "op_opt",
1243 "show micro ops after optimization for each compiled TB" },
1244#endif
1245 { CPU_LOG_INT, "int",
1246 "show interrupts/exceptions in short format" },
1247 { CPU_LOG_EXEC, "exec",
1248 "show trace before each executed TB (lots of logs)" },
1249 { CPU_LOG_TB_CPU, "cpu",
1250 "show CPU state before bloc translation" },
1251#ifdef TARGET_I386
1252 { CPU_LOG_PCALL, "pcall",
1253 "show protected mode far calls/returns/exceptions" },
1254#endif
1255#ifdef DEBUG_IOPORT
1256 { CPU_LOG_IOPORT, "ioport",
1257 "show all i/o ports accesses" },
1258#endif
1259 { 0, NULL, NULL },
1260};
1261
1262static int cmp1(const char *s1, int n, const char *s2)
1263{
1264 if (strlen(s2) != n)
1265 return 0;
1266 return memcmp(s1, s2, n) == 0;
1267}
1268
1269/* takes a comma separated list of log masks. Return 0 if error. */
1270int cpu_str_to_log_mask(const char *str)
1271{
1272 CPULogItem *item;
1273 int mask;
1274 const char *p, *p1;
1275
1276 p = str;
1277 mask = 0;
1278 for(;;) {
1279 p1 = strchr(p, ',');
1280 if (!p1)
1281 p1 = p + strlen(p);
1282 if(cmp1(p,p1-p,"all")) {
1283 for(item = cpu_log_items; item->mask != 0; item++) {
1284 mask |= item->mask;
1285 }
1286 } else {
1287 for(item = cpu_log_items; item->mask != 0; item++) {
1288 if (cmp1(p, p1 - p, item->name))
1289 goto found;
1290 }
1291 return 0;
1292 }
1293 found:
1294 mask |= item->mask;
1295 if (*p1 != ',')
1296 break;
1297 p = p1 + 1;
1298 }
1299 return mask;
1300}
1301#endif /* !VBOX */
1302
1303#if !defined(VBOX) /* VBOX: we have our own routine. */
1304void cpu_abort(CPUState *env, const char *fmt, ...)
1305{
1306 va_list ap;
1307
1308 va_start(ap, fmt);
1309 fprintf(stderr, "qemu: fatal: ");
1310 vfprintf(stderr, fmt, ap);
1311 fprintf(stderr, "\n");
1312#ifdef TARGET_I386
1313 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1314#else
1315 cpu_dump_state(env, stderr, fprintf, 0);
1316#endif
1317 va_end(ap);
1318 abort();
1319}
1320#endif /* !VBOX */
1321
1322#if !defined(CONFIG_USER_ONLY)
1323
1324/* NOTE: if flush_global is true, also flush global entries (not
1325 implemented yet) */
1326void tlb_flush(CPUState *env, int flush_global)
1327{
1328 int i;
1329
1330#if defined(DEBUG_TLB)
1331 printf("tlb_flush:\n");
1332#endif
1333 /* must reset current TB so that interrupts cannot modify the
1334 links while we are modifying them */
1335 env->current_tb = NULL;
1336
1337 for(i = 0; i < CPU_TLB_SIZE; i++) {
1338 env->tlb_table[0][i].addr_read = -1;
1339 env->tlb_table[0][i].addr_write = -1;
1340 env->tlb_table[0][i].addr_code = -1;
1341 env->tlb_table[1][i].addr_read = -1;
1342 env->tlb_table[1][i].addr_write = -1;
1343 env->tlb_table[1][i].addr_code = -1;
1344 }
1345
1346 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1347
1348#if !defined(CONFIG_SOFTMMU)
1349 munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
1350#endif
1351#ifdef VBOX
1352 /* inform raw mode about TLB flush */
1353 remR3FlushTLB(env, flush_global);
1354#endif
1355#ifdef USE_KQEMU
1356 if (env->kqemu_enabled) {
1357 kqemu_flush(env, flush_global);
1358 }
1359#endif
1360 tlb_flush_count++;
1361}
1362
1363static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1364{
1365 if (addr == (tlb_entry->addr_read &
1366 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1367 addr == (tlb_entry->addr_write &
1368 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1369 addr == (tlb_entry->addr_code &
1370 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1371 tlb_entry->addr_read = -1;
1372 tlb_entry->addr_write = -1;
1373 tlb_entry->addr_code = -1;
1374 }
1375}
1376
1377void tlb_flush_page(CPUState *env, target_ulong addr)
1378{
1379 int i;
1380 TranslationBlock *tb;
1381
1382#if defined(DEBUG_TLB)
1383 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1384#endif
1385 /* must reset current TB so that interrupts cannot modify the
1386 links while we are modifying them */
1387 env->current_tb = NULL;
1388
1389 addr &= TARGET_PAGE_MASK;
1390 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1391 tlb_flush_entry(&env->tlb_table[0][i], addr);
1392 tlb_flush_entry(&env->tlb_table[1][i], addr);
1393
1394 /* Discard jump cache entries for any tb which might potentially
1395 overlap the flushed page. */
1396 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1397 memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));
1398
1399 i = tb_jmp_cache_hash_page(addr);
1400 memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));
1401
1402#if !defined(CONFIG_SOFTMMU)
1403 if (addr < MMAP_AREA_END)
1404 munmap((void *)addr, TARGET_PAGE_SIZE);
1405#endif
1406#ifdef VBOX
1407 /* inform raw mode about TLB page flush */
1408 remR3FlushPage(env, addr);
1409#endif /* VBOX */
1410#ifdef USE_KQEMU
1411 if (env->kqemu_enabled) {
1412 kqemu_flush_page(env, addr);
1413 }
1414#endif
1415}
1416
1417/* update the TLBs so that writes to code in the virtual page 'addr'
1418 can be detected */
1419static void tlb_protect_code(ram_addr_t ram_addr)
1420{
1421 cpu_physical_memory_reset_dirty(ram_addr,
1422 ram_addr + TARGET_PAGE_SIZE,
1423 CODE_DIRTY_FLAG);
1424#if defined(VBOX) && defined(REM_MONITOR_CODE_PAGES)
1425 /** @todo Retest this? This function has changed... */
1426 remR3ProtectCode(cpu_single_env, ram_addr);
1427#endif
1428}
1429
1430/* update the TLB so that writes in physical page 'phys_addr' are no longer
1431 tested for self modifying code */
1432static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1433 target_ulong vaddr)
1434{
1435#ifdef VBOX
1436 if (RT_LIKELY((ram_addr >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
1437#endif
1438 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1439}
1440
1441static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1442 unsigned long start, unsigned long length)
1443{
1444 unsigned long addr;
1445 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1446 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1447 if ((addr - start) < length) {
1448 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
1449 }
1450 }
1451}
1452
1453void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1454 int dirty_flags)
1455{
1456 CPUState *env;
1457 unsigned long length, start1;
1458 int i, mask, len;
1459 uint8_t *p;
1460
1461 start &= TARGET_PAGE_MASK;
1462 end = TARGET_PAGE_ALIGN(end);
1463
1464 length = end - start;
1465 if (length == 0)
1466 return;
1467 len = length >> TARGET_PAGE_BITS;
1468#ifdef USE_KQEMU
1469 /* XXX: should not depend on cpu context */
1470 env = first_cpu;
1471 if (env->kqemu_enabled) {
1472 ram_addr_t addr;
1473 addr = start;
1474 for(i = 0; i < len; i++) {
1475 kqemu_set_notdirty(env, addr);
1476 addr += TARGET_PAGE_SIZE;
1477 }
1478 }
1479#endif
1480 mask = ~dirty_flags;
1481 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1482#ifdef VBOX
1483 if (RT_LIKELY((start >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
1484#endif
1485 for(i = 0; i < len; i++)
1486 p[i] &= mask;
1487
1488 /* we modify the TLB cache so that the dirty bit will be set again
1489 when accessing the range */
1490 start1 = start + (unsigned long)phys_ram_base;
1491 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1492 for(i = 0; i < CPU_TLB_SIZE; i++)
1493 tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
1494 for(i = 0; i < CPU_TLB_SIZE; i++)
1495 tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
1496 }
1497
1498#if !defined(CONFIG_SOFTMMU)
1499#ifdef VBOX /**@todo remove this check */
1500# error "We shouldn't get here..."
1501#endif
1502 /* XXX: this is expensive */
1503 {
1504 VirtPageDesc *p;
1505 int j;
1506 target_ulong addr;
1507
1508 for(i = 0; i < L1_SIZE; i++) {
1509 p = l1_virt_map[i];
1510 if (p) {
1511 addr = i << (TARGET_PAGE_BITS + L2_BITS);
1512 for(j = 0; j < L2_SIZE; j++) {
1513 if (p->valid_tag == virt_valid_tag &&
1514 p->phys_addr >= start && p->phys_addr < end &&
1515 (p->prot & PROT_WRITE)) {
1516 if (addr < MMAP_AREA_END) {
1517 mprotect((void *)addr, TARGET_PAGE_SIZE,
1518 p->prot & ~PROT_WRITE);
1519 }
1520 }
1521 addr += TARGET_PAGE_SIZE;
1522 p++;
1523 }
1524 }
1525 }
1526 }
1527#endif
1528}
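/* [Editor's illustration -- not part of the original file.]
 * phys_ram_dirty keeps one byte of dirty flags per target page, indexed by
 * ram_addr >> TARGET_PAGE_BITS; cpu_physical_memory_reset_dirty() above
 * simply ANDs ~dirty_flags over the bytes of the range and then rewrites
 * the TLB entries so the next store takes the slow (dirty-tracking) path.
 * A minimal sketch of the per-page indexing, assuming 4 KB target pages:
 */
#if 0 /* example only, never compiled */
#include <stdint.h>

#define EX_PAGE_BITS 12

static int ex_page_is_dirty(const uint8_t *dirty_map, uint32_t ram_addr, uint8_t flag)
{
    return (dirty_map[ram_addr >> EX_PAGE_BITS] & flag) != 0;
}
#endif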
1529
1530static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1531{
1532 ram_addr_t ram_addr;
1533
1534 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1535 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
1536 tlb_entry->addend - (unsigned long)phys_ram_base;
1537 if (!cpu_physical_memory_is_dirty(ram_addr)) {
1538 tlb_entry->addr_write |= IO_MEM_NOTDIRTY;
1539 }
1540 }
1541}
1542
1543/* update the TLB according to the current state of the dirty bits */
1544void cpu_tlb_update_dirty(CPUState *env)
1545{
1546 int i;
1547 for(i = 0; i < CPU_TLB_SIZE; i++)
1548 tlb_update_dirty(&env->tlb_table[0][i]);
1549 for(i = 0; i < CPU_TLB_SIZE; i++)
1550 tlb_update_dirty(&env->tlb_table[1][i]);
1551}
1552
1553static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
1554 unsigned long start)
1555{
1556 unsigned long addr;
1557 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
1558 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1559 if (addr == start) {
1560 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_RAM;
1561 }
1562 }
1563}
1564
1565/* update the TLB corresponding to virtual page vaddr and phys addr
1566 addr so that it is no longer dirty */
1567static inline void tlb_set_dirty(CPUState *env,
1568 unsigned long addr, target_ulong vaddr)
1569{
1570 int i;
1571
1572 addr &= TARGET_PAGE_MASK;
1573 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1574 tlb_set_dirty1(&env->tlb_table[0][i], addr);
1575 tlb_set_dirty1(&env->tlb_table[1][i], addr);
1576}
1577
1578/* add a new TLB entry. At most one entry for a given virtual address
1579 is permitted. Return 0 if OK or 2 if the page could not be mapped
1580 (can only happen in non SOFTMMU mode for I/O pages or pages
1581 conflicting with the host address space). */
1582int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1583 target_phys_addr_t paddr, int prot,
1584 int is_user, int is_softmmu)
1585{
1586 PhysPageDesc *p;
1587 unsigned long pd;
1588 unsigned int index;
1589 target_ulong address;
1590 target_phys_addr_t addend;
1591 int ret;
1592 CPUTLBEntry *te;
1593
1594 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
1595 if (!p) {
1596 pd = IO_MEM_UNASSIGNED;
1597 } else {
1598 pd = p->phys_offset;
1599 }
1600#if defined(DEBUG_TLB)
1601 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x u=%d smmu=%d pd=0x%08lx\n",
1602 vaddr, (int)paddr, prot, is_user, is_softmmu, pd);
1603#endif
1604
1605 ret = 0;
1606#if !defined(CONFIG_SOFTMMU)
1607 if (is_softmmu)
1608#endif
1609 {
1610 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
1611 /* IO memory case */
1612 address = vaddr | pd;
1613 addend = paddr;
1614 } else {
1615 /* standard memory */
1616 address = vaddr;
1617 addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
1618 }
1619
1620 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1621 addend -= vaddr;
1622 te = &env->tlb_table[is_user][index];
1623 te->addend = addend;
1624 if (prot & PAGE_READ) {
1625 te->addr_read = address;
1626 } else {
1627 te->addr_read = -1;
1628 }
1629 if (prot & PAGE_EXEC) {
1630 te->addr_code = address;
1631 } else {
1632 te->addr_code = -1;
1633 }
1634 if (prot & PAGE_WRITE) {
1635 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
1636 (pd & IO_MEM_ROMD)) {
1637 /* write access calls the I/O callback */
1638 te->addr_write = vaddr |
1639 (pd & ~(TARGET_PAGE_MASK | IO_MEM_ROMD));
1640 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1641 !cpu_physical_memory_is_dirty(pd)) {
1642 te->addr_write = vaddr | IO_MEM_NOTDIRTY;
1643 } else {
1644 te->addr_write = address;
1645 }
1646 } else {
1647 te->addr_write = -1;
1648 }
1649#ifdef VBOX
1650 /* inform raw mode about TLB page change */
1651 /** @todo double check and fix this interface. OLD: remR3SetPage(env, &env->tlb_read[is_user][index], &env->tlb_write[is_user][index], prot, is_user); */
1652 remR3SetPage(env, te, te, prot, is_user);
1653#endif
1654 }
1655#if !defined(CONFIG_SOFTMMU)
1656 else {
1657 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
1658 /* IO access: no mapping is done as it will be handled by the
1659 soft MMU */
1660 if (!(env->hflags & HF_SOFTMMU_MASK))
1661 ret = 2;
1662 } else {
1663 void *map_addr;
1664
1665 if (vaddr >= MMAP_AREA_END) {
1666 ret = 2;
1667 } else {
1668 if (prot & PROT_WRITE) {
1669 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
1670#if defined(TARGET_HAS_SMC) || 1
1671 first_tb ||
1672#endif
1673 ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1674 !cpu_physical_memory_is_dirty(pd))) {
1675 /* ROM: we do as if code was inside */
1676 /* if code is present, we only map as read only and save the
1677 original mapping */
1678 VirtPageDesc *vp;
1679
1680 vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
1681 vp->phys_addr = pd;
1682 vp->prot = prot;
1683 vp->valid_tag = virt_valid_tag;
1684 prot &= ~PAGE_WRITE;
1685 }
1686 }
1687 map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
1688 MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
1689 if (map_addr == MAP_FAILED) {
1690 cpu_abort(env, "mmap failed when mapping physical address 0x%08x to virtual address 0x%08x\n",
1691 paddr, vaddr);
1692 }
1693 }
1694 }
1695 }
1696#endif
1697 return ret;
1698}
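/* [Editor's illustration -- not part of the original file.]
 * The TLB entry filled in above stores addend = host_page_base - vaddr, so
 * the softmmu fast path can turn a guest virtual address into a host
 * pointer with a single add once the page tags match.  A minimal sketch of
 * that hit-path check (field names mirror the code above, but the helper
 * itself and EX_PAGE_MASK are hypothetical simplifications):
 */
#if 0 /* example only, never compiled */
#include <stddef.h>
#include <stdint.h>

#define EX_PAGE_MASK (~0xfffUL)                /* assumed TARGET_PAGE_MASK */

struct ex_tlb_entry {
    unsigned long addr_read;                   /* tagged guest page address */
    unsigned long addend;                      /* host_page_base - guest_vaddr */
};

static uint8_t *ex_tlb_read_ptr(const struct ex_tlb_entry *te, unsigned long vaddr)
{
    if ((vaddr & EX_PAGE_MASK) == (te->addr_read & EX_PAGE_MASK))
        return (uint8_t *)(vaddr + te->addend);    /* hit: direct host pointer */
    return NULL;                                   /* miss: caller refills the TLB */
}
#endif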
1699
1700/* called from signal handler: invalidate the code and unprotect the
1701 page. Return TRUE if the fault was successfully handled. */
1702int page_unprotect(target_ulong addr, unsigned long pc, void *puc)
1703{
1704#if !defined(CONFIG_SOFTMMU)
1705 VirtPageDesc *vp;
1706
1707#if defined(DEBUG_TLB)
1708 printf("page_unprotect: addr=0x%08x\n", addr);
1709#endif
1710 addr &= TARGET_PAGE_MASK;
1711
1712 /* if it is not mapped, no need to worry here */
1713 if (addr >= MMAP_AREA_END)
1714 return 0;
1715 vp = virt_page_find(addr >> TARGET_PAGE_BITS);
1716 if (!vp)
1717 return 0;
1718 /* NOTE: in this case, valid_tag is _not_ tested as it
1719 validates only the code TLB */
1720 if (vp->valid_tag != virt_valid_tag)
1721 return 0;
1722 if (!(vp->prot & PAGE_WRITE))
1723 return 0;
1724#if defined(DEBUG_TLB)
1725 printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
1726 addr, vp->phys_addr, vp->prot);
1727#endif
1728 if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
1729 cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
1730 (unsigned long)addr, vp->prot);
1731 /* set the dirty bit */
1732#ifdef VBOX
1733 if (RT_LIKELY((vp->phys_addr >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
1734#endif
1735 phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
1736 /* flush the code inside */
1737 tb_invalidate_phys_page(vp->phys_addr, pc, puc);
1738 return 1;
1739#elif defined(VBOX)
1740 addr &= TARGET_PAGE_MASK;
1741
1742 /* if it is not mapped, no need to worry here */
1743 if (addr >= MMAP_AREA_END)
1744 return 0;
1745 return 1;
1746#else
1747 return 0;
1748#endif
1749}
1750
1751#else
1752
1753void tlb_flush(CPUState *env, int flush_global)
1754{
1755}
1756
1757void tlb_flush_page(CPUState *env, target_ulong addr)
1758{
1759}
1760
1761int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1762 target_phys_addr_t paddr, int prot,
1763 int is_user, int is_softmmu)
1764{
1765 return 0;
1766}
1767
1768#ifndef VBOX
1769/* dump memory mappings */
1770void page_dump(FILE *f)
1771{
1772 unsigned long start, end;
1773 int i, j, prot, prot1;
1774 PageDesc *p;
1775
1776 fprintf(f, "%-8s %-8s %-8s %s\n",
1777 "start", "end", "size", "prot");
1778 start = -1;
1779 end = -1;
1780 prot = 0;
1781 for(i = 0; i <= L1_SIZE; i++) {
1782 if (i < L1_SIZE)
1783 p = l1_map[i];
1784 else
1785 p = NULL;
1786 for(j = 0;j < L2_SIZE; j++) {
1787 if (!p)
1788 prot1 = 0;
1789 else
1790 prot1 = p[j].flags;
1791 if (prot1 != prot) {
1792 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
1793 if (start != -1) {
1794 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
1795 start, end, end - start,
1796 prot & PAGE_READ ? 'r' : '-',
1797 prot & PAGE_WRITE ? 'w' : '-',
1798 prot & PAGE_EXEC ? 'x' : '-');
1799 }
1800 if (prot1 != 0)
1801 start = end;
1802 else
1803 start = -1;
1804 prot = prot1;
1805 }
1806 if (!p)
1807 break;
1808 }
1809 }
1810}
1811#endif /* !VBOX */
1812
1813int page_get_flags(target_ulong address)
1814{
1815 PageDesc *p;
1816
1817 p = page_find(address >> TARGET_PAGE_BITS);
1818 if (!p)
1819 return 0;
1820 return p->flags;
1821}
1822
1823/* modify the flags of a page and invalidate the code if
1824 necessary. The flag PAGE_WRITE_ORG is positioned automatically
1825 depending on PAGE_WRITE */
1826void page_set_flags(target_ulong start, target_ulong end, int flags)
1827{
1828 PageDesc *p;
1829 target_ulong addr;
1830
1831 start = start & TARGET_PAGE_MASK;
1832 end = TARGET_PAGE_ALIGN(end);
1833 if (flags & PAGE_WRITE)
1834 flags |= PAGE_WRITE_ORG;
1835#if defined(VBOX)
1836 AssertMsgFailed(("We shouldn't be here, and if we should, we must have an env to do the proper locking!\n"));
1837#endif
1838 spin_lock(&tb_lock);
1839 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1840 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
1841 /* if the write protection is set, then we invalidate the code
1842 inside */
1843 if (!(p->flags & PAGE_WRITE) &&
1844 (flags & PAGE_WRITE) &&
1845 p->first_tb) {
1846 tb_invalidate_phys_page(addr, 0, NULL);
1847 }
1848 p->flags = flags;
1849 }
1850 spin_unlock(&tb_lock);
1851}
1852
1853/* called from signal handler: invalidate the code and unprotect the
1854 page. Return TRUE if the fault was successfully handled. */
1855int page_unprotect(target_ulong address, unsigned long pc, void *puc)
1856{
1857 unsigned int page_index, prot, pindex;
1858 PageDesc *p, *p1;
1859 target_ulong host_start, host_end, addr;
1860
1861 host_start = address & qemu_host_page_mask;
1862 page_index = host_start >> TARGET_PAGE_BITS;
1863 p1 = page_find(page_index);
1864 if (!p1)
1865 return 0;
1866 host_end = host_start + qemu_host_page_size;
1867 p = p1;
1868 prot = 0;
1869 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
1870 prot |= p->flags;
1871 p++;
1872 }
1873 /* if the page was really writable, then we change its
1874 protection back to writable */
1875 if (prot & PAGE_WRITE_ORG) {
1876 pindex = (address - host_start) >> TARGET_PAGE_BITS;
1877 if (!(p1[pindex].flags & PAGE_WRITE)) {
1878 mprotect((void *)g2h(host_start), qemu_host_page_size,
1879 (prot & PAGE_BITS) | PAGE_WRITE);
1880 p1[pindex].flags |= PAGE_WRITE;
1881 /* and since the content will be modified, we must invalidate
1882 the corresponding translated code. */
1883 tb_invalidate_phys_page(address, pc, puc);
1884#ifdef DEBUG_TB_CHECK
1885 tb_invalidate_check(address);
1886#endif
1887 return 1;
1888 }
1889 }
1890 return 0;
1891}
1892
1893/* call this function when system calls directly modify a memory area */
1894/* ??? This should be redundant now we have lock_user. */
1895void page_unprotect_range(target_ulong data, target_ulong data_size)
1896{
1897 target_ulong start, end, addr;
1898
1899 start = data;
1900 end = start + data_size;
1901 start &= TARGET_PAGE_MASK;
1902 end = TARGET_PAGE_ALIGN(end);
1903 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1904 page_unprotect(addr, 0, NULL);
1905 }
1906}
1907
1908static inline void tlb_set_dirty(CPUState *env,
1909 unsigned long addr, target_ulong vaddr)
1910{
1911}
1912#endif /* defined(CONFIG_USER_ONLY) */
1913
1914/* register physical memory. 'size' must be a multiple of the target
1915 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
1916 io memory page */
1917void cpu_register_physical_memory(target_phys_addr_t start_addr,
1918 unsigned long size,
1919 unsigned long phys_offset)
1920{
1921 target_phys_addr_t addr, end_addr;
1922 PhysPageDesc *p;
1923 CPUState *env;
1924
1925 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
1926 end_addr = start_addr + size;
1927 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
1928 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
1929 p->phys_offset = phys_offset;
1930 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
1931 (phys_offset & IO_MEM_ROMD))
1932 phys_offset += TARGET_PAGE_SIZE;
1933 }
1934
1935 /* since each CPU stores ram addresses in its TLB cache, we must
1936 reset the modified entries */
1937 /* XXX: slow ! */
1938 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1939 tlb_flush(env, 1);
1940 }
1941}
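/* [Editor's illustration -- not part of the original file.]
 * Hypothetical use of the registration call above: plain RAM passes a
 * page-aligned offset into phys_ram_base, while an MMIO region passes an
 * io_index (here a hypothetical variable, typically obtained from
 * cpu_register_io_memory()) in the low bits instead.
 */
#if 0 /* example only, never compiled */
    /* 640 KB of conventional RAM at guest physical 0, backed by offset 0
       of phys_ram_base: */
    cpu_register_physical_memory(0x00000000, 0xa0000, 0x00000000 | IO_MEM_RAM);
    /* a 4 KB MMIO window handled by a previously registered io_index: */
    cpu_register_physical_memory(0xfee00000, 0x1000, io_index);
#endif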
1942
1943/* XXX: temporary until new memory mapping API */
1944uint32_t cpu_get_physical_page_desc(target_phys_addr_t addr)
1945{
1946 PhysPageDesc *p;
1947
1948 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1949 if (!p)
1950 return IO_MEM_UNASSIGNED;
1951 return p->phys_offset;
1952}
1953
1954static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
1955{
1956#ifdef DEBUG_UNASSIGNED
1957 printf("Unassigned mem read 0x%08x\n", (int)addr);
1958#endif
1959 return 0;
1960}
1961
1962static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
1963{
1964#ifdef DEBUG_UNASSIGNED
1965 printf("Unassigned mem write 0x%08x = 0x%x\n", (int)addr, val);
1966#endif
1967}
1968
1969static CPUReadMemoryFunc *unassigned_mem_read[3] = {
1970 unassigned_mem_readb,
1971 unassigned_mem_readb,
1972 unassigned_mem_readb,
1973};
1974
1975static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
1976 unassigned_mem_writeb,
1977 unassigned_mem_writeb,
1978 unassigned_mem_writeb,
1979};
1980
1981static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
1982{
1983 unsigned long ram_addr;
1984 int dirty_flags;
1985 ram_addr = addr - (unsigned long)phys_ram_base;
1986#ifdef VBOX
1987 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
1988 dirty_flags = 0xff;
1989 else
1990#endif /* VBOX */
1991 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
1992 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
1993#if !defined(CONFIG_USER_ONLY)
1994 tb_invalidate_phys_page_fast(ram_addr, 1);
1995# ifdef VBOX
1996 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
1997 dirty_flags = 0xff;
1998 else
1999# endif /* VBOX */
2000 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2001#endif
2002 }
2003 stb_p((uint8_t *)(long)addr, val);
2004#ifdef USE_KQEMU
2005 if (cpu_single_env->kqemu_enabled &&
2006 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2007 kqemu_modify_page(cpu_single_env, ram_addr);
2008#endif
2009 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2010#ifdef VBOX
2011 if (RT_LIKELY((ram_addr >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
2012#endif /* VBOX */
2013 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2014 /* we remove the notdirty callback only if the code has been
2015 flushed */
2016 if (dirty_flags == 0xff)
2017 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
2018}
2019
2020static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2021{
2022 unsigned long ram_addr;
2023 int dirty_flags;
2024 ram_addr = addr - (unsigned long)phys_ram_base;
2025#ifdef VBOX
2026 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
2027 dirty_flags = 0xff;
2028 else
2029#endif /* VBOX */
2030 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2031 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2032#if !defined(CONFIG_USER_ONLY)
2033 tb_invalidate_phys_page_fast(ram_addr, 2);
2034# ifdef VBOX
2035 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
2036 dirty_flags = 0xff;
2037 else
2038# endif /* VBOX */
2039 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2040#endif
2041 }
2042 stw_p((uint8_t *)(long)addr, val);
2043#ifdef USE_KQEMU
2044 if (cpu_single_env->kqemu_enabled &&
2045 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2046 kqemu_modify_page(cpu_single_env, ram_addr);
2047#endif
2048 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2049#ifdef VBOX
2050 if (RT_LIKELY((ram_addr >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
2051#endif
2052 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2053 /* we remove the notdirty callback only if the code has been
2054 flushed */
2055 if (dirty_flags == 0xff)
2056 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
2057}
2058
2059static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2060{
2061 unsigned long ram_addr;
2062 int dirty_flags;
2063 ram_addr = addr - (unsigned long)phys_ram_base;
2064#ifdef VBOX
2065 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
2066 dirty_flags = 0xff;
2067 else
2068#endif /* VBOX */
2069 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2070 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2071#if !defined(CONFIG_USER_ONLY)
2072 tb_invalidate_phys_page_fast(ram_addr, 4);
2073# ifdef VBOX
2074 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
2075 dirty_flags = 0xff;
2076 else
2077# endif /* VBOX */
2078 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2079#endif
2080 }
2081 stl_p((uint8_t *)(long)addr, val);
2082#ifdef USE_KQEMU
2083 if (cpu_single_env->kqemu_enabled &&
2084 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2085 kqemu_modify_page(cpu_single_env, ram_addr);
2086#endif
2087 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2088#ifdef VBOX
2089 if (RT_LIKELY((ram_addr >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
2090#endif
2091 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2092 /* we remove the notdirty callback only if the code has been
2093 flushed */
2094 if (dirty_flags == 0xff)
2095 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
2096}
2097
2098static CPUReadMemoryFunc *error_mem_read[3] = {
2099 NULL, /* never used */
2100 NULL, /* never used */
2101 NULL, /* never used */
2102};
2103
2104static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2105 notdirty_mem_writeb,
2106 notdirty_mem_writew,
2107 notdirty_mem_writel,
2108};
2109
2110static void io_mem_init(void)
2111{
2112 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
2113 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
2114 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
2115 io_mem_nb = 5;
2116
2117#ifndef VBOX /* VBOX: we do this later when the RAM is allocated. */
2118 /* alloc dirty bits array */
2119 phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
2120 memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
2121#endif /* !VBOX */
2122}
2123
2124/* mem_read and mem_write are arrays of functions containing the
2125 function to access byte (index 0), word (index 1) and dword (index
2126 2). All functions must be supplied. If io_index is non zero, the
2127 corresponding io zone is modified. If it is zero, a new io zone is
2128 allocated. The return value can be used with
2129 cpu_register_physical_memory(). -1 is returned on error. */
2130int cpu_register_io_memory(int io_index,
2131 CPUReadMemoryFunc **mem_read,
2132 CPUWriteMemoryFunc **mem_write,
2133 void *opaque)
2134{
2135 int i;
2136
2137 if (io_index <= 0) {
2138 if (io_mem_nb >= IO_MEM_NB_ENTRIES)
2139 return -1;
2140 io_index = io_mem_nb++;
2141 } else {
2142 if (io_index >= IO_MEM_NB_ENTRIES)
2143 return -1;
2144 }
2145
2146 for(i = 0;i < 3; i++) {
2147 io_mem_read[io_index][i] = mem_read[i];
2148 io_mem_write[io_index][i] = mem_write[i];
2149 }
2150 io_mem_opaque[io_index] = opaque;
2151 return io_index << IO_MEM_SHIFT;
2152}
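/*
 * Editor's sketch (not part of exec.c): registering MMIO callbacks for a
 * hypothetical device and mapping them into the physical address space.
 * Only cpu_register_io_memory() and cpu_register_physical_memory() come
 * from this file; the device struct, the callbacks and the 0xfe000000
 * address are invented for the example.
 */
typedef struct ExampleDev { uint32_t reg; } ExampleDev;

static uint32_t exdev_read(void *opaque, target_phys_addr_t addr)
{
    ExampleDev *d = (ExampleDev *)opaque;
    return d->reg;                        /* same value for 8/16/32 bit reads */
}

static void exdev_write(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    ExampleDev *d = (ExampleDev *)opaque;
    d->reg = val;
}

static CPUReadMemoryFunc *exdev_readfn[3]  = { exdev_read, exdev_read, exdev_read };
static CPUWriteMemoryFunc *exdev_writefn[3] = { exdev_write, exdev_write, exdev_write };

static void example_map_device(ExampleDev *d)
{
    /* io_index <= 0 allocates a new I/O zone; the return value is already
       shifted by IO_MEM_SHIFT and can be passed on as phys_offset. */
    int io = cpu_register_io_memory(0, exdev_readfn, exdev_writefn, d);
    if (io < 0)
        return;                           /* no free entry in the I/O table */
    cpu_register_physical_memory(0xfe000000, TARGET_PAGE_SIZE, io);
}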
2153
2154CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
2155{
2156 return io_mem_write[io_index >> IO_MEM_SHIFT];
2157}
2158
2159CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
2160{
2161 return io_mem_read[io_index >> IO_MEM_SHIFT];
2162}
2163
2164/* physical memory access (slow version, mainly for debug) */
2165#if defined(CONFIG_USER_ONLY)
2166void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2167 int len, int is_write)
2168{
2169 int l, flags;
2170 target_ulong page;
2171 void * p;
2172
2173 while (len > 0) {
2174 page = addr & TARGET_PAGE_MASK;
2175 l = (page + TARGET_PAGE_SIZE) - addr;
2176 if (l > len)
2177 l = len;
2178 flags = page_get_flags(page);
2179 if (!(flags & PAGE_VALID))
2180 return;
2181 if (is_write) {
2182 if (!(flags & PAGE_WRITE))
2183 return;
2184 p = lock_user(addr, len, 0);
2185 memcpy(p, buf, len);
2186 unlock_user(p, addr, len);
2187 } else {
2188 if (!(flags & PAGE_READ))
2189 return;
2190 p = lock_user(addr, len, 1);
2191 memcpy(buf, p, len);
2192 unlock_user(p, addr, 0);
2193 }
2194 len -= l;
2195 buf += l;
2196 addr += l;
2197 }
2198}
2199
2200#else
2201void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2202 int len, int is_write)
2203{
2204 int l, io_index;
2205 uint8_t *ptr;
2206 uint32_t val;
2207 target_phys_addr_t page;
2208 unsigned long pd;
2209 PhysPageDesc *p;
2210
2211 while (len > 0) {
2212 page = addr & TARGET_PAGE_MASK;
2213 l = (page + TARGET_PAGE_SIZE) - addr;
2214 if (l > len)
2215 l = len;
2216 p = phys_page_find(page >> TARGET_PAGE_BITS);
2217 if (!p) {
2218 pd = IO_MEM_UNASSIGNED;
2219 } else {
2220 pd = p->phys_offset;
2221 }
2222
2223 if (is_write) {
2224 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2225 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2226 /* XXX: could force cpu_single_env to NULL to avoid
2227 potential bugs */
2228 if (l >= 4 && ((addr & 3) == 0)) {
2229 /* 32 bit write access */
2230 val = ldl_p(buf);
2231 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2232 l = 4;
2233 } else if (l >= 2 && ((addr & 1) == 0)) {
2234 /* 16 bit write access */
2235 val = lduw_p(buf);
2236 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
2237 l = 2;
2238 } else {
2239 /* 8 bit write access */
2240 val = ldub_p(buf);
2241 io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
2242 l = 1;
2243 }
2244 } else {
2245 unsigned long addr1;
2246 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2247 /* RAM case */
2248 ptr = phys_ram_base + addr1;
2249#ifdef VBOX
2250 remR3PhysWrite(ptr, buf, l);
2251#else
2252 memcpy(ptr, buf, l);
2253#endif
2254 if (!cpu_physical_memory_is_dirty(addr1)) {
2255 /* invalidate code */
2256 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
2257 /* set dirty bit */
2258#ifdef VBOX
2259 if (RT_LIKELY((addr1 >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
2260#endif
2261 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2262 (0xff & ~CODE_DIRTY_FLAG);
2263 }
2264 }
2265 } else {
2266 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2267 !(pd & IO_MEM_ROMD)) {
2268 /* I/O case */
2269 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2270 if (l >= 4 && ((addr & 3) == 0)) {
2271 /* 32 bit read access */
2272 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2273 stl_p(buf, val);
2274 l = 4;
2275 } else if (l >= 2 && ((addr & 1) == 0)) {
2276 /* 16 bit read access */
2277 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
2278 stw_p(buf, val);
2279 l = 2;
2280 } else {
2281 /* 8 bit read access */
2282 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
2283 stb_p(buf, val);
2284 l = 1;
2285 }
2286 } else {
2287 /* RAM case */
2288 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2289 (addr & ~TARGET_PAGE_MASK);
2290#ifdef VBOX
2291 remR3PhysRead(ptr, buf, l);
2292#else
2293 memcpy(buf, ptr, l);
2294#endif
2295 }
2296 }
2297 len -= l;
2298 buf += l;
2299 addr += l;
2300 }
2301}
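/*
 * Editor's sketch (not part of exec.c): a round trip through the slow path
 * above. The cpu_physical_memory_read()/write() wrappers used elsewhere in
 * this file are thin wrappers around cpu_physical_memory_rw() with is_write
 * fixed to 0 / 1. The guest-physical address is a placeholder supplied by
 * the caller.
 */
static void example_phys_roundtrip(target_phys_addr_t gpa)
{
    uint8_t out[16] = "physical memory";   /* 15 chars + NUL */
    uint8_t in[16];

    cpu_physical_memory_rw(gpa, out, sizeof(out), 1);   /* write to guest RAM/MMIO */
    cpu_physical_memory_rw(gpa, in, sizeof(in), 0);     /* read the same range back */
}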
2302
2303/* used for ROM loading: can write in RAM and ROM */
2304void cpu_physical_memory_write_rom(target_phys_addr_t addr,
2305 const uint8_t *buf, int len)
2306{
2307 int l;
2308 uint8_t *ptr;
2309 target_phys_addr_t page;
2310 unsigned long pd;
2311 PhysPageDesc *p;
2312
2313 while (len > 0) {
2314 page = addr & TARGET_PAGE_MASK;
2315 l = (page + TARGET_PAGE_SIZE) - addr;
2316 if (l > len)
2317 l = len;
2318 p = phys_page_find(page >> TARGET_PAGE_BITS);
2319 if (!p) {
2320 pd = IO_MEM_UNASSIGNED;
2321 } else {
2322 pd = p->phys_offset;
2323 }
2324
2325 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
2326 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
2327 !(pd & IO_MEM_ROMD)) {
2328 /* do nothing */
2329 } else {
2330 unsigned long addr1;
2331 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2332 /* ROM/RAM case */
2333 ptr = phys_ram_base + addr1;
2334 memcpy(ptr, buf, l);
2335 }
2336 len -= l;
2337 buf += l;
2338 addr += l;
2339 }
2340}
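/*
 * Editor's sketch (not part of exec.c): copying a firmware image into a
 * region registered as ROM. A normal cpu_physical_memory_rw() write is
 * discarded for ROM pages, so ROM loaders go through this helper instead.
 * The image pointer, its size and the 0xfffe0000 address are hypothetical.
 */
static void example_load_firmware(const uint8_t *image, int size)
{
    cpu_physical_memory_write_rom(0xfffe0000, image, size);
}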
2341
2342
2343/* warning: addr must be aligned */
2344uint32_t ldl_phys(target_phys_addr_t addr)
2345{
2346 int io_index;
2347 uint8_t *ptr;
2348 uint32_t val;
2349 unsigned long pd;
2350 PhysPageDesc *p;
2351
2352 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2353 if (!p) {
2354 pd = IO_MEM_UNASSIGNED;
2355 } else {
2356 pd = p->phys_offset;
2357 }
2358
2359 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2360 !(pd & IO_MEM_ROMD)) {
2361 /* I/O case */
2362 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2363 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2364 } else {
2365 /* RAM case */
2366 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2367 (addr & ~TARGET_PAGE_MASK);
2368 val = ldl_p(ptr);
2369 }
2370 return val;
2371}
2372
2373/* warning: addr must be aligned */
2374uint64_t ldq_phys(target_phys_addr_t addr)
2375{
2376 int io_index;
2377 uint8_t *ptr;
2378 uint64_t val;
2379 unsigned long pd;
2380 PhysPageDesc *p;
2381
2382 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2383 if (!p) {
2384 pd = IO_MEM_UNASSIGNED;
2385 } else {
2386 pd = p->phys_offset;
2387 }
2388
2389 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2390 !(pd & IO_MEM_ROMD)) {
2391 /* I/O case */
2392 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2393#ifdef TARGET_WORDS_BIGENDIAN
2394 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
2395 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
2396#else
2397 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2398 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
2399#endif
2400 } else {
2401 /* RAM case */
2402 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2403 (addr & ~TARGET_PAGE_MASK);
2404 val = ldq_p(ptr);
2405 }
2406 return val;
2407}
2408
2409/* XXX: optimize */
2410uint32_t ldub_phys(target_phys_addr_t addr)
2411{
2412 uint8_t val;
2413 cpu_physical_memory_read(addr, &val, 1);
2414 return val;
2415}
2416
2417/* XXX: optimize */
2418uint32_t lduw_phys(target_phys_addr_t addr)
2419{
2420 uint16_t val;
2421 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
2422 return tswap16(val);
2423}
2424
2425/* warning: addr must be aligned. The ram page is not marked as dirty
2426 and the code inside is not invalidated. It is useful if the dirty
2427 bits are used to track modified PTEs */
2428void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
2429{
2430 int io_index;
2431 uint8_t *ptr;
2432 unsigned long pd;
2433 PhysPageDesc *p;
2434
2435 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2436 if (!p) {
2437 pd = IO_MEM_UNASSIGNED;
2438 } else {
2439 pd = p->phys_offset;
2440 }
2441
2442 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2443 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2444 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2445 } else {
2446 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2447 (addr & ~TARGET_PAGE_MASK);
2448 stl_p(ptr, val);
2449 }
2450}
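/*
 * Editor's sketch (not part of exec.c): the notdirty variant is meant for
 * cases like target MMU code updating a guest page-table entry, where the
 * dirty bitmap itself is being used to detect modified PTEs. The 0x20 flag
 * mirrors the x86 "accessed" bit but is only illustrative here.
 */
static void example_set_pte_accessed(target_phys_addr_t pte_addr)
{
    uint32_t pte = ldl_phys(pte_addr);        /* read the guest PTE                 */
    stl_phys_notdirty(pte_addr, pte | 0x20);  /* set the flag, keep the page "clean" */
}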
2451
2452/* warning: addr must be aligned */
2453void stl_phys(target_phys_addr_t addr, uint32_t val)
2454{
2455 int io_index;
2456 uint8_t *ptr;
2457 unsigned long pd;
2458 PhysPageDesc *p;
2459
2460 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2461 if (!p) {
2462 pd = IO_MEM_UNASSIGNED;
2463 } else {
2464 pd = p->phys_offset;
2465 }
2466
2467 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2468 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2469 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2470 } else {
2471 unsigned long addr1;
2472 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2473 /* RAM case */
2474 ptr = phys_ram_base + addr1;
2475 stl_p(ptr, val);
2476 if (!cpu_physical_memory_is_dirty(addr1)) {
2477 /* invalidate code */
2478 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2479 /* set dirty bit */
2480#ifdef VBOX
2481 if (RT_LIKELY((addr1 >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
2482#endif
2483 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2484 (0xff & ~CODE_DIRTY_FLAG);
2485 }
2486 }
2487}
2488
2489/* XXX: optimize */
2490void stb_phys(target_phys_addr_t addr, uint32_t val)
2491{
2492 uint8_t v = val;
2493 cpu_physical_memory_write(addr, &v, 1);
2494}
2495
2496/* XXX: optimize */
2497void stw_phys(target_phys_addr_t addr, uint32_t val)
2498{
2499 uint16_t v = tswap16(val);
2500 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
2501}
2502
2503/* XXX: optimize */
2504void stq_phys(target_phys_addr_t addr, uint64_t val)
2505{
2506 val = tswap64(val);
2507 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
2508}
2509
2510#endif
2511
2512/* virtual memory access for debug */
2513int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
2514 uint8_t *buf, int len, int is_write)
2515{
2516 int l;
2517 target_ulong page, phys_addr;
2518
2519 while (len > 0) {
2520 page = addr & TARGET_PAGE_MASK;
2521 phys_addr = cpu_get_phys_page_debug(env, page);
2522 /* if no physical page mapped, return an error */
2523 if (phys_addr == -1)
2524 return -1;
2525 l = (page + TARGET_PAGE_SIZE) - addr;
2526 if (l > len)
2527 l = len;
2528 cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
2529 buf, l, is_write);
2530 len -= l;
2531 buf += l;
2532 addr += l;
2533 }
2534 return 0;
2535}
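/*
 * Editor's sketch (not part of exec.c): a gdb-stub style helper that reads
 * guest memory through the *virtual* address space of a given CPU by
 * delegating to cpu_memory_rw_debug(), which already splits the transfer at
 * page boundaries. The env pointer would normally come from first_cpu or
 * from the debug client.
 */
static int example_peek_virtual(CPUState *env, target_ulong va,
                                uint8_t *dst, int len)
{
    return cpu_memory_rw_debug(env, va, dst, len, 0 /* is_write */);
}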
2536
2537#ifndef VBOX
2538void dump_exec_info(FILE *f,
2539 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
2540{
2541 int i, target_code_size, max_target_code_size;
2542 int direct_jmp_count, direct_jmp2_count, cross_page;
2543 TranslationBlock *tb;
2544
2545 target_code_size = 0;
2546 max_target_code_size = 0;
2547 cross_page = 0;
2548 direct_jmp_count = 0;
2549 direct_jmp2_count = 0;
2550 for(i = 0; i < nb_tbs; i++) {
2551 tb = &tbs[i];
2552 target_code_size += tb->size;
2553 if (tb->size > max_target_code_size)
2554 max_target_code_size = tb->size;
2555 if (tb->page_addr[1] != -1)
2556 cross_page++;
2557 if (tb->tb_next_offset[0] != 0xffff) {
2558 direct_jmp_count++;
2559 if (tb->tb_next_offset[1] != 0xffff) {
2560 direct_jmp2_count++;
2561 }
2562 }
2563 }
2564 /* XXX: avoid using doubles ? */
2565 cpu_fprintf(f, "TB count %d\n", nb_tbs);
2566 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
2567 nb_tbs ? target_code_size / nb_tbs : 0,
2568 max_target_code_size);
2569 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
2570 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
2571 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
2572 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
2573 cross_page,
2574 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
2575 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
2576 direct_jmp_count,
2577 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
2578 direct_jmp2_count,
2579 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
2580 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
2581 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
2582 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
2583}
2584#endif /* !VBOX */
2585
2586#if !defined(CONFIG_USER_ONLY)
2587
2588#define MMUSUFFIX _cmmu
2589#define GETPC() NULL
2590#define env cpu_single_env
2591#define SOFTMMU_CODE_ACCESS
2592
2593#define SHIFT 0
2594#include "softmmu_template.h"
2595
2596#define SHIFT 1
2597#include "softmmu_template.h"
2598
2599#define SHIFT 2
2600#include "softmmu_template.h"
2601
2602#define SHIFT 3
2603#include "softmmu_template.h"
2604
2605#undef env
2606
2607#endif