VirtualBox

source: vbox/trunk/src/recompiler_new/exec.c @ 18595

Last change on this file since 18595 was 18595, checked in by vboxsync, 16 years ago

REM: synced over TLB and TB stats from the old code.

  • Property svn:eol-style set to native
File size: 113.6 KB
1/*
2 * virtual page mapping and translated block handling
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21/*
22 * Sun LGPL Disclaimer: For the avoidance of doubt, except that if any license choice
23 * other than GPL or LGPL is available it will apply instead, Sun elects to use only
24 * the Lesser General Public License version 2.1 (LGPLv2) at this time for any software where
25 * a choice of LGPL license versions is made available with the language indicating
26 * that LGPLv2 or any later version may be used, or where a choice of which version
27 * of the LGPL is applied is otherwise unspecified.
28 */
29#include "config.h"
30#ifndef VBOX
31#ifdef _WIN32
32#include <windows.h>
33#else
34#include <sys/types.h>
35#include <sys/mman.h>
36#endif
37#include <stdlib.h>
38#include <stdio.h>
39#include <stdarg.h>
40#include <string.h>
41#include <errno.h>
42#include <unistd.h>
43#include <inttypes.h>
44#else /* VBOX */
45# include <stdlib.h>
46# include <stdio.h>
47# include <iprt/alloc.h>
48# include <iprt/string.h>
49# include <iprt/param.h>
50# include <VBox/pgm.h> /* PGM_DYNAMIC_RAM_ALLOC */
51#endif /* VBOX */
52
53#include "cpu.h"
54#include "exec-all.h"
55#if defined(CONFIG_USER_ONLY)
56#include <qemu.h>
57#endif
58
59//#define DEBUG_TB_INVALIDATE
60//#define DEBUG_FLUSH
61//#define DEBUG_TLB
62//#define DEBUG_UNASSIGNED
63
64/* make various TB consistency checks */
65//#define DEBUG_TB_CHECK
66//#define DEBUG_TLB_CHECK
67
68#if !defined(CONFIG_USER_ONLY)
69/* TB consistency checks only implemented for usermode emulation. */
70#undef DEBUG_TB_CHECK
71#endif
72
73#define SMC_BITMAP_USE_THRESHOLD 10
74
75#define MMAP_AREA_START 0x00000000
76#define MMAP_AREA_END 0xa8000000
77
78#if defined(TARGET_SPARC64)
79#define TARGET_PHYS_ADDR_SPACE_BITS 41
80#elif defined(TARGET_SPARC)
81#define TARGET_PHYS_ADDR_SPACE_BITS 36
82#elif defined(TARGET_ALPHA)
83#define TARGET_PHYS_ADDR_SPACE_BITS 42
84#define TARGET_VIRT_ADDR_SPACE_BITS 42
85#elif defined(TARGET_PPC64)
86#define TARGET_PHYS_ADDR_SPACE_BITS 42
87#elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
88#define TARGET_PHYS_ADDR_SPACE_BITS 42
89#elif defined(TARGET_I386) && !defined(USE_KQEMU)
90#define TARGET_PHYS_ADDR_SPACE_BITS 36
91#else
92/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
93#define TARGET_PHYS_ADDR_SPACE_BITS 32
94#endif
95
96static TranslationBlock *tbs;
97int code_gen_max_blocks;
98TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
99static int nb_tbs;
100/* any access to the tbs or the page table must use this lock */
101spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
102
103#ifndef VBOX
104#if defined(__arm__) || defined(__sparc_v9__)
105/* The prologue must be reachable with a direct jump. ARM and Sparc64
106 have limited branch ranges (possibly also PPC) so place it in a
107 section close to the code segment. */
108#define code_gen_section \
109 __attribute__((__section__(".gen_code"))) \
110 __attribute__((aligned (32)))
111#else
112#define code_gen_section \
113 __attribute__((aligned (32)))
114#endif
115uint8_t code_gen_prologue[1024] code_gen_section;
116
117#else /* VBOX */
118extern uint8_t* code_gen_prologue;
119#endif /* VBOX */
120
121static uint8_t *code_gen_buffer;
122static unsigned long code_gen_buffer_size;
123/* threshold to flush the translated code buffer */
124static unsigned long code_gen_buffer_max_size;
125uint8_t *code_gen_ptr;
126
127#ifndef VBOX
128#if !defined(CONFIG_USER_ONLY)
129ram_addr_t phys_ram_size;
130int phys_ram_fd;
131uint8_t *phys_ram_base;
132uint8_t *phys_ram_dirty;
133static int in_migration;
134static ram_addr_t phys_ram_alloc_offset = 0;
135#endif
136#else /* VBOX */
137RTGCPHYS phys_ram_size;
138/* we have memory ranges (the high PC-BIOS mapping) which
139 cause some pages to fall outside the dirty map here. */
140RTGCPHYS phys_ram_dirty_size;
141#endif /* VBOX */
142#if !defined(VBOX)
143uint8_t *phys_ram_base;
144#endif
145uint8_t *phys_ram_dirty;
146
147CPUState *first_cpu;
148/* current CPU in the current thread. It is only valid inside
149 cpu_exec() */
150CPUState *cpu_single_env;
151/* 0 = Do not count executed instructions.
152 1 = Precise instruction counting.
153 2 = Adaptive rate instruction counting. */
154int use_icount = 0;
155/* Current instruction counter. While executing translated code this may
156 include some instructions that have not yet been executed. */
157int64_t qemu_icount;
158
159typedef struct PageDesc {
160 /* list of TBs intersecting this ram page */
161 TranslationBlock *first_tb;
162 /* in order to optimize self-modifying code, we count the number
163 of code write accesses to a given page to decide when to use a bitmap */
164 unsigned int code_write_count;
165 uint8_t *code_bitmap;
166#if defined(CONFIG_USER_ONLY)
167 unsigned long flags;
168#endif
169} PageDesc;
170
171typedef struct PhysPageDesc {
172 /* offset in host memory of the page + io_index in the low 12 bits */
173 ram_addr_t phys_offset;
174} PhysPageDesc;
175
176#define L2_BITS 10
177#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
178/* XXX: this is a temporary hack for alpha target.
179 * In the future, this is to be replaced by a multi-level table
180 * to actually be able to handle the complete 64 bits address space.
181 */
182#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
183#else
184# ifdef VBOX /* > 4GB please. */
185#define L1_BITS (TARGET_PHYS_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
186# else
187#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
188# endif
189#endif
190
191#define L1_SIZE (1 << L1_BITS)
192#define L2_SIZE (1 << L2_BITS)
193
194static void io_mem_init(void);
195
196unsigned long qemu_real_host_page_size;
197unsigned long qemu_host_page_bits;
198unsigned long qemu_host_page_size;
199unsigned long qemu_host_page_mask;
200
201/* XXX: for system emulation, it could just be an array */
202static PageDesc *l1_map[L1_SIZE];
203static PhysPageDesc **l1_phys_map;
204
205#if !defined(CONFIG_USER_ONLY)
206static void io_mem_init(void);
207
208/* io memory support */
209CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
210CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
211void *io_mem_opaque[IO_MEM_NB_ENTRIES];
212static int io_mem_nb;
213static int io_mem_watch;
214#endif
215
216#ifndef VBOX
217/* log support */
218static const char *logfilename = "/tmp/qemu.log";
219#endif /* !VBOX */
220FILE *logfile;
221int loglevel;
222#ifndef VBOX
223static int log_append = 0;
224#endif
225
226/* statistics */
227#ifndef VBOX
228static int tlb_flush_count;
229static int tb_flush_count;
230static int tb_phys_invalidate_count;
231#else /* VBOX - Resettable U32 stats, see VBoxRecompiler.c. */
232uint32_t tlb_flush_count;
233uint32_t tb_flush_count;
234uint32_t tb_phys_invalidate_count;
235#endif /* VBOX */
236
237#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
238typedef struct subpage_t {
239 target_phys_addr_t base;
240 CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
241 CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
242 void *opaque[TARGET_PAGE_SIZE][2][4];
243} subpage_t;
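/* Sketch of the subpage indexing above: SUBPAGE_IDX() keeps only the offset
   inside a page, and a subpage_t uses it to route each byte offset of a single
   physical page to its own read/write handlers and opaque pointer, which is
   what lets several I/O regions share one TARGET_PAGE_SIZE page. */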
244
245
246#ifndef VBOX
247#ifdef _WIN32
248static void map_exec(void *addr, long size)
249{
250 DWORD old_protect;
251 VirtualProtect(addr, size,
252 PAGE_EXECUTE_READWRITE, &old_protect);
253
254}
255#else
256static void map_exec(void *addr, long size)
257{
258 unsigned long start, end, page_size;
259
260 page_size = getpagesize();
261 start = (unsigned long)addr;
262 start &= ~(page_size - 1);
263
264 end = (unsigned long)addr + size;
265 end += page_size - 1;
266 end &= ~(page_size - 1);
267
268 mprotect((void *)start, end - start,
269 PROT_READ | PROT_WRITE | PROT_EXEC);
270}
271#endif
272#else // VBOX
273static void map_exec(void *addr, long size)
274{
275 RTMemProtect(addr, size,
276 RTMEM_PROT_EXEC | RTMEM_PROT_READ | RTMEM_PROT_WRITE);
277}
278#endif
279
280static void page_init(void)
281{
282 /* NOTE: we can always suppose that qemu_host_page_size >=
283 TARGET_PAGE_SIZE */
284#ifdef VBOX
285 RTMemProtect(code_gen_buffer, sizeof(code_gen_buffer),
286 RTMEM_PROT_EXEC | RTMEM_PROT_READ | RTMEM_PROT_WRITE);
287 qemu_real_host_page_size = PAGE_SIZE;
288#else /* !VBOX */
289#ifdef _WIN32
290 {
291 SYSTEM_INFO system_info;
292 DWORD old_protect;
293
294 GetSystemInfo(&system_info);
295 qemu_real_host_page_size = system_info.dwPageSize;
296 }
297#else
298 qemu_real_host_page_size = getpagesize();
299#endif
300#endif /* !VBOX */
301
302 if (qemu_host_page_size == 0)
303 qemu_host_page_size = qemu_real_host_page_size;
304 if (qemu_host_page_size < TARGET_PAGE_SIZE)
305 qemu_host_page_size = TARGET_PAGE_SIZE;
306 qemu_host_page_bits = 0;
307#ifndef VBOX
308 while ((1 << qemu_host_page_bits) < qemu_host_page_size)
309#else
310 while ((1 << qemu_host_page_bits) < (int)qemu_host_page_size)
311#endif
312 qemu_host_page_bits++;
313 qemu_host_page_mask = ~(qemu_host_page_size - 1);
314 l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
315 memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
316#ifdef VBOX
317 /* We use other means to set reserved bit on our pages */
318#else
319#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
320 {
321 long long startaddr, endaddr;
322 FILE *f;
323 int n;
324
325 mmap_lock();
326 last_brk = (unsigned long)sbrk(0);
327 f = fopen("/proc/self/maps", "r");
328 if (f) {
329 do {
330 n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
331 if (n == 2) {
332 startaddr = MIN(startaddr,
333 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
334 endaddr = MIN(endaddr,
335 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
336 page_set_flags(startaddr & TARGET_PAGE_MASK,
337 TARGET_PAGE_ALIGN(endaddr),
338 PAGE_RESERVED);
339 }
340 } while (!feof(f));
341 fclose(f);
342 }
343 mmap_unlock();
344 }
345#endif
346#endif
347}
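/* Worked example of the rounding above (a sketch, assuming a 4 KB host page):
   qemu_host_page_size ends up as 4096, the loop leaves qemu_host_page_bits at
   12, and qemu_host_page_mask becomes ~0xfff, so (addr & qemu_host_page_mask)
   rounds an address down to its host page boundary, e.g. 0x12abc -> 0x12000. */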
348
349#ifndef VBOX
350static inline PageDesc **page_l1_map(target_ulong index)
351#else
352DECLINLINE(PageDesc **) page_l1_map(target_ulong index)
353#endif
354{
355#if TARGET_LONG_BITS > 32
356 /* Host memory outside guest VM. For 32-bit targets we have already
357 excluded high addresses. */
358# ifndef VBOX
359 if (index > ((target_ulong)L2_SIZE * L1_SIZE))
360 return NULL;
361# else /* VBOX */
362 AssertMsgReturn(index < (target_ulong)L2_SIZE * L1_SIZE,
363 ("index=%RGp >= %RGp; L1_SIZE=%#x L2_SIZE=%#x\n",
364 (RTGCPHYS)index, (RTGCPHYS)L2_SIZE * L1_SIZE, L1_SIZE, L2_SIZE),
365 NULL);
366# endif /* VBOX */
367
368#endif
369 return &l1_map[index >> L2_BITS];
370}
371
372#ifndef VBOX
373static inline PageDesc *page_find_alloc(target_ulong index)
374#else
375DECLINLINE(PageDesc *) page_find_alloc(target_ulong index)
376#endif
377{
378 PageDesc **lp, *p;
379 lp = page_l1_map(index);
380 if (!lp)
381 return NULL;
382
383 p = *lp;
384 if (!p) {
385 /* allocate if not found */
386#if defined(CONFIG_USER_ONLY)
387 unsigned long addr;
388 size_t len = sizeof(PageDesc) * L2_SIZE;
389 /* Don't use qemu_malloc because it may recurse. */
390 p = mmap(0, len, PROT_READ | PROT_WRITE,
391 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
392 *lp = p;
393 addr = h2g(p);
394 if (addr == (target_ulong)addr) {
395 page_set_flags(addr & TARGET_PAGE_MASK,
396 TARGET_PAGE_ALIGN(addr + len),
397 PAGE_RESERVED);
398 }
399#else
400 p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
401 *lp = p;
402#endif
403 }
404 return p + (index & (L2_SIZE - 1));
405}
406
407#ifndef VBOX
408static inline PageDesc *page_find(target_ulong index)
409#else
410DECLINLINE(PageDesc *) page_find(target_ulong index)
411#endif
412{
413 PageDesc **lp, *p;
414 lp = page_l1_map(index);
415 if (!lp)
416 return NULL;
417
418 p = *lp;
419 if (!p)
420 return 0;
421 return p + (index & (L2_SIZE - 1));
422}
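/* A minimal sketch of the two-level lookup implemented above, assuming the
   default L2_BITS == 10 layout; page_find_sketch is a hypothetical name used
   only for illustration.  One l1_map[] slot covers L2_SIZE pages and its chunk
   of PageDesc entries is allocated lazily by page_find_alloc(). */
#if 0
static PageDesc *page_find_sketch(target_ulong index)
{
    PageDesc *chunk = l1_map[index >> L2_BITS];   /* first level: one pointer per 2^L2_BITS pages */
    if (!chunk)
        return NULL;                              /* chunk not allocated yet */
    return &chunk[index & (L2_SIZE - 1)];         /* second level: PageDesc inside the chunk */
}
#endif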
423
424static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
425{
426 void **lp, **p;
427 PhysPageDesc *pd;
428
429 p = (void **)l1_phys_map;
430#if TARGET_PHYS_ADDR_SPACE_BITS > 32
431
432#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
433#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
434#endif
435 lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
436 p = *lp;
437 if (!p) {
438 /* allocate if not found */
439 if (!alloc)
440 return NULL;
441 p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
442 memset(p, 0, sizeof(void *) * L1_SIZE);
443 *lp = p;
444 }
445#endif
446 lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
447 pd = *lp;
448 if (!pd) {
449 int i;
450 /* allocate if not found */
451 if (!alloc)
452 return NULL;
453 pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
454 *lp = pd;
455 for (i = 0; i < L2_SIZE; i++)
456 pd[i].phys_offset = IO_MEM_UNASSIGNED;
457 }
458#if defined(VBOX) && !defined(VBOX_WITH_NEW_PHYS_CODE)
459 pd = ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
460 if (RT_UNLIKELY((pd->phys_offset & ~TARGET_PAGE_MASK) == IO_MEM_RAM_MISSING))
461 remR3GrowDynRange(pd->phys_offset & TARGET_PAGE_MASK);
462 return pd;
463#else
464 return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
465#endif
466}
467
468#ifndef VBOX
469static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
470#else
471DECLINLINE(PhysPageDesc *) phys_page_find(target_phys_addr_t index)
472#endif
473{
474 return phys_page_find_alloc(index, 0);
475}
476
477#if !defined(CONFIG_USER_ONLY)
478static void tlb_protect_code(ram_addr_t ram_addr);
479static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
480 target_ulong vaddr);
481#define mmap_lock() do { } while(0)
482#define mmap_unlock() do { } while(0)
483#endif
484
485#ifdef VBOX
486/*
487 * We don't need such a huge codegen buffer size, as we execute most of the
488 * code in raw or hwacc mode.
489 */
490#define DEFAULT_CODE_GEN_BUFFER_SIZE (8 * 1024 * 1024)
491#else
492#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
493#endif
494
495#if defined(CONFIG_USER_ONLY)
496/* Currently it is not recommended to allocate big chunks of data in
497 user mode. This will change when a dedicated libc is used. */
498#define USE_STATIC_CODE_GEN_BUFFER
499#endif
500
501/* VBox allocates codegen buffer dynamically */
502#ifndef VBOX
503#ifdef USE_STATIC_CODE_GEN_BUFFER
504static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
505#endif
506#endif
507
508static void code_gen_alloc(unsigned long tb_size)
509{
510#ifdef USE_STATIC_CODE_GEN_BUFFER
511 code_gen_buffer = static_code_gen_buffer;
512 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
513 map_exec(code_gen_buffer, code_gen_buffer_size);
514#else
515#ifdef VBOX
516 /* We cannot use phys_ram_size here, as it's 0 now,
517 * it only gets initialized once the RAM registration callback
518 * (REMR3NotifyPhysRamRegister()) is called.
519 */
520 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
521#else
522 code_gen_buffer_size = tb_size;
523 if (code_gen_buffer_size == 0) {
524#if defined(CONFIG_USER_ONLY)
525 /* in user mode, phys_ram_size is not meaningful */
526 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
527#else
528 /* XXX: needs adjustments */
529 code_gen_buffer_size = (unsigned long)(phys_ram_size / 4);
530#endif
531
532 }
533 if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
534 code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
535#endif /* VBOX */
536
537 /* The code gen buffer location may have constraints depending on
538 the host cpu and OS */
539#ifdef VBOX
540 code_gen_buffer = RTMemExecAlloc(code_gen_buffer_size);
541
542 if (!code_gen_buffer) {
543 LogRel(("REM: failed to allocate codegen buffer %lu\n",
544 code_gen_buffer_size));
545 return;
546 }
547#else //!VBOX
548#if defined(__linux__)
549 {
550 int flags;
551 void *start = NULL;
552
553 flags = MAP_PRIVATE | MAP_ANONYMOUS;
554#if defined(__x86_64__)
555 flags |= MAP_32BIT;
556 /* Cannot map more than that */
557 if (code_gen_buffer_size > (800 * 1024 * 1024))
558 code_gen_buffer_size = (800 * 1024 * 1024);
559#elif defined(__sparc_v9__)
560 // Map the buffer below 2G, so we can use direct calls and branches
561 flags |= MAP_FIXED;
562 start = (void *) 0x60000000UL;
563 if (code_gen_buffer_size > (512 * 1024 * 1024))
564 code_gen_buffer_size = (512 * 1024 * 1024);
565#endif
566 code_gen_buffer = mmap(start, code_gen_buffer_size,
567 PROT_WRITE | PROT_READ | PROT_EXEC,
568 flags, -1, 0);
569 if (code_gen_buffer == MAP_FAILED) {
570 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
571 exit(1);
572 }
573 }
574#elif defined(__FreeBSD__)
575 {
576 int flags;
577 void *addr = NULL;
578 flags = MAP_PRIVATE | MAP_ANONYMOUS;
579#if defined(__x86_64__)
580 /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
581 * 0x40000000 is free */
582 flags |= MAP_FIXED;
583 addr = (void *)0x40000000;
584 /* Cannot map more than that */
585 if (code_gen_buffer_size > (800 * 1024 * 1024))
586 code_gen_buffer_size = (800 * 1024 * 1024);
587#endif
588 code_gen_buffer = mmap(addr, code_gen_buffer_size,
589 PROT_WRITE | PROT_READ | PROT_EXEC,
590 flags, -1, 0);
591 if (code_gen_buffer == MAP_FAILED) {
592 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
593 exit(1);
594 }
595 }
596#else
597 code_gen_buffer = qemu_malloc(code_gen_buffer_size);
598 if (!code_gen_buffer) {
599 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
600 exit(1);
601 }
602 map_exec(code_gen_buffer, code_gen_buffer_size);
603#endif
604 map_exec(code_gen_prologue, sizeof(code_gen_prologue));
605#endif /* !VBOX */
606#endif /* !USE_STATIC_CODE_GEN_BUFFER */
607#ifndef VBOX
608 map_exec(code_gen_prologue, sizeof(code_gen_prologue));
609#else
610 map_exec(code_gen_prologue, _1K);
611#endif
612
613 code_gen_buffer_max_size = code_gen_buffer_size -
614 code_gen_max_block_size();
615 code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
616 tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
617}
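/* Sizing sketch for the allocation above, assuming CODE_GEN_AVG_BLOCK_SIZE is
   128 bytes: the 8 MB VBox buffer yields 8 MB / 128 = 65536 slots in tbs[],
   and code_gen_buffer_max_size keeps one maximum-sized block of headroom so a
   translation in progress can never run past the end of the buffer. */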
618
619/* Must be called before using the QEMU cpus. 'tb_size' is the size
620 (in bytes) allocated to the translation buffer. Zero means default
621 size. */
622void cpu_exec_init_all(unsigned long tb_size)
623{
624 cpu_gen_init();
625 code_gen_alloc(tb_size);
626 code_gen_ptr = code_gen_buffer;
627 page_init();
628#if !defined(CONFIG_USER_ONLY)
629 io_mem_init();
630#endif
631}
632
633#ifndef VBOX
634#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
635
636#define CPU_COMMON_SAVE_VERSION 1
637
638static void cpu_common_save(QEMUFile *f, void *opaque)
639{
640 CPUState *env = opaque;
641
642 qemu_put_be32s(f, &env->halted);
643 qemu_put_be32s(f, &env->interrupt_request);
644}
645
646static int cpu_common_load(QEMUFile *f, void *opaque, int version_id)
647{
648 CPUState *env = opaque;
649
650 if (version_id != CPU_COMMON_SAVE_VERSION)
651 return -EINVAL;
652
653 qemu_get_be32s(f, &env->halted);
654 qemu_get_be32s(f, &env->interrupt_request);
655 tlb_flush(env, 1);
656
657 return 0;
658}
659#endif
660#endif //!VBOX
661
662void cpu_exec_init(CPUState *env)
663{
664 CPUState **penv;
665 int cpu_index;
666
667 env->next_cpu = NULL;
668 penv = &first_cpu;
669 cpu_index = 0;
670 while (*penv != NULL) {
671 penv = (CPUState **)&(*penv)->next_cpu;
672 cpu_index++;
673 }
674 env->cpu_index = cpu_index;
675 env->nb_watchpoints = 0;
676 *penv = env;
677#ifndef VBOX
678#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
679 register_savevm("cpu_common", cpu_index, CPU_COMMON_SAVE_VERSION,
680 cpu_common_save, cpu_common_load, env);
681 register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
682 cpu_save, cpu_load, env);
683#endif
684#endif // !VBOX
685}
686
687#ifndef VBOX
688static inline void invalidate_page_bitmap(PageDesc *p)
689#else
690DECLINLINE(void) invalidate_page_bitmap(PageDesc *p)
691#endif
692{
693 if (p->code_bitmap) {
694 qemu_free(p->code_bitmap);
695 p->code_bitmap = NULL;
696 }
697 p->code_write_count = 0;
698}
699
700/* set to NULL all the 'first_tb' fields in all PageDescs */
701static void page_flush_tb(void)
702{
703 int i, j;
704 PageDesc *p;
705
706 for(i = 0; i < L1_SIZE; i++) {
707 p = l1_map[i];
708 if (p) {
709 for(j = 0; j < L2_SIZE; j++) {
710 p->first_tb = NULL;
711 invalidate_page_bitmap(p);
712 p++;
713 }
714 }
715 }
716}
717
718/* flush all the translation blocks */
719/* XXX: tb_flush is currently not thread safe */
720void tb_flush(CPUState *env1)
721{
722 CPUState *env;
723#if defined(DEBUG_FLUSH)
724 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
725 (unsigned long)(code_gen_ptr - code_gen_buffer),
726 nb_tbs, nb_tbs > 0 ?
727 ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
728#endif
729 if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
730 cpu_abort(env1, "Internal error: code buffer overflow\n");
731
732 nb_tbs = 0;
733
734 for(env = first_cpu; env != NULL; env = env->next_cpu) {
735 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
736 }
737
738 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
739 page_flush_tb();
740
741 code_gen_ptr = code_gen_buffer;
742 /* XXX: flush processor icache at this point if cache flush is
743 expensive */
744 tb_flush_count++;
745}
746
747#ifdef DEBUG_TB_CHECK
748static void tb_invalidate_check(target_ulong address)
749{
750 TranslationBlock *tb;
751 int i;
752 address &= TARGET_PAGE_MASK;
753 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
754 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
755 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
756 address >= tb->pc + tb->size)) {
757 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
758 address, (long)tb->pc, tb->size);
759 }
760 }
761 }
762}
763
764/* verify that all the pages have correct rights for code */
765static void tb_page_check(void)
766{
767 TranslationBlock *tb;
768 int i, flags1, flags2;
769
770 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
771 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
772 flags1 = page_get_flags(tb->pc);
773 flags2 = page_get_flags(tb->pc + tb->size - 1);
774 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
775 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
776 (long)tb->pc, tb->size, flags1, flags2);
777 }
778 }
779 }
780}
781
782static void tb_jmp_check(TranslationBlock *tb)
783{
784 TranslationBlock *tb1;
785 unsigned int n1;
786
787 /* suppress any remaining jumps to this TB */
788 tb1 = tb->jmp_first;
789 for(;;) {
790 n1 = (long)tb1 & 3;
791 tb1 = (TranslationBlock *)((long)tb1 & ~3);
792 if (n1 == 2)
793 break;
794 tb1 = tb1->jmp_next[n1];
795 }
796 /* check end of list */
797 if (tb1 != tb) {
798 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
799 }
800}
801#endif // DEBUG_TB_CHECK
802
803/* invalidate one TB */
804#ifndef VBOX
805static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
806 int next_offset)
807#else
808DECLINLINE(void) tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
809 int next_offset)
810#endif
811{
812 TranslationBlock *tb1;
813 for(;;) {
814 tb1 = *ptb;
815 if (tb1 == tb) {
816 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
817 break;
818 }
819 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
820 }
821}
822
823#ifndef VBOX
824static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
825#else
826DECLINLINE(void) tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
827#endif
828{
829 TranslationBlock *tb1;
830 unsigned int n1;
831
832 for(;;) {
833 tb1 = *ptb;
834 n1 = (long)tb1 & 3;
835 tb1 = (TranslationBlock *)((long)tb1 & ~3);
836 if (tb1 == tb) {
837 *ptb = tb1->page_next[n1];
838 break;
839 }
840 ptb = &tb1->page_next[n1];
841 }
842}
843
844#ifndef VBOX
845static inline void tb_jmp_remove(TranslationBlock *tb, int n)
846#else
847DECLINLINE(void) tb_jmp_remove(TranslationBlock *tb, int n)
848#endif
849{
850 TranslationBlock *tb1, **ptb;
851 unsigned int n1;
852
853 ptb = &tb->jmp_next[n];
854 tb1 = *ptb;
855 if (tb1) {
856 /* find tb(n) in circular list */
857 for(;;) {
858 tb1 = *ptb;
859 n1 = (long)tb1 & 3;
860 tb1 = (TranslationBlock *)((long)tb1 & ~3);
861 if (n1 == n && tb1 == tb)
862 break;
863 if (n1 == 2) {
864 ptb = &tb1->jmp_first;
865 } else {
866 ptb = &tb1->jmp_next[n1];
867 }
868 }
869 /* now we can suppress tb(n) from the list */
870 *ptb = tb->jmp_next[n];
871
872 tb->jmp_next[n] = NULL;
873 }
874}
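/* A note on the pointer tagging used by the list walks above (a sketch,
   assuming TranslationBlock pointers are at least 4-byte aligned): the two low
   bits of the pointers stored in page_next[]/jmp_next[]/jmp_first encode which
   slot (0 or 1) of the referencing TB the link came from, and the value 2
   marks the owning TB itself, i.e. the head of the circular jump list. */
#if 0
    n1  = (long)tb1 & 3;                           /* 0/1: jump slot, 2: list head */
    tb1 = (TranslationBlock *)((long)tb1 & ~3);    /* strip the tag to recover the pointer */
#endif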
875
876/* reset the jump entry 'n' of a TB so that it is not chained to
877 another TB */
878#ifndef VBOX
879static inline void tb_reset_jump(TranslationBlock *tb, int n)
880#else
881DECLINLINE(void) tb_reset_jump(TranslationBlock *tb, int n)
882#endif
883{
884 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
885}
886
887void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
888{
889 CPUState *env;
890 PageDesc *p;
891 unsigned int h, n1;
892 target_phys_addr_t phys_pc;
893 TranslationBlock *tb1, *tb2;
894
895 /* remove the TB from the hash list */
896 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
897 h = tb_phys_hash_func(phys_pc);
898 tb_remove(&tb_phys_hash[h], tb,
899 offsetof(TranslationBlock, phys_hash_next));
900
901 /* remove the TB from the page list */
902 if (tb->page_addr[0] != page_addr) {
903 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
904 tb_page_remove(&p->first_tb, tb);
905 invalidate_page_bitmap(p);
906 }
907 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
908 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
909 tb_page_remove(&p->first_tb, tb);
910 invalidate_page_bitmap(p);
911 }
912
913 tb_invalidated_flag = 1;
914
915 /* remove the TB from the hash list */
916 h = tb_jmp_cache_hash_func(tb->pc);
917 for(env = first_cpu; env != NULL; env = env->next_cpu) {
918 if (env->tb_jmp_cache[h] == tb)
919 env->tb_jmp_cache[h] = NULL;
920 }
921
922 /* suppress this TB from the two jump lists */
923 tb_jmp_remove(tb, 0);
924 tb_jmp_remove(tb, 1);
925
926 /* suppress any remaining jumps to this TB */
927 tb1 = tb->jmp_first;
928 for(;;) {
929 n1 = (long)tb1 & 3;
930 if (n1 == 2)
931 break;
932 tb1 = (TranslationBlock *)((long)tb1 & ~3);
933 tb2 = tb1->jmp_next[n1];
934 tb_reset_jump(tb1, n1);
935 tb1->jmp_next[n1] = NULL;
936 tb1 = tb2;
937 }
938 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
939
940 tb_phys_invalidate_count++;
941}
942
943
944#ifdef VBOX
945void tb_invalidate_virt(CPUState *env, uint32_t eip)
946{
947# if 1
948 tb_flush(env);
949# else
950 uint8_t *cs_base, *pc;
951 unsigned int flags, h, phys_pc;
952 TranslationBlock *tb, **ptb;
953
954 flags = env->hflags;
955 flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
956 cs_base = env->segs[R_CS].base;
957 pc = cs_base + eip;
958
959 tb = tb_find(&ptb, (unsigned long)pc, (unsigned long)cs_base,
960 flags);
961
962 if(tb)
963 {
964# ifdef DEBUG
965 printf("invalidating TB (%08X) at %08X\n", tb, eip);
966# endif
967 tb_invalidate(tb);
968 //Note: this will leak TBs, but the whole cache will be flushed
969 // when it happens too often
970 tb->pc = 0;
971 tb->cs_base = 0;
972 tb->flags = 0;
973 }
974# endif
975}
976
977# ifdef VBOX_STRICT
978/**
979 * Gets the page offset.
980 */
981unsigned long get_phys_page_offset(target_ulong addr)
982{
983 PhysPageDesc *p = phys_page_find(addr >> TARGET_PAGE_BITS);
984 return p ? p->phys_offset : 0;
985}
986# endif /* VBOX_STRICT */
987#endif /* VBOX */
988
989#ifndef VBOX
990static inline void set_bits(uint8_t *tab, int start, int len)
991#else
992DECLINLINE(void) set_bits(uint8_t *tab, int start, int len)
993#endif
994{
995 int end, mask, end1;
996
997 end = start + len;
998 tab += start >> 3;
999 mask = 0xff << (start & 7);
1000 if ((start & ~7) == (end & ~7)) {
1001 if (start < end) {
1002 mask &= ~(0xff << (end & 7));
1003 *tab |= mask;
1004 }
1005 } else {
1006 *tab++ |= mask;
1007 start = (start + 8) & ~7;
1008 end1 = end & ~7;
1009 while (start < end1) {
1010 *tab++ = 0xff;
1011 start += 8;
1012 }
1013 if (start < end) {
1014 mask = ~(0xff << (end & 7));
1015 *tab |= mask;
1016 }
1017 }
1018}
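/* A worked example of set_bits(), as used by build_page_bitmap() below
   (illustrative values only): marking 7 bytes starting at bit offset 5 covers
   bits 5..11, i.e. the top three bits of tab[0] and the low four bits of
   tab[1]. */
#if 0
    uint8_t tab[TARGET_PAGE_SIZE / 8] = { 0 };
    set_bits(tab, 5, 7);    /* afterwards tab[0] == 0xe0 and tab[1] == 0x0f */
#endif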
1019
1020static void build_page_bitmap(PageDesc *p)
1021{
1022 int n, tb_start, tb_end;
1023 TranslationBlock *tb;
1024
1025 p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
1026 if (!p->code_bitmap)
1027 return;
1028 memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);
1029
1030 tb = p->first_tb;
1031 while (tb != NULL) {
1032 n = (long)tb & 3;
1033 tb = (TranslationBlock *)((long)tb & ~3);
1034 /* NOTE: this is subtle as a TB may span two physical pages */
1035 if (n == 0) {
1036 /* NOTE: tb_end may be after the end of the page, but
1037 it is not a problem */
1038 tb_start = tb->pc & ~TARGET_PAGE_MASK;
1039 tb_end = tb_start + tb->size;
1040 if (tb_end > TARGET_PAGE_SIZE)
1041 tb_end = TARGET_PAGE_SIZE;
1042 } else {
1043 tb_start = 0;
1044 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1045 }
1046 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
1047 tb = tb->page_next[n];
1048 }
1049}
1050
1051TranslationBlock *tb_gen_code(CPUState *env,
1052 target_ulong pc, target_ulong cs_base,
1053 int flags, int cflags)
1054{
1055 TranslationBlock *tb;
1056 uint8_t *tc_ptr;
1057 target_ulong phys_pc, phys_page2, virt_page2;
1058 int code_gen_size;
1059
1060 phys_pc = get_phys_addr_code(env, pc);
1061 tb = tb_alloc(pc);
1062 if (!tb) {
1063 /* flush must be done */
1064 tb_flush(env);
1065 /* cannot fail at this point */
1066 tb = tb_alloc(pc);
1067 /* Don't forget to invalidate previous TB info. */
1068 tb_invalidated_flag = 1;
1069 }
1070 tc_ptr = code_gen_ptr;
1071 tb->tc_ptr = tc_ptr;
1072 tb->cs_base = cs_base;
1073 tb->flags = flags;
1074 tb->cflags = cflags;
1075 cpu_gen_code(env, tb, &code_gen_size);
1076 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
1077
1078 /* check next page if needed */
1079 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
1080 phys_page2 = -1;
1081 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
1082 phys_page2 = get_phys_addr_code(env, virt_page2);
1083 }
1084 tb_link_phys(tb, phys_pc, phys_page2);
1085 return tb;
1086}
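/* Minimal sketch of how a translation is normally requested (illustrative
   only; pc, cs_base and flags are placeholders here): the real call sites are
   the cpu_exec() lookup-miss path and the SMC handling below, and the fresh TB
   becomes visible to later lookups through tb_link_phys(). */
#if 0
    TranslationBlock *tb = tb_gen_code(env, pc, cs_base, flags, 0 /* cflags */);
#endif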
1087
1088/* invalidate all TBs which intersect with the target physical page
1089 starting in range [start;end[. NOTE: start and end must refer to
1090 the same physical page. 'is_cpu_write_access' should be true if called
1091 from a real cpu write access: the virtual CPU will exit the current
1092 TB if code is modified inside this TB. */
1093void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
1094 int is_cpu_write_access)
1095{
1096 int n, current_tb_modified, current_tb_not_found, current_flags;
1097 CPUState *env = cpu_single_env;
1098 PageDesc *p;
1099 TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
1100 target_ulong tb_start, tb_end;
1101 target_ulong current_pc, current_cs_base;
1102
1103 p = page_find(start >> TARGET_PAGE_BITS);
1104 if (!p)
1105 return;
1106 if (!p->code_bitmap &&
1107 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
1108 is_cpu_write_access) {
1109 /* build code bitmap */
1110 build_page_bitmap(p);
1111 }
1112
1113 /* we remove all the TBs in the range [start, end[ */
1114 /* XXX: see if in some cases it could be faster to invalidate all the code */
1115 current_tb_not_found = is_cpu_write_access;
1116 current_tb_modified = 0;
1117 current_tb = NULL; /* avoid warning */
1118 current_pc = 0; /* avoid warning */
1119 current_cs_base = 0; /* avoid warning */
1120 current_flags = 0; /* avoid warning */
1121 tb = p->first_tb;
1122 while (tb != NULL) {
1123 n = (long)tb & 3;
1124 tb = (TranslationBlock *)((long)tb & ~3);
1125 tb_next = tb->page_next[n];
1126 /* NOTE: this is subtle as a TB may span two physical pages */
1127 if (n == 0) {
1128 /* NOTE: tb_end may be after the end of the page, but
1129 it is not a problem */
1130 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
1131 tb_end = tb_start + tb->size;
1132 } else {
1133 tb_start = tb->page_addr[1];
1134 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1135 }
1136 if (!(tb_end <= start || tb_start >= end)) {
1137#ifdef TARGET_HAS_PRECISE_SMC
1138 if (current_tb_not_found) {
1139 current_tb_not_found = 0;
1140 current_tb = NULL;
1141 if (env->mem_io_pc) {
1142 /* now we have a real cpu fault */
1143 current_tb = tb_find_pc(env->mem_io_pc);
1144 }
1145 }
1146 if (current_tb == tb &&
1147 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1148 /* If we are modifying the current TB, we must stop
1149 its execution. We could be more precise by checking
1150 that the modification is after the current PC, but it
1151 would require a specialized function to partially
1152 restore the CPU state */
1153
1154 current_tb_modified = 1;
1155 cpu_restore_state(current_tb, env,
1156 env->mem_io_pc, NULL);
1157#if defined(TARGET_I386)
1158 current_flags = env->hflags;
1159 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
1160 current_cs_base = (target_ulong)env->segs[R_CS].base;
1161 current_pc = current_cs_base + env->eip;
1162#else
1163#error unsupported CPU
1164#endif
1165 }
1166#endif /* TARGET_HAS_PRECISE_SMC */
1167 /* we need to do that to handle the case where a signal
1168 occurs while doing tb_phys_invalidate() */
1169 saved_tb = NULL;
1170 if (env) {
1171 saved_tb = env->current_tb;
1172 env->current_tb = NULL;
1173 }
1174 tb_phys_invalidate(tb, -1);
1175 if (env) {
1176 env->current_tb = saved_tb;
1177 if (env->interrupt_request && env->current_tb)
1178 cpu_interrupt(env, env->interrupt_request);
1179 }
1180 }
1181 tb = tb_next;
1182 }
1183#if !defined(CONFIG_USER_ONLY)
1184 /* if no code remaining, no need to continue to use slow writes */
1185 if (!p->first_tb) {
1186 invalidate_page_bitmap(p);
1187 if (is_cpu_write_access) {
1188 tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
1189 }
1190 }
1191#endif
1192#ifdef TARGET_HAS_PRECISE_SMC
1193 if (current_tb_modified) {
1194 /* we generate a block containing just the instruction
1195 modifying the memory. It will ensure that it cannot modify
1196 itself */
1197 env->current_tb = NULL;
1198 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1199 cpu_resume_from_signal(env, NULL);
1200 }
1201#endif
1202}
1203
1204
1205/* len must be <= 8 and start must be a multiple of len */
1206#ifndef VBOX
1207static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
1208#else
1209DECLINLINE(void) tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
1210#endif
1211{
1212 PageDesc *p;
1213 int offset, b;
1214#if 0
1215 if (1) {
1216 if (loglevel) {
1217 fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1218 cpu_single_env->mem_io_vaddr, len,
1219 cpu_single_env->eip,
1220 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
1221 }
1222 }
1223#endif
1224 p = page_find(start >> TARGET_PAGE_BITS);
1225 if (!p)
1226 return;
1227 if (p->code_bitmap) {
1228 offset = start & ~TARGET_PAGE_MASK;
1229 b = p->code_bitmap[offset >> 3] >> (offset & 7);
1230 if (b & ((1 << len) - 1))
1231 goto do_invalidate;
1232 } else {
1233 do_invalidate:
1234 tb_invalidate_phys_page_range(start, start + len, 1);
1235 }
1236}
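/* Worked example for the fast path above, assuming a 4-byte guest write at
   page offset 0x40: offset >> 3 selects code_bitmap[8], (offset & 7) is 0, so
   b & ((1 << 4) - 1) tests exactly the four bitmap bits of the bytes being
   written, and the slow tb_invalidate_phys_page_range() path is taken only if
   one of those bytes belongs to a translated block. */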
1237
1238
1239#if !defined(CONFIG_SOFTMMU)
1240static void tb_invalidate_phys_page(target_phys_addr_t addr,
1241 unsigned long pc, void *puc)
1242{
1243 int n, current_flags, current_tb_modified;
1244 target_ulong current_pc, current_cs_base;
1245 PageDesc *p;
1246 TranslationBlock *tb, *current_tb;
1247#ifdef TARGET_HAS_PRECISE_SMC
1248 CPUState *env = cpu_single_env;
1249#endif
1250
1251 addr &= TARGET_PAGE_MASK;
1252 p = page_find(addr >> TARGET_PAGE_BITS);
1253 if (!p)
1254 return;
1255 tb = p->first_tb;
1256 current_tb_modified = 0;
1257 current_tb = NULL;
1258 current_pc = 0; /* avoid warning */
1259 current_cs_base = 0; /* avoid warning */
1260 current_flags = 0; /* avoid warning */
1261#ifdef TARGET_HAS_PRECISE_SMC
1262 if (tb && pc != 0) {
1263 current_tb = tb_find_pc(pc);
1264 }
1265#endif
1266 while (tb != NULL) {
1267 n = (long)tb & 3;
1268 tb = (TranslationBlock *)((long)tb & ~3);
1269#ifdef TARGET_HAS_PRECISE_SMC
1270 if (current_tb == tb &&
1271 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1272 /* If we are modifying the current TB, we must stop
1273 its execution. We could be more precise by checking
1274 that the modification is after the current PC, but it
1275 would require a specialized function to partially
1276 restore the CPU state */
1277
1278 current_tb_modified = 1;
1279 cpu_restore_state(current_tb, env, pc, puc);
1280#if defined(TARGET_I386)
1281 current_flags = env->hflags;
1282 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
1283 current_cs_base = (target_ulong)env->segs[R_CS].base;
1284 current_pc = current_cs_base + env->eip;
1285#else
1286#error unsupported CPU
1287#endif
1288 }
1289#endif /* TARGET_HAS_PRECISE_SMC */
1290 tb_phys_invalidate(tb, addr);
1291 tb = tb->page_next[n];
1292 }
1293 p->first_tb = NULL;
1294#ifdef TARGET_HAS_PRECISE_SMC
1295 if (current_tb_modified) {
1296 /* we generate a block containing just the instruction
1297 modifying the memory. It will ensure that it cannot modify
1298 itself */
1299 env->current_tb = NULL;
1300 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1301 cpu_resume_from_signal(env, puc);
1302 }
1303#endif
1304}
1305#endif
1306
1307/* add the tb in the target page and protect it if necessary */
1308#ifndef VBOX
1309static inline void tb_alloc_page(TranslationBlock *tb,
1310 unsigned int n, target_ulong page_addr)
1311#else
1312DECLINLINE(void) tb_alloc_page(TranslationBlock *tb,
1313 unsigned int n, target_ulong page_addr)
1314#endif
1315{
1316 PageDesc *p;
1317 TranslationBlock *last_first_tb;
1318
1319 tb->page_addr[n] = page_addr;
1320 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
1321 tb->page_next[n] = p->first_tb;
1322 last_first_tb = p->first_tb;
1323 p->first_tb = (TranslationBlock *)((long)tb | n);
1324 invalidate_page_bitmap(p);
1325
1326#if defined(TARGET_HAS_SMC) || 1
1327
1328#if defined(CONFIG_USER_ONLY)
1329 if (p->flags & PAGE_WRITE) {
1330 target_ulong addr;
1331 PageDesc *p2;
1332 int prot;
1333
1334 /* force the host page as non writable (writes will have a
1335 page fault + mprotect overhead) */
1336 page_addr &= qemu_host_page_mask;
1337 prot = 0;
1338 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1339 addr += TARGET_PAGE_SIZE) {
1340
1341 p2 = page_find (addr >> TARGET_PAGE_BITS);
1342 if (!p2)
1343 continue;
1344 prot |= p2->flags;
1345 p2->flags &= ~PAGE_WRITE;
1346 page_get_flags(addr);
1347 }
1348 mprotect(g2h(page_addr), qemu_host_page_size,
1349 (prot & PAGE_BITS) & ~PAGE_WRITE);
1350#ifdef DEBUG_TB_INVALIDATE
1351 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
1352 page_addr);
1353#endif
1354 }
1355#else
1356 /* if some code is already present, then the pages are already
1357 protected. So we handle the case where only the first TB is
1358 allocated in a physical page */
1359 if (!last_first_tb) {
1360 tlb_protect_code(page_addr);
1361 }
1362#endif
1363
1364#endif /* TARGET_HAS_SMC */
1365}
1366
1367/* Allocate a new translation block. Flush the translation buffer if
1368 too many translation blocks or too much generated code. */
1369TranslationBlock *tb_alloc(target_ulong pc)
1370{
1371 TranslationBlock *tb;
1372
1373 if (nb_tbs >= code_gen_max_blocks ||
1374#ifndef VBOX
1375 (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
1376#else
1377 (code_gen_ptr - code_gen_buffer) >= (int)code_gen_buffer_max_size)
1378#endif
1379 return NULL;
1380 tb = &tbs[nb_tbs++];
1381 tb->pc = pc;
1382 tb->cflags = 0;
1383 return tb;
1384}
1385
1386void tb_free(TranslationBlock *tb)
1387{
1388 /* In practice this is mostly used for single-use temporary TBs.
1389 Ignore the hard cases and just back up if this TB happens to
1390 be the last one generated. */
1391 if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
1392 code_gen_ptr = tb->tc_ptr;
1393 nb_tbs--;
1394 }
1395}
1396
1397/* add a new TB and link it to the physical page tables. phys_page2 is
1398 (-1) to indicate that only one page contains the TB. */
1399void tb_link_phys(TranslationBlock *tb,
1400 target_ulong phys_pc, target_ulong phys_page2)
1401{
1402 unsigned int h;
1403 TranslationBlock **ptb;
1404
1405 /* Grab the mmap lock to stop another thread invalidating this TB
1406 before we are done. */
1407 mmap_lock();
1408 /* add in the physical hash table */
1409 h = tb_phys_hash_func(phys_pc);
1410 ptb = &tb_phys_hash[h];
1411 tb->phys_hash_next = *ptb;
1412 *ptb = tb;
1413
1414 /* add in the page list */
1415 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1416 if (phys_page2 != -1)
1417 tb_alloc_page(tb, 1, phys_page2);
1418 else
1419 tb->page_addr[1] = -1;
1420
1421 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1422 tb->jmp_next[0] = NULL;
1423 tb->jmp_next[1] = NULL;
1424
1425 /* init original jump addresses */
1426 if (tb->tb_next_offset[0] != 0xffff)
1427 tb_reset_jump(tb, 0);
1428 if (tb->tb_next_offset[1] != 0xffff)
1429 tb_reset_jump(tb, 1);
1430
1431#ifdef DEBUG_TB_CHECK
1432 tb_page_check();
1433#endif
1434 mmap_unlock();
1435}
1436
1437/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1438 tb[1].tc_ptr. Return NULL if not found */
1439TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1440{
1441 int m_min, m_max, m;
1442 unsigned long v;
1443 TranslationBlock *tb;
1444
1445 if (nb_tbs <= 0)
1446 return NULL;
1447 if (tc_ptr < (unsigned long)code_gen_buffer ||
1448 tc_ptr >= (unsigned long)code_gen_ptr)
1449 return NULL;
1450 /* binary search (cf Knuth) */
1451 m_min = 0;
1452 m_max = nb_tbs - 1;
1453 while (m_min <= m_max) {
1454 m = (m_min + m_max) >> 1;
1455 tb = &tbs[m];
1456 v = (unsigned long)tb->tc_ptr;
1457 if (v == tc_ptr)
1458 return tb;
1459 else if (tc_ptr < v) {
1460 m_max = m - 1;
1461 } else {
1462 m_min = m + 1;
1463 }
1464 }
1465 return &tbs[m_max];
1466}
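/* A minimal usage sketch for tb_find_pc() (illustrative only, host_pc is a
   placeholder): given the host address of a faulting instruction inside
   generated code, the returned TB is what cpu_restore_state() uses to recover
   the guest state, as done in tb_invalidate_phys_page_range() above. */
#if 0
    TranslationBlock *tb = tb_find_pc(host_pc);
    if (tb)
        cpu_restore_state(tb, env, host_pc, NULL);
#endif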
1467
1468static void tb_reset_jump_recursive(TranslationBlock *tb);
1469
1470#ifndef VBOX
1471static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1472#else
1473DECLINLINE(void) tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1474#endif
1475{
1476 TranslationBlock *tb1, *tb_next, **ptb;
1477 unsigned int n1;
1478
1479 tb1 = tb->jmp_next[n];
1480 if (tb1 != NULL) {
1481 /* find head of list */
1482 for(;;) {
1483 n1 = (long)tb1 & 3;
1484 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1485 if (n1 == 2)
1486 break;
1487 tb1 = tb1->jmp_next[n1];
1488 }
1489 /* we are now sure that tb jumps to tb1 */
1490 tb_next = tb1;
1491
1492 /* remove tb from the jmp_first list */
1493 ptb = &tb_next->jmp_first;
1494 for(;;) {
1495 tb1 = *ptb;
1496 n1 = (long)tb1 & 3;
1497 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1498 if (n1 == n && tb1 == tb)
1499 break;
1500 ptb = &tb1->jmp_next[n1];
1501 }
1502 *ptb = tb->jmp_next[n];
1503 tb->jmp_next[n] = NULL;
1504
1505 /* suppress the jump to next tb in generated code */
1506 tb_reset_jump(tb, n);
1507
1508 /* suppress jumps in the tb on which we could have jumped */
1509 tb_reset_jump_recursive(tb_next);
1510 }
1511}
1512
1513static void tb_reset_jump_recursive(TranslationBlock *tb)
1514{
1515 tb_reset_jump_recursive2(tb, 0);
1516 tb_reset_jump_recursive2(tb, 1);
1517}
1518
1519#if defined(TARGET_HAS_ICE)
1520static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1521{
1522 target_ulong addr, pd;
1523 ram_addr_t ram_addr;
1524 PhysPageDesc *p;
1525
1526 addr = cpu_get_phys_page_debug(env, pc);
1527 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1528 if (!p) {
1529 pd = IO_MEM_UNASSIGNED;
1530 } else {
1531 pd = p->phys_offset;
1532 }
1533 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1534 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1535}
1536#endif
1537
1538/* Add a watchpoint. */
1539int cpu_watchpoint_insert(CPUState *env, target_ulong addr, int type)
1540{
1541 int i;
1542
1543 for (i = 0; i < env->nb_watchpoints; i++) {
1544 if (addr == env->watchpoint[i].vaddr)
1545 return 0;
1546 }
1547 if (env->nb_watchpoints >= MAX_WATCHPOINTS)
1548 return -1;
1549
1550 i = env->nb_watchpoints++;
1551 env->watchpoint[i].vaddr = addr;
1552 env->watchpoint[i].type = type;
1553 tlb_flush_page(env, addr);
1554 /* FIXME: This flush is needed because of the hack to make memory ops
1555 terminate the TB. It can be removed once the proper IO trap and
1556 re-execute bits are in. */
1557 tb_flush(env);
1558 return i;
1559}
1560
1561/* Remove a watchpoint. */
1562int cpu_watchpoint_remove(CPUState *env, target_ulong addr)
1563{
1564 int i;
1565
1566 for (i = 0; i < env->nb_watchpoints; i++) {
1567 if (addr == env->watchpoint[i].vaddr) {
1568 env->nb_watchpoints--;
1569 env->watchpoint[i] = env->watchpoint[env->nb_watchpoints];
1570 tlb_flush_page(env, addr);
1571 return 0;
1572 }
1573 }
1574 return -1;
1575}
1576
1577/* Remove all watchpoints. */
1578void cpu_watchpoint_remove_all(CPUState *env) {
1579 int i;
1580
1581 for (i = 0; i < env->nb_watchpoints; i++) {
1582 tlb_flush_page(env, env->watchpoint[i].vaddr);
1583 }
1584 env->nb_watchpoints = 0;
1585}
1586
1587/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
1588 breakpoint is reached */
1589int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
1590{
1591#if defined(TARGET_HAS_ICE)
1592 int i;
1593
1594 for(i = 0; i < env->nb_breakpoints; i++) {
1595 if (env->breakpoints[i] == pc)
1596 return 0;
1597 }
1598
1599 if (env->nb_breakpoints >= MAX_BREAKPOINTS)
1600 return -1;
1601 env->breakpoints[env->nb_breakpoints++] = pc;
1602
1603 breakpoint_invalidate(env, pc);
1604 return 0;
1605#else
1606 return -1;
1607#endif
1608}
1609
1610/* remove all breakpoints */
1611void cpu_breakpoint_remove_all(CPUState *env) {
1612#if defined(TARGET_HAS_ICE)
1613 int i;
1614 for(i = 0; i < env->nb_breakpoints; i++) {
1615 breakpoint_invalidate(env, env->breakpoints[i]);
1616 }
1617 env->nb_breakpoints = 0;
1618#endif
1619}
1620
1621/* remove a breakpoint */
1622int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
1623{
1624#if defined(TARGET_HAS_ICE)
1625 int i;
1626 for(i = 0; i < env->nb_breakpoints; i++) {
1627 if (env->breakpoints[i] == pc)
1628 goto found;
1629 }
1630 return -1;
1631 found:
1632 env->nb_breakpoints--;
1633 if (i < env->nb_breakpoints)
1634 env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];
1635
1636 breakpoint_invalidate(env, pc);
1637 return 0;
1638#else
1639 return -1;
1640#endif
1641}
1642
1643/* enable or disable single step mode. EXCP_DEBUG is returned by the
1644 CPU loop after each instruction */
1645void cpu_single_step(CPUState *env, int enabled)
1646{
1647#if defined(TARGET_HAS_ICE)
1648 if (env->singlestep_enabled != enabled) {
1649 env->singlestep_enabled = enabled;
1650 /* must flush all the translated code to avoid inconsistencies */
1651 /* XXX: only flush what is necessary */
1652 tb_flush(env);
1653 }
1654#endif
1655}
1656
1657#ifndef VBOX
1658/* enable or disable low levels log */
1659void cpu_set_log(int log_flags)
1660{
1661 loglevel = log_flags;
1662 if (loglevel && !logfile) {
1663 logfile = fopen(logfilename, "w");
1664 if (!logfile) {
1665 perror(logfilename);
1666 _exit(1);
1667 }
1668#if !defined(CONFIG_SOFTMMU)
1669 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1670 {
1671 static uint8_t logfile_buf[4096];
1672 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1673 }
1674#else
1675 setvbuf(logfile, NULL, _IOLBF, 0);
1676#endif
1677 }
1678}
1679
1680void cpu_set_log_filename(const char *filename)
1681{
1682 logfilename = strdup(filename);
1683}
1684#endif /* !VBOX */
1685
1686/* mask must never be zero, except for A20 change call */
1687void cpu_interrupt(CPUState *env, int mask)
1688{
1689#if !defined(USE_NPTL)
1690 TranslationBlock *tb;
1691 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
1692#endif
1693 int old_mask;
1694
1695 old_mask = env->interrupt_request;
1696#ifdef VBOX
1697 VM_ASSERT_EMT(env->pVM);
1698 ASMAtomicOrS32((int32_t volatile *)&env->interrupt_request, mask);
1699#else /* !VBOX */
1700 /* FIXME: This is probably not threadsafe. A different thread could
1701 be in the middle of a read-modify-write operation. */
1702 env->interrupt_request |= mask;
1703#endif /* !VBOX */
1704#if defined(USE_NPTL)
1705 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1706 problem and hope the cpu will stop of its own accord. For userspace
1707 emulation this often isn't actually as bad as it sounds. Often
1708 signals are used primarily to interrupt blocking syscalls. */
1709#else
1710 if (use_icount) {
1711 env->icount_decr.u16.high = 0xffff;
1712#ifndef CONFIG_USER_ONLY
1713 /* CPU_INTERRUPT_EXIT isn't a real interrupt. It just means
1714 an async event happened and we need to process it. */
1715 if (!can_do_io(env)
1716 && (mask & ~(old_mask | CPU_INTERRUPT_EXIT)) != 0) {
1717 cpu_abort(env, "Raised interrupt while not in I/O function");
1718 }
1719#endif
1720 } else {
1721 tb = env->current_tb;
1722 /* if the cpu is currently executing code, we must unlink it and
1723 all the potentially executing TB */
1724 if (tb && !testandset(&interrupt_lock)) {
1725 env->current_tb = NULL;
1726 tb_reset_jump_recursive(tb);
1727 resetlock(&interrupt_lock);
1728 }
1729 }
1730#endif
1731}
1732
1733void cpu_reset_interrupt(CPUState *env, int mask)
1734{
1735#ifdef VBOX
1736 /*
1737 * Note: the current implementation can be executed by another thread without problems; make sure this remains true
1738 * for future changes!
1739 */
1740 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~mask);
1741#else /* !VBOX */
1742 env->interrupt_request &= ~mask;
1743#endif /* !VBOX */
1744}
1745
1746#ifndef VBOX
1747CPULogItem cpu_log_items[] = {
1748 { CPU_LOG_TB_OUT_ASM, "out_asm",
1749 "show generated host assembly code for each compiled TB" },
1750 { CPU_LOG_TB_IN_ASM, "in_asm",
1751 "show target assembly code for each compiled TB" },
1752 { CPU_LOG_TB_OP, "op",
1753 "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
1754#ifdef TARGET_I386
1755 { CPU_LOG_TB_OP_OPT, "op_opt",
1756 "show micro ops after optimization for each compiled TB" },
1757#endif
1758 { CPU_LOG_INT, "int",
1759 "show interrupts/exceptions in short format" },
1760 { CPU_LOG_EXEC, "exec",
1761 "show trace before each executed TB (lots of logs)" },
1762 { CPU_LOG_TB_CPU, "cpu",
1763 "show CPU state before bloc translation" },
1764#ifdef TARGET_I386
1765 { CPU_LOG_PCALL, "pcall",
1766 "show protected mode far calls/returns/exceptions" },
1767#endif
1768#ifdef DEBUG_IOPORT
1769 { CPU_LOG_IOPORT, "ioport",
1770 "show all i/o ports accesses" },
1771#endif
1772 { 0, NULL, NULL },
1773};
1774
1775static int cmp1(const char *s1, int n, const char *s2)
1776{
1777 if (strlen(s2) != n)
1778 return 0;
1779 return memcmp(s1, s2, n) == 0;
1780}
1781
1782/* takes a comma-separated list of log masks. Returns 0 on error. */
1783int cpu_str_to_log_mask(const char *str)
1784{
1785 CPULogItem *item;
1786 int mask;
1787 const char *p, *p1;
1788
1789 p = str;
1790 mask = 0;
1791 for(;;) {
1792 p1 = strchr(p, ',');
1793 if (!p1)
1794 p1 = p + strlen(p);
1795 if(cmp1(p,p1-p,"all")) {
1796 for(item = cpu_log_items; item->mask != 0; item++) {
1797 mask |= item->mask;
1798 }
1799 } else {
1800 for(item = cpu_log_items; item->mask != 0; item++) {
1801 if (cmp1(p, p1 - p, item->name))
1802 goto found;
1803 }
1804 return 0;
1805 }
1806 found:
1807 mask |= item->mask;
1808 if (*p1 != ',')
1809 break;
1810 p = p1 + 1;
1811 }
1812 return mask;
1813}
1814#endif /* !VBOX */
1815
1816#ifndef VBOX /* VBOX: we have our own routine. */
1817void cpu_abort(CPUState *env, const char *fmt, ...)
1818{
1819 va_list ap;
1820
1821 va_start(ap, fmt);
1822 fprintf(stderr, "qemu: fatal: ");
1823 vfprintf(stderr, fmt, ap);
1824 fprintf(stderr, "\n");
1825#ifdef TARGET_I386
1826 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1827#else
1828 cpu_dump_state(env, stderr, fprintf, 0);
1829#endif
1830 va_end(ap);
1831 abort();
1832}
1833#endif /* !VBOX */
1834
1835#ifndef VBOX
1836CPUState *cpu_copy(CPUState *env)
1837{
1838 CPUState *new_env = cpu_init(env->cpu_model_str);
1839 /* preserve chaining and index */
1840 CPUState *next_cpu = new_env->next_cpu;
1841 int cpu_index = new_env->cpu_index;
1842 memcpy(new_env, env, sizeof(CPUState));
1843 new_env->next_cpu = next_cpu;
1844 new_env->cpu_index = cpu_index;
1845 return new_env;
1846}
1847#endif
1848
1849#if !defined(CONFIG_USER_ONLY)
1850
1851#ifndef VBOX
1852static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1853#else
1854DECLINLINE(void) tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1855#endif
1856{
1857 unsigned int i;
1858
1859 /* Discard jump cache entries for any tb which might potentially
1860 overlap the flushed page. */
1861 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1862 memset (&env->tb_jmp_cache[i], 0,
1863 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1864
1865 i = tb_jmp_cache_hash_page(addr);
1866 memset (&env->tb_jmp_cache[i], 0,
1867 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1868
1869#ifdef VBOX
1870 /* inform raw mode about TLB page flush */
1871 remR3FlushPage(env, addr);
1872#endif /* VBOX */
1873}
1874
1875/* NOTE: if flush_global is true, also flush global entries (not
1876 implemented yet) */
1877void tlb_flush(CPUState *env, int flush_global)
1878{
1879 int i;
1880#if defined(DEBUG_TLB)
1881 printf("tlb_flush:\n");
1882#endif
1883 /* must reset current TB so that interrupts cannot modify the
1884 links while we are modifying them */
1885 env->current_tb = NULL;
1886
1887 for(i = 0; i < CPU_TLB_SIZE; i++) {
1888 env->tlb_table[0][i].addr_read = -1;
1889 env->tlb_table[0][i].addr_write = -1;
1890 env->tlb_table[0][i].addr_code = -1;
1891 env->tlb_table[1][i].addr_read = -1;
1892 env->tlb_table[1][i].addr_write = -1;
1893 env->tlb_table[1][i].addr_code = -1;
1894#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
1895 env->phys_addends[0][i] = -1;
1896 env->phys_addends[1][i] = -1;
1897#endif
1898#if (NB_MMU_MODES >= 3)
1899 env->tlb_table[2][i].addr_read = -1;
1900 env->tlb_table[2][i].addr_write = -1;
1901 env->tlb_table[2][i].addr_code = -1;
1902#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
1903 env->phys_addends[2][i] = -1;
1904#endif
1905#if (NB_MMU_MODES == 4)
1906 env->tlb_table[3][i].addr_read = -1;
1907 env->tlb_table[3][i].addr_write = -1;
1908 env->tlb_table[3][i].addr_code = -1;
1909#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
1910 env->phys_addends[3][i] = -1;
1911#endif
1912#endif
1913#endif
1914 }
1915
1916 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1917
1918#ifdef VBOX
1919 /* inform raw mode about TLB flush */
1920 remR3FlushTLB(env, flush_global);
1921#endif
1922#ifdef USE_KQEMU
1923 if (env->kqemu_enabled) {
1924 kqemu_flush(env, flush_global);
1925 }
1926#endif
1927 tlb_flush_count++;
1928}
1929
1930#ifndef VBOX
1931static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1932#else
1933DECLINLINE(void) tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1934#endif
1935{
1936 if (addr == (tlb_entry->addr_read &
1937 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1938 addr == (tlb_entry->addr_write &
1939 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1940 addr == (tlb_entry->addr_code &
1941 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1942 tlb_entry->addr_read = -1;
1943 tlb_entry->addr_write = -1;
1944 tlb_entry->addr_code = -1;
1945 }
1946}
1947
1948void tlb_flush_page(CPUState *env, target_ulong addr)
1949{
1950 int i;
1951
1952#if defined(DEBUG_TLB)
1953 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1954#endif
1955 /* must reset current TB so that interrupts cannot modify the
1956 links while we are modifying them */
1957 env->current_tb = NULL;
1958
1959 addr &= TARGET_PAGE_MASK;
1960 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1961 tlb_flush_entry(&env->tlb_table[0][i], addr);
1962 tlb_flush_entry(&env->tlb_table[1][i], addr);
1963#if (NB_MMU_MODES >= 3)
1964 tlb_flush_entry(&env->tlb_table[2][i], addr);
1965#if (NB_MMU_MODES == 4)
1966 tlb_flush_entry(&env->tlb_table[3][i], addr);
1967#endif
1968#endif
1969
1970 tlb_flush_jmp_cache(env, addr);
1971
1972#ifdef USE_KQEMU
1973 if (env->kqemu_enabled) {
1974 kqemu_flush_page(env, addr);
1975 }
1976#endif
1977}
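
/* Illustrative use (sketch, not part of this file): a target helper flushes
 a single guest mapping this way, e.g. for an x86 INVLPG:

 void helper_invlpg(target_ulong addr)
 {
 tlb_flush_page(env, addr);
 }

 The helper name is only an example; what matters is that per-page guest
 invalidations map onto tlb_flush_page(). */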
1978
1979/* update the TLBs so that writes to code in the physical page 'ram_addr'
1980 can be detected */
1981static void tlb_protect_code(ram_addr_t ram_addr)
1982{
1983 cpu_physical_memory_reset_dirty(ram_addr,
1984 ram_addr + TARGET_PAGE_SIZE,
1985 CODE_DIRTY_FLAG);
1986#if defined(VBOX) && defined(REM_MONITOR_CODE_PAGES)
1987 /** @todo Retest this? This function has changed... */
1988 remR3ProtectCode(cpu_single_env, ram_addr);
1989#endif
1990}
1991
1992/* update the TLB so that writes in physical page 'phys_addr' are no longer
1993 tested for self modifying code */
1994static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1995 target_ulong vaddr)
1996{
1997#ifdef VBOX
1998 if (RT_LIKELY((ram_addr >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
1999#endif
2000 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
2001}
2002
2003#ifndef VBOX
2004static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
2005 unsigned long start, unsigned long length)
2006#else
2007DECLINLINE(void) tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
2008 unsigned long start, unsigned long length)
2009#endif
2010{
2011 unsigned long addr;
2012
2013#ifdef VBOX
2014 if (start & 3)
2015 return;
2016#endif
2017 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
2018 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
2019 if ((addr - start) < length) {
2020 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
2021 }
2022 }
2023}
2024
2025void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
2026 int dirty_flags)
2027{
2028 CPUState *env;
2029 unsigned long length, start1;
2030 int i, mask, len;
2031 uint8_t *p;
2032
2033 start &= TARGET_PAGE_MASK;
2034 end = TARGET_PAGE_ALIGN(end);
2035
2036 length = end - start;
2037 if (length == 0)
2038 return;
2039 len = length >> TARGET_PAGE_BITS;
2040#ifdef USE_KQEMU
2041 /* XXX: should not depend on cpu context */
2042 env = first_cpu;
2043 if (env->kqemu_enabled) {
2044 ram_addr_t addr;
2045 addr = start;
2046 for(i = 0; i < len; i++) {
2047 kqemu_set_notdirty(env, addr);
2048 addr += TARGET_PAGE_SIZE;
2049 }
2050 }
2051#endif
2052 mask = ~dirty_flags;
2053 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
2054#ifdef VBOX
2055 if (RT_LIKELY((start >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
2056#endif
2057 for(i = 0; i < len; i++)
2058 p[i] &= mask;
2059
2060 /* we modify the TLB cache so that the dirty bit will be set again
2061 when accessing the range */
2062#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
2063 start1 = start;
2064#elif !defined(VBOX)
2065 start1 = start + (unsigned long)phys_ram_base;
2066#else
2067 start1 = (unsigned long)remR3TlbGCPhys2Ptr(first_cpu, start, 1 /*fWritable*/); /** @todo this can be harmful with VBOX_WITH_NEW_PHYS_CODE, fix interface/whatever. */
2068#endif
2069 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2070 for(i = 0; i < CPU_TLB_SIZE; i++)
2071 tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
2072 for(i = 0; i < CPU_TLB_SIZE; i++)
2073 tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
2074#if (NB_MMU_MODES >= 3)
2075 for(i = 0; i < CPU_TLB_SIZE; i++)
2076 tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
2077#if (NB_MMU_MODES == 4)
2078 for(i = 0; i < CPU_TLB_SIZE; i++)
2079 tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
2080#endif
2081#endif
2082 }
2083}
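
/* Usage sketch (the names and flag choice are illustrative only): a display
 device that scans guest video memory could clear just its own dirty bit
 for the framebuffer range after each refresh:

 cpu_physical_memory_reset_dirty(fb_start,
 fb_start + fb_size,
 VGA_DIRTY_FLAG);

 Only the bits in 'dirty_flags' are cleared, so the CODE_DIRTY_FLAG
 bookkeeping used for self-modifying-code detection is left intact. */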
2084
2085#ifndef VBOX
2086int cpu_physical_memory_set_dirty_tracking(int enable)
2087{
2088 in_migration = enable;
2089 return 0;
2090}
2091
2092int cpu_physical_memory_get_dirty_tracking(void)
2093{
2094 return in_migration;
2095}
2096#endif
2097
2098#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
2099DECLINLINE(void) tlb_update_dirty(CPUTLBEntry *tlb_entry, target_phys_addr_t phys_addend)
2100#else
2101static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
2102#endif
2103{
2104 ram_addr_t ram_addr;
2105
2106 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
2107 /* RAM case */
2108#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
2109 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
2110#elif !defined(VBOX)
2111 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
2112 tlb_entry->addend - (unsigned long)phys_ram_base;
2113#else
2114 Assert(phys_addend != -1);
2115 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + phys_addend;
2116#endif
2117 if (!cpu_physical_memory_is_dirty(ram_addr)) {
2118 tlb_entry->addr_write |= TLB_NOTDIRTY;
2119 }
2120 }
2121}
2122
2123/* update the TLB according to the current state of the dirty bits */
2124void cpu_tlb_update_dirty(CPUState *env)
2125{
2126 int i;
2127#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
2128 for(i = 0; i < CPU_TLB_SIZE; i++)
2129 tlb_update_dirty(&env->tlb_table[0][i], env->phys_addends[0][i]);
2130 for(i = 0; i < CPU_TLB_SIZE; i++)
2131 tlb_update_dirty(&env->tlb_table[1][i], env->phys_addends[1][i]);
2132#if (NB_MMU_MODES >= 3)
2133 for(i = 0; i < CPU_TLB_SIZE; i++)
2134 tlb_update_dirty(&env->tlb_table[2][i], env->phys_addends[2][i]);
2135#if (NB_MMU_MODES == 4)
2136 for(i = 0; i < CPU_TLB_SIZE; i++)
2137 tlb_update_dirty(&env->tlb_table[3][i], env->phys_addends[3][i]);
2138#endif
2139#endif
2140#else /* VBOX */
2141 for(i = 0; i < CPU_TLB_SIZE; i++)
2142 tlb_update_dirty(&env->tlb_table[0][i]);
2143 for(i = 0; i < CPU_TLB_SIZE; i++)
2144 tlb_update_dirty(&env->tlb_table[1][i]);
2145#if (NB_MMU_MODES >= 3)
2146 for(i = 0; i < CPU_TLB_SIZE; i++)
2147 tlb_update_dirty(&env->tlb_table[2][i]);
2148#if (NB_MMU_MODES == 4)
2149 for(i = 0; i < CPU_TLB_SIZE; i++)
2150 tlb_update_dirty(&env->tlb_table[3][i]);
2151#endif
2152#endif
2153#endif /* VBOX */
2154}
2155
2156#ifndef VBOX
2157static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
2158#else
2159DECLINLINE(void) tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
2160#endif
2161{
2162 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
2163 tlb_entry->addr_write = vaddr;
2164}
2165
2166
2167/* update the TLB corresponding to virtual page vaddr and phys addr
2168 addr so that it is no longer dirty */
2169#ifndef VBOX
2170static inline void tlb_set_dirty(CPUState *env,
2171 unsigned long addr, target_ulong vaddr)
2172#else
2173DECLINLINE(void) tlb_set_dirty(CPUState *env,
2174 unsigned long addr, target_ulong vaddr)
2175#endif
2176{
2177 int i;
2178
2179 addr &= TARGET_PAGE_MASK;
2180 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2181 tlb_set_dirty1(&env->tlb_table[0][i], addr);
2182 tlb_set_dirty1(&env->tlb_table[1][i], addr);
2183#if (NB_MMU_MODES >= 3)
2184 tlb_set_dirty1(&env->tlb_table[2][i], vaddr);
2185#if (NB_MMU_MODES == 4)
2186 tlb_set_dirty1(&env->tlb_table[3][i], vaddr);
2187#endif
2188#endif
2189}
2190
2191/* add a new TLB entry. At most one entry for a given virtual address
2192 is permitted. Return 0 if OK or 2 if the page could not be mapped
2193 (can only happen in non-SOFTMMU mode for I/O pages or pages
2194 conflicting with the host address space). */
2195int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
2196 target_phys_addr_t paddr, int prot,
2197 int mmu_idx, int is_softmmu)
2198{
2199 PhysPageDesc *p;
2200 unsigned long pd;
2201 unsigned int index;
2202 target_ulong address;
2203 target_ulong code_address;
2204 target_phys_addr_t addend;
2205 int ret;
2206 CPUTLBEntry *te;
2207 int i;
2208 target_phys_addr_t iotlb;
2209#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
2210 int read_mods = 0, write_mods = 0, code_mods = 0;
2211#endif
2212
2213 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
2214 if (!p) {
2215 pd = IO_MEM_UNASSIGNED;
2216 } else {
2217 pd = p->phys_offset;
2218 }
2219#if defined(DEBUG_TLB)
2220 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
2221 vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
2222#endif
2223
2224 ret = 0;
2225 address = vaddr;
2226 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
2227 /* IO memory case (romd handled later) */
2228 address |= TLB_MMIO;
2229 }
2230#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
2231 addend = pd & TARGET_PAGE_MASK;
2232#elif !defined(VBOX)
2233 addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
2234#else
2235 /** @todo this is racing the phys_page_find call above since it may register
2236 * a new chunk of memory... */
2237 addend = (unsigned long)remR3TlbGCPhys2Ptr(env,
2238 pd & TARGET_PAGE_MASK,
2239 !!(prot & PAGE_WRITE));
2240#endif
2241
2242 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
2243 /* Normal RAM. */
2244 iotlb = pd & TARGET_PAGE_MASK;
2245 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
2246 iotlb |= IO_MEM_NOTDIRTY;
2247 else
2248 iotlb |= IO_MEM_ROM;
2249 } else {
2250 /* IO handlers are currently passed a physical address.
2251 It would be nice to pass an offset from the base address
2252 of that region. This would avoid having to special case RAM,
2253 and avoid full address decoding in every device.
2254 We can't use the high bits of pd for this because
2255 IO_MEM_ROMD uses these as a ram address. */
2256 iotlb = (pd & ~TARGET_PAGE_MASK) + paddr;
2257 }
2258
2259 code_address = address;
2260
2261#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
2262 if (addend & 0x3)
2263 {
2264 if (addend & 0x2)
2265 {
2266 /* catch write */
2267 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM)
2268 write_mods |= TLB_MMIO;
2269 }
2270 else if (addend & 0x1)
2271 {
2272 /* catch all */
2273 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM)
2274 {
2275 read_mods |= TLB_MMIO;
2276 write_mods |= TLB_MMIO;
2277 code_mods |= TLB_MMIO;
2278 }
2279 }
2280 if ((iotlb & ~TARGET_PAGE_MASK) == 0)
2281 iotlb = env->pVM->rem.s.iHandlerMemType + paddr;
2282 addend &= ~(target_ulong)0x3;
2283 }
2284#endif
2285
2286 /* Make accesses to pages with watchpoints go via the
2287 watchpoint trap routines. */
2288 for (i = 0; i < env->nb_watchpoints; i++) {
2289 if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {
2290 iotlb = io_mem_watch + paddr;
2291 /* TODO: The memory case can be optimized by not trapping
2292 reads of pages with a write breakpoint. */
2293 address |= TLB_MMIO;
2294 }
2295 }
2296
2297 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2298 env->iotlb[mmu_idx][index] = iotlb - vaddr;
2299 te = &env->tlb_table[mmu_idx][index];
2300 te->addend = addend - vaddr;
2301 if (prot & PAGE_READ) {
2302 te->addr_read = address;
2303 } else {
2304 te->addr_read = -1;
2305 }
2306
2307 if (prot & PAGE_EXEC) {
2308 te->addr_code = code_address;
2309 } else {
2310 te->addr_code = -1;
2311 }
2312 if (prot & PAGE_WRITE) {
2313 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
2314 (pd & IO_MEM_ROMD)) {
2315 /* Write access calls the I/O callback. */
2316 te->addr_write = address | TLB_MMIO;
2317 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
2318 !cpu_physical_memory_is_dirty(pd)) {
2319 te->addr_write = address | TLB_NOTDIRTY;
2320 } else {
2321 te->addr_write = address;
2322 }
2323 } else {
2324 te->addr_write = -1;
2325 }
2326
2327#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
2328 if (prot & PAGE_READ)
2329 te->addr_read |= read_mods;
2330 if (prot & PAGE_EXEC)
2331 te->addr_code |= code_mods;
2332 if (prot & PAGE_WRITE)
2333 te->addr_write |= write_mods;
2334
2335 env->phys_addends[mmu_idx][index] = (pd & TARGET_PAGE_MASK) - vaddr;
2336#endif
2337
2338#ifdef VBOX
2339 /* inform raw mode about TLB page change */
2340 remR3FlushPage(env, vaddr);
2341#endif
2342 return ret;
2343}
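
/* Illustrative call (sketch only, names are placeholders): a target's TLB
 fill path resolves a guest virtual address to a guest physical one and
 then installs the mapping roughly like this, the last argument being
 is_softmmu:

 ret = tlb_set_page_exec(env, vaddr & TARGET_PAGE_MASK,
 paddr & TARGET_PAGE_MASK,
 PAGE_READ | PAGE_WRITE | PAGE_EXEC,
 mmu_idx, 1);

 vaddr, paddr, mmu_idx and the prot bits stand for whatever the target MMU
 code computed from the guest page tables. */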
2344#if 0
2345/* called from signal handler: invalidate the code and unprotect the
2346 page. Return TRUE if the fault was successfully handled. */
2347int page_unprotect(target_ulong addr, unsigned long pc, void *puc)
2348{
2349#if !defined(CONFIG_SOFTMMU)
2350 VirtPageDesc *vp;
2351
2352#if defined(DEBUG_TLB)
2353 printf("page_unprotect: addr=0x%08x\n", addr);
2354#endif
2355 addr &= TARGET_PAGE_MASK;
2356
2357 /* if it is not mapped, no need to worry here */
2358 if (addr >= MMAP_AREA_END)
2359 return 0;
2360 vp = virt_page_find(addr >> TARGET_PAGE_BITS);
2361 if (!vp)
2362 return 0;
2363 /* NOTE: in this case, validate_tag is _not_ tested as it
2364 validates only the code TLB */
2365 if (vp->valid_tag != virt_valid_tag)
2366 return 0;
2367 if (!(vp->prot & PAGE_WRITE))
2368 return 0;
2369#if defined(DEBUG_TLB)
2370 printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
2371 addr, vp->phys_addr, vp->prot);
2372#endif
2373 if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
2374 cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
2375 (unsigned long)addr, vp->prot);
2376 /* set the dirty bit */
2377 phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
2378 /* flush the code inside */
2379 tb_invalidate_phys_page(vp->phys_addr, pc, puc);
2380 return 1;
2381#elif defined(VBOX)
2382 addr &= TARGET_PAGE_MASK;
2383
2384 /* if it is not mapped, no need to worry here */
2385 if (addr >= MMAP_AREA_END)
2386 return 0;
2387 return 1;
2388#else
2389 return 0;
2390#endif
2391}
2392#endif /* 0 */
2393
2394#else
2395
2396void tlb_flush(CPUState *env, int flush_global)
2397{
2398}
2399
2400void tlb_flush_page(CPUState *env, target_ulong addr)
2401{
2402}
2403
2404int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
2405 target_phys_addr_t paddr, int prot,
2406 int mmu_idx, int is_softmmu)
2407{
2408 return 0;
2409}
2410
2411#ifndef VBOX
2412/* dump memory mappings */
2413void page_dump(FILE *f)
2414{
2415 unsigned long start, end;
2416 int i, j, prot, prot1;
2417 PageDesc *p;
2418
2419 fprintf(f, "%-8s %-8s %-8s %s\n",
2420 "start", "end", "size", "prot");
2421 start = -1;
2422 end = -1;
2423 prot = 0;
2424 for(i = 0; i <= L1_SIZE; i++) {
2425 if (i < L1_SIZE)
2426 p = l1_map[i];
2427 else
2428 p = NULL;
2429 for(j = 0;j < L2_SIZE; j++) {
2430 if (!p)
2431 prot1 = 0;
2432 else
2433 prot1 = p[j].flags;
2434 if (prot1 != prot) {
2435 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
2436 if (start != -1) {
2437 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
2438 start, end, end - start,
2439 prot & PAGE_READ ? 'r' : '-',
2440 prot & PAGE_WRITE ? 'w' : '-',
2441 prot & PAGE_EXEC ? 'x' : '-');
2442 }
2443 if (prot1 != 0)
2444 start = end;
2445 else
2446 start = -1;
2447 prot = prot1;
2448 }
2449 if (!p)
2450 break;
2451 }
2452 }
2453}
2454#endif /* !VBOX */
2455
2456int page_get_flags(target_ulong address)
2457{
2458 PageDesc *p;
2459
2460 p = page_find(address >> TARGET_PAGE_BITS);
2461 if (!p)
2462 return 0;
2463 return p->flags;
2464}
2465
2466/* modify the flags of a page and invalidate the code if
2467 necessary. The flag PAGE_WRITE_ORG is positioned automatically
2468 depending on PAGE_WRITE */
2469void page_set_flags(target_ulong start, target_ulong end, int flags)
2470{
2471 PageDesc *p;
2472 target_ulong addr;
2473
2474 start = start & TARGET_PAGE_MASK;
2475 end = TARGET_PAGE_ALIGN(end);
2476 if (flags & PAGE_WRITE)
2477 flags |= PAGE_WRITE_ORG;
2478#ifdef VBOX
2479 AssertMsgFailed(("We shouldn't be here, and if we should, we must have an env to do the proper locking!\n"));
2480#endif
2481 spin_lock(&tb_lock);
2482 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2483 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
2484 /* if the write protection is set, then we invalidate the code
2485 inside */
2486 if (!(p->flags & PAGE_WRITE) &&
2487 (flags & PAGE_WRITE) &&
2488 p->first_tb) {
2489 tb_invalidate_phys_page(addr, 0, NULL);
2490 }
2491 p->flags = flags;
2492 }
2493 spin_unlock(&tb_lock);
2494}
2495
2496int page_check_range(target_ulong start, target_ulong len, int flags)
2497{
2498 PageDesc *p;
2499 target_ulong end;
2500 target_ulong addr;
2501
2502 end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2503 start = start & TARGET_PAGE_MASK;
2504
2505 if( end < start )
2506 /* we've wrapped around */
2507 return -1;
2508 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2509 p = page_find(addr >> TARGET_PAGE_BITS);
2510 if( !p )
2511 return -1;
2512 if( !(p->flags & PAGE_VALID) )
2513 return -1;
2514
2515 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
2516 return -1;
2517 if (flags & PAGE_WRITE) {
2518 if (!(p->flags & PAGE_WRITE_ORG))
2519 return -1;
2520 /* unprotect the page if it was put read-only because it
2521 contains translated code */
2522 if (!(p->flags & PAGE_WRITE)) {
2523 if (!page_unprotect(addr, 0, NULL))
2524 return -1;
2525 }
2526 return 0;
2527 }
2528 }
2529 return 0;
2530}
2531
2532/* called from signal handler: invalidate the code and unprotect the
2533 page. Return TRUE if the fault was successfully handled. */
2534int page_unprotect(target_ulong address, unsigned long pc, void *puc)
2535{
2536 unsigned int page_index, prot, pindex;
2537 PageDesc *p, *p1;
2538 target_ulong host_start, host_end, addr;
2539
2540 /* Technically this isn't safe inside a signal handler. However we
2541 know this only ever happens in a synchronous SEGV handler, so in
2542 practice it seems to be ok. */
2543 mmap_lock();
2544
2545 host_start = address & qemu_host_page_mask;
2546 page_index = host_start >> TARGET_PAGE_BITS;
2547 p1 = page_find(page_index);
2548 if (!p1) {
2549 mmap_unlock();
2550 return 0;
2551 }
2552 host_end = host_start + qemu_host_page_size;
2553 p = p1;
2554 prot = 0;
2555 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
2556 prot |= p->flags;
2557 p++;
2558 }
2559 /* if the page was really writable, then we change its
2560 protection back to writable */
2561 if (prot & PAGE_WRITE_ORG) {
2562 pindex = (address - host_start) >> TARGET_PAGE_BITS;
2563 if (!(p1[pindex].flags & PAGE_WRITE)) {
2564 mprotect((void *)g2h(host_start), qemu_host_page_size,
2565 (prot & PAGE_BITS) | PAGE_WRITE);
2566 p1[pindex].flags |= PAGE_WRITE;
2567 /* and since the content will be modified, we must invalidate
2568 the corresponding translated code. */
2569 tb_invalidate_phys_page(address, pc, puc);
2570#ifdef DEBUG_TB_CHECK
2571 tb_invalidate_check(address);
2572#endif
2573 mmap_unlock();
2574 return 1;
2575 }
2576 }
2577 mmap_unlock();
2578 return 0;
2579}
2580
2581static inline void tlb_set_dirty(CPUState *env,
2582 unsigned long addr, target_ulong vaddr)
2583{
2584}
2585#endif /* defined(CONFIG_USER_ONLY) */
2586
2587#if !defined(CONFIG_USER_ONLY)
2588static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2589 ram_addr_t memory);
2590static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2591 ram_addr_t orig_memory);
2592#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2593 need_subpage) \
2594 do { \
2595 if (addr > start_addr) \
2596 start_addr2 = 0; \
2597 else { \
2598 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2599 if (start_addr2 > 0) \
2600 need_subpage = 1; \
2601 } \
2602 \
2603 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
2604 end_addr2 = TARGET_PAGE_SIZE - 1; \
2605 else { \
2606 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2607 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2608 need_subpage = 1; \
2609 } \
2610 } while (0)
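
/* Worked example (assuming 4 KiB target pages): registering a region with
 start_addr = 0x10000100 and orig_size = 0x200 visits the page at
 addr = 0x10000000. Since addr is not greater than start_addr, start_addr2
 becomes 0x100; since the region also ends inside that page, end_addr2
 becomes 0x2ff. need_subpage is therefore set in both branches and the page
 is split into subpage regions rather than mapped whole. */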
2611
2612
2613/* register physical memory. 'size' must be a multiple of the target
2614 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2615 io memory page */
2616void cpu_register_physical_memory(target_phys_addr_t start_addr,
2617 unsigned long size,
2618 unsigned long phys_offset)
2619{
2620 target_phys_addr_t addr, end_addr;
2621 PhysPageDesc *p;
2622 CPUState *env;
2623 ram_addr_t orig_size = size;
2624 void *subpage;
2625
2626#ifdef USE_KQEMU
2627 /* XXX: should not depend on cpu context */
2628 env = first_cpu;
2629 if (env->kqemu_enabled) {
2630 kqemu_set_phys_mem(start_addr, size, phys_offset);
2631 }
2632#endif
2633 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
2634 end_addr = start_addr + (target_phys_addr_t)size;
2635 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
2636 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2637 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
2638 ram_addr_t orig_memory = p->phys_offset;
2639 target_phys_addr_t start_addr2, end_addr2;
2640 int need_subpage = 0;
2641
2642 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2643 need_subpage);
2644 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2645 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2646 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2647 &p->phys_offset, orig_memory);
2648 } else {
2649 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2650 >> IO_MEM_SHIFT];
2651 }
2652 subpage_register(subpage, start_addr2, end_addr2, phys_offset);
2653 } else {
2654 p->phys_offset = phys_offset;
2655#if !defined(VBOX) || defined(VBOX_WITH_NEW_PHYS_CODE)
2656 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2657 (phys_offset & IO_MEM_ROMD))
2658#else
2659 if ( (phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM
2660 || (phys_offset & IO_MEM_ROMD)
2661 || (phys_offset & ~TARGET_PAGE_MASK) == IO_MEM_RAM_MISSING)
2662#endif
2663 phys_offset += TARGET_PAGE_SIZE;
2664 }
2665 } else {
2666 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2667 p->phys_offset = phys_offset;
2668#if !defined(VBOX) || defined(VBOX_WITH_NEW_PHYS_CODE)
2669 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2670 (phys_offset & IO_MEM_ROMD))
2671#else
2672 if ( (phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM
2673 || (phys_offset & IO_MEM_ROMD)
2674 || (phys_offset & ~TARGET_PAGE_MASK) == IO_MEM_RAM_MISSING)
2675#endif
2676 phys_offset += TARGET_PAGE_SIZE;
2677 else {
2678 target_phys_addr_t start_addr2, end_addr2;
2679 int need_subpage = 0;
2680
2681 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2682 end_addr2, need_subpage);
2683
2684 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2685 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2686 &p->phys_offset, IO_MEM_UNASSIGNED);
2687 subpage_register(subpage, start_addr2, end_addr2,
2688 phys_offset);
2689 }
2690 }
2691 }
2692 }
2693 /* since each CPU stores ram addresses in its TLB cache, we must
2694 reset the modified entries */
2695 /* XXX: slow ! */
2696 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2697 tlb_flush(env, 1);
2698 }
2699}
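
/* Usage sketch (base address and size are illustrative only): board setup
 code typically allocates guest RAM and maps it with the ram offset as
 phys_offset; IO_MEM_RAM is zero, so the low bits stay clear and the range
 is treated as plain RAM:

 ram_addr_t ram_off = qemu_ram_alloc(16 * 1024 * 1024);
 cpu_register_physical_memory(0x00000000, 16 * 1024 * 1024,
 ram_off | IO_MEM_RAM);

 Any phys_offset whose low bits are non-zero selects an I/O slot instead
 (see the comment above). */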
2700
2701/* XXX: temporary until new memory mapping API */
2702uint32_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2703{
2704 PhysPageDesc *p;
2705
2706 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2707 if (!p)
2708 return IO_MEM_UNASSIGNED;
2709 return p->phys_offset;
2710}
2711
2712#ifndef VBOX
2713/* XXX: better than nothing */
2714ram_addr_t qemu_ram_alloc(ram_addr_t size)
2715{
2716 ram_addr_t addr;
2717 if ((phys_ram_alloc_offset + size) > phys_ram_size) {
2718 fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
2719 (uint64_t)size, (uint64_t)phys_ram_size);
2720 abort();
2721 }
2722 addr = phys_ram_alloc_offset;
2723 phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
2724 return addr;
2725}
2726
2727void qemu_ram_free(ram_addr_t addr)
2728{
2729}
2730#endif
2731
2732
2733static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
2734{
2735#ifdef DEBUG_UNASSIGNED
2736 printf("Unassigned mem read 0x%08x\n", (int)addr);
2737#endif
2738#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2739 do_unassigned_access(addr, 0, 0, 0, 1);
2740#endif
2741 return 0;
2742}
2743
2744static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
2745{
2746#ifdef DEBUG_UNASSIGNED
2747 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2748#endif
2749#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2750 do_unassigned_access(addr, 0, 0, 0, 2);
2751#endif
2752 return 0;
2753}
2754
2755static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
2756{
2757#ifdef DEBUG_UNASSIGNED
2758 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2759#endif
2760#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2761 do_unassigned_access(addr, 0, 0, 0, 4);
2762#endif
2763 return 0;
2764}
2765
2766static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2767{
2768#ifdef DEBUG_UNASSIGNED
2769 printf("Unassigned mem write 0x%08x = 0x%x\n", (int)addr, val);
2770#endif
2771}
2772
2773static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2774{
2775#ifdef DEBUG_UNASSIGNED
2776 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2777#endif
2778#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2779 do_unassigned_access(addr, 1, 0, 0, 2);
2780#endif
2781}
2782
2783static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2784{
2785#ifdef DEBUG_UNASSIGNED
2786 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2787#endif
2788#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2789 do_unassigned_access(addr, 1, 0, 0, 4);
2790#endif
2791}
2792static CPUReadMemoryFunc *unassigned_mem_read[3] = {
2793 unassigned_mem_readb,
2794 unassigned_mem_readw,
2795 unassigned_mem_readl,
2796};
2797
2798static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
2799 unassigned_mem_writeb,
2800 unassigned_mem_writew,
2801 unassigned_mem_writel,
2802};
2803
2804static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2805{
2806 unsigned long ram_addr;
2807 int dirty_flags;
2808#if defined(VBOX)
2809 ram_addr = addr;
2810#else
2811 ram_addr = addr - (unsigned long)phys_ram_base;
2812#endif
2813#ifdef VBOX
2814 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
2815 dirty_flags = 0xff;
2816 else
2817#endif /* VBOX */
2818 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2819 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2820#if !defined(CONFIG_USER_ONLY)
2821 tb_invalidate_phys_page_fast(ram_addr, 1);
2822# ifdef VBOX
2823 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
2824 dirty_flags = 0xff;
2825 else
2826# endif /* VBOX */
2827 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2828#endif
2829 }
2830#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
2831 remR3PhysWriteU8(addr, val);
2832#else
2833 stb_p((uint8_t *)(long)addr, val);
2834#endif
2835#ifdef USE_KQEMU
2836 if (cpu_single_env->kqemu_enabled &&
2837 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2838 kqemu_modify_page(cpu_single_env, ram_addr);
2839#endif
2840 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2841#ifdef VBOX
2842 if (RT_LIKELY((ram_addr >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
2843#endif /* VBOX */
2844 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2845 /* we remove the notdirty callback only if the code has been
2846 flushed */
2847 if (dirty_flags == 0xff)
2848 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_io_vaddr);
2849}
2850
2851static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2852{
2853 unsigned long ram_addr;
2854 int dirty_flags;
2855#if defined(VBOX)
2856 ram_addr = addr;
2857#else
2858 ram_addr = addr - (unsigned long)phys_ram_base;
2859#endif
2860#ifdef VBOX
2861 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
2862 dirty_flags = 0xff;
2863 else
2864#endif /* VBOX */
2865 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2866 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2867#if !defined(CONFIG_USER_ONLY)
2868 tb_invalidate_phys_page_fast(ram_addr, 2);
2869# ifdef VBOX
2870 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
2871 dirty_flags = 0xff;
2872 else
2873# endif /* VBOX */
2874 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2875#endif
2876 }
2877#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
2878 remR3PhysWriteU16(addr, val);
2879#else
2880 stw_p((uint8_t *)(long)addr, val);
2881#endif
2882
2883#ifdef USE_KQEMU
2884 if (cpu_single_env->kqemu_enabled &&
2885 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2886 kqemu_modify_page(cpu_single_env, ram_addr);
2887#endif
2888 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2889#ifdef VBOX
2890 if (RT_LIKELY((ram_addr >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
2891#endif
2892 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2893 /* we remove the notdirty callback only if the code has been
2894 flushed */
2895 if (dirty_flags == 0xff)
2896 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_io_vaddr);
2897}
2898
2899static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2900{
2901 unsigned long ram_addr;
2902 int dirty_flags;
2903#if defined(VBOX)
2904 ram_addr = addr;
2905#else
2906 ram_addr = addr - (unsigned long)phys_ram_base;
2907#endif
2908#ifdef VBOX
2909 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
2910 dirty_flags = 0xff;
2911 else
2912#endif /* VBOX */
2913 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2914 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2915#if !defined(CONFIG_USER_ONLY)
2916 tb_invalidate_phys_page_fast(ram_addr, 4);
2917# ifdef VBOX
2918 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
2919 dirty_flags = 0xff;
2920 else
2921# endif /* VBOX */
2922 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2923#endif
2924 }
2925#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
2926 remR3PhysWriteU32(addr, val);
2927#else
2928 stl_p((uint8_t *)(long)addr, val);
2929#endif
2930#ifdef USE_KQEMU
2931 if (cpu_single_env->kqemu_enabled &&
2932 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2933 kqemu_modify_page(cpu_single_env, ram_addr);
2934#endif
2935 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2936#ifdef VBOX
2937 if (RT_LIKELY((ram_addr >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
2938#endif
2939 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2940 /* we remove the notdirty callback only if the code has been
2941 flushed */
2942 if (dirty_flags == 0xff)
2943 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_io_vaddr);
2944}
2945
2946static CPUReadMemoryFunc *error_mem_read[3] = {
2947 NULL, /* never used */
2948 NULL, /* never used */
2949 NULL, /* never used */
2950};
2951
2952static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2953 notdirty_mem_writeb,
2954 notdirty_mem_writew,
2955 notdirty_mem_writel,
2956};
2957
2958
2959/* Generate a debug exception if a watchpoint has been hit. */
2960static void check_watchpoint(int offset, int flags)
2961{
2962 CPUState *env = cpu_single_env;
2963 target_ulong vaddr;
2964 int i;
2965
2966 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
2967 for (i = 0; i < env->nb_watchpoints; i++) {
2968 if (vaddr == env->watchpoint[i].vaddr
2969 && (env->watchpoint[i].type & flags)) {
2970 env->watchpoint_hit = i + 1;
2971 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
2972 break;
2973 }
2974 }
2975}
2976
2977/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2978 so these check for a hit then pass through to the normal out-of-line
2979 phys routines. */
2980static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2981{
2982 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
2983 return ldub_phys(addr);
2984}
2985
2986static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2987{
2988 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
2989 return lduw_phys(addr);
2990}
2991
2992static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2993{
2994 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
2995 return ldl_phys(addr);
2996}
2997
2998static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
2999 uint32_t val)
3000{
3001 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
3002 stb_phys(addr, val);
3003}
3004
3005static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
3006 uint32_t val)
3007{
3008 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
3009 stw_phys(addr, val);
3010}
3011
3012static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
3013 uint32_t val)
3014{
3015 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
3016 stl_phys(addr, val);
3017}
3018
3019static CPUReadMemoryFunc *watch_mem_read[3] = {
3020 watch_mem_readb,
3021 watch_mem_readw,
3022 watch_mem_readl,
3023};
3024
3025static CPUWriteMemoryFunc *watch_mem_write[3] = {
3026 watch_mem_writeb,
3027 watch_mem_writew,
3028 watch_mem_writel,
3029};
3030
3031static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
3032 unsigned int len)
3033{
3034 uint32_t ret;
3035 unsigned int idx;
3036
3037 idx = SUBPAGE_IDX(addr - mmio->base);
3038#if defined(DEBUG_SUBPAGE)
3039 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
3040 mmio, len, addr, idx);
3041#endif
3042 ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len], addr);
3043
3044 return ret;
3045}
3046
3047static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
3048 uint32_t value, unsigned int len)
3049{
3050 unsigned int idx;
3051
3052 idx = SUBPAGE_IDX(addr - mmio->base);
3053#if defined(DEBUG_SUBPAGE)
3054 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
3055 mmio, len, addr, idx, value);
3056#endif
3057 (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len], addr, value);
3058}
3059
3060static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
3061{
3062#if defined(DEBUG_SUBPAGE)
3063 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
3064#endif
3065
3066 return subpage_readlen(opaque, addr, 0);
3067}
3068
3069static void subpage_writeb (void *opaque, target_phys_addr_t addr,
3070 uint32_t value)
3071{
3072#if defined(DEBUG_SUBPAGE)
3073 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
3074#endif
3075 subpage_writelen(opaque, addr, value, 0);
3076}
3077
3078static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
3079{
3080#if defined(DEBUG_SUBPAGE)
3081 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
3082#endif
3083
3084 return subpage_readlen(opaque, addr, 1);
3085}
3086
3087static void subpage_writew (void *opaque, target_phys_addr_t addr,
3088 uint32_t value)
3089{
3090#if defined(DEBUG_SUBPAGE)
3091 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
3092#endif
3093 subpage_writelen(opaque, addr, value, 1);
3094}
3095
3096static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
3097{
3098#if defined(DEBUG_SUBPAGE)
3099 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
3100#endif
3101
3102 return subpage_readlen(opaque, addr, 2);
3103}
3104
3105static void subpage_writel (void *opaque,
3106 target_phys_addr_t addr, uint32_t value)
3107{
3108#if defined(DEBUG_SUBPAGE)
3109 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
3110#endif
3111 subpage_writelen(opaque, addr, value, 2);
3112}
3113
3114static CPUReadMemoryFunc *subpage_read[] = {
3115 &subpage_readb,
3116 &subpage_readw,
3117 &subpage_readl,
3118};
3119
3120static CPUWriteMemoryFunc *subpage_write[] = {
3121 &subpage_writeb,
3122 &subpage_writew,
3123 &subpage_writel,
3124};
3125
3126static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
3127 ram_addr_t memory)
3128{
3129 int idx, eidx;
3130 unsigned int i;
3131
3132 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
3133 return -1;
3134 idx = SUBPAGE_IDX(start);
3135 eidx = SUBPAGE_IDX(end);
3136#if defined(DEBUG_SUBPAGE)
3137 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
3138 mmio, start, end, idx, eidx, memory);
3139#endif
3140 memory >>= IO_MEM_SHIFT;
3141 for (; idx <= eidx; idx++) {
3142 for (i = 0; i < 4; i++) {
3143 if (io_mem_read[memory][i]) {
3144 mmio->mem_read[idx][i] = &io_mem_read[memory][i];
3145 mmio->opaque[idx][0][i] = io_mem_opaque[memory];
3146 }
3147 if (io_mem_write[memory][i]) {
3148 mmio->mem_write[idx][i] = &io_mem_write[memory][i];
3149 mmio->opaque[idx][1][i] = io_mem_opaque[memory];
3150 }
3151 }
3152 }
3153
3154 return 0;
3155}
3156
3157static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
3158 ram_addr_t orig_memory)
3159{
3160 subpage_t *mmio;
3161 int subpage_memory;
3162
3163 mmio = qemu_mallocz(sizeof(subpage_t));
3164 if (mmio != NULL) {
3165 mmio->base = base;
3166 subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
3167#if defined(DEBUG_SUBPAGE)
3168 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
3169 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
3170#endif
3171 *phys = subpage_memory | IO_MEM_SUBPAGE;
3172 subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory);
3173 }
3174
3175 return mmio;
3176}
3177
3178static void io_mem_init(void)
3179{
3180 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
3181 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
3182 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
3183#if defined(VBOX) && !defined(VBOX_WITH_NEW_PHYS_CODE)
3184 cpu_register_io_memory(IO_MEM_RAM_MISSING >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
3185 io_mem_nb = 6;
3186#else
3187 io_mem_nb = 5;
3188#endif
3189
3190 io_mem_watch = cpu_register_io_memory(0, watch_mem_read,
3191 watch_mem_write, NULL);
3192
3193#ifndef VBOX /* VBOX: we do this later when the RAM is allocated. */
3194 /* alloc dirty bits array */
3195 phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
3196 memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
3197#endif /* !VBOX */
3198}
3199
3200/* mem_read and mem_write are arrays of functions containing the
3201 function to access byte (index 0), word (index 1) and dword (index
3202 2). Functions can be omitted with a NULL function pointer. The
3203 registered functions may be modified dynamically later.
3204 If io_index is non-zero, the corresponding io zone is
3205 modified. If it is zero, a new io zone is allocated. The return
3206 value can be used with cpu_register_physical_memory(). (-1) is
3207 returned on error. */
3208int cpu_register_io_memory(int io_index,
3209 CPUReadMemoryFunc **mem_read,
3210 CPUWriteMemoryFunc **mem_write,
3211 void *opaque)
3212{
3213 int i, subwidth = 0;
3214
3215 if (io_index <= 0) {
3216 if (io_mem_nb >= IO_MEM_NB_ENTRIES)
3217 return -1;
3218 io_index = io_mem_nb++;
3219 } else {
3220 if (io_index >= IO_MEM_NB_ENTRIES)
3221 return -1;
3222 }
3223
3224 for(i = 0;i < 3; i++) {
3225 if (!mem_read[i] || !mem_write[i])
3226 subwidth = IO_MEM_SUBWIDTH;
3227 io_mem_read[io_index][i] = mem_read[i];
3228 io_mem_write[io_index][i] = mem_write[i];
3229 }
3230 io_mem_opaque[io_index] = opaque;
3231 return (io_index << IO_MEM_SHIFT) | subwidth;
3232}
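
/* Usage sketch (the mydev_* names are hypothetical): a device supplies byte,
 word and dword accessors, registers them, and then maps the returned index
 as the phys_offset of its MMIO window:

 static CPUReadMemoryFunc *mydev_read[3] = {
 mydev_readb, mydev_readw, mydev_readl,
 };
 static CPUWriteMemoryFunc *mydev_write[3] = {
 mydev_writeb, mydev_writew, mydev_writel,
 };

 int io = cpu_register_io_memory(0, mydev_read, mydev_write, mydev_state);
 cpu_register_physical_memory(0xfe000000, 0x1000, io);

 If any accessor slot is NULL the returned value carries IO_MEM_SUBWIDTH,
 which forces the range through the subpage machinery above. */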
3233
3234CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
3235{
3236 return io_mem_write[io_index >> IO_MEM_SHIFT];
3237}
3238
3239CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
3240{
3241 return io_mem_read[io_index >> IO_MEM_SHIFT];
3242}
3243#endif /* !defined(CONFIG_USER_ONLY) */
3244
3245/* physical memory access (slow version, mainly for debug) */
3246#if defined(CONFIG_USER_ONLY)
3247void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
3248 int len, int is_write)
3249{
3250 int l, flags;
3251 target_ulong page;
3252 void * p;
3253
3254 while (len > 0) {
3255 page = addr & TARGET_PAGE_MASK;
3256 l = (page + TARGET_PAGE_SIZE) - addr;
3257 if (l > len)
3258 l = len;
3259 flags = page_get_flags(page);
3260 if (!(flags & PAGE_VALID))
3261 return;
3262 if (is_write) {
3263 if (!(flags & PAGE_WRITE))
3264 return;
3265 /* XXX: this code should not depend on lock_user */
3266 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
3267 /* FIXME - should this return an error rather than just fail? */
3268 return;
3269 memcpy(p, buf, l);
3270 unlock_user(p, addr, l);
3271 } else {
3272 if (!(flags & PAGE_READ))
3273 return;
3274 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
3275 /* FIXME - should this return an error rather than just fail? */
3276 return;
3277 memcpy(buf, p, l);
3278 unlock_user(p, addr, 0);
3279 }
3280 len -= l;
3281 buf += l;
3282 addr += l;
3283 }
3284}
3285
3286#else
3287void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
3288 int len, int is_write)
3289{
3290 int l, io_index;
3291 uint8_t *ptr;
3292 uint32_t val;
3293 target_phys_addr_t page;
3294 unsigned long pd;
3295 PhysPageDesc *p;
3296
3297 while (len > 0) {
3298 page = addr & TARGET_PAGE_MASK;
3299 l = (page + TARGET_PAGE_SIZE) - addr;
3300 if (l > len)
3301 l = len;
3302 p = phys_page_find(page >> TARGET_PAGE_BITS);
3303 if (!p) {
3304 pd = IO_MEM_UNASSIGNED;
3305 } else {
3306 pd = p->phys_offset;
3307 }
3308
3309 if (is_write) {
3310 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3311 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3312 /* XXX: could force cpu_single_env to NULL to avoid
3313 potential bugs */
3314 if (l >= 4 && ((addr & 3) == 0)) {
3315 /* 32 bit write access */
3316#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
3317 val = ldl_p(buf);
3318#else
3319 val = *(const uint32_t *)buf;
3320#endif
3321 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3322 l = 4;
3323 } else if (l >= 2 && ((addr & 1) == 0)) {
3324 /* 16 bit write access */
3325#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
3326 val = lduw_p(buf);
3327#else
3328 val = *(const uint16_t *)buf;
3329#endif
3330 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
3331 l = 2;
3332 } else {
3333 /* 8 bit write access */
3334#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
3335 val = ldub_p(buf);
3336#else
3337 val = *(const uint8_t *)buf;
3338#endif
3339 io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
3340 l = 1;
3341 }
3342 } else {
3343 unsigned long addr1;
3344 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3345 /* RAM case */
3346#ifdef VBOX
3347 remR3PhysWrite(addr1, buf, l); NOREF(ptr);
3348#else
3349 ptr = phys_ram_base + addr1;
3350 memcpy(ptr, buf, l);
3351#endif
3352 if (!cpu_physical_memory_is_dirty(addr1)) {
3353 /* invalidate code */
3354 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3355 /* set dirty bit */
3356#ifdef VBOX
3357 if (RT_LIKELY((addr1 >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
3358#endif
3359 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3360 (0xff & ~CODE_DIRTY_FLAG);
3361 }
3362 }
3363 } else {
3364 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3365 !(pd & IO_MEM_ROMD)) {
3366 /* I/O case */
3367 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3368 if (l >= 4 && ((addr & 3) == 0)) {
3369 /* 32 bit read access */
3370 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3371#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
3372 stl_p(buf, val);
3373#else
3374 *(uint32_t *)buf = val;
3375#endif
3376 l = 4;
3377 } else if (l >= 2 && ((addr & 1) == 0)) {
3378 /* 16 bit read access */
3379 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
3380#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
3381 stw_p(buf, val);
3382#else
3383 *(uint16_t *)buf = val;
3384#endif
3385 l = 2;
3386 } else {
3387 /* 8 bit read access */
3388 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
3389#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
3390 stb_p(buf, val);
3391#else
3392 *(uint8_t *)buf = val;
3393#endif
3394 l = 1;
3395 }
3396 } else {
3397 /* RAM case */
3398#ifdef VBOX
3399 remR3PhysRead((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK), buf, l); NOREF(ptr);
3400#else
3401 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3402 (addr & ~TARGET_PAGE_MASK);
3403 memcpy(buf, ptr, l);
3404#endif
3405 }
3406 }
3407 len -= l;
3408 buf += l;
3409 addr += l;
3410 }
3411}
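
/* Convenience sketch: most callers go through the cpu_physical_memory_read
 and cpu_physical_memory_write wrappers (is_write 0 and 1 respectively),
 e.g. to fetch a 32-bit value from guest-physical memory by hand:

 uint8_t buf[4];
 cpu_physical_memory_read(gpa, buf, sizeof(buf));
 uint32_t v = ldl_p(buf);

 gpa is a placeholder for a guest-physical address; the ld*_phys and
 st*_phys helpers below do the same with less ceremony. */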
3412
3413#ifndef VBOX
3414/* used for ROM loading : can write in RAM and ROM */
3415void cpu_physical_memory_write_rom(target_phys_addr_t addr,
3416 const uint8_t *buf, int len)
3417{
3418 int l;
3419 uint8_t *ptr;
3420 target_phys_addr_t page;
3421 unsigned long pd;
3422 PhysPageDesc *p;
3423
3424 while (len > 0) {
3425 page = addr & TARGET_PAGE_MASK;
3426 l = (page + TARGET_PAGE_SIZE) - addr;
3427 if (l > len)
3428 l = len;
3429 p = phys_page_find(page >> TARGET_PAGE_BITS);
3430 if (!p) {
3431 pd = IO_MEM_UNASSIGNED;
3432 } else {
3433 pd = p->phys_offset;
3434 }
3435
3436 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
3437 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
3438 !(pd & IO_MEM_ROMD)) {
3439 /* do nothing */
3440 } else {
3441 unsigned long addr1;
3442 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3443 /* ROM/RAM case */
3444 ptr = phys_ram_base + addr1;
3445 memcpy(ptr, buf, l);
3446 }
3447 len -= l;
3448 buf += l;
3449 addr += l;
3450 }
3451}
3452#endif /* !VBOX */
3453
3454
3455/* warning: addr must be aligned */
3456uint32_t ldl_phys(target_phys_addr_t addr)
3457{
3458 int io_index;
3459 uint8_t *ptr;
3460 uint32_t val;
3461 unsigned long pd;
3462 PhysPageDesc *p;
3463
3464 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3465 if (!p) {
3466 pd = IO_MEM_UNASSIGNED;
3467 } else {
3468 pd = p->phys_offset;
3469 }
3470
3471 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3472 !(pd & IO_MEM_ROMD)) {
3473 /* I/O case */
3474 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3475 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3476 } else {
3477 /* RAM case */
3478#ifndef VBOX
3479 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3480 (addr & ~TARGET_PAGE_MASK);
3481 val = ldl_p(ptr);
3482#else
3483 val = remR3PhysReadU32((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK)); NOREF(ptr);
3484#endif
3485 }
3486 return val;
3487}
3488
3489/* warning: addr must be aligned */
3490uint64_t ldq_phys(target_phys_addr_t addr)
3491{
3492 int io_index;
3493 uint8_t *ptr;
3494 uint64_t val;
3495 unsigned long pd;
3496 PhysPageDesc *p;
3497
3498 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3499 if (!p) {
3500 pd = IO_MEM_UNASSIGNED;
3501 } else {
3502 pd = p->phys_offset;
3503 }
3504
3505 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3506 !(pd & IO_MEM_ROMD)) {
3507 /* I/O case */
3508 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3509#ifdef TARGET_WORDS_BIGENDIAN
3510 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
3511 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
3512#else
3513 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3514 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
3515#endif
3516 } else {
3517 /* RAM case */
3518#ifndef VBOX
3519 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3520 (addr & ~TARGET_PAGE_MASK);
3521 val = ldq_p(ptr);
3522#else
3523 val = remR3PhysReadU64((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK)); NOREF(ptr);
3524#endif
3525 }
3526 return val;
3527}
3528
3529/* XXX: optimize */
3530uint32_t ldub_phys(target_phys_addr_t addr)
3531{
3532 uint8_t val;
3533 cpu_physical_memory_read(addr, &val, 1);
3534 return val;
3535}
3536
3537/* XXX: optimize */
3538uint32_t lduw_phys(target_phys_addr_t addr)
3539{
3540 uint16_t val;
3541 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
3542 return tswap16(val);
3543}
3544
3545/* warning: addr must be aligned. The ram page is not marked as dirty
3546 and the code inside is not invalidated. It is useful if the dirty
3547 bits are used to track modified PTEs */
3548void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
3549{
3550 int io_index;
3551 uint8_t *ptr;
3552 unsigned long pd;
3553 PhysPageDesc *p;
3554
3555 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3556 if (!p) {
3557 pd = IO_MEM_UNASSIGNED;
3558 } else {
3559 pd = p->phys_offset;
3560 }
3561
3562 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3563 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3564 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3565 } else {
3566#ifndef VBOX
3567 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3568 (addr & ~TARGET_PAGE_MASK);
3569 stl_p(ptr, val);
3570#else
3571 remR3PhysWriteU32((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK), val); NOREF(ptr);
3572#endif
3573#ifndef VBOX
3574 if (unlikely(in_migration)) {
 unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3575 if (!cpu_physical_memory_is_dirty(addr1)) {
3576 /* invalidate code */
3577 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3578 /* set dirty bit */
3579 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3580 (0xff & ~CODE_DIRTY_FLAG);
3581 }
3582 }
3583#endif
3584 }
3585}
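
/* Typical use (sketch, modelled on how target MMU code updates guest page
 tables): set an accessed or dirty bit in a PTE without flagging the page
 that holds the PTE as modified, so that dirty tracking of PTE pages is
 not disturbed by the emulator's own page walk:

 pte |= PG_ACCESSED_MASK;
 stl_phys_notdirty(pte_addr, pte);

 pte and pte_addr are placeholders for values computed by the page walk;
 PG_ACCESSED_MASK is the x86 name and appears here only as an example. */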
3586
3587void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
3588{
3589 int io_index;
3590 uint8_t *ptr;
3591 unsigned long pd;
3592 PhysPageDesc *p;
3593
3594 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3595 if (!p) {
3596 pd = IO_MEM_UNASSIGNED;
3597 } else {
3598 pd = p->phys_offset;
3599 }
3600
3601 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3602 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3603#ifdef TARGET_WORDS_BIGENDIAN
3604 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
3605 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
3606#else
3607 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3608 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
3609#endif
3610 } else {
3611#ifndef VBOX
3612 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3613 (addr & ~TARGET_PAGE_MASK);
3614 stq_p(ptr, val);
3615#else
3616 remR3PhysWriteU64((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK), val); NOREF(ptr);
3617#endif
3618 }
3619}
3620
3621
3622/* warning: addr must be aligned */
3623void stl_phys(target_phys_addr_t addr, uint32_t val)
3624{
3625 int io_index;
3626 uint8_t *ptr;
3627 unsigned long pd;
3628 PhysPageDesc *p;
3629
3630 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3631 if (!p) {
3632 pd = IO_MEM_UNASSIGNED;
3633 } else {
3634 pd = p->phys_offset;
3635 }
3636
3637 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3638 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3639 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3640 } else {
3641 unsigned long addr1;
3642 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3643 /* RAM case */
3644#ifndef VBOX
3645 ptr = phys_ram_base + addr1;
3646 stl_p(ptr, val);
3647#else
3648 remR3PhysWriteU32((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK), val); NOREF(ptr);
3649#endif
3650 if (!cpu_physical_memory_is_dirty(addr1)) {
3651 /* invalidate code */
3652 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3653 /* set dirty bit */
3654#ifdef VBOX
3655 if (RT_LIKELY((addr1 >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
3656#endif
3657 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3658 (0xff & ~CODE_DIRTY_FLAG);
3659 }
3660 }
3661}
3662
3663/* XXX: optimize */
3664void stb_phys(target_phys_addr_t addr, uint32_t val)
3665{
3666 uint8_t v = val;
3667 cpu_physical_memory_write(addr, &v, 1);
3668}
3669
3670/* XXX: optimize */
3671void stw_phys(target_phys_addr_t addr, uint32_t val)
3672{
3673 uint16_t v = tswap16(val);
3674 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
3675}
3676
3677/* XXX: optimize */
3678void stq_phys(target_phys_addr_t addr, uint64_t val)
3679{
3680 val = tswap64(val);
3681 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
3682}
3683
3684#endif
3685
3686/* virtual memory access for debug */
3687int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3688 uint8_t *buf, int len, int is_write)
3689{
3690 int l;
3691 target_ulong page, phys_addr;
3692
3693 while (len > 0) {
3694 page = addr & TARGET_PAGE_MASK;
3695 phys_addr = cpu_get_phys_page_debug(env, page);
3696 /* if no physical page mapped, return an error */
3697 if (phys_addr == -1)
3698 return -1;
3699 l = (page + TARGET_PAGE_SIZE) - addr;
3700 if (l > len)
3701 l = len;
3702 cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
3703 buf, l, is_write);
3704 len -= l;
3705 buf += l;
3706 addr += l;
3707 }
3708 return 0;
3709}
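
/* Usage sketch: this is the routine debugger stubs use to peek at guest
 virtual memory, e.g. reading a few bytes at the current program counter
 (pc stands for whatever register the target keeps its PC in):

 uint8_t insn[16];
 if (cpu_memory_rw_debug(env, pc, insn, sizeof(insn), 0) == 0)
 handle_bytes(insn, sizeof(insn));

 handle_bytes is a placeholder; a -1 return means some page in the range
 had no valid translation. */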
3710
3711/* in deterministic execution mode, instructions doing device I/Os
3712 must be at the end of the TB */
3713void cpu_io_recompile(CPUState *env, void *retaddr)
3714{
3715 TranslationBlock *tb;
3716 uint32_t n, cflags;
3717 target_ulong pc, cs_base;
3718 uint64_t flags;
3719
3720 tb = tb_find_pc((unsigned long)retaddr);
3721 if (!tb) {
3722 cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
3723 retaddr);
3724 }
3725 n = env->icount_decr.u16.low + tb->icount;
3726 cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
3727 /* Calculate how many instructions had been executed before the fault
3728 occurred. */
3729 n = n - env->icount_decr.u16.low;
3730 /* Generate a new TB ending on the I/O insn. */
3731 n++;
3732 /* On MIPS and SH, delay slot instructions can only be restarted if
3733 they were already the first instruction in the TB. If this is not
3734 the first instruction in a TB then re-execute the preceding
3735 branch. */
3736#if defined(TARGET_MIPS)
3737 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
3738 env->active_tc.PC -= 4;
3739 env->icount_decr.u16.low++;
3740 env->hflags &= ~MIPS_HFLAG_BMASK;
3741 }
3742#elif defined(TARGET_SH4)
3743 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
3744 && n > 1) {
3745 env->pc -= 2;
3746 env->icount_decr.u16.low++;
3747 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
3748 }
3749#endif
3750 /* This should never happen. */
3751 if (n > CF_COUNT_MASK)
3752 cpu_abort(env, "TB too big during recompile");
3753
3754 cflags = n | CF_LAST_IO;
3755 pc = tb->pc;
3756 cs_base = tb->cs_base;
3757 flags = tb->flags;
3758 tb_phys_invalidate(tb, -1);
3759 /* FIXME: In theory this could raise an exception. In practice
3760 we have already translated the block once so it's probably ok. */
3761 tb_gen_code(env, pc, cs_base, flags, cflags);
3762 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
3763 the first in the TB) then we end up generating a whole new TB and
3764 repeating the fault, which is horribly inefficient.
3765 Better would be to execute just this insn uncached, or generate a
3766 second new TB. */
3767 cpu_resume_from_signal(env, NULL);
3768}
3769
3770#ifndef VBOX
3771void dump_exec_info(FILE *f,
3772 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
3773{
3774 int i, target_code_size, max_target_code_size;
3775 int direct_jmp_count, direct_jmp2_count, cross_page;
3776 TranslationBlock *tb;
3777
3778 target_code_size = 0;
3779 max_target_code_size = 0;
3780 cross_page = 0;
3781 direct_jmp_count = 0;
3782 direct_jmp2_count = 0;
3783 for(i = 0; i < nb_tbs; i++) {
3784 tb = &tbs[i];
3785 target_code_size += tb->size;
3786 if (tb->size > max_target_code_size)
3787 max_target_code_size = tb->size;
3788 if (tb->page_addr[1] != -1)
3789 cross_page++;
3790 if (tb->tb_next_offset[0] != 0xffff) {
3791 direct_jmp_count++;
3792 if (tb->tb_next_offset[1] != 0xffff) {
3793 direct_jmp2_count++;
3794 }
3795 }
3796 }
3797 /* XXX: avoid using doubles ? */
3798 cpu_fprintf(f, "Translation buffer state:\n");
3799 cpu_fprintf(f, "gen code size %ld/%ld\n",
3800 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
3801 cpu_fprintf(f, "TB count %d/%d\n",
3802 nb_tbs, code_gen_max_blocks);
3803 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
3804 nb_tbs ? target_code_size / nb_tbs : 0,
3805 max_target_code_size);
3806 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
3807 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
3808 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
3809 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
3810 cross_page,
3811 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
3812 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
3813 direct_jmp_count,
3814 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
3815 direct_jmp2_count,
3816 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
3817 cpu_fprintf(f, "\nStatistics:\n");
3818 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
3819 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
3820 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
3821 tcg_dump_info(f, cpu_fprintf);
3822}
3823#endif /* !VBOX */
3824
3825#if !defined(CONFIG_USER_ONLY)
3826
3827#define MMUSUFFIX _cmmu
3828#define GETPC() NULL
3829#define env cpu_single_env
3830#define SOFTMMU_CODE_ACCESS
3831
3832#define SHIFT 0
3833#include "softmmu_template.h"
3834
3835#define SHIFT 1
3836#include "softmmu_template.h"
3837
3838#define SHIFT 2
3839#include "softmmu_template.h"
3840
3841#define SHIFT 3
3842#include "softmmu_template.h"
3843
3844#undef env
3845
3846#endif