VirtualBox

source: vbox/trunk/src/recompiler/exec.c@40843

Last change on this file since 40843 was 37702, checked in by vboxsync, 13 years ago

REM/VMM: Don't flush the TLB if you don't hold the EM/REM lock; some other EMT may be executing code in the recompiler and could be really surprised by a TLB flush.

File size: 133.1 KB
1/*
2 * virtual page mapping and translated block handling
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19
20/*
21 * Oracle LGPL Disclaimer: For the avoidance of doubt, except that if any license choice
22 * other than GPL or LGPL is available it will apply instead, Oracle elects to use only
23 * the Lesser General Public License version 2.1 (LGPLv2) at this time for any software where
24 * a choice of LGPL license versions is made available with the language indicating
25 * that LGPLv2 or any later version may be used, or where a choice of which version
26 * of the LGPL is applied is otherwise unspecified.
27 */
28
29#include "config.h"
30#ifndef VBOX
31#ifdef _WIN32
32#include <windows.h>
33#else
34#include <sys/types.h>
35#include <sys/mman.h>
36#endif
37#include <stdlib.h>
38#include <stdio.h>
39#include <stdarg.h>
40#include <string.h>
41#include <errno.h>
42#include <unistd.h>
43#include <inttypes.h>
44#else /* VBOX */
45# include <stdlib.h>
46# include <stdio.h>
47# include <iprt/alloc.h>
48# include <iprt/string.h>
49# include <iprt/param.h>
50# include <VBox/vmm/pgm.h> /* PGM_DYNAMIC_RAM_ALLOC */
51#endif /* VBOX */
52
53#include "cpu.h"
54#include "exec-all.h"
55#include "qemu-common.h"
56#include "tcg.h"
57#ifndef VBOX
58#include "hw/hw.h"
59#include "hw/qdev.h"
60#endif /* !VBOX */
61#include "osdep.h"
62#include "kvm.h"
63#include "qemu-timer.h"
64#if defined(CONFIG_USER_ONLY)
65#include <qemu.h>
66#include <signal.h>
67#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
68#include <sys/param.h>
69#if __FreeBSD_version >= 700104
70#define HAVE_KINFO_GETVMMAP
71#define sigqueue sigqueue_freebsd /* avoid redefinition */
72#include <sys/time.h>
73#include <sys/proc.h>
74#include <machine/profile.h>
75#define _KERNEL
76#include <sys/user.h>
77#undef _KERNEL
78#undef sigqueue
79#include <libutil.h>
80#endif
81#endif
82#endif
83
84//#define DEBUG_TB_INVALIDATE
85//#define DEBUG_FLUSH
86//#define DEBUG_TLB
87//#define DEBUG_UNASSIGNED
88
89/* make various TB consistency checks */
90//#define DEBUG_TB_CHECK
91//#define DEBUG_TLB_CHECK
92
93//#define DEBUG_IOPORT
94//#define DEBUG_SUBPAGE
95
96#if !defined(CONFIG_USER_ONLY)
97/* TB consistency checks only implemented for usermode emulation. */
98#undef DEBUG_TB_CHECK
99#endif
100
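/* After a page holding translated code has seen this many write
   invalidations, build_page_bitmap() records which bytes of the page are
   actually covered by TBs, so later writes that miss all translated code
   can be handled without flushing (see tb_invalidate_phys_page_fast). */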
101#define SMC_BITMAP_USE_THRESHOLD 10
102
103static TranslationBlock *tbs;
104static int code_gen_max_blocks;
105TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
106static int nb_tbs;
107/* any access to the tbs or the page table must use this lock */
108spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
109
110#ifndef VBOX
111#if defined(__arm__) || defined(__sparc_v9__)
112/* The prologue must be reachable with a direct jump. ARM and Sparc64
113 have limited branch ranges (possibly also PPC) so place it in a
 114 section close to the code segment. */
115#define code_gen_section \
116 __attribute__((__section__(".gen_code"))) \
117 __attribute__((aligned (32)))
118#elif defined(_WIN32)
119/* Maximum alignment for Win32 is 16. */
120#define code_gen_section \
121 __attribute__((aligned (16)))
122#else
123#define code_gen_section \
124 __attribute__((aligned (32)))
125#endif
126
127uint8_t code_gen_prologue[1024] code_gen_section;
128#else /* VBOX */
129extern uint8_t *code_gen_prologue;
130#endif /* VBOX */
131static uint8_t *code_gen_buffer;
132static unsigned long code_gen_buffer_size;
133/* threshold to flush the translated code buffer */
134static unsigned long code_gen_buffer_max_size;
135static uint8_t *code_gen_ptr;
136
137#if !defined(CONFIG_USER_ONLY)
138# ifndef VBOX
139int phys_ram_fd;
140static int in_migration;
141# endif /* !VBOX */
142
143RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list) };
144#endif
145
146CPUState *first_cpu;
147/* current CPU in the current thread. It is only valid inside
148 cpu_exec() */
149CPUState *cpu_single_env;
150/* 0 = Do not count executed instructions.
151 1 = Precise instruction counting.
152 2 = Adaptive rate instruction counting. */
153int use_icount = 0;
154/* Current instruction counter. While executing translated code this may
155 include some instructions that have not yet been executed. */
156int64_t qemu_icount;
157
158typedef struct PageDesc {
159 /* list of TBs intersecting this ram page */
160 TranslationBlock *first_tb;
 161 /* in order to optimize self modifying code, we count the number
 162 of write invalidations of a given page; past a threshold a code bitmap is used */
163 unsigned int code_write_count;
164 uint8_t *code_bitmap;
165#if defined(CONFIG_USER_ONLY)
166 unsigned long flags;
167#endif
168} PageDesc;
169
170/* In system mode we want L1_MAP to be based on ram offsets,
171 while in user mode we want it to be based on virtual addresses. */
172#if !defined(CONFIG_USER_ONLY)
173#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
174# define L1_MAP_ADDR_SPACE_BITS HOST_LONG_BITS
175#else
176# define L1_MAP_ADDR_SPACE_BITS TARGET_PHYS_ADDR_SPACE_BITS
177#endif
178#else
179# define L1_MAP_ADDR_SPACE_BITS TARGET_VIRT_ADDR_SPACE_BITS
180#endif
181
182/* Size of the L2 (and L3, etc) page tables. */
183#define L2_BITS 10
184#define L2_SIZE (1 << L2_BITS)
185
186/* The bits remaining after N lower levels of page tables. */
187#define P_L1_BITS_REM \
188 ((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
189#define V_L1_BITS_REM \
190 ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
191
192/* Size of the L1 page table. Avoid silly small sizes. */
193#if P_L1_BITS_REM < 4
194#define P_L1_BITS (P_L1_BITS_REM + L2_BITS)
195#else
196#define P_L1_BITS P_L1_BITS_REM
197#endif
198
199#if V_L1_BITS_REM < 4
200#define V_L1_BITS (V_L1_BITS_REM + L2_BITS)
201#else
202#define V_L1_BITS V_L1_BITS_REM
203#endif
204
205#define P_L1_SIZE ((target_phys_addr_t)1 << P_L1_BITS)
206#define V_L1_SIZE ((target_ulong)1 << V_L1_BITS)
207
208#define P_L1_SHIFT (TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - P_L1_BITS)
209#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
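/* Example (actual values depend on the target configuration): with
   TARGET_PHYS_ADDR_SPACE_BITS = 36, TARGET_PAGE_BITS = 12 and L2_BITS = 10,
   24 bits of page-frame number remain, 24 % 10 = 4, so P_L1_BITS = 4,
   P_L1_SIZE = 16 and P_L1_SHIFT = 20: a 16-entry L1 table above two
   1024-entry lower levels. */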
210
211unsigned long qemu_real_host_page_size;
212unsigned long qemu_host_page_bits;
213unsigned long qemu_host_page_size;
214unsigned long qemu_host_page_mask;
215
216/* This is a multi-level map on the virtual address space.
217 The bottom level has pointers to PageDesc. */
218static void *l1_map[V_L1_SIZE];
219
220#if !defined(CONFIG_USER_ONLY)
221typedef struct PhysPageDesc {
222 /* offset in host memory of the page + io_index in the low bits */
223 ram_addr_t phys_offset;
224 ram_addr_t region_offset;
225} PhysPageDesc;
226
227/* This is a multi-level map on the physical address space.
228 The bottom level has pointers to PhysPageDesc. */
229static void *l1_phys_map[P_L1_SIZE];
230
231static void io_mem_init(void);
232
233/* io memory support */
234CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
235CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
236void *io_mem_opaque[IO_MEM_NB_ENTRIES];
237static char io_mem_used[IO_MEM_NB_ENTRIES];
238static int io_mem_watch;
239#endif
240
241#ifndef VBOX
242/* log support */
243#ifdef WIN32
244static const char *logfilename = "qemu.log";
245#else
246static const char *logfilename = "/tmp/qemu.log";
247#endif
248#endif /* !VBOX */
249FILE *logfile;
250int loglevel;
251#ifndef VBOX
252static int log_append = 0;
253#endif /* !VBOX */
254
255/* statistics */
256#ifndef VBOX
257#if !defined(CONFIG_USER_ONLY)
258static int tlb_flush_count;
259#endif
260static int tb_flush_count;
261static int tb_phys_invalidate_count;
262#else /* VBOX - Resettable U32 stats, see VBoxRecompiler.c. */
263uint32_t tlb_flush_count;
264uint32_t tb_flush_count;
265uint32_t tb_phys_invalidate_count;
266#endif /* VBOX */
267
268#ifndef VBOX
269#ifdef _WIN32
270static void map_exec(void *addr, long size)
271{
272 DWORD old_protect;
273 VirtualProtect(addr, size,
274 PAGE_EXECUTE_READWRITE, &old_protect);
275
276}
277#else
278static void map_exec(void *addr, long size)
279{
280 unsigned long start, end, page_size;
281
282 page_size = getpagesize();
283 start = (unsigned long)addr;
284 start &= ~(page_size - 1);
285
286 end = (unsigned long)addr + size;
287 end += page_size - 1;
288 end &= ~(page_size - 1);
289
290 mprotect((void *)start, end - start,
291 PROT_READ | PROT_WRITE | PROT_EXEC);
292}
293#endif
294#else /* VBOX */
295static void map_exec(void *addr, long size)
296{
297 RTMemProtect(addr, size,
298 RTMEM_PROT_EXEC | RTMEM_PROT_READ | RTMEM_PROT_WRITE);
299}
300#endif /* VBOX */
301
302static void page_init(void)
303{
304 /* NOTE: we can always suppose that qemu_host_page_size >=
305 TARGET_PAGE_SIZE */
306#ifdef VBOX
307 RTMemProtect(code_gen_buffer, sizeof(code_gen_buffer),
308 RTMEM_PROT_EXEC | RTMEM_PROT_READ | RTMEM_PROT_WRITE);
309 qemu_real_host_page_size = PAGE_SIZE;
310#else /* !VBOX */
311#ifdef _WIN32
312 {
313 SYSTEM_INFO system_info;
314
315 GetSystemInfo(&system_info);
316 qemu_real_host_page_size = system_info.dwPageSize;
317 }
318#else
319 qemu_real_host_page_size = getpagesize();
320#endif
321#endif /* !VBOX */
322 if (qemu_host_page_size == 0)
323 qemu_host_page_size = qemu_real_host_page_size;
324 if (qemu_host_page_size < TARGET_PAGE_SIZE)
325 qemu_host_page_size = TARGET_PAGE_SIZE;
326 qemu_host_page_bits = 0;
327 while ((1 << qemu_host_page_bits) < VBOX_ONLY((int))qemu_host_page_size)
328 qemu_host_page_bits++;
329 qemu_host_page_mask = ~(qemu_host_page_size - 1);
330
331#ifndef VBOX /* We use other means to set reserved bit on our pages */
332#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
333 {
334#ifdef HAVE_KINFO_GETVMMAP
335 struct kinfo_vmentry *freep;
336 int i, cnt;
337
338 freep = kinfo_getvmmap(getpid(), &cnt);
339 if (freep) {
340 mmap_lock();
341 for (i = 0; i < cnt; i++) {
342 unsigned long startaddr, endaddr;
343
344 startaddr = freep[i].kve_start;
345 endaddr = freep[i].kve_end;
346 if (h2g_valid(startaddr)) {
347 startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
348
349 if (h2g_valid(endaddr)) {
350 endaddr = h2g(endaddr);
351 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
352 } else {
353#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
354 endaddr = ~0ul;
355 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
356#endif
357 }
358 }
359 }
360 free(freep);
361 mmap_unlock();
362 }
363#else
364 FILE *f;
365
366 last_brk = (unsigned long)sbrk(0);
367
368 f = fopen("/compat/linux/proc/self/maps", "r");
369 if (f) {
370 mmap_lock();
371
372 do {
373 unsigned long startaddr, endaddr;
374 int n;
375
376 n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);
377
378 if (n == 2 && h2g_valid(startaddr)) {
379 startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
380
381 if (h2g_valid(endaddr)) {
382 endaddr = h2g(endaddr);
383 } else {
384 endaddr = ~0ul;
385 }
386 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
387 }
388 } while (!feof(f));
389
390 fclose(f);
391 mmap_unlock();
392 }
393#endif
394 }
395#endif
396#endif /* !VBOX */
397}
398
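/* Walk the multi-level l1_map radix tree for page 'index'. Each level
   below L1 consumes L2_BITS of the index; missing intermediate tables are
   allocated on demand when 'alloc' is non-zero, otherwise NULL is
   returned. */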
399static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
400{
401 PageDesc *pd;
402 void **lp;
403 int i;
404
405#if defined(CONFIG_USER_ONLY)
406 /* We can't use qemu_malloc because it may recurse into a locked mutex. */
407# define ALLOC(P, SIZE) \
408 do { \
409 P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE, \
410 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); \
411 } while (0)
412#else
413# define ALLOC(P, SIZE) \
414 do { P = qemu_mallocz(SIZE); } while (0)
415#endif
416
417 /* Level 1. Always allocated. */
418 lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));
419
420 /* Level 2..N-1. */
421 for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
422 void **p = *lp;
423
424 if (p == NULL) {
425 if (!alloc) {
426 return NULL;
427 }
428 ALLOC(p, sizeof(void *) * L2_SIZE);
429 *lp = p;
430 }
431
432 lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
433 }
434
435 pd = *lp;
436 if (pd == NULL) {
437 if (!alloc) {
438 return NULL;
439 }
440 ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
441 *lp = pd;
442 }
443
444#undef ALLOC
445
446 return pd + (index & (L2_SIZE - 1));
447}
448
449static inline PageDesc *page_find(tb_page_addr_t index)
450{
451 return page_find_alloc(index, 0);
452}
453
454#if !defined(CONFIG_USER_ONLY)
455static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
456{
457 PhysPageDesc *pd;
458 void **lp;
459 int i;
460
461 /* Level 1. Always allocated. */
462 lp = l1_phys_map + ((index >> P_L1_SHIFT) & (P_L1_SIZE - 1));
463
464 /* Level 2..N-1. */
465 for (i = P_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
466 void **p = *lp;
467 if (p == NULL) {
468 if (!alloc) {
469 return NULL;
470 }
471 *lp = p = qemu_mallocz(sizeof(void *) * L2_SIZE);
472 }
473 lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
474 }
475
476 pd = *lp;
477 if (pd == NULL) {
478 int i;
479
480 if (!alloc) {
481 return NULL;
482 }
483
484 *lp = pd = qemu_malloc(sizeof(PhysPageDesc) * L2_SIZE);
485
486 for (i = 0; i < L2_SIZE; i++) {
487 pd[i].phys_offset = IO_MEM_UNASSIGNED;
488 pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
489 }
490 }
491
492 return pd + (index & (L2_SIZE - 1));
493}
494
495static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
496{
497 return phys_page_find_alloc(index, 0);
498}
499
500static void tlb_protect_code(ram_addr_t ram_addr);
501static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
502 target_ulong vaddr);
503#define mmap_lock() do { } while(0)
504#define mmap_unlock() do { } while(0)
505#endif
506
 507#ifdef VBOX /* We don't need such a huge codegen buffer size, as we execute
508 most of the code in raw or hwacc mode. */
509#define DEFAULT_CODE_GEN_BUFFER_SIZE (8 * 1024 * 1024)
510#else /* !VBOX */
511#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
512#endif /* !VBOX */
513
514#if defined(CONFIG_USER_ONLY)
515/* Currently it is not recommended to allocate big chunks of data in
 516 user mode. This will change when a dedicated libc is used. */
517#define USE_STATIC_CODE_GEN_BUFFER
518#endif
519
520#if defined(VBOX) && defined(USE_STATIC_CODE_GEN_BUFFER)
521# error "VBox allocates codegen buffer dynamically"
522#endif
523
524#ifdef USE_STATIC_CODE_GEN_BUFFER
525static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
526 __attribute__((aligned (CODE_GEN_ALIGN)));
527#endif
528
529static void code_gen_alloc(unsigned long tb_size)
530{
531#ifdef USE_STATIC_CODE_GEN_BUFFER
532 code_gen_buffer = static_code_gen_buffer;
533 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
534 map_exec(code_gen_buffer, code_gen_buffer_size);
535#else
536# ifdef VBOX
 537 /* We cannot use phys_ram_size here, as it is still 0 at this point;
 538 * it only gets initialized once the RAM registration callback
 539 * (REMR3NotifyPhysRamRegister()) has been called.
540 */
541 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
542# else /* !VBOX */
543 code_gen_buffer_size = tb_size;
544 if (code_gen_buffer_size == 0) {
545#if defined(CONFIG_USER_ONLY)
546 /* in user mode, phys_ram_size is not meaningful */
547 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
548#else
549 /* XXX: needs adjustments */
550 code_gen_buffer_size = (unsigned long)(ram_size / 4);
551#endif
552 }
553 if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
554 code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
555# endif /* !VBOX */
556 /* The code gen buffer location may have constraints depending on
557 the host cpu and OS */
558# ifdef VBOX
559 code_gen_buffer = RTMemExecAlloc(code_gen_buffer_size);
560
561 if (!code_gen_buffer) {
 562 LogRel(("REM: failed to allocate codegen buffer %lld\n",
563 code_gen_buffer_size));
564 return;
565 }
566# else /* !VBOX */
567#if defined(__linux__)
568 {
569 int flags;
570 void *start = NULL;
571
572 flags = MAP_PRIVATE | MAP_ANONYMOUS;
573#if defined(__x86_64__)
574 flags |= MAP_32BIT;
575 /* Cannot map more than that */
576 if (code_gen_buffer_size > (800 * 1024 * 1024))
577 code_gen_buffer_size = (800 * 1024 * 1024);
578#elif defined(__sparc_v9__)
579 // Map the buffer below 2G, so we can use direct calls and branches
580 flags |= MAP_FIXED;
581 start = (void *) 0x60000000UL;
582 if (code_gen_buffer_size > (512 * 1024 * 1024))
583 code_gen_buffer_size = (512 * 1024 * 1024);
584#elif defined(__arm__)
585 /* Map the buffer below 32M, so we can use direct calls and branches */
586 flags |= MAP_FIXED;
587 start = (void *) 0x01000000UL;
588 if (code_gen_buffer_size > 16 * 1024 * 1024)
589 code_gen_buffer_size = 16 * 1024 * 1024;
590#elif defined(__s390x__)
591 /* Map the buffer so that we can use direct calls and branches. */
592 /* We have a +- 4GB range on the branches; leave some slop. */
593 if (code_gen_buffer_size > (3ul * 1024 * 1024 * 1024)) {
594 code_gen_buffer_size = 3ul * 1024 * 1024 * 1024;
595 }
596 start = (void *)0x90000000UL;
597#endif
598 code_gen_buffer = mmap(start, code_gen_buffer_size,
599 PROT_WRITE | PROT_READ | PROT_EXEC,
600 flags, -1, 0);
601 if (code_gen_buffer == MAP_FAILED) {
602 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
603 exit(1);
604 }
605 }
606#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__DragonFly__)
607 {
608 int flags;
609 void *addr = NULL;
610 flags = MAP_PRIVATE | MAP_ANONYMOUS;
611#if defined(__x86_64__)
612 /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
613 * 0x40000000 is free */
614 flags |= MAP_FIXED;
615 addr = (void *)0x40000000;
616 /* Cannot map more than that */
617 if (code_gen_buffer_size > (800 * 1024 * 1024))
618 code_gen_buffer_size = (800 * 1024 * 1024);
619#endif
620 code_gen_buffer = mmap(addr, code_gen_buffer_size,
621 PROT_WRITE | PROT_READ | PROT_EXEC,
622 flags, -1, 0);
623 if (code_gen_buffer == MAP_FAILED) {
624 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
625 exit(1);
626 }
627 }
628#else
629 code_gen_buffer = qemu_malloc(code_gen_buffer_size);
630 map_exec(code_gen_buffer, code_gen_buffer_size);
631#endif
632# endif /* !VBOX */
633#endif /* !USE_STATIC_CODE_GEN_BUFFER */
634#ifndef VBOX /** @todo r=bird: why are we different? */
635 map_exec(code_gen_prologue, sizeof(code_gen_prologue));
636#else
637 map_exec(code_gen_prologue, _1K);
638#endif
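    /* Leave a safety margin at the end of the buffer so that the translation
       which pushes code_gen_ptr past this threshold still fits. */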
639 code_gen_buffer_max_size = code_gen_buffer_size -
640 (TCG_MAX_OP_SIZE * OPC_MAX_SIZE);
641 code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
642 tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
643}
644
645/* Must be called before using the QEMU cpus. 'tb_size' is the size
646 (in bytes) allocated to the translation buffer. Zero means default
647 size. */
648void cpu_exec_init_all(unsigned long tb_size)
649{
650 cpu_gen_init();
651 code_gen_alloc(tb_size);
652 code_gen_ptr = code_gen_buffer;
653 page_init();
654#if !defined(CONFIG_USER_ONLY)
655 io_mem_init();
656#endif
657#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
658 /* There's no guest base to take into account, so go ahead and
659 initialize the prologue now. */
660 tcg_prologue_init(&tcg_ctx);
661#endif
662}
663
664#ifndef VBOX
665#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
666
667static int cpu_common_post_load(void *opaque, int version_id)
668{
669 CPUState *env = opaque;
670
671 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
672 version_id is increased. */
673 env->interrupt_request &= ~0x01;
674 tlb_flush(env, 1);
675
676 return 0;
677}
678
679static const VMStateDescription vmstate_cpu_common = {
680 .name = "cpu_common",
681 .version_id = 1,
682 .minimum_version_id = 1,
683 .minimum_version_id_old = 1,
684 .post_load = cpu_common_post_load,
685 .fields = (VMStateField []) {
686 VMSTATE_UINT32(halted, CPUState),
687 VMSTATE_UINT32(interrupt_request, CPUState),
688 VMSTATE_END_OF_LIST()
689 }
690};
691#endif
692
693CPUState *qemu_get_cpu(int cpu)
694{
695 CPUState *env = first_cpu;
696
697 while (env) {
698 if (env->cpu_index == cpu)
699 break;
700 env = env->next_cpu;
701 }
702
703 return env;
704}
705
706#endif /* !VBOX */
707
708void cpu_exec_init(CPUState *env)
709{
710 CPUState **penv;
711 int cpu_index;
712
713#if defined(CONFIG_USER_ONLY)
714 cpu_list_lock();
715#endif
716 env->next_cpu = NULL;
717 penv = &first_cpu;
718 cpu_index = 0;
719 while (*penv != NULL) {
720 penv = &(*penv)->next_cpu;
721 cpu_index++;
722 }
723 env->cpu_index = cpu_index;
724 env->numa_node = 0;
725 QTAILQ_INIT(&env->breakpoints);
726 QTAILQ_INIT(&env->watchpoints);
727 *penv = env;
728#ifndef VBOX
729#if defined(CONFIG_USER_ONLY)
730 cpu_list_unlock();
731#endif
732#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
733 vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
734 register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
735 cpu_save, cpu_load, env);
736#endif
737#endif /* !VBOX */
738}
739
740static inline void invalidate_page_bitmap(PageDesc *p)
741{
742 if (p->code_bitmap) {
743 qemu_free(p->code_bitmap);
744 p->code_bitmap = NULL;
745 }
746 p->code_write_count = 0;
747}
748
749/* Set to NULL all the 'first_tb' fields in all PageDescs. */
750
751static void page_flush_tb_1 (int level, void **lp)
752{
753 int i;
754
755 if (*lp == NULL) {
756 return;
757 }
758 if (level == 0) {
759 PageDesc *pd = *lp;
760 for (i = 0; i < L2_SIZE; ++i) {
761 pd[i].first_tb = NULL;
762 invalidate_page_bitmap(pd + i);
763 }
764 } else {
765 void **pp = *lp;
766 for (i = 0; i < L2_SIZE; ++i) {
767 page_flush_tb_1 (level - 1, pp + i);
768 }
769 }
770}
771
772static void page_flush_tb(void)
773{
774 int i;
775 for (i = 0; i < V_L1_SIZE; i++) {
776 page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
777 }
778}
779
780/* flush all the translation blocks */
781/* XXX: tb_flush is currently not thread safe */
782void tb_flush(CPUState *env1)
783{
784 CPUState *env;
785#ifdef VBOX
786 STAM_PROFILE_START(&env1->StatTbFlush, a);
787#endif
788#if defined(DEBUG_FLUSH)
789 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
790 (unsigned long)(code_gen_ptr - code_gen_buffer),
791 nb_tbs, nb_tbs > 0 ?
792 ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
793#endif
794 if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
795 cpu_abort(env1, "Internal error: code buffer overflow\n");
796
797 nb_tbs = 0;
798
799 for(env = first_cpu; env != NULL; env = env->next_cpu) {
800 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
801 }
802
803 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
804 page_flush_tb();
805
806 code_gen_ptr = code_gen_buffer;
807 /* XXX: flush processor icache at this point if cache flush is
808 expensive */
809 tb_flush_count++;
810#ifdef VBOX
811 STAM_PROFILE_STOP(&env1->StatTbFlush, a);
812#endif
813}
814
815#ifdef DEBUG_TB_CHECK
816
817static void tb_invalidate_check(target_ulong address)
818{
819 TranslationBlock *tb;
820 int i;
821 address &= TARGET_PAGE_MASK;
822 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
823 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
824 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
825 address >= tb->pc + tb->size)) {
826 printf("ERROR invalidate: address=" TARGET_FMT_lx
827 " PC=%08lx size=%04x\n",
828 address, (long)tb->pc, tb->size);
829 }
830 }
831 }
832}
833
834/* verify that all the pages have correct rights for code */
835static void tb_page_check(void)
836{
837 TranslationBlock *tb;
838 int i, flags1, flags2;
839
840 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
841 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
842 flags1 = page_get_flags(tb->pc);
843 flags2 = page_get_flags(tb->pc + tb->size - 1);
844 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
845 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
846 (long)tb->pc, tb->size, flags1, flags2);
847 }
848 }
849 }
850}
851
852#endif
853
854/* invalidate one TB */
855static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
856 int next_offset)
857{
858 TranslationBlock *tb1;
859 for(;;) {
860 tb1 = *ptb;
861 if (tb1 == tb) {
862 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
863 break;
864 }
865 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
866 }
867}
868
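/* Remove 'tb' from a page's TB list. Pointers on this list carry the
   index (0 or 1) of the page slot within the TB in their two low bits,
   so the walk follows tb->page_next[n1] (see tb_alloc_page). */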
869static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
870{
871 TranslationBlock *tb1;
872 unsigned int n1;
873
874 for(;;) {
875 tb1 = *ptb;
876 n1 = (long)tb1 & 3;
877 tb1 = (TranslationBlock *)((long)tb1 & ~3);
878 if (tb1 == tb) {
879 *ptb = tb1->page_next[n1];
880 break;
881 }
882 ptb = &tb1->page_next[n1];
883 }
884}
885
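/* Each TB keeps a circular list of the TBs that jump into it, threaded
   through jmp_first/jmp_next[]; the two low pointer bits encode the jump
   slot, and the value 2 tags the list head (the TB itself). */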
886static inline void tb_jmp_remove(TranslationBlock *tb, int n)
887{
888 TranslationBlock *tb1, **ptb;
889 unsigned int n1;
890
891 ptb = &tb->jmp_next[n];
892 tb1 = *ptb;
893 if (tb1) {
894 /* find tb(n) in circular list */
895 for(;;) {
896 tb1 = *ptb;
897 n1 = (long)tb1 & 3;
898 tb1 = (TranslationBlock *)((long)tb1 & ~3);
899 if (n1 == n && tb1 == tb)
900 break;
901 if (n1 == 2) {
902 ptb = &tb1->jmp_first;
903 } else {
904 ptb = &tb1->jmp_next[n1];
905 }
906 }
907 /* now we can suppress tb(n) from the list */
908 *ptb = tb->jmp_next[n];
909
910 tb->jmp_next[n] = NULL;
911 }
912}
913
914/* reset the jump entry 'n' of a TB so that it is not chained to
915 another TB */
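/* The jump is re-pointed at tb->tc_ptr + tb->tb_next_offset[n], i.e. the
   code immediately following the jump in this TB, so execution falls
   through to the TB's exit stub instead of chaining. */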
916static inline void tb_reset_jump(TranslationBlock *tb, int n)
917{
918 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
919}
920
921void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
922{
923 CPUState *env;
924 PageDesc *p;
925 unsigned int h, n1;
926 tb_page_addr_t phys_pc;
927 TranslationBlock *tb1, *tb2;
928
929 /* remove the TB from the hash list */
930 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
931 h = tb_phys_hash_func(phys_pc);
932 tb_remove(&tb_phys_hash[h], tb,
933 offsetof(TranslationBlock, phys_hash_next));
934
935 /* remove the TB from the page list */
936 if (tb->page_addr[0] != page_addr) {
937 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
938 tb_page_remove(&p->first_tb, tb);
939 invalidate_page_bitmap(p);
940 }
941 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
942 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
943 tb_page_remove(&p->first_tb, tb);
944 invalidate_page_bitmap(p);
945 }
946
947 tb_invalidated_flag = 1;
948
 949 /* remove the TB from the per-CPU tb_jmp_cache */
950 h = tb_jmp_cache_hash_func(tb->pc);
951 for(env = first_cpu; env != NULL; env = env->next_cpu) {
952 if (env->tb_jmp_cache[h] == tb)
953 env->tb_jmp_cache[h] = NULL;
954 }
955
956 /* suppress this TB from the two jump lists */
957 tb_jmp_remove(tb, 0);
958 tb_jmp_remove(tb, 1);
959
960 /* suppress any remaining jumps to this TB */
961 tb1 = tb->jmp_first;
962 for(;;) {
963 n1 = (long)tb1 & 3;
964 if (n1 == 2)
965 break;
966 tb1 = (TranslationBlock *)((long)tb1 & ~3);
967 tb2 = tb1->jmp_next[n1];
968 tb_reset_jump(tb1, n1);
969 tb1->jmp_next[n1] = NULL;
970 tb1 = tb2;
971 }
972 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
973
974 tb_phys_invalidate_count++;
975}
976
977#ifdef VBOX
978
979void tb_invalidate_virt(CPUState *env, uint32_t eip)
980{
981# if 1
982 tb_flush(env);
983# else
984 uint8_t *cs_base, *pc;
985 unsigned int flags, h, phys_pc;
986 TranslationBlock *tb, **ptb;
987
988 flags = env->hflags;
989 flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
990 cs_base = env->segs[R_CS].base;
991 pc = cs_base + eip;
992
993 tb = tb_find(&ptb, (unsigned long)pc, (unsigned long)cs_base,
994 flags);
995
996 if(tb)
997 {
998# ifdef DEBUG
999 printf("invalidating TB (%08X) at %08X\n", tb, eip);
1000# endif
1001 tb_invalidate(tb);
1002 //Note: this will leak TBs, but the whole cache will be flushed
1003 // when it happens too often
1004 tb->pc = 0;
1005 tb->cs_base = 0;
1006 tb->flags = 0;
1007 }
1008# endif
1009}
1010
1011# ifdef VBOX_STRICT
1012/**
1013 * Gets the page offset.
1014 */
1015unsigned long get_phys_page_offset(target_ulong addr)
1016{
1017 PhysPageDesc *p = phys_page_find(addr >> TARGET_PAGE_BITS);
1018 return p ? p->phys_offset : 0;
1019}
1020# endif /* VBOX_STRICT */
1021
1022#endif /* VBOX */
1023
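/* Set 'len' consecutive bits starting at bit 'start' in the bitmap 'tab'. */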
1024static inline void set_bits(uint8_t *tab, int start, int len)
1025{
1026 int end, mask, end1;
1027
1028 end = start + len;
1029 tab += start >> 3;
1030 mask = 0xff << (start & 7);
1031 if ((start & ~7) == (end & ~7)) {
1032 if (start < end) {
1033 mask &= ~(0xff << (end & 7));
1034 *tab |= mask;
1035 }
1036 } else {
1037 *tab++ |= mask;
1038 start = (start + 8) & ~7;
1039 end1 = end & ~7;
1040 while (start < end1) {
1041 *tab++ = 0xff;
1042 start += 8;
1043 }
1044 if (start < end) {
1045 mask = ~(0xff << (end & 7));
1046 *tab |= mask;
1047 }
1048 }
1049}
1050
1051static void build_page_bitmap(PageDesc *p)
1052{
1053 int n, tb_start, tb_end;
1054 TranslationBlock *tb;
1055
1056 p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);
1057
1058 tb = p->first_tb;
1059 while (tb != NULL) {
1060 n = (long)tb & 3;
1061 tb = (TranslationBlock *)((long)tb & ~3);
1062 /* NOTE: this is subtle as a TB may span two physical pages */
1063 if (n == 0) {
1064 /* NOTE: tb_end may be after the end of the page, but
1065 it is not a problem */
1066 tb_start = tb->pc & ~TARGET_PAGE_MASK;
1067 tb_end = tb_start + tb->size;
1068 if (tb_end > TARGET_PAGE_SIZE)
1069 tb_end = TARGET_PAGE_SIZE;
1070 } else {
1071 tb_start = 0;
1072 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1073 }
1074 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
1075 tb = tb->page_next[n];
1076 }
1077}
1078
1079TranslationBlock *tb_gen_code(CPUState *env,
1080 target_ulong pc, target_ulong cs_base,
1081 int flags, int cflags)
1082{
1083 TranslationBlock *tb;
1084 uint8_t *tc_ptr;
1085 tb_page_addr_t phys_pc, phys_page2;
1086 target_ulong virt_page2;
1087 int code_gen_size;
1088
1089 phys_pc = get_page_addr_code(env, pc);
1090 tb = tb_alloc(pc);
1091 if (!tb) {
1092 /* flush must be done */
1093 tb_flush(env);
1094 /* cannot fail at this point */
1095 tb = tb_alloc(pc);
1096 /* Don't forget to invalidate previous TB info. */
1097 tb_invalidated_flag = 1;
1098 }
1099 tc_ptr = code_gen_ptr;
1100 tb->tc_ptr = tc_ptr;
1101 tb->cs_base = cs_base;
1102 tb->flags = flags;
1103 tb->cflags = cflags;
1104 cpu_gen_code(env, tb, &code_gen_size);
1105 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
1106
1107 /* check next page if needed */
1108 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
1109 phys_page2 = -1;
1110 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
1111 phys_page2 = get_page_addr_code(env, virt_page2);
1112 }
1113 tb_link_page(tb, phys_pc, phys_page2);
1114 return tb;
1115}
1116
1117/* invalidate all TBs which intersect with the target physical page
1118 starting in range [start;end[. NOTE: start and end must refer to
1119 the same physical page. 'is_cpu_write_access' should be true if called
1120 from a real cpu write access: the virtual CPU will exit the current
1121 TB if code is modified inside this TB. */
1122void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
1123 int is_cpu_write_access)
1124{
1125 TranslationBlock *tb, *tb_next, *saved_tb;
1126 CPUState *env = cpu_single_env;
1127 tb_page_addr_t tb_start, tb_end;
1128 PageDesc *p;
1129 int n;
1130#ifdef TARGET_HAS_PRECISE_SMC
1131 int current_tb_not_found = is_cpu_write_access;
1132 TranslationBlock *current_tb = NULL;
1133 int current_tb_modified = 0;
1134 target_ulong current_pc = 0;
1135 target_ulong current_cs_base = 0;
1136 int current_flags = 0;
1137#endif /* TARGET_HAS_PRECISE_SMC */
1138
1139 p = page_find(start >> TARGET_PAGE_BITS);
1140 if (!p)
1141 return;
1142 if (!p->code_bitmap &&
1143 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
1144 is_cpu_write_access) {
1145 /* build code bitmap */
1146 build_page_bitmap(p);
1147 }
1148
1149 /* we remove all the TBs in the range [start, end[ */
1150 /* XXX: see if in some cases it could be faster to invalidate all the code */
1151 tb = p->first_tb;
1152 while (tb != NULL) {
1153 n = (long)tb & 3;
1154 tb = (TranslationBlock *)((long)tb & ~3);
1155 tb_next = tb->page_next[n];
1156 /* NOTE: this is subtle as a TB may span two physical pages */
1157 if (n == 0) {
1158 /* NOTE: tb_end may be after the end of the page, but
1159 it is not a problem */
1160 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
1161 tb_end = tb_start + tb->size;
1162 } else {
1163 tb_start = tb->page_addr[1];
1164 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1165 }
1166 if (!(tb_end <= start || tb_start >= end)) {
1167#ifdef TARGET_HAS_PRECISE_SMC
1168 if (current_tb_not_found) {
1169 current_tb_not_found = 0;
1170 current_tb = NULL;
1171 if (env->mem_io_pc) {
1172 /* now we have a real cpu fault */
1173 current_tb = tb_find_pc(env->mem_io_pc);
1174 }
1175 }
1176 if (current_tb == tb &&
1177 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1178 /* If we are modifying the current TB, we must stop
1179 its execution. We could be more precise by checking
1180 that the modification is after the current PC, but it
1181 would require a specialized function to partially
1182 restore the CPU state */
1183
1184 current_tb_modified = 1;
1185 cpu_restore_state(current_tb, env,
1186 env->mem_io_pc, NULL);
1187 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1188 &current_flags);
1189 }
1190#endif /* TARGET_HAS_PRECISE_SMC */
1191 /* we need to do that to handle the case where a signal
1192 occurs while doing tb_phys_invalidate() */
1193 saved_tb = NULL;
1194 if (env) {
1195 saved_tb = env->current_tb;
1196 env->current_tb = NULL;
1197 }
1198 tb_phys_invalidate(tb, -1);
1199 if (env) {
1200 env->current_tb = saved_tb;
1201 if (env->interrupt_request && env->current_tb)
1202 cpu_interrupt(env, env->interrupt_request);
1203 }
1204 }
1205 tb = tb_next;
1206 }
1207#if !defined(CONFIG_USER_ONLY)
1208 /* if no code remaining, no need to continue to use slow writes */
1209 if (!p->first_tb) {
1210 invalidate_page_bitmap(p);
1211 if (is_cpu_write_access) {
1212 tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
1213 }
1214 }
1215#endif
1216#ifdef TARGET_HAS_PRECISE_SMC
1217 if (current_tb_modified) {
1218 /* we generate a block containing just the instruction
1219 modifying the memory. It will ensure that it cannot modify
1220 itself */
1221 env->current_tb = NULL;
1222 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1223 cpu_resume_from_signal(env, NULL);
1224 }
1225#endif
1226}
1227
1228/* len must be <= 8 and start must be a multiple of len */
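/* For the power-of-two lengths used by the memory write paths, the bits
   tested below always fall within a single byte of code_bitmap. */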
1229static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
1230{
1231 PageDesc *p;
1232 int offset, b;
1233#if 0
1234 if (1) {
1235 qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1236 cpu_single_env->mem_io_vaddr, len,
1237 cpu_single_env->eip,
1238 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
1239 }
1240#endif
1241 p = page_find(start >> TARGET_PAGE_BITS);
1242 if (!p)
1243 return;
1244 if (p->code_bitmap) {
1245 offset = start & ~TARGET_PAGE_MASK;
1246 b = p->code_bitmap[offset >> 3] >> (offset & 7);
1247 if (b & ((1 << len) - 1))
1248 goto do_invalidate;
1249 } else {
1250 do_invalidate:
1251 tb_invalidate_phys_page_range(start, start + len, 1);
1252 }
1253}
1254
1255#if !defined(CONFIG_SOFTMMU)
1256static void tb_invalidate_phys_page(tb_page_addr_t addr,
1257 unsigned long pc, void *puc)
1258{
1259 TranslationBlock *tb;
1260 PageDesc *p;
1261 int n;
1262#ifdef TARGET_HAS_PRECISE_SMC
1263 TranslationBlock *current_tb = NULL;
1264 CPUState *env = cpu_single_env;
1265 int current_tb_modified = 0;
1266 target_ulong current_pc = 0;
1267 target_ulong current_cs_base = 0;
1268 int current_flags = 0;
1269#endif
1270
1271 addr &= TARGET_PAGE_MASK;
1272 p = page_find(addr >> TARGET_PAGE_BITS);
1273 if (!p)
1274 return;
1275 tb = p->first_tb;
1276#ifdef TARGET_HAS_PRECISE_SMC
1277 if (tb && pc != 0) {
1278 current_tb = tb_find_pc(pc);
1279 }
1280#endif
1281 while (tb != NULL) {
1282 n = (long)tb & 3;
1283 tb = (TranslationBlock *)((long)tb & ~3);
1284#ifdef TARGET_HAS_PRECISE_SMC
1285 if (current_tb == tb &&
1286 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1287 /* If we are modifying the current TB, we must stop
1288 its execution. We could be more precise by checking
1289 that the modification is after the current PC, but it
1290 would require a specialized function to partially
1291 restore the CPU state */
1292
1293 current_tb_modified = 1;
1294 cpu_restore_state(current_tb, env, pc, puc);
1295 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1296 &current_flags);
1297 }
1298#endif /* TARGET_HAS_PRECISE_SMC */
1299 tb_phys_invalidate(tb, addr);
1300 tb = tb->page_next[n];
1301 }
1302 p->first_tb = NULL;
1303#ifdef TARGET_HAS_PRECISE_SMC
1304 if (current_tb_modified) {
1305 /* we generate a block containing just the instruction
1306 modifying the memory. It will ensure that it cannot modify
1307 itself */
1308 env->current_tb = NULL;
1309 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1310 cpu_resume_from_signal(env, puc);
1311 }
1312#endif
1313}
1314#endif
1315
1316/* add the tb in the target page and protect it if necessary */
1317static inline void tb_alloc_page(TranslationBlock *tb,
1318 unsigned int n, tb_page_addr_t page_addr)
1319{
1320 PageDesc *p;
1321 TranslationBlock *last_first_tb;
1322
1323 tb->page_addr[n] = page_addr;
1324 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
1325 tb->page_next[n] = p->first_tb;
1326 last_first_tb = p->first_tb;
1327 p->first_tb = (TranslationBlock *)((long)tb | n);
1328 invalidate_page_bitmap(p);
1329
1330#if defined(TARGET_HAS_SMC) || 1
1331
1332#if defined(CONFIG_USER_ONLY)
1333 if (p->flags & PAGE_WRITE) {
1334 target_ulong addr;
1335 PageDesc *p2;
1336 int prot;
1337
1338 /* force the host page as non writable (writes will have a
1339 page fault + mprotect overhead) */
1340 page_addr &= qemu_host_page_mask;
1341 prot = 0;
1342 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1343 addr += TARGET_PAGE_SIZE) {
1344
1345 p2 = page_find (addr >> TARGET_PAGE_BITS);
1346 if (!p2)
1347 continue;
1348 prot |= p2->flags;
1349 p2->flags &= ~PAGE_WRITE;
1350 }
1351 mprotect(g2h(page_addr), qemu_host_page_size,
1352 (prot & PAGE_BITS) & ~PAGE_WRITE);
1353#ifdef DEBUG_TB_INVALIDATE
1354 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
1355 page_addr);
1356#endif
1357 }
1358#else
1359 /* if some code is already present, then the pages are already
1360 protected. So we handle the case where only the first TB is
1361 allocated in a physical page */
1362 if (!last_first_tb) {
1363 tlb_protect_code(page_addr);
1364 }
1365#endif
1366
1367#endif /* TARGET_HAS_SMC */
1368}
1369
1370/* Allocate a new translation block. Flush the translation buffer if
1371 too many translation blocks or too much generated code. */
1372TranslationBlock *tb_alloc(target_ulong pc)
1373{
1374 TranslationBlock *tb;
1375
1376 if (nb_tbs >= code_gen_max_blocks ||
1377 (code_gen_ptr - code_gen_buffer) >= VBOX_ONLY((unsigned long))code_gen_buffer_max_size)
1378 return NULL;
1379 tb = &tbs[nb_tbs++];
1380 tb->pc = pc;
1381 tb->cflags = 0;
1382 return tb;
1383}
1384
1385void tb_free(TranslationBlock *tb)
1386{
 1387 /* In practice this is mostly used for single-use temporary TBs.
1388 Ignore the hard cases and just back up if this TB happens to
1389 be the last one generated. */
1390 if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
1391 code_gen_ptr = tb->tc_ptr;
1392 nb_tbs--;
1393 }
1394}
1395
1396/* add a new TB and link it to the physical page tables. phys_page2 is
1397 (-1) to indicate that only one page contains the TB. */
1398void tb_link_page(TranslationBlock *tb,
1399 tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
1400{
1401 unsigned int h;
1402 TranslationBlock **ptb;
1403
1404 /* Grab the mmap lock to stop another thread invalidating this TB
1405 before we are done. */
1406 mmap_lock();
1407 /* add in the physical hash table */
1408 h = tb_phys_hash_func(phys_pc);
1409 ptb = &tb_phys_hash[h];
1410 tb->phys_hash_next = *ptb;
1411 *ptb = tb;
1412
1413 /* add in the page list */
1414 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1415 if (phys_page2 != -1)
1416 tb_alloc_page(tb, 1, phys_page2);
1417 else
1418 tb->page_addr[1] = -1;
1419
1420 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1421 tb->jmp_next[0] = NULL;
1422 tb->jmp_next[1] = NULL;
1423
1424 /* init original jump addresses */
1425 if (tb->tb_next_offset[0] != 0xffff)
1426 tb_reset_jump(tb, 0);
1427 if (tb->tb_next_offset[1] != 0xffff)
1428 tb_reset_jump(tb, 1);
1429
1430#ifdef DEBUG_TB_CHECK
1431 tb_page_check();
1432#endif
1433 mmap_unlock();
1434}
1435
1436/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1437 tb[1].tc_ptr. Return NULL if not found */
1438TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1439{
1440 int m_min, m_max, m;
1441 unsigned long v;
1442 TranslationBlock *tb;
1443
1444 if (nb_tbs <= 0)
1445 return NULL;
1446 if (tc_ptr < (unsigned long)code_gen_buffer ||
1447 tc_ptr >= (unsigned long)code_gen_ptr)
1448 return NULL;
1449 /* binary search (cf Knuth) */
1450 m_min = 0;
1451 m_max = nb_tbs - 1;
1452 while (m_min <= m_max) {
1453 m = (m_min + m_max) >> 1;
1454 tb = &tbs[m];
1455 v = (unsigned long)tb->tc_ptr;
1456 if (v == tc_ptr)
1457 return tb;
1458 else if (tc_ptr < v) {
1459 m_max = m - 1;
1460 } else {
1461 m_min = m + 1;
1462 }
1463 }
1464 return &tbs[m_max];
1465}
1466
1467static void tb_reset_jump_recursive(TranslationBlock *tb);
1468
1469static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1470{
1471 TranslationBlock *tb1, *tb_next, **ptb;
1472 unsigned int n1;
1473
1474 tb1 = tb->jmp_next[n];
1475 if (tb1 != NULL) {
1476 /* find head of list */
1477 for(;;) {
1478 n1 = (long)tb1 & 3;
1479 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1480 if (n1 == 2)
1481 break;
1482 tb1 = tb1->jmp_next[n1];
1483 }
 1484 /* we are now sure that tb jumps to tb1 */
1485 tb_next = tb1;
1486
1487 /* remove tb from the jmp_first list */
1488 ptb = &tb_next->jmp_first;
1489 for(;;) {
1490 tb1 = *ptb;
1491 n1 = (long)tb1 & 3;
1492 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1493 if (n1 == n && tb1 == tb)
1494 break;
1495 ptb = &tb1->jmp_next[n1];
1496 }
1497 *ptb = tb->jmp_next[n];
1498 tb->jmp_next[n] = NULL;
1499
1500 /* suppress the jump to next tb in generated code */
1501 tb_reset_jump(tb, n);
1502
1503 /* suppress jumps in the tb on which we could have jumped */
1504 tb_reset_jump_recursive(tb_next);
1505 }
1506}
1507
1508static void tb_reset_jump_recursive(TranslationBlock *tb)
1509{
1510 tb_reset_jump_recursive2(tb, 0);
1511 tb_reset_jump_recursive2(tb, 1);
1512}
1513
1514#if defined(TARGET_HAS_ICE)
1515#if defined(CONFIG_USER_ONLY)
1516static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1517{
1518 tb_invalidate_phys_page_range(pc, pc + 1, 0);
1519}
1520#else
1521static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1522{
1523 target_phys_addr_t addr;
1524 target_ulong pd;
1525 ram_addr_t ram_addr;
1526 PhysPageDesc *p;
1527
1528 addr = cpu_get_phys_page_debug(env, pc);
1529 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1530 if (!p) {
1531 pd = IO_MEM_UNASSIGNED;
1532 } else {
1533 pd = p->phys_offset;
1534 }
1535 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1536 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1537}
1538#endif
1539#endif /* TARGET_HAS_ICE */
1540
1541#if defined(CONFIG_USER_ONLY)
1542void cpu_watchpoint_remove_all(CPUState *env, int mask)
1543
1544{
1545}
1546
1547int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1548 int flags, CPUWatchpoint **watchpoint)
1549{
1550 return -ENOSYS;
1551}
1552#else
1553/* Add a watchpoint. */
1554int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1555 int flags, CPUWatchpoint **watchpoint)
1556{
1557 target_ulong len_mask = ~(len - 1);
1558 CPUWatchpoint *wp;
1559
1560 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1561 if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
1562 fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1563 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1564#ifndef VBOX
1565 return -EINVAL;
1566#else
1567 return VERR_INVALID_PARAMETER;
1568#endif
1569 }
1570 wp = qemu_malloc(sizeof(*wp));
1571
1572 wp->vaddr = addr;
1573 wp->len_mask = len_mask;
1574 wp->flags = flags;
1575
1576 /* keep all GDB-injected watchpoints in front */
1577 if (flags & BP_GDB)
1578 QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
1579 else
1580 QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
1581
1582 tlb_flush_page(env, addr);
1583
1584 if (watchpoint)
1585 *watchpoint = wp;
1586 return 0;
1587}
1588
1589/* Remove a specific watchpoint. */
1590int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1591 int flags)
1592{
1593 target_ulong len_mask = ~(len - 1);
1594 CPUWatchpoint *wp;
1595
1596 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
1597 if (addr == wp->vaddr && len_mask == wp->len_mask
1598 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
1599 cpu_watchpoint_remove_by_ref(env, wp);
1600 return 0;
1601 }
1602 }
1603#ifndef VBOX
1604 return -ENOENT;
1605#else
1606 return VERR_NOT_FOUND;
1607#endif
1608}
1609
1610/* Remove a specific watchpoint by reference. */
1611void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1612{
1613 QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
1614
1615 tlb_flush_page(env, watchpoint->vaddr);
1616
1617 qemu_free(watchpoint);
1618}
1619
1620/* Remove all matching watchpoints. */
1621void cpu_watchpoint_remove_all(CPUState *env, int mask)
1622{
1623 CPUWatchpoint *wp, *next;
1624
1625 QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
1626 if (wp->flags & mask)
1627 cpu_watchpoint_remove_by_ref(env, wp);
1628 }
1629}
1630#endif
1631
1632/* Add a breakpoint. */
1633int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1634 CPUBreakpoint **breakpoint)
1635{
1636#if defined(TARGET_HAS_ICE)
1637 CPUBreakpoint *bp;
1638
1639 bp = qemu_malloc(sizeof(*bp));
1640
1641 bp->pc = pc;
1642 bp->flags = flags;
1643
1644 /* keep all GDB-injected breakpoints in front */
1645 if (flags & BP_GDB)
1646 QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
1647 else
1648 QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
1649
1650 breakpoint_invalidate(env, pc);
1651
1652 if (breakpoint)
1653 *breakpoint = bp;
1654 return 0;
1655#else
1656 return -ENOSYS;
1657#endif
1658}
1659
1660/* Remove a specific breakpoint. */
1661int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1662{
1663#if defined(TARGET_HAS_ICE)
1664 CPUBreakpoint *bp;
1665
1666 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
1667 if (bp->pc == pc && bp->flags == flags) {
1668 cpu_breakpoint_remove_by_ref(env, bp);
1669 return 0;
1670 }
1671 }
1672# ifndef VBOX
1673 return -ENOENT;
1674# else
1675 return VERR_NOT_FOUND;
1676# endif
1677#else
1678 return -ENOSYS;
1679#endif
1680}
1681
1682/* Remove a specific breakpoint by reference. */
1683void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
1684{
1685#if defined(TARGET_HAS_ICE)
1686 QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
1687
1688 breakpoint_invalidate(env, breakpoint->pc);
1689
1690 qemu_free(breakpoint);
1691#endif
1692}
1693
1694/* Remove all matching breakpoints. */
1695void cpu_breakpoint_remove_all(CPUState *env, int mask)
1696{
1697#if defined(TARGET_HAS_ICE)
1698 CPUBreakpoint *bp, *next;
1699
1700 QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
1701 if (bp->flags & mask)
1702 cpu_breakpoint_remove_by_ref(env, bp);
1703 }
1704#endif
1705}
1706
1707/* enable or disable single step mode. EXCP_DEBUG is returned by the
1708 CPU loop after each instruction */
1709void cpu_single_step(CPUState *env, int enabled)
1710{
1711#if defined(TARGET_HAS_ICE)
1712 if (env->singlestep_enabled != enabled) {
1713 env->singlestep_enabled = enabled;
1714 if (kvm_enabled())
1715 kvm_update_guest_debug(env, 0);
1716 else {
1717 /* must flush all the translated code to avoid inconsistencies */
1718 /* XXX: only flush what is necessary */
1719 tb_flush(env);
1720 }
1721 }
1722#endif
1723}
1724
1725#ifndef VBOX
1726
1727/* enable or disable low levels log */
1728void cpu_set_log(int log_flags)
1729{
1730 loglevel = log_flags;
1731 if (loglevel && !logfile) {
1732 logfile = fopen(logfilename, log_append ? "a" : "w");
1733 if (!logfile) {
1734 perror(logfilename);
1735 _exit(1);
1736 }
1737#if !defined(CONFIG_SOFTMMU)
1738 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1739 {
1740 static char logfile_buf[4096];
1741 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1742 }
1743#elif !defined(_WIN32)
1744 /* Win32 doesn't support line-buffering and requires size >= 2 */
1745 setvbuf(logfile, NULL, _IOLBF, 0);
1746#endif
1747 log_append = 1;
1748 }
1749 if (!loglevel && logfile) {
1750 fclose(logfile);
1751 logfile = NULL;
1752 }
1753}
1754
1755void cpu_set_log_filename(const char *filename)
1756{
1757 logfilename = strdup(filename);
1758 if (logfile) {
1759 fclose(logfile);
1760 logfile = NULL;
1761 }
1762 cpu_set_log(loglevel);
1763}
1764
1765#endif /* !VBOX */
1766
1767static void cpu_unlink_tb(CPUState *env)
1768{
1769 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1770 problem and hope the cpu will stop of its own accord. For userspace
1771 emulation this often isn't actually as bad as it sounds. Often
1772 signals are used primarily to interrupt blocking syscalls. */
1773 TranslationBlock *tb;
1774 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
1775
1776 spin_lock(&interrupt_lock);
1777 tb = env->current_tb;
1778 /* if the cpu is currently executing code, we must unlink it and
1779 all the potentially executing TB */
1780 if (tb) {
1781 env->current_tb = NULL;
1782 tb_reset_jump_recursive(tb);
1783 }
1784 spin_unlock(&interrupt_lock);
1785}
1786
1787/* mask must never be zero, except for A20 change call */
1788void cpu_interrupt(CPUState *env, int mask)
1789{
1790 int old_mask;
1791
1792 old_mask = env->interrupt_request;
1793#ifndef VBOX
1794 env->interrupt_request |= mask;
1795#else /* VBOX */
1796 VM_ASSERT_EMT(env->pVM);
1797 ASMAtomicOrS32((int32_t volatile *)&env->interrupt_request, mask);
1798#endif /* VBOX */
1799
1800#ifndef VBOX
1801#ifndef CONFIG_USER_ONLY
1802 /*
1803 * If called from iothread context, wake the target cpu in
 1804 * case it's halted.
1805 */
1806 if (!qemu_cpu_self(env)) {
1807 qemu_cpu_kick(env);
1808 return;
1809 }
1810#endif
1811#endif /* !VBOX */
1812
1813 if (use_icount) {
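        /* Make icount_decr go negative so the instruction-count check in the
           generated code fires and the current TB exits promptly. */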
1814 env->icount_decr.u16.high = 0xffff;
1815#ifndef CONFIG_USER_ONLY
1816 if (!can_do_io(env)
1817 && (mask & ~old_mask) != 0) {
1818 cpu_abort(env, "Raised interrupt while not in I/O function");
1819 }
1820#endif
1821 } else {
1822 cpu_unlink_tb(env);
1823 }
1824}
1825
1826void cpu_reset_interrupt(CPUState *env, int mask)
1827{
1828#ifdef VBOX
1829 /*
1830 * Note: the current implementation can be executed by another thread without problems; make sure this remains true
1831 * for future changes!
1832 */
1833 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~mask);
1834#else /* !VBOX */
1835 env->interrupt_request &= ~mask;
1836#endif /* !VBOX */
1837}
1838
1839void cpu_exit(CPUState *env)
1840{
1841 env->exit_request = 1;
1842 cpu_unlink_tb(env);
1843}
1844
1845#ifndef VBOX
1846const CPULogItem cpu_log_items[] = {
1847 { CPU_LOG_TB_OUT_ASM, "out_asm",
1848 "show generated host assembly code for each compiled TB" },
1849 { CPU_LOG_TB_IN_ASM, "in_asm",
1850 "show target assembly code for each compiled TB" },
1851 { CPU_LOG_TB_OP, "op",
1852 "show micro ops for each compiled TB" },
1853 { CPU_LOG_TB_OP_OPT, "op_opt",
1854 "show micro ops "
1855#ifdef TARGET_I386
1856 "before eflags optimization and "
1857#endif
1858 "after liveness analysis" },
1859 { CPU_LOG_INT, "int",
1860 "show interrupts/exceptions in short format" },
1861 { CPU_LOG_EXEC, "exec",
1862 "show trace before each executed TB (lots of logs)" },
1863 { CPU_LOG_TB_CPU, "cpu",
1864 "show CPU state before block translation" },
1865#ifdef TARGET_I386
1866 { CPU_LOG_PCALL, "pcall",
1867 "show protected mode far calls/returns/exceptions" },
1868 { CPU_LOG_RESET, "cpu_reset",
1869 "show CPU state before CPU resets" },
1870#endif
1871#ifdef DEBUG_IOPORT
1872 { CPU_LOG_IOPORT, "ioport",
1873 "show all i/o ports accesses" },
1874#endif
1875 { 0, NULL, NULL },
1876};
1877
1878#ifndef CONFIG_USER_ONLY
1879static QLIST_HEAD(memory_client_list, CPUPhysMemoryClient) memory_client_list
1880 = QLIST_HEAD_INITIALIZER(memory_client_list);
1881
1882static void cpu_notify_set_memory(target_phys_addr_t start_addr,
1883 ram_addr_t size,
1884 ram_addr_t phys_offset)
1885{
1886 CPUPhysMemoryClient *client;
1887 QLIST_FOREACH(client, &memory_client_list, list) {
1888 client->set_memory(client, start_addr, size, phys_offset);
1889 }
1890}
1891
1892static int cpu_notify_sync_dirty_bitmap(target_phys_addr_t start,
1893 target_phys_addr_t end)
1894{
1895 CPUPhysMemoryClient *client;
1896 QLIST_FOREACH(client, &memory_client_list, list) {
1897 int r = client->sync_dirty_bitmap(client, start, end);
1898 if (r < 0)
1899 return r;
1900 }
1901 return 0;
1902}
1903
1904static int cpu_notify_migration_log(int enable)
1905{
1906 CPUPhysMemoryClient *client;
1907 QLIST_FOREACH(client, &memory_client_list, list) {
1908 int r = client->migration_log(client, enable);
1909 if (r < 0)
1910 return r;
1911 }
1912 return 0;
1913}
1914
1915static void phys_page_for_each_1(CPUPhysMemoryClient *client,
1916 int level, void **lp)
1917{
1918 int i;
1919
1920 if (*lp == NULL) {
1921 return;
1922 }
1923 if (level == 0) {
1924 PhysPageDesc *pd = *lp;
1925 for (i = 0; i < L2_SIZE; ++i) {
1926 if (pd[i].phys_offset != IO_MEM_UNASSIGNED) {
1927 client->set_memory(client, pd[i].region_offset,
1928 TARGET_PAGE_SIZE, pd[i].phys_offset);
1929 }
1930 }
1931 } else {
1932 void **pp = *lp;
1933 for (i = 0; i < L2_SIZE; ++i) {
1934 phys_page_for_each_1(client, level - 1, pp + i);
1935 }
1936 }
1937}
1938
1939static void phys_page_for_each(CPUPhysMemoryClient *client)
1940{
1941 int i;
1942 for (i = 0; i < P_L1_SIZE; ++i) {
1943 phys_page_for_each_1(client, P_L1_SHIFT / L2_BITS - 1,
 1944 l1_phys_map + i);
1945 }
1946}
1947
1948void cpu_register_phys_memory_client(CPUPhysMemoryClient *client)
1949{
1950 QLIST_INSERT_HEAD(&memory_client_list, client, list);
1951 phys_page_for_each(client);
1952}
1953
1954void cpu_unregister_phys_memory_client(CPUPhysMemoryClient *client)
1955{
1956 QLIST_REMOVE(client, list);
1957}
1958#endif
1959
1960static int cmp1(const char *s1, int n, const char *s2)
1961{
1962 if (strlen(s2) != n)
1963 return 0;
1964 return memcmp(s1, s2, n) == 0;
1965}
1966
1967/* takes a comma separated list of log masks. Return 0 if error. */
1968int cpu_str_to_log_mask(const char *str)
1969{
1970 const CPULogItem *item;
1971 int mask;
1972 const char *p, *p1;
1973
1974 p = str;
1975 mask = 0;
1976 for(;;) {
1977 p1 = strchr(p, ',');
1978 if (!p1)
1979 p1 = p + strlen(p);
1980 if(cmp1(p,p1-p,"all")) {
1981 for(item = cpu_log_items; item->mask != 0; item++) {
1982 mask |= item->mask;
1983 }
1984 } else {
1985 for(item = cpu_log_items; item->mask != 0; item++) {
1986 if (cmp1(p, p1 - p, item->name))
1987 goto found;
1988 }
1989 return 0;
1990 }
1991 found:
1992 mask |= item->mask;
1993 if (*p1 != ',')
1994 break;
1995 p = p1 + 1;
1996 }
1997 return mask;
1998}
1999
2000void cpu_abort(CPUState *env, const char *fmt, ...)
2001{
2002 va_list ap;
2003 va_list ap2;
2004
2005 va_start(ap, fmt);
2006 va_copy(ap2, ap);
2007 fprintf(stderr, "qemu: fatal: ");
2008 vfprintf(stderr, fmt, ap);
2009 fprintf(stderr, "\n");
2010#ifdef TARGET_I386
2011 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
2012#else
2013 cpu_dump_state(env, stderr, fprintf, 0);
2014#endif
2015 if (qemu_log_enabled()) {
2016 qemu_log("qemu: fatal: ");
2017 qemu_log_vprintf(fmt, ap2);
2018 qemu_log("\n");
2019#ifdef TARGET_I386
2020 log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
2021#else
2022 log_cpu_state(env, 0);
2023#endif
2024 qemu_log_flush();
2025 qemu_log_close();
2026 }
2027 va_end(ap2);
2028 va_end(ap);
2029#if defined(CONFIG_USER_ONLY)
2030 {
2031 struct sigaction act;
2032 sigfillset(&act.sa_mask);
2033 act.sa_handler = SIG_DFL;
2034 sigaction(SIGABRT, &act, NULL);
2035 }
2036#endif
2037 abort();
2038}
2039
2040CPUState *cpu_copy(CPUState *env)
2041{
2042 CPUState *new_env = cpu_init(env->cpu_model_str);
2043 CPUState *next_cpu = new_env->next_cpu;
2044 int cpu_index = new_env->cpu_index;
2045#if defined(TARGET_HAS_ICE)
2046 CPUBreakpoint *bp;
2047 CPUWatchpoint *wp;
2048#endif
2049
2050 memcpy(new_env, env, sizeof(CPUState));
2051
2052 /* Preserve chaining and index. */
2053 new_env->next_cpu = next_cpu;
2054 new_env->cpu_index = cpu_index;
2055
2056 /* Clone all break/watchpoints.
2057 Note: Once we support ptrace with hw-debug register access, make sure
2058 BP_CPU break/watchpoints are handled correctly on clone. */
2059    QTAILQ_INIT(&new_env->breakpoints);
2060    QTAILQ_INIT(&new_env->watchpoints);
2061#if defined(TARGET_HAS_ICE)
2062 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
2063 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
2064 }
2065 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
2066 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
2067 wp->flags, NULL);
2068 }
2069#endif
2070
2071 return new_env;
2072}
2073
2074#endif /* !VBOX */
2075#if !defined(CONFIG_USER_ONLY)
2076
2077static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
2078{
2079 unsigned int i;
2080
2081    /* Discard jump cache entries for any tb that might overlap the flushed
2082       page; a tb can span two pages, so the preceding page's entries go too. */
2083 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
2084 memset (&env->tb_jmp_cache[i], 0,
2085 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
2086
2087 i = tb_jmp_cache_hash_page(addr);
2088 memset (&env->tb_jmp_cache[i], 0,
2089 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
2090#ifdef VBOX
2091
2092 /* inform raw mode about TLB page flush */
2093 remR3FlushPage(env, addr);
2094#endif /* VBOX */
2095}
2096
2097static CPUTLBEntry s_cputlb_empty_entry = {
2098 .addr_read = -1,
2099 .addr_write = -1,
2100 .addr_code = -1,
2101 .addend = -1,
2102};
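/* An entry copied from s_cputlb_empty_entry can never hit on the fast path:
   the all-ones address fields keep TLB_INVALID_MASK (and every other flag
   bit) set, so they never compare equal to a page-aligned guest address and
   the access always falls back to the slow path. */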
2103
2104/* NOTE: if flush_global is true, also flush global entries (not
2105 implemented yet) */
2106void tlb_flush(CPUState *env, int flush_global)
2107{
2108 int i;
2109
2110#ifdef VBOX
2111 Assert(EMRemIsLockOwner(env->pVM));
2112 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_EXTERNAL_FLUSH_TLB);
2113#endif
2114
2115#if defined(DEBUG_TLB)
2116 printf("tlb_flush:\n");
2117#endif
2118 /* must reset current TB so that interrupts cannot modify the
2119 links while we are modifying them */
2120 env->current_tb = NULL;
2121
2122 for(i = 0; i < CPU_TLB_SIZE; i++) {
2123 int mmu_idx;
2124 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2125 env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
2126 }
2127 }
2128
2129 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
2130
2131 env->tlb_flush_addr = -1;
2132 env->tlb_flush_mask = 0;
2133 tlb_flush_count++;
2134#ifdef VBOX
2135
2136 /* inform raw mode about TLB flush */
2137 remR3FlushTLB(env, flush_global);
2138#endif /* VBOX */
2139}
2140
2141static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
2142{
2143 if (addr == (tlb_entry->addr_read &
2144 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
2145 addr == (tlb_entry->addr_write &
2146 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
2147 addr == (tlb_entry->addr_code &
2148 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
2149 *tlb_entry = s_cputlb_empty_entry;
2150 }
2151}
2152
2153void tlb_flush_page(CPUState *env, target_ulong addr)
2154{
2155 int i;
2156 int mmu_idx;
2157
2158 Assert(EMRemIsLockOwner(env->pVM));
2159#if defined(DEBUG_TLB)
2160 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
2161#endif
2162 /* Check if we need to flush due to large pages. */
2163 if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
2164#if defined(DEBUG_TLB)
2165 printf("tlb_flush_page: forced full flush ("
2166 TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
2167 env->tlb_flush_addr, env->tlb_flush_mask);
2168#endif
2169 tlb_flush(env, 1);
2170 return;
2171 }
2172 /* must reset current TB so that interrupts cannot modify the
2173 links while we are modifying them */
2174 env->current_tb = NULL;
2175
2176 addr &= TARGET_PAGE_MASK;
2177 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2178 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
2179 tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
2180
2181 tlb_flush_jmp_cache(env, addr);
2182}
2183
2184/* update the TLBs so that writes to code in the virtual page 'addr'
2185 can be detected */
2186static void tlb_protect_code(ram_addr_t ram_addr)
2187{
2188 cpu_physical_memory_reset_dirty(ram_addr,
2189 ram_addr + TARGET_PAGE_SIZE,
2190 CODE_DIRTY_FLAG);
2191#if defined(VBOX) && defined(REM_MONITOR_CODE_PAGES)
2192 /** @todo Retest this? This function has changed... */
2193 remR3ProtectCode(cpu_single_env, ram_addr);
2194#endif /* VBOX */
2195}
2196
2197/* update the TLB so that writes in physical page 'phys_addr' are no longer
2198 tested for self modifying code */
2199static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
2200 target_ulong vaddr)
2201{
2202 cpu_physical_memory_set_dirty_flags(ram_addr, CODE_DIRTY_FLAG);
2203}
2204
2205static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
2206 unsigned long start, unsigned long length)
2207{
2208 unsigned long addr;
2209#ifdef VBOX
2210
2211 if (start & 3)
2212 return;
2213#endif /* VBOX */
2214 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
2215 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
2216 if ((addr - start) < length) {
2217 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
2218 }
2219 }
2220}
2221
2222/* Note: start and end must be within the same ram block. */
2223void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
2224 int dirty_flags)
2225{
2226 CPUState *env;
2227 unsigned long length, start1;
2228 int i;
2229
2230 start &= TARGET_PAGE_MASK;
2231 end = TARGET_PAGE_ALIGN(end);
2232
2233 length = end - start;
2234 if (length == 0)
2235 return;
2236 cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);
2237
2238 /* we modify the TLB cache so that the dirty bit will be set again
2239 when accessing the range */
2240#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
2241 start1 = start;
2242#elif !defined(VBOX)
2243 start1 = (unsigned long)qemu_get_ram_ptr(start);
2244    /* Check that we don't span multiple blocks - this breaks the
2245 address comparisons below. */
2246 if ((unsigned long)qemu_get_ram_ptr(end - 1) - start1
2247 != (end - 1) - start) {
2248 abort();
2249 }
2250#else
2251 start1 = (unsigned long)remR3TlbGCPhys2Ptr(first_cpu, start, 1 /*fWritable*/); /** @todo page replacing (sharing or read only) may cause trouble, fix interface/whatever. */
2252#endif
2253
2254 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2255 int mmu_idx;
2256 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2257 for(i = 0; i < CPU_TLB_SIZE; i++)
2258 tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
2259 start1, length);
2260 }
2261 }
2262}
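/* Once the dirty bits are cleared above, the matching TLB entries carry
   TLB_NOTDIRTY again, so the next guest write to such a page is routed
   through the notdirty_mem_write* handlers further down in this file; those
   invalidate any translated code on the page and set the dirty flags before
   writes are allowed to go straight to RAM once more. */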
2263
2264#ifndef VBOX
2265
2266int cpu_physical_memory_set_dirty_tracking(int enable)
2267{
2268 int ret = 0;
2269 in_migration = enable;
2270 ret = cpu_notify_migration_log(!!enable);
2271 return ret;
2272}
2273
2274int cpu_physical_memory_get_dirty_tracking(void)
2275{
2276 return in_migration;
2277}
2278
2279#endif /* !VBOX */
2280
2281int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
2282 target_phys_addr_t end_addr)
2283{
2284#ifndef VBOX
2285 int ret;
2286
2287 ret = cpu_notify_sync_dirty_bitmap(start_addr, end_addr);
2288 return ret;
2289#else /* VBOX */
2290 return 0;
2291#endif /* VBOX */
2292}
2293
2294#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
2295DECLINLINE(void) tlb_update_dirty(CPUTLBEntry *tlb_entry, target_phys_addr_t phys_addend)
2296#else
2297static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
2298#endif
2299{
2300 ram_addr_t ram_addr;
2301 void *p;
2302
2303 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
2304#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
2305 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
2306#elif !defined(VBOX)
2307 p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
2308 + tlb_entry->addend);
2309 ram_addr = qemu_ram_addr_from_host(p);
2310#else
2311 Assert(phys_addend != -1);
2312 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + phys_addend;
2313#endif
2314 if (!cpu_physical_memory_is_dirty(ram_addr)) {
2315 tlb_entry->addr_write |= TLB_NOTDIRTY;
2316 }
2317 }
2318}
2319
2320/* update the TLB according to the current state of the dirty bits */
2321void cpu_tlb_update_dirty(CPUState *env)
2322{
2323 int i;
2324 int mmu_idx;
2325 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2326 for(i = 0; i < CPU_TLB_SIZE; i++)
2327#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
2328 tlb_update_dirty(&env->tlb_table[mmu_idx][i], env->phys_addends[mmu_idx][i]);
2329#else
2330 tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
2331#endif
2332 }
2333}
2334
2335static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
2336{
2337 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
2338 tlb_entry->addr_write = vaddr;
2339}
2340
2341/* update the TLB corresponding to virtual page vaddr
2342 so that it is no longer dirty */
2343static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
2344{
2345 int i;
2346 int mmu_idx;
2347
2348 vaddr &= TARGET_PAGE_MASK;
2349 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2350 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
2351 tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
2352}
2353
2354/* Our TLB does not support large pages, so remember the area covered by
2355 large pages and trigger a full TLB flush if these are invalidated. */
2356static void tlb_add_large_page(CPUState *env, target_ulong vaddr,
2357 target_ulong size)
2358{
2359 target_ulong mask = ~(size - 1);
2360
2361 if (env->tlb_flush_addr == (target_ulong)-1) {
2362 env->tlb_flush_addr = vaddr & mask;
2363 env->tlb_flush_mask = mask;
2364 return;
2365 }
2366 /* Extend the existing region to include the new page.
2367 This is a compromise between unnecessary flushes and the cost
2368 of maintaining a full variable size TLB. */
2369 mask &= env->tlb_flush_mask;
2370 while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
2371 mask <<= 1;
2372 }
2373 env->tlb_flush_addr &= mask;
2374 env->tlb_flush_mask = mask;
2375}
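/* Worked example (assuming two 4 MiB large pages on a 32-bit target): the
   first page at vaddr 0x00400000 records tlb_flush_addr = 0x00400000 and
   tlb_flush_mask = 0xffc00000.  Adding a second page at 0x00800000 widens
   the mask 0xffc00000 -> 0xff800000 -> 0xff000000 until both addresses
   agree, leaving tlb_flush_addr = 0x00000000: a single 16 MiB region that
   covers both mappings, at the price of extra full flushes for unrelated
   pages inside it. */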
2376
2377/* Add a new TLB entry. At most one entry for a given virtual address
2378 is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
2379 supplied size is only used by tlb_flush_page. */
2380void tlb_set_page(CPUState *env, target_ulong vaddr,
2381 target_phys_addr_t paddr, int prot,
2382 int mmu_idx, target_ulong size)
2383{
2384 PhysPageDesc *p;
2385 unsigned long pd;
2386 unsigned int index;
2387 target_ulong address;
2388 target_ulong code_address;
2389 unsigned long addend;
2390 CPUTLBEntry *te;
2391 CPUWatchpoint *wp;
2392 target_phys_addr_t iotlb;
2393#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
2394 int read_mods = 0, write_mods = 0, code_mods = 0;
2395#endif
2396
2397 assert(size >= TARGET_PAGE_SIZE);
2398 if (size != TARGET_PAGE_SIZE) {
2399 tlb_add_large_page(env, vaddr, size);
2400 }
2401 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
2402 if (!p) {
2403 pd = IO_MEM_UNASSIGNED;
2404 } else {
2405 pd = p->phys_offset;
2406 }
2407#if defined(DEBUG_TLB)
2408 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d size=" TARGET_FMT_lx " pd=0x%08lx\n",
2409 vaddr, (int)paddr, prot, mmu_idx, size, pd);
2410#endif
2411
2412 address = vaddr;
2413 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
2414 /* IO memory case (romd handled later) */
2415 address |= TLB_MMIO;
2416 }
2417#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
2418 addend = pd & TARGET_PAGE_MASK;
2419#elif !defined(VBOX)
2420 addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
2421#else
2422 /** @todo this is racing the phys_page_find call above since it may register
2423 * a new chunk of memory... */
2424 addend = (unsigned long)remR3TlbGCPhys2Ptr(env, pd & TARGET_PAGE_MASK, !!(prot & PAGE_WRITE));
2425#endif
2426
2427 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
2428 /* Normal RAM. */
2429 iotlb = pd & TARGET_PAGE_MASK;
2430 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
2431 iotlb |= IO_MEM_NOTDIRTY;
2432 else
2433 iotlb |= IO_MEM_ROM;
2434 } else {
2435 /* IO handlers are currently passed a physical address.
2436 It would be nice to pass an offset from the base address
2437 of that region. This would avoid having to special case RAM,
2438 and avoid full address decoding in every device.
2439 We can't use the high bits of pd for this because
2440 IO_MEM_ROMD uses these as a ram address. */
2441 iotlb = (pd & ~TARGET_PAGE_MASK);
2442 if (p) {
2443 iotlb += p->region_offset;
2444 } else {
2445 iotlb += paddr;
2446 }
2447 }
2448
2449 code_address = address;
2450#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
2451
2452 if (addend & 0x3)
2453 {
2454 if (addend & 0x2)
2455 {
2456 /* catch write */
2457 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM)
2458 write_mods |= TLB_MMIO;
2459 }
2460 else if (addend & 0x1)
2461 {
2462 /* catch all */
2463 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM)
2464 {
2465 read_mods |= TLB_MMIO;
2466 write_mods |= TLB_MMIO;
2467 code_mods |= TLB_MMIO;
2468 }
2469 }
2470 if ((iotlb & ~TARGET_PAGE_MASK) == 0)
2471 iotlb = env->pVM->rem.s.iHandlerMemType + paddr;
2472 addend &= ~(target_ulong)0x3;
2473 }
2474
2475#endif
2476 /* Make accesses to pages with watchpoints go via the
2477 watchpoint trap routines. */
2478 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
2479 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
2480 /* Avoid trapping reads of pages with a write breakpoint. */
2481 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
2482 iotlb = io_mem_watch + paddr;
2483 address |= TLB_MMIO;
2484 break;
2485 }
2486 }
2487 }
2488
2489 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2490 env->iotlb[mmu_idx][index] = iotlb - vaddr;
2491 te = &env->tlb_table[mmu_idx][index];
2492 te->addend = addend - vaddr;
2493 if (prot & PAGE_READ) {
2494 te->addr_read = address;
2495 } else {
2496 te->addr_read = -1;
2497 }
2498
2499 if (prot & PAGE_EXEC) {
2500 te->addr_code = code_address;
2501 } else {
2502 te->addr_code = -1;
2503 }
2504 if (prot & PAGE_WRITE) {
2505 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
2506 (pd & IO_MEM_ROMD)) {
2507 /* Write access calls the I/O callback. */
2508 te->addr_write = address | TLB_MMIO;
2509 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
2510 !cpu_physical_memory_is_dirty(pd)) {
2511 te->addr_write = address | TLB_NOTDIRTY;
2512 } else {
2513 te->addr_write = address;
2514 }
2515 } else {
2516 te->addr_write = -1;
2517 }
2518
2519#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
2520 if (prot & PAGE_READ)
2521 te->addr_read |= read_mods;
2522 if (prot & PAGE_EXEC)
2523 te->addr_code |= code_mods;
2524 if (prot & PAGE_WRITE)
2525 te->addr_write |= write_mods;
2526
2527    env->phys_addends[mmu_idx][index] = (pd & TARGET_PAGE_MASK) - vaddr;
2528#endif
2529
2530#ifdef VBOX
2531 /* inform raw mode about TLB page change */
2532 remR3FlushPage(env, vaddr);
2533#endif
2534}
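/* Illustrative sketch only (the real fast path is generated from
   softmmu_header.h / softmmu_template.h): roughly how the entries installed
   above are consumed.  Any TLB_* flag bit ORed into addr_read / addr_write
   makes the tag comparison fail, which is what pushes MMIO, not-dirty and
   watched pages onto the slow path. */
#if 0
static inline int example_tlb_hit_read(CPUState *env1, target_ulong vaddr, int mmu_idx)
{
    unsigned int idx = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    CPUTLBEntry *e = &env1->tlb_table[mmu_idx][idx];

    if ((vaddr & TARGET_PAGE_MASK) != e->addr_read)
        return 0;   /* miss or flagged entry: take the slow path */
    /* hit: the host address of the data is simply vaddr + e->addend */
    return 1;
}
#endif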
2535
2536#else
2537
2538void tlb_flush(CPUState *env, int flush_global)
2539{
2540}
2541
2542void tlb_flush_page(CPUState *env, target_ulong addr)
2543{
2544}
2545
2546/*
2547 * Walks guest process memory "regions" one by one
2548 * and calls callback function 'fn' for each region.
2549 */
2550
2551struct walk_memory_regions_data
2552{
2553 walk_memory_regions_fn fn;
2554 void *priv;
2555 unsigned long start;
2556 int prot;
2557};
2558
2559static int walk_memory_regions_end(struct walk_memory_regions_data *data,
2560 abi_ulong end, int new_prot)
2561{
2562 if (data->start != -1ul) {
2563 int rc = data->fn(data->priv, data->start, end, data->prot);
2564 if (rc != 0) {
2565 return rc;
2566 }
2567 }
2568
2569 data->start = (new_prot ? end : -1ul);
2570 data->prot = new_prot;
2571
2572 return 0;
2573}
2574
2575static int walk_memory_regions_1(struct walk_memory_regions_data *data,
2576 abi_ulong base, int level, void **lp)
2577{
2578 abi_ulong pa;
2579 int i, rc;
2580
2581 if (*lp == NULL) {
2582 return walk_memory_regions_end(data, base, 0);
2583 }
2584
2585 if (level == 0) {
2586 PageDesc *pd = *lp;
2587 for (i = 0; i < L2_SIZE; ++i) {
2588 int prot = pd[i].flags;
2589
2590 pa = base | (i << TARGET_PAGE_BITS);
2591 if (prot != data->prot) {
2592 rc = walk_memory_regions_end(data, pa, prot);
2593 if (rc != 0) {
2594 return rc;
2595 }
2596 }
2597 }
2598 } else {
2599 void **pp = *lp;
2600 for (i = 0; i < L2_SIZE; ++i) {
2601 pa = base | ((abi_ulong)i <<
2602 (TARGET_PAGE_BITS + L2_BITS * level));
2603 rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
2604 if (rc != 0) {
2605 return rc;
2606 }
2607 }
2608 }
2609
2610 return 0;
2611}
2612
2613int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
2614{
2615 struct walk_memory_regions_data data;
2616 unsigned long i;
2617
2618 data.fn = fn;
2619 data.priv = priv;
2620 data.start = -1ul;
2621 data.prot = 0;
2622
2623 for (i = 0; i < V_L1_SIZE; i++) {
2624 int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
2625 V_L1_SHIFT / L2_BITS - 1, l1_map + i);
2626 if (rc != 0) {
2627 return rc;
2628 }
2629 }
2630
2631 return walk_memory_regions_end(&data, 0, 0);
2632}
2633
2634static int dump_region(void *priv, abi_ulong start,
2635 abi_ulong end, unsigned long prot)
2636{
2637 FILE *f = (FILE *)priv;
2638
2639 (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
2640 " "TARGET_ABI_FMT_lx" %c%c%c\n",
2641 start, end, end - start,
2642 ((prot & PAGE_READ) ? 'r' : '-'),
2643 ((prot & PAGE_WRITE) ? 'w' : '-'),
2644 ((prot & PAGE_EXEC) ? 'x' : '-'));
2645
2646 return (0);
2647}
2648
2649/* dump memory mappings */
2650void page_dump(FILE *f)
2651{
2652 (void) fprintf(f, "%-8s %-8s %-8s %s\n",
2653 "start", "end", "size", "prot");
2654 walk_memory_regions(f, dump_region);
2655}
2656
2657int page_get_flags(target_ulong address)
2658{
2659 PageDesc *p;
2660
2661 p = page_find(address >> TARGET_PAGE_BITS);
2662 if (!p)
2663 return 0;
2664 return p->flags;
2665}
2666
2667/* Modify the flags of a page and invalidate the code if necessary.
2668 The flag PAGE_WRITE_ORG is positioned automatically depending
2669 on PAGE_WRITE. The mmap_lock should already be held. */
2670void page_set_flags(target_ulong start, target_ulong end, int flags)
2671{
2672 target_ulong addr, len;
2673
2674 /* This function should never be called with addresses outside the
2675 guest address space. If this assert fires, it probably indicates
2676 a missing call to h2g_valid. */
2677#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2678 assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
2679#endif
2680 assert(start < end);
2681
2682 start = start & TARGET_PAGE_MASK;
2683 end = TARGET_PAGE_ALIGN(end);
2684
2685 if (flags & PAGE_WRITE) {
2686 flags |= PAGE_WRITE_ORG;
2687 }
2688
2689#ifdef VBOX
2690 AssertMsgFailed(("We shouldn't be here, and if we should, we must have an env to do the proper locking!\n"));
2691#endif
2692 for (addr = start, len = end - start;
2693 len != 0;
2694 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2695 PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2696
2697 /* If the write protection bit is set, then we invalidate
2698 the code inside. */
2699 if (!(p->flags & PAGE_WRITE) &&
2700 (flags & PAGE_WRITE) &&
2701 p->first_tb) {
2702 tb_invalidate_phys_page(addr, 0, NULL);
2703 }
2704 p->flags = flags;
2705 }
2706}
2707
2708int page_check_range(target_ulong start, target_ulong len, int flags)
2709{
2710 PageDesc *p;
2711 target_ulong end;
2712 target_ulong addr;
2713
2714 /* This function should never be called with addresses outside the
2715 guest address space. If this assert fires, it probably indicates
2716 a missing call to h2g_valid. */
2717#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2718 assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
2719#endif
2720
2721 if (len == 0) {
2722 return 0;
2723 }
2724 if (start + len - 1 < start) {
2725 /* We've wrapped around. */
2726 return -1;
2727 }
2728
2729    end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2730 start = start & TARGET_PAGE_MASK;
2731
2732 for (addr = start, len = end - start;
2733 len != 0;
2734 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2735 p = page_find(addr >> TARGET_PAGE_BITS);
2736        if (!p)
2737            return -1;
2738        if (!(p->flags & PAGE_VALID))
2739            return -1;
2740
2741 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
2742 return -1;
2743 if (flags & PAGE_WRITE) {
2744 if (!(p->flags & PAGE_WRITE_ORG))
2745 return -1;
2746 /* unprotect the page if it was put read-only because it
2747 contains translated code */
2748 if (!(p->flags & PAGE_WRITE)) {
2749 if (!page_unprotect(addr, 0, NULL))
2750 return -1;
2751 }
2752 return 0;
2753 }
2754 }
2755 return 0;
2756}
2757
2758/* called from signal handler: invalidate the code and unprotect the
2759 page. Return TRUE if the fault was successfully handled. */
2760int page_unprotect(target_ulong address, unsigned long pc, void *puc)
2761{
2762 unsigned int prot;
2763 PageDesc *p;
2764 target_ulong host_start, host_end, addr;
2765
2766 /* Technically this isn't safe inside a signal handler. However we
2767 know this only ever happens in a synchronous SEGV handler, so in
2768 practice it seems to be ok. */
2769 mmap_lock();
2770
2771 p = page_find(address >> TARGET_PAGE_BITS);
2772 if (!p) {
2773 mmap_unlock();
2774 return 0;
2775 }
2776
2777 /* if the page was really writable, then we change its
2778 protection back to writable */
2779 if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
2780 host_start = address & qemu_host_page_mask;
2781 host_end = host_start + qemu_host_page_size;
2782
2783 prot = 0;
2784 for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
2785 p = page_find(addr >> TARGET_PAGE_BITS);
2786 p->flags |= PAGE_WRITE;
2787 prot |= p->flags;
2788
2789 /* and since the content will be modified, we must invalidate
2790 the corresponding translated code. */
2791 tb_invalidate_phys_page(addr, pc, puc);
2792#ifdef DEBUG_TB_CHECK
2793 tb_invalidate_check(addr);
2794#endif
2795 }
2796 mprotect((void *)g2h(host_start), qemu_host_page_size,
2797 prot & PAGE_BITS);
2798
2799 mmap_unlock();
2800 return 1;
2801 }
2802 mmap_unlock();
2803 return 0;
2804}
2805
2806static inline void tlb_set_dirty(CPUState *env,
2807 unsigned long addr, target_ulong vaddr)
2808{
2809}
2810#endif /* defined(CONFIG_USER_ONLY) */
2811
2812#if !defined(CONFIG_USER_ONLY)
2813
2814#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
2815typedef struct subpage_t {
2816 target_phys_addr_t base;
2817 ram_addr_t sub_io_index[TARGET_PAGE_SIZE];
2818 ram_addr_t region_offset[TARGET_PAGE_SIZE];
2819} subpage_t;
2820
2821static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2822 ram_addr_t memory, ram_addr_t region_offset);
2823static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2824 ram_addr_t orig_memory,
2825 ram_addr_t region_offset);
2826#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2827 need_subpage) \
2828 do { \
2829 if (addr > start_addr) \
2830 start_addr2 = 0; \
2831 else { \
2832 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2833 if (start_addr2 > 0) \
2834 need_subpage = 1; \
2835 } \
2836 \
2837 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
2838 end_addr2 = TARGET_PAGE_SIZE - 1; \
2839 else { \
2840 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2841 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2842 need_subpage = 1; \
2843 } \
2844 } while (0)
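/* Example (illustrative): registering a region whose start_addr has page
   offset 0x800 makes start_addr2 = 0x800 for its first page, and a region
   that stops short of a page boundary yields end_addr2 < TARGET_PAGE_SIZE - 1
   for its last page; either condition sets need_subpage so that only the
   covered part of the page is redirected through a subpage_t. */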
2845
2846/* register physical memory.
2847 For RAM, 'size' must be a multiple of the target page size.
2848 If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2849 io memory page. The address used when calling the IO function is
2850 the offset from the start of the region, plus region_offset. Both
2851 start_addr and region_offset are rounded down to a page boundary
2852 before calculating this offset. This should not be a problem unless
2853 the low bits of start_addr and region_offset differ. */
2854void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
2855 ram_addr_t size,
2856 ram_addr_t phys_offset,
2857 ram_addr_t region_offset)
2858{
2859 target_phys_addr_t addr, end_addr;
2860 PhysPageDesc *p;
2861 CPUState *env;
2862 ram_addr_t orig_size = size;
2863 subpage_t *subpage;
2864
2865#ifndef VBOX
2866 cpu_notify_set_memory(start_addr, size, phys_offset);
2867#endif /* !VBOX */
2868
2869 if (phys_offset == IO_MEM_UNASSIGNED) {
2870 region_offset = start_addr;
2871 }
2872 region_offset &= TARGET_PAGE_MASK;
2873 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
2874 end_addr = start_addr + (target_phys_addr_t)size;
2875 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
2876 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2877 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
2878 ram_addr_t orig_memory = p->phys_offset;
2879 target_phys_addr_t start_addr2, end_addr2;
2880 int need_subpage = 0;
2881
2882 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2883 need_subpage);
2884 if (need_subpage) {
2885 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2886 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2887 &p->phys_offset, orig_memory,
2888 p->region_offset);
2889 } else {
2890 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2891 >> IO_MEM_SHIFT];
2892 }
2893 subpage_register(subpage, start_addr2, end_addr2, phys_offset,
2894 region_offset);
2895 p->region_offset = 0;
2896 } else {
2897 p->phys_offset = phys_offset;
2898 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2899 (phys_offset & IO_MEM_ROMD))
2900 phys_offset += TARGET_PAGE_SIZE;
2901 }
2902 } else {
2903 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2904 p->phys_offset = phys_offset;
2905 p->region_offset = region_offset;
2906 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2907 (phys_offset & IO_MEM_ROMD)) {
2908 phys_offset += TARGET_PAGE_SIZE;
2909 } else {
2910 target_phys_addr_t start_addr2, end_addr2;
2911 int need_subpage = 0;
2912
2913 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2914 end_addr2, need_subpage);
2915
2916 if (need_subpage) {
2917 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2918 &p->phys_offset, IO_MEM_UNASSIGNED,
2919 addr & TARGET_PAGE_MASK);
2920 subpage_register(subpage, start_addr2, end_addr2,
2921 phys_offset, region_offset);
2922 p->region_offset = 0;
2923 }
2924 }
2925 }
2926 region_offset += TARGET_PAGE_SIZE;
2927 }
2928
2929 /* since each CPU stores ram addresses in its TLB cache, we must
2930 reset the modified entries */
2931#ifndef VBOX
2932 /* XXX: slow ! */
2933 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2934 tlb_flush(env, 1);
2935 }
2936#else
2937    /* We have one thread per CPU, so another EMT might be executing code in
2938       the recompiler right now; flushing its TLB could crash it. */
2939 env = first_cpu;
2940 if (EMRemIsLockOwner(env->pVM))
2941 tlb_flush(env, 1);
2942 else
2943 ASMAtomicOrS32((int32_t volatile *)&env->interrupt_request,
2944 CPU_INTERRUPT_EXTERNAL_FLUSH_TLB);
2945#endif
2946}
2947
2948/* XXX: temporary until new memory mapping API */
2949ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2950{
2951 PhysPageDesc *p;
2952
2953 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2954 if (!p)
2955 return IO_MEM_UNASSIGNED;
2956 return p->phys_offset;
2957}
2958
2959#ifndef VBOX
2960
2961void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2962{
2963 if (kvm_enabled())
2964 kvm_coalesce_mmio_region(addr, size);
2965}
2966
2967void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2968{
2969 if (kvm_enabled())
2970 kvm_uncoalesce_mmio_region(addr, size);
2971}
2972
2973void qemu_flush_coalesced_mmio_buffer(void)
2974{
2975 if (kvm_enabled())
2976 kvm_flush_coalesced_mmio_buffer();
2977}
2978
2979#if defined(__linux__) && !defined(TARGET_S390X)
2980
2981#include <sys/vfs.h>
2982
2983#define HUGETLBFS_MAGIC 0x958458f6
2984
2985static long gethugepagesize(const char *path)
2986{
2987 struct statfs fs;
2988 int ret;
2989
2990 do {
2991 ret = statfs(path, &fs);
2992 } while (ret != 0 && errno == EINTR);
2993
2994 if (ret != 0) {
2995 perror(path);
2996 return 0;
2997 }
2998
2999 if (fs.f_type != HUGETLBFS_MAGIC)
3000 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
3001
3002 return fs.f_bsize;
3003}
3004
3005static void *file_ram_alloc(RAMBlock *block,
3006 ram_addr_t memory,
3007 const char *path)
3008{
3009 char *filename;
3010 void *area;
3011 int fd;
3012#ifdef MAP_POPULATE
3013 int flags;
3014#endif
3015 unsigned long hpagesize;
3016
3017 hpagesize = gethugepagesize(path);
3018 if (!hpagesize) {
3019 return NULL;
3020 }
3021
3022 if (memory < hpagesize) {
3023 return NULL;
3024 }
3025
3026 if (kvm_enabled() && !kvm_has_sync_mmu()) {
3027 fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
3028 return NULL;
3029 }
3030
3031 if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
3032 return NULL;
3033 }
3034
3035 fd = mkstemp(filename);
3036 if (fd < 0) {
3037 perror("unable to create backing store for hugepages");
3038 free(filename);
3039 return NULL;
3040 }
3041 unlink(filename);
3042 free(filename);
3043
3044 memory = (memory+hpagesize-1) & ~(hpagesize-1);
3045
3046 /*
3047 * ftruncate is not supported by hugetlbfs in older
3048 * hosts, so don't bother bailing out on errors.
3049 * If anything goes wrong with it under other filesystems,
3050 * mmap will fail.
3051 */
3052 if (ftruncate(fd, memory))
3053 perror("ftruncate");
3054
3055#ifdef MAP_POPULATE
3056 /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
3057 * MAP_PRIVATE is requested. For mem_prealloc we mmap as MAP_SHARED
3058 * to sidestep this quirk.
3059 */
3060 flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
3061 area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
3062#else
3063 area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
3064#endif
3065 if (area == MAP_FAILED) {
3066 perror("file_ram_alloc: can't mmap RAM pages");
3067 close(fd);
3068 return (NULL);
3069 }
3070 block->fd = fd;
3071 return area;
3072}
3073#endif
3074
3075static ram_addr_t find_ram_offset(ram_addr_t size)
3076{
3077 RAMBlock *block, *next_block;
3078 ram_addr_t offset = 0, mingap = ULONG_MAX;
3079
3080 if (QLIST_EMPTY(&ram_list.blocks))
3081 return 0;
3082
3083 QLIST_FOREACH(block, &ram_list.blocks, next) {
3084 ram_addr_t end, next = ULONG_MAX;
3085
3086 end = block->offset + block->length;
3087
3088 QLIST_FOREACH(next_block, &ram_list.blocks, next) {
3089 if (next_block->offset >= end) {
3090 next = MIN(next, next_block->offset);
3091 }
3092 }
3093 if (next - end >= size && next - end < mingap) {
3094 offset = end;
3095 mingap = next - end;
3096 }
3097 }
3098 return offset;
3099}
3100
3101static ram_addr_t last_ram_offset(void)
3102{
3103 RAMBlock *block;
3104 ram_addr_t last = 0;
3105
3106 QLIST_FOREACH(block, &ram_list.blocks, next)
3107 last = MAX(last, block->offset + block->length);
3108
3109 return last;
3110}
3111
3112ram_addr_t qemu_ram_alloc_from_ptr(DeviceState *dev, const char *name,
3113 ram_addr_t size, void *host)
3114{
3115 RAMBlock *new_block, *block;
3116
3117 size = TARGET_PAGE_ALIGN(size);
3118 new_block = qemu_mallocz(sizeof(*new_block));
3119
3120 if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) {
3121 char *id = dev->parent_bus->info->get_dev_path(dev);
3122 if (id) {
3123 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
3124 qemu_free(id);
3125 }
3126 }
3127 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
3128
3129 QLIST_FOREACH(block, &ram_list.blocks, next) {
3130 if (!strcmp(block->idstr, new_block->idstr)) {
3131 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
3132 new_block->idstr);
3133 abort();
3134 }
3135 }
3136
3137 new_block->host = host;
3138
3139 new_block->offset = find_ram_offset(size);
3140 new_block->length = size;
3141
3142 QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);
3143
3144 ram_list.phys_dirty = qemu_realloc(ram_list.phys_dirty,
3145 last_ram_offset() >> TARGET_PAGE_BITS);
3146 memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
3147 0xff, size >> TARGET_PAGE_BITS);
3148
3149 if (kvm_enabled())
3150 kvm_setup_guest_memory(new_block->host, size);
3151
3152 return new_block->offset;
3153}
3154
3155ram_addr_t qemu_ram_alloc(DeviceState *dev, const char *name, ram_addr_t size)
3156{
3157 RAMBlock *new_block, *block;
3158
3159 size = TARGET_PAGE_ALIGN(size);
3160 new_block = qemu_mallocz(sizeof(*new_block));
3161
3162 if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) {
3163 char *id = dev->parent_bus->info->get_dev_path(dev);
3164 if (id) {
3165 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
3166 qemu_free(id);
3167 }
3168 }
3169 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
3170
3171 QLIST_FOREACH(block, &ram_list.blocks, next) {
3172 if (!strcmp(block->idstr, new_block->idstr)) {
3173 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
3174 new_block->idstr);
3175 abort();
3176 }
3177 }
3178
3179 if (mem_path) {
3180#if defined (__linux__) && !defined(TARGET_S390X)
3181 new_block->host = file_ram_alloc(new_block, size, mem_path);
3182 if (!new_block->host) {
3183 new_block->host = qemu_vmalloc(size);
3184#ifdef MADV_MERGEABLE
3185 madvise(new_block->host, size, MADV_MERGEABLE);
3186#endif
3187 }
3188#else
3189 fprintf(stderr, "-mem-path option unsupported\n");
3190 exit(1);
3191#endif
3192 } else {
3193#if defined(TARGET_S390X) && defined(CONFIG_KVM)
3194 /* XXX S390 KVM requires the topmost vma of the RAM to be < 256GB */
3195 new_block->host = mmap((void*)0x1000000, size,
3196 PROT_EXEC|PROT_READ|PROT_WRITE,
3197 MAP_SHARED | MAP_ANONYMOUS, -1, 0);
3198#else
3199 new_block->host = qemu_vmalloc(size);
3200#endif
3201#ifdef MADV_MERGEABLE
3202 madvise(new_block->host, size, MADV_MERGEABLE);
3203#endif
3204 }
3205 new_block->offset = find_ram_offset(size);
3206 new_block->length = size;
3207
3208 QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);
3209
3210 ram_list.phys_dirty = qemu_realloc(ram_list.phys_dirty,
3211 last_ram_offset() >> TARGET_PAGE_BITS);
3212 memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
3213 0xff, size >> TARGET_PAGE_BITS);
3214
3215 if (kvm_enabled())
3216 kvm_setup_guest_memory(new_block->host, size);
3217
3218 return new_block->offset;
3219}
3220
3221void qemu_ram_free(ram_addr_t addr)
3222{
3223 RAMBlock *block;
3224
3225 QLIST_FOREACH(block, &ram_list.blocks, next) {
3226 if (addr == block->offset) {
3227 QLIST_REMOVE(block, next);
3228 if (mem_path) {
3229#if defined (__linux__) && !defined(TARGET_S390X)
3230 if (block->fd) {
3231 munmap(block->host, block->length);
3232 close(block->fd);
3233 } else {
3234 qemu_vfree(block->host);
3235 }
3236#endif
3237 } else {
3238#if defined(TARGET_S390X) && defined(CONFIG_KVM)
3239 munmap(block->host, block->length);
3240#else
3241 qemu_vfree(block->host);
3242#endif
3243 }
3244 qemu_free(block);
3245 return;
3246 }
3247 }
3248
3249}
3250
3251/* Return a host pointer to ram allocated with qemu_ram_alloc.
3252 With the exception of the softmmu code in this file, this should
3253 only be used for local memory (e.g. video ram) that the device owns,
3254 and knows it isn't going to access beyond the end of the block.
3255
3256 It should not be used for general purpose DMA.
3257 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
3258 */
3259void *qemu_get_ram_ptr(ram_addr_t addr)
3260{
3261 RAMBlock *block;
3262
3263 QLIST_FOREACH(block, &ram_list.blocks, next) {
3264 if (addr - block->offset < block->length) {
3265 QLIST_REMOVE(block, next);
3266 QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
3267 return block->host + (addr - block->offset);
3268 }
3269 }
3270
3271 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3272 abort();
3273
3274 return NULL;
3275}
3276
3277/* Some of the softmmu routines need to translate from a host pointer
3278 (typically a TLB entry) back to a ram offset. */
3279ram_addr_t qemu_ram_addr_from_host(void *ptr)
3280{
3281 RAMBlock *block;
3282 uint8_t *host = ptr;
3283
3284 QLIST_FOREACH(block, &ram_list.blocks, next) {
3285 if (host - block->host < block->length) {
3286 return block->offset + (host - block->host);
3287 }
3288 }
3289
3290 fprintf(stderr, "Bad ram pointer %p\n", ptr);
3291 abort();
3292
3293 return 0;
3294}
3295
3296#endif /* !VBOX */
3297
3298static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
3299{
3300#ifdef DEBUG_UNASSIGNED
3301 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
3302#endif
3303#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
3304 do_unassigned_access(addr, 0, 0, 0, 1);
3305#endif
3306 return 0;
3307}
3308
3309static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
3310{
3311#ifdef DEBUG_UNASSIGNED
3312 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
3313#endif
3314#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
3315 do_unassigned_access(addr, 0, 0, 0, 2);
3316#endif
3317 return 0;
3318}
3319
3320static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
3321{
3322#ifdef DEBUG_UNASSIGNED
3323 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
3324#endif
3325#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
3326 do_unassigned_access(addr, 0, 0, 0, 4);
3327#endif
3328 return 0;
3329}
3330
3331static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
3332{
3333#ifdef DEBUG_UNASSIGNED
3334 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
3335#endif
3336#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
3337 do_unassigned_access(addr, 1, 0, 0, 1);
3338#endif
3339}
3340
3341static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
3342{
3343#ifdef DEBUG_UNASSIGNED
3344 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
3345#endif
3346#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
3347 do_unassigned_access(addr, 1, 0, 0, 2);
3348#endif
3349}
3350
3351static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
3352{
3353#ifdef DEBUG_UNASSIGNED
3354 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
3355#endif
3356#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
3357 do_unassigned_access(addr, 1, 0, 0, 4);
3358#endif
3359}
3360
3361static CPUReadMemoryFunc * const unassigned_mem_read[3] = {
3362 unassigned_mem_readb,
3363 unassigned_mem_readw,
3364 unassigned_mem_readl,
3365};
3366
3367static CPUWriteMemoryFunc * const unassigned_mem_write[3] = {
3368 unassigned_mem_writeb,
3369 unassigned_mem_writew,
3370 unassigned_mem_writel,
3371};
3372
3373static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
3374 uint32_t val)
3375{
3376 int dirty_flags;
3377 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3378 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
3379#if !defined(CONFIG_USER_ONLY)
3380 tb_invalidate_phys_page_fast(ram_addr, 1);
3381 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3382#endif
3383 }
3384#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
3385 remR3PhysWriteU8(ram_addr, val);
3386#else
3387 stb_p(qemu_get_ram_ptr(ram_addr), val);
3388#endif
3389 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
3390 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
3391 /* we remove the notdirty callback only if the code has been
3392 flushed */
3393 if (dirty_flags == 0xff)
3394 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
3395}
3396
3397static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
3398 uint32_t val)
3399{
3400 int dirty_flags;
3401 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3402 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
3403#if !defined(CONFIG_USER_ONLY)
3404 tb_invalidate_phys_page_fast(ram_addr, 2);
3405 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3406#endif
3407 }
3408#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
3409 remR3PhysWriteU16(ram_addr, val);
3410#else
3411 stw_p(qemu_get_ram_ptr(ram_addr), val);
3412#endif
3413 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
3414 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
3415 /* we remove the notdirty callback only if the code has been
3416 flushed */
3417 if (dirty_flags == 0xff)
3418 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
3419}
3420
3421static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
3422 uint32_t val)
3423{
3424 int dirty_flags;
3425 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3426 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
3427#if !defined(CONFIG_USER_ONLY)
3428 tb_invalidate_phys_page_fast(ram_addr, 4);
3429 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3430#endif
3431 }
3432#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
3433 remR3PhysWriteU32(ram_addr, val);
3434#else
3435 stl_p(qemu_get_ram_ptr(ram_addr), val);
3436#endif
3437 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
3438 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
3439 /* we remove the notdirty callback only if the code has been
3440 flushed */
3441 if (dirty_flags == 0xff)
3442 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
3443}
3444
3445static CPUReadMemoryFunc * const error_mem_read[3] = {
3446 NULL, /* never used */
3447 NULL, /* never used */
3448 NULL, /* never used */
3449};
3450
3451static CPUWriteMemoryFunc * const notdirty_mem_write[3] = {
3452 notdirty_mem_writeb,
3453 notdirty_mem_writew,
3454 notdirty_mem_writel,
3455};
3456
3457/* Generate a debug exception if a watchpoint has been hit. */
3458static void check_watchpoint(int offset, int len_mask, int flags)
3459{
3460 CPUState *env = cpu_single_env;
3461 target_ulong pc, cs_base;
3462 TranslationBlock *tb;
3463 target_ulong vaddr;
3464 CPUWatchpoint *wp;
3465 int cpu_flags;
3466
3467 if (env->watchpoint_hit) {
3468 /* We re-entered the check after replacing the TB. Now raise
3469     * the debug interrupt so that it will trigger after the
3470 * current instruction. */
3471 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
3472 return;
3473 }
3474 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
3475 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
3476 if ((vaddr == (wp->vaddr & len_mask) ||
3477 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
3478 wp->flags |= BP_WATCHPOINT_HIT;
3479 if (!env->watchpoint_hit) {
3480 env->watchpoint_hit = wp;
3481 tb = tb_find_pc(env->mem_io_pc);
3482 if (!tb) {
3483 cpu_abort(env, "check_watchpoint: could not find TB for "
3484 "pc=%p", (void *)env->mem_io_pc);
3485 }
3486 cpu_restore_state(tb, env, env->mem_io_pc, NULL);
3487 tb_phys_invalidate(tb, -1);
3488 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
3489 env->exception_index = EXCP_DEBUG;
3490 } else {
3491 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
3492 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
3493 }
3494 cpu_resume_from_signal(env, NULL);
3495 }
3496 } else {
3497 wp->flags &= ~BP_WATCHPOINT_HIT;
3498 }
3499 }
3500}
3501
3502/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
3503 so these check for a hit then pass through to the normal out-of-line
3504 phys routines. */
3505static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
3506{
3507 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
3508 return ldub_phys(addr);
3509}
3510
3511static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
3512{
3513 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
3514 return lduw_phys(addr);
3515}
3516
3517static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
3518{
3519 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
3520 return ldl_phys(addr);
3521}
3522
3523static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
3524 uint32_t val)
3525{
3526 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
3527 stb_phys(addr, val);
3528}
3529
3530static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
3531 uint32_t val)
3532{
3533 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
3534 stw_phys(addr, val);
3535}
3536
3537static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
3538 uint32_t val)
3539{
3540 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
3541 stl_phys(addr, val);
3542}
3543
3544static CPUReadMemoryFunc * const watch_mem_read[3] = {
3545 watch_mem_readb,
3546 watch_mem_readw,
3547 watch_mem_readl,
3548};
3549
3550static CPUWriteMemoryFunc * const watch_mem_write[3] = {
3551 watch_mem_writeb,
3552 watch_mem_writew,
3553 watch_mem_writel,
3554};
3555
3556static inline uint32_t subpage_readlen (subpage_t *mmio,
3557 target_phys_addr_t addr,
3558 unsigned int len)
3559{
3560 unsigned int idx = SUBPAGE_IDX(addr);
3561#if defined(DEBUG_SUBPAGE)
3562 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
3563 mmio, len, addr, idx);
3564#endif
3565
3566 addr += mmio->region_offset[idx];
3567 idx = mmio->sub_io_index[idx];
3568 return io_mem_read[idx][len](io_mem_opaque[idx], addr);
3569}
3570
3571static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
3572 uint32_t value, unsigned int len)
3573{
3574 unsigned int idx = SUBPAGE_IDX(addr);
3575#if defined(DEBUG_SUBPAGE)
3576 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n",
3577 __func__, mmio, len, addr, idx, value);
3578#endif
3579
3580 addr += mmio->region_offset[idx];
3581 idx = mmio->sub_io_index[idx];
3582 io_mem_write[idx][len](io_mem_opaque[idx], addr, value);
3583}
3584
3585static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
3586{
3587 return subpage_readlen(opaque, addr, 0);
3588}
3589
3590static void subpage_writeb (void *opaque, target_phys_addr_t addr,
3591 uint32_t value)
3592{
3593 subpage_writelen(opaque, addr, value, 0);
3594}
3595
3596static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
3597{
3598 return subpage_readlen(opaque, addr, 1);
3599}
3600
3601static void subpage_writew (void *opaque, target_phys_addr_t addr,
3602 uint32_t value)
3603{
3604 subpage_writelen(opaque, addr, value, 1);
3605}
3606
3607static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
3608{
3609 return subpage_readlen(opaque, addr, 2);
3610}
3611
3612static void subpage_writel (void *opaque, target_phys_addr_t addr,
3613 uint32_t value)
3614{
3615 subpage_writelen(opaque, addr, value, 2);
3616}
3617
3618static CPUReadMemoryFunc * const subpage_read[] = {
3619 &subpage_readb,
3620 &subpage_readw,
3621 &subpage_readl,
3622};
3623
3624static CPUWriteMemoryFunc * const subpage_write[] = {
3625 &subpage_writeb,
3626 &subpage_writew,
3627 &subpage_writel,
3628};
3629
3630static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
3631 ram_addr_t memory, ram_addr_t region_offset)
3632{
3633 int idx, eidx;
3634
3635 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
3636 return -1;
3637 idx = SUBPAGE_IDX(start);
3638 eidx = SUBPAGE_IDX(end);
3639#if defined(DEBUG_SUBPAGE)
3640 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
3641 mmio, start, end, idx, eidx, memory);
3642#endif
3643 memory = (memory >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3644 for (; idx <= eidx; idx++) {
3645 mmio->sub_io_index[idx] = memory;
3646 mmio->region_offset[idx] = region_offset;
3647 }
3648
3649 return 0;
3650}
3651
3652static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
3653 ram_addr_t orig_memory,
3654 ram_addr_t region_offset)
3655{
3656 subpage_t *mmio;
3657 int subpage_memory;
3658
3659 mmio = qemu_mallocz(sizeof(subpage_t));
3660
3661 mmio->base = base;
3662 subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio);
3663#if defined(DEBUG_SUBPAGE)
3664 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
3665 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
3666#endif
3667 *phys = subpage_memory | IO_MEM_SUBPAGE;
3668 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, orig_memory, region_offset);
3669
3670 return mmio;
3671}
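/* Example (illustrative): to carve a small MMIO window out of an otherwise
   ordinary page, subpage_init() first points the whole page back at its
   original memory, after which a call such as
   subpage_register(mmio, 0x800, 0x8ff, io_index, region_offset) redirects
   just that byte range to the device handlers (io_index/region_offset being
   whatever was passed for that mapping) while the rest of the page keeps its
   previous behaviour. */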
3672
3673static int get_free_io_mem_idx(void)
3674{
3675 int i;
3676
3677    for (i = 0; i < IO_MEM_NB_ENTRIES; i++)
3678 if (!io_mem_used[i]) {
3679 io_mem_used[i] = 1;
3680 return i;
3681 }
3682    fprintf(stderr, "Ran out of io_mem_idx entries, max %d!\n", IO_MEM_NB_ENTRIES);
3683 return -1;
3684}
3685
3686/* mem_read and mem_write are arrays of functions containing the
3687 function to access byte (index 0), word (index 1) and dword (index
3688 2). Functions can be omitted with a NULL function pointer.
3689   If io_index is non-zero, the corresponding io zone is
3690   modified. If it is zero, a new io zone is allocated. The return
3691   value can be used with cpu_register_physical_memory(). (-1) is
3692   returned on error. */
3693static int cpu_register_io_memory_fixed(int io_index,
3694 CPUReadMemoryFunc * const *mem_read,
3695 CPUWriteMemoryFunc * const *mem_write,
3696 void *opaque)
3697{
3698 int i;
3699
3700 if (io_index <= 0) {
3701 io_index = get_free_io_mem_idx();
3702 if (io_index == -1)
3703 return io_index;
3704 } else {
3705 io_index >>= IO_MEM_SHIFT;
3706 if (io_index >= IO_MEM_NB_ENTRIES)
3707 return -1;
3708 }
3709
3710 for (i = 0; i < 3; ++i) {
3711 io_mem_read[io_index][i]
3712 = (mem_read[i] ? mem_read[i] : unassigned_mem_read[i]);
3713 }
3714 for (i = 0; i < 3; ++i) {
3715 io_mem_write[io_index][i]
3716 = (mem_write[i] ? mem_write[i] : unassigned_mem_write[i]);
3717 }
3718 io_mem_opaque[io_index] = opaque;
3719
3720 return (io_index << IO_MEM_SHIFT);
3721}
3722
3723int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
3724 CPUWriteMemoryFunc * const *mem_write,
3725 void *opaque)
3726{
3727 return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque);
3728}
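/* Usage sketch (hypothetical names): a device model would typically do
       int idx = cpu_register_io_memory(my_read_fns, my_write_fns, my_state);
       cpu_register_physical_memory(base, size, idx);
   where my_read_fns/my_write_fns are 3-entry byte/word/dword tables as
   described above; see the io_mem_watch registration in io_mem_init() below
   for an in-tree example. */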
3729
3730void cpu_unregister_io_memory(int io_table_address)
3731{
3732 int i;
3733 int io_index = io_table_address >> IO_MEM_SHIFT;
3734
3735    for (i = 0; i < 3; i++) {
3736 io_mem_read[io_index][i] = unassigned_mem_read[i];
3737 io_mem_write[io_index][i] = unassigned_mem_write[i];
3738 }
3739 io_mem_opaque[io_index] = NULL;
3740 io_mem_used[io_index] = 0;
3741}
3742
3743static void io_mem_init(void)
3744{
3745 int i;
3746
3747 cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read, unassigned_mem_write, NULL);
3748 cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read, unassigned_mem_write, NULL);
3749 cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read, notdirty_mem_write, NULL);
3750    for (i = 0; i < 5; i++)
3751 io_mem_used[i] = 1;
3752
3753 io_mem_watch = cpu_register_io_memory(watch_mem_read,
3754 watch_mem_write, NULL);
3755}
3756
3757#endif /* !defined(CONFIG_USER_ONLY) */
3758
3759/* physical memory access (slow version, mainly for debug) */
3760#if defined(CONFIG_USER_ONLY)
3761int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3762 uint8_t *buf, int len, int is_write)
3763{
3764 int l, flags;
3765 target_ulong page;
3766 void * p;
3767
3768 while (len > 0) {
3769 page = addr & TARGET_PAGE_MASK;
3770 l = (page + TARGET_PAGE_SIZE) - addr;
3771 if (l > len)
3772 l = len;
3773 flags = page_get_flags(page);
3774 if (!(flags & PAGE_VALID))
3775 return -1;
3776 if (is_write) {
3777 if (!(flags & PAGE_WRITE))
3778 return -1;
3779 /* XXX: this code should not depend on lock_user */
3780 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
3781 return -1;
3782 memcpy(p, buf, l);
3783 unlock_user(p, addr, l);
3784 } else {
3785 if (!(flags & PAGE_READ))
3786 return -1;
3787 /* XXX: this code should not depend on lock_user */
3788 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
3789 return -1;
3790 memcpy(buf, p, l);
3791 unlock_user(p, addr, 0);
3792 }
3793 len -= l;
3794 buf += l;
3795 addr += l;
3796 }
3797 return 0;
3798}
3799
3800#else
3801void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
3802 int len, int is_write)
3803{
3804 int l, io_index;
3805 uint8_t *ptr;
3806 uint32_t val;
3807 target_phys_addr_t page;
3808 unsigned long pd;
3809 PhysPageDesc *p;
3810
3811 while (len > 0) {
3812 page = addr & TARGET_PAGE_MASK;
3813 l = (page + TARGET_PAGE_SIZE) - addr;
3814 if (l > len)
3815 l = len;
3816 p = phys_page_find(page >> TARGET_PAGE_BITS);
3817 if (!p) {
3818 pd = IO_MEM_UNASSIGNED;
3819 } else {
3820 pd = p->phys_offset;
3821 }
3822
3823 if (is_write) {
3824 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3825 target_phys_addr_t addr1 = addr;
3826 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3827 if (p)
3828 addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3829 /* XXX: could force cpu_single_env to NULL to avoid
3830 potential bugs */
3831 if (l >= 4 && ((addr1 & 3) == 0)) {
3832 /* 32 bit write access */
3833#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
3834 val = ldl_p(buf);
3835#else
3836 val = *(const uint32_t *)buf;
3837#endif
3838 io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
3839 l = 4;
3840 } else if (l >= 2 && ((addr1 & 1) == 0)) {
3841 /* 16 bit write access */
3842#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
3843 val = lduw_p(buf);
3844#else
3845 val = *(const uint16_t *)buf;
3846#endif
3847 io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
3848 l = 2;
3849 } else {
3850 /* 8 bit write access */
3851#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
3852 val = ldub_p(buf);
3853#else
3854 val = *(const uint8_t *)buf;
3855#endif
3856 io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
3857 l = 1;
3858 }
3859 } else {
3860 unsigned long addr1;
3861 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3862 /* RAM case */
3863#ifdef VBOX
3864 remR3PhysWrite(addr1, buf, l); NOREF(ptr);
3865#else
3866 ptr = qemu_get_ram_ptr(addr1);
3867 memcpy(ptr, buf, l);
3868#endif
3869 if (!cpu_physical_memory_is_dirty(addr1)) {
3870 /* invalidate code */
3871 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3872 /* set dirty bit */
3873 cpu_physical_memory_set_dirty_flags(
3874 addr1, (0xff & ~CODE_DIRTY_FLAG));
3875 }
3876 }
3877 } else {
3878 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3879 !(pd & IO_MEM_ROMD)) {
3880 target_phys_addr_t addr1 = addr;
3881 /* I/O case */
3882 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3883 if (p)
3884 addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3885 if (l >= 4 && ((addr1 & 3) == 0)) {
3886 /* 32 bit read access */
3887 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
3888#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
3889 stl_p(buf, val);
3890#else
3891 *(uint32_t *)buf = val;
3892#endif
3893 l = 4;
3894 } else if (l >= 2 && ((addr1 & 1) == 0)) {
3895 /* 16 bit read access */
3896 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
3897#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
3898 stw_p(buf, val);
3899#else
3900 *(uint16_t *)buf = val;
3901#endif
3902 l = 2;
3903 } else {
3904 /* 8 bit read access */
3905 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
3906#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
3907 stb_p(buf, val);
3908#else
3909 *(uint8_t *)buf = val;
3910#endif
3911 l = 1;
3912 }
3913 } else {
3914 /* RAM case */
3915#ifdef VBOX
3916 remR3PhysRead((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK), buf, l); NOREF(ptr);
3917#else
3918 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3919 (addr & ~TARGET_PAGE_MASK);
3920 memcpy(buf, ptr, l);
3921#endif
3922 }
3923 }
3924 len -= l;
3925 buf += l;
3926 addr += l;
3927 }
3928}
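/* Usage sketch: to read a 32-bit value from guest-physical address gpa
   (an example variable), a caller can do
       uint32_t val;
       cpu_physical_memory_rw(gpa, (uint8_t *)&val, 4, 0);
   and pass is_write = 1 for the store direction; fixed-size helpers such as
   ldl_phys()/stl_phys() wrap the common cases. */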
3929
3930#ifndef VBOX
3931
3932/* used for ROM loading : can write in RAM and ROM */
3933void cpu_physical_memory_write_rom(target_phys_addr_t addr,
3934 const uint8_t *buf, int len)
3935{
3936 int l;
3937 uint8_t *ptr;
3938 target_phys_addr_t page;
3939 unsigned long pd;
3940 PhysPageDesc *p;
3941
3942 while (len > 0) {
3943 page = addr & TARGET_PAGE_MASK;
3944 l = (page + TARGET_PAGE_SIZE) - addr;
3945 if (l > len)
3946 l = len;
3947 p = phys_page_find(page >> TARGET_PAGE_BITS);
3948 if (!p) {
3949 pd = IO_MEM_UNASSIGNED;
3950 } else {
3951 pd = p->phys_offset;
3952 }
3953
3954 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
3955 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
3956 !(pd & IO_MEM_ROMD)) {
3957 /* do nothing */
3958 } else {
3959 unsigned long addr1;
3960 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3961 /* ROM/RAM case */
3962 ptr = qemu_get_ram_ptr(addr1);
3963 memcpy(ptr, buf, l);
3964 }
3965 len -= l;
3966 buf += l;
3967 addr += l;
3968 }
3969}
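/*
 * Usage sketch for cpu_physical_memory_write_rom(): unlike
 * cpu_physical_memory_rw(), this also patches pages registered as ROM, which
 * is what firmware/BIOS loaders rely on.  The rom_base value is an
 * illustrative assumption, not an address defined by this file.
 */
static void example_load_rom_image(const uint8_t *image, int image_size)
{
    const target_phys_addr_t rom_base = 0xfffe0000; /* assumed ROM mapping */

    cpu_physical_memory_write_rom(rom_base, image, image_size);
}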
3970
3971typedef struct {
3972 void *buffer;
3973 target_phys_addr_t addr;
3974 target_phys_addr_t len;
3975} BounceBuffer;
3976
3977static BounceBuffer bounce;
3978
3979typedef struct MapClient {
3980 void *opaque;
3981 void (*callback)(void *opaque);
3982 QLIST_ENTRY(MapClient) link;
3983} MapClient;
3984
3985static QLIST_HEAD(map_client_list, MapClient) map_client_list
3986 = QLIST_HEAD_INITIALIZER(map_client_list);
3987
3988void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3989{
3990 MapClient *client = qemu_malloc(sizeof(*client));
3991
3992 client->opaque = opaque;
3993 client->callback = callback;
3994 QLIST_INSERT_HEAD(&map_client_list, client, link);
3995 return client;
3996}
3997
3998void cpu_unregister_map_client(void *_client)
3999{
4000 MapClient *client = (MapClient *)_client;
4001
4002 QLIST_REMOVE(client, link);
4003 qemu_free(client);
4004}
4005
4006static void cpu_notify_map_clients(void)
4007{
4008 MapClient *client;
4009
4010 while (!QLIST_EMPTY(&map_client_list)) {
4011 client = QLIST_FIRST(&map_client_list);
4012 client->callback(client->opaque);
4013 cpu_unregister_map_client(client);
4014 }
4015}
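/*
 * Usage sketch for the map-client hooks: a caller that could not get a
 * mapping (e.g. because the single bounce buffer was busy) registers a
 * callback and retries once cpu_notify_map_clients() fires from the unmap
 * path.  retry_transfer() is a hypothetical caller-side function, not part
 * of this file.
 */
static void example_map_retry_cb(void *opaque)
{
    /* opaque carries whatever state the caller needs to retry, e.g.:
       retry_transfer(opaque);  (hypothetical) */
}

static void *example_register_map_retry(void *transfer_state)
{
    /* The returned handle is only needed if the retry must be cancelled
       with cpu_unregister_map_client() before the callback has run;
       cpu_notify_map_clients() unregisters the client itself. */
    return cpu_register_map_client(transfer_state, example_map_retry_cb);
}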
4016
4017/* Map a physical memory region into a host virtual address.
4018 * May map a subset of the requested range, given by and returned in *plen.
4019 * May return NULL if resources needed to perform the mapping are exhausted.
4020 * Use only for reads OR writes - not for read-modify-write operations.
4021 * Use cpu_register_map_client() to know when retrying the map operation is
4022 * likely to succeed.
4023 */
4024void *cpu_physical_memory_map(target_phys_addr_t addr,
4025 target_phys_addr_t *plen,
4026 int is_write)
4027{
4028 target_phys_addr_t len = *plen;
4029 target_phys_addr_t done = 0;
4030 int l;
4031 uint8_t *ret = NULL;
4032 uint8_t *ptr;
4033 target_phys_addr_t page;
4034 unsigned long pd;
4035 PhysPageDesc *p;
4036 unsigned long addr1;
4037
4038 while (len > 0) {
4039 page = addr & TARGET_PAGE_MASK;
4040 l = (page + TARGET_PAGE_SIZE) - addr;
4041 if (l > len)
4042 l = len;
4043 p = phys_page_find(page >> TARGET_PAGE_BITS);
4044 if (!p) {
4045 pd = IO_MEM_UNASSIGNED;
4046 } else {
4047 pd = p->phys_offset;
4048 }
4049
4050 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
4051 if (done || bounce.buffer) {
4052 break;
4053 }
4054 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
4055 bounce.addr = addr;
4056 bounce.len = l;
4057 if (!is_write) {
4058 cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
4059 }
4060 ptr = bounce.buffer;
4061 } else {
4062 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
4063 ptr = qemu_get_ram_ptr(addr1);
4064 }
4065 if (!done) {
4066 ret = ptr;
4067 } else if (ret + done != ptr) {
4068 break;
4069 }
4070
4071 len -= l;
4072 addr += l;
4073 done += l;
4074 }
4075 *plen = done;
4076 return ret;
4077}
4078
4079/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
4080 * Will also mark the memory as dirty if is_write == 1. access_len gives
4081 * the amount of memory that was actually read or written by the caller.
4082 */
4083void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
4084 int is_write, target_phys_addr_t access_len)
4085{
4086 if (buffer != bounce.buffer) {
4087 if (is_write) {
4088 ram_addr_t addr1 = qemu_ram_addr_from_host(buffer);
4089 while (access_len) {
4090 unsigned l;
4091 l = TARGET_PAGE_SIZE;
4092 if (l > access_len)
4093 l = access_len;
4094 if (!cpu_physical_memory_is_dirty(addr1)) {
4095 /* invalidate code */
4096 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
4097 /* set dirty bit */
4098 cpu_physical_memory_set_dirty_flags(
4099 addr1, (0xff & ~CODE_DIRTY_FLAG));
4100 }
4101 addr1 += l;
4102 access_len -= l;
4103 }
4104 }
4105 return;
4106 }
4107 if (is_write) {
4108 cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
4109 }
4110 qemu_vfree(bounce.buffer);
4111 bounce.buffer = NULL;
4112 cpu_notify_map_clients();
4113}
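/*
 * Usage sketch for the map/unmap pair: map a guest-physical range for a
 * direct read, consume it through the host pointer, then unmap with the
 * number of bytes actually used.  A real caller would fall back to
 * cpu_physical_memory_rw() or to the map-client retry mechanism when NULL
 * or a shortened length comes back; the addresses are illustrative.
 */
static void example_map_and_read(target_phys_addr_t guest_addr,
                                 target_phys_addr_t want_len)
{
    target_phys_addr_t plen = want_len;
    void *host = cpu_physical_memory_map(guest_addr, &plen, 0 /* read */);

    if (!host) {
        return; /* bounce buffer busy: retry via cpu_register_map_client() */
    }
    /* ... read up to 'plen' bytes through 'host' here ... */
    cpu_physical_memory_unmap(host, plen, 0 /* read */, plen);
}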
4114
4115#endif /* !VBOX */
4116
4117/* warning: addr must be aligned */
4118uint32_t ldl_phys(target_phys_addr_t addr)
4119{
4120 int io_index;
4121 uint8_t *ptr;
4122 uint32_t val;
4123 unsigned long pd;
4124 PhysPageDesc *p;
4125
4126 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4127 if (!p) {
4128 pd = IO_MEM_UNASSIGNED;
4129 } else {
4130 pd = p->phys_offset;
4131 }
4132
4133 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
4134 !(pd & IO_MEM_ROMD)) {
4135 /* I/O case */
4136 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
4137 if (p)
4138 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
4139 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
4140 } else {
4141 /* RAM case */
4142#ifndef VBOX
4143 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
4144 (addr & ~TARGET_PAGE_MASK);
4145 val = ldl_p(ptr);
4146#else
4147 val = remR3PhysReadU32((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK)); NOREF(ptr);
4148#endif
4149 }
4150 return val;
4151}
4152
4153/* warning: addr must be aligned */
4154uint64_t ldq_phys(target_phys_addr_t addr)
4155{
4156 int io_index;
4157 uint8_t *ptr;
4158 uint64_t val;
4159 unsigned long pd;
4160 PhysPageDesc *p;
4161
4162 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4163 if (!p) {
4164 pd = IO_MEM_UNASSIGNED;
4165 } else {
4166 pd = p->phys_offset;
4167 }
4168
4169 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
4170 !(pd & IO_MEM_ROMD)) {
4171 /* I/O case */
4172 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
4173 if (p)
4174 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
4175#ifdef TARGET_WORDS_BIGENDIAN
4176 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
4177 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
4178#else
4179 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
4180 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
4181#endif
4182 } else {
4183 /* RAM case */
4184#ifndef VBOX
4185 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
4186 (addr & ~TARGET_PAGE_MASK);
4187 val = ldq_p(ptr);
4188#else
4189 val = remR3PhysReadU64((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK)); NOREF(ptr);
4190#endif
4191 }
4192 return val;
4193}
4194
4195/* XXX: optimize */
4196uint32_t ldub_phys(target_phys_addr_t addr)
4197{
4198 uint8_t val;
4199 cpu_physical_memory_read(addr, &val, 1);
4200 return val;
4201}
4202
4203/* warning: addr must be aligned */
4204uint32_t lduw_phys(target_phys_addr_t addr)
4205{
4206 int io_index;
4207 uint8_t *ptr;
4208 uint64_t val;
4209 unsigned long pd;
4210 PhysPageDesc *p;
4211
4212 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4213 if (!p) {
4214 pd = IO_MEM_UNASSIGNED;
4215 } else {
4216 pd = p->phys_offset;
4217 }
4218
4219 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
4220 !(pd & IO_MEM_ROMD)) {
4221 /* I/O case */
4222 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
4223 if (p)
4224 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
4225 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
4226 } else {
4227 /* RAM case */
4228#ifndef VBOX
4229 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
4230 (addr & ~TARGET_PAGE_MASK);
4231 val = lduw_p(ptr);
4232#else
4233 val = remR3PhysReadU16((pd & TARGET_PAGE_MASK) | (addr & ~TARGET_PAGE_MASK));
4234#endif
4235 }
4236 return val;
4237}
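/*
 * Usage sketch for the fixed-width physical load helpers above: each reads
 * one naturally aligned value, going through the io_mem_read callbacks for
 * MMIO pages and straight to RAM otherwise.  'base' is an arbitrary example
 * of a guest-physical address.
 */
static uint64_t example_phys_loads(target_phys_addr_t base)
{
    uint32_t b = ldub_phys(base);      /* 8-bit load, no alignment required */
    uint32_t w = lduw_phys(base & ~1); /* 16-bit load, 2-byte aligned */
    uint32_t l = ldl_phys(base & ~3);  /* 32-bit load, 4-byte aligned */
    uint64_t q = ldq_phys(base & ~7);  /* 64-bit load, 8-byte aligned */

    return q + l + w + b;
}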
4238
4239/* warning: addr must be aligned. The RAM page is not marked as dirty
4240 and the code inside is not invalidated. This is useful if the dirty
4241 bits are used to track modified PTEs */
4242void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
4243{
4244 int io_index;
4245 uint8_t *ptr;
4246 unsigned long pd;
4247 PhysPageDesc *p;
4248
4249 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4250 if (!p) {
4251 pd = IO_MEM_UNASSIGNED;
4252 } else {
4253 pd = p->phys_offset;
4254 }
4255
4256 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
4257 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
4258 if (p)
4259 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
4260 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
4261 } else {
4262#ifndef VBOX
4263 unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
4264 ptr = qemu_get_ram_ptr(addr1);
4265 stl_p(ptr, val);
4266#else
4267 remR3PhysWriteU32((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK), val); NOREF(ptr);
4268#endif
4269
4270#ifndef VBOX
4271 if (unlikely(in_migration)) {
4272 if (!cpu_physical_memory_is_dirty(addr1)) {
4273 /* invalidate code */
4274 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
4275 /* set dirty bit */
4276 cpu_physical_memory_set_dirty_flags(
4277 addr1, (0xff & ~CODE_DIRTY_FLAG));
4278 }
4279 }
4280#endif /* !VBOX */
4281 }
4282}
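/*
 * Usage sketch for stl_phys_notdirty(): a software page-table walker that
 * sets an "accessed" bit in a guest PTE wants the store to skip the dirty
 * bitmap and TB invalidation, so the page holding the PTE is not reported
 * as modified merely because of the bookkeeping update.  PTE_ACCESSED is a
 * placeholder mask, not a constant defined in this file.
 */
static void example_mark_pte_accessed(target_phys_addr_t pte_addr)
{
    enum { PTE_ACCESSED = 0x20 };      /* placeholder bit for illustration */
    uint32_t pte = ldl_phys(pte_addr);

    if (!(pte & PTE_ACCESSED)) {
        stl_phys_notdirty(pte_addr, pte | PTE_ACCESSED);
    }
}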
4283
4284void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
4285{
4286 int io_index;
4287 uint8_t *ptr;
4288 unsigned long pd;
4289 PhysPageDesc *p;
4290
4291 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4292 if (!p) {
4293 pd = IO_MEM_UNASSIGNED;
4294 } else {
4295 pd = p->phys_offset;
4296 }
4297
4298 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
4299 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
4300 if (p)
4301 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
4302#ifdef TARGET_WORDS_BIGENDIAN
4303 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
4304 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
4305#else
4306 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
4307 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
4308#endif
4309 } else {
4310#ifndef VBOX
4311 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
4312 (addr & ~TARGET_PAGE_MASK);
4313 stq_p(ptr, val);
4314#else
4315 remR3PhysWriteU64((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK), val); NOREF(ptr);
4316#endif
4317 }
4318}
4319
4320/* warning: addr must be aligned */
4321void stl_phys(target_phys_addr_t addr, uint32_t val)
4322{
4323 int io_index;
4324 uint8_t *ptr;
4325 unsigned long pd;
4326 PhysPageDesc *p;
4327
4328 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4329 if (!p) {
4330 pd = IO_MEM_UNASSIGNED;
4331 } else {
4332 pd = p->phys_offset;
4333 }
4334
4335 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
4336 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
4337 if (p)
4338 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
4339 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
4340 } else {
4341 unsigned long addr1;
4342 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
4343 /* RAM case */
4344#ifndef VBOX
4345 ptr = qemu_get_ram_ptr(addr1);
4346 stl_p(ptr, val);
4347#else
4348 remR3PhysWriteU32((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK), val); NOREF(ptr);
4349#endif
4350 if (!cpu_physical_memory_is_dirty(addr1)) {
4351 /* invalidate code */
4352 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
4353 /* set dirty bit */
4354 cpu_physical_memory_set_dirty_flags(addr1,
4355 (0xff & ~CODE_DIRTY_FLAG));
4356 }
4357 }
4358}
4359
4360/* XXX: optimize */
4361void stb_phys(target_phys_addr_t addr, uint32_t val)
4362{
4363 uint8_t v = val;
4364 cpu_physical_memory_write(addr, &v, 1);
4365}
4366
4367/* warning: addr must be aligned */
4368void stw_phys(target_phys_addr_t addr, uint32_t val)
4369{
4370 int io_index;
4371 uint8_t *ptr;
4372 unsigned long pd;
4373 PhysPageDesc *p;
4374
4375 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4376 if (!p) {
4377 pd = IO_MEM_UNASSIGNED;
4378 } else {
4379 pd = p->phys_offset;
4380 }
4381
4382 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
4383 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
4384 if (p)
4385 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
4386 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
4387 } else {
4388 unsigned long addr1;
4389 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
4390 /* RAM case */
4391#ifndef VBOX
4392 ptr = qemu_get_ram_ptr(addr1);
4393 stw_p(ptr, val);
4394#else
4395 remR3PhysWriteU16(addr1, val); NOREF(ptr);
4396#endif
4397 if (!cpu_physical_memory_is_dirty(addr1)) {
4398 /* invalidate code */
4399 tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
4400 /* set dirty bit */
4401 cpu_physical_memory_set_dirty_flags(addr1,
4402 (0xff & ~CODE_DIRTY_FLAG));
4403 }
4404 }
4405}
4406
4407/* XXX: optimize */
4408void stq_phys(target_phys_addr_t addr, uint64_t val)
4409{
4410 val = tswap64(val);
4411 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
4412}
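/*
 * Usage sketch for the physical store helpers above: the mirror image of the
 * loads, with stw_phys()/stl_phys() keeping the dirty bitmap and TB
 * invalidation in sync for RAM pages.  'base' is again an arbitrary example
 * address.
 */
static void example_phys_stores(target_phys_addr_t base)
{
    stb_phys(base, 0x12);                       /* 8-bit store */
    stw_phys(base & ~1, 0x1234);                /* 16-bit, 2-byte aligned */
    stl_phys(base & ~3, 0x12345678);            /* 32-bit, 4-byte aligned */
    stq_phys(base & ~7, 0x1122334455667788ULL); /* 64-bit, 8-byte aligned */
}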
4413
4414#ifndef VBOX
4415/* virtual memory access for debug (includes writing to ROM) */
4416int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
4417 uint8_t *buf, int len, int is_write)
4418{
4419 int l;
4420 target_phys_addr_t phys_addr;
4421 target_ulong page;
4422
4423 while (len > 0) {
4424 page = addr & TARGET_PAGE_MASK;
4425 phys_addr = cpu_get_phys_page_debug(env, page);
4426 /* if no physical page mapped, return an error */
4427 if (phys_addr == -1)
4428 return -1;
4429 l = (page + TARGET_PAGE_SIZE) - addr;
4430 if (l > len)
4431 l = len;
4432 phys_addr += (addr & ~TARGET_PAGE_MASK);
4433 if (is_write)
4434 cpu_physical_memory_write_rom(phys_addr, buf, l);
4435 else
4436 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
4437 len -= l;
4438 buf += l;
4439 addr += l;
4440 }
4441 return 0;
4442}
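/*
 * Usage sketch for cpu_memory_rw_debug(): this is the path a debugger stub
 * takes, since it starts from a guest *virtual* address, resolves it with
 * cpu_get_phys_page_debug() and may even patch ROM (e.g. when planting a
 * breakpoint).  It returns -1 if any page in the range is unmapped.
 */
static int example_read_guest_virtual(CPUState *env, target_ulong vaddr,
                                      uint8_t *buf, int len)
{
    return cpu_memory_rw_debug(env, vaddr, buf, len, 0 /* read */);
}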
4443#endif /* !VBOX */
4444#endif
4445
4446/* in deterministic execution mode, instructions doing device I/O
4447 must be at the end of the TB */
4448void cpu_io_recompile(CPUState *env, void *retaddr)
4449{
4450 TranslationBlock *tb;
4451 uint32_t n, cflags;
4452 target_ulong pc, cs_base;
4453 uint64_t flags;
4454
4455 tb = tb_find_pc((unsigned long)retaddr);
4456 if (!tb) {
4457 cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
4458 retaddr);
4459 }
4460 n = env->icount_decr.u16.low + tb->icount;
4461 cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
4462 /* Calculate how many instructions had been executed before the fault
4463 occurred. */
4464 n = n - env->icount_decr.u16.low;
4465 /* Generate a new TB ending on the I/O insn. */
4466 n++;
4467 /* On MIPS and SH, delay slot instructions can only be restarted if
4468 they were already the first instruction in the TB. If this is not
4469 the first instruction in a TB then re-execute the preceding
4470 branch. */
4471#if defined(TARGET_MIPS)
4472 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
4473 env->active_tc.PC -= 4;
4474 env->icount_decr.u16.low++;
4475 env->hflags &= ~MIPS_HFLAG_BMASK;
4476 }
4477#elif defined(TARGET_SH4)
4478 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
4479 && n > 1) {
4480 env->pc -= 2;
4481 env->icount_decr.u16.low++;
4482 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
4483 }
4484#endif
4485 /* This should never happen. */
4486 if (n > CF_COUNT_MASK)
4487 cpu_abort(env, "TB too big during recompile");
4488
4489 cflags = n | CF_LAST_IO;
4490 pc = tb->pc;
4491 cs_base = tb->cs_base;
4492 flags = tb->flags;
4493 tb_phys_invalidate(tb, -1);
4494 /* FIXME: In theory this could raise an exception. In practice
4495 we have already translated the block once so it's probably ok. */
4496 tb_gen_code(env, pc, cs_base, flags, cflags);
4497 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
4498 the first in the TB) then we end up generating a whole new TB and
4499 repeating the fault, which is horribly inefficient.
4500 Better would be to execute just this insn uncached, or generate a
4501 second new TB. */
4502 cpu_resume_from_signal(env, NULL);
4503}
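/*
 * Worked example of the instruction counting above (numbers illustrative
 * only): assume tb->icount = 5 and icount_decr.u16.low reads 12 when the
 * I/O helper traps, so the counter at TB entry was n = 12 + 5 = 17.  After
 * cpu_restore_state() the counter reflects the faulting instruction, say
 * 14, so 17 - 14 = 3 instructions completed before it; n++ gives 4, and
 * the block is regenerated with CF_LAST_IO so that this fourth,
 * I/O-performing instruction ends the new TB.
 */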
4504
4505#if !defined(CONFIG_USER_ONLY)
4506
4507#ifndef VBOX
4508void dump_exec_info(FILE *f,
4509 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
4510{
4511 int i, target_code_size, max_target_code_size;
4512 int direct_jmp_count, direct_jmp2_count, cross_page;
4513 TranslationBlock *tb;
4514
4515 target_code_size = 0;
4516 max_target_code_size = 0;
4517 cross_page = 0;
4518 direct_jmp_count = 0;
4519 direct_jmp2_count = 0;
4520 for(i = 0; i < nb_tbs; i++) {
4521 tb = &tbs[i];
4522 target_code_size += tb->size;
4523 if (tb->size > max_target_code_size)
4524 max_target_code_size = tb->size;
4525 if (tb->page_addr[1] != -1)
4526 cross_page++;
4527 if (tb->tb_next_offset[0] != 0xffff) {
4528 direct_jmp_count++;
4529 if (tb->tb_next_offset[1] != 0xffff) {
4530 direct_jmp2_count++;
4531 }
4532 }
4533 }
4534 /* XXX: avoid using doubles? */
4535 cpu_fprintf(f, "Translation buffer state:\n");
4536 cpu_fprintf(f, "gen code size %ld/%ld\n",
4537 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
4538 cpu_fprintf(f, "TB count %d/%d\n",
4539 nb_tbs, code_gen_max_blocks);
4540 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
4541 nb_tbs ? target_code_size / nb_tbs : 0,
4542 max_target_code_size);
4543 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
4544 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
4545 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
4546 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
4547 cross_page,
4548 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
4549 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
4550 direct_jmp_count,
4551 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
4552 direct_jmp2_count,
4553 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
4554 cpu_fprintf(f, "\nStatistics:\n");
4555 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
4556 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
4557 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
4558 tcg_dump_info(f, cpu_fprintf);
4559}
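/*
 * Usage sketch for dump_exec_info(): the second argument is any
 * fprintf-compatible callback, so dumping the translation statistics to
 * stderr is just the call below (the QEMU monitor passes its own printer
 * instead of fprintf).
 */
static void example_dump_exec_info(void)
{
    dump_exec_info(stderr, fprintf);
}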
4560#endif /* !VBOX */
4561
4562#define MMUSUFFIX _cmmu
4563#define GETPC() NULL
4564#define env cpu_single_env
4565#define SOFTMMU_CODE_ACCESS
4566
4567#define SHIFT 0
4568#include "softmmu_template.h"
4569
4570#define SHIFT 1
4571#include "softmmu_template.h"
4572
4573#define SHIFT 2
4574#include "softmmu_template.h"
4575
4576#define SHIFT 3
4577#include "softmmu_template.h"
4578
4579#undef env
4580
4581#endif