VirtualBox

source: vbox/trunk/src/recompiler_new/exec.c@ 18597

Last change on this file since 18597 was 18597, checked in by vboxsync, 16 years ago

REM: Added a l0 map for PageDesc, this should fix the performance issue if our hunch is right about the cause. Profile tb_flush.

  • Property svn:eol-style set to native
File size: 114.6 KB
1/*
2 * virtual page mapping and translated block handling
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21/*
22 * Sun LGPL Disclaimer: For the avoidance of doubt, except that if any license choice
23 * other than GPL or LGPL is available it will apply instead, Sun elects to use only
24 * the Lesser General Public License version 2.1 (LGPLv2) at this time for any software where
25 * a choice of LGPL license versions is made available with the language indicating
26 * that LGPLv2 or any later version may be used, or where a choice of which version
27 * of the LGPL is applied is otherwise unspecified.
28 */
29#include "config.h"
30#ifndef VBOX
31#ifdef _WIN32
32#include <windows.h>
33#else
34#include <sys/types.h>
35#include <sys/mman.h>
36#endif
37#include <stdlib.h>
38#include <stdio.h>
39#include <stdarg.h>
40#include <string.h>
41#include <errno.h>
42#include <unistd.h>
43#include <inttypes.h>
44#else /* VBOX */
45# include <stdlib.h>
46# include <stdio.h>
47# include <iprt/alloc.h>
48# include <iprt/string.h>
49# include <iprt/param.h>
50# include <VBox/pgm.h> /* PGM_DYNAMIC_RAM_ALLOC */
51#endif /* VBOX */
52
53#include "cpu.h"
54#include "exec-all.h"
55#if defined(CONFIG_USER_ONLY)
56#include <qemu.h>
57#endif
58
59//#define DEBUG_TB_INVALIDATE
60//#define DEBUG_FLUSH
61//#define DEBUG_TLB
62//#define DEBUG_UNASSIGNED
63
64/* make various TB consistency checks */
65//#define DEBUG_TB_CHECK
66//#define DEBUG_TLB_CHECK
67
68#if !defined(CONFIG_USER_ONLY)
69/* TB consistency checks only implemented for usermode emulation. */
70#undef DEBUG_TB_CHECK
71#endif
72
73#define SMC_BITMAP_USE_THRESHOLD 10
74
75#define MMAP_AREA_START 0x00000000
76#define MMAP_AREA_END 0xa8000000
77
78#if defined(TARGET_SPARC64)
79#define TARGET_PHYS_ADDR_SPACE_BITS 41
80#elif defined(TARGET_SPARC)
81#define TARGET_PHYS_ADDR_SPACE_BITS 36
82#elif defined(TARGET_ALPHA)
83#define TARGET_PHYS_ADDR_SPACE_BITS 42
84#define TARGET_VIRT_ADDR_SPACE_BITS 42
85#elif defined(TARGET_PPC64)
86#define TARGET_PHYS_ADDR_SPACE_BITS 42
87#elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
88#define TARGET_PHYS_ADDR_SPACE_BITS 42
89#elif defined(TARGET_I386) && !defined(USE_KQEMU)
90#define TARGET_PHYS_ADDR_SPACE_BITS 36
91#else
92/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
93#define TARGET_PHYS_ADDR_SPACE_BITS 32
94#endif
95
96static TranslationBlock *tbs;
97int code_gen_max_blocks;
98TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
99static int nb_tbs;
100/* any access to the tbs or the page table must use this lock */
101spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
102
103#ifndef VBOX
104#if defined(__arm__) || defined(__sparc_v9__)
105/* The prologue must be reachable with a direct jump. ARM and Sparc64
106 have limited branch ranges (possibly also PPC) so place it in a
107 section close to the code segment. */
108#define code_gen_section \
109 __attribute__((__section__(".gen_code"))) \
110 __attribute__((aligned (32)))
111#else
112#define code_gen_section \
113 __attribute__((aligned (32)))
114#endif
115uint8_t code_gen_prologue[1024] code_gen_section;
116
117#else /* VBOX */
118extern uint8_t* code_gen_prologue;
119#endif /* VBOX */
120
121static uint8_t *code_gen_buffer;
122static unsigned long code_gen_buffer_size;
123/* threshold to flush the translated code buffer */
124static unsigned long code_gen_buffer_max_size;
125uint8_t *code_gen_ptr;
126
127#ifndef VBOX
128#if !defined(CONFIG_USER_ONLY)
129ram_addr_t phys_ram_size;
130int phys_ram_fd;
131uint8_t *phys_ram_base;
132uint8_t *phys_ram_dirty;
133static int in_migration;
134static ram_addr_t phys_ram_alloc_offset = 0;
135#endif
136#else /* VBOX */
137RTGCPHYS phys_ram_size;
138/* we have memory ranges (the high PC-BIOS mapping) which
139 cause some pages to fall outside the dirty map here. */
140RTGCPHYS phys_ram_dirty_size;
141#endif /* VBOX */
142#if !defined(VBOX)
143uint8_t *phys_ram_base;
144#endif
145uint8_t *phys_ram_dirty;
146
147CPUState *first_cpu;
148/* current CPU in the current thread. It is only valid inside
149 cpu_exec() */
150CPUState *cpu_single_env;
151/* 0 = Do not count executed instructions.
152 1 = Precise instruction counting.
153 2 = Adaptive rate instruction counting. */
154int use_icount = 0;
155/* Current instruction counter. While executing translated code this may
156 include some instructions that have not yet been executed. */
157int64_t qemu_icount;
158
159typedef struct PageDesc {
160 /* list of TBs intersecting this ram page */
161 TranslationBlock *first_tb;
162 /* in order to optimize self modifying code, we count the number
163 of lookups we do to a given page to use a bitmap */
164 unsigned int code_write_count;
165 uint8_t *code_bitmap;
166#if defined(CONFIG_USER_ONLY)
167 unsigned long flags;
168#endif
169} PageDesc;
170
171typedef struct PhysPageDesc {
172 /* offset in host memory of the page + io_index in the low 12 bits */
173 ram_addr_t phys_offset;
174} PhysPageDesc;
175
176#define L2_BITS 10
177#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
178/* XXX: this is a temporary hack for the alpha target.
179 * In the future, this is to be replaced by a multi-level table
180 * to actually be able to handle the complete 64-bit address space.
181 */
182#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
183#else
184#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
185#endif
186#ifdef VBOX
187/* Note: Not for PhysPageDesc, only to speed up page_flush_tb. */
188#define L0_BITS (TARGET_PHYS_ADDR_SPACE_BITS - 32)
189#endif
190
191#ifdef VBOX
192#define L0_SIZE (1 << L0_BITS)
193#endif
194#define L1_SIZE (1 << L1_BITS)
195#define L2_SIZE (1 << L2_BITS)
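/* Illustrative arithmetic (assuming the usual 4 KB target pages, i.e.
 * TARGET_PAGE_BITS == 12): L1_BITS = 32 - 10 - 12 = 10, so both L1_SIZE and
 * L2_SIZE are 1024 entries. For an i386 guest with
 * TARGET_PHYS_ADDR_SPACE_BITS == 36 (see above), L0_BITS = 4 and L0_SIZE = 16.
 * A page index is then split as
 *   i0 = index >> (L1_BITS + L2_BITS);        / * l0_map slot (VBox only) * /
 *   i1 = (index >> L2_BITS) & (L1_SIZE - 1);  / * l1_map slot * /
 *   i2 = index & (L2_SIZE - 1);               / * PageDesc within the chunk * /
 * which is exactly the decomposition used by page_l1_map() and
 * page_find_alloc() below.
 */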
196
197static void io_mem_init(void);
198
199unsigned long qemu_real_host_page_size;
200unsigned long qemu_host_page_bits;
201unsigned long qemu_host_page_size;
202unsigned long qemu_host_page_mask;
203
204/* XXX: for system emulation, it could just be an array */
205#ifndef VBOX
206static PageDesc *l1_map[L1_SIZE];
207#else
208static unsigned l0_map_max_used = 0;
209static PageDesc **l0_map[L0_SIZE];
210#endif
211static PhysPageDesc **l1_phys_map;
212
213#if !defined(CONFIG_USER_ONLY)
214static void io_mem_init(void);
215
216/* io memory support */
217CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
218CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
219void *io_mem_opaque[IO_MEM_NB_ENTRIES];
220static int io_mem_nb;
221static int io_mem_watch;
222#endif
223
224#ifndef VBOX
225/* log support */
226static const char *logfilename = "/tmp/qemu.log";
227#endif /* !VBOX */
228FILE *logfile;
229int loglevel;
230#ifndef VBOX
231static int log_append = 0;
232#endif
233
234/* statistics */
235#ifndef VBOX
236static int tlb_flush_count;
237static int tb_flush_count;
238static int tb_phys_invalidate_count;
239#else /* VBOX - Resettable U32 stats, see VBoxRecompiler.c. */
240uint32_t tlb_flush_count;
241uint32_t tb_flush_count;
242uint32_t tb_phys_invalidate_count;
243#endif /* VBOX */
244
245#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
246typedef struct subpage_t {
247 target_phys_addr_t base;
248 CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
249 CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
250 void *opaque[TARGET_PAGE_SIZE][2][4];
251} subpage_t;
252
253
254#ifndef VBOX
255#ifdef _WIN32
256static void map_exec(void *addr, long size)
257{
258 DWORD old_protect;
259 VirtualProtect(addr, size,
260 PAGE_EXECUTE_READWRITE, &old_protect);
261
262}
263#else
264static void map_exec(void *addr, long size)
265{
266 unsigned long start, end, page_size;
267
268 page_size = getpagesize();
269 start = (unsigned long)addr;
270 start &= ~(page_size - 1);
271
272 end = (unsigned long)addr + size;
273 end += page_size - 1;
274 end &= ~(page_size - 1);
275
276 mprotect((void *)start, end - start,
277 PROT_READ | PROT_WRITE | PROT_EXEC);
278}
279#endif
280#else // VBOX
281static void map_exec(void *addr, long size)
282{
283 RTMemProtect(addr, size,
284 RTMEM_PROT_EXEC | RTMEM_PROT_READ | RTMEM_PROT_WRITE);
285}
286#endif
287
288static void page_init(void)
289{
290 /* NOTE: we can always suppose that qemu_host_page_size >=
291 TARGET_PAGE_SIZE */
292#ifdef VBOX
293 RTMemProtect(code_gen_buffer, sizeof(code_gen_buffer),
294 RTMEM_PROT_EXEC | RTMEM_PROT_READ | RTMEM_PROT_WRITE);
295 qemu_real_host_page_size = PAGE_SIZE;
296#else /* !VBOX */
297#ifdef _WIN32
298 {
299 SYSTEM_INFO system_info;
300 DWORD old_protect;
301
302 GetSystemInfo(&system_info);
303 qemu_real_host_page_size = system_info.dwPageSize;
304 }
305#else
306 qemu_real_host_page_size = getpagesize();
307#endif
308#endif /* !VBOX */
309
310 if (qemu_host_page_size == 0)
311 qemu_host_page_size = qemu_real_host_page_size;
312 if (qemu_host_page_size < TARGET_PAGE_SIZE)
313 qemu_host_page_size = TARGET_PAGE_SIZE;
314 qemu_host_page_bits = 0;
315#ifndef VBOX
316 while ((1 << qemu_host_page_bits) < qemu_host_page_size)
317#else
318 while ((1 << qemu_host_page_bits) < (int)qemu_host_page_size)
319#endif
320 qemu_host_page_bits++;
321 qemu_host_page_mask = ~(qemu_host_page_size - 1);
322 l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
323 memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
324#ifdef VBOX
325 /* We use other means to set the reserved bit on our pages */
326#else
327#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
328 {
329 long long startaddr, endaddr;
330 FILE *f;
331 int n;
332
333 mmap_lock();
334 last_brk = (unsigned long)sbrk(0);
335 f = fopen("/proc/self/maps", "r");
336 if (f) {
337 do {
338 n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
339 if (n == 2) {
340 startaddr = MIN(startaddr,
341 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
342 endaddr = MIN(endaddr,
343 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
344 page_set_flags(startaddr & TARGET_PAGE_MASK,
345 TARGET_PAGE_ALIGN(endaddr),
346 PAGE_RESERVED);
347 }
348 } while (!feof(f));
349 fclose(f);
350 }
351 mmap_unlock();
352 }
353#endif
354#endif
355}
356
357#ifndef VBOX
358static inline PageDesc **page_l1_map(target_ulong index)
359#else
360DECLINLINE(PageDesc **) page_l1_map(target_ulong index)
361#endif
362{
363#ifndef VBOX
364#if TARGET_LONG_BITS > 32
365 /* Host memory outside guest VM. For 32-bit targets we have already
366 excluded high addresses. */
367 if (index > ((target_ulong)L2_SIZE * L1_SIZE))
368 return NULL;
369#endif
370 return &l1_map[index >> L2_BITS];
371#else /* VBOX */
372 PageDesc **l1_map;
373 AssertMsgReturn(index < (target_ulong)L2_SIZE * L1_SIZE * L0_SIZE,
374 ("index=%RGp >= %RGp; L1_SIZE=%#x L2_SIZE=%#x L0_SIZE=%#x\n",
375 (RTGCPHYS)index, (RTGCPHYS)L2_SIZE * L1_SIZE * L0_SIZE, L1_SIZE, L2_SIZE, L0_SIZE),
376 NULL);
377 l1_map = l0_map[index >> (L1_BITS + L2_BITS)];
378 if (RT_UNLIKELY(!l1_map))
379 {
380 unsigned i0 = index >> (L1_BITS + L2_BITS);
381 l0_map[i0] = l1_map = qemu_mallocz(sizeof(PageDesc *) * L1_SIZE);
382 if (RT_UNLIKELY(!l1_map))
383 return NULL;
384 if (i0 >= l0_map_max_used)
385 l0_map_max_used = i0 + 1;
386 }
387 return &l1_map[(index >> L2_BITS) & (L1_SIZE - 1)];
388#endif /* VBOX */
389}
390
391#ifndef VBOX
392static inline PageDesc *page_find_alloc(target_ulong index)
393#else
394DECLINLINE(PageDesc *) page_find_alloc(target_ulong index)
395#endif
396{
397 PageDesc **lp, *p;
398 lp = page_l1_map(index);
399 if (!lp)
400 return NULL;
401
402 p = *lp;
403 if (!p) {
404 /* allocate if not found */
405#if defined(CONFIG_USER_ONLY)
406 unsigned long addr;
407 size_t len = sizeof(PageDesc) * L2_SIZE;
408 /* Don't use qemu_malloc because it may recurse. */
409 p = mmap(0, len, PROT_READ | PROT_WRITE,
410 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
411 *lp = p;
412 addr = h2g(p);
413 if (addr == (target_ulong)addr) {
414 page_set_flags(addr & TARGET_PAGE_MASK,
415 TARGET_PAGE_ALIGN(addr + len),
416 PAGE_RESERVED);
417 }
418#else
419 p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
420 *lp = p;
421#endif
422 }
423 return p + (index & (L2_SIZE - 1));
424}
425
426#ifndef VBOX
427static inline PageDesc *page_find(target_ulong index)
428#else
429DECLINLINE(PageDesc *) page_find(target_ulong index)
430#endif
431{
432 PageDesc **lp, *p;
433 lp = page_l1_map(index);
434 if (!lp)
435 return NULL;
436
437 p = *lp;
438 if (!p)
439 return 0;
440 return p + (index & (L2_SIZE - 1));
441}
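/* Typical call pattern (as used throughout this file): callers index these
 * tables by target page number, e.g.
 *   PageDesc *p = page_find(addr >> TARGET_PAGE_BITS);
 * page_find() returns NULL when no TB has touched the page yet, while
 * page_find_alloc() creates the L2 chunk of PageDescs on demand.
 */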
442
443static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
444{
445 void **lp, **p;
446 PhysPageDesc *pd;
447
448 p = (void **)l1_phys_map;
449#if TARGET_PHYS_ADDR_SPACE_BITS > 32
450
451#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
452#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
453#endif
454 lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
455 p = *lp;
456 if (!p) {
457 /* allocate if not found */
458 if (!alloc)
459 return NULL;
460 p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
461 memset(p, 0, sizeof(void *) * L1_SIZE);
462 *lp = p;
463 }
464#endif
465 lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
466 pd = *lp;
467 if (!pd) {
468 int i;
469 /* allocate if not found */
470 if (!alloc)
471 return NULL;
472 pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
473 *lp = pd;
474 for (i = 0; i < L2_SIZE; i++)
475 pd[i].phys_offset = IO_MEM_UNASSIGNED;
476 }
477#if defined(VBOX) && !defined(VBOX_WITH_NEW_PHYS_CODE)
478 pd = ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
479 if (RT_UNLIKELY((pd->phys_offset & ~TARGET_PAGE_MASK) == IO_MEM_RAM_MISSING))
480 remR3GrowDynRange(pd->phys_offset & TARGET_PAGE_MASK);
481 return pd;
482#else
483 return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
484#endif
485}
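/* Note on the VBox-only branch in phys_page_find_alloc() above: with the old
 * dynamic RAM allocation scheme (!VBOX_WITH_NEW_PHYS_CODE), a phys_offset
 * tagged IO_MEM_RAM_MISSING marks a RAM chunk whose backing has not been
 * allocated yet, so remR3GrowDynRange() is asked to grow the dynamic range
 * before the descriptor is handed back to the caller.
 */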
486
487#ifndef VBOX
488static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
489#else
490DECLINLINE(PhysPageDesc *) phys_page_find(target_phys_addr_t index)
491#endif
492{
493 return phys_page_find_alloc(index, 0);
494}
495
496#if !defined(CONFIG_USER_ONLY)
497static void tlb_protect_code(ram_addr_t ram_addr);
498static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
499 target_ulong vaddr);
500#define mmap_lock() do { } while(0)
501#define mmap_unlock() do { } while(0)
502#endif
503
504#ifdef VBOX
505/*
506 * We don't need such a huge codegen buffer, as we execute most of the
507 * code in raw or hwacc mode.
508 */
509#define DEFAULT_CODE_GEN_BUFFER_SIZE (8 * 1024 * 1024)
510#else
511#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
512#endif
513
514#if defined(CONFIG_USER_ONLY)
515/* Currently it is not recommended to allocate big chunks of data in
516 user mode. This will change when a dedicated libc is used. */
517#define USE_STATIC_CODE_GEN_BUFFER
518#endif
519
520/* VBox allocates codegen buffer dynamically */
521#ifndef VBOX
522#ifdef USE_STATIC_CODE_GEN_BUFFER
523static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
524#endif
525#endif
526
527static void code_gen_alloc(unsigned long tb_size)
528{
529#ifdef USE_STATIC_CODE_GEN_BUFFER
530 code_gen_buffer = static_code_gen_buffer;
531 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
532 map_exec(code_gen_buffer, code_gen_buffer_size);
533#else
534#ifdef VBOX
535 /* We cannot use phys_ram_size here, as it's still 0 at this point;
536 * it only gets initialized once the RAM registration callback
537 * (REMR3NotifyPhysRamRegister()) has been called.
538 */
539 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
540#else
541 code_gen_buffer_size = tb_size;
542 if (code_gen_buffer_size == 0) {
543#if defined(CONFIG_USER_ONLY)
544 /* in user mode, phys_ram_size is not meaningful */
545 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
546#else
547 /* XXX: needs adjustments */
548 code_gen_buffer_size = (unsigned long)(phys_ram_size / 4);
549#endif
550
551 }
552 if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
553 code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
554#endif /* VBOX */
555
556 /* The code gen buffer location may have constraints depending on
557 the host cpu and OS */
558#ifdef VBOX
559 code_gen_buffer = RTMemExecAlloc(code_gen_buffer_size);
560
561 if (!code_gen_buffer) {
562 LogRel(("REM: failed allocate codegen buffer %lld\n",
563 code_gen_buffer_size));
564 return;
565 }
566#else //!VBOX
567#if defined(__linux__)
568 {
569 int flags;
570 void *start = NULL;
571
572 flags = MAP_PRIVATE | MAP_ANONYMOUS;
573#if defined(__x86_64__)
574 flags |= MAP_32BIT;
575 /* Cannot map more than that */
576 if (code_gen_buffer_size > (800 * 1024 * 1024))
577 code_gen_buffer_size = (800 * 1024 * 1024);
578#elif defined(__sparc_v9__)
579 // Map the buffer below 2G, so we can use direct calls and branches
580 flags |= MAP_FIXED;
581 start = (void *) 0x60000000UL;
582 if (code_gen_buffer_size > (512 * 1024 * 1024))
583 code_gen_buffer_size = (512 * 1024 * 1024);
584#endif
585 code_gen_buffer = mmap(start, code_gen_buffer_size,
586 PROT_WRITE | PROT_READ | PROT_EXEC,
587 flags, -1, 0);
588 if (code_gen_buffer == MAP_FAILED) {
589 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
590 exit(1);
591 }
592 }
593#elif defined(__FreeBSD__)
594 {
595 int flags;
596 void *addr = NULL;
597 flags = MAP_PRIVATE | MAP_ANONYMOUS;
598#if defined(__x86_64__)
599 /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
600 * 0x40000000 is free */
601 flags |= MAP_FIXED;
602 addr = (void *)0x40000000;
603 /* Cannot map more than that */
604 if (code_gen_buffer_size > (800 * 1024 * 1024))
605 code_gen_buffer_size = (800 * 1024 * 1024);
606#endif
607 code_gen_buffer = mmap(addr, code_gen_buffer_size,
608 PROT_WRITE | PROT_READ | PROT_EXEC,
609 flags, -1, 0);
610 if (code_gen_buffer == MAP_FAILED) {
611 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
612 exit(1);
613 }
614 }
615#else
616 code_gen_buffer = qemu_malloc(code_gen_buffer_size);
617 if (!code_gen_buffer) {
618 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
619 exit(1);
620 }
621 map_exec(code_gen_buffer, code_gen_buffer_size);
622#endif
623 map_exec(code_gen_prologue, sizeof(code_gen_prologue));
624#endif /* !VBOX */
625#endif /* !USE_STATIC_CODE_GEN_BUFFER */
626#ifndef VBOX
627 map_exec(code_gen_prologue, sizeof(code_gen_prologue));
628#else
629 map_exec(code_gen_prologue, _1K);
630#endif
631
632 code_gen_buffer_max_size = code_gen_buffer_size -
633 code_gen_max_block_size();
634 code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
635 tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
636}
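/* Rough sizing sketch (the real constants live in the exec headers): starting
 * from the 8 MB VBox DEFAULT_CODE_GEN_BUFFER_SIZE above,
 * code_gen_buffer_max_size leaves room for one maximum-sized block at the end
 * of the buffer, and code_gen_max_blocks is code_gen_buffer_size divided by
 * CODE_GEN_AVG_BLOCK_SIZE entries in the preallocated tbs[] array. Hitting
 * either limit makes tb_alloc() return NULL, which forces a full tb_flush().
 */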
637
638/* Must be called before using the QEMU cpus. 'tb_size' is the size
639 (in bytes) allocated to the translation buffer. Zero means default
640 size. */
641void cpu_exec_init_all(unsigned long tb_size)
642{
643 cpu_gen_init();
644 code_gen_alloc(tb_size);
645 code_gen_ptr = code_gen_buffer;
646 page_init();
647#if !defined(CONFIG_USER_ONLY)
648 io_mem_init();
649#endif
650}
651
652#ifndef VBOX
653#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
654
655#define CPU_COMMON_SAVE_VERSION 1
656
657static void cpu_common_save(QEMUFile *f, void *opaque)
658{
659 CPUState *env = opaque;
660
661 qemu_put_be32s(f, &env->halted);
662 qemu_put_be32s(f, &env->interrupt_request);
663}
664
665static int cpu_common_load(QEMUFile *f, void *opaque, int version_id)
666{
667 CPUState *env = opaque;
668
669 if (version_id != CPU_COMMON_SAVE_VERSION)
670 return -EINVAL;
671
672 qemu_get_be32s(f, &env->halted);
673 qemu_get_be32s(f, &env->interrupt_request);
674 tlb_flush(env, 1);
675
676 return 0;
677}
678#endif
679#endif //!VBOX
680
681void cpu_exec_init(CPUState *env)
682{
683 CPUState **penv;
684 int cpu_index;
685
686 env->next_cpu = NULL;
687 penv = &first_cpu;
688 cpu_index = 0;
689 while (*penv != NULL) {
690 penv = (CPUState **)&(*penv)->next_cpu;
691 cpu_index++;
692 }
693 env->cpu_index = cpu_index;
694 env->nb_watchpoints = 0;
695 *penv = env;
696#ifndef VBOX
697#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
698 register_savevm("cpu_common", cpu_index, CPU_COMMON_SAVE_VERSION,
699 cpu_common_save, cpu_common_load, env);
700 register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
701 cpu_save, cpu_load, env);
702#endif
703#endif // !VBOX
704}
705
706#ifndef VBOX
707static inline void invalidate_page_bitmap(PageDesc *p)
708#else
709DECLINLINE(void) invalidate_page_bitmap(PageDesc *p)
710#endif
711{
712 if (p->code_bitmap) {
713 qemu_free(p->code_bitmap);
714 p->code_bitmap = NULL;
715 }
716 p->code_write_count = 0;
717}
718
719/* set to NULL all the 'first_tb' fields in all PageDescs */
720static void page_flush_tb(void)
721{
722 int i, j;
723 PageDesc *p;
724#ifdef VBOX
725 int k;
726#endif
727
728#ifdef VBOX
729 k = l0_map_max_used;
730 while (k-- > 0) {
731 PageDesc **l1_map = l0_map[k];
732 if (l1_map) {
733#endif
734 for(i = 0; i < L1_SIZE; i++) {
735 p = l1_map[i];
736 if (p) {
737 for(j = 0; j < L2_SIZE; j++) {
738 p->first_tb = NULL;
739 invalidate_page_bitmap(p);
740 p++;
741 }
742 }
743 }
744#ifdef VBOX
745 }
746 }
747#endif
748}
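/* The VBox-only l0 walk above only visits the l1 chunks that were actually
 * allocated (indices below l0_map_max_used), rather than walking every
 * possible slot of a map sized for the whole physical address space. This is
 * the l0 map mentioned in the changeset message at the top of this page; the
 * aim is to keep tb_flush(), which calls page_flush_tb() on every code-cache
 * flush, cheap.
 */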
749
750/* flush all the translation blocks */
751/* XXX: tb_flush is currently not thread safe */
752void tb_flush(CPUState *env1)
753{
754 CPUState *env;
755#ifdef VBOX
756 STAM_PROFILE_START(&env1->StatTbFlush, a);
757#endif
758#if defined(DEBUG_FLUSH)
759 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
760 (unsigned long)(code_gen_ptr - code_gen_buffer),
761 nb_tbs, nb_tbs > 0 ?
762 ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
763#endif
764 if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
765 cpu_abort(env1, "Internal error: code buffer overflow\n");
766
767 nb_tbs = 0;
768
769 for(env = first_cpu; env != NULL; env = env->next_cpu) {
770 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
771 }
772
773 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
774 page_flush_tb();
775
776 code_gen_ptr = code_gen_buffer;
777 /* XXX: flush processor icache at this point if cache flush is
778 expensive */
779 tb_flush_count++;
780#ifdef VBOX
781 STAM_PROFILE_STOP(&env1->StatTbFlush, a);
782#endif
783}
784
785#ifdef DEBUG_TB_CHECK
786static void tb_invalidate_check(target_ulong address)
787{
788 TranslationBlock *tb;
789 int i;
790 address &= TARGET_PAGE_MASK;
791 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
792 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
793 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
794 address >= tb->pc + tb->size)) {
795 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
796 address, (long)tb->pc, tb->size);
797 }
798 }
799 }
800}
801
802/* verify that all the pages have correct rights for code */
803static void tb_page_check(void)
804{
805 TranslationBlock *tb;
806 int i, flags1, flags2;
807
808 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
809 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
810 flags1 = page_get_flags(tb->pc);
811 flags2 = page_get_flags(tb->pc + tb->size - 1);
812 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
813 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
814 (long)tb->pc, tb->size, flags1, flags2);
815 }
816 }
817 }
818}
819
820static void tb_jmp_check(TranslationBlock *tb)
821{
822 TranslationBlock *tb1;
823 unsigned int n1;
824
825 /* suppress any remaining jumps to this TB */
826 tb1 = tb->jmp_first;
827 for(;;) {
828 n1 = (long)tb1 & 3;
829 tb1 = (TranslationBlock *)((long)tb1 & ~3);
830 if (n1 == 2)
831 break;
832 tb1 = tb1->jmp_next[n1];
833 }
834 /* check end of list */
835 if (tb1 != tb) {
836 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
837 }
838}
839#endif // DEBUG_TB_CHECK
840
841/* invalidate one TB */
842#ifndef VBOX
843static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
844 int next_offset)
845#else
846DECLINLINE(void) tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
847 int next_offset)
848#endif
849{
850 TranslationBlock *tb1;
851 for(;;) {
852 tb1 = *ptb;
853 if (tb1 == tb) {
854 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
855 break;
856 }
857 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
858 }
859}
860
861#ifndef VBOX
862static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
863#else
864DECLINLINE(void) tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
865#endif
866{
867 TranslationBlock *tb1;
868 unsigned int n1;
869
870 for(;;) {
871 tb1 = *ptb;
872 n1 = (long)tb1 & 3;
873 tb1 = (TranslationBlock *)((long)tb1 & ~3);
874 if (tb1 == tb) {
875 *ptb = tb1->page_next[n1];
876 break;
877 }
878 ptb = &tb1->page_next[n1];
879 }
880}
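/* List-pointer encoding used above and below: the two low bits of a
 * TranslationBlock pointer stored in page_next[]/jmp_next[]/jmp_first are a
 * tag, not part of the address. n == 0 or 1 selects which of the TB's (up to)
 * two physical pages, respectively which of its two jump slots, the link
 * belongs to, and 2 marks the list head (see "(long)tb | 2" in
 * tb_link_phys() and tb_phys_invalidate()). Hence the recurring idiom
 *   n1 = (long)tb1 & 3;
 *   tb1 = (TranslationBlock *)((long)tb1 & ~3);
 */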
881
882#ifndef VBOX
883static inline void tb_jmp_remove(TranslationBlock *tb, int n)
884#else
885DECLINLINE(void) tb_jmp_remove(TranslationBlock *tb, int n)
886#endif
887{
888 TranslationBlock *tb1, **ptb;
889 unsigned int n1;
890
891 ptb = &tb->jmp_next[n];
892 tb1 = *ptb;
893 if (tb1) {
894 /* find tb(n) in circular list */
895 for(;;) {
896 tb1 = *ptb;
897 n1 = (long)tb1 & 3;
898 tb1 = (TranslationBlock *)((long)tb1 & ~3);
899 if (n1 == n && tb1 == tb)
900 break;
901 if (n1 == 2) {
902 ptb = &tb1->jmp_first;
903 } else {
904 ptb = &tb1->jmp_next[n1];
905 }
906 }
907 /* now we can suppress tb(n) from the list */
908 *ptb = tb->jmp_next[n];
909
910 tb->jmp_next[n] = NULL;
911 }
912}
913
914/* reset the jump entry 'n' of a TB so that it is not chained to
915 another TB */
916#ifndef VBOX
917static inline void tb_reset_jump(TranslationBlock *tb, int n)
918#else
919DECLINLINE(void) tb_reset_jump(TranslationBlock *tb, int n)
920#endif
921{
922 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
923}
924
925void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
926{
927 CPUState *env;
928 PageDesc *p;
929 unsigned int h, n1;
930 target_phys_addr_t phys_pc;
931 TranslationBlock *tb1, *tb2;
932
933 /* remove the TB from the hash list */
934 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
935 h = tb_phys_hash_func(phys_pc);
936 tb_remove(&tb_phys_hash[h], tb,
937 offsetof(TranslationBlock, phys_hash_next));
938
939 /* remove the TB from the page list */
940 if (tb->page_addr[0] != page_addr) {
941 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
942 tb_page_remove(&p->first_tb, tb);
943 invalidate_page_bitmap(p);
944 }
945 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
946 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
947 tb_page_remove(&p->first_tb, tb);
948 invalidate_page_bitmap(p);
949 }
950
951 tb_invalidated_flag = 1;
952
953 /* remove the TB from the per-CPU jump cache (tb_jmp_cache) */
954 h = tb_jmp_cache_hash_func(tb->pc);
955 for(env = first_cpu; env != NULL; env = env->next_cpu) {
956 if (env->tb_jmp_cache[h] == tb)
957 env->tb_jmp_cache[h] = NULL;
958 }
959
960 /* suppress this TB from the two jump lists */
961 tb_jmp_remove(tb, 0);
962 tb_jmp_remove(tb, 1);
963
964 /* suppress any remaining jumps to this TB */
965 tb1 = tb->jmp_first;
966 for(;;) {
967 n1 = (long)tb1 & 3;
968 if (n1 == 2)
969 break;
970 tb1 = (TranslationBlock *)((long)tb1 & ~3);
971 tb2 = tb1->jmp_next[n1];
972 tb_reset_jump(tb1, n1);
973 tb1->jmp_next[n1] = NULL;
974 tb1 = tb2;
975 }
976 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
977
978 tb_phys_invalidate_count++;
979}
980
981
982#ifdef VBOX
983void tb_invalidate_virt(CPUState *env, uint32_t eip)
984{
985# if 1
986 tb_flush(env);
987# else
988 uint8_t *cs_base, *pc;
989 unsigned int flags, h, phys_pc;
990 TranslationBlock *tb, **ptb;
991
992 flags = env->hflags;
993 flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
994 cs_base = env->segs[R_CS].base;
995 pc = cs_base + eip;
996
997 tb = tb_find(&ptb, (unsigned long)pc, (unsigned long)cs_base,
998 flags);
999
1000 if(tb)
1001 {
1002# ifdef DEBUG
1003 printf("invalidating TB (%08X) at %08X\n", tb, eip);
1004# endif
1005 tb_invalidate(tb);
1006 //Note: this will leak TBs, but the whole cache will be flushed
1007 // when it happens too often
1008 tb->pc = 0;
1009 tb->cs_base = 0;
1010 tb->flags = 0;
1011 }
1012# endif
1013}
1014
1015# ifdef VBOX_STRICT
1016/**
1017 * Gets the page offset.
1018 */
1019unsigned long get_phys_page_offset(target_ulong addr)
1020{
1021 PhysPageDesc *p = phys_page_find(addr >> TARGET_PAGE_BITS);
1022 return p ? p->phys_offset : 0;
1023}
1024# endif /* VBOX_STRICT */
1025#endif /* VBOX */
1026
1027#ifndef VBOX
1028static inline void set_bits(uint8_t *tab, int start, int len)
1029#else
1030DECLINLINE(void) set_bits(uint8_t *tab, int start, int len)
1031#endif
1032{
1033 int end, mask, end1;
1034
1035 end = start + len;
1036 tab += start >> 3;
1037 mask = 0xff << (start & 7);
1038 if ((start & ~7) == (end & ~7)) {
1039 if (start < end) {
1040 mask &= ~(0xff << (end & 7));
1041 *tab |= mask;
1042 }
1043 } else {
1044 *tab++ |= mask;
1045 start = (start + 8) & ~7;
1046 end1 = end & ~7;
1047 while (start < end1) {
1048 *tab++ = 0xff;
1049 start += 8;
1050 }
1051 if (start < end) {
1052 mask = ~(0xff << (end & 7));
1053 *tab |= mask;
1054 }
1055 }
1056}
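/* Worked example: set_bits(tab, 5, 6) marks bits 5..10. The first byte gets
 * mask 0xff << 5 == 0xe0 (bits 5-7), start is rounded up to 8, and the tail
 * byte gets ~(0xff << (11 & 7)) == 0x07 (bits 8-10). In the code_bitmap
 * built below, each bit stands for one byte of the target page.
 */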
1057
1058static void build_page_bitmap(PageDesc *p)
1059{
1060 int n, tb_start, tb_end;
1061 TranslationBlock *tb;
1062
1063 p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
1064 if (!p->code_bitmap)
1065 return;
1066 memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);
1067
1068 tb = p->first_tb;
1069 while (tb != NULL) {
1070 n = (long)tb & 3;
1071 tb = (TranslationBlock *)((long)tb & ~3);
1072 /* NOTE: this is subtle as a TB may span two physical pages */
1073 if (n == 0) {
1074 /* NOTE: tb_end may be after the end of the page, but
1075 it is not a problem */
1076 tb_start = tb->pc & ~TARGET_PAGE_MASK;
1077 tb_end = tb_start + tb->size;
1078 if (tb_end > TARGET_PAGE_SIZE)
1079 tb_end = TARGET_PAGE_SIZE;
1080 } else {
1081 tb_start = 0;
1082 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1083 }
1084 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
1085 tb = tb->page_next[n];
1086 }
1087}
1088
1089TranslationBlock *tb_gen_code(CPUState *env,
1090 target_ulong pc, target_ulong cs_base,
1091 int flags, int cflags)
1092{
1093 TranslationBlock *tb;
1094 uint8_t *tc_ptr;
1095 target_ulong phys_pc, phys_page2, virt_page2;
1096 int code_gen_size;
1097
1098 phys_pc = get_phys_addr_code(env, pc);
1099 tb = tb_alloc(pc);
1100 if (!tb) {
1101 /* flush must be done */
1102 tb_flush(env);
1103 /* cannot fail at this point */
1104 tb = tb_alloc(pc);
1105 /* Don't forget to invalidate previous TB info. */
1106 tb_invalidated_flag = 1;
1107 }
1108 tc_ptr = code_gen_ptr;
1109 tb->tc_ptr = tc_ptr;
1110 tb->cs_base = cs_base;
1111 tb->flags = flags;
1112 tb->cflags = cflags;
1113 cpu_gen_code(env, tb, &code_gen_size);
1114 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
1115
1116 /* check next page if needed */
1117 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
1118 phys_page2 = -1;
1119 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
1120 phys_page2 = get_phys_addr_code(env, virt_page2);
1121 }
1122 tb_link_phys(tb, phys_pc, phys_page2);
1123 return tb;
1124}
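/* A TB may cross a page boundary: when its last instruction byte falls on the
 * following page, phys_page2 is resolved as well and tb_link_phys() registers
 * the TB on both pages, so a write to either page invalidates it.
 */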
1125
1126/* invalidate all TBs which intersect with the target physical page
1127 starting in range [start;end[. NOTE: start and end must refer to
1128 the same physical page. 'is_cpu_write_access' should be true if called
1129 from a real cpu write access: the virtual CPU will exit the current
1130 TB if code is modified inside this TB. */
1131void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
1132 int is_cpu_write_access)
1133{
1134 int n, current_tb_modified, current_tb_not_found, current_flags;
1135 CPUState *env = cpu_single_env;
1136 PageDesc *p;
1137 TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
1138 target_ulong tb_start, tb_end;
1139 target_ulong current_pc, current_cs_base;
1140
1141 p = page_find(start >> TARGET_PAGE_BITS);
1142 if (!p)
1143 return;
1144 if (!p->code_bitmap &&
1145 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
1146 is_cpu_write_access) {
1147 /* build code bitmap */
1148 build_page_bitmap(p);
1149 }
1150
1151 /* we remove all the TBs in the range [start, end[ */
1152 /* XXX: see if in some cases it could be faster to invalidate all the code */
1153 current_tb_not_found = is_cpu_write_access;
1154 current_tb_modified = 0;
1155 current_tb = NULL; /* avoid warning */
1156 current_pc = 0; /* avoid warning */
1157 current_cs_base = 0; /* avoid warning */
1158 current_flags = 0; /* avoid warning */
1159 tb = p->first_tb;
1160 while (tb != NULL) {
1161 n = (long)tb & 3;
1162 tb = (TranslationBlock *)((long)tb & ~3);
1163 tb_next = tb->page_next[n];
1164 /* NOTE: this is subtle as a TB may span two physical pages */
1165 if (n == 0) {
1166 /* NOTE: tb_end may be after the end of the page, but
1167 it is not a problem */
1168 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
1169 tb_end = tb_start + tb->size;
1170 } else {
1171 tb_start = tb->page_addr[1];
1172 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1173 }
1174 if (!(tb_end <= start || tb_start >= end)) {
1175#ifdef TARGET_HAS_PRECISE_SMC
1176 if (current_tb_not_found) {
1177 current_tb_not_found = 0;
1178 current_tb = NULL;
1179 if (env->mem_io_pc) {
1180 /* now we have a real cpu fault */
1181 current_tb = tb_find_pc(env->mem_io_pc);
1182 }
1183 }
1184 if (current_tb == tb &&
1185 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1186 /* If we are modifying the current TB, we must stop
1187 its execution. We could be more precise by checking
1188 that the modification is after the current PC, but it
1189 would require a specialized function to partially
1190 restore the CPU state */
1191
1192 current_tb_modified = 1;
1193 cpu_restore_state(current_tb, env,
1194 env->mem_io_pc, NULL);
1195#if defined(TARGET_I386)
1196 current_flags = env->hflags;
1197 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
1198 current_cs_base = (target_ulong)env->segs[R_CS].base;
1199 current_pc = current_cs_base + env->eip;
1200#else
1201#error unsupported CPU
1202#endif
1203 }
1204#endif /* TARGET_HAS_PRECISE_SMC */
1205 /* we need to do that to handle the case where a signal
1206 occurs while doing tb_phys_invalidate() */
1207 saved_tb = NULL;
1208 if (env) {
1209 saved_tb = env->current_tb;
1210 env->current_tb = NULL;
1211 }
1212 tb_phys_invalidate(tb, -1);
1213 if (env) {
1214 env->current_tb = saved_tb;
1215 if (env->interrupt_request && env->current_tb)
1216 cpu_interrupt(env, env->interrupt_request);
1217 }
1218 }
1219 tb = tb_next;
1220 }
1221#if !defined(CONFIG_USER_ONLY)
1222 /* if no code remaining, no need to continue to use slow writes */
1223 if (!p->first_tb) {
1224 invalidate_page_bitmap(p);
1225 if (is_cpu_write_access) {
1226 tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
1227 }
1228 }
1229#endif
1230#ifdef TARGET_HAS_PRECISE_SMC
1231 if (current_tb_modified) {
1232 /* we generate a block containing just the instruction
1233 modifying the memory. It will ensure that it cannot modify
1234 itself */
1235 env->current_tb = NULL;
1236 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1237 cpu_resume_from_signal(env, NULL);
1238 }
1239#endif
1240}
1241
1242
1243/* len must be <= 8 and start must be a multiple of len */
1244#ifndef VBOX
1245static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
1246#else
1247DECLINLINE(void) tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
1248#endif
1249{
1250 PageDesc *p;
1251 int offset, b;
1252#if 0
1253 if (1) {
1254 if (loglevel) {
1255 fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1256 cpu_single_env->mem_io_vaddr, len,
1257 cpu_single_env->eip,
1258 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
1259 }
1260 }
1261#endif
1262 p = page_find(start >> TARGET_PAGE_BITS);
1263 if (!p)
1264 return;
1265 if (p->code_bitmap) {
1266 offset = start & ~TARGET_PAGE_MASK;
1267 b = p->code_bitmap[offset >> 3] >> (offset & 7);
1268 if (b & ((1 << len) - 1))
1269 goto do_invalidate;
1270 } else {
1271 do_invalidate:
1272 tb_invalidate_phys_page_range(start, start + len, 1);
1273 }
1274}
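/* Fast-path rationale: once a page has taken SMC_BITMAP_USE_THRESHOLD write
 * accesses, tb_invalidate_phys_page_range() builds the code_bitmap, and this
 * helper can then skip the much more expensive range invalidation whenever
 * the written bytes do not overlap any translated code on the page.
 */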
1275
1276
1277#if !defined(CONFIG_SOFTMMU)
1278static void tb_invalidate_phys_page(target_phys_addr_t addr,
1279 unsigned long pc, void *puc)
1280{
1281 int n, current_flags, current_tb_modified;
1282 target_ulong current_pc, current_cs_base;
1283 PageDesc *p;
1284 TranslationBlock *tb, *current_tb;
1285#ifdef TARGET_HAS_PRECISE_SMC
1286 CPUState *env = cpu_single_env;
1287#endif
1288
1289 addr &= TARGET_PAGE_MASK;
1290 p = page_find(addr >> TARGET_PAGE_BITS);
1291 if (!p)
1292 return;
1293 tb = p->first_tb;
1294 current_tb_modified = 0;
1295 current_tb = NULL;
1296 current_pc = 0; /* avoid warning */
1297 current_cs_base = 0; /* avoid warning */
1298 current_flags = 0; /* avoid warning */
1299#ifdef TARGET_HAS_PRECISE_SMC
1300 if (tb && pc != 0) {
1301 current_tb = tb_find_pc(pc);
1302 }
1303#endif
1304 while (tb != NULL) {
1305 n = (long)tb & 3;
1306 tb = (TranslationBlock *)((long)tb & ~3);
1307#ifdef TARGET_HAS_PRECISE_SMC
1308 if (current_tb == tb &&
1309 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1310 /* If we are modifying the current TB, we must stop
1311 its execution. We could be more precise by checking
1312 that the modification is after the current PC, but it
1313 would require a specialized function to partially
1314 restore the CPU state */
1315
1316 current_tb_modified = 1;
1317 cpu_restore_state(current_tb, env, pc, puc);
1318#if defined(TARGET_I386)
1319 current_flags = env->hflags;
1320 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
1321 current_cs_base = (target_ulong)env->segs[R_CS].base;
1322 current_pc = current_cs_base + env->eip;
1323#else
1324#error unsupported CPU
1325#endif
1326 }
1327#endif /* TARGET_HAS_PRECISE_SMC */
1328 tb_phys_invalidate(tb, addr);
1329 tb = tb->page_next[n];
1330 }
1331 p->first_tb = NULL;
1332#ifdef TARGET_HAS_PRECISE_SMC
1333 if (current_tb_modified) {
1334 /* we generate a block containing just the instruction
1335 modifying the memory. It will ensure that it cannot modify
1336 itself */
1337 env->current_tb = NULL;
1338 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1339 cpu_resume_from_signal(env, puc);
1340 }
1341#endif
1342}
1343#endif
1344
1345/* add the tb in the target page and protect it if necessary */
1346#ifndef VBOX
1347static inline void tb_alloc_page(TranslationBlock *tb,
1348 unsigned int n, target_ulong page_addr)
1349#else
1350DECLINLINE(void) tb_alloc_page(TranslationBlock *tb,
1351 unsigned int n, target_ulong page_addr)
1352#endif
1353{
1354 PageDesc *p;
1355 TranslationBlock *last_first_tb;
1356
1357 tb->page_addr[n] = page_addr;
1358 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
1359 tb->page_next[n] = p->first_tb;
1360 last_first_tb = p->first_tb;
1361 p->first_tb = (TranslationBlock *)((long)tb | n);
1362 invalidate_page_bitmap(p);
1363
1364#if defined(TARGET_HAS_SMC) || 1
1365
1366#if defined(CONFIG_USER_ONLY)
1367 if (p->flags & PAGE_WRITE) {
1368 target_ulong addr;
1369 PageDesc *p2;
1370 int prot;
1371
1372 /* force the host page as non writable (writes will have a
1373 page fault + mprotect overhead) */
1374 page_addr &= qemu_host_page_mask;
1375 prot = 0;
1376 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1377 addr += TARGET_PAGE_SIZE) {
1378
1379 p2 = page_find (addr >> TARGET_PAGE_BITS);
1380 if (!p2)
1381 continue;
1382 prot |= p2->flags;
1383 p2->flags &= ~PAGE_WRITE;
1384 page_get_flags(addr);
1385 }
1386 mprotect(g2h(page_addr), qemu_host_page_size,
1387 (prot & PAGE_BITS) & ~PAGE_WRITE);
1388#ifdef DEBUG_TB_INVALIDATE
1389 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
1390 page_addr);
1391#endif
1392 }
1393#else
1394 /* if some code is already present, then the pages are already
1395 protected. So we handle the case where only the first TB is
1396 allocated in a physical page */
1397 if (!last_first_tb) {
1398 tlb_protect_code(page_addr);
1399 }
1400#endif
1401
1402#endif /* TARGET_HAS_SMC */
1403}
1404
1405/* Allocate a new translation block. Flush the translation buffer if
1406 too many translation blocks or too much generated code. */
1407TranslationBlock *tb_alloc(target_ulong pc)
1408{
1409 TranslationBlock *tb;
1410
1411 if (nb_tbs >= code_gen_max_blocks ||
1412#ifndef VBOX
1413 (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
1414#else
1415 (code_gen_ptr - code_gen_buffer) >= (int)code_gen_buffer_max_size)
1416#endif
1417 return NULL;
1418 tb = &tbs[nb_tbs++];
1419 tb->pc = pc;
1420 tb->cflags = 0;
1421 return tb;
1422}
1423
1424void tb_free(TranslationBlock *tb)
1425{
1426 /* In practice this is mostly used for single-use temporary TBs.
1427 Ignore the hard cases and just back up if this TB happens to
1428 be the last one generated. */
1429 if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
1430 code_gen_ptr = tb->tc_ptr;
1431 nb_tbs--;
1432 }
1433}
1434
1435/* add a new TB and link it to the physical page tables. phys_page2 is
1436 (-1) to indicate that only one page contains the TB. */
1437void tb_link_phys(TranslationBlock *tb,
1438 target_ulong phys_pc, target_ulong phys_page2)
1439{
1440 unsigned int h;
1441 TranslationBlock **ptb;
1442
1443 /* Grab the mmap lock to stop another thread invalidating this TB
1444 before we are done. */
1445 mmap_lock();
1446 /* add in the physical hash table */
1447 h = tb_phys_hash_func(phys_pc);
1448 ptb = &tb_phys_hash[h];
1449 tb->phys_hash_next = *ptb;
1450 *ptb = tb;
1451
1452 /* add in the page list */
1453 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1454 if (phys_page2 != -1)
1455 tb_alloc_page(tb, 1, phys_page2);
1456 else
1457 tb->page_addr[1] = -1;
1458
1459 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1460 tb->jmp_next[0] = NULL;
1461 tb->jmp_next[1] = NULL;
1462
1463 /* init original jump addresses */
1464 if (tb->tb_next_offset[0] != 0xffff)
1465 tb_reset_jump(tb, 0);
1466 if (tb->tb_next_offset[1] != 0xffff)
1467 tb_reset_jump(tb, 1);
1468
1469#ifdef DEBUG_TB_CHECK
1470 tb_page_check();
1471#endif
1472 mmap_unlock();
1473}
1474
1475/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1476 tb[1].tc_ptr. Return NULL if not found */
1477TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1478{
1479 int m_min, m_max, m;
1480 unsigned long v;
1481 TranslationBlock *tb;
1482
1483 if (nb_tbs <= 0)
1484 return NULL;
1485 if (tc_ptr < (unsigned long)code_gen_buffer ||
1486 tc_ptr >= (unsigned long)code_gen_ptr)
1487 return NULL;
1488 /* binary search (cf Knuth) */
1489 m_min = 0;
1490 m_max = nb_tbs - 1;
1491 while (m_min <= m_max) {
1492 m = (m_min + m_max) >> 1;
1493 tb = &tbs[m];
1494 v = (unsigned long)tb->tc_ptr;
1495 if (v == tc_ptr)
1496 return tb;
1497 else if (tc_ptr < v) {
1498 m_max = m - 1;
1499 } else {
1500 m_min = m + 1;
1501 }
1502 }
1503 return &tbs[m_max];
1504}
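/* This binary search works because tbs[] is filled in allocation order and
 * translated code is laid out sequentially in code_gen_buffer, so tc_ptr is
 * monotonically increasing across the array. Returning &tbs[m_max] when no
 * exact match is found yields the TB whose code region contains tc_ptr.
 */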
1505
1506static void tb_reset_jump_recursive(TranslationBlock *tb);
1507
1508#ifndef VBOX
1509static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1510#else
1511DECLINLINE(void) tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1512#endif
1513{
1514 TranslationBlock *tb1, *tb_next, **ptb;
1515 unsigned int n1;
1516
1517 tb1 = tb->jmp_next[n];
1518 if (tb1 != NULL) {
1519 /* find head of list */
1520 for(;;) {
1521 n1 = (long)tb1 & 3;
1522 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1523 if (n1 == 2)
1524 break;
1525 tb1 = tb1->jmp_next[n1];
1526 }
1527 /* we are now sure that tb jumps to tb1 */
1528 tb_next = tb1;
1529
1530 /* remove tb from the jmp_first list */
1531 ptb = &tb_next->jmp_first;
1532 for(;;) {
1533 tb1 = *ptb;
1534 n1 = (long)tb1 & 3;
1535 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1536 if (n1 == n && tb1 == tb)
1537 break;
1538 ptb = &tb1->jmp_next[n1];
1539 }
1540 *ptb = tb->jmp_next[n];
1541 tb->jmp_next[n] = NULL;
1542
1543 /* suppress the jump to next tb in generated code */
1544 tb_reset_jump(tb, n);
1545
1546 /* suppress jumps in the tb on which we could have jumped */
1547 tb_reset_jump_recursive(tb_next);
1548 }
1549}
1550
1551static void tb_reset_jump_recursive(TranslationBlock *tb)
1552{
1553 tb_reset_jump_recursive2(tb, 0);
1554 tb_reset_jump_recursive2(tb, 1);
1555}
1556
1557#if defined(TARGET_HAS_ICE)
1558static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1559{
1560 target_ulong addr, pd;
1561 ram_addr_t ram_addr;
1562 PhysPageDesc *p;
1563
1564 addr = cpu_get_phys_page_debug(env, pc);
1565 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1566 if (!p) {
1567 pd = IO_MEM_UNASSIGNED;
1568 } else {
1569 pd = p->phys_offset;
1570 }
1571 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1572 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1573}
1574#endif
1575
1576/* Add a watchpoint. */
1577int cpu_watchpoint_insert(CPUState *env, target_ulong addr, int type)
1578{
1579 int i;
1580
1581 for (i = 0; i < env->nb_watchpoints; i++) {
1582 if (addr == env->watchpoint[i].vaddr)
1583 return 0;
1584 }
1585 if (env->nb_watchpoints >= MAX_WATCHPOINTS)
1586 return -1;
1587
1588 i = env->nb_watchpoints++;
1589 env->watchpoint[i].vaddr = addr;
1590 env->watchpoint[i].type = type;
1591 tlb_flush_page(env, addr);
1592 /* FIXME: This flush is needed because of the hack to make memory ops
1593 terminate the TB. It can be removed once the proper IO trap and
1594 re-execute bits are in. */
1595 tb_flush(env);
1596 return i;
1597}
1598
1599/* Remove a watchpoint. */
1600int cpu_watchpoint_remove(CPUState *env, target_ulong addr)
1601{
1602 int i;
1603
1604 for (i = 0; i < env->nb_watchpoints; i++) {
1605 if (addr == env->watchpoint[i].vaddr) {
1606 env->nb_watchpoints--;
1607 env->watchpoint[i] = env->watchpoint[env->nb_watchpoints];
1608 tlb_flush_page(env, addr);
1609 return 0;
1610 }
1611 }
1612 return -1;
1613}
1614
1615/* Remove all watchpoints. */
1616void cpu_watchpoint_remove_all(CPUState *env) {
1617 int i;
1618
1619 for (i = 0; i < env->nb_watchpoints; i++) {
1620 tlb_flush_page(env, env->watchpoint[i].vaddr);
1621 }
1622 env->nb_watchpoints = 0;
1623}
1624
1625/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
1626 breakpoint is reached */
1627int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
1628{
1629#if defined(TARGET_HAS_ICE)
1630 int i;
1631
1632 for(i = 0; i < env->nb_breakpoints; i++) {
1633 if (env->breakpoints[i] == pc)
1634 return 0;
1635 }
1636
1637 if (env->nb_breakpoints >= MAX_BREAKPOINTS)
1638 return -1;
1639 env->breakpoints[env->nb_breakpoints++] = pc;
1640
1641 breakpoint_invalidate(env, pc);
1642 return 0;
1643#else
1644 return -1;
1645#endif
1646}
1647
1648/* remove all breakpoints */
1649void cpu_breakpoint_remove_all(CPUState *env) {
1650#if defined(TARGET_HAS_ICE)
1651 int i;
1652 for(i = 0; i < env->nb_breakpoints; i++) {
1653 breakpoint_invalidate(env, env->breakpoints[i]);
1654 }
1655 env->nb_breakpoints = 0;
1656#endif
1657}
1658
1659/* remove a breakpoint */
1660int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
1661{
1662#if defined(TARGET_HAS_ICE)
1663 int i;
1664 for(i = 0; i < env->nb_breakpoints; i++) {
1665 if (env->breakpoints[i] == pc)
1666 goto found;
1667 }
1668 return -1;
1669 found:
1670 env->nb_breakpoints--;
1671 if (i < env->nb_breakpoints)
1672 env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];
1673
1674 breakpoint_invalidate(env, pc);
1675 return 0;
1676#else
1677 return -1;
1678#endif
1679}
1680
1681/* enable or disable single step mode. EXCP_DEBUG is returned by the
1682 CPU loop after each instruction */
1683void cpu_single_step(CPUState *env, int enabled)
1684{
1685#if defined(TARGET_HAS_ICE)
1686 if (env->singlestep_enabled != enabled) {
1687 env->singlestep_enabled = enabled;
1688 /* must flush all the translated code to avoid inconsistencies */
1689 /* XXX: only flush what is necessary */
1690 tb_flush(env);
1691 }
1692#endif
1693}
1694
1695#ifndef VBOX
1696/* enable or disable low levels log */
1697void cpu_set_log(int log_flags)
1698{
1699 loglevel = log_flags;
1700 if (loglevel && !logfile) {
1701 logfile = fopen(logfilename, "w");
1702 if (!logfile) {
1703 perror(logfilename);
1704 _exit(1);
1705 }
1706#if !defined(CONFIG_SOFTMMU)
1707 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1708 {
1709 static uint8_t logfile_buf[4096];
1710 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1711 }
1712#else
1713 setvbuf(logfile, NULL, _IOLBF, 0);
1714#endif
1715 }
1716}
1717
1718void cpu_set_log_filename(const char *filename)
1719{
1720 logfilename = strdup(filename);
1721}
1722#endif /* !VBOX */
1723
1724/* mask must never be zero, except for A20 change call */
1725void cpu_interrupt(CPUState *env, int mask)
1726{
1727#if !defined(USE_NPTL)
1728 TranslationBlock *tb;
1729 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
1730#endif
1731 int old_mask;
1732
1733 old_mask = env->interrupt_request;
1734#ifdef VBOX
1735 VM_ASSERT_EMT(env->pVM);
1736 ASMAtomicOrS32((int32_t volatile *)&env->interrupt_request, mask);
1737#else /* !VBOX */
1738 /* FIXME: This is probably not threadsafe. A different thread could
1739 be in the middle of a read-modify-write operation. */
1740 env->interrupt_request |= mask;
1741#endif /* !VBOX */
1742#if defined(USE_NPTL)
1743 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1744 problem and hope the cpu will stop of its own accord. For userspace
1745 emulation this often isn't actually as bad as it sounds. Often
1746 signals are used primarily to interrupt blocking syscalls. */
1747#else
1748 if (use_icount) {
1749 env->icount_decr.u16.high = 0xffff;
1750#ifndef CONFIG_USER_ONLY
1751 /* CPU_INTERRUPT_EXIT isn't a real interrupt. It just means
1752 an async event happened and we need to process it. */
1753 if (!can_do_io(env)
1754 && (mask & ~(old_mask | CPU_INTERRUPT_EXIT)) != 0) {
1755 cpu_abort(env, "Raised interrupt while not in I/O function");
1756 }
1757#endif
1758 } else {
1759 tb = env->current_tb;
1760 /* if the cpu is currently executing code, we must unlink it and
1761 all the potentially executing TB */
1762 if (tb && !testandset(&interrupt_lock)) {
1763 env->current_tb = NULL;
1764 tb_reset_jump_recursive(tb);
1765 resetlock(&interrupt_lock);
1766 }
1767 }
1768#endif
1769}
1770
1771void cpu_reset_interrupt(CPUState *env, int mask)
1772{
1773#ifdef VBOX
1774 /*
1775 * Note: the current implementation can be executed by another thread without problems; make sure this remains true
1776 * for future changes!
1777 */
1778 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~mask);
1779#else /* !VBOX */
1780 env->interrupt_request &= ~mask;
1781#endif /* !VBOX */
1782}
1783
1784#ifndef VBOX
1785CPULogItem cpu_log_items[] = {
1786 { CPU_LOG_TB_OUT_ASM, "out_asm",
1787 "show generated host assembly code for each compiled TB" },
1788 { CPU_LOG_TB_IN_ASM, "in_asm",
1789 "show target assembly code for each compiled TB" },
1790 { CPU_LOG_TB_OP, "op",
1791 "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
1792#ifdef TARGET_I386
1793 { CPU_LOG_TB_OP_OPT, "op_opt",
1794 "show micro ops after optimization for each compiled TB" },
1795#endif
1796 { CPU_LOG_INT, "int",
1797 "show interrupts/exceptions in short format" },
1798 { CPU_LOG_EXEC, "exec",
1799 "show trace before each executed TB (lots of logs)" },
1800 { CPU_LOG_TB_CPU, "cpu",
1801 "show CPU state before bloc translation" },
1802#ifdef TARGET_I386
1803 { CPU_LOG_PCALL, "pcall",
1804 "show protected mode far calls/returns/exceptions" },
1805#endif
1806#ifdef DEBUG_IOPORT
1807 { CPU_LOG_IOPORT, "ioport",
1808 "show all i/o ports accesses" },
1809#endif
1810 { 0, NULL, NULL },
1811};
1812
1813static int cmp1(const char *s1, int n, const char *s2)
1814{
1815 if (strlen(s2) != n)
1816 return 0;
1817 return memcmp(s1, s2, n) == 0;
1818}
1819
1820/* takes a comma-separated list of log masks. Returns 0 on error. */
1821int cpu_str_to_log_mask(const char *str)
1822{
1823 CPULogItem *item;
1824 int mask;
1825 const char *p, *p1;
1826
1827 p = str;
1828 mask = 0;
1829 for(;;) {
1830 p1 = strchr(p, ',');
1831 if (!p1)
1832 p1 = p + strlen(p);
1833 if(cmp1(p,p1-p,"all")) {
1834 for(item = cpu_log_items; item->mask != 0; item++) {
1835 mask |= item->mask;
1836 }
1837 } else {
1838 for(item = cpu_log_items; item->mask != 0; item++) {
1839 if (cmp1(p, p1 - p, item->name))
1840 goto found;
1841 }
1842 return 0;
1843 }
1844 found:
1845 mask |= item->mask;
1846 if (*p1 != ',')
1847 break;
1848 p = p1 + 1;
1849 }
1850 return mask;
1851}
1852#endif /* !VBOX */
1853
1854#ifndef VBOX /* VBOX: we have our own routine. */
1855void cpu_abort(CPUState *env, const char *fmt, ...)
1856{
1857 va_list ap;
1858
1859 va_start(ap, fmt);
1860 fprintf(stderr, "qemu: fatal: ");
1861 vfprintf(stderr, fmt, ap);
1862 fprintf(stderr, "\n");
1863#ifdef TARGET_I386
1864 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1865#else
1866 cpu_dump_state(env, stderr, fprintf, 0);
1867#endif
1868 va_end(ap);
1869 abort();
1870}
1871#endif /* !VBOX */
1872
1873#ifndef VBOX
1874CPUState *cpu_copy(CPUState *env)
1875{
1876 CPUState *new_env = cpu_init(env->cpu_model_str);
1877 /* preserve chaining and index */
1878 CPUState *next_cpu = new_env->next_cpu;
1879 int cpu_index = new_env->cpu_index;
1880 memcpy(new_env, env, sizeof(CPUState));
1881 new_env->next_cpu = next_cpu;
1882 new_env->cpu_index = cpu_index;
1883 return new_env;
1884}
1885#endif
1886
1887#if !defined(CONFIG_USER_ONLY)
1888
1889#ifndef VBOX
1890static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1891#else
1892DECLINLINE(void) tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1893#endif
1894{
1895 unsigned int i;
1896
1897 /* Discard jump cache entries for any tb which might potentially
1898 overlap the flushed page. */
1899 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1900 memset (&env->tb_jmp_cache[i], 0,
1901 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1902
1903 i = tb_jmp_cache_hash_page(addr);
1904 memset (&env->tb_jmp_cache[i], 0,
1905 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1906
1907#ifdef VBOX
1908 /* inform raw mode about TLB page flush */
1909 remR3FlushPage(env, addr);
1910#endif /* VBOX */
1911}
1912
1913/* NOTE: if flush_global is true, also flush global entries (not
1914 implemented yet) */
1915void tlb_flush(CPUState *env, int flush_global)
1916{
1917 int i;
1918#if defined(DEBUG_TLB)
1919 printf("tlb_flush:\n");
1920#endif
1921 /* must reset current TB so that interrupts cannot modify the
1922 links while we are modifying them */
1923 env->current_tb = NULL;
1924
1925 for(i = 0; i < CPU_TLB_SIZE; i++) {
1926 env->tlb_table[0][i].addr_read = -1;
1927 env->tlb_table[0][i].addr_write = -1;
1928 env->tlb_table[0][i].addr_code = -1;
1929 env->tlb_table[1][i].addr_read = -1;
1930 env->tlb_table[1][i].addr_write = -1;
1931 env->tlb_table[1][i].addr_code = -1;
1932#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
1933 env->phys_addends[0][i] = -1;
1934 env->phys_addends[1][i] = -1;
1935#endif
1936#if (NB_MMU_MODES >= 3)
1937 env->tlb_table[2][i].addr_read = -1;
1938 env->tlb_table[2][i].addr_write = -1;
1939 env->tlb_table[2][i].addr_code = -1;
1940#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
1941 env->phys_addends[2][i] = -1;
1942#endif
1943#if (NB_MMU_MODES == 4)
1944 env->tlb_table[3][i].addr_read = -1;
1945 env->tlb_table[3][i].addr_write = -1;
1946 env->tlb_table[3][i].addr_code = -1;
1947#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
1948 env->phys_addends[3][i] = -1;
1949#endif
1950#endif
1951#endif
1952 }
1953
1954 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1955
1956#ifdef VBOX
1957 /* inform raw mode about TLB flush */
1958 remR3FlushTLB(env, flush_global);
1959#endif
1960#ifdef USE_KQEMU
1961 if (env->kqemu_enabled) {
1962 kqemu_flush(env, flush_global);
1963 }
1964#endif
1965 tlb_flush_count++;
1966}
1967
1968#ifndef VBOX
1969static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1970#else
1971DECLINLINE(void) tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1972#endif
1973{
1974 if (addr == (tlb_entry->addr_read &
1975 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1976 addr == (tlb_entry->addr_write &
1977 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1978 addr == (tlb_entry->addr_code &
1979 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1980 tlb_entry->addr_read = -1;
1981 tlb_entry->addr_write = -1;
1982 tlb_entry->addr_code = -1;
1983 }
1984}
1985
1986void tlb_flush_page(CPUState *env, target_ulong addr)
1987{
1988 int i;
1989
1990#if defined(DEBUG_TLB)
1991 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1992#endif
1993 /* must reset current TB so that interrupts cannot modify the
1994 links while we are modifying them */
1995 env->current_tb = NULL;
1996
1997 addr &= TARGET_PAGE_MASK;
1998 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1999 tlb_flush_entry(&env->tlb_table[0][i], addr);
2000 tlb_flush_entry(&env->tlb_table[1][i], addr);
2001#if (NB_MMU_MODES >= 3)
2002 tlb_flush_entry(&env->tlb_table[2][i], addr);
2003#if (NB_MMU_MODES == 4)
2004 tlb_flush_entry(&env->tlb_table[3][i], addr);
2005#endif
2006#endif
2007
2008 tlb_flush_jmp_cache(env, addr);
2009
2010#ifdef USE_KQEMU
2011 if (env->kqemu_enabled) {
2012 kqemu_flush_page(env, addr);
2013 }
2014#endif
2015}
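/* A minimal usage sketch (not compiled): when the guest invalidates only a
   single translation - e.g. the x86 INVLPG instruction - the per-page flush
   above is sufficient and much cheaper than a full tlb_flush().  The helper
   name is hypothetical. */
#if 0
static void example_invlpg(CPUState *env, target_ulong addr)
{
    tlb_flush_page(env, addr);
}
#endif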
2016
2017/* update the TLBs so that writes to code in the virtual page 'addr'
2018 can be detected */
2019static void tlb_protect_code(ram_addr_t ram_addr)
2020{
2021 cpu_physical_memory_reset_dirty(ram_addr,
2022 ram_addr + TARGET_PAGE_SIZE,
2023 CODE_DIRTY_FLAG);
2024#if defined(VBOX) && defined(REM_MONITOR_CODE_PAGES)
2025 /** @todo Retest this? This function has changed... */
2026 remR3ProtectCode(cpu_single_env, ram_addr);
2027#endif
2028}
2029
2030/* update the TLB so that writes in physical page 'phys_addr' are no longer
2031 tested for self modifying code */
2032static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
2033 target_ulong vaddr)
2034{
2035#ifdef VBOX
2036 if (RT_LIKELY((ram_addr >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
2037#endif
2038 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
2039}
2040
2041#ifndef VBOX
2042static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
2043 unsigned long start, unsigned long length)
2044#else
2045DECLINLINE(void) tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
2046 unsigned long start, unsigned long length)
2047#endif
2048{
2049 unsigned long addr;
2050
2051#ifdef VBOX
2052 if (start & 3)
2053 return;
2054#endif
2055 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
2056 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
2057 if ((addr - start) < length) {
2058 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
2059 }
2060 }
2061}
2062
2063void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
2064 int dirty_flags)
2065{
2066 CPUState *env;
2067 unsigned long length, start1;
2068 int i, mask, len;
2069 uint8_t *p;
2070
2071 start &= TARGET_PAGE_MASK;
2072 end = TARGET_PAGE_ALIGN(end);
2073
2074 length = end - start;
2075 if (length == 0)
2076 return;
2077 len = length >> TARGET_PAGE_BITS;
2078#ifdef USE_KQEMU
2079 /* XXX: should not depend on cpu context */
2080 env = first_cpu;
2081 if (env->kqemu_enabled) {
2082 ram_addr_t addr;
2083 addr = start;
2084 for(i = 0; i < len; i++) {
2085 kqemu_set_notdirty(env, addr);
2086 addr += TARGET_PAGE_SIZE;
2087 }
2088 }
2089#endif
2090 mask = ~dirty_flags;
2091 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
2092#ifdef VBOX
2093 if (RT_LIKELY((start >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
2094#endif
2095 for(i = 0; i < len; i++)
2096 p[i] &= mask;
2097
2098 /* we modify the TLB cache so that the dirty bit will be set again
2099 when accessing the range */
2100#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
2101 start1 = start;
2102#elif !defined(VBOX)
2103 start1 = start + (unsigned long)phys_ram_base;
2104#else
2105 start1 = (unsigned long)remR3TlbGCPhys2Ptr(first_cpu, start, 1 /*fWritable*/); /** @todo this can be harmful with VBOX_WITH_NEW_PHYS_CODE, fix interface/whatever. */
2106#endif
2107 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2108 for(i = 0; i < CPU_TLB_SIZE; i++)
2109 tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
2110 for(i = 0; i < CPU_TLB_SIZE; i++)
2111 tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
2112#if (NB_MMU_MODES >= 3)
2113 for(i = 0; i < CPU_TLB_SIZE; i++)
2114 tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
2115#if (NB_MMU_MODES == 4)
2116 for(i = 0; i < CPU_TLB_SIZE; i++)
2117 tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
2118#endif
2119#endif
2120 }
2121}
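/* A hedged usage sketch (not compiled): a typical consumer of the dirty
   bitmap, such as a VGA model, scans its RAM range, redraws what changed and
   then rearms tracking with cpu_physical_memory_reset_dirty().  It assumes
   VGA_DIRTY_FLAG and cpu_physical_memory_get_dirty() from cpu-all.h; the
   function name is hypothetical. */
#if 0
static void example_refresh_vram(ram_addr_t vram_start, ram_addr_t vram_size)
{
    ram_addr_t page;

    for (page = vram_start; page < vram_start + vram_size; page += TARGET_PAGE_SIZE) {
        if (cpu_physical_memory_get_dirty(page, VGA_DIRTY_FLAG)) {
            /* ...redraw the scanlines backed by this page... */
        }
    }
    /* collect a fresh set of dirty bits for the next frame */
    cpu_physical_memory_reset_dirty(vram_start, vram_start + vram_size,
                                    VGA_DIRTY_FLAG);
}
#endif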
2122
2123#ifndef VBOX
2124int cpu_physical_memory_set_dirty_tracking(int enable)
2125{
2126 in_migration = enable;
2127 return 0;
2128}
2129
2130int cpu_physical_memory_get_dirty_tracking(void)
2131{
2132 return in_migration;
2133}
2134#endif
2135
2136#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
2137DECLINLINE(void) tlb_update_dirty(CPUTLBEntry *tlb_entry, target_phys_addr_t phys_addend)
2138#else
2139static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
2140#endif
2141{
2142 ram_addr_t ram_addr;
2143
2144 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
2145 /* RAM case */
2146#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
2147 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
2148#elif !defined(VBOX)
2149 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
2150 tlb_entry->addend - (unsigned long)phys_ram_base;
2151#else
2152 Assert(phys_addend != -1);
2153 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + phys_addend;
2154#endif
2155 if (!cpu_physical_memory_is_dirty(ram_addr)) {
2156 tlb_entry->addr_write |= TLB_NOTDIRTY;
2157 }
2158 }
2159}
2160
2161/* update the TLB according to the current state of the dirty bits */
2162void cpu_tlb_update_dirty(CPUState *env)
2163{
2164 int i;
2165#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
2166 for(i = 0; i < CPU_TLB_SIZE; i++)
2167 tlb_update_dirty(&env->tlb_table[0][i], env->phys_addends[0][i]);
2168 for(i = 0; i < CPU_TLB_SIZE; i++)
2169 tlb_update_dirty(&env->tlb_table[1][i], env->phys_addends[1][i]);
2170#if (NB_MMU_MODES >= 3)
2171 for(i = 0; i < CPU_TLB_SIZE; i++)
2172 tlb_update_dirty(&env->tlb_table[2][i], env->phys_addends[2][i]);
2173#if (NB_MMU_MODES == 4)
2174 for(i = 0; i < CPU_TLB_SIZE; i++)
2175 tlb_update_dirty(&env->tlb_table[3][i], env->phys_addends[3][i]);
2176#endif
2177#endif
2178#else /* VBOX */
2179 for(i = 0; i < CPU_TLB_SIZE; i++)
2180 tlb_update_dirty(&env->tlb_table[0][i]);
2181 for(i = 0; i < CPU_TLB_SIZE; i++)
2182 tlb_update_dirty(&env->tlb_table[1][i]);
2183#if (NB_MMU_MODES >= 3)
2184 for(i = 0; i < CPU_TLB_SIZE; i++)
2185 tlb_update_dirty(&env->tlb_table[2][i]);
2186#if (NB_MMU_MODES == 4)
2187 for(i = 0; i < CPU_TLB_SIZE; i++)
2188 tlb_update_dirty(&env->tlb_table[3][i]);
2189#endif
2190#endif
2191#endif /* VBOX */
2192}
2193
2194#ifndef VBOX
2195static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
2196#else
2197DECLINLINE(void) tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
2198#endif
2199{
2200 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
2201 tlb_entry->addr_write = vaddr;
2202}
2203
2204
2205/* update the TLB corresponding to virtual page vaddr and phys addr
2206 addr so that it is no longer dirty */
2207#ifndef VBOX
2208static inline void tlb_set_dirty(CPUState *env,
2209 unsigned long addr, target_ulong vaddr)
2210#else
2211DECLINLINE(void) tlb_set_dirty(CPUState *env,
2212 unsigned long addr, target_ulong vaddr)
2213#endif
2214{
2215 int i;
2216
2217 addr &= TARGET_PAGE_MASK;
2218 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2219 tlb_set_dirty1(&env->tlb_table[0][i], addr);
2220 tlb_set_dirty1(&env->tlb_table[1][i], addr);
2221#if (NB_MMU_MODES >= 3)
2222 tlb_set_dirty1(&env->tlb_table[2][i], vaddr);
2223#if (NB_MMU_MODES == 4)
2224 tlb_set_dirty1(&env->tlb_table[3][i], vaddr);
2225#endif
2226#endif
2227}
2228
2229/* add a new TLB entry. At most one entry for a given virtual address
2230 is permitted. Return 0 if OK or 2 if the page could not be mapped
2231 (can only happen in non SOFTMMU mode for I/O pages or pages
2232 conflicting with the host address space). */
2233int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
2234 target_phys_addr_t paddr, int prot,
2235 int mmu_idx, int is_softmmu)
2236{
2237 PhysPageDesc *p;
2238 unsigned long pd;
2239 unsigned int index;
2240 target_ulong address;
2241 target_ulong code_address;
2242 target_phys_addr_t addend;
2243 int ret;
2244 CPUTLBEntry *te;
2245 int i;
2246 target_phys_addr_t iotlb;
2247#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
2248 int read_mods = 0, write_mods = 0, code_mods = 0;
2249#endif
2250
2251 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
2252 if (!p) {
2253 pd = IO_MEM_UNASSIGNED;
2254 } else {
2255 pd = p->phys_offset;
2256 }
2257#if defined(DEBUG_TLB)
2258 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
2259 vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
2260#endif
2261
2262 ret = 0;
2263 address = vaddr;
2264 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
2265 /* IO memory case (romd handled later) */
2266 address |= TLB_MMIO;
2267 }
2268#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
2269 addend = pd & TARGET_PAGE_MASK;
2270#elif !defined(VBOX)
2271 addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
2272#else
2273 /** @todo this is racing the phys_page_find call above since it may register
2274 * a new chunk of memory... */
2275 addend = (unsigned long)remR3TlbGCPhys2Ptr(env,
2276 pd & TARGET_PAGE_MASK,
2277 !!(prot & PAGE_WRITE));
2278#endif
2279
2280 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
2281 /* Normal RAM. */
2282 iotlb = pd & TARGET_PAGE_MASK;
2283 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
2284 iotlb |= IO_MEM_NOTDIRTY;
2285 else
2286 iotlb |= IO_MEM_ROM;
2287 } else {
 2288 /* IO handlers are currently passed a physical address.
2289 It would be nice to pass an offset from the base address
2290 of that region. This would avoid having to special case RAM,
2291 and avoid full address decoding in every device.
2292 We can't use the high bits of pd for this because
2293 IO_MEM_ROMD uses these as a ram address. */
2294 iotlb = (pd & ~TARGET_PAGE_MASK) + paddr;
2295 }
2296
2297 code_address = address;
2298
2299#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
2300 if (addend & 0x3)
2301 {
2302 if (addend & 0x2)
2303 {
2304 /* catch write */
2305 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM)
2306 write_mods |= TLB_MMIO;
2307 }
2308 else if (addend & 0x1)
2309 {
2310 /* catch all */
2311 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM)
2312 {
2313 read_mods |= TLB_MMIO;
2314 write_mods |= TLB_MMIO;
2315 code_mods |= TLB_MMIO;
2316 }
2317 }
2318 if ((iotlb & ~TARGET_PAGE_MASK) == 0)
2319 iotlb = env->pVM->rem.s.iHandlerMemType + paddr;
2320 addend &= ~(target_ulong)0x3;
2321 }
2322#endif
2323
2324 /* Make accesses to pages with watchpoints go via the
2325 watchpoint trap routines. */
2326 for (i = 0; i < env->nb_watchpoints; i++) {
2327 if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {
2328 iotlb = io_mem_watch + paddr;
2329 /* TODO: The memory case can be optimized by not trapping
2330 reads of pages with a write breakpoint. */
2331 address |= TLB_MMIO;
2332 }
2333 }
2334
2335 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2336 env->iotlb[mmu_idx][index] = iotlb - vaddr;
2337 te = &env->tlb_table[mmu_idx][index];
2338 te->addend = addend - vaddr;
2339 if (prot & PAGE_READ) {
2340 te->addr_read = address;
2341 } else {
2342 te->addr_read = -1;
2343 }
2344
2345 if (prot & PAGE_EXEC) {
2346 te->addr_code = code_address;
2347 } else {
2348 te->addr_code = -1;
2349 }
2350 if (prot & PAGE_WRITE) {
2351 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
2352 (pd & IO_MEM_ROMD)) {
2353 /* Write access calls the I/O callback. */
2354 te->addr_write = address | TLB_MMIO;
2355 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
2356 !cpu_physical_memory_is_dirty(pd)) {
2357 te->addr_write = address | TLB_NOTDIRTY;
2358 } else {
2359 te->addr_write = address;
2360 }
2361 } else {
2362 te->addr_write = -1;
2363 }
2364
2365#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
2366 if (prot & PAGE_READ)
2367 te->addr_read |= read_mods;
2368 if (prot & PAGE_EXEC)
2369 te->addr_code |= code_mods;
2370 if (prot & PAGE_WRITE)
2371 te->addr_write |= write_mods;
2372
2373 env->phys_addends[mmu_idx][index] = (pd & TARGET_PAGE_MASK)- vaddr;
2374#endif
2375
2376#ifdef VBOX
2377 /* inform raw mode about TLB page change */
2378 remR3FlushPage(env, vaddr);
2379#endif
2380 return ret;
2381}
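/* A hedged sketch of the caller side (not compiled): after a softmmu TLB
   miss, the target's MMU walk resolves vaddr to paddr plus protection bits
   and installs the mapping with tlb_set_page_exec().  example_page_walk and
   the other example_ names are hypothetical; only the call at the end
   reflects the real interface above. */
#if 0
static int example_handle_mmu_fault(CPUState *env, target_ulong vaddr,
                                    int is_write, int mmu_idx)
{
    target_phys_addr_t paddr;
    int prot;

    /* walk the guest page tables (target specific, not shown) */
    if (example_page_walk(env, vaddr, is_write, &paddr, &prot) < 0)
        return 1; /* no mapping: deliver a guest page fault instead */

    return tlb_set_page_exec(env, vaddr & TARGET_PAGE_MASK,
                             paddr & TARGET_PAGE_MASK,
                             prot, mmu_idx, 1 /*is_softmmu*/);
}
#endif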
2382#if 0
2383/* called from signal handler: invalidate the code and unprotect the
 2384 page. Return TRUE if the fault was successfully handled. */
2385int page_unprotect(target_ulong addr, unsigned long pc, void *puc)
2386{
2387#if !defined(CONFIG_SOFTMMU)
2388 VirtPageDesc *vp;
2389
2390#if defined(DEBUG_TLB)
2391 printf("page_unprotect: addr=0x%08x\n", addr);
2392#endif
2393 addr &= TARGET_PAGE_MASK;
2394
2395 /* if it is not mapped, no need to worry here */
2396 if (addr >= MMAP_AREA_END)
2397 return 0;
2398 vp = virt_page_find(addr >> TARGET_PAGE_BITS);
2399 if (!vp)
2400 return 0;
2401 /* NOTE: in this case, validate_tag is _not_ tested as it
2402 validates only the code TLB */
2403 if (vp->valid_tag != virt_valid_tag)
2404 return 0;
2405 if (!(vp->prot & PAGE_WRITE))
2406 return 0;
2407#if defined(DEBUG_TLB)
2408 printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
2409 addr, vp->phys_addr, vp->prot);
2410#endif
2411 if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
2412 cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
2413 (unsigned long)addr, vp->prot);
2414 /* set the dirty bit */
2415 phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
2416 /* flush the code inside */
2417 tb_invalidate_phys_page(vp->phys_addr, pc, puc);
2418 return 1;
2419#elif defined(VBOX)
2420 addr &= TARGET_PAGE_MASK;
2421
2422 /* if it is not mapped, no need to worry here */
2423 if (addr >= MMAP_AREA_END)
2424 return 0;
2425 return 1;
2426#else
2427 return 0;
2428#endif
2429}
2430#endif /* 0 */
2431
2432#else
2433
2434void tlb_flush(CPUState *env, int flush_global)
2435{
2436}
2437
2438void tlb_flush_page(CPUState *env, target_ulong addr)
2439{
2440}
2441
2442int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
2443 target_phys_addr_t paddr, int prot,
2444 int mmu_idx, int is_softmmu)
2445{
2446 return 0;
2447}
2448
2449#ifndef VBOX
2450/* dump memory mappings */
2451void page_dump(FILE *f)
2452{
2453 unsigned long start, end;
2454 int i, j, prot, prot1;
2455 PageDesc *p;
2456
2457 fprintf(f, "%-8s %-8s %-8s %s\n",
2458 "start", "end", "size", "prot");
2459 start = -1;
2460 end = -1;
2461 prot = 0;
2462 for(i = 0; i <= L1_SIZE; i++) {
2463 if (i < L1_SIZE)
2464 p = l1_map[i];
2465 else
2466 p = NULL;
2467 for(j = 0;j < L2_SIZE; j++) {
2468 if (!p)
2469 prot1 = 0;
2470 else
2471 prot1 = p[j].flags;
2472 if (prot1 != prot) {
2473 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
2474 if (start != -1) {
2475 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
2476 start, end, end - start,
2477 prot & PAGE_READ ? 'r' : '-',
2478 prot & PAGE_WRITE ? 'w' : '-',
2479 prot & PAGE_EXEC ? 'x' : '-');
2480 }
2481 if (prot1 != 0)
2482 start = end;
2483 else
2484 start = -1;
2485 prot = prot1;
2486 }
2487 if (!p)
2488 break;
2489 }
2490 }
2491}
2492#endif /* !VBOX */
2493
2494int page_get_flags(target_ulong address)
2495{
2496 PageDesc *p;
2497
2498 p = page_find(address >> TARGET_PAGE_BITS);
2499 if (!p)
2500 return 0;
2501 return p->flags;
2502}
2503
2504/* modify the flags of a page and invalidate the code if
 2505 necessary. The flag PAGE_WRITE_ORG is set automatically
2506 depending on PAGE_WRITE */
2507void page_set_flags(target_ulong start, target_ulong end, int flags)
2508{
2509 PageDesc *p;
2510 target_ulong addr;
2511
2512 start = start & TARGET_PAGE_MASK;
2513 end = TARGET_PAGE_ALIGN(end);
2514 if (flags & PAGE_WRITE)
2515 flags |= PAGE_WRITE_ORG;
2516#ifdef VBOX
2517 AssertMsgFailed(("We shouldn't be here, and if we should, we must have an env to do the proper locking!\n"));
2518#endif
2519 spin_lock(&tb_lock);
2520 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2521 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
2522 /* if the write protection is set, then we invalidate the code
2523 inside */
2524 if (!(p->flags & PAGE_WRITE) &&
2525 (flags & PAGE_WRITE) &&
2526 p->first_tb) {
2527 tb_invalidate_phys_page(addr, 0, NULL);
2528 }
2529 p->flags = flags;
2530 }
2531 spin_unlock(&tb_lock);
2532}
2533
2534int page_check_range(target_ulong start, target_ulong len, int flags)
2535{
2536 PageDesc *p;
2537 target_ulong end;
2538 target_ulong addr;
2539
 2540 end = TARGET_PAGE_ALIGN(start+len); /* must do this before we lose bits in the next step */
2541 start = start & TARGET_PAGE_MASK;
2542
2543 if( end < start )
2544 /* we've wrapped around */
2545 return -1;
2546 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2547 p = page_find(addr >> TARGET_PAGE_BITS);
2548 if( !p )
2549 return -1;
2550 if( !(p->flags & PAGE_VALID) )
2551 return -1;
2552
2553 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
2554 return -1;
2555 if (flags & PAGE_WRITE) {
2556 if (!(p->flags & PAGE_WRITE_ORG))
2557 return -1;
2558 /* unprotect the page if it was put read-only because it
2559 contains translated code */
2560 if (!(p->flags & PAGE_WRITE)) {
2561 if (!page_unprotect(addr, 0, NULL))
2562 return -1;
2563 }
2564 return 0;
2565 }
2566 }
2567 return 0;
2568}
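/* A hedged usage sketch (not compiled): in user-mode emulation, syscall
   helpers validate a guest buffer with page_check_range() before touching
   it; the wrapper name is hypothetical and mirrors the usual access_ok()
   pattern. */
#if 0
static int example_access_ok(int type /* PAGE_READ or PAGE_WRITE */,
                             target_ulong guest_addr, target_ulong len)
{
    /* page_check_range() returns 0 when every page in the range is valid
       and grants the requested access (unprotecting SMC pages on the way) */
    return page_check_range(guest_addr, len, type) == 0;
}
#endif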
2569
2570/* called from signal handler: invalidate the code and unprotect the
 2571 page. Return TRUE if the fault was successfully handled. */
2572int page_unprotect(target_ulong address, unsigned long pc, void *puc)
2573{
2574 unsigned int page_index, prot, pindex;
2575 PageDesc *p, *p1;
2576 target_ulong host_start, host_end, addr;
2577
2578 /* Technically this isn't safe inside a signal handler. However we
2579 know this only ever happens in a synchronous SEGV handler, so in
2580 practice it seems to be ok. */
2581 mmap_lock();
2582
2583 host_start = address & qemu_host_page_mask;
2584 page_index = host_start >> TARGET_PAGE_BITS;
2585 p1 = page_find(page_index);
2586 if (!p1) {
2587 mmap_unlock();
2588 return 0;
2589 }
2590 host_end = host_start + qemu_host_page_size;
2591 p = p1;
2592 prot = 0;
2593 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
2594 prot |= p->flags;
2595 p++;
2596 }
2597 /* if the page was really writable, then we change its
2598 protection back to writable */
2599 if (prot & PAGE_WRITE_ORG) {
2600 pindex = (address - host_start) >> TARGET_PAGE_BITS;
2601 if (!(p1[pindex].flags & PAGE_WRITE)) {
2602 mprotect((void *)g2h(host_start), qemu_host_page_size,
2603 (prot & PAGE_BITS) | PAGE_WRITE);
2604 p1[pindex].flags |= PAGE_WRITE;
2605 /* and since the content will be modified, we must invalidate
2606 the corresponding translated code. */
2607 tb_invalidate_phys_page(address, pc, puc);
2608#ifdef DEBUG_TB_CHECK
2609 tb_invalidate_check(address);
2610#endif
2611 mmap_unlock();
2612 return 1;
2613 }
2614 }
2615 mmap_unlock();
2616 return 0;
2617}
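/* A hedged sketch of the intended caller (not compiled): the host SIGSEGV
   handler converts the faulting host address back to a guest address and
   lets page_unprotect() decide whether the write merely hit a page that was
   made read-only to catch self-modifying code.  h2g() is assumed from the
   user-mode headers; the wrapper name is hypothetical. */
#if 0
static int example_handle_host_segv(unsigned long host_addr,
                                    unsigned long pc, void *puc)
{
    /* returns 1 if the faulting write can simply be restarted */
    return page_unprotect(h2g(host_addr), pc, puc);
}
#endif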
2618
2619static inline void tlb_set_dirty(CPUState *env,
2620 unsigned long addr, target_ulong vaddr)
2621{
2622}
2623#endif /* defined(CONFIG_USER_ONLY) */
2624
2625#if !defined(CONFIG_USER_ONLY)
2626static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2627 ram_addr_t memory);
2628static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2629 ram_addr_t orig_memory);
2630#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2631 need_subpage) \
2632 do { \
2633 if (addr > start_addr) \
2634 start_addr2 = 0; \
2635 else { \
2636 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2637 if (start_addr2 > 0) \
2638 need_subpage = 1; \
2639 } \
2640 \
2641 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
2642 end_addr2 = TARGET_PAGE_SIZE - 1; \
2643 else { \
2644 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2645 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2646 need_subpage = 1; \
2647 } \
2648 } while (0)
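/* Worked example for CHECK_SUBPAGE, assuming 4 KB target pages (so
   ~TARGET_PAGE_MASK == 0xfff): registering start_addr = 0x1080 with
   orig_size = 0x100 and evaluating the macro for the page addr = 0x1000
   gives
     start_addr2 = 0x1080 & 0xfff = 0x080              (non-zero -> need_subpage = 1)
     end_addr2   = (0x1080 + 0x100 - 1) & 0xfff = 0x17f (< 0xfff  -> need_subpage = 1)
   i.e. the registration only covers bytes 0x080..0x17f of that page, so a
   subpage container is created for it below. */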
2649
2650
2651/* register physical memory. 'size' must be a multiple of the target
2652 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2653 io memory page */
2654void cpu_register_physical_memory(target_phys_addr_t start_addr,
2655 unsigned long size,
2656 unsigned long phys_offset)
2657{
2658 target_phys_addr_t addr, end_addr;
2659 PhysPageDesc *p;
2660 CPUState *env;
2661 ram_addr_t orig_size = size;
2662 void *subpage;
2663
2664#ifdef USE_KQEMU
2665 /* XXX: should not depend on cpu context */
2666 env = first_cpu;
2667 if (env->kqemu_enabled) {
2668 kqemu_set_phys_mem(start_addr, size, phys_offset);
2669 }
2670#endif
2671 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
2672 end_addr = start_addr + (target_phys_addr_t)size;
2673 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
2674 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2675 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
2676 ram_addr_t orig_memory = p->phys_offset;
2677 target_phys_addr_t start_addr2, end_addr2;
2678 int need_subpage = 0;
2679
2680 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2681 need_subpage);
2682 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2683 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2684 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2685 &p->phys_offset, orig_memory);
2686 } else {
2687 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2688 >> IO_MEM_SHIFT];
2689 }
2690 subpage_register(subpage, start_addr2, end_addr2, phys_offset);
2691 } else {
2692 p->phys_offset = phys_offset;
2693#if !defined(VBOX) || defined(VBOX_WITH_NEW_PHYS_CODE)
2694 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2695 (phys_offset & IO_MEM_ROMD))
2696#else
2697 if ( (phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM
2698 || (phys_offset & IO_MEM_ROMD)
2699 || (phys_offset & ~TARGET_PAGE_MASK) == IO_MEM_RAM_MISSING)
2700#endif
2701 phys_offset += TARGET_PAGE_SIZE;
2702 }
2703 } else {
2704 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2705 p->phys_offset = phys_offset;
2706#if !defined(VBOX) || defined(VBOX_WITH_NEW_PHYS_CODE)
2707 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2708 (phys_offset & IO_MEM_ROMD))
2709#else
2710 if ( (phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM
2711 || (phys_offset & IO_MEM_ROMD)
2712 || (phys_offset & ~TARGET_PAGE_MASK) == IO_MEM_RAM_MISSING)
2713#endif
2714 phys_offset += TARGET_PAGE_SIZE;
2715 else {
2716 target_phys_addr_t start_addr2, end_addr2;
2717 int need_subpage = 0;
2718
2719 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2720 end_addr2, need_subpage);
2721
2722 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2723 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2724 &p->phys_offset, IO_MEM_UNASSIGNED);
2725 subpage_register(subpage, start_addr2, end_addr2,
2726 phys_offset);
2727 }
2728 }
2729 }
2730 }
2731 /* since each CPU stores ram addresses in its TLB cache, we must
2732 reset the modified entries */
2733 /* XXX: slow ! */
2734 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2735 tlb_flush(env, 1);
2736 }
2737}
2738
2739/* XXX: temporary until new memory mapping API */
2740uint32_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2741{
2742 PhysPageDesc *p;
2743
2744 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2745 if (!p)
2746 return IO_MEM_UNASSIGNED;
2747 return p->phys_offset;
2748}
2749
2750#ifndef VBOX
2751/* XXX: better than nothing */
2752ram_addr_t qemu_ram_alloc(ram_addr_t size)
2753{
2754 ram_addr_t addr;
2755 if ((phys_ram_alloc_offset + size) > phys_ram_size) {
2756 fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
2757 (uint64_t)size, (uint64_t)phys_ram_size);
2758 abort();
2759 }
2760 addr = phys_ram_alloc_offset;
2761 phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
2762 return addr;
2763}
2764
2765void qemu_ram_free(ram_addr_t addr)
2766{
2767}
2768#endif
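/* A hedged sketch (not compiled) of how a non-VBOX board model pairs
   qemu_ram_alloc() with cpu_register_physical_memory() to back a range of
   guest-physical addresses with host RAM; the function name and the base
   address are illustrative only. */
#if 0
static void example_register_main_ram(ram_addr_t ram_size)
{
    ram_addr_t ram_offset = qemu_ram_alloc(ram_size);

    /* a phys_offset with the low bits clear (IO_MEM_RAM) means plain RAM */
    cpu_register_physical_memory(0x00000000, ram_size, ram_offset | IO_MEM_RAM);
}
#endif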
2769
2770
2771static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
2772{
2773#ifdef DEBUG_UNASSIGNED
2774 printf("Unassigned mem read 0x%08x\n", (int)addr);
2775#endif
2776#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2777 do_unassigned_access(addr, 0, 0, 0, 1);
2778#endif
2779 return 0;
2780}
2781
2782static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
2783{
2784#ifdef DEBUG_UNASSIGNED
2785 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2786#endif
2787#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2788 do_unassigned_access(addr, 0, 0, 0, 2);
2789#endif
2790 return 0;
2791}
2792
2793static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
2794{
2795#ifdef DEBUG_UNASSIGNED
2796 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2797#endif
2798#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2799 do_unassigned_access(addr, 0, 0, 0, 4);
2800#endif
2801 return 0;
2802}
2803
2804static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2805{
2806#ifdef DEBUG_UNASSIGNED
2807 printf("Unassigned mem write 0x%08x = 0x%x\n", (int)addr, val);
2808#endif
2809}
2810
2811static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2812{
2813#ifdef DEBUG_UNASSIGNED
2814 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2815#endif
2816#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2817 do_unassigned_access(addr, 1, 0, 0, 2);
2818#endif
2819}
2820
2821static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2822{
2823#ifdef DEBUG_UNASSIGNED
2824 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2825#endif
2826#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2827 do_unassigned_access(addr, 1, 0, 0, 4);
2828#endif
2829}
2830static CPUReadMemoryFunc *unassigned_mem_read[3] = {
2831 unassigned_mem_readb,
2832 unassigned_mem_readw,
2833 unassigned_mem_readl,
2834};
2835
2836static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
2837 unassigned_mem_writeb,
2838 unassigned_mem_writew,
2839 unassigned_mem_writel,
2840};
2841
2842static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2843{
2844 unsigned long ram_addr;
2845 int dirty_flags;
2846#if defined(VBOX)
2847 ram_addr = addr;
 2848#else
2849 ram_addr = addr - (unsigned long)phys_ram_base;
2850#endif
2851#ifdef VBOX
2852 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
2853 dirty_flags = 0xff;
2854 else
2855#endif /* VBOX */
2856 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2857 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2858#if !defined(CONFIG_USER_ONLY)
2859 tb_invalidate_phys_page_fast(ram_addr, 1);
2860# ifdef VBOX
2861 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
2862 dirty_flags = 0xff;
2863 else
2864# endif /* VBOX */
2865 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2866#endif
2867 }
2868#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
2869 remR3PhysWriteU8(addr, val);
2870#else
2871 stb_p((uint8_t *)(long)addr, val);
2872#endif
2873#ifdef USE_KQEMU
2874 if (cpu_single_env->kqemu_enabled &&
2875 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2876 kqemu_modify_page(cpu_single_env, ram_addr);
2877#endif
2878 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2879#ifdef VBOX
2880 if (RT_LIKELY((ram_addr >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
 2881#endif /* VBOX */
2882 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2883 /* we remove the notdirty callback only if the code has been
2884 flushed */
2885 if (dirty_flags == 0xff)
2886 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_io_vaddr);
2887}
2888
2889static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2890{
2891 unsigned long ram_addr;
2892 int dirty_flags;
2893#if defined(VBOX)
2894 ram_addr = addr;
2895#else
2896 ram_addr = addr - (unsigned long)phys_ram_base;
2897#endif
2898#ifdef VBOX
2899 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
2900 dirty_flags = 0xff;
2901 else
2902#endif /* VBOX */
2903 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2904 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2905#if !defined(CONFIG_USER_ONLY)
2906 tb_invalidate_phys_page_fast(ram_addr, 2);
2907# ifdef VBOX
2908 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
2909 dirty_flags = 0xff;
2910 else
2911# endif /* VBOX */
2912 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2913#endif
2914 }
2915#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
2916 remR3PhysWriteU16(addr, val);
2917#else
2918 stw_p((uint8_t *)(long)addr, val);
2919#endif
2920
2921#ifdef USE_KQEMU
2922 if (cpu_single_env->kqemu_enabled &&
2923 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2924 kqemu_modify_page(cpu_single_env, ram_addr);
2925#endif
2926 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2927#ifdef VBOX
2928 if (RT_LIKELY((ram_addr >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
2929#endif
2930 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2931 /* we remove the notdirty callback only if the code has been
2932 flushed */
2933 if (dirty_flags == 0xff)
2934 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_io_vaddr);
2935}
2936
2937static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2938{
2939 unsigned long ram_addr;
2940 int dirty_flags;
2941#if defined(VBOX)
2942 ram_addr = addr;
2943#else
2944 ram_addr = addr - (unsigned long)phys_ram_base;
2945#endif
2946#ifdef VBOX
2947 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
2948 dirty_flags = 0xff;
2949 else
2950#endif /* VBOX */
2951 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2952 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2953#if !defined(CONFIG_USER_ONLY)
2954 tb_invalidate_phys_page_fast(ram_addr, 4);
2955# ifdef VBOX
2956 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
2957 dirty_flags = 0xff;
2958 else
2959# endif /* VBOX */
2960 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2961#endif
2962 }
2963#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
2964 remR3PhysWriteU32(addr, val);
2965#else
2966 stl_p((uint8_t *)(long)addr, val);
2967#endif
2968#ifdef USE_KQEMU
2969 if (cpu_single_env->kqemu_enabled &&
2970 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2971 kqemu_modify_page(cpu_single_env, ram_addr);
2972#endif
2973 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2974#ifdef VBOX
2975 if (RT_LIKELY((ram_addr >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
2976#endif
2977 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2978 /* we remove the notdirty callback only if the code has been
2979 flushed */
2980 if (dirty_flags == 0xff)
2981 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_io_vaddr);
2982}
2983
2984static CPUReadMemoryFunc *error_mem_read[3] = {
2985 NULL, /* never used */
2986 NULL, /* never used */
2987 NULL, /* never used */
2988};
2989
2990static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2991 notdirty_mem_writeb,
2992 notdirty_mem_writew,
2993 notdirty_mem_writel,
2994};
2995
2996
2997/* Generate a debug exception if a watchpoint has been hit. */
2998static void check_watchpoint(int offset, int flags)
2999{
3000 CPUState *env = cpu_single_env;
3001 target_ulong vaddr;
3002 int i;
3003
3004 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
3005 for (i = 0; i < env->nb_watchpoints; i++) {
3006 if (vaddr == env->watchpoint[i].vaddr
3007 && (env->watchpoint[i].type & flags)) {
3008 env->watchpoint_hit = i + 1;
3009 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
3010 break;
3011 }
3012 }
3013}
3014
3015/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
3016 so these check for a hit then pass through to the normal out-of-line
3017 phys routines. */
3018static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
3019{
3020 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
3021 return ldub_phys(addr);
3022}
3023
3024static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
3025{
3026 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
3027 return lduw_phys(addr);
3028}
3029
3030static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
3031{
3032 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
3033 return ldl_phys(addr);
3034}
3035
3036static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
3037 uint32_t val)
3038{
3039 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
3040 stb_phys(addr, val);
3041}
3042
3043static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
3044 uint32_t val)
3045{
3046 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
3047 stw_phys(addr, val);
3048}
3049
3050static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
3051 uint32_t val)
3052{
3053 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
3054 stl_phys(addr, val);
3055}
3056
3057static CPUReadMemoryFunc *watch_mem_read[3] = {
3058 watch_mem_readb,
3059 watch_mem_readw,
3060 watch_mem_readl,
3061};
3062
3063static CPUWriteMemoryFunc *watch_mem_write[3] = {
3064 watch_mem_writeb,
3065 watch_mem_writew,
3066 watch_mem_writel,
3067};
3068
3069static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
3070 unsigned int len)
3071{
3072 uint32_t ret;
3073 unsigned int idx;
3074
3075 idx = SUBPAGE_IDX(addr - mmio->base);
3076#if defined(DEBUG_SUBPAGE)
3077 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
3078 mmio, len, addr, idx);
3079#endif
3080 ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len], addr);
3081
3082 return ret;
3083}
3084
3085static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
3086 uint32_t value, unsigned int len)
3087{
3088 unsigned int idx;
3089
3090 idx = SUBPAGE_IDX(addr - mmio->base);
3091#if defined(DEBUG_SUBPAGE)
3092 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
3093 mmio, len, addr, idx, value);
3094#endif
3095 (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len], addr, value);
3096}
3097
3098static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
3099{
3100#if defined(DEBUG_SUBPAGE)
3101 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
3102#endif
3103
3104 return subpage_readlen(opaque, addr, 0);
3105}
3106
3107static void subpage_writeb (void *opaque, target_phys_addr_t addr,
3108 uint32_t value)
3109{
3110#if defined(DEBUG_SUBPAGE)
3111 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
3112#endif
3113 subpage_writelen(opaque, addr, value, 0);
3114}
3115
3116static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
3117{
3118#if defined(DEBUG_SUBPAGE)
3119 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
3120#endif
3121
3122 return subpage_readlen(opaque, addr, 1);
3123}
3124
3125static void subpage_writew (void *opaque, target_phys_addr_t addr,
3126 uint32_t value)
3127{
3128#if defined(DEBUG_SUBPAGE)
3129 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
3130#endif
3131 subpage_writelen(opaque, addr, value, 1);
3132}
3133
3134static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
3135{
3136#if defined(DEBUG_SUBPAGE)
3137 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
3138#endif
3139
3140 return subpage_readlen(opaque, addr, 2);
3141}
3142
3143static void subpage_writel (void *opaque,
3144 target_phys_addr_t addr, uint32_t value)
3145{
3146#if defined(DEBUG_SUBPAGE)
3147 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
3148#endif
3149 subpage_writelen(opaque, addr, value, 2);
3150}
3151
3152static CPUReadMemoryFunc *subpage_read[] = {
3153 &subpage_readb,
3154 &subpage_readw,
3155 &subpage_readl,
3156};
3157
3158static CPUWriteMemoryFunc *subpage_write[] = {
3159 &subpage_writeb,
3160 &subpage_writew,
3161 &subpage_writel,
3162};
3163
3164static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
3165 ram_addr_t memory)
3166{
3167 int idx, eidx;
3168 unsigned int i;
3169
3170 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
3171 return -1;
3172 idx = SUBPAGE_IDX(start);
3173 eidx = SUBPAGE_IDX(end);
3174#if defined(DEBUG_SUBPAGE)
3175 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
3176 mmio, start, end, idx, eidx, memory);
3177#endif
3178 memory >>= IO_MEM_SHIFT;
3179 for (; idx <= eidx; idx++) {
3180 for (i = 0; i < 4; i++) {
3181 if (io_mem_read[memory][i]) {
3182 mmio->mem_read[idx][i] = &io_mem_read[memory][i];
3183 mmio->opaque[idx][0][i] = io_mem_opaque[memory];
3184 }
3185 if (io_mem_write[memory][i]) {
3186 mmio->mem_write[idx][i] = &io_mem_write[memory][i];
3187 mmio->opaque[idx][1][i] = io_mem_opaque[memory];
3188 }
3189 }
3190 }
3191
3192 return 0;
3193}
3194
3195static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
3196 ram_addr_t orig_memory)
3197{
3198 subpage_t *mmio;
3199 int subpage_memory;
3200
3201 mmio = qemu_mallocz(sizeof(subpage_t));
3202 if (mmio != NULL) {
3203 mmio->base = base;
3204 subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
3205#if defined(DEBUG_SUBPAGE)
3206 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
3207 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
3208#endif
3209 *phys = subpage_memory | IO_MEM_SUBPAGE;
3210 subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory);
3211 }
3212
3213 return mmio;
3214}
3215
3216static void io_mem_init(void)
3217{
3218 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
3219 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
3220 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
3221#if defined(VBOX) && !defined(VBOX_WITH_NEW_PHYS_CODE)
3222 cpu_register_io_memory(IO_MEM_RAM_MISSING >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
3223 io_mem_nb = 6;
3224#else
3225 io_mem_nb = 5;
3226#endif
3227
3228 io_mem_watch = cpu_register_io_memory(0, watch_mem_read,
3229 watch_mem_write, NULL);
3230
3231#ifndef VBOX /* VBOX: we do this later when the RAM is allocated. */
3232 /* alloc dirty bits array */
3233 phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
3234 memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
3235#endif /* !VBOX */
3236}
3237
3238/* mem_read and mem_write are arrays of functions containing the
3239 function to access byte (index 0), word (index 1) and dword (index
3240 2). Functions can be omitted with a NULL function pointer. The
3241 registered functions may be modified dynamically later.
 3242 If io_index is non-zero, the corresponding io zone is
 3243 modified. If it is zero, a new io zone is allocated. The return
 3244 value can be used with cpu_register_physical_memory(); -1 is
 3245 returned on error. */
3246int cpu_register_io_memory(int io_index,
3247 CPUReadMemoryFunc **mem_read,
3248 CPUWriteMemoryFunc **mem_write,
3249 void *opaque)
3250{
3251 int i, subwidth = 0;
3252
3253 if (io_index <= 0) {
3254 if (io_mem_nb >= IO_MEM_NB_ENTRIES)
3255 return -1;
3256 io_index = io_mem_nb++;
3257 } else {
3258 if (io_index >= IO_MEM_NB_ENTRIES)
3259 return -1;
3260 }
3261
3262 for(i = 0;i < 3; i++) {
3263 if (!mem_read[i] || !mem_write[i])
3264 subwidth = IO_MEM_SUBWIDTH;
3265 io_mem_read[io_index][i] = mem_read[i];
3266 io_mem_write[io_index][i] = mem_write[i];
3267 }
3268 io_mem_opaque[io_index] = opaque;
3269 return (io_index << IO_MEM_SHIFT) | subwidth;
3270}
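/* A hedged end-to-end sketch (not compiled) of the MMIO registration path:
   cpu_register_io_memory() hands back a token that is then fed to
   cpu_register_physical_memory() to map the handlers at a guest-physical
   base.  Every name prefixed with example_ is hypothetical. */
#if 0
static uint32_t example_dev_readl(void *opaque, target_phys_addr_t addr)
{
    return 0; /* ...decode and read a device register... */
}

static void example_dev_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    /* ...decode and write a device register... */
}

static CPUReadMemoryFunc *example_dev_read[3] = {
    NULL, NULL, example_dev_readl,   /* only 32-bit access implemented */
};

static CPUWriteMemoryFunc *example_dev_write[3] = {
    NULL, NULL, example_dev_writel,
};

static void example_dev_map(void *opaque, target_phys_addr_t base)
{
    /* io_index 0 requests a fresh slot; the NULL byte/word callbacks make
       the returned token carry IO_MEM_SUBWIDTH (see above) */
    int io = cpu_register_io_memory(0, example_dev_read, example_dev_write, opaque);
    cpu_register_physical_memory(base, TARGET_PAGE_SIZE, io);
}
#endif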
3271
3272CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
3273{
3274 return io_mem_write[io_index >> IO_MEM_SHIFT];
3275}
3276
3277CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
3278{
3279 return io_mem_read[io_index >> IO_MEM_SHIFT];
3280}
3281#endif /* !defined(CONFIG_USER_ONLY) */
3282
3283/* physical memory access (slow version, mainly for debug) */
3284#if defined(CONFIG_USER_ONLY)
3285void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
3286 int len, int is_write)
3287{
3288 int l, flags;
3289 target_ulong page;
3290 void * p;
3291
3292 while (len > 0) {
3293 page = addr & TARGET_PAGE_MASK;
3294 l = (page + TARGET_PAGE_SIZE) - addr;
3295 if (l > len)
3296 l = len;
3297 flags = page_get_flags(page);
3298 if (!(flags & PAGE_VALID))
3299 return;
3300 if (is_write) {
3301 if (!(flags & PAGE_WRITE))
3302 return;
3303 /* XXX: this code should not depend on lock_user */
3304 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
3305 /* FIXME - should this return an error rather than just fail? */
3306 return;
 3307 memcpy(p, buf, l);
 3308 unlock_user(p, addr, l);
3309 } else {
3310 if (!(flags & PAGE_READ))
3311 return;
3312 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
3313 /* FIXME - should this return an error rather than just fail? */
3314 return;
 3315 memcpy(buf, p, l);
3316 unlock_user(p, addr, 0);
3317 }
3318 len -= l;
3319 buf += l;
3320 addr += l;
3321 }
3322}
3323
3324#else
3325void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
3326 int len, int is_write)
3327{
3328 int l, io_index;
3329 uint8_t *ptr;
3330 uint32_t val;
3331 target_phys_addr_t page;
3332 unsigned long pd;
3333 PhysPageDesc *p;
3334
3335 while (len > 0) {
3336 page = addr & TARGET_PAGE_MASK;
3337 l = (page + TARGET_PAGE_SIZE) - addr;
3338 if (l > len)
3339 l = len;
3340 p = phys_page_find(page >> TARGET_PAGE_BITS);
3341 if (!p) {
3342 pd = IO_MEM_UNASSIGNED;
3343 } else {
3344 pd = p->phys_offset;
3345 }
3346
3347 if (is_write) {
3348 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3349 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3350 /* XXX: could force cpu_single_env to NULL to avoid
3351 potential bugs */
3352 if (l >= 4 && ((addr & 3) == 0)) {
3353 /* 32 bit write access */
3354#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
3355 val = ldl_p(buf);
3356#else
3357 val = *(const uint32_t *)buf;
3358#endif
3359 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3360 l = 4;
3361 } else if (l >= 2 && ((addr & 1) == 0)) {
3362 /* 16 bit write access */
3363#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
3364 val = lduw_p(buf);
3365#else
3366 val = *(const uint16_t *)buf;
3367#endif
3368 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
3369 l = 2;
3370 } else {
3371 /* 8 bit write access */
3372#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
3373 val = ldub_p(buf);
3374#else
3375 val = *(const uint8_t *)buf;
3376#endif
3377 io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
3378 l = 1;
3379 }
3380 } else {
3381 unsigned long addr1;
3382 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3383 /* RAM case */
3384#ifdef VBOX
3385 remR3PhysWrite(addr1, buf, l); NOREF(ptr);
3386#else
3387 ptr = phys_ram_base + addr1;
3388 memcpy(ptr, buf, l);
3389#endif
3390 if (!cpu_physical_memory_is_dirty(addr1)) {
3391 /* invalidate code */
3392 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3393 /* set dirty bit */
3394#ifdef VBOX
3395 if (RT_LIKELY((addr1 >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
3396#endif
3397 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3398 (0xff & ~CODE_DIRTY_FLAG);
3399 }
3400 }
3401 } else {
3402 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3403 !(pd & IO_MEM_ROMD)) {
3404 /* I/O case */
3405 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3406 if (l >= 4 && ((addr & 3) == 0)) {
3407 /* 32 bit read access */
3408 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3409#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
3410 stl_p(buf, val);
3411#else
3412 *(uint32_t *)buf = val;
3413#endif
3414 l = 4;
3415 } else if (l >= 2 && ((addr & 1) == 0)) {
3416 /* 16 bit read access */
3417 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
3418#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
3419 stw_p(buf, val);
3420#else
3421 *(uint16_t *)buf = val;
3422#endif
3423 l = 2;
3424 } else {
3425 /* 8 bit read access */
3426 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
3427#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
3428 stb_p(buf, val);
3429#else
3430 *(uint8_t *)buf = val;
3431#endif
3432 l = 1;
3433 }
3434 } else {
3435 /* RAM case */
3436#ifdef VBOX
3437 remR3PhysRead((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK), buf, l); NOREF(ptr);
3438#else
3439 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3440 (addr & ~TARGET_PAGE_MASK);
3441 memcpy(buf, ptr, l);
3442#endif
3443 }
3444 }
3445 len -= l;
3446 buf += l;
3447 addr += l;
3448 }
3449}
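/* A hedged usage sketch (not compiled): device DMA normally goes through
   the cpu_physical_memory_read()/cpu_physical_memory_write() wrappers from
   cpu-all.h, which forward to cpu_physical_memory_rw() above; the function
   name is hypothetical. */
#if 0
static void example_dma_to_guest(target_phys_addr_t dst, const uint8_t *src, int len)
{
    /* is_write = 1: the write path above also invalidates any translated
       code in the touched pages and updates the dirty bitmap */
    cpu_physical_memory_write(dst, (uint8_t *)src, len);
}
#endif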
3450
3451#ifndef VBOX
3452/* used for ROM loading : can write in RAM and ROM */
3453void cpu_physical_memory_write_rom(target_phys_addr_t addr,
3454 const uint8_t *buf, int len)
3455{
3456 int l;
3457 uint8_t *ptr;
3458 target_phys_addr_t page;
3459 unsigned long pd;
3460 PhysPageDesc *p;
3461
3462 while (len > 0) {
3463 page = addr & TARGET_PAGE_MASK;
3464 l = (page + TARGET_PAGE_SIZE) - addr;
3465 if (l > len)
3466 l = len;
3467 p = phys_page_find(page >> TARGET_PAGE_BITS);
3468 if (!p) {
3469 pd = IO_MEM_UNASSIGNED;
3470 } else {
3471 pd = p->phys_offset;
3472 }
3473
3474 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
3475 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
3476 !(pd & IO_MEM_ROMD)) {
3477 /* do nothing */
3478 } else {
3479 unsigned long addr1;
3480 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3481 /* ROM/RAM case */
3482 ptr = phys_ram_base + addr1;
3483 memcpy(ptr, buf, l);
3484 }
3485 len -= l;
3486 buf += l;
3487 addr += l;
3488 }
3489}
3490#endif /* !VBOX */
3491
3492
3493/* warning: addr must be aligned */
3494uint32_t ldl_phys(target_phys_addr_t addr)
3495{
3496 int io_index;
3497 uint8_t *ptr;
3498 uint32_t val;
3499 unsigned long pd;
3500 PhysPageDesc *p;
3501
3502 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3503 if (!p) {
3504 pd = IO_MEM_UNASSIGNED;
3505 } else {
3506 pd = p->phys_offset;
3507 }
3508
3509 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3510 !(pd & IO_MEM_ROMD)) {
3511 /* I/O case */
3512 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3513 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3514 } else {
3515 /* RAM case */
3516#ifndef VBOX
3517 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3518 (addr & ~TARGET_PAGE_MASK);
3519 val = ldl_p(ptr);
3520#else
3521 val = remR3PhysReadU32((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK)); NOREF(ptr);
3522#endif
3523 }
3524 return val;
3525}
3526
3527/* warning: addr must be aligned */
3528uint64_t ldq_phys(target_phys_addr_t addr)
3529{
3530 int io_index;
3531 uint8_t *ptr;
3532 uint64_t val;
3533 unsigned long pd;
3534 PhysPageDesc *p;
3535
3536 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3537 if (!p) {
3538 pd = IO_MEM_UNASSIGNED;
3539 } else {
3540 pd = p->phys_offset;
3541 }
3542
3543 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3544 !(pd & IO_MEM_ROMD)) {
3545 /* I/O case */
3546 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3547#ifdef TARGET_WORDS_BIGENDIAN
3548 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
3549 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
3550#else
3551 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3552 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
3553#endif
3554 } else {
3555 /* RAM case */
3556#ifndef VBOX
3557 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3558 (addr & ~TARGET_PAGE_MASK);
3559 val = ldq_p(ptr);
3560#else
3561 val = remR3PhysReadU64((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK)); NOREF(ptr);
3562#endif
3563 }
3564 return val;
3565}
3566
3567/* XXX: optimize */
3568uint32_t ldub_phys(target_phys_addr_t addr)
3569{
3570 uint8_t val;
3571 cpu_physical_memory_read(addr, &val, 1);
3572 return val;
3573}
3574
3575/* XXX: optimize */
3576uint32_t lduw_phys(target_phys_addr_t addr)
3577{
3578 uint16_t val;
3579 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
3580 return tswap16(val);
3581}
3582
 3583/* warning: addr must be aligned. The ram page is not marked as dirty
3584 and the code inside is not invalidated. It is useful if the dirty
3585 bits are used to track modified PTEs */
3586void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
3587{
3588 int io_index;
3589 uint8_t *ptr;
3590 unsigned long pd;
3591 PhysPageDesc *p;
3592
3593 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3594 if (!p) {
3595 pd = IO_MEM_UNASSIGNED;
3596 } else {
3597 pd = p->phys_offset;
3598 }
3599
3600 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3601 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3602 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3603 } else {
3604#ifndef VBOX
3605 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3606 (addr & ~TARGET_PAGE_MASK);
3607 stl_p(ptr, val);
3608#else
3609 remR3PhysWriteU32((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK), val); NOREF(ptr);
3610#endif
3611#ifndef VBOX
3612 if (unlikely(in_migration)) {
3613 if (!cpu_physical_memory_is_dirty(addr1)) {
3614 /* invalidate code */
3615 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3616 /* set dirty bit */
3617 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3618 (0xff & ~CODE_DIRTY_FLAG);
3619 }
3620 }
3621#endif
3622 }
3623}
3624
3625void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
3626{
3627 int io_index;
3628 uint8_t *ptr;
3629 unsigned long pd;
3630 PhysPageDesc *p;
3631
3632 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3633 if (!p) {
3634 pd = IO_MEM_UNASSIGNED;
3635 } else {
3636 pd = p->phys_offset;
3637 }
3638
3639 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3640 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3641#ifdef TARGET_WORDS_BIGENDIAN
3642 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
3643 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
3644#else
3645 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3646 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
3647#endif
3648 } else {
3649#ifndef VBOX
3650 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3651 (addr & ~TARGET_PAGE_MASK);
3652 stq_p(ptr, val);
3653#else
3654 remR3PhysWriteU64((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK), val); NOREF(ptr);
3655#endif
3656 }
3657}
3658
3659
3660/* warning: addr must be aligned */
3661void stl_phys(target_phys_addr_t addr, uint32_t val)
3662{
3663 int io_index;
3664 uint8_t *ptr;
3665 unsigned long pd;
3666 PhysPageDesc *p;
3667
3668 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3669 if (!p) {
3670 pd = IO_MEM_UNASSIGNED;
3671 } else {
3672 pd = p->phys_offset;
3673 }
3674
3675 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3676 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3677 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3678 } else {
3679 unsigned long addr1;
3680 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3681 /* RAM case */
3682#ifndef VBOX
3683 ptr = phys_ram_base + addr1;
3684 stl_p(ptr, val);
3685#else
3686 remR3PhysWriteU32((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK), val); NOREF(ptr);
3687#endif
3688 if (!cpu_physical_memory_is_dirty(addr1)) {
3689 /* invalidate code */
3690 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3691 /* set dirty bit */
3692#ifdef VBOX
3693 if (RT_LIKELY((addr1 >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
3694#endif
3695 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3696 (0xff & ~CODE_DIRTY_FLAG);
3697 }
3698 }
3699}
3700
3701/* XXX: optimize */
3702void stb_phys(target_phys_addr_t addr, uint32_t val)
3703{
3704 uint8_t v = val;
3705 cpu_physical_memory_write(addr, &v, 1);
3706}
3707
3708/* XXX: optimize */
3709void stw_phys(target_phys_addr_t addr, uint32_t val)
3710{
3711 uint16_t v = tswap16(val);
3712 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
3713}
3714
3715/* XXX: optimize */
3716void stq_phys(target_phys_addr_t addr, uint64_t val)
3717{
3718 val = tswap64(val);
3719 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
3720}
3721
3722#endif
3723
3724/* virtual memory access for debug */
3725int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3726 uint8_t *buf, int len, int is_write)
3727{
3728 int l;
3729 target_ulong page, phys_addr;
3730
3731 while (len > 0) {
3732 page = addr & TARGET_PAGE_MASK;
3733 phys_addr = cpu_get_phys_page_debug(env, page);
3734 /* if no physical page mapped, return an error */
3735 if (phys_addr == -1)
3736 return -1;
3737 l = (page + TARGET_PAGE_SIZE) - addr;
3738 if (l > len)
3739 l = len;
3740 cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
3741 buf, l, is_write);
3742 len -= l;
3743 buf += l;
3744 addr += l;
3745 }
3746 return 0;
3747}
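/* A hedged usage sketch (not compiled): the gdb stub and the monitor read
   guest *virtual* memory through cpu_memory_rw_debug(), which resolves each
   page with cpu_get_phys_page_debug() as shown above; the wrapper name is
   hypothetical. */
#if 0
static int example_gdb_read_mem(CPUState *env, target_ulong vaddr,
                                uint8_t *buf, int len)
{
    return cpu_memory_rw_debug(env, vaddr, buf, len, 0 /*is_write*/);
}
#endif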
3748
3749/* in deterministic execution mode, instructions doing device I/Os
3750 must be at the end of the TB */
3751void cpu_io_recompile(CPUState *env, void *retaddr)
3752{
3753 TranslationBlock *tb;
3754 uint32_t n, cflags;
3755 target_ulong pc, cs_base;
3756 uint64_t flags;
3757
3758 tb = tb_find_pc((unsigned long)retaddr);
3759 if (!tb) {
3760 cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
3761 retaddr);
3762 }
3763 n = env->icount_decr.u16.low + tb->icount;
3764 cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
3765 /* Calculate how many instructions had been executed before the fault
3766 occurred. */
3767 n = n - env->icount_decr.u16.low;
3768 /* Generate a new TB ending on the I/O insn. */
3769 n++;
3770 /* On MIPS and SH, delay slot instructions can only be restarted if
3771 they were already the first instruction in the TB. If this is not
3772 the first instruction in a TB then re-execute the preceding
3773 branch. */
3774#if defined(TARGET_MIPS)
3775 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
3776 env->active_tc.PC -= 4;
3777 env->icount_decr.u16.low++;
3778 env->hflags &= ~MIPS_HFLAG_BMASK;
3779 }
3780#elif defined(TARGET_SH4)
3781 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
3782 && n > 1) {
3783 env->pc -= 2;
3784 env->icount_decr.u16.low++;
3785 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
3786 }
3787#endif
3788 /* This should never happen. */
3789 if (n > CF_COUNT_MASK)
3790 cpu_abort(env, "TB too big during recompile");
3791
3792 cflags = n | CF_LAST_IO;
3793 pc = tb->pc;
3794 cs_base = tb->cs_base;
3795 flags = tb->flags;
3796 tb_phys_invalidate(tb, -1);
3797 /* FIXME: In theory this could raise an exception. In practice
3798 we have already translated the block once so it's probably ok. */
3799 tb_gen_code(env, pc, cs_base, flags, cflags);
3800 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
3801 the first in the TB) then we end up generating a whole new TB and
3802 repeating the fault, which is horribly inefficient.
3803 Better would be to execute just this insn uncached, or generate a
3804 second new TB. */
3805 cpu_resume_from_signal(env, NULL);
3806}
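/* Worked example for the bookkeeping above, assuming the usual icount
   scheme in which a TB subtracts its full instruction count from
   icount_decr on entry: suppose tb->icount = 7, the budget on TB entry was
   100 (so u16.low reads 93 here) and the I/O access is the 4th instruction.
   Then n = 93 + 7 = 100 (the original budget); cpu_restore_state() rewinds
   u16.low to 100 - 3 = 97; n = 100 - 97 = 3 completed instructions; n++
   makes it 4, so the regenerated TB holds exactly 4 instructions with the
   I/O access as its final, CF_LAST_IO-terminated one. */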
3807
3808#ifndef VBOX
3809void dump_exec_info(FILE *f,
3810 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
3811{
3812 int i, target_code_size, max_target_code_size;
3813 int direct_jmp_count, direct_jmp2_count, cross_page;
3814 TranslationBlock *tb;
3815
3816 target_code_size = 0;
3817 max_target_code_size = 0;
3818 cross_page = 0;
3819 direct_jmp_count = 0;
3820 direct_jmp2_count = 0;
3821 for(i = 0; i < nb_tbs; i++) {
3822 tb = &tbs[i];
3823 target_code_size += tb->size;
3824 if (tb->size > max_target_code_size)
3825 max_target_code_size = tb->size;
3826 if (tb->page_addr[1] != -1)
3827 cross_page++;
3828 if (tb->tb_next_offset[0] != 0xffff) {
3829 direct_jmp_count++;
3830 if (tb->tb_next_offset[1] != 0xffff) {
3831 direct_jmp2_count++;
3832 }
3833 }
3834 }
3835 /* XXX: avoid using doubles ? */
3836 cpu_fprintf(f, "Translation buffer state:\n");
3837 cpu_fprintf(f, "gen code size %ld/%ld\n",
3838 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
3839 cpu_fprintf(f, "TB count %d/%d\n",
3840 nb_tbs, code_gen_max_blocks);
3841 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
3842 nb_tbs ? target_code_size / nb_tbs : 0,
3843 max_target_code_size);
3844 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
3845 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
3846 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
3847 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
3848 cross_page,
3849 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
3850 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
3851 direct_jmp_count,
3852 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
3853 direct_jmp2_count,
3854 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
3855 cpu_fprintf(f, "\nStatistics:\n");
3856 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
3857 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
3858 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
3859 tcg_dump_info(f, cpu_fprintf);
3860}
3861#endif /* !VBOX */
3862
3863#if !defined(CONFIG_USER_ONLY)
3864
3865#define MMUSUFFIX _cmmu
3866#define GETPC() NULL
3867#define env cpu_single_env
3868#define SOFTMMU_CODE_ACCESS
3869
3870#define SHIFT 0
3871#include "softmmu_template.h"
3872
3873#define SHIFT 1
3874#include "softmmu_template.h"
3875
3876#define SHIFT 2
3877#include "softmmu_template.h"
3878
3879#define SHIFT 3
3880#include "softmmu_template.h"
3881
3882#undef env
3883
3884#endif