VirtualBox

source: vbox/trunk/src/recompiler/exec.c@ 20595

Last change on this file since 20595 was 19094, checked in by vboxsync, 16 years ago

gcc-4 fixes

  • Property svn:eol-style set to native
File size: 114.2 KB
Line 
1/*
2 * virtual page mapping and translated block handling
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21/*
22 * Sun LGPL Disclaimer: For the avoidance of doubt, except that if any license choice
23 * other than GPL or LGPL is available it will apply instead, Sun elects to use only
24 * the Lesser General Public License version 2.1 (LGPLv2) at this time for any software where
25 * a choice of LGPL license versions is made available with the language indicating
26 * that LGPLv2 or any later version may be used, or where a choice of which version
27 * of the LGPL is applied is otherwise unspecified.
28 */
29#include "config.h"
30#ifndef VBOX
31#ifdef _WIN32
32#include <windows.h>
33#else
34#include <sys/types.h>
35#include <sys/mman.h>
36#endif
37#include <stdlib.h>
38#include <stdio.h>
39#include <stdarg.h>
40#include <string.h>
41#include <errno.h>
42#include <unistd.h>
43#include <inttypes.h>
44#else /* VBOX */
45# include <stdlib.h>
46# include <stdio.h>
47# include <iprt/alloc.h>
48# include <iprt/string.h>
49# include <iprt/param.h>
50# include <VBox/pgm.h> /* PGM_DYNAMIC_RAM_ALLOC */
51#endif /* VBOX */
52
53#include "cpu.h"
54#include "exec-all.h"
55#if defined(CONFIG_USER_ONLY)
56#include <qemu.h>
57#endif
58
59//#define DEBUG_TB_INVALIDATE
60//#define DEBUG_FLUSH
61//#define DEBUG_TLB
62//#define DEBUG_UNASSIGNED
63
64/* make various TB consistency checks */
65//#define DEBUG_TB_CHECK
66//#define DEBUG_TLB_CHECK
67
68#if !defined(CONFIG_USER_ONLY)
69/* TB consistency checks only implemented for usermode emulation. */
70#undef DEBUG_TB_CHECK
71#endif
72
73#define SMC_BITMAP_USE_THRESHOLD 10
74
75#define MMAP_AREA_START 0x00000000
76#define MMAP_AREA_END 0xa8000000
77
78#if defined(TARGET_SPARC64)
79#define TARGET_PHYS_ADDR_SPACE_BITS 41
80#elif defined(TARGET_SPARC)
81#define TARGET_PHYS_ADDR_SPACE_BITS 36
82#elif defined(TARGET_ALPHA)
83#define TARGET_PHYS_ADDR_SPACE_BITS 42
84#define TARGET_VIRT_ADDR_SPACE_BITS 42
85#elif defined(TARGET_PPC64)
86#define TARGET_PHYS_ADDR_SPACE_BITS 42
87#elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
88#define TARGET_PHYS_ADDR_SPACE_BITS 42
89#elif defined(TARGET_I386) && !defined(USE_KQEMU)
90#define TARGET_PHYS_ADDR_SPACE_BITS 36
91#else
92/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
93#define TARGET_PHYS_ADDR_SPACE_BITS 32
94#endif
95
96static TranslationBlock *tbs;
97int code_gen_max_blocks;
98TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
99static int nb_tbs;
100/* any access to the tbs or the page table must use this lock */
101spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
102
103#ifndef VBOX
104#if defined(__arm__) || defined(__sparc_v9__)
105/* The prologue must be reachable with a direct jump. ARM and Sparc64
106 have limited branch ranges (possibly also PPC) so place it in a
107 section close to code segment. */
108#define code_gen_section \
109 __attribute__((__section__(".gen_code"))) \
110 __attribute__((aligned (32)))
111#else
112#define code_gen_section \
113 __attribute__((aligned (32)))
114#endif
115uint8_t code_gen_prologue[1024] code_gen_section;
116
117#else /* VBOX */
118extern uint8_t* code_gen_prologue;
119#endif /* VBOX */
120
121static uint8_t *code_gen_buffer;
122static unsigned long code_gen_buffer_size;
123/* threshold to flush the translated code buffer */
124static unsigned long code_gen_buffer_max_size;
125uint8_t *code_gen_ptr;
126
127#ifndef VBOX
128#if !defined(CONFIG_USER_ONLY)
129ram_addr_t phys_ram_size;
130int phys_ram_fd;
131uint8_t *phys_ram_base;
132uint8_t *phys_ram_dirty;
133static int in_migration;
134static ram_addr_t phys_ram_alloc_offset = 0;
135#endif
136#else /* VBOX */
137RTGCPHYS phys_ram_size;
138/* we have memory ranges (the high PC-BIOS mapping) which
139 causes some pages to fall outside the dirty map here. */
140RTGCPHYS phys_ram_dirty_size;
141#endif /* VBOX */
142#if !defined(VBOX)
143uint8_t *phys_ram_base;
144#endif
145uint8_t *phys_ram_dirty;
146
147CPUState *first_cpu;
148/* current CPU in the current thread. It is only valid inside
149 cpu_exec() */
150CPUState *cpu_single_env;
151/* 0 = Do not count executed instructions.
152 1 = Precise instruction counting.
153 2 = Adaptive rate instruction counting. */
154int use_icount = 0;
155/* Current instruction counter. While executing translated code this may
156 include some instructions that have not yet been executed. */
157int64_t qemu_icount;
158
/* Per guest ram page descriptor: tracks which translated blocks overlap
   the page plus the self-modifying-code (SMC) detection bookkeeping. */
typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    /* one bit per page byte; set for bytes occupied by translated code
       (built lazily by build_page_bitmap()) */
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    /* page protection flags, only tracked in user-mode emulation */
    unsigned long flags;
#endif
} PageDesc;
170
/* Descriptor for one guest physical page in the phys map. */
typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low 12 bits */
    ram_addr_t phys_offset;
} PhysPageDesc;
175
176#define L2_BITS 10
177#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
178/* XXX: this is a temporary hack for alpha target.
179 * In the future, this is to be replaced by a multi-level table
180 * to actually be able to handle the complete 64 bits address space.
181 */
182#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
183#else
184#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
185#endif
186#ifdef VBOX
187#define L0_BITS (TARGET_PHYS_ADDR_SPACE_BITS - 32)
188#endif
189
190#ifdef VBOX
191#define L0_SIZE (1 << L0_BITS)
192#endif
193#define L1_SIZE (1 << L1_BITS)
194#define L2_SIZE (1 << L2_BITS)
195
196static void io_mem_init(void);
197
198unsigned long qemu_real_host_page_size;
199unsigned long qemu_host_page_bits;
200unsigned long qemu_host_page_size;
201unsigned long qemu_host_page_mask;
202
203/* XXX: for system emulation, it could just be an array */
204#ifndef VBOX
205static PageDesc *l1_map[L1_SIZE];
206static PhysPageDesc **l1_phys_map;
207#else
208static unsigned l0_map_max_used = 0;
209static PageDesc **l0_map[L0_SIZE];
210static void **l0_phys_map[L0_SIZE];
211#endif
212
213#if !defined(CONFIG_USER_ONLY)
214static void io_mem_init(void);
215
216/* io memory support */
217CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
218CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
219void *io_mem_opaque[IO_MEM_NB_ENTRIES];
220static int io_mem_nb;
221static int io_mem_watch;
222#endif
223
224#ifndef VBOX
225/* log support */
226static const char *logfilename = "/tmp/qemu.log";
227#endif /* !VBOX */
228FILE *logfile;
229int loglevel;
230#ifndef VBOX
231static int log_append = 0;
232#endif
233
234/* statistics */
235#ifndef VBOX
236static int tlb_flush_count;
237static int tb_flush_count;
238static int tb_phys_invalidate_count;
239#else /* VBOX - Resettable U32 stats, see VBoxRecompiler.c. */
240uint32_t tlb_flush_count;
241uint32_t tb_flush_count;
242uint32_t tb_phys_invalidate_count;
243#endif /* VBOX */
244
/* Byte offset of 'addr' within its target page (sub-page index). */
#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
/* State for a page that mixes several memory regions: per-byte handler
   tables. NOTE(review): the [4] dimension presumably selects the access
   size (1/2/4 bytes + one spare) as elsewhere in this file — confirm
   against the subpage read/write handlers. */
typedef struct subpage_t {
    target_phys_addr_t base;                            /* base address of the page */
    CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];  /* per-byte read handlers */
    CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];/* per-byte write handlers */
    void *opaque[TARGET_PAGE_SIZE][2][4];               /* handler cookies ([0]=read, [1]=write) */
} subpage_t;
252
253
#ifndef VBOX
#ifdef _WIN32
/* Make [addr, addr+size) readable, writable and executable (Win32).
   NOTE(review): the VirtualProtect return value is not checked. */
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);

}
#else
/* Make [addr, addr+size) readable, writable and executable (POSIX).
   The range is widened to whole host pages as mprotect() requires.
   NOTE(review): the mprotect return value is not checked. */
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    /* round start down to a page boundary */
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    /* round end up to a page boundary */
    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif
#else // VBOX
/* Make [addr, addr+size) readable, writable and executable via IPRT. */
static void map_exec(void *addr, long size)
{
    RTMemProtect(addr, size,
                 RTMEM_PROT_EXEC | RTMEM_PROT_READ | RTMEM_PROT_WRITE);
}
#endif
287
288static void page_init(void)
289{
290 /* NOTE: we can always suppose that qemu_host_page_size >=
291 TARGET_PAGE_SIZE */
292#ifdef VBOX
293 RTMemProtect(code_gen_buffer, sizeof(code_gen_buffer),
294 RTMEM_PROT_EXEC | RTMEM_PROT_READ | RTMEM_PROT_WRITE);
295 qemu_real_host_page_size = PAGE_SIZE;
296#else /* !VBOX */
297#ifdef _WIN32
298 {
299 SYSTEM_INFO system_info;
300 DWORD old_protect;
301
302 GetSystemInfo(&system_info);
303 qemu_real_host_page_size = system_info.dwPageSize;
304 }
305#else
306 qemu_real_host_page_size = getpagesize();
307#endif
308#endif /* !VBOX */
309
310 if (qemu_host_page_size == 0)
311 qemu_host_page_size = qemu_real_host_page_size;
312 if (qemu_host_page_size < TARGET_PAGE_SIZE)
313 qemu_host_page_size = TARGET_PAGE_SIZE;
314 qemu_host_page_bits = 0;
315#ifndef VBOX
316 while ((1 << qemu_host_page_bits) < qemu_host_page_size)
317#else
318 while ((1 << qemu_host_page_bits) < (int)qemu_host_page_size)
319#endif
320 qemu_host_page_bits++;
321 qemu_host_page_mask = ~(qemu_host_page_size - 1);
322#ifndef VBOX
323 l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
324 memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
325#endif
326#ifdef VBOX
327 /* We use other means to set reserved bit on our pages */
328#else
329#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
330 {
331 long long startaddr, endaddr;
332 FILE *f;
333 int n;
334
335 mmap_lock();
336 last_brk = (unsigned long)sbrk(0);
337 f = fopen("/proc/self/maps", "r");
338 if (f) {
339 do {
340 n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
341 if (n == 2) {
342 startaddr = MIN(startaddr,
343 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
344 endaddr = MIN(endaddr,
345 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
346 page_set_flags(startaddr & TARGET_PAGE_MASK,
347 TARGET_PAGE_ALIGN(endaddr),
348 PAGE_RESERVED);
349 }
350 } while (!feof(f));
351 fclose(f);
352 }
353 mmap_unlock();
354 }
355#endif
356#endif
357}
358
/* Return the level-1 slot holding the PageDesc array for page 'index'
   (index = virtual page number).  Returns NULL when the index is out of
   range or (VBox) when a lazy level-1 table allocation fails. */
#ifndef VBOX
static inline PageDesc **page_l1_map(target_ulong index)
#else
DECLINLINE(PageDesc **) page_l1_map(target_ulong index)
#endif
{
#ifndef VBOX
#if TARGET_LONG_BITS > 32
    /* Host memory outside guest VM. For 32-bit targets we have already
       excluded high addresses. */
    if (index > ((target_ulong)L2_SIZE * L1_SIZE))
        return NULL;
#endif
    return &l1_map[index >> L2_BITS];
#else /* VBOX */
    /* VBox adds a level-0 table on top so the map covers more than 32
       bits of guest-physical space; level-1 tables are allocated on
       first touch. */
    PageDesc **l1_map;
    AssertMsgReturn(index < (target_ulong)L2_SIZE * L1_SIZE * L0_SIZE,
                    ("index=%RGp >= %RGp; L1_SIZE=%#x L2_SIZE=%#x L0_SIZE=%#x\n",
                     (RTGCPHYS)index, (RTGCPHYS)L2_SIZE * L1_SIZE, L1_SIZE, L2_SIZE, L0_SIZE),
                    NULL);
    l1_map = l0_map[index >> (L1_BITS + L2_BITS)];
    if (RT_UNLIKELY(!l1_map))
    {
        unsigned i0 = index >> (L1_BITS + L2_BITS);
        l0_map[i0] = l1_map = qemu_mallocz(sizeof(PageDesc *) * L1_SIZE);
        if (RT_UNLIKELY(!l1_map))
            return NULL;
        /* track the high-water mark so page_flush_tb() only scans used slots */
        if (i0 >= l0_map_max_used)
            l0_map_max_used = i0 + 1;
    }
    return &l1_map[(index >> L2_BITS) & (L1_SIZE - 1)];
#endif /* VBOX */
}
392
393#ifndef VBOX
394static inline PageDesc *page_find_alloc(target_ulong index)
395#else
396DECLINLINE(PageDesc *) page_find_alloc(target_ulong index)
397#endif
398{
399 PageDesc **lp, *p;
400 lp = page_l1_map(index);
401 if (!lp)
402 return NULL;
403
404 p = *lp;
405 if (!p) {
406 /* allocate if not found */
407#if defined(CONFIG_USER_ONLY)
408 unsigned long addr;
409 size_t len = sizeof(PageDesc) * L2_SIZE;
410 /* Don't use qemu_malloc because it may recurse. */
411 p = mmap(0, len, PROT_READ | PROT_WRITE,
412 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
413 *lp = p;
414 addr = h2g(p);
415 if (addr == (target_ulong)addr) {
416 page_set_flags(addr & TARGET_PAGE_MASK,
417 TARGET_PAGE_ALIGN(addr + len),
418 PAGE_RESERVED);
419 }
420#else
421 p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
422 *lp = p;
423#endif
424 }
425 return p + (index & (L2_SIZE - 1));
426}
427
428#ifndef VBOX
429static inline PageDesc *page_find(target_ulong index)
430#else
431DECLINLINE(PageDesc *) page_find(target_ulong index)
432#endif
433{
434 PageDesc **lp, *p;
435 lp = page_l1_map(index);
436 if (!lp)
437 return NULL;
438
439 p = *lp;
440 if (!p)
441 return 0;
442 return p + (index & (L2_SIZE - 1));
443}
444
/* Look up the PhysPageDesc for guest physical page number 'index',
   lazily allocating intermediate map levels when 'alloc' is non-zero.
   Returns NULL when the page is unknown (and !alloc) or out of range.
   NOTE(review): the qemu_vmalloc() results are used unchecked —
   presumably it aborts on OOM; confirm its contract. */
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    void **lp, **p;
    PhysPageDesc *pd;

#ifndef VBOX
    p = (void **)l1_phys_map;
#if TARGET_PHYS_ADDR_SPACE_BITS > 32

#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
#endif
    /* extra indirection level for >32-bit physical address spaces */
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
    p = *lp;
    if (!p) {
        /* allocate if not found */
        if (!alloc)
            return NULL;
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
        memset(p, 0, sizeof(void *) * L1_SIZE);
        *lp = p;
    }
#endif
#else /* VBOX */
    /* level 0 lookup and lazy allocation of level 1 map. */
    if (RT_UNLIKELY(index >= (target_phys_addr_t)L2_SIZE * L1_SIZE * L0_SIZE))
        return NULL;
    p = l0_phys_map[index >> (L1_BITS + L2_BITS)];
    if (RT_UNLIKELY(!p)) {
        if (!alloc)
            return NULL;
        p = qemu_vmalloc(sizeof(void **) * L1_SIZE);
        memset(p, 0, sizeof(void **) * L1_SIZE);
        l0_phys_map[index >> (L1_BITS + L2_BITS)] = p;
    }

    /* level 1 lookup and lazy allocation of level 2 map. */
#endif /* VBOX */
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
    pd = *lp;
    if (!pd) {
        int i;
        /* allocate if not found */
        if (!alloc)
            return NULL;
        pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
        *lp = pd;
        /* fresh pages start out unassigned (accesses go to the
           unassigned-I/O handlers) */
        for (i = 0; i < L2_SIZE; i++)
            pd[i].phys_offset = IO_MEM_UNASSIGNED;
    }
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
}
497
498#ifndef VBOX
499static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
500#else
501DECLINLINE(PhysPageDesc *) phys_page_find(target_phys_addr_t index)
502#endif
503{
504 return phys_page_find_alloc(index, 0);
505}
506
507#if !defined(CONFIG_USER_ONLY)
508static void tlb_protect_code(ram_addr_t ram_addr);
509static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
510 target_ulong vaddr);
511#define mmap_lock() do { } while(0)
512#define mmap_unlock() do { } while(0)
513#endif
514
515#ifdef VBOX
516/*
517 * We don't need such huge codegen buffer size, as execute most of the code
518 * in raw or hwacc mode
519 */
520#define DEFAULT_CODE_GEN_BUFFER_SIZE (8 * 1024 * 1024)
521#else
522#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
523#endif
524
525#if defined(CONFIG_USER_ONLY)
526/* Currently it is not recommanded to allocate big chunks of data in
527 user mode. It will change when a dedicated libc will be used */
528#define USE_STATIC_CODE_GEN_BUFFER
529#endif
530
531/* VBox allocates codegen buffer dynamically */
532#ifndef VBOX
533#ifdef USE_STATIC_CODE_GEN_BUFFER
534static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
535#endif
536#endif
537
/* Allocate (or adopt) the translated-code buffer, make it and the
   prologue executable, and size the TranslationBlock array.
   'tb_size' is the requested buffer size in bytes; 0 selects a default. */
static void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
#ifdef VBOX
    /* We cannot use phys_ram_size here, as it's 0 now,
     * it only gets initialized once RAM registration callback
     * (REMR3NotifyPhysRamRegister()) called.
     */
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        /* in user mode, phys_ram_size is not meaningful */
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(phys_ram_size / 4);
#endif

    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
#endif /* VBOX */

    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#ifdef VBOX
    code_gen_buffer = RTMemExecAlloc(code_gen_buffer_size);

    if (!code_gen_buffer) {
        /* NOTE(review): %lld does not match 'unsigned long'
           code_gen_buffer_size on all hosts — confirm/cast. */
        LogRel(("REM: failed allocate codegen buffer %lld\n",
                code_gen_buffer_size));
        return;
    }
#else //!VBOX
#if defined(__linux__)
    {
        int flags;
        void *start = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* keep the buffer in the low 2GB so generated jumps/calls reach it */
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        start = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#endif
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#elif defined(__FreeBSD__)
    {
        int flags;
        void *addr = NULL;
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        flags |= MAP_FIXED;
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#endif
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    /* generic fallback: plain allocation + explicit mprotect */
    code_gen_buffer = qemu_malloc(code_gen_buffer_size);
    if (!code_gen_buffer) {
        fprintf(stderr, "Could not allocate dynamic translator buffer\n");
        exit(1);
    }
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
    /* NOTE(review): the prologue is mapped executable again a few lines
       below in the !VBOX branch — this call looks redundant. */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
#endif /* !VBOX */
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
#ifndef VBOX
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
#else
    /* code_gen_prologue is a pointer here, so sizeof() would be wrong;
       the prologue buffer is 1KB (see the native declaration). */
    map_exec(code_gen_prologue, _1K);
#endif

    /* reserve room for one maximally sized block at the end, and size the
       TB array from the average block size */
    code_gen_buffer_max_size = code_gen_buffer_size -
        code_gen_max_block_size();
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}
648
/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void cpu_exec_init_all(unsigned long tb_size)
{
    cpu_gen_init();
    /* order matters: page_init() (VBox) touches the buffer allocated here */
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    page_init();
#if !defined(CONFIG_USER_ONLY)
    io_mem_init();
#endif
}
662
663#ifndef VBOX
664#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
665
666#define CPU_COMMON_SAVE_VERSION 1
667
/* savevm handler: serialize the architecture-independent CPU fields. */
static void cpu_common_save(QEMUFile *f, void *opaque)
{
    CPUState *env = opaque;

    qemu_put_be32s(f, &env->halted);
    qemu_put_be32s(f, &env->interrupt_request);
}
675
/* loadvm handler: restore the architecture-independent CPU fields.
   Returns 0 on success, -EINVAL on version mismatch. */
static int cpu_common_load(QEMUFile *f, void *opaque, int version_id)
{
    CPUState *env = opaque;

    if (version_id != CPU_COMMON_SAVE_VERSION)
        return -EINVAL;

    qemu_get_be32s(f, &env->halted);
    qemu_get_be32s(f, &env->interrupt_request);
    /* the TLB was built against pre-load state; drop it entirely */
    tlb_flush(env, 1);

    return 0;
}
689#endif
690#endif //!VBOX
691
692void cpu_exec_init(CPUState *env)
693{
694 CPUState **penv;
695 int cpu_index;
696
697 env->next_cpu = NULL;
698 penv = &first_cpu;
699 cpu_index = 0;
700 while (*penv != NULL) {
701 penv = (CPUState **)&(*penv)->next_cpu;
702 cpu_index++;
703 }
704 env->cpu_index = cpu_index;
705 env->nb_watchpoints = 0;
706 *penv = env;
707#ifndef VBOX
708#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
709 register_savevm("cpu_common", cpu_index, CPU_COMMON_SAVE_VERSION,
710 cpu_common_save, cpu_common_load, env);
711 register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
712 cpu_save, cpu_load, env);
713#endif
714#endif // !VBOX
715}
716
717#ifndef VBOX
718static inline void invalidate_page_bitmap(PageDesc *p)
719#else
720DECLINLINE(void) invalidate_page_bitmap(PageDesc *p)
721#endif
722{
723 if (p->code_bitmap) {
724 qemu_free(p->code_bitmap);
725 p->code_bitmap = NULL;
726 }
727 p->code_write_count = 0;
728}
729
/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
{
    int i, j;
    PageDesc *p;
#ifdef VBOX
    int k;
#endif

#ifdef VBOX
    /* VBox: iterate the used level-0 slots; the shared loop below then
       reads the *local* l1_map. In native qemu the same loop reads the
       global l1_map array instead. */
    k = l0_map_max_used;
    while (k-- > 0) {
        PageDesc **l1_map = l0_map[k];
        if (l1_map) {
#endif
    for(i = 0; i < L1_SIZE; i++) {
        p = l1_map[i];
        if (p) {
            /* clear every PageDesc of this level-2 array */
            for(j = 0; j < L2_SIZE; j++) {
                p->first_tb = NULL;
                invalidate_page_bitmap(p);
                p++;
            }
        }
    }
#ifdef VBOX
        }
    }
#endif
}
760
/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#ifdef VBOX
    STAM_PROFILE_START(&env1->StatTbFlush, a);
#endif
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    /* sanity: the write pointer must still be inside the buffer */
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    /* clear every CPU's TB lookup cache */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    /* clear the physical hash and all per-page TB lists */
    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    /* reset the generated-code write pointer; old code is now garbage */
    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
#ifdef VBOX
    STAM_PROFILE_STOP(&env1->StatTbFlush, a);
#endif
}
795
796#ifdef DEBUG_TB_CHECK
797static void tb_invalidate_check(target_ulong address)
798{
799 TranslationBlock *tb;
800 int i;
801 address &= TARGET_PAGE_MASK;
802 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
803 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
804 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
805 address >= tb->pc + tb->size)) {
806 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
807 address, (long)tb->pc, tb->size);
808 }
809 }
810 }
811}
812
813/* verify that all the pages have correct rights for code */
814static void tb_page_check(void)
815{
816 TranslationBlock *tb;
817 int i, flags1, flags2;
818
819 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
820 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
821 flags1 = page_get_flags(tb->pc);
822 flags2 = page_get_flags(tb->pc + tb->size - 1);
823 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
824 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
825 (long)tb->pc, tb->size, flags1, flags2);
826 }
827 }
828 }
829}
830
/* Debug check: walk the circular jmp list starting at tb->jmp_first and
   verify it terminates back at 'tb'.  List pointers carry a 2-bit tag in
   their low bits: 0/1 select a jmp_next slot, 2 marks the list head. */
static void tb_jmp_check(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    /* follow the chain of incoming jumps until the head marker (tag 2) */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (n1 == 2)
            break;
        tb1 = tb1->jmp_next[n1];
    }
    /* check end of list */
    if (tb1 != tb) {
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
    }
}
850#endif // DEBUG_TB_CHECK
851
/* invalidate one TB */
/* Unlink 'tb' from a singly-linked TB list whose next-pointer field lives
   at byte offset 'next_offset' inside each TranslationBlock (generic via
   offsetof, e.g. phys_hash_next).  Assumes tb is on the list. */
#ifndef VBOX
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
#else
DECLINLINE(void) tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                           int next_offset)
#endif
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            /* splice tb out: copy its next pointer into the predecessor slot */
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}
871
/* Unlink 'tb' from a per-page TB list.  Entries are tagged pointers: the
   low 2 bits select which page_next slot of the pointed-to TB continues
   the list (a TB spanning two pages is linked once per page). */
#ifndef VBOX
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
#else
DECLINLINE(void) tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
#endif
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        /* strip the 2-bit tag to get the real TB pointer */
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}
892
/* Remove outgoing jump 'n' of 'tb' from the target TB's circular list of
   incoming jumps.  List entries are tagged pointers (low 2 bits: jmp slot,
   value 2 marks the head). */
#ifndef VBOX
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
#else
DECLINLINE(void) tb_jmp_remove(TranslationBlock *tb, int n)
#endif
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}
924
925/* reset the jump entry 'n' of a TB so that it is not chained to
926 another TB */
927#ifndef VBOX
928static inline void tb_reset_jump(TranslationBlock *tb, int n)
929#else
930DECLINLINE(void) tb_reset_jump(TranslationBlock *tb, int n)
931#endif
932{
933 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
934}
935
/* Invalidate one translated block: unlink it from the physical hash, the
   per-page lists, every CPU's lookup cache and all jump chains, and break
   any direct jumps other TBs made into it.  'page_addr' is the page that
   triggered the invalidation (its list is handled by the caller), or -1. */
void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    target_phys_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    /* second page only exists when the TB crosses a page boundary */
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        /* tagged pointer walk: low 2 bits select the jmp slot, 2 = head */
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        /* unpatch the jump so tb1 falls back to the exit stub */
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    /* mark the incoming-jump list empty again */
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}
991
992
993#ifdef VBOX
/* VBox: invalidate any translation for virtual address 'eip'.  Currently
   implemented as a full TB flush (the targeted variant below is disabled
   dead code kept for reference). */
void tb_invalidate_virt(CPUState *env, uint32_t eip)
{
# if 1
    tb_flush(env);
# else
    /* NOTE(review): dead code — references tb_find()/tb_invalidate()
       which may no longer exist in this form. */
    uint8_t *cs_base, *pc;
    unsigned int flags, h, phys_pc;
    TranslationBlock *tb, **ptb;

    flags = env->hflags;
    flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
    cs_base = env->segs[R_CS].base;
    pc = cs_base + eip;

    tb = tb_find(&ptb, (unsigned long)pc, (unsigned long)cs_base,
                 flags);

    if(tb)
    {
# ifdef DEBUG
        printf("invalidating TB (%08X) at %08X\n", tb, eip);
# endif
        tb_invalidate(tb);
        //Note: this will leak TBs, but the whole cache will be flushed
        //      when it happens too often
        tb->pc = 0;
        tb->cs_base = 0;
        tb->flags = 0;
    }
# endif
}
1025
1026# ifdef VBOX_STRICT
1027/**
1028 * Gets the page offset.
1029 */
1030unsigned long get_phys_page_offset(target_ulong addr)
1031{
1032 PhysPageDesc *p = phys_page_find(addr >> TARGET_PAGE_BITS);
1033 return p ? p->phys_offset : 0;
1034}
1035# endif /* VBOX_STRICT */
1036#endif /* VBOX */
1037
/* Set 'len' consecutive bits starting at bit index 'start' in the bitmap
   'tab' (LSB-first within each byte).  len is expected to be positive;
   a non-positive length within one byte is a no-op. */
#ifndef VBOX
static inline void set_bits(uint8_t *tab, int start, int len)
#else
DECLINLINE(void) set_bits(uint8_t *tab, int start, int len)
#endif
{
    int end = start + len;
    uint8_t *q = tab + (start >> 3);
    int head = 0xff << (start & 7);

    if ((start & ~7) == (end & ~7)) {
        /* all affected bits live in a single byte */
        if (start < end)
            *q |= head & ~(0xff << (end & 7));
        return;
    }

    /* partial leading byte */
    *q++ |= head;
    /* full bytes in the middle */
    start = (start + 8) & ~7;
    while (start + 8 <= end) {
        *q++ = 0xff;
        start += 8;
    }
    /* partial trailing byte, if any */
    if (start < end)
        *q |= ~(0xff << (end & 7));
}
1068
/* Build the page's SMC code bitmap: one bit per page byte, set where a
   translated block's guest code lies.  Used to avoid invalidating TBs on
   writes that do not touch translated code. */
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
    if (!p->code_bitmap)
        return;
    memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);

    /* walk the page's tagged-pointer TB list (low 2 bits = which of the
       TB's two pages this link belongs to) */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            /* second page of a spanning TB: covers [0, end-of-TB) */
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}
1099
/* Translate the guest code at (cs_base, pc) with the given flags/cflags
   into a new TranslationBlock, emit it into the code buffer and link it
   into the physical page structures.  Flushes all TBs when the TB pool
   is exhausted.  Returns the new TB. */
TranslationBlock *tb_gen_code(CPUState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    target_ulong phys_pc, phys_page2, virt_page2;
    int code_gen_size;

    phys_pc = get_phys_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info. */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    /* advance the write pointer past the emitted code, kept aligned */
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        /* the TB spills into a second page; resolve it too */
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);
    return tb;
}
1136
/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
                                   int is_cpu_write_access)
{
    int n, current_tb_modified, current_tb_not_found, current_flags;
    CPUState *env = cpu_single_env;
    PageDesc *p;
    TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
    target_ulong tb_start, tb_end;
    target_ulong current_pc, current_cs_base;

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    /* after enough write faults on this page, build the code bitmap so
       later writes can be filtered cheaply (tb_invalidate_phys_page_fast) */
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    current_tb_not_found = is_cpu_write_access;
    current_tb_modified = 0;
    current_tb = NULL; /* avoid warning */
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
    tb = p->first_tb;
    while (tb != NULL) {
        /* low 2 bits of the list pointer select which page slot of the TB
           this list entry belongs to (a TB may span two pages) */
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        /* only invalidate TBs that actually overlap [start, end[ */
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                its execution. We could be more precise by checking
                that the modification is after the current PC, but it
                would require a specialized function to partially
                restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_io_pc, NULL);
#if defined(TARGET_I386)
                current_flags = env->hflags;
                current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
                current_cs_base = (target_ulong)env->segs[R_CS].base;
                current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}
1252
1253
/* len must be <= 8 and start must be a multiple of len */
/* Fast-path SMC check for a small write: if the page has a code bitmap and
   none of the written bytes are covered by translated code, do nothing;
   otherwise fall back to tb_invalidate_phys_page_range(). */
#ifndef VBOX
static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
#else
DECLINLINE(void) tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
#endif
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        if (loglevel) {
            fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                   cpu_single_env->mem_io_vaddr, len,
                   cpu_single_env->eip,
                   cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
        }
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        /* extract the bits for the len written bytes */
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    /* NOTE: label inside the else branch on purpose — reached either by
       falling through (no bitmap) or by the goto above (bitmap hit) */
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}
1286
1287
#if !defined(CONFIG_SOFTMMU)
/* Invalidate every TB on the physical page containing addr (user-mode
   emulation path).  With precise SMC, if the faulting pc lies in one of
   those TBs, re-generate a single-instruction TB and resume from the
   signal context so the write cannot modify its own TB. */
static void tb_invalidate_phys_page(target_phys_addr_t addr,
                                    unsigned long pc, void *puc)
{
    int n, current_flags, current_tb_modified;
    target_ulong current_pc, current_cs_base;
    PageDesc *p;
    TranslationBlock *tb, *current_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    CPUState *env = cpu_single_env;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
    current_tb_modified = 0;
    current_tb = NULL;
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        /* low 2 bits tag which page slot of the TB this entry is for */
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
#if defined(TARGET_I386)
            current_flags = env->hflags;
            current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
            current_cs_base = (target_ulong)env->segs[R_CS].base;
            current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif
1355
/* add the tb in the target page and protect it if necessary */
/* n selects which of the TB's page slots (0 or 1) page_addr fills; the
   slot index is encoded in the low bits of the first_tb list pointer. */
#ifndef VBOX
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, target_ulong page_addr)
#else
DECLINLINE(void) tb_alloc_page(TranslationBlock *tb,
                               unsigned int n, target_ulong page_addr)
#endif
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        /* a host page may cover several target pages: accumulate their
           flags and clear PAGE_WRITE on each */
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
            page_get_flags(addr);
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}
1415
1416/* Allocate a new translation block. Flush the translation buffer if
1417 too many translation blocks or too much generated code. */
1418TranslationBlock *tb_alloc(target_ulong pc)
1419{
1420 TranslationBlock *tb;
1421
1422 if (nb_tbs >= code_gen_max_blocks ||
1423#ifndef VBOX
1424 (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
1425#else
1426 (code_gen_ptr - code_gen_buffer) >= (int)code_gen_buffer_max_size)
1427#endif
1428 return NULL;
1429 tb = &tbs[nb_tbs++];
1430 tb->pc = pc;
1431 tb->cflags = 0;
1432 return tb;
1433}
1434
1435void tb_free(TranslationBlock *tb)
1436{
1437 /* In practice this is mostly used for single use temporary TB
1438 Ignore the hard cases and just back up if this TB happens to
1439 be the last one generated. */
1440 if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
1441 code_gen_ptr = tb->tc_ptr;
1442 nb_tbs--;
1443 }
1444}
1445
/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done. */
    mmap_lock();
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    /* tag value 2 in jmp_first marks the head of the circular jump list */
    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    mmap_unlock();
}
1485
1486/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1487 tb[1].tc_ptr. Return NULL if not found */
1488TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1489{
1490 int m_min, m_max, m;
1491 unsigned long v;
1492 TranslationBlock *tb;
1493
1494 if (nb_tbs <= 0)
1495 return NULL;
1496 if (tc_ptr < (unsigned long)code_gen_buffer ||
1497 tc_ptr >= (unsigned long)code_gen_ptr)
1498 return NULL;
1499 /* binary search (cf Knuth) */
1500 m_min = 0;
1501 m_max = nb_tbs - 1;
1502 while (m_min <= m_max) {
1503 m = (m_min + m_max) >> 1;
1504 tb = &tbs[m];
1505 v = (unsigned long)tb->tc_ptr;
1506 if (v == tc_ptr)
1507 return tb;
1508 else if (tc_ptr < v) {
1509 m_max = m - 1;
1510 } else {
1511 m_min = m + 1;
1512 }
1513 }
1514 return &tbs[m_max];
1515}
1516
1517static void tb_reset_jump_recursive(TranslationBlock *tb);
1518
/* Unchain outgoing jump slot n of tb: find the TB it jumps to, unlink tb
   from that TB's incoming-jump list (a circular list whose entries are
   tagged in the low 2 bits: 0/1 = jmp_next slot, 2 = list head), patch the
   generated code back to its non-chained form, then recurse into the
   target TB so everything reachable from it is unchained too. */
#ifndef VBOX
static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
#else
DECLINLINE(void) tb_reset_jump_recursive2(TranslationBlock *tb, int n)
#endif
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure now that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}
1561
1562static void tb_reset_jump_recursive(TranslationBlock *tb)
1563{
1564 tb_reset_jump_recursive2(tb, 0);
1565 tb_reset_jump_recursive2(tb, 1);
1566}
1567
#if defined(TARGET_HAS_ICE)
/* Invalidate the translated code covering guest pc so the breakpoint is
   honoured on the next execution.  An unmapped pc yields IO_MEM_UNASSIGNED
   as the page base, in which case the range invalidation is a no-op. */
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_ulong addr, pd;
    ram_addr_t ram_addr;
    PhysPageDesc *p;

    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif
1586
1587/* Add a watchpoint. */
1588int cpu_watchpoint_insert(CPUState *env, target_ulong addr, int type)
1589{
1590 int i;
1591
1592 for (i = 0; i < env->nb_watchpoints; i++) {
1593 if (addr == env->watchpoint[i].vaddr)
1594 return 0;
1595 }
1596 if (env->nb_watchpoints >= MAX_WATCHPOINTS)
1597 return -1;
1598
1599 i = env->nb_watchpoints++;
1600 env->watchpoint[i].vaddr = addr;
1601 env->watchpoint[i].type = type;
1602 tlb_flush_page(env, addr);
1603 /* FIXME: This flush is needed because of the hack to make memory ops
1604 terminate the TB. It can be removed once the proper IO trap and
1605 re-execute bits are in. */
1606 tb_flush(env);
1607 return i;
1608}
1609
1610/* Remove a watchpoint. */
1611int cpu_watchpoint_remove(CPUState *env, target_ulong addr)
1612{
1613 int i;
1614
1615 for (i = 0; i < env->nb_watchpoints; i++) {
1616 if (addr == env->watchpoint[i].vaddr) {
1617 env->nb_watchpoints--;
1618 env->watchpoint[i] = env->watchpoint[env->nb_watchpoints];
1619 tlb_flush_page(env, addr);
1620 return 0;
1621 }
1622 }
1623 return -1;
1624}
1625
1626/* Remove all watchpoints. */
1627void cpu_watchpoint_remove_all(CPUState *env) {
1628 int i;
1629
1630 for (i = 0; i < env->nb_watchpoints; i++) {
1631 tlb_flush_page(env, env->watchpoint[i].vaddr);
1632 }
1633 env->nb_watchpoints = 0;
1634}
1635
1636/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
1637 breakpoint is reached */
1638int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
1639{
1640#if defined(TARGET_HAS_ICE)
1641 int i;
1642
1643 for(i = 0; i < env->nb_breakpoints; i++) {
1644 if (env->breakpoints[i] == pc)
1645 return 0;
1646 }
1647
1648 if (env->nb_breakpoints >= MAX_BREAKPOINTS)
1649 return -1;
1650 env->breakpoints[env->nb_breakpoints++] = pc;
1651
1652 breakpoint_invalidate(env, pc);
1653 return 0;
1654#else
1655 return -1;
1656#endif
1657}
1658
1659/* remove all breakpoints */
1660void cpu_breakpoint_remove_all(CPUState *env) {
1661#if defined(TARGET_HAS_ICE)
1662 int i;
1663 for(i = 0; i < env->nb_breakpoints; i++) {
1664 breakpoint_invalidate(env, env->breakpoints[i]);
1665 }
1666 env->nb_breakpoints = 0;
1667#endif
1668}
1669
1670/* remove a breakpoint */
1671int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
1672{
1673#if defined(TARGET_HAS_ICE)
1674 int i;
1675 for(i = 0; i < env->nb_breakpoints; i++) {
1676 if (env->breakpoints[i] == pc)
1677 goto found;
1678 }
1679 return -1;
1680 found:
1681 env->nb_breakpoints--;
1682 if (i < env->nb_breakpoints)
1683 env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];
1684
1685 breakpoint_invalidate(env, pc);
1686 return 0;
1687#else
1688 return -1;
1689#endif
1690}
1691
1692/* enable or disable single step mode. EXCP_DEBUG is returned by the
1693 CPU loop after each instruction */
1694void cpu_single_step(CPUState *env, int enabled)
1695{
1696#if defined(TARGET_HAS_ICE)
1697 if (env->singlestep_enabled != enabled) {
1698 env->singlestep_enabled = enabled;
1699 /* must flush all the translated code to avoid inconsistancies */
1700 /* XXX: only flush what is necessary */
1701 tb_flush(env);
1702 }
1703#endif
1704}
1705
1706#ifndef VBOX
/* enable or disable low levels log */
/* Lazily opens the log file on first use; exits the process (via _exit,
   skipping atexit handlers) if it cannot be opened. */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static uint8_t logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#else
        /* line-buffered so partial traces survive a crash */
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
    }
}
1728
/* Set the log file name used by cpu_set_log().  The previous value is not
   freed (it may point at a static default string). */
void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
}
1733#endif /* !VBOX */
1734
/* mask must never be zero, except for A20 change call */
/* Raise the interrupt bits in mask on env.  In icount mode, force the TB
   to end at the next instruction; otherwise unchain the currently running
   TB so the CPU loop notices the request promptly.  Under VBOX the update
   is done atomically and must run on the EMT thread. */
void cpu_interrupt(CPUState *env, int mask)
{
#if !defined(USE_NPTL)
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
#endif
    int old_mask;

    old_mask = env->interrupt_request;
#ifdef VBOX
    VM_ASSERT_EMT(env->pVM);
    ASMAtomicOrS32((int32_t volatile *)&env->interrupt_request, mask);
#else /* !VBOX */
    /* FIXME: This is probably not threadsafe.  A different thread could
       be in the middle of a read-modify-write operation.  */
    env->interrupt_request |= mask;
#endif /* !VBOX */
#if defined(USE_NPTL)
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
       problem and hope the cpu will stop of its own accord.  For userspace
       emulation this often isn't actually as bad as it sounds.  Often
       signals are used primarily to interrupt blocking syscalls.  */
#else
    if (use_icount) {
        /* saturate the decrementer so the TB terminates immediately */
        env->icount_decr.u16.high = 0xffff;
#ifndef CONFIG_USER_ONLY
        /* CPU_INTERRUPT_EXIT isn't a real interrupt.  It just means
           an async event happened and we need to process it.  */
        if (!can_do_io(env)
            && (mask & ~(old_mask | CPU_INTERRUPT_EXIT)) != 0) {
            cpu_abort(env, "Raised interrupt while not in I/O function");
        }
#endif
    } else {
        tb = env->current_tb;
        /* if the cpu is currently executing code, we must unlink it and
           all the potentially executing TB */
        if (tb && !testandset(&interrupt_lock)) {
            env->current_tb = NULL;
            tb_reset_jump_recursive(tb);
            resetlock(&interrupt_lock);
        }
    }
#endif
}
1781
/* Clear the interrupt bits in mask on env. */
void cpu_reset_interrupt(CPUState *env, int mask)
{
#ifdef VBOX
    /*
     * Note: the current implementation can be executed by another thread without problems; make sure this remains true
     *       for future changes!
     */
    ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~mask);
#else /* !VBOX */
    env->interrupt_request &= ~mask;
#endif /* !VBOX */
}
1794
1795#ifndef VBOX
/* Table of {mask, name, help} triples mapping '-d' option names to log
   mask bits; terminated by a {0, NULL, NULL} sentinel (relied upon by
   cpu_str_to_log_mask below). */
CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
#ifdef TARGET_I386
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops after optimization for each compiled TB" },
#endif
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before bloc translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    { 0, NULL, NULL },
};
1823
/* Return non-zero iff the n-byte prefix s1[0..n) equals the whole of the
   NUL-terminated string s2. */
static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    if (memcmp(s1, s2, n) != 0)
        return 0;
    return 1;
}
1830
/* takes a comma separated list of log masks. Return 0 if error. */
/* "all" selects every mask bit.  NOTE: after the "all" branch, control
   falls through to the 'found' label with item pointing at the table's
   {0,...} sentinel, so the extra 'mask |= item->mask' there is a no-op. */
int cpu_str_to_log_mask(const char *str)
{
    CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        /* each token runs up to the next ',' or the end of the string */
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if(cmp1(p,p1-p,"all")) {
            for(item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for(item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            /* unknown token: signal error to the caller */
            return 0;
        }
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}
1863#endif /* !VBOX */
1864
1865#ifndef VBOX /* VBOX: we have our own routine. */
/* Print a fatal error message plus a CPU state dump to stderr, then
   abort().  Never returns. */
void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    va_end(ap);
    abort();
}
1882#endif /* !VBOX */
1883
1884#ifndef VBOX
1885CPUState *cpu_copy(CPUState *env)
1886{
1887 CPUState *new_env = cpu_init(env->cpu_model_str);
1888 /* preserve chaining and index */
1889 CPUState *next_cpu = new_env->next_cpu;
1890 int cpu_index = new_env->cpu_index;
1891 memcpy(new_env, env, sizeof(CPUState));
1892 new_env->next_cpu = next_cpu;
1893 new_env->cpu_index = cpu_index;
1894 return new_env;
1895}
1896#endif
1897
1898#if !defined(CONFIG_USER_ONLY)
1899
/* Drop tb_jmp_cache entries that could refer to TBs overlapping the
   flushed virtual page; the page before it is cleared as well because a
   TB starting there may spill into the flushed page. */
#ifndef VBOX
static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
#else
DECLINLINE(void) tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
#endif
{
    unsigned int i;

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page.  */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset (&env->tb_jmp_cache[i], 0,
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));

    i = tb_jmp_cache_hash_page(addr);
    memset (&env->tb_jmp_cache[i], 0,
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));

#ifdef VBOX
    /* inform raw mode about TLB page flush */
    remR3FlushPage(env, addr);
#endif /* VBOX */
}
1923
/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
/* Invalidate every entry of every TLB (all mmu modes) by setting the
   address fields to -1, and clear the whole tb_jmp_cache. */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;
#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        env->tlb_table[0][i].addr_read = -1;
        env->tlb_table[0][i].addr_write = -1;
        env->tlb_table[0][i].addr_code = -1;
        env->tlb_table[1][i].addr_read = -1;
        env->tlb_table[1][i].addr_write = -1;
        env->tlb_table[1][i].addr_code = -1;
#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
        /* VBOX keeps a parallel table of physical addends; -1 = invalid */
        env->phys_addends[0][i] = -1;
        env->phys_addends[1][i] = -1;
#endif
#if (NB_MMU_MODES >= 3)
        env->tlb_table[2][i].addr_read = -1;
        env->tlb_table[2][i].addr_write = -1;
        env->tlb_table[2][i].addr_code = -1;
#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
        env->phys_addends[2][i] = -1;
#endif
#if (NB_MMU_MODES == 4)
        env->tlb_table[3][i].addr_read = -1;
        env->tlb_table[3][i].addr_write = -1;
        env->tlb_table[3][i].addr_code = -1;
#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
        env->phys_addends[3][i] = -1;
#endif
#endif
#endif
    }

    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

#ifdef VBOX
    /* inform raw mode about TLB flush */
    remR3FlushTLB(env, flush_global);
#endif
#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush(env, flush_global);
    }
#endif
    tlb_flush_count++;
}
1978
/* Invalidate a single TLB entry if any of its read/write/code addresses
   matches the (page-aligned) addr, ignoring the permission flag bits. */
#ifndef VBOX
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
#else
DECLINLINE(void) tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
#endif
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        tlb_entry->addr_read = -1;
        tlb_entry->addr_write = -1;
        tlb_entry->addr_code = -1;
    }
}
1996
/* Invalidate the TLB entries of every mmu mode for the virtual page
   containing addr, plus the corresponding tb_jmp_cache entries. */
void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    /* direct-mapped TLB: the page number selects the slot */
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_flush_entry(&env->tlb_table[0][i], addr);
    tlb_flush_entry(&env->tlb_table[1][i], addr);
#if (NB_MMU_MODES >= 3)
    tlb_flush_entry(&env->tlb_table[2][i], addr);
#if (NB_MMU_MODES == 4)
    tlb_flush_entry(&env->tlb_table[3][i], addr);
#endif
#endif

    tlb_flush_jmp_cache(env, addr);

#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush_page(env, addr);
    }
#endif
}
2027
/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
/* Clearing CODE_DIRTY_FLAG forces subsequent writes through the slow
   notdirty path, where self-modifying code is detected. */
static void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
#if defined(VBOX) && defined(REM_MONITOR_CODE_PAGES)
    /** @todo Retest this? This function has changed... */
    remR3ProtectCode(cpu_single_env, ram_addr);
#endif
}
2040
/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
/* Simply re-sets CODE_DIRTY_FLAG for the page; VBOX additionally bounds-
   checks the index against the dirty bitmap size. */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
#ifdef VBOX
    if (RT_LIKELY((ram_addr >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
#endif
        phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
}
2051
/* If this RAM-backed TLB entry's target (host) address falls inside
   [start, start+length), redirect its writes through IO_MEM_NOTDIRTY so
   dirty tracking sees them. */
#ifndef VBOX
static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
#else
DECLINLINE(void) tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                       unsigned long start, unsigned long length)
#endif
{
    unsigned long addr;

#ifdef VBOX
    /* NOTE(review): presumably a misaligned 'start' marks an invalid
       mapping in the VBOX build — confirm against remR3TlbGCPhys2Ptr */
    if (start & 3)
        return;
#endif
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
        }
    }
}
2073
/* Clear the given dirty_flags bits for all pages in [start, end), then
   patch every CPU's TLB so the next write to those pages goes through the
   notdirty slow path (re-setting the dirty bits). */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    CPUState *env;
    unsigned long length, start1;
    int i, mask, len;
    uint8_t *p;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    len = length >> TARGET_PAGE_BITS;
#ifdef USE_KQEMU
    /* XXX: should not depend on cpu context */
    env = first_cpu;
    if (env->kqemu_enabled) {
        ram_addr_t addr;
        addr = start;
        for(i = 0; i < len; i++) {
            kqemu_set_notdirty(env, addr);
            addr += TARGET_PAGE_SIZE;
        }
    }
#endif
    /* clear the selected dirty bits for every page in the range */
    mask = ~dirty_flags;
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
#ifdef VBOX
    if (RT_LIKELY((start >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
#endif
    for(i = 0; i < len; i++)
        p[i] &= mask;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    /* start1 is the range base in the same address space the TLB addend
       maps to (guest-physical or host virtual, depending on build) */
#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
    start1 = start;
#elif !defined(VBOX)
    start1 = start + (unsigned long)phys_ram_base;
#else
    start1 = (unsigned long)remR3TlbGCPhys2Ptr(first_cpu, start, 1 /*fWritable*/); /** @todo page replacing (sharing or read only) may cause trouble, fix interface/whatever. */
#endif
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
#if (NB_MMU_MODES >= 3)
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
#if (NB_MMU_MODES == 4)
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
#endif
#endif
    }
}
2133
2134#ifndef VBOX
/* Enable/disable migration dirty tracking; always succeeds (returns 0). */
int cpu_physical_memory_set_dirty_tracking(int enable)
{
    in_migration = enable;
    return 0;
}
2140
/* Return the current migration dirty-tracking state. */
int cpu_physical_memory_get_dirty_tracking(void)
{
    return in_migration;
}
2145#endif
2146
/* For a RAM-backed TLB entry, mark it TLB_NOTDIRTY when its page is no
   longer dirty, so the next write takes the slow path.  The VBOX variant
   receives the physical addend explicitly instead of deriving it. */
#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
DECLINLINE(void) tlb_update_dirty(CPUTLBEntry *tlb_entry, target_phys_addr_t phys_addend)
#else
static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
#endif
{
    ram_addr_t ram_addr;

    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        /* RAM case */
#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
        ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
#elif !defined(VBOX)
        ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
            tlb_entry->addend - (unsigned long)phys_ram_base;
#else
        Assert(phys_addend != -1);
        ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + phys_addend;
#endif
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
            tlb_entry->addr_write |= TLB_NOTDIRTY;
        }
    }
}
2171
/* update the TLB according to the current state of the dirty bits */
/* Walks every entry of every mmu mode; see tlb_update_dirty() above. */
void cpu_tlb_update_dirty(CPUState *env)
{
    int i;
#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
    /* VBOX variant passes the parallel phys_addends entry along */
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[0][i], env->phys_addends[0][i]);
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[1][i], env->phys_addends[1][i]);
#if (NB_MMU_MODES >= 3)
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[2][i], env->phys_addends[2][i]);
#if (NB_MMU_MODES == 4)
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[3][i], env->phys_addends[3][i]);
#endif
#endif
#else /* VBOX */
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[0][i]);
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[1][i]);
#if (NB_MMU_MODES >= 3)
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[2][i]);
#if (NB_MMU_MODES == 4)
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[3][i]);
#endif
#endif
#endif /* VBOX */
}
2204
/* Clear the TLB_NOTDIRTY trap from one entry, but only if it maps exactly
   the given (virtual) page; entries for other pages are left alone. */
#ifndef VBOX
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
#else
DECLINLINE(void) tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
#endif
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
        tlb_entry->addr_write = vaddr;
}
2214
2215
2216/* update the TLB corresponding to virtual page vaddr and phys addr
2217 addr so that it is no longer dirty */
2218#ifndef VBOX
2219static inline void tlb_set_dirty(CPUState *env,
2220 unsigned long addr, target_ulong vaddr)
2221#else
2222DECLINLINE(void) tlb_set_dirty(CPUState *env,
2223 unsigned long addr, target_ulong vaddr)
2224#endif
2225{
2226 int i;
2227
2228 addr &= TARGET_PAGE_MASK;
2229 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2230 tlb_set_dirty1(&env->tlb_table[0][i], addr);
2231 tlb_set_dirty1(&env->tlb_table[1][i], addr);
2232#if (NB_MMU_MODES >= 3)
2233 tlb_set_dirty1(&env->tlb_table[2][i], vaddr);
2234#if (NB_MMU_MODES == 4)
2235 tlb_set_dirty1(&env->tlb_table[3][i], vaddr);
2236#endif
2237#endif
2238}
2239
/* add a new TLB entry. At most one entry for a given virtual address
   is permitted. Return 0 if OK or 2 if the page could not be mapped
   (can only happen in non SOFTMMU mode for I/O pages or pages
   conflicting with the host address space). */
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int mmu_idx, int is_softmmu)
{
    PhysPageDesc *p;
    unsigned long pd;           /* phys_offset of the page: ram offset | IO_MEM_* type bits */
    unsigned int index;         /* slot in the (direct-mapped) TLB */
    target_ulong address;       /* vaddr plus access-flag bits for data reads/writes */
    target_ulong code_address;  /* vaddr plus access-flag bits for code fetches */
    target_phys_addr_t addend;  /* what to add to vaddr to reach host memory */
    int ret;
    CPUTLBEntry *te;
    int i;
    target_phys_addr_t iotlb;
#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
    /* extra TLB_MMIO bits forced in by VBox access handlers (see below) */
    int read_mods = 0, write_mods = 0, code_mods = 0;
#endif

    /* Look up the physical page; unregistered pages are unassigned I/O. */
    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
           vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
#endif

    ret = 0;
    address = vaddr;
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
        /* IO memory case (romd handled later) */
        address |= TLB_MMIO;
    }
#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
    /* TLB addend maps straight into guest physical space. */
    addend = pd & TARGET_PAGE_MASK;
#elif !defined(VBOX)
    /* TLB addend is a host pointer into the flat phys_ram_base block. */
    addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
#else
    /** @todo this is racing the phys_page_find call above since it may register
     *        a new chunk of memory... */
    /* Ask the VMM for the host mapping of this guest page; the low 2 bits
       of the returned value encode access-handler information (see below). */
    addend = (unsigned long)remR3TlbGCPhys2Ptr(env,
                                               pd & TARGET_PAGE_MASK,
                                               !!(prot & PAGE_WRITE));
#endif

    if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
        /* Normal RAM. */
        iotlb = pd & TARGET_PAGE_MASK;
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
            iotlb |= IO_MEM_NOTDIRTY;
        else
            iotlb |= IO_MEM_ROM;
    } else {
        /* IO handlers are currently passed a physical address.
           It would be nice to pass an offset from the base address
           of that region.  This would avoid having to special case RAM,
           and avoid full address decoding in every device.
           We can't use the high bits of pd for this because
           IO_MEM_ROMD uses these as a ram address.  */
        iotlb = (pd & ~TARGET_PAGE_MASK) + paddr;
    }

    code_address = address;

#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
    /* remR3TlbGCPhys2Ptr flags access handlers in the two low bits of the
       returned pointer: bit 1 = write handler, bit 0 = all-access handler.
       Force the corresponding accesses through the MMIO slow path. */
    if (addend & 0x3)
    {
        if (addend & 0x2)
        {
            /* catch write */
            if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM)
                write_mods |= TLB_MMIO;
        }
        else if (addend & 0x1)
        {
            /* catch all */
            if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM)
            {
                read_mods |= TLB_MMIO;
                write_mods |= TLB_MMIO;
                code_mods |= TLB_MMIO;
            }
        }
        /* Route trapped accesses to the VBox handler-memory callbacks. */
        if ((iotlb & ~TARGET_PAGE_MASK) == 0)
            iotlb = env->pVM->rem.s.iHandlerMemType + paddr;
        addend &= ~(target_ulong)0x3;
    }
#endif

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    for (i = 0; i < env->nb_watchpoints; i++) {
        if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {
            iotlb = io_mem_watch + paddr;
            /* TODO: The memory case can be optimized by not trapping
               reads of pages with a write breakpoint.  */
            address |= TLB_MMIO;
        }
    }

    /* Fill the direct-mapped TLB slot; -1 marks an access kind as invalid. */
    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    env->iotlb[mmu_idx][index] = iotlb - vaddr;
    te = &env->tlb_table[mmu_idx][index];
    te->addend = addend - vaddr;
    if (prot & PAGE_READ) {
        te->addr_read = address;
    } else {
        te->addr_read = -1;
    }

    if (prot & PAGE_EXEC) {
        te->addr_code = code_address;
    } else {
        te->addr_code = -1;
    }
    if (prot & PAGE_WRITE) {
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
            (pd & IO_MEM_ROMD)) {
            /* Write access calls the I/O callback. */
            te->addr_write = address | TLB_MMIO;
        } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                   !cpu_physical_memory_is_dirty(pd)) {
            /* Clean RAM page: trap the first write to update dirty bits. */
            te->addr_write = address | TLB_NOTDIRTY;
        } else {
            te->addr_write = address;
        }
    } else {
        te->addr_write = -1;
    }

#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
    /* Apply the handler-forced MMIO bits and remember the physical addend
       so tlb_update_dirty() can recover the ram address later. */
    if (prot & PAGE_READ)
        te->addr_read |= read_mods;
    if (prot & PAGE_EXEC)
        te->addr_code |= code_mods;
    if (prot & PAGE_WRITE)
        te->addr_write |= write_mods;

    env->phys_addends[mmu_idx][index] = (pd & TARGET_PAGE_MASK)- vaddr;
#endif

#ifdef VBOX
    /* inform raw mode about TLB page change */
    remR3FlushPage(env, vaddr);
#endif
    return ret;
}
#if 0
/* NOTE: this whole function is compiled out (#if 0); kept for reference only. */
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong addr, unsigned long pc, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    VirtPageDesc *vp;

#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x\n", addr);
#endif
    addr &= TARGET_PAGE_MASK;

    /* if it is not mapped, no need to worry here */
    if (addr >= MMAP_AREA_END)
        return 0;
    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
    if (!vp)
        return 0;
    /* NOTE: in this case, validate_tag is _not_ tested as it
       validates only the code TLB */
    if (vp->valid_tag != virt_valid_tag)
        return 0;
    if (!(vp->prot & PAGE_WRITE))
        return 0;
#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
           addr, vp->phys_addr, vp->prot);
#endif
    if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
        cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
                  (unsigned long)addr, vp->prot);
    /* set the dirty bit */
    phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
    /* flush the code inside */
    tb_invalidate_phys_page(vp->phys_addr, pc, puc);
    return 1;
#elif defined(VBOX)
    addr &= TARGET_PAGE_MASK;

    /* if it is not mapped, no need to worry here */
    if (addr >= MMAP_AREA_END)
        return 0;
    return 1;
#else
    return 0;
#endif
}
#endif /* 0 */
2442
2443#else
2444
/* No-op: there is no softmmu TLB to flush in this (user-only) configuration. */
void tlb_flush(CPUState *env, int flush_global)
{
}
2448
/* No-op: single-page TLB flush has nothing to do without a softmmu TLB. */
void tlb_flush_page(CPUState *env, target_ulong addr)
{
}
2452
/* Stub for the user-only build: pages are always "mapped", so report success. */
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int mmu_idx, int is_softmmu)
{
    return 0;
}
2459
2460#ifndef VBOX
/* dump memory mappings */
/* Walks the two-level page-descriptor table (l1_map / per-entry L2 arrays)
   and prints one line per maximal run of pages sharing the same protection.
   A sentinel pass with i == L1_SIZE (p == NULL) flushes the final run. */
void page_dump(FILE *f)
{
    unsigned long start, end;
    int i, j, prot, prot1;
    PageDesc *p;

    fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    start = -1;     /* -1 = not currently inside a mapped run */
    end = -1;
    prot = 0;
    for(i = 0; i <= L1_SIZE; i++) {
        if (i < L1_SIZE)
            p = l1_map[i];
        else
            p = NULL;
        for(j = 0;j < L2_SIZE; j++) {
            if (!p)
                prot1 = 0;
            else
                prot1 = p[j].flags;
            if (prot1 != prot) {
                /* Protection changed: the previous run ends here. */
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                if (start != -1) {
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
                            start, end, end - start,
                            prot & PAGE_READ ? 'r' : '-',
                            prot & PAGE_WRITE ? 'w' : '-',
                            prot & PAGE_EXEC ? 'x' : '-');
                }
                if (prot1 != 0)
                    start = end;
                else
                    start = -1;
                prot = prot1;
            }
            if (!p)
                break;  /* unmapped L1 slot: skip the rest of this L2 range */
        }
    }
}
2503#endif /* !VBOX */
2504
2505int page_get_flags(target_ulong address)
2506{
2507 PageDesc *p;
2508
2509 p = page_find(address >> TARGET_PAGE_BITS);
2510 if (!p)
2511 return 0;
2512 return p->flags;
2513}
2514
/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is positionned automatically
   depending on PAGE_WRITE */
/* Range is [start, end) after page alignment; descriptors are allocated
   on demand, and any write-enabled page holding translated code is
   invalidated before the new flags are applied. */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    PageDesc *p;
    target_ulong addr;

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    if (flags & PAGE_WRITE)
        flags |= PAGE_WRITE_ORG;
#ifdef VBOX
    /* VBox never uses this user-only path; assert so misuse is caught early. */
    AssertMsgFailed(("We shouldn't be here, and if we should, we must have an env to do the proper locking!\n"));
#endif
    spin_lock(&tb_lock);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
        /* if the write protection is set, then we invalidate the code
           inside */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
    spin_unlock(&tb_lock);
}
2544
2545int page_check_range(target_ulong start, target_ulong len, int flags)
2546{
2547 PageDesc *p;
2548 target_ulong end;
2549 target_ulong addr;
2550
2551 end = TARGET_PAGE_ALIGN(start+len); /* must do before we loose bits in the next step */
2552 start = start & TARGET_PAGE_MASK;
2553
2554 if( end < start )
2555 /* we've wrapped around */
2556 return -1;
2557 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2558 p = page_find(addr >> TARGET_PAGE_BITS);
2559 if( !p )
2560 return -1;
2561 if( !(p->flags & PAGE_VALID) )
2562 return -1;
2563
2564 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
2565 return -1;
2566 if (flags & PAGE_WRITE) {
2567 if (!(p->flags & PAGE_WRITE_ORG))
2568 return -1;
2569 /* unprotect the page if it was put read-only because it
2570 contains translated code */
2571 if (!(p->flags & PAGE_WRITE)) {
2572 if (!page_unprotect(addr, 0, NULL))
2573 return -1;
2574 }
2575 return 0;
2576 }
2577 }
2578 return 0;
2579}
2580
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
/* Operates on the whole HOST page containing 'address' (which may span
   several target pages when host pages are larger): if any target page in
   it was originally writable, restore write access, invalidate the
   translated code on the faulting page, and report success. */
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
{
    unsigned int page_index, prot, pindex;
    PageDesc *p, *p1;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler.  However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok. */
    mmap_lock();

    host_start = address & qemu_host_page_mask;
    page_index = host_start >> TARGET_PAGE_BITS;
    p1 = page_find(page_index);
    if (!p1) {
        mmap_unlock();
        return 0;
    }
    host_end = host_start + qemu_host_page_size;
    p = p1;
    prot = 0;
    /* Union of the flags of every target page in this host page. */
    for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
        prot |= p->flags;
        p++;
    }
    /* if the page was really writable, then we change its
       protection back to writable */
    if (prot & PAGE_WRITE_ORG) {
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
        if (!(p1[pindex].flags & PAGE_WRITE)) {
            mprotect((void *)g2h(host_start), qemu_host_page_size,
                     (prot & PAGE_BITS) | PAGE_WRITE);
            p1[pindex].flags |= PAGE_WRITE;
            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(address, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(address);
#endif
            mmap_unlock();
            return 1;
        }
    }
    mmap_unlock();
    return 0;
}
2629
/* No-op in the user-only build: there is no softmmu TLB to update. */
static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
}
2634#endif /* defined(CONFIG_USER_ONLY) */
2635
2636#if !defined(CONFIG_USER_ONLY)
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory);
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                           ram_addr_t orig_memory);
/* Compute the in-page byte range [start_addr2, end_addr2] that the region
   [start_addr, start_addr+orig_size) occupies within the page at 'addr',
   and set need_subpage when that range does not cover the whole page
   (i.e. a subpage dispatcher is required). */
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
                      need_subpage)                                     \
    do {                                                                \
        if (addr > start_addr)                                          \
            start_addr2 = 0;                                            \
        else {                                                          \
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
            if (start_addr2 > 0)                                        \
                need_subpage = 1;                                       \
        }                                                               \
                                                                        \
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
            end_addr2 = TARGET_PAGE_SIZE - 1;                           \
        else {                                                          \
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
                need_subpage = 1;                                       \
        }                                                               \
    } while (0)
2660
2661
/* register physical memory. 'size' must be a multiple of the target
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page */
/* Partial-page registrations (or IO_MEM_SUBWIDTH handlers) are routed
   through a per-page subpage dispatcher; whole pages get their phys_offset
   set directly.  For RAM/ROM pages the offset advances page by page. */
void cpu_register_physical_memory(target_phys_addr_t start_addr,
                                  unsigned long size,
                                  unsigned long phys_offset)
{
    target_phys_addr_t addr, end_addr;
    PhysPageDesc *p;
    CPUState *env;
    ram_addr_t orig_size = size;
    void *subpage;

#ifdef USE_KQEMU
    /* XXX: should not depend on cpu context */
    env = first_cpu;
    if (env->kqemu_enabled) {
        kqemu_set_phys_mem(start_addr, size, phys_offset);
    }
#endif
    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + (target_phys_addr_t)size;
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
            /* Page already registered: may need to merge with the existing
               mapping via a subpage dispatcher. */
            ram_addr_t orig_memory = p->phys_offset;
            target_phys_addr_t start_addr2, end_addr2;
            int need_subpage = 0;

            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
                          need_subpage);
            if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, orig_memory);
                } else {
                    /* Reuse the dispatcher already installed on this page. */
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
                                            >> IO_MEM_SHIFT];
                }
                subpage_register(subpage, start_addr2, end_addr2, phys_offset);
            } else {
                p->phys_offset = phys_offset;
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                    (phys_offset & IO_MEM_ROMD))
                    phys_offset += TARGET_PAGE_SIZE;
            }
        } else {
            /* Fresh page: allocate a descriptor and fill it in. */
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
            p->phys_offset = phys_offset;
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                (phys_offset & IO_MEM_ROMD))
                phys_offset += TARGET_PAGE_SIZE;
            else {
                target_phys_addr_t start_addr2, end_addr2;
                int need_subpage = 0;

                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
                              end_addr2, need_subpage);

                if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, IO_MEM_UNASSIGNED);
                    subpage_register(subpage, start_addr2, end_addr2,
                                     phys_offset);
                }
            }
        }
    }
    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}
2737
2738/* XXX: temporary until new memory mapping API */
2739uint32_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2740{
2741 PhysPageDesc *p;
2742
2743 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2744 if (!p)
2745 return IO_MEM_UNASSIGNED;
2746 return p->phys_offset;
2747}
2748
2749#ifndef VBOX
2750/* XXX: better than nothing */
2751ram_addr_t qemu_ram_alloc(ram_addr_t size)
2752{
2753 ram_addr_t addr;
2754 if ((phys_ram_alloc_offset + size) > phys_ram_size) {
2755 fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
2756 (uint64_t)size, (uint64_t)phys_ram_size);
2757 abort();
2758 }
2759 addr = phys_ram_alloc_offset;
2760 phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
2761 return addr;
2762}
2763
/* No-op: the bump allocator above never reclaims individual allocations. */
void qemu_ram_free(ram_addr_t addr)
{
}
2767#endif
2768
2769
2770static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
2771{
2772#ifdef DEBUG_UNASSIGNED
2773 printf("Unassigned mem read 0x%08x\n", (int)addr);
2774#endif
2775#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2776 do_unassigned_access(addr, 0, 0, 0, 1);
2777#endif
2778 return 0;
2779}
2780
/* 16-bit read from unassigned physical memory: reads as 0 (and notifies
   targets that fault on such accesses). */
static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
    do_unassigned_access(addr, 0, 0, 0, 2);
#endif
    return 0;
}
2791
/* 32-bit read from unassigned physical memory: reads as 0 (and notifies
   targets that fault on such accesses). */
static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
    do_unassigned_access(addr, 0, 0, 0, 4);
#endif
    return 0;
}
2802
2803static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2804{
2805#ifdef DEBUG_UNASSIGNED
2806 printf("Unassigned mem write 0x%08x = 0x%x\n", (int)addr, val);
2807#endif
2808}
2809
/* 16-bit write to unassigned physical memory: discarded (and reported to
   targets that fault on such accesses). */
static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
    do_unassigned_access(addr, 1, 0, 0, 2);
#endif
}
2819
/* 32-bit write to unassigned physical memory: discarded (and reported to
   targets that fault on such accesses). */
static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
    do_unassigned_access(addr, 1, 0, 0, 4);
#endif
}
/* Read dispatch table for unassigned memory: index 0=byte, 1=word, 2=long. */
static CPUReadMemoryFunc *unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readw,
    unassigned_mem_readl,
};
2834
/* Write dispatch table for unassigned memory: index 0=byte, 1=word, 2=long. */
static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writew,
    unassigned_mem_writel,
};
2840
/* Byte write to a clean (TLB_NOTDIRTY) RAM page: invalidate any translated
   code on the page, perform the store, mark the page dirty, and drop the
   notdirty trap once no translated code remains.  VBox indexes dirty bits
   by guest physical address and bounds-checks against phys_ram_dirty_size
   (out-of-range pages are treated as fully dirty). */
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
#if defined(VBOX)
    ram_addr = addr;
#else
    ram_addr = addr - (unsigned long)phys_ram_base;
#endif
#ifdef VBOX
    if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
        dirty_flags = 0xff;
    else
#endif /* VBOX */
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        /* Page holds translated code: invalidate it, then re-read the
           dirty flags (invalidation updates them). */
        tb_invalidate_phys_page_fast(ram_addr, 1);
# ifdef VBOX
        if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
            dirty_flags = 0xff;
        else
# endif /* VBOX */
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
    remR3PhysWriteU8(addr, val);
#else
    stb_p((uint8_t *)(long)addr, val);
#endif
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
#ifdef VBOX
    if (RT_LIKELY((ram_addr >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
#endif /* VBOX */
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_io_vaddr);
}
2887
/* 16-bit variant of notdirty_mem_writeb: invalidate translated code on the
   clean page, perform the store, update dirty bits, drop the trap when the
   page becomes fully dirty. */
static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
#if defined(VBOX)
    ram_addr = addr;
#else
    ram_addr = addr - (unsigned long)phys_ram_base;
#endif
#ifdef VBOX
    if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
        dirty_flags = 0xff;
    else
#endif /* VBOX */
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 2);
# ifdef VBOX
        if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
            dirty_flags = 0xff;
        else
# endif /* VBOX */
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
    remR3PhysWriteU16(addr, val);
#else
    stw_p((uint8_t *)(long)addr, val);
#endif

#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
#ifdef VBOX
    if (RT_LIKELY((ram_addr >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
#endif
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_io_vaddr);
}
2935
/* 32-bit variant of notdirty_mem_writeb: invalidate translated code on the
   clean page, perform the store, update dirty bits, drop the trap when the
   page becomes fully dirty. */
static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
#if defined(VBOX)
    ram_addr = addr;
#else
    ram_addr = addr - (unsigned long)phys_ram_base;
#endif
#ifdef VBOX
    if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
        dirty_flags = 0xff;
    else
#endif /* VBOX */
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 4);
# ifdef VBOX
        if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
            dirty_flags = 0xff;
        else
# endif /* VBOX */
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
    remR3PhysWriteU32(addr, val);
#else
    stl_p((uint8_t *)(long)addr, val);
#endif
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
#ifdef VBOX
    if (RT_LIKELY((ram_addr >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
#endif
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_io_vaddr);
}
2982
/* Placeholder read table for regions whose reads bypass the I/O path
   (ROM / notdirty): these slots must never be invoked. */
static CPUReadMemoryFunc *error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};
2988
/* Write dispatch table for clean RAM pages: index 0=byte, 1=word, 2=long. */
static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};
2994
2995
2996/* Generate a debug exception if a watchpoint has been hit. */
2997static void check_watchpoint(int offset, int flags)
2998{
2999 CPUState *env = cpu_single_env;
3000 target_ulong vaddr;
3001 int i;
3002
3003 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
3004 for (i = 0; i < env->nb_watchpoints; i++) {
3005 if (vaddr == env->watchpoint[i].vaddr
3006 && (env->watchpoint[i].type & flags)) {
3007 env->watchpoint_hit = i + 1;
3008 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
3009 break;
3010 }
3011 }
3012}
3013
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
/* Byte read through a watched page: report any read watchpoint hit, then
   perform the real physical read. */
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
    return ldub_phys(addr);
}
3022
/* 16-bit read through a watched page: check watchpoints, then read. */
static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
    return lduw_phys(addr);
}
3028
/* 32-bit read through a watched page: check watchpoints, then read. */
static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
    return ldl_phys(addr);
}
3034
/* Byte write through a watched page: check watchpoints, then write. */
static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
    stb_phys(addr, val);
}
3041
/* 16-bit write through a watched page: check watchpoints, then write. */
static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
    stw_phys(addr, val);
}
3048
/* 32-bit write through a watched page: check watchpoints, then write. */
static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
    stl_phys(addr, val);
}
3055
/* Read dispatch table for watched pages: index 0=byte, 1=word, 2=long. */
static CPUReadMemoryFunc *watch_mem_read[3] = {
    watch_mem_readb,
    watch_mem_readw,
    watch_mem_readl,
};
3061
/* Write dispatch table for watched pages: index 0=byte, 1=word, 2=long. */
static CPUWriteMemoryFunc *watch_mem_write[3] = {
    watch_mem_writeb,
    watch_mem_writew,
    watch_mem_writel,
};
3067
/* Dispatch a read of width 'len' (0=byte, 1=word, 2=long) to the handler
   registered for this offset within the subpage region. */
static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
                                        unsigned int len)
{
    uint32_t ret;
    unsigned int idx;

    idx = SUBPAGE_IDX(addr - mmio->base);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
#endif
    ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len], addr);

    return ret;
}
3083
/* Dispatch a write of width 'len' (0=byte, 1=word, 2=long) to the handler
   registered for this offset within the subpage region. */
static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
                                     uint32_t value, unsigned int len)
{
    unsigned int idx;

    idx = SUBPAGE_IDX(addr - mmio->base);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
           mmio, len, addr, idx, value);
#endif
    (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len], addr, value);
}
3096
/* Byte read entry point for a subpage region (width index 0). */
static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 0);
}
3105
/* Byte write entry point for a subpage region (width index 0). */
static void subpage_writeb (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 0);
}
3114
/* 16-bit read entry point for a subpage region (width index 1). */
static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 1);
}
3123
/* 16-bit write entry point for a subpage region (width index 1). */
static void subpage_writew (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 1);
}
3132
/* 32-bit read entry point for a subpage region (width index 2). */
static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 2);
}
3141
/* 32-bit write entry point for a subpage region (width index 2). */
static void subpage_writel (void *opaque,
                            target_phys_addr_t addr, uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 2);
}
3150
/* Read dispatch table for subpage regions: index 0=byte, 1=word, 2=long. */
static CPUReadMemoryFunc *subpage_read[] = {
    &subpage_readb,
    &subpage_readw,
    &subpage_readl,
};
3156
/* Write dispatch table for subpage regions: index 0=byte, 1=word, 2=long. */
static CPUWriteMemoryFunc *subpage_write[] = {
    &subpage_writeb,
    &subpage_writew,
    &subpage_writel,
};
3162
/* Install the handlers of io-memory region 'memory' for the in-page byte
   range [start, end] of subpage dispatcher 'mmio'.  Only slots for which
   the region actually provides a handler are overwritten, so narrower
   regions layer over what was registered before.  Returns 0 on success,
   -1 if the range exceeds the page. */
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory)
{
    int idx, eidx;
    unsigned int i;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
           mmio, start, end, idx, eidx, memory);
#endif
    memory >>= IO_MEM_SHIFT;
    for (; idx <= eidx; idx++) {
        /* 4 access widths are tracked per slot (see subpage_t). */
        for (i = 0; i < 4; i++) {
            if (io_mem_read[memory][i]) {
                mmio->mem_read[idx][i] = &io_mem_read[memory][i];
                mmio->opaque[idx][0][i] = io_mem_opaque[memory];
            }
            if (io_mem_write[memory][i]) {
                mmio->mem_write[idx][i] = &io_mem_write[memory][i];
                mmio->opaque[idx][1][i] = io_mem_opaque[memory];
            }
        }
    }

    return 0;
}
3193
/* Create a subpage dispatcher for the page at 'base': registers it as a new
   io-memory region, stores the region id (tagged IO_MEM_SUBPAGE) in *phys,
   and pre-fills the whole page with the handlers of 'orig_memory' so that
   unclaimed ranges keep their previous behavior.  Returns the dispatcher,
   or NULL if allocation failed (qemu_mallocz result is checked). */
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                           ram_addr_t orig_memory)
{
    subpage_t *mmio;
    int subpage_memory;

    mmio = qemu_mallocz(sizeof(subpage_t));
    if (mmio != NULL) {
        mmio->base = base;
        subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
#if defined(DEBUG_SUBPAGE)
        printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
               mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
        *phys = subpage_memory | IO_MEM_SUBPAGE;
        subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory);
    }

    return mmio;
}
3214
/* One-time setup of the built-in io-memory regions (ROM, unassigned,
   notdirty) and the watchpoint region.  io_mem_nb is then bumped to 5,
   presumably to reserve the low region ids for built-ins — TODO confirm
   against IO_MEM_* definitions. */
static void io_mem_init(void)
{
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
    io_mem_nb = 5;

    io_mem_watch = cpu_register_io_memory(0, watch_mem_read,
                                          watch_mem_write, NULL);

#ifndef VBOX /* VBOX: we do this later when the RAM is allocated. */
    /* alloc dirty bits array */
    phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
    memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
#endif /* !VBOX */
}
3231
/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). Functions can be omitted with a NULL function pointer. The
   registered functions may be modified dynamically later.
   If io_index is non zero, the corresponding io zone is
   modified. If it is zero, a new io zone is allocated. The return
   value can be used with cpu_register_physical_memory(). (-1) is
   returned if error. */
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write,
                           void *opaque)
{
    int i, subwidth = 0;

    if (io_index <= 0) {
        /* Allocate the next free region id. */
        if (io_mem_nb >= IO_MEM_NB_ENTRIES)
            return -1;
        io_index = io_mem_nb++;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for(i = 0;i < 3; i++) {
        /* Any missing width handler marks the region as sub-width, which
           forces subpage dispatch in cpu_register_physical_memory(). */
        if (!mem_read[i] || !mem_write[i])
            subwidth = IO_MEM_SUBWIDTH;
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    io_mem_opaque[io_index] = opaque;
    return (io_index << IO_MEM_SHIFT) | subwidth;
}
3265
3266CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
3267{
3268 return io_mem_write[io_index >> IO_MEM_SHIFT];
3269}
3270
3271CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
3272{
3273 return io_mem_read[io_index >> IO_MEM_SHIFT];
3274}
3275#endif /* !defined(CONFIG_USER_ONLY) */
3276
3277/* physical memory access (slow version, mainly for debug) */
3278#if defined(CONFIG_USER_ONLY)
3279void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
3280 int len, int is_write)
3281{
3282 int l, flags;
3283 target_ulong page;
3284 void * p;
3285
3286 while (len > 0) {
3287 page = addr & TARGET_PAGE_MASK;
3288 l = (page + TARGET_PAGE_SIZE) - addr;
3289 if (l > len)
3290 l = len;
3291 flags = page_get_flags(page);
3292 if (!(flags & PAGE_VALID))
3293 return;
3294 if (is_write) {
3295 if (!(flags & PAGE_WRITE))
3296 return;
3297 /* XXX: this code should not depend on lock_user */
3298 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
3299 /* FIXME - should this return an error rather than just fail? */
3300 return;
3301 memcpy(p, buf, len);
3302 unlock_user(p, addr, len);
3303 } else {
3304 if (!(flags & PAGE_READ))
3305 return;
3306 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
3307 /* FIXME - should this return an error rather than just fail? */
3308 return;
3309 memcpy(buf, p, len);
3310 unlock_user(p, addr, 0);
3311 }
3312 len -= l;
3313 buf += l;
3314 addr += l;
3315 }
3316}
3317
3318#else
/* System-mode physical memory access (slow path).  Splits the transfer
   page by page; each page either dispatches to registered I/O handlers
   (widest aligned access first: 4, 2, then 1 byte) or copies straight
   to/from guest RAM, maintaining the dirty bitmap on RAM writes. */
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        /* l = bytes remaining in the current page, capped to len. */
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            /* Unmapped page: treated as unassigned I/O. */
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                /* I/O (or ROM/special) page: go through the handlers. */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit write access */
#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
                    val = ldl_p(buf);
#else
                    val = *(const uint32_t *)buf;
#endif
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit write access */
#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
                    val = lduw_p(buf);
#else
                    val = *(const uint16_t *)buf;
#endif
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
                    val = ldub_p(buf);
#else
                    val = *(const uint8_t *)buf;
#endif
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
#ifdef VBOX
                remR3PhysWrite(addr1, buf, l); NOREF(ptr);
#else
                ptr = phys_ram_base + addr1;
                memcpy(ptr, buf, l);
#endif
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
#ifdef VBOX
                    if (RT_LIKELY((addr1 >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
#endif
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
                    stl_p(buf, val);
#else
                    *(uint32_t *)buf = val;
#endif
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
                    stw_p(buf, val);
#else
                    *(uint16_t *)buf = val;
#endif
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
                    stb_p(buf, val);
#else
                    *(uint8_t *)buf = val;
#endif
                    l = 1;
                }
            } else {
                /* RAM case */
#ifdef VBOX
                remR3PhysRead((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK), buf, l); NOREF(ptr);
#else
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
#endif
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
3444
3445#ifndef VBOX
3446/* used for ROM loading : can write in RAM and ROM */
3447void cpu_physical_memory_write_rom(target_phys_addr_t addr,
3448 const uint8_t *buf, int len)
3449{
3450 int l;
3451 uint8_t *ptr;
3452 target_phys_addr_t page;
3453 unsigned long pd;
3454 PhysPageDesc *p;
3455
3456 while (len > 0) {
3457 page = addr & TARGET_PAGE_MASK;
3458 l = (page + TARGET_PAGE_SIZE) - addr;
3459 if (l > len)
3460 l = len;
3461 p = phys_page_find(page >> TARGET_PAGE_BITS);
3462 if (!p) {
3463 pd = IO_MEM_UNASSIGNED;
3464 } else {
3465 pd = p->phys_offset;
3466 }
3467
3468 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
3469 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
3470 !(pd & IO_MEM_ROMD)) {
3471 /* do nothing */
3472 } else {
3473 unsigned long addr1;
3474 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3475 /* ROM/RAM case */
3476 ptr = phys_ram_base + addr1;
3477 memcpy(ptr, buf, l);
3478 }
3479 len -= l;
3480 buf += l;
3481 addr += l;
3482 }
3483}
3484#endif /* !VBOX */
3485
3486
3487/* warning: addr must be aligned */
3488uint32_t ldl_phys(target_phys_addr_t addr)
3489{
3490 int io_index;
3491 uint8_t *ptr;
3492 uint32_t val;
3493 unsigned long pd;
3494 PhysPageDesc *p;
3495
3496 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3497 if (!p) {
3498 pd = IO_MEM_UNASSIGNED;
3499 } else {
3500 pd = p->phys_offset;
3501 }
3502
3503 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3504 !(pd & IO_MEM_ROMD)) {
3505 /* I/O case */
3506 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3507 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3508 } else {
3509 /* RAM case */
3510#ifndef VBOX
3511 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3512 (addr & ~TARGET_PAGE_MASK);
3513 val = ldl_p(ptr);
3514#else
3515 val = remR3PhysReadU32((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK)); NOREF(ptr);
3516#endif
3517 }
3518 return val;
3519}
3520
3521/* warning: addr must be aligned */
3522uint64_t ldq_phys(target_phys_addr_t addr)
3523{
3524 int io_index;
3525 uint8_t *ptr;
3526 uint64_t val;
3527 unsigned long pd;
3528 PhysPageDesc *p;
3529
3530 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3531 if (!p) {
3532 pd = IO_MEM_UNASSIGNED;
3533 } else {
3534 pd = p->phys_offset;
3535 }
3536
3537 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3538 !(pd & IO_MEM_ROMD)) {
3539 /* I/O case */
3540 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3541#ifdef TARGET_WORDS_BIGENDIAN
3542 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
3543 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
3544#else
3545 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3546 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
3547#endif
3548 } else {
3549 /* RAM case */
3550#ifndef VBOX
3551 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3552 (addr & ~TARGET_PAGE_MASK);
3553 val = ldq_p(ptr);
3554#else
3555 val = remR3PhysReadU64((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK)); NOREF(ptr);
3556#endif
3557 }
3558 return val;
3559}
3560
3561/* XXX: optimize */
3562uint32_t ldub_phys(target_phys_addr_t addr)
3563{
3564 uint8_t val;
3565 cpu_physical_memory_read(addr, &val, 1);
3566 return val;
3567}
3568
3569/* XXX: optimize */
3570uint32_t lduw_phys(target_phys_addr_t addr)
3571{
3572 uint16_t val;
3573 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
3574 return tswap16(val);
3575}
3576
3577/* warning: addr must be aligned. The ram page is not masked as dirty
3578 and the code inside is not invalidated. It is useful if the dirty
3579 bits are used to track modified PTEs */
3580void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
3581{
3582 int io_index;
3583 uint8_t *ptr;
3584 unsigned long pd;
3585 PhysPageDesc *p;
3586
3587 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3588 if (!p) {
3589 pd = IO_MEM_UNASSIGNED;
3590 } else {
3591 pd = p->phys_offset;
3592 }
3593
3594 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3595 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3596 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3597 } else {
3598#ifndef VBOX
3599 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3600 (addr & ~TARGET_PAGE_MASK);
3601 stl_p(ptr, val);
3602#else
3603 remR3PhysWriteU32((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK), val); NOREF(ptr);
3604#endif
3605#ifndef VBOX
3606 if (unlikely(in_migration)) {
3607 if (!cpu_physical_memory_is_dirty(addr1)) {
3608 /* invalidate code */
3609 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3610 /* set dirty bit */
3611 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3612 (0xff & ~CODE_DIRTY_FLAG);
3613 }
3614 }
3615#endif
3616 }
3617}
3618
3619void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
3620{
3621 int io_index;
3622 uint8_t *ptr;
3623 unsigned long pd;
3624 PhysPageDesc *p;
3625
3626 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3627 if (!p) {
3628 pd = IO_MEM_UNASSIGNED;
3629 } else {
3630 pd = p->phys_offset;
3631 }
3632
3633 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3634 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3635#ifdef TARGET_WORDS_BIGENDIAN
3636 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
3637 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
3638#else
3639 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3640 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
3641#endif
3642 } else {
3643#ifndef VBOX
3644 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3645 (addr & ~TARGET_PAGE_MASK);
3646 stq_p(ptr, val);
3647#else
3648 remR3PhysWriteU64((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK), val); NOREF(ptr);
3649#endif
3650 }
3651}
3652
3653
3654/* warning: addr must be aligned */
3655void stl_phys(target_phys_addr_t addr, uint32_t val)
3656{
3657 int io_index;
3658 uint8_t *ptr;
3659 unsigned long pd;
3660 PhysPageDesc *p;
3661
3662 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3663 if (!p) {
3664 pd = IO_MEM_UNASSIGNED;
3665 } else {
3666 pd = p->phys_offset;
3667 }
3668
3669 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3670 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3671 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3672 } else {
3673 unsigned long addr1;
3674 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3675 /* RAM case */
3676#ifndef VBOX
3677 ptr = phys_ram_base + addr1;
3678 stl_p(ptr, val);
3679#else
3680 remR3PhysWriteU32((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK), val); NOREF(ptr);
3681#endif
3682 if (!cpu_physical_memory_is_dirty(addr1)) {
3683 /* invalidate code */
3684 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3685 /* set dirty bit */
3686#ifdef VBOX
3687 if (RT_LIKELY((addr1 >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
3688#endif
3689 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3690 (0xff & ~CODE_DIRTY_FLAG);
3691 }
3692 }
3693}
3694
3695/* XXX: optimize */
3696void stb_phys(target_phys_addr_t addr, uint32_t val)
3697{
3698 uint8_t v = val;
3699 cpu_physical_memory_write(addr, &v, 1);
3700}
3701
3702/* XXX: optimize */
3703void stw_phys(target_phys_addr_t addr, uint32_t val)
3704{
3705 uint16_t v = tswap16(val);
3706 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
3707}
3708
3709/* XXX: optimize */
3710void stq_phys(target_phys_addr_t addr, uint64_t val)
3711{
3712 val = tswap64(val);
3713 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
3714}
3715
3716#endif
3717
3718/* virtual memory access for debug */
3719int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3720 uint8_t *buf, int len, int is_write)
3721{
3722 int l;
3723 target_ulong page, phys_addr;
3724
3725 while (len > 0) {
3726 page = addr & TARGET_PAGE_MASK;
3727 phys_addr = cpu_get_phys_page_debug(env, page);
3728 /* if no physical page mapped, return an error */
3729 if (phys_addr == -1)
3730 return -1;
3731 l = (page + TARGET_PAGE_SIZE) - addr;
3732 if (l > len)
3733 l = len;
3734 cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
3735 buf, l, is_write);
3736 len -= l;
3737 buf += l;
3738 addr += l;
3739 }
3740 return 0;
3741}
3742
/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUState *env, void *retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    /* Find the TB that contains the host return address we faulted from. */
    tb = tb_find_pc((unsigned long)retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  retaddr);
    }
    /* Instruction budget at TB entry = remaining count + TB's own count. */
    n = env->icount_decr.u16.low + tb->icount;
    /* Restore guest CPU state to the faulting instruction. */
    cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
    /* Calculate how many instructions had been executed before the fault
       occurred. */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn. */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB. If this is not
       the first instruction in a TB then re-execute the preceding
       branch. */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        /* Back up to the branch and restore its instruction credit. */
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
        && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen. */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    /* New TB executes exactly n insns and stops after the I/O insn. */
    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    /* Throw away the old TB and regenerate it with the I/O insn last. */
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception. In practice
       we have already translated the block once so it's probably ok. */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB. */
    cpu_resume_from_signal(env, NULL);
}
3801
3802#ifndef VBOX
/* Print translation-buffer statistics (sizes, jump chaining, flush and
   invalidate counters) through the supplied fprintf-style callback. */
void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    /* Accumulate per-TB statistics over all currently translated blocks. */
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        /* A TB with a second page address spans a guest page boundary. */
        if (tb->page_addr[1] != -1)
            cross_page++;
        /* 0xffff marks an unused direct-jump slot; count chained TBs. */
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size %ld/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
    /* Delegate code-generator statistics to TCG itself. */
    tcg_dump_info(f, cpu_fprintf);
}
3855#endif /* !VBOX */
3856
3857#if !defined(CONFIG_USER_ONLY)
3858
3859#define MMUSUFFIX _cmmu
3860#define GETPC() NULL
3861#define env cpu_single_env
3862#define SOFTMMU_CODE_ACCESS
3863
3864#define SHIFT 0
3865#include "softmmu_template.h"
3866
3867#define SHIFT 1
3868#include "softmmu_template.h"
3869
3870#define SHIFT 2
3871#include "softmmu_template.h"
3872
3873#define SHIFT 3
3874#include "softmmu_template.h"
3875
3876#undef env
3877
3878#endif
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette