VirtualBox

source: vbox/trunk/src/recompiler/softmmu_template.h @ 37689

Last change on this file since 37689 was 37689, checked in by vboxsync, 13 years ago

recompiler: Merged in changes from 0.13.0.

  • Property svn:eol-style set to native
File size: 13.1 KB
/*
 *  Software MMU support
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

/*
 * Oracle LGPL Disclaimer: For the avoidance of doubt, except that if any license choice
 * other than GPL or LGPL is available it will apply instead, Oracle elects to use only
 * the Lesser General Public License version 2.1 (LGPLv2) at this time for any software where
 * a choice of LGPL license versions is made available with the language indicating
 * that LGPLv2 or any later version may be used, or where a choice of which version
 * of the LGPL is applied is otherwise unspecified.
 */
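
/*
 * Usage sketch: this header is a template that the including translation unit
 * compiles once per access size.  SHIFT selects the size (0..3 for 1, 2, 4 and
 * 8 bytes) and MMUSUFFIX names the resulting helper family; both must be
 * defined before inclusion, and SHIFT is #undef'd again at the bottom of this
 * file.  The sketch below shows the conventional multiple-inclusion pattern;
 * the exact including files (for example target-<arch>/op_helper.c with
 * MMUSUFFIX _mmu for data access, or a SOFTMMU_CODE_ACCESS build with
 * MMUSUFFIX _cmmu for code fetches) are an assumption about this tree, not
 * something this file states.
 *
 *     #define MMUSUFFIX _mmu
 *
 *     #define SHIFT 0
 *     #include "softmmu_template.h"
 *
 *     #define SHIFT 1
 *     #include "softmmu_template.h"
 *
 *     #define SHIFT 2
 *     #include "softmmu_template.h"
 *
 *     #define SHIFT 3
 *     #include "softmmu_template.h"
 */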
#include "qemu-timer.h"

#define DATA_SIZE (1 << SHIFT)

#if DATA_SIZE == 8
#define SUFFIX q
#define USUFFIX q
#define DATA_TYPE uint64_t
#ifdef VBOX
# define DATA_TYPE_PROMOTED uint64_t
#endif
#elif DATA_SIZE == 4
#define SUFFIX l
#define USUFFIX l
#define DATA_TYPE uint32_t
#ifdef VBOX
# define DATA_TYPE_PROMOTED RTCCUINTREG
#endif
#elif DATA_SIZE == 2
#define SUFFIX w
#define USUFFIX uw
#define DATA_TYPE uint16_t
#ifdef VBOX
# define DATA_TYPE_PROMOTED RTCCUINTREG
#endif
#elif DATA_SIZE == 1
#define SUFFIX b
#define USUFFIX ub
#define DATA_TYPE uint8_t
#ifdef VBOX
# define DATA_TYPE_PROMOTED RTCCUINTREG
#endif
#else
#error unsupported data size
#endif

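/*
 * Worked example of the name pasting used throughout this file (illustrative;
 * MMUSUFFIX is supplied by the including file, typically _mmu or _cmmu): with
 * SHIFT == 2 (DATA_SIZE == 4, SUFFIX == l), glue(glue(__ld, SUFFIX), MMUSUFFIX)
 * expands to __ldl_mmu, and the other helpers below become slow_ldl_mmu,
 * io_readl, __stl_mmu, slow_stl_mmu and io_writel, while the direct host
 * access glue(glue(ld, USUFFIX), _raw) becomes ldl_raw.
 */
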
#ifdef SOFTMMU_CODE_ACCESS
#define READ_ACCESS_TYPE 2
#define ADDR_READ addr_code
#else
#define READ_ACCESS_TYPE 0
#define ADDR_READ addr_read
#endif

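/*
 * READ_ACCESS_TYPE is the access type passed to tlb_fill() and
 * do_unaligned_access() by the load helpers: 0 for a data load, 2 for an
 * instruction fetch (SOFTMMU_CODE_ACCESS builds, which also look up the
 * addr_code TLB field instead of addr_read).  The store helpers further down
 * pass the constant 1 in the same position.
 */
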
static DATA_TYPE glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                        int mmu_idx,
                                                        void *retaddr);
static inline DATA_TYPE glue(io_read, SUFFIX)(target_phys_addr_t physaddr,
                                              target_ulong addr,
                                              void *retaddr)
{
    DATA_TYPE res;
    int index;
    index = (physaddr >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    env->mem_io_pc = (unsigned long)retaddr;
    if (index > (IO_MEM_NOTDIRTY >> IO_MEM_SHIFT)
            && !can_do_io(env)) {
        cpu_io_recompile(env, retaddr);
    }

    env->mem_io_vaddr = addr;
#if SHIFT <= 2
    res = io_mem_read[index][SHIFT](io_mem_opaque[index], physaddr);
#else
#ifdef TARGET_WORDS_BIGENDIAN
    res = (uint64_t)io_mem_read[index][2](io_mem_opaque[index], physaddr) << 32;
    res |= io_mem_read[index][2](io_mem_opaque[index], physaddr + 4);
#else
    res = io_mem_read[index][2](io_mem_opaque[index], physaddr);
    res |= (uint64_t)io_mem_read[index][2](io_mem_opaque[index], physaddr + 4) << 32;
#endif
#endif /* SHIFT > 2 */
    return res;
}

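/*
 * io_read dispatch in a nutshell: the handler index is extracted from the
 * iotlb value with IO_MEM_SHIFT and selects the registered MMIO callback in
 * io_mem_read, indexed by access size (SHIFT).  Callbacks only exist for
 * 1/2/4-byte accesses, which is why an 8-byte read is performed as two 4-byte
 * reads combined in guest byte order.
 */
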
/* handle all cases except unaligned accesses which span two pages */
#ifndef VBOX
DATA_TYPE REGPARM glue(glue(__ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                      int mmu_idx)
#else
/* The load helpers are invoked from generated code, and TCG assumes that a
   valid result occupies the whole host register, whereas gcc 4.3 and later
   may only set the lower part of the register for the smaller return types.
   So force promotion of the return type to full register width. */
DATA_TYPE_PROMOTED REGPARM
glue(glue(__ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                    int mmu_idx)
#endif
{
    DATA_TYPE res;
    int index;
    target_ulong tlb_addr;
    target_phys_addr_t ioaddr;
    unsigned long addend;
    void *retaddr;

    /* test if there is a match for an unaligned or IO access */
    /* XXX: more of this could be done in the memory access macro, in a
       non-portable way */
    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            retaddr = GETPC();
            ioaddr = env->iotlb[mmu_idx][index];
            res = glue(io_read, SUFFIX)(ioaddr, addr, retaddr);
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
            /* slow unaligned access (it spans two pages or IO) */
        do_unaligned_access:
            retaddr = GETPC();
#ifdef ALIGNED_ONLY
            do_unaligned_access(addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
#endif
            res = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(addr,
                                                         mmu_idx, retaddr);
        } else {
            /* unaligned/aligned access in the same page */
#ifdef ALIGNED_ONLY
            if ((addr & (DATA_SIZE - 1)) != 0) {
                retaddr = GETPC();
                do_unaligned_access(addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
            }
#endif
            addend = env->tlb_table[mmu_idx][index].addend;
            res = glue(glue(ld, USUFFIX), _raw)((uint8_t *)(long)(addr+addend));
        }
    } else {
        /* the page is not in the TLB : fill it */
        retaddr = GETPC();
#ifdef ALIGNED_ONLY
        if ((addr & (DATA_SIZE - 1)) != 0)
            do_unaligned_access(addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
#endif
        tlb_fill(addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
        goto redo;
    }
    return res;
}

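/*
 * Summary of the fast path above: the TLB set is selected from the address
 * bits below TARGET_PAGE_BITS, the cached tag is compared against the
 * page-aligned address, and then one of three things happens: an aligned
 * access to an I/O page is forwarded to io_read, an access that crosses a
 * page boundary falls back to the slow helper, and everything else is a
 * direct host load through the per-entry addend.  On a tag mismatch,
 * tlb_fill() installs the entry and the lookup is retried via the redo label.
 */
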
/* handle all unaligned cases */
static DATA_TYPE glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                        int mmu_idx,
                                                        void *retaddr)
{
    DATA_TYPE res, res1, res2;
    int index, shift;
    target_phys_addr_t ioaddr;
    unsigned long addend;
    target_ulong tlb_addr, addr1, addr2;

    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            ioaddr = env->iotlb[mmu_idx][index];
            res = glue(io_read, SUFFIX)(ioaddr, addr, retaddr);
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
        do_unaligned_access:
            /* slow unaligned access (it spans two pages) */
            addr1 = addr & ~(DATA_SIZE - 1);
            addr2 = addr1 + DATA_SIZE;
            res1 = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(addr1,
                                                          mmu_idx, retaddr);
            res2 = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(addr2,
                                                          mmu_idx, retaddr);
            shift = (addr & (DATA_SIZE - 1)) * 8;
#ifdef TARGET_WORDS_BIGENDIAN
            res = (res1 << shift) | (res2 >> ((DATA_SIZE * 8) - shift));
#else
            res = (res1 >> shift) | (res2 << ((DATA_SIZE * 8) - shift));
#endif
            res = (DATA_TYPE)res;
        } else {
            /* unaligned/aligned access in the same page */
            addend = env->tlb_table[mmu_idx][index].addend;
            res = glue(glue(ld, USUFFIX), _raw)((uint8_t *)(long)(addr+addend));
        }
    } else {
        /* the page is not in the TLB : fill it */
        tlb_fill(addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
        goto redo;
    }
    return res;
}

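/*
 * Worked example of the cross-page merge above, little-endian, DATA_SIZE == 4:
 * for an address with (addr & 3) == 3, addr1 is the aligned word containing
 * the first byte and addr2 the next aligned word, so shift == 24 and
 * res = (res1 >> 24) | (res2 << 8): byte 3 of the first word becomes byte 0
 * of the result and bytes 0..2 of the second word become bytes 1..3.  The
 * final cast truncates back to the access width after the C integer
 * promotions applied to the 8- and 16-bit variants.
 */
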
#ifndef SOFTMMU_CODE_ACCESS

static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                   DATA_TYPE val,
                                                   int mmu_idx,
                                                   void *retaddr);

static inline void glue(io_write, SUFFIX)(target_phys_addr_t physaddr,
                                          DATA_TYPE val,
                                          target_ulong addr,
                                          void *retaddr)
{
    int index;
    index = (physaddr >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    if (index > (IO_MEM_NOTDIRTY >> IO_MEM_SHIFT)
            && !can_do_io(env)) {
        cpu_io_recompile(env, retaddr);
    }

    env->mem_io_vaddr = addr;
    env->mem_io_pc = (unsigned long)retaddr;
#if SHIFT <= 2
    io_mem_write[index][SHIFT](io_mem_opaque[index], physaddr, val);
#else
#ifdef TARGET_WORDS_BIGENDIAN
    io_mem_write[index][2](io_mem_opaque[index], physaddr, val >> 32);
    io_mem_write[index][2](io_mem_opaque[index], physaddr + 4, val);
#else
    io_mem_write[index][2](io_mem_opaque[index], physaddr, val);
    io_mem_write[index][2](io_mem_opaque[index], physaddr + 4, val >> 32);
#endif
#endif /* SHIFT > 2 */
}

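/*
 * io_write mirrors io_read: the same handler index and size selection apply,
 * 8-byte writes are issued as two 4-byte writes in guest byte order, and
 * mem_io_vaddr/mem_io_pc are recorded before dispatching so that the guest
 * address and the translated code performing the access remain available to
 * the rest of the emulator while the MMIO callback runs.
 */
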
void REGPARM glue(glue(__st, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                 DATA_TYPE val,
                                                 int mmu_idx)
{
    target_phys_addr_t ioaddr;
    unsigned long addend;
    target_ulong tlb_addr;
    void *retaddr;
    int index;

    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            retaddr = GETPC();
            ioaddr = env->iotlb[mmu_idx][index];
            glue(io_write, SUFFIX)(ioaddr, val, addr, retaddr);
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
        do_unaligned_access:
            retaddr = GETPC();
#ifdef ALIGNED_ONLY
            do_unaligned_access(addr, 1, mmu_idx, retaddr);
#endif
            glue(glue(slow_st, SUFFIX), MMUSUFFIX)(addr, val,
                                                   mmu_idx, retaddr);
        } else {
            /* aligned/unaligned access in the same page */
#ifdef ALIGNED_ONLY
            if ((addr & (DATA_SIZE - 1)) != 0) {
                retaddr = GETPC();
                do_unaligned_access(addr, 1, mmu_idx, retaddr);
            }
#endif
            addend = env->tlb_table[mmu_idx][index].addend;
            glue(glue(st, SUFFIX), _raw)((uint8_t *)(long)(addr+addend), val);
        }
    } else {
        /* the page is not in the TLB : fill it */
        retaddr = GETPC();
#ifdef ALIGNED_ONLY
        if ((addr & (DATA_SIZE - 1)) != 0)
            do_unaligned_access(addr, 1, mmu_idx, retaddr);
#endif
        tlb_fill(addr, 1, mmu_idx, retaddr);
        goto redo;
    }
}

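/*
 * The store fast path above follows the same structure as the load helper,
 * with addr_write as the compared TLB field and the access type hard-coded
 * to 1 (write) in the tlb_fill() and do_unaligned_access() calls.
 */
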
/* handles all unaligned cases */
static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                   DATA_TYPE val,
                                                   int mmu_idx,
                                                   void *retaddr)
{
    target_phys_addr_t ioaddr;
    unsigned long addend;
    target_ulong tlb_addr;
    int index, i;

    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            ioaddr = env->iotlb[mmu_idx][index];
            glue(io_write, SUFFIX)(ioaddr, val, addr, retaddr);
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
        do_unaligned_access:
            /* XXX: not efficient, but simple */
            /* Note: relies on the fact that tlb_fill() does not remove the
             * previous page from the TLB cache. */
            for(i = DATA_SIZE - 1; i >= 0; i--) {
#ifdef TARGET_WORDS_BIGENDIAN
                glue(slow_stb, MMUSUFFIX)(addr + i, val >> (((DATA_SIZE - 1) * 8) - (i * 8)),
                                          mmu_idx, retaddr);
#else
                glue(slow_stb, MMUSUFFIX)(addr + i, val >> (i * 8),
                                          mmu_idx, retaddr);
#endif
            }
        } else {
            /* aligned/unaligned access in the same page */
            addend = env->tlb_table[mmu_idx][index].addend;
            glue(glue(st, SUFFIX), _raw)((uint8_t *)(long)(addr+addend), val);
        }
    } else {
        /* the page is not in the TLB : fill it */
        tlb_fill(addr, 1, mmu_idx, retaddr);
        goto redo;
    }
}

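/*
 * Worked example of the page-crossing store loop above, little-endian,
 * DATA_SIZE == 4: the value is broken into single bytes and each byte i,
 * (val >> (i * 8)), is stored at addr + i through the byte-sized slow store
 * helper, so every byte gets its own TLB lookup and may land on either of the
 * two pages.  On a big-endian target the shift is reversed so that the most
 * significant byte goes to the lowest address.
 */
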
#endif /* !defined(SOFTMMU_CODE_ACCESS) */

#ifdef VBOX
# undef DATA_TYPE_PROMOTED
#endif
#undef READ_ACCESS_TYPE
#undef SHIFT
#undef DATA_TYPE
#undef SUFFIX
#undef USUFFIX
#undef DATA_SIZE
#undef ADDR_READ