VirtualBox

source: vbox/trunk/src/recompiler/softmmu_template.h@34341

Last change on this file since 34341 was 33656, checked in by vboxsync, 14 years ago

*: rebrand Sun (L)GPL disclaimers

  • Property svn:eol-style set to native
File size: 13.6 KB
/*
 * Software MMU support
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

/*
 * Oracle LGPL Disclaimer: For the avoidance of doubt, except that if any license choice
 * other than GPL or LGPL is available it will apply instead, Oracle elects to use only
 * the Lesser General Public License version 2.1 (LGPLv2) at this time for any software where
 * a choice of LGPL license versions is made available with the language indicating
 * that LGPLv2 or any later version may be used, or where a choice of which version
 * of the LGPL is applied is otherwise unspecified.
 */

#define DATA_SIZE (1 << SHIFT)

#if DATA_SIZE == 8
#define SUFFIX q
#define USUFFIX q
#define DATA_TYPE uint64_t
#define DATA_TYPE_PROMOTED uint64_t
#elif DATA_SIZE == 4
#define SUFFIX l
#define USUFFIX l
#define DATA_TYPE uint32_t
#ifdef VBOX
#define DATA_TYPE_PROMOTED RTCCUINTREG
#endif
#elif DATA_SIZE == 2
#define SUFFIX w
#define USUFFIX uw
#define DATA_TYPE uint16_t
#ifdef VBOX
#define DATA_TYPE_PROMOTED RTCCUINTREG
#endif
#elif DATA_SIZE == 1
#define SUFFIX b
#define USUFFIX ub
#define DATA_TYPE uint8_t
#ifdef VBOX
#define DATA_TYPE_PROMOTED RTCCUINTREG
#endif
#else
#error unsupported data size
#endif
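
/*
 * Note: this file is a template.  The includer defines SHIFT (0..3) and
 * MMUSUFFIX before each inclusion, and glue() pastes the resulting size
 * suffix and MMU suffix onto the helper names: e.g. SHIFT == 2 with
 * MMUSUFFIX == _mmu produces the __ldl_mmu/__stl_mmu pair for 32-bit
 * accesses, while SOFTMMU_CODE_ACCESS selects the code-fetch variants.
 */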

#ifdef SOFTMMU_CODE_ACCESS
#define READ_ACCESS_TYPE 2
#define ADDR_READ addr_code
#else
#define READ_ACCESS_TYPE 0
#define ADDR_READ addr_read
#endif
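
/*
 * Access type as passed to tlb_fill() and do_unaligned_access():
 * 0 is a data read, 1 a data write (used literally in the store
 * helpers below), 2 an instruction fetch.
 */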

static DATA_TYPE glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                        int mmu_idx,
                                                        void *retaddr);
#ifndef VBOX
static inline DATA_TYPE glue(io_read, SUFFIX)(target_phys_addr_t physaddr,
                                              target_ulong addr,
                                              void *retaddr)
#else
DECLINLINE(DATA_TYPE) glue(io_read, SUFFIX)(target_phys_addr_t physaddr,
                                            target_ulong addr,
                                            void *retaddr)
#endif
{
    DATA_TYPE res;
    int index;
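    /* 'physaddr' arrives as the raw iotlb entry: its low bits index the
       I/O handler tables, and adding the guest virtual address restores
       the offset within the page. */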
    index = (physaddr >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    env->mem_io_pc = (unsigned long)retaddr;
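    /* When instruction counting (icount) is active, an I/O access is
       only allowed at the end of a translation block; otherwise the
       block is recompiled so the access happens at a deterministic
       point. */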
    if (index > (IO_MEM_NOTDIRTY >> IO_MEM_SHIFT)
            && !can_do_io(env)) {
        cpu_io_recompile(env, retaddr);
    }

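    /* I/O handlers are registered for access sizes up to 4 bytes only
       (table index 0..2), so a 64-bit access is split into two 32-bit
       reads, most significant word first on big-endian targets. */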
#if SHIFT <= 2
    res = io_mem_read[index][SHIFT](io_mem_opaque[index], physaddr);
#else
#ifdef TARGET_WORDS_BIGENDIAN
    res = (uint64_t)io_mem_read[index][2](io_mem_opaque[index], physaddr) << 32;
    res |= io_mem_read[index][2](io_mem_opaque[index], physaddr + 4);
#else
    res = io_mem_read[index][2](io_mem_opaque[index], physaddr);
    res |= (uint64_t)io_mem_read[index][2](io_mem_opaque[index], physaddr + 4) << 32;
#endif
#endif /* SHIFT > 2 */
#ifdef USE_KQEMU
    env->last_io_time = cpu_get_time_fast();
#endif
    return res;
}

/* handle all cases except unaligned accesses which span two pages */
#ifndef VBOX
DATA_TYPE REGPARM glue(glue(__ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                      int mmu_idx)
#else
/* Load helpers invoked from generated code.  TCG assumes that a valid
   value occupies the whole register, whereas gcc 4.3 and later may use
   only the lower part of the register for smaller types, so force
   promotion to a full-width type. */
DATA_TYPE_PROMOTED REGPARM
glue(glue(__ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                    int mmu_idx)
#endif
{
    DATA_TYPE res;
    int index;
    target_ulong tlb_addr;
    target_phys_addr_t addend;
    void *retaddr;

    /* test if there is a match for unaligned or IO access */
    /* XXX: more of this could be done in the memory access macros,
       in a non-portable way */
    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
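    /* A single compare catches both a page mismatch and an invalidated
       entry: an entry with TLB_INVALID_MASK set can never match, since
       that bit is clear in the page-aligned guest address. */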
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            retaddr = GETPC();
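            /* GETPC() captures the host return address into the
               generated code; tlb_fill() and the I/O machinery use it
               to locate the translation block and recover the guest PC
               if this access faults. */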
            addend = env->iotlb[mmu_idx][index];
            res = glue(io_read, SUFFIX)(addend, addr, retaddr);
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
            /* slow unaligned access (it spans two pages or IO) */
        do_unaligned_access:
            retaddr = GETPC();
#ifdef ALIGNED_ONLY
            do_unaligned_access(addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
#endif
            res = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(addr,
                                                         mmu_idx, retaddr);
        } else {
            /* unaligned/aligned access in the same page */
#ifdef ALIGNED_ONLY
            if ((addr & (DATA_SIZE - 1)) != 0) {
                retaddr = GETPC();
                do_unaligned_access(addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
            }
#endif
            addend = env->tlb_table[mmu_idx][index].addend;
            res = glue(glue(ld, USUFFIX), _raw)((uint8_t *)(long)(addr+addend));
        }
    } else {
        /* the page is not in the TLB: fill it */
        retaddr = GETPC();
#ifdef ALIGNED_ONLY
        if ((addr & (DATA_SIZE - 1)) != 0)
            do_unaligned_access(addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
#endif
        tlb_fill(addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
        goto redo;
    }
    return res;
}

/* handle all unaligned cases */
static DATA_TYPE glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                        int mmu_idx,
                                                        void *retaddr)
{
    DATA_TYPE res, res1, res2;
    int index, shift;
    target_phys_addr_t addend;
    target_ulong tlb_addr, addr1, addr2;

    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            retaddr = GETPC();
            addend = env->iotlb[mmu_idx][index];
            res = glue(io_read, SUFFIX)(addend, addr, retaddr);
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
        do_unaligned_access:
            /* slow unaligned access (it spans two pages) */
            addr1 = addr & ~(DATA_SIZE - 1);
            addr2 = addr1 + DATA_SIZE;
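            /* Read the two aligned words that bracket the datum and
               splice them together below; each recursive call may
               itself hit I/O or refill the TLB. */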
            res1 = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(addr1,
                                                          mmu_idx, retaddr);
            res2 = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(addr2,
                                                          mmu_idx, retaddr);
            shift = (addr & (DATA_SIZE - 1)) * 8;
#ifdef TARGET_WORDS_BIGENDIAN
            res = (res1 << shift) | (res2 >> ((DATA_SIZE * 8) - shift));
#else
            res = (res1 >> shift) | (res2 << ((DATA_SIZE * 8) - shift));
#endif
            res = (DATA_TYPE)res;
        } else {
            /* unaligned/aligned access in the same page */
            addend = env->tlb_table[mmu_idx][index].addend;
            res = glue(glue(ld, USUFFIX), _raw)((uint8_t *)(long)(addr+addend));
        }
    } else {
        /* the page is not in the TLB: fill it */
        tlb_fill(addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
        goto redo;
    }
    return res;
}

#ifndef SOFTMMU_CODE_ACCESS

static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                   DATA_TYPE val,
                                                   int mmu_idx,
                                                   void *retaddr);

#ifndef VBOX
static inline void glue(io_write, SUFFIX)(target_phys_addr_t physaddr,
                                          DATA_TYPE val,
                                          target_ulong addr,
                                          void *retaddr)
#else
DECLINLINE(void) glue(io_write, SUFFIX)(target_phys_addr_t physaddr,
                                        DATA_TYPE val,
                                        target_ulong addr,
                                        void *retaddr)
#endif
{
    int index;
    index = (physaddr >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    if (index > (IO_MEM_NOTDIRTY >> IO_MEM_SHIFT)
            && !can_do_io(env)) {
        cpu_io_recompile(env, retaddr);
    }

    env->mem_io_vaddr = addr;
    env->mem_io_pc = (unsigned long)retaddr;
#if SHIFT <= 2
    io_mem_write[index][SHIFT](io_mem_opaque[index], physaddr, val);
#else
#ifdef TARGET_WORDS_BIGENDIAN
    io_mem_write[index][2](io_mem_opaque[index], physaddr, val >> 32);
    io_mem_write[index][2](io_mem_opaque[index], physaddr + 4, val);
#else
    io_mem_write[index][2](io_mem_opaque[index], physaddr, val);
    io_mem_write[index][2](io_mem_opaque[index], physaddr + 4, val >> 32);
#endif
#endif /* SHIFT > 2 */
#ifdef USE_KQEMU
    env->last_io_time = cpu_get_time_fast();
#endif
}

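/* Store counterpart of the __ld helper above: handles aligned stores
   and I/O accesses directly; unaligned stores that span two pages are
   deferred to the slow_st helper below. */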
void REGPARM glue(glue(__st, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                 DATA_TYPE val,
                                                 int mmu_idx)
{
    target_phys_addr_t addend;
    target_ulong tlb_addr;
    void *retaddr;
    int index;

    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            retaddr = GETPC();
            addend = env->iotlb[mmu_idx][index];
            glue(io_write, SUFFIX)(addend, val, addr, retaddr);
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
        do_unaligned_access:
            retaddr = GETPC();
#ifdef ALIGNED_ONLY
            do_unaligned_access(addr, 1, mmu_idx, retaddr);
#endif
            glue(glue(slow_st, SUFFIX), MMUSUFFIX)(addr, val,
                                                   mmu_idx, retaddr);
        } else {
            /* aligned/unaligned access in the same page */
#ifdef ALIGNED_ONLY
            if ((addr & (DATA_SIZE - 1)) != 0) {
                retaddr = GETPC();
                do_unaligned_access(addr, 1, mmu_idx, retaddr);
            }
#endif
            addend = env->tlb_table[mmu_idx][index].addend;
            glue(glue(st, SUFFIX), _raw)((uint8_t *)(long)(addr+addend), val);
        }
    } else {
        /* the page is not in the TLB: fill it */
        retaddr = GETPC();
#ifdef ALIGNED_ONLY
        if ((addr & (DATA_SIZE - 1)) != 0)
            do_unaligned_access(addr, 1, mmu_idx, retaddr);
#endif
        tlb_fill(addr, 1, mmu_idx, retaddr);
        goto redo;
    }
}

/* handles all unaligned cases */
static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                   DATA_TYPE val,
                                                   int mmu_idx,
                                                   void *retaddr)
{
    target_phys_addr_t addend;
    target_ulong tlb_addr;
    int index, i;

    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            addend = env->iotlb[mmu_idx][index];
            glue(io_write, SUFFIX)(addend, val, addr, retaddr);
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
        do_unaligned_access:
            /* XXX: not efficient, but simple */
            /* Note: relies on the fact that tlb_fill() does not remove the
             * previous page from the TLB cache. */
            for(i = DATA_SIZE - 1; i >= 0; i--) {
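                /* Write one byte at a time through the byte store
                   helper; the shift selects byte i in guest memory
                   order, so a two-page span needs no special casing. */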
#ifdef TARGET_WORDS_BIGENDIAN
                glue(slow_stb, MMUSUFFIX)(addr + i, val >> (((DATA_SIZE - 1) * 8) - (i * 8)),
                                          mmu_idx, retaddr);
#else
                glue(slow_stb, MMUSUFFIX)(addr + i, val >> (i * 8),
                                          mmu_idx, retaddr);
#endif
            }
        } else {
            /* aligned/unaligned access in the same page */
            addend = env->tlb_table[mmu_idx][index].addend;
            glue(glue(st, SUFFIX), _raw)((uint8_t *)(long)(addr+addend), val);
        }
    } else {
        /* the page is not in the TLB: fill it */
        tlb_fill(addr, 1, mmu_idx, retaddr);
        goto redo;
    }
}

#endif /* !defined(SOFTMMU_CODE_ACCESS) */

#ifdef VBOX
#undef DATA_TYPE_PROMOTED
#endif
#undef READ_ACCESS_TYPE
#undef SHIFT
#undef DATA_TYPE
#undef SUFFIX
#undef USUFFIX
#undef DATA_SIZE
#undef ADDR_READ