VirtualBox

source: vbox/trunk/src/recompiler/target-i386/helper2.c@894

Last change on this file since 894 was r1, checked in by vboxsync: import

  • Property svn:eol-style set to native
File size: 27.6 KB
/*
 * i386 helpers (without register variable usage)
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>
#include <signal.h>
#include <assert.h>

#include "cpu.h"
#include "exec-all.h"

//#define DEBUG_MMU

#ifdef USE_CODE_COPY
#include <asm/ldt.h>
#include <linux/unistd.h>
#include <linux/version.h>

_syscall3(int, modify_ldt, int, func, void *, ptr, unsigned long, bytecount)

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 66)
#define modify_ldt_ldt_s user_desc
#endif
#endif /* USE_CODE_COPY */

#ifdef VBOX
CPUX86State *cpu_x86_init(CPUX86State *env)
{
#else /* !VBOX */
CPUX86State *cpu_x86_init(void)
{
    CPUX86State *env;
#endif /* !VBOX */
    static int inited;

    cpu_exec_init();

#ifndef VBOX
    env = malloc(sizeof(CPUX86State));
    if (!env)
        return NULL;
    memset(env, 0, sizeof(CPUX86State));
#endif /* !VBOX */
    /* init various static tables */
    if (!inited) {
        inited = 1;
        optimize_flags_init();
    }
#ifdef USE_CODE_COPY
    /* testing code for code copy case */
    {
        struct modify_ldt_ldt_s ldt;

        ldt.entry_number = 1;
        ldt.base_addr = (unsigned long)env;
        ldt.limit = (sizeof(CPUState) + 0xfff) >> 12;
        ldt.seg_32bit = 1;
        ldt.contents = MODIFY_LDT_CONTENTS_DATA;
        ldt.read_exec_only = 0;
        ldt.limit_in_pages = 1;
        ldt.seg_not_present = 0;
        ldt.useable = 1;
        modify_ldt(1, &ldt, sizeof(ldt)); /* write ldt entry */

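        /* note: the selector value (1 << 3) | 7 = 0x0f encodes descriptor
           index 1 with the table-indicator bit set (LDT) and RPL 3 */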
        asm volatile ("movl %0, %%fs" : : "r" ((1 << 3) | 7));
    }
#endif
#ifndef VBOX /* cpuid_features is initialized by caller */
    {
        int family, model, stepping;
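        /* CPUID vendor strings are returned in EBX, EDX and ECX, four
           characters per register, packed little-endian: e.g. 0x756e6547
           is 'G','e','n','u' */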
#ifdef TARGET_X86_64
        env->cpuid_vendor1 = 0x68747541; /* "Auth" */
        env->cpuid_vendor2 = 0x69746e65; /* "enti" */
        env->cpuid_vendor3 = 0x444d4163; /* "cAMD" */
        family = 6;
        model = 2;
        stepping = 3;
#else
        env->cpuid_vendor1 = 0x756e6547; /* "Genu" */
        env->cpuid_vendor2 = 0x49656e69; /* "ineI" */
        env->cpuid_vendor3 = 0x6c65746e; /* "ntel" */
#if 0
        /* pentium 75-200 */
        family = 5;
        model = 2;
        stepping = 11;
#else
        /* pentium pro */
        family = 6;
        model = 3;
        stepping = 3;
#endif
#endif
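        /* CPUID leaf 1 EAX layout: stepping in bits 3:0, model in
           bits 7:4, family in bits 11:8 */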
        env->cpuid_version = (family << 8) | (model << 4) | stepping;
        env->cpuid_features = (CPUID_FP87 | CPUID_DE | CPUID_PSE |
                               CPUID_TSC | CPUID_MSR | CPUID_MCE |
                               CPUID_CX8 | CPUID_PGE | CPUID_CMOV);
        env->cpuid_ext_features = 0;

        env->cpuid_features |= CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | CPUID_PAE | CPUID_SEP;
#ifdef TARGET_X86_64
        /* currently not enabled for std i386 because not fully tested */
        env->cpuid_features |= CPUID_APIC;
#endif
    }
#endif /* VBOX */
    cpu_single_env = env;
    cpu_reset(env);
    return env;
}

/* NOTE: must be called outside the CPU execute loop */
void cpu_reset(CPUX86State *env)
{
    int i;

    memset(env, 0, offsetof(CPUX86State, breakpoints));

    tlb_flush(env, 1);

    /* init to reset state */

#ifdef CONFIG_SOFTMMU
    env->hflags |= HF_SOFTMMU_MASK;
#endif

    cpu_x86_update_cr0(env, 0x60000010);
    env->a20_mask = 0xffffffff;

    env->idt.limit = 0xffff;
    env->gdt.limit = 0xffff;
    env->ldt.limit = 0xffff;
    env->ldt.flags = DESC_P_MASK;
    env->tr.limit = 0xffff;
    env->tr.flags = DESC_P_MASK;

    cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff, 0);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff, 0);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff, 0);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff, 0);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff, 0);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff, 0);

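    /* CS.base 0xffff0000 plus EIP 0xfff0 makes the first instruction
       fetch hit physical address 0xfffffff0, the x86 reset vector */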
    env->eip = 0xfff0;
    env->regs[R_EDX] = 0x600; /* indicate P6 processor */

    env->eflags = 0x2;

    /* FPU init */
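    /* tag value 1 marks a register as empty; 0x37f is the x87 power-on
       control word: all exceptions masked, extended precision, round to
       nearest */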
    for(i = 0;i < 8; i++)
        env->fptags[i] = 1;
    env->fpuc = 0x37f;

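    /* 0x1f80 is the MXCSR power-on default: all SSE exceptions masked,
       round to nearest */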
    env->mxcsr = 0x1f80;
}

#ifndef VBOX
void cpu_x86_close(CPUX86State *env)
{
    free(env);
}
#endif

/***********************************************************/
/* x86 debug */

static const char *cc_op_str[] = {
    "DYNAMIC",
    "EFLAGS",

    "MULB",
    "MULW",
    "MULL",
    "MULQ",

    "ADDB",
    "ADDW",
    "ADDL",
    "ADDQ",

    "ADCB",
    "ADCW",
    "ADCL",
    "ADCQ",

    "SUBB",
    "SUBW",
    "SUBL",
    "SUBQ",

    "SBBB",
    "SBBW",
    "SBBL",
    "SBBQ",

    "LOGICB",
    "LOGICW",
    "LOGICL",
    "LOGICQ",

    "INCB",
    "INCW",
    "INCL",
    "INCQ",

    "DECB",
    "DECW",
    "DECL",
    "DECQ",

    "SHLB",
    "SHLW",
    "SHLL",
    "SHLQ",

    "SARB",
    "SARW",
    "SARL",
    "SARQ",
};

void cpu_dump_state(CPUState *env, FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
                    int flags)
{
    int eflags, i;
    char cc_op_name[32];
    static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };

    eflags = env->eflags;
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        cpu_fprintf(f,
                    "RAX=%016llx RBX=%016llx RCX=%016llx RDX=%016llx\n"
                    "RSI=%016llx RDI=%016llx RBP=%016llx RSP=%016llx\n"
                    "R8 =%016llx R9 =%016llx R10=%016llx R11=%016llx\n"
                    "R12=%016llx R13=%016llx R14=%016llx R15=%016llx\n"
                    "RIP=%016llx RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d\n",
                    env->regs[R_EAX],
                    env->regs[R_EBX],
                    env->regs[R_ECX],
                    env->regs[R_EDX],
                    env->regs[R_ESI],
                    env->regs[R_EDI],
                    env->regs[R_EBP],
                    env->regs[R_ESP],
                    env->regs[8],
                    env->regs[9],
                    env->regs[10],
                    env->regs[11],
                    env->regs[12],
                    env->regs[13],
                    env->regs[14],
                    env->regs[15],
                    env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1);
    } else
#endif
    {
        cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
                    "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
                    "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d\n",
                    (uint32_t)env->regs[R_EAX],
                    (uint32_t)env->regs[R_EBX],
                    (uint32_t)env->regs[R_ECX],
                    (uint32_t)env->regs[R_EDX],
                    (uint32_t)env->regs[R_ESI],
                    (uint32_t)env->regs[R_EDI],
                    (uint32_t)env->regs[R_EBP],
                    (uint32_t)env->regs[R_ESP],
                    (uint32_t)env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1);
    }

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        for(i = 0; i < 6; i++) {
            SegmentCache *sc = &env->segs[i];
            cpu_fprintf(f, "%s =%04x %016llx %08x %08x\n",
                        seg_name[i],
                        sc->selector,
                        sc->base,
                        sc->limit,
                        sc->flags);
        }
        cpu_fprintf(f, "LDT=%04x %016llx %08x %08x\n",
                    env->ldt.selector,
                    env->ldt.base,
                    env->ldt.limit,
                    env->ldt.flags);
        cpu_fprintf(f, "TR =%04x %016llx %08x %08x\n",
                    env->tr.selector,
                    env->tr.base,
                    env->tr.limit,
                    env->tr.flags);
        cpu_fprintf(f, "GDT=     %016llx %08x\n",
                    env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %016llx %08x\n",
                    env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%016llx CR3=%016llx CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    env->cr[2],
                    env->cr[3],
                    (uint32_t)env->cr[4]);
    } else
#endif
    {
        for(i = 0; i < 6; i++) {
            SegmentCache *sc = &env->segs[i];
            cpu_fprintf(f, "%s =%04x %08x %08x %08x\n",
                        seg_name[i],
                        sc->selector,
                        (uint32_t)sc->base,
                        sc->limit,
                        sc->flags);
        }
        cpu_fprintf(f, "LDT=%04x %08x %08x %08x\n",
                    env->ldt.selector,
                    (uint32_t)env->ldt.base,
                    env->ldt.limit,
                    env->ldt.flags);
        cpu_fprintf(f, "TR =%04x %08x %08x %08x\n",
                    env->tr.selector,
                    (uint32_t)env->tr.base,
                    env->tr.limit,
                    env->tr.flags);
        cpu_fprintf(f, "GDT=     %08x %08x\n",
                    (uint32_t)env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %08x %08x\n",
                    (uint32_t)env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    (uint32_t)env->cr[2],
                    (uint32_t)env->cr[3],
                    (uint32_t)env->cr[4]);
    }
    if (flags & X86_DUMP_CCOP) {
        if ((unsigned)env->cc_op < CC_OP_NB)
            snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
        else
            snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK) {
            cpu_fprintf(f, "CCS=%016llx CCD=%016llx CCO=%-8s\n",
                        env->cc_src, env->cc_dst,
                        cc_op_name);
        } else
#endif
        {
            cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
                        (uint32_t)env->cc_src, (uint32_t)env->cc_dst,
                        cc_op_name);
        }
    }
    if (flags & X86_DUMP_FPU) {
        cpu_fprintf(f, "ST0=%f ST1=%f ST2=%f ST3=%f\n",
                    (double)env->fpregs[0].d,
                    (double)env->fpregs[1].d,
                    (double)env->fpregs[2].d,
                    (double)env->fpregs[3].d);
        cpu_fprintf(f, "ST4=%f ST5=%f ST6=%f ST7=%f\n",
                    (double)env->fpregs[4].d,
                    (double)env->fpregs[5].d,
                    (double)env->fpregs[6].d,
                    (double)env->fpregs[7].d);
    }
}

/***********************************************************/
/* x86 mmu */
/* XXX: add PGE support */

void cpu_x86_set_a20(CPUX86State *env, int a20_state)
{
    a20_state = (a20_state != 0);
    if (a20_state != ((env->a20_mask >> 20) & 1)) {
#if defined(DEBUG_MMU)
        printf("A20 update: a20=%d\n", a20_state);
#endif
        /* if the cpu is currently executing code, we must unlink it and
           all the potentially executing TBs */
        cpu_interrupt(env, CPU_INTERRUPT_EXITTB);

        /* when a20 is changed, all the MMU mappings are invalid, so
           we must flush everything */
        tlb_flush(env, 1);
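        /* bit 20 of the mask mirrors the gate: 0xffefffff clears it and
           OR-ing a20_state << 20 sets it again when the gate is open */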
        env->a20_mask = 0xffefffff | (a20_state << 20);
    }
}

void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
{
    int pe_state;

#if defined(DEBUG_MMU)
    printf("CR0 update: CR0=0x%08x\n", new_cr0);
#endif
    if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
        (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
        tlb_flush(env, 1);
    }

#ifdef TARGET_X86_64
    if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
        (env->efer & MSR_EFER_LME)) {
        /* enter long mode */
        /* XXX: generate an exception */
        if (!(env->cr[4] & CR4_PAE_MASK))
            return;
        env->efer |= MSR_EFER_LMA;
        env->hflags |= HF_LMA_MASK;
    } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
               (env->efer & MSR_EFER_LMA)) {
        /* exit long mode */
        env->efer &= ~MSR_EFER_LMA;
        env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
        env->eip &= 0xffffffff;
    }
#endif
    env->cr[0] = new_cr0 | CR0_ET_MASK;

    /* update PE flag in hidden flags */
    pe_state = (env->cr[0] & CR0_PE_MASK);
    env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
    /* ensure that ADDSEG is always set in real mode */
    env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
    /* update FPU flags */
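    /* CR0.MP, CR0.EM and CR0.TS sit in bits 1-3; shifting new_cr0 left
       by HF_MP_SHIFT - 1 lines them up with the matching hflags bits */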
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
        ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
#ifdef VBOX
    remR3ChangeCpuMode(env);
#endif
}

/* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in
   the PDPT */
void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
{
    env->cr[3] = new_cr3;
    if (env->cr[0] & CR0_PG_MASK) {
#if defined(DEBUG_MMU)
        printf("CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
#endif
        tlb_flush(env, 0);
    }
}

void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
{
#if defined(DEBUG_MMU)
    printf("CR4 update: CR4=%08x\n", (uint32_t)env->cr[4]);
#endif
    if ((new_cr4 & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK)) !=
        (env->cr[4] & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK))) {
        tlb_flush(env, 1);
    }
    /* SSE handling */
    if (!(env->cpuid_features & CPUID_SSE))
        new_cr4 &= ~CR4_OSFXSR_MASK;
    if (new_cr4 & CR4_OSFXSR_MASK)
        env->hflags |= HF_OSFXSR_MASK;
    else
        env->hflags &= ~HF_OSFXSR_MASK;

    env->cr[4] = new_cr4;
#ifdef VBOX
    remR3ChangeCpuMode(env);
#endif
}

/* XXX: also flush 4MB pages */
void cpu_x86_flush_tlb(CPUX86State *env, uint32_t addr)
{
#if defined(DEBUG) && defined(VBOX)
    uint32_t pde;
    uint8_t *pde_ptr;

    /* page directory entry */
    pde_ptr = remR3GCPhys2HCVirt(env, (((env->cr[3] & ~0xfff) + ((addr >> 20) & ~3)) & env->a20_mask));
    pde = ldl_raw(pde_ptr);
    /* if PSE bit is set, then we use a 4MB page */
    if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
        printf("cpu_x86_flush_tlb: 4 MB page!!!!!\n");
    }
#endif
    tlb_flush_page(env, addr);
}

#if defined(CONFIG_USER_ONLY)

int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write, int is_user, int is_softmmu)
{
    /* user mode only emulation */
    is_write &= 1;
    env->cr[2] = addr;
    env->error_code = (is_write << PG_ERROR_W_BIT);
    env->error_code |= PG_ERROR_U_MASK;
    return 1;
}

target_ulong cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    return addr;
}

#else

/* return value:
   -1 = cannot handle fault
   0  = nothing more to do
   1  = generate PF fault
   2  = soft MMU activation required for this block
*/
int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write, int is_user, int is_softmmu)
{
    uint32_t pdpe_addr, pde_addr, pte_addr;
    uint32_t pde, pte, ptep, pdpe;
    int error_code, is_dirty, prot, page_size, ret;
    unsigned long paddr, page_offset;
    target_ulong vaddr, virt_addr;

#if defined(DEBUG_MMU)
    printf("MMU fault: addr=" TARGET_FMT_lx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
           addr, is_write, is_user, env->eip);
#endif
    is_write &= 1;

    if (!(env->cr[0] & CR0_PG_MASK)) {
        pte = addr;
        virt_addr = addr & TARGET_PAGE_MASK;
        prot = PAGE_READ | PAGE_WRITE;
        page_size = 4096;
        goto do_mapping;
    }
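    /* paging is enabled: walk the page tables. PAE mode uses 8-byte
       entries in three levels (four with the PML4 in long mode), legacy
       mode uses 4-byte entries in two levels. */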

    if (env->cr[4] & CR4_PAE_MASK) {
        /* XXX: we only use 32 bit physical addresses */
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t pml4e_addr, pml4e;
            int32_t sext;

            /* XXX: handle user + rw rights */
            /* XXX: handle NX flag */
            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1) {
                error_code = 0;
                goto do_fault;
            }

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldl_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(pml4e & PG_ACCESSED_MASK)) {
                pml4e |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pml4e_addr, pml4e);
            }

            pdpe_addr = ((pml4e & ~0xfff) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldl_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(pdpe & PG_ACCESSED_MASK)) {
                pdpe |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pdpe_addr, pdpe);
            }
        } else
#endif
        {
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 30) << 3)) &
                env->a20_mask;
            pdpe = ldl_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
        }

        pde_addr = ((pdpe & ~0xfff) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldl_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            goto handle_big_page;
        } else {
            /* 4 KB page */
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            pte_addr = ((pde & ~0xfff) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            goto handle_4k_page;
        }
    } else {
        /* page directory entry */
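        /* (addr >> 20) & ~3 equals ((addr >> 22) << 2): the top ten
           address bits select the PDE, scaled to a 4-byte offset */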
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & ~3)) &
            env->a20_mask;
        pde = ldl_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        /* if PSE bit is set, then we use a 4MB page */
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            page_size = 4096 * 1024;
        handle_big_page:
            if (is_user) {
                if (!(pde & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
            ptep = pte;
            virt_addr = addr & ~(page_size - 1);
        } else {
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            /* page table entry */
            pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
                env->a20_mask;
        handle_4k_page:
            pte = ldl_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            /* combine pde and pte user and rw protections */
            ptep = pte & pde;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
        }

        /* the page can be put in the TLB */
        prot = PAGE_READ;
        if (pte & PG_DIRTY_MASK) {
            /* only set write access if already dirty... otherwise wait
               for dirty access */
            if (is_user) {
                if (ptep & PG_RW_MASK)
                    prot |= PAGE_WRITE;
            } else {
                if (!(env->cr[0] & CR0_WP_MASK) ||
                    (ptep & PG_RW_MASK))
                    prot |= PAGE_WRITE;
            }
        }
    }
 do_mapping:
    pte = pte & env->a20_mask;

    /* even with 4MB pages, we map only one 4KB page in the cache to
       avoid filling it too fast */
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    vaddr = virt_addr + page_offset;

    ret = tlb_set_page(env, vaddr, paddr, prot, is_user, is_softmmu);
    return ret;
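    /* page fault error code bits: P (bit 0) = protection violation,
       W (bit 1) = write access, U (bit 2) = user-mode access */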
 do_fault_protect:
    error_code = PG_ERROR_P_MASK;
 do_fault:
    env->cr[2] = addr;
    env->error_code = (is_write << PG_ERROR_W_BIT) | error_code;
    if (is_user)
        env->error_code |= PG_ERROR_U_MASK;
    return 1;
}

target_ulong cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    uint32_t pde_addr, pte_addr;
    uint32_t pde, pte, paddr, page_offset, page_size;

    if (env->cr[4] & CR4_PAE_MASK) {
        uint32_t pdpe_addr, pde_addr, pte_addr;
        uint32_t pdpe;

        /* XXX: we only use 32 bit physical addresses */
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1)
                return -1;

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldl_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK))
                return -1;

            pdpe_addr = ((pml4e & ~0xfff) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldl_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        } else
#endif
        {
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 30) << 3)) &
                env->a20_mask;
            pdpe = ldl_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        }

        pde_addr = ((pdpe & ~0xfff) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldl_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            return -1;
        }
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
        } else {
            /* 4 KB page */
            pte_addr = ((pde & ~0xfff) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            page_size = 4096;
            pte = ldl_phys(pte_addr);
        }
    } else {
        if (!(env->cr[0] & CR0_PG_MASK)) {
            pte = addr;
            page_size = 4096;
        } else {
            /* page directory entry */
            pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & ~3)) & env->a20_mask;
            pde = ldl_phys(pde_addr);
            if (!(pde & PG_PRESENT_MASK))
                return -1;
            if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
                pte = pde & ~0x003ff000; /* align to 4MB */
                page_size = 4096 * 1024;
            } else {
                /* page table entry */
                pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask;
                pte = ldl_phys(pte_addr);
                if (!(pte & PG_PRESENT_MASK))
                    return -1;
                page_size = 4096;
            }
        }
        pte = pte & env->a20_mask;
    }

    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    return paddr;
}
#endif /* !CONFIG_USER_ONLY */

#if defined(USE_CODE_COPY)
struct fpstate {
    uint16_t fpuc;
    uint16_t dummy1;
    uint16_t fpus;
    uint16_t dummy2;
    uint16_t fptag;
    uint16_t dummy3;

    uint32_t fpip;
    uint32_t fpcs;
    uint32_t fpoo;
    uint32_t fpos;
    uint8_t fpregs1[8 * 10];
};

void restore_native_fp_state(CPUState *env)
{
    int fptag, i, j;
    struct fpstate fp1, *fp = &fp1;

    fp->fpuc = env->fpuc;
    fp->fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
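    /* build an FSAVE-format tag word: two bits per register, 11b marks
       a register empty, 00b in-use; exact classes are left to the FPU */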
    for (i=7; i>=0; i--) {
        fptag <<= 2;
        if (env->fptags[i]) {
            fptag |= 3;
        } else {
            /* the FPU automatically computes it */
        }
    }
    fp->fptag = fptag;
    j = env->fpstt;
    for(i = 0;i < 8; i++) {
        memcpy(&fp->fpregs1[i * 10], &env->fpregs[j].d, 10);
        j = (j + 1) & 7;
    }
    asm volatile ("frstor %0" : "=m" (*fp));
    env->native_fp_regs = 1;
}

void save_native_fp_state(CPUState *env)
{
    int fptag, i, j;
    uint16_t fpuc;
    struct fpstate fp1, *fp = &fp1;

    asm volatile ("fsave %0" : : "m" (*fp));
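    /* the TOP-of-stack field lives in status word bits 11-13; FSAVE
       stores registers in ST(i) order, so the copy loop below rotates
       them back into physical slots starting at TOP */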
    env->fpuc = fp->fpuc;
    env->fpstt = (fp->fpus >> 11) & 7;
    env->fpus = fp->fpus & ~0x3800;
    fptag = fp->fptag;
    for(i = 0;i < 8; i++) {
        env->fptags[i] = ((fptag & 3) == 3);
        fptag >>= 2;
    }
    j = env->fpstt;
    for(i = 0;i < 8; i++) {
        memcpy(&env->fpregs[j].d, &fp->fpregs1[i * 10], 10);
        j = (j + 1) & 7;
    }
    /* we must restore the default rounding state */
    /* XXX: we do not restore the exception state */
    fpuc = 0x037f | (env->fpuc & (3 << 10));
    asm volatile("fldcw %0" : : "m" (fpuc));
    env->native_fp_regs = 0;
}
#endif