VirtualBox

source: vbox/trunk/src/recompiler/target-i386/helper.c@ 36140

Last change on this file since 36140 was 36140, checked in by vboxsync, 14 years ago

rem: Re-synced to svn://svn.savannah.nongnu.org/qemu/trunk@5495 (repo UUID c046a42c-6fe2-441c-8c8c-71466251a162).

  • Property svn:eol-style set to native
File size: 43.9 KB
Line 
1/*
2 * i386 helpers (without register variable usage)
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21/*
22 * Oracle LGPL Disclaimer: For the avoidance of doubt, except that if any license choice
23 * other than GPL or LGPL is available it will apply instead, Oracle elects to use only
24 * the Lesser General Public License version 2.1 (LGPLv2) at this time for any software where
25 * a choice of LGPL license versions is made available with the language indicating
26 * that LGPLv2 or any later version may be used, or where a choice of which version
27 * of the LGPL is applied is otherwise unspecified.
28 */
29
30#include <stdarg.h>
31#include <stdlib.h>
32#include <stdio.h>
33#include <string.h>
34#ifndef VBOX
35# include <inttypes.h>
36# include <signal.h>
37# include <assert.h>
38#endif
39
40#include "cpu.h"
41#include "exec-all.h"
42#include "svm.h"
43#include "qemu-common.h"
44
45//#define DEBUG_MMU
46
47static int cpu_x86_register (CPUX86State *env, const char *cpu_model);
48
49#ifndef VBOX
/* Look up a single CPUID feature-flag name and set the matching bit in
 * one of the four feature words (CPUID.1:EDX, CPUID.1:ECX,
 * CPUID.8000_0001:EDX, CPUID.8000_0001:ECX).  Unknown names are reported
 * on stderr and otherwise ignored.
 *
 * The tables are searched in order and the first match wins, so a name
 * present in several leaves (e.g. "fpu", "mtrr") only sets the
 * standard-leaf bit, mirroring the original behaviour.
 */
static void add_flagname_to_bitmaps(char *flagname, uint32_t *features,
                                    uint32_t *ext_features,
                                    uint32_t *ext2_features,
                                    uint32_t *ext3_features)
{
    int t, i;
    /* feature flags taken from "Intel Processor Identification and the CPUID
     * Instruction" and AMD's "CPUID Specification". In cases of disagreement
     * about feature names, the Linux name is used. */
    static const char *feature_name[] = {
        "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
        "cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
        "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */, NULL, "ds" /* Intel dts */, "acpi", "mmx",
        "fxsr", "sse", "sse2", "ss", "ht" /* Intel htt */, "tm", "ia64", "pbe",
    };
    static const char *ext_feature_name[] = {
        "pni" /* Intel,AMD sse3 */, NULL, NULL, "monitor", "ds_cpl", "vmx", NULL /* Linux smx */, "est",
        "tm2", "ssse3", "cid", NULL, NULL, "cx16", "xtpr", NULL,
        NULL, NULL, "dca", NULL, NULL, NULL, NULL, "popcnt",
        NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    };
    static const char *ext2_feature_name[] = {
        "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
        /* bit 12 was misspelled "mttr"; fixed to the real flag name "mtrr" */
        "cx8" /* AMD CMPXCHG8B */, "apic", NULL, "syscall", "mtrr", "pge", "mca", "cmov",
        "pat", "pse36", NULL, NULL /* Linux mp */, "nx" /* Intel xd */, NULL, "mmxext", "mmx",
        "fxsr", "fxsr_opt" /* AMD ffxsr */, "pdpe1gb" /* AMD Page1GB */, "rdtscp", NULL, "lm" /* Intel 64 */, "3dnowext", "3dnow",
    };
    static const char *ext3_feature_name[] = {
        "lahf_lm" /* AMD LahfSahf */, "cmp_legacy", "svm", "extapic" /* AMD ExtApicSpace */, "cr8legacy" /* AMD AltMovCr8 */, "abm", "sse4a", "misalignsse",
        "3dnowprefetch", "osvw", NULL /* Linux ibs */, NULL, "skinit", "wdt", NULL, NULL,
        NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
        NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    };
    /* Table-driven scan replaces four identical copy-pasted loops. */
    static const char **tables[4];
    uint32_t *bitmaps[4];

    tables[0] = feature_name;      bitmaps[0] = features;
    tables[1] = ext_feature_name;  bitmaps[1] = ext_features;
    tables[2] = ext2_feature_name; bitmaps[2] = ext2_features;
    tables[3] = ext3_feature_name; bitmaps[3] = ext3_features;

    for (t = 0; t < 4; t++) {
        for (i = 0; i < 32; i++) {
            if (tables[t][i] && !strcmp(flagname, tables[t][i])) {
                *bitmaps[t] |= 1u << i;
                return;
            }
        }
    }
    fprintf(stderr, "CPU feature %s not found\n", flagname);
}
106#endif /* !VBOX */
107
/* Allocate (non-VBox builds) and initialize an x86 CPU state for the given
 * "-cpu"-style model string.  In the VBox build the caller owns and passes
 * in the CPUX86State, so no allocation is done here.
 *
 * Returns the initialized env, or NULL if allocation or model lookup
 * failed (on lookup failure the env has already been passed to
 * cpu_x86_close()).
 */
#ifndef VBOX
CPUX86State *cpu_x86_init(const char *cpu_model)
#else
CPUX86State *cpu_x86_init(CPUX86State *env, const char *cpu_model)
#endif
{
#ifndef VBOX
    CPUX86State *env;
#endif
    static int inited;  /* one-shot guard for process-wide table setup */

#ifndef VBOX
    env = qemu_mallocz(sizeof(CPUX86State));
    if (!env)
        return NULL;
#endif
    cpu_exec_init(env);
    env->cpu_model_str = cpu_model;

    /* init various static tables */
    if (!inited) {
        inited = 1;
        optimize_flags_init();
    }
    /* Fill in the CPUID-derived fields from the model definition; an
       unknown model name makes this fail. */
    if (cpu_x86_register(env, cpu_model) < 0) {
        cpu_x86_close(env);
        return NULL;
    }
    cpu_reset(env);
#ifdef USE_KQEMU
    kqemu_init(env);
#endif
    return env;
}
142
/* Static description of one built-in CPU model: everything needed to fill
 * in the CPUID-related fields of CPUX86State (see cpu_x86_register()).
 * NOTE(review): field order matters — x86_defs[] below relies on it for
 * positional initialization and cpu_x86_find_by_name() memcpy()s whole
 * instances. */
typedef struct x86_def_t {
    const char *name;                   /* model name matched against -cpu */
    uint32_t level;                     /* highest standard CPUID leaf */
    /* 12-byte vendor id split into three little-endian 4-char words
       (presumably CPUID.0 EBX/EDX/ECX order — confirm in cpuid helper) */
    uint32_t vendor1, vendor2, vendor3;
    int family;
    int model;
    int stepping;
    /* CPUID.1:EDX, CPUID.1:ECX, CPUID.8000_0001:EDX, CPUID.8000_0001:ECX */
    uint32_t features, ext_features, ext2_features, ext3_features;
    uint32_t xlevel;                    /* highest extended CPUID leaf */
    char model_id[48];                  /* brand string, NUL-padded to 48 */
} x86_def_t;
154
155#ifndef VBOX
156#define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
157#define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
158 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX)
159#define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
160 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
161 CPUID_PSE36 | CPUID_FXSR)
162#define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
163#define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
164 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
165 CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
166 CPUID_PAE | CPUID_SEP | CPUID_APIC)
167static x86_def_t x86_defs[] = {
168#ifdef TARGET_X86_64
169 {
170 .name = "qemu64",
171 .level = 2,
172 .vendor1 = CPUID_VENDOR_AMD_1,
173 .vendor2 = CPUID_VENDOR_AMD_2,
174 .vendor3 = CPUID_VENDOR_AMD_3,
175 .family = 6,
176 .model = 2,
177 .stepping = 3,
178 .features = PPRO_FEATURES |
179 /* these features are needed for Win64 and aren't fully implemented */
180 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
181 /* this feature is needed for Solaris and isn't fully implemented */
182 CPUID_PSE36,
183 .ext_features = CPUID_EXT_SSE3,
184 .ext2_features = (PPRO_FEATURES & 0x0183F3FF) |
185 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
186 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
187 .ext3_features = CPUID_EXT3_SVM,
188 .xlevel = 0x8000000A,
189 .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
190 },
191 {
192 .name = "core2duo",
193 .level = 10,
194 .family = 6,
195 .model = 15,
196 .stepping = 11,
197 /* The original CPU also implements these features:
198 CPUID_VME, CPUID_DTS, CPUID_ACPI, CPUID_SS, CPUID_HT,
199 CPUID_TM, CPUID_PBE */
200 .features = PPRO_FEATURES |
201 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
202 CPUID_PSE36,
203 /* The original CPU also implements these ext features:
204 CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_EST,
205 CPUID_EXT_TM2, CPUID_EXT_CX16, CPUID_EXT_XTPR, CPUID_EXT_PDCM */
206 .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3,
207 .ext2_features = CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
208 /* Missing: .ext3_features = CPUID_EXT3_LAHF_LM */
209 .xlevel = 0x80000008,
210 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
211 },
212#endif
213 {
214 .name = "qemu32",
215 .level = 2,
216 .family = 6,
217 .model = 3,
218 .stepping = 3,
219 .features = PPRO_FEATURES,
220 .ext_features = CPUID_EXT_SSE3,
221 .xlevel = 0,
222 .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
223 },
224 {
225 .name = "coreduo",
226 .level = 10,
227 .family = 6,
228 .model = 14,
229 .stepping = 8,
230 /* The original CPU also implements these features:
231 CPUID_DTS, CPUID_ACPI, CPUID_SS, CPUID_HT,
232 CPUID_TM, CPUID_PBE */
233 .features = PPRO_FEATURES | CPUID_VME |
234 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA,
235 /* The original CPU also implements these ext features:
236 CPUID_EXT_VMX, CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_XTPR,
237 CPUID_EXT_PDCM */
238 .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
239 .ext2_features = CPUID_EXT2_NX,
240 .xlevel = 0x80000008,
241 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
242 },
243 {
244 .name = "486",
245 .level = 0,
246 .family = 4,
247 .model = 0,
248 .stepping = 0,
249 .features = I486_FEATURES,
250 .xlevel = 0,
251 },
252 {
253 .name = "pentium",
254 .level = 1,
255 .family = 5,
256 .model = 4,
257 .stepping = 3,
258 .features = PENTIUM_FEATURES,
259 .xlevel = 0,
260 },
261 {
262 .name = "pentium2",
263 .level = 2,
264 .family = 6,
265 .model = 5,
266 .stepping = 2,
267 .features = PENTIUM2_FEATURES,
268 .xlevel = 0,
269 },
270 {
271 .name = "pentium3",
272 .level = 2,
273 .family = 6,
274 .model = 7,
275 .stepping = 3,
276 .features = PENTIUM3_FEATURES,
277 .xlevel = 0,
278 },
279 {
280 .name = "athlon",
281 .level = 2,
282 .vendor1 = 0x68747541, /* "Auth" */
283 .vendor2 = 0x69746e65, /* "enti" */
284 .vendor3 = 0x444d4163, /* "cAMD" */
285 .family = 6,
286 .model = 2,
287 .stepping = 3,
288 .features = PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR | CPUID_MCA,
289 .ext2_features = (PPRO_FEATURES & 0x0183F3FF) | CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
290 .xlevel = 0x80000008,
291 /* XXX: put another string ? */
292 .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
293 },
294 {
295 .name = "n270",
296 /* original is on level 10 */
297 .level = 5,
298 .family = 6,
299 .model = 28,
300 .stepping = 2,
301 .features = PPRO_FEATURES |
302 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME,
303 /* Missing: CPUID_DTS | CPUID_ACPI | CPUID_SS |
304 * CPUID_HT | CPUID_TM | CPUID_PBE */
305 /* Some CPUs got no CPUID_SEP */
306 .ext_features = CPUID_EXT_MONITOR |
307 CPUID_EXT_SSE3 /* PNI */, CPUID_EXT_SSSE3,
308 /* Missing: CPUID_EXT_DSCPL | CPUID_EXT_EST |
309 * CPUID_EXT_TM2 | CPUID_EXT_XTPR */
310 .ext2_features = (PPRO_FEATURES & 0x0183F3FF) | CPUID_EXT2_NX,
311 /* Missing: .ext3_features = CPUID_EXT3_LAHF_LM */
312 .xlevel = 0x8000000A,
313 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
314 },
315};
316
/* Parse a "-cpu" model string of the form
 *     name[,+flag][,-flag][,prop=value]...
 * Look up the base model by name in x86_defs[], copy it into *x86_cpu_def,
 * then apply the comma-separated modifiers: "+flag"/"-flag" accumulate
 * feature bits to set/clear, and "family=", "model=", "stepping=",
 * "vendor=", "model_id=" override the corresponding fields.
 *
 * Returns 0 on success, -1 on unknown model name or malformed modifier.
 * NOTE(review): uses strtok(), so this is not reentrant; the strdup()'d
 * working copy is freed on all paths via the goto-error pattern. */
static int cpu_x86_find_by_name(x86_def_t *x86_cpu_def, const char *cpu_model)
{
    unsigned int i;
    x86_def_t *def;

    char *s = strdup(cpu_model);
    char *featurestr, *name = strtok(s, ",");
    uint32_t plus_features = 0, plus_ext_features = 0, plus_ext2_features = 0, plus_ext3_features = 0;
    uint32_t minus_features = 0, minus_ext_features = 0, minus_ext2_features = 0, minus_ext3_features = 0;
    int family = -1, model = -1, stepping = -1;

    /* Find the base model definition by name. */
    def = NULL;
    for (i = 0; i < sizeof(x86_defs) / sizeof(x86_def_t); i++) {
        if (strcmp(name, x86_defs[i].name) == 0) {
            def = &x86_defs[i];
            break;
        }
    }
    if (!def)
        goto error;
    memcpy(x86_cpu_def, def, sizeof(*def));

    /* Walk the remaining comma-separated modifier tokens. */
    featurestr = strtok(NULL, ",");

    while (featurestr) {
        char *val;
        if (featurestr[0] == '+') {
            add_flagname_to_bitmaps(featurestr + 1, &plus_features, &plus_ext_features, &plus_ext2_features, &plus_ext3_features);
        } else if (featurestr[0] == '-') {
            add_flagname_to_bitmaps(featurestr + 1, &minus_features, &minus_ext_features, &minus_ext2_features, &minus_ext3_features);
        } else if ((val = strchr(featurestr, '='))) {
            *val = 0; val++;  /* split "prop=value" in place */
            if (!strcmp(featurestr, "family")) {
                char *err;
                family = strtol(val, &err, 10);
                /* NOTE(review): unlike model/stepping there is no upper
                   bound check on family here. */
                if (!*val || *err || family < 0) {
                    fprintf(stderr, "bad numerical value %s\n", val);
                    goto error;
                }
                x86_cpu_def->family = family;
            } else if (!strcmp(featurestr, "model")) {
                char *err;
                model = strtol(val, &err, 10);
                if (!*val || *err || model < 0 || model > 0xf) {
                    fprintf(stderr, "bad numerical value %s\n", val);
                    goto error;
                }
                x86_cpu_def->model = model;
            } else if (!strcmp(featurestr, "stepping")) {
                char *err;
                stepping = strtol(val, &err, 10);
                if (!*val || *err || stepping < 0 || stepping > 0xf) {
                    fprintf(stderr, "bad numerical value %s\n", val);
                    goto error;
                }
                x86_cpu_def->stepping = stepping;
            } else if (!strcmp(featurestr, "vendor")) {
                if (strlen(val) != 12) {
                    fprintf(stderr, "vendor string must be 12 chars long\n");
                    goto error;
                }
                /* Pack the 12-char vendor string into three little-endian
                   32-bit words, 4 characters each. */
                x86_cpu_def->vendor1 = 0;
                x86_cpu_def->vendor2 = 0;
                x86_cpu_def->vendor3 = 0;
                for(i = 0; i < 4; i++) {
                    x86_cpu_def->vendor1 |= ((uint8_t)val[i    ]) << (8 * i);
                    x86_cpu_def->vendor2 |= ((uint8_t)val[i + 4]) << (8 * i);
                    x86_cpu_def->vendor3 |= ((uint8_t)val[i + 8]) << (8 * i);
                }
            } else if (!strcmp(featurestr, "model_id")) {
                pstrcpy(x86_cpu_def->model_id, sizeof(x86_cpu_def->model_id),
                        val);
            } else {
                fprintf(stderr, "unrecognized feature %s\n", featurestr);
                goto error;
            }
        } else {
            fprintf(stderr, "feature string `%s' not in format (+feature|-feature|feature=xyz)\n", featurestr);
            goto error;
        }
        featurestr = strtok(NULL, ",");
    }
    /* Apply accumulated +flags first, then -flags (so "-x" beats "+x"). */
    x86_cpu_def->features |= plus_features;
    x86_cpu_def->ext_features |= plus_ext_features;
    x86_cpu_def->ext2_features |= plus_ext2_features;
    x86_cpu_def->ext3_features |= plus_ext3_features;
    x86_cpu_def->features &= ~minus_features;
    x86_cpu_def->ext_features &= ~minus_ext_features;
    x86_cpu_def->ext2_features &= ~minus_ext2_features;
    x86_cpu_def->ext3_features &= ~minus_ext3_features;
    free(s);
    return 0;

error:
    free(s);
    return -1;
}
414
415void x86_cpu_list (FILE *f, int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
416{
417 unsigned int i;
418
419 for (i = 0; i < sizeof(x86_defs) / sizeof(x86_def_t); i++)
420 (*cpu_fprintf)(f, "x86 %16s\n", x86_defs[i].name);
421}
422#endif /* !VBOX */
423
/* Resolve the cpu_model string and copy the resulting model definition
 * into the CPUID-related fields of env.  In the VBox build the caller
 * configures CPUID itself, so this is a no-op that always succeeds.
 *
 * Returns 0 on success, -1 if the model string could not be parsed. */
static int cpu_x86_register (CPUX86State *env, const char *cpu_model)
{
#ifndef VBOX
    x86_def_t def1, *def = &def1;

    if (cpu_x86_find_by_name(def, cpu_model) < 0)
        return -1;
    /* Vendor defaults to GenuineIntel when the model sets none. */
    if (def->vendor1) {
        env->cpuid_vendor1 = def->vendor1;
        env->cpuid_vendor2 = def->vendor2;
        env->cpuid_vendor3 = def->vendor3;
    } else {
        env->cpuid_vendor1 = CPUID_VENDOR_INTEL_1;
        env->cpuid_vendor2 = CPUID_VENDOR_INTEL_2;
        env->cpuid_vendor3 = CPUID_VENDOR_INTEL_3;
    }
    env->cpuid_level = def->level;
    /* CPUID.1:EAX layout: family in bits 11-8, model 7-4, stepping 3-0.
       NOTE(review): family values > 15 would spill past bit 11 here. */
    env->cpuid_version = (def->family << 8) | (def->model << 4) | def->stepping;
    env->cpuid_features = def->features;
    env->pat = 0x0007040600070406ULL;  /* power-on PAT MSR value */
    env->cpuid_ext_features = def->ext_features;
    env->cpuid_ext2_features = def->ext2_features;
    env->cpuid_xlevel = def->xlevel;
    env->cpuid_ext3_features = def->ext3_features;
    {
        /* Pack the brand string into the 12 little-endian 32-bit words
           returned by CPUID 0x80000002..4, NUL-padded to 48 bytes.
           Relies on env->cpuid_model[] being zeroed (uses |=). */
        const char *model_id = def->model_id;
        int c, len, i;
        if (!model_id)
            model_id = "";
        len = strlen(model_id);
        for(i = 0; i < 48; i++) {
            if (i >= len)
                c = '\0';
            else
                c = (uint8_t)model_id[i];
            env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
        }
    }
#endif /* !VBOX */
    return 0;
}
465
466/* NOTE: must be called outside the CPU execute loop */
/* Put the CPU into its architectural power-on/reset state.
   NOTE: must be called outside the CPU execute loop. */
void cpu_reset(CPUX86State *env)
{
    int i;

    /* Clear everything up to (but not including) the breakpoints member,
       preserving fields laid out after it. */
    memset(env, 0, offsetof(CPUX86State, breakpoints));

    tlb_flush(env, 1);

    env->old_exception = -1;

    /* init to reset state */

#ifdef CONFIG_SOFTMMU
    env->hflags |= HF_SOFTMMU_MASK;
#endif
    env->hflags2 |= HF2_GIF_MASK;

    cpu_x86_update_cr0(env, 0x60000010);  /* reset CR0 value (CD|NW|ET) */
    env->a20_mask = ~0x0;                 /* A20 gate open */
    env->smbase = 0x30000;

    env->idt.limit = 0xffff;
    env->gdt.limit = 0xffff;
    env->ldt.limit = 0xffff;
    env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
    env->tr.limit = 0xffff;
    env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);

    /* CS starts at the reset vector segment; all other segments at 0. */
    cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK | DESC_R_MASK);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);

    env->eip = 0xfff0;  /* reset vector offset */
#ifndef VBOX
    env->regs[R_EDX] = env->cpuid_version;
#else
    /** @todo: is it right? */
    env->regs[R_EDX] = 0x600; /* indicate P6 processor */
#endif

    env->eflags = 0x2;  /* only the always-set reserved bit 1 */

    /* FPU init: all stack slots tagged empty, default control word. */
    for(i = 0;i < 8; i++)
        env->fptags[i] = 1;
    env->fpuc = 0x37f;

    env->mxcsr = 0x1f80;  /* SSE default: all exceptions masked */
}
525
/* Release a CPU state created by cpu_x86_init().  In the VBox build the
   CPUX86State is owned by the caller, so nothing is freed here. */
void cpu_x86_close(CPUX86State *env)
{
#ifndef VBOX
    qemu_free(env);
#endif
}
532
533/***********************************************************/
534/* x86 debug */
535
/* Human-readable names for the lazy condition-code operations, used by
 * cpu_dump_state() for the CCO field.  NOTE(review): entry order must
 * mirror the CC_OP_* enum (indexed by env->cc_op) — do not reorder. */
static const char *cc_op_str[] = {
    "DYNAMIC",
    "EFLAGS",

    "MULB",
    "MULW",
    "MULL",
    "MULQ",

    "ADDB",
    "ADDW",
    "ADDL",
    "ADDQ",

    "ADCB",
    "ADCW",
    "ADCL",
    "ADCQ",

    "SUBB",
    "SUBW",
    "SUBL",
    "SUBQ",

    "SBBB",
    "SBBW",
    "SBBL",
    "SBBQ",

    "LOGICB",
    "LOGICW",
    "LOGICL",
    "LOGICQ",

    "INCB",
    "INCW",
    "INCL",
    "INCQ",

    "DECB",
    "DECW",
    "DECL",
    "DECQ",

    "SHLB",
    "SHLW",
    "SHLL",
    "SHLQ",

    "SARB",
    "SARW",
    "SARL",
    "SARQ",
};
590
/* Dump the full architectural CPU state to stream f using the supplied
 * fprintf-like callback: general registers and flags (64- or 32-bit form
 * depending on the current CS mode), segment/descriptor registers, control
 * registers, and — gated on the X86_DUMP_CCOP / X86_DUMP_FPU bits of
 * 'flags' — the lazy condition-code state and the FPU/SSE registers. */
void cpu_dump_state(CPUState *env, FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
                    int flags)
{
    int eflags, i, nb;
    char cc_op_name[32];
    static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };

    eflags = env->eflags;
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        /* 64-bit code segment: print the full RAX..R15 register file. */
        cpu_fprintf(f,
                    "RAX=%016" PRIx64 " RBX=%016" PRIx64 " RCX=%016" PRIx64 " RDX=%016" PRIx64 "\n"
                    "RSI=%016" PRIx64 " RDI=%016" PRIx64 " RBP=%016" PRIx64 " RSP=%016" PRIx64 "\n"
                    "R8 =%016" PRIx64 " R9 =%016" PRIx64 " R10=%016" PRIx64 " R11=%016" PRIx64 "\n"
                    "R12=%016" PRIx64 " R13=%016" PRIx64 " R14=%016" PRIx64 " R15=%016" PRIx64 "\n"
                    "RIP=%016" PRIx64 " RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    env->regs[R_EAX],
                    env->regs[R_EBX],
                    env->regs[R_ECX],
                    env->regs[R_EDX],
                    env->regs[R_ESI],
                    env->regs[R_EDI],
                    env->regs[R_EBP],
                    env->regs[R_ESP],
                    env->regs[8],
                    env->regs[9],
                    env->regs[10],
                    env->regs[11],
                    env->regs[12],
                    env->regs[13],
                    env->regs[14],
                    env->regs[15],
                    env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (int)(env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    env->halted);
    } else
#endif
    {
        /* 32-bit (or real/vm86) mode: truncate registers to 32 bits. */
        cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
                    "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
                    "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    (uint32_t)env->regs[R_EAX],
                    (uint32_t)env->regs[R_EBX],
                    (uint32_t)env->regs[R_ECX],
                    (uint32_t)env->regs[R_EDX],
                    (uint32_t)env->regs[R_ESI],
                    (uint32_t)env->regs[R_EDI],
                    (uint32_t)env->regs[R_EBP],
                    (uint32_t)env->regs[R_ESP],
                    (uint32_t)env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (int)(env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    env->halted);
    }

    /* Segment, descriptor-table and control registers — 64-bit bases in
       long mode, 32-bit otherwise. */
#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        for(i = 0; i < 6; i++) {
            SegmentCache *sc = &env->segs[i];
            cpu_fprintf(f, "%s =%04x %016" PRIx64 " %08x %08x\n",
                        seg_name[i],
                        sc->selector,
                        sc->base,
                        sc->limit,
                        sc->flags);
        }
        cpu_fprintf(f, "LDT=%04x %016" PRIx64 " %08x %08x\n",
                    env->ldt.selector,
                    env->ldt.base,
                    env->ldt.limit,
                    env->ldt.flags);
        cpu_fprintf(f, "TR =%04x %016" PRIx64 " %08x %08x\n",
                    env->tr.selector,
                    env->tr.base,
                    env->tr.limit,
                    env->tr.flags);
        cpu_fprintf(f, "GDT=     %016" PRIx64 " %08x\n",
                    env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %016" PRIx64 " %08x\n",
                    env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%016" PRIx64 " CR3=%016" PRIx64 " CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    env->cr[2],
                    env->cr[3],
                    (uint32_t)env->cr[4]);
    } else
#endif
    {
        for(i = 0; i < 6; i++) {
            SegmentCache *sc = &env->segs[i];
            cpu_fprintf(f, "%s =%04x %08x %08x %08x\n",
                        seg_name[i],
                        sc->selector,
                        (uint32_t)sc->base,
                        sc->limit,
                        sc->flags);
        }
        cpu_fprintf(f, "LDT=%04x %08x %08x %08x\n",
                    env->ldt.selector,
                    (uint32_t)env->ldt.base,
                    env->ldt.limit,
                    env->ldt.flags);
        cpu_fprintf(f, "TR =%04x %08x %08x %08x\n",
                    env->tr.selector,
                    (uint32_t)env->tr.base,
                    env->tr.limit,
                    env->tr.flags);
        cpu_fprintf(f, "GDT=     %08x %08x\n",
                    (uint32_t)env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %08x %08x\n",
                    (uint32_t)env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    (uint32_t)env->cr[2],
                    (uint32_t)env->cr[3],
                    (uint32_t)env->cr[4]);
    }
    if (flags & X86_DUMP_CCOP) {
        /* Lazy condition-code state: name the op if in range, else its
           raw numeric value in brackets. */
        if ((unsigned)env->cc_op < CC_OP_NB)
            snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
        else
            snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK) {
            cpu_fprintf(f, "CCS=%016" PRIx64 " CCD=%016" PRIx64 " CCO=%-8s\n",
                        env->cc_src, env->cc_dst,
                        cc_op_name);
        } else
#endif
        {
            cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
                        (uint32_t)env->cc_src, (uint32_t)env->cc_dst,
                        cc_op_name);
        }
    }
    if (flags & X86_DUMP_FPU) {
        /* FTW is stored inverted internally (fptags[i]==1 means empty). */
        int fptag;
        fptag = 0;
        for(i = 0; i < 8; i++) {
            fptag |= ((!env->fptags[i]) << i);
        }
        cpu_fprintf(f, "FCW=%04x FSW=%04x [ST=%d] FTW=%02x MXCSR=%08x\n",
                    env->fpuc,
                    (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11,
                    env->fpstt,
                    fptag,
                    env->mxcsr);
        for(i=0;i<8;i++) {
#if defined(USE_X86LDOUBLE)
            /* Type-pun the 80-bit long double to print its raw bits. */
            union {
                long double d;
                struct {
                    uint64_t lower;
                    uint16_t upper;
                } l;
            } tmp;
            tmp.d = env->fpregs[i].d;
            cpu_fprintf(f, "FPR%d=%016" PRIx64 " %04x",
                        i, tmp.l.lower, tmp.l.upper);
#else
            cpu_fprintf(f, "FPR%d=%016" PRIx64,
                        i, env->fpregs[i].mmx.q);
#endif
            /* two register dumps per output line */
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
        if (env->hflags & HF_CS64_MASK)
            nb = 16;  /* XMM8-15 only exist in 64-bit mode */
        else
            nb = 8;
        for(i=0;i<nb;i++) {
            cpu_fprintf(f, "XMM%02d=%08x%08x%08x%08x",
                        i,
                        env->xmm_regs[i].XMM_L(3),
                        env->xmm_regs[i].XMM_L(2),
                        env->xmm_regs[i].XMM_L(1),
                        env->xmm_regs[i].XMM_L(0));
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
    }
}
797
798/***********************************************************/
799/* x86 mmu */
800/* XXX: add PGE support */
801
802void cpu_x86_set_a20(CPUX86State *env, int a20_state)
803{
804 a20_state = (a20_state != 0);
805 if (a20_state != ((env->a20_mask >> 20) & 1)) {
806#if defined(DEBUG_MMU)
807 printf("A20 update: a20=%d\n", a20_state);
808#endif
809 /* if the cpu is currently executing code, we must unlink it and
810 all the potentially executing TB */
811 cpu_interrupt(env, CPU_INTERRUPT_EXITTB);
812
813 /* when a20 is changed, all the MMU mappings are invalid, so
814 we must flush everything */
815 tlb_flush(env, 1);
816 env->a20_mask = (~0x100000) | (a20_state << 20);
817 }
818}
819
/* Install a new CR0 value, flushing the TLB when paging/protection bits
 * change, handling long-mode entry/exit when EFER.LME is set, and keeping
 * the cached hflags (PE/ADDSEG/MP/EM/TS) in sync. */
void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
{
    int pe_state;

#if defined(DEBUG_MMU)
    printf("CR0 update: CR0=0x%08x\n", new_cr0);
#endif
    /* Any change to PG/WP/PE invalidates all cached translations. */
    if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
        (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
        tlb_flush(env, 1);
    }

#ifdef TARGET_X86_64
    if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
        (env->efer & MSR_EFER_LME)) {
        /* enter in long mode */
        /* XXX: generate an exception */
        if (!(env->cr[4] & CR4_PAE_MASK))
            return;
        env->efer |= MSR_EFER_LMA;
        env->hflags |= HF_LMA_MASK;
    } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
               (env->efer & MSR_EFER_LMA)) {
        /* exit long mode */
        env->efer &= ~MSR_EFER_LMA;
        env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
        env->eip &= 0xffffffff;
    }
#endif
    env->cr[0] = new_cr0 | CR0_ET_MASK;  /* ET is hardwired to 1 */

    /* update PE flag in hidden flags */
    pe_state = (env->cr[0] & CR0_PE_MASK);
    env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
    /* ensure that ADDSEG is always set in real mode */
    env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
    /* update FPU flags: shifts CR0's MP/EM/TS bits straight into the
       corresponding hflags positions (relies on HF_MP/EM/TS being
       contiguous, one bit position apart — see cpu.h) */
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
        ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
#ifdef VBOX

    remR3ChangeCpuMode(env);
#endif
}
864
865/* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in
866 the PDPT */
867void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
868{
869 env->cr[3] = new_cr3;
870 if (env->cr[0] & CR0_PG_MASK) {
871#if defined(DEBUG_MMU)
872 printf("CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
873#endif
874 tlb_flush(env, 0);
875 }
876}
877
878void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
879{
880#if defined(DEBUG_MMU)
881 printf("CR4 update: CR4=%08x\n", (uint32_t)env->cr[4]);
882#endif
883 if ((new_cr4 & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK)) !=
884 (env->cr[4] & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK))) {
885 tlb_flush(env, 1);
886 }
887 /* SSE handling */
888 if (!(env->cpuid_features & CPUID_SSE))
889 new_cr4 &= ~CR4_OSFXSR_MASK;
890 if (new_cr4 & CR4_OSFXSR_MASK)
891 env->hflags |= HF_OSFXSR_MASK;
892 else
893 env->hflags &= ~HF_OSFXSR_MASK;
894
895 env->cr[4] = new_cr4;
896#ifdef VBOX
897 remR3ChangeCpuMode(env);
898#endif
899}
900
901/* XXX: also flush 4MB pages */
/* Invalidate the translation for a single virtual page (INVLPG).
   XXX: also flush 4MB pages */
void cpu_x86_flush_tlb(CPUX86State *env, target_ulong addr)
{
    tlb_flush_page(env, addr);
}
906
907#if defined(CONFIG_USER_ONLY)
908
909int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
910 int is_write, int mmu_idx, int is_softmmu)
911{
912 /* user mode only emulation */
913 is_write &= 1;
914 env->cr[2] = addr;
915 env->error_code = (is_write << PG_ERROR_W_BIT);
916 env->error_code |= PG_ERROR_U_MASK;
917 env->exception_index = EXCP0E_PAGE;
918 return 1;
919}
920
/* User-mode-only emulation: virtual and physical addresses are identical,
   so the debug translation is the identity. */
target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    return addr;
}
925
926#else
927
/* Mask selecting the physical page-frame bits of a PAE/long-mode page
 * table entry (i.e. the supported physical address width, minus the low
 * 12 offset bits).
 * XXX: This value should match the one returned by CPUID
 * and in exec.c */
#if defined(USE_KQEMU)
#define PHYS_ADDR_MASK 0xfffff000LL
#else
# if defined(TARGET_X86_64)
#  define PHYS_ADDR_MASK 0xfffffff000LL   /* 40-bit physical addresses */
# else
#  define PHYS_ADDR_MASK 0xffffff000LL    /* 36-bit physical addresses */
# endif
#endif
939
940/* return value:
941 -1 = cannot handle fault
942 0 = nothing more to do
943 1 = generate PF fault
944 2 = soft MMU activation required for this block
945*/
946int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
947 int is_write1, int mmu_idx, int is_softmmu)
948{
949 uint64_t ptep, pte;
950 target_ulong pde_addr, pte_addr;
951 int error_code, is_dirty, prot, page_size, ret, is_write, is_user;
952 target_phys_addr_t paddr;
953 uint32_t page_offset;
954 target_ulong vaddr, virt_addr;
955
956 is_user = mmu_idx == MMU_USER_IDX;
957#if defined(DEBUG_MMU)
958 printf("MMU fault: addr=" TARGET_FMT_lx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
959 addr, is_write1, is_user, env->eip);
960#endif
961 is_write = is_write1 & 1;
962
963 if (!(env->cr[0] & CR0_PG_MASK)) {
964 pte = addr;
965 virt_addr = addr & TARGET_PAGE_MASK;
966 prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
967 page_size = 4096;
968 goto do_mapping;
969 }
970
971 if (env->cr[4] & CR4_PAE_MASK) {
972 uint64_t pde, pdpe;
973 target_ulong pdpe_addr;
974
975#ifdef TARGET_X86_64
976 if (env->hflags & HF_LMA_MASK) {
977 uint64_t pml4e_addr, pml4e;
978 int32_t sext;
979
980 /* test virtual address sign extension */
981 sext = (int64_t)addr >> 47;
982 if (sext != 0 && sext != -1) {
983 env->error_code = 0;
984 env->exception_index = EXCP0D_GPF;
985 return 1;
986 }
987
988 pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
989 env->a20_mask;
990 pml4e = ldq_phys(pml4e_addr);
991 if (!(pml4e & PG_PRESENT_MASK)) {
992 error_code = 0;
993 goto do_fault;
994 }
995 if (!(env->efer & MSR_EFER_NXE) && (pml4e & PG_NX_MASK)) {
996 error_code = PG_ERROR_RSVD_MASK;
997 goto do_fault;
998 }
999 if (!(pml4e & PG_ACCESSED_MASK)) {
1000 pml4e |= PG_ACCESSED_MASK;
1001 stl_phys_notdirty(pml4e_addr, pml4e);
1002 }
1003 ptep = pml4e ^ PG_NX_MASK;
1004 pdpe_addr = ((pml4e & PHYS_ADDR_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
1005 env->a20_mask;
1006 pdpe = ldq_phys(pdpe_addr);
1007 if (!(pdpe & PG_PRESENT_MASK)) {
1008 error_code = 0;
1009 goto do_fault;
1010 }
1011 if (!(env->efer & MSR_EFER_NXE) && (pdpe & PG_NX_MASK)) {
1012 error_code = PG_ERROR_RSVD_MASK;
1013 goto do_fault;
1014 }
1015 ptep &= pdpe ^ PG_NX_MASK;
1016 if (!(pdpe & PG_ACCESSED_MASK)) {
1017 pdpe |= PG_ACCESSED_MASK;
1018 stl_phys_notdirty(pdpe_addr, pdpe);
1019 }
1020 } else
1021#endif
1022 {
1023 /* XXX: load them when cr3 is loaded ? */
1024 pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
1025 env->a20_mask;
1026 pdpe = ldq_phys(pdpe_addr);
1027 if (!(pdpe & PG_PRESENT_MASK)) {
1028 error_code = 0;
1029 goto do_fault;
1030 }
1031 ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
1032 }
1033
1034 pde_addr = ((pdpe & PHYS_ADDR_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
1035 env->a20_mask;
1036 pde = ldq_phys(pde_addr);
1037 if (!(pde & PG_PRESENT_MASK)) {
1038 error_code = 0;
1039 goto do_fault;
1040 }
1041 if (!(env->efer & MSR_EFER_NXE) && (pde & PG_NX_MASK)) {
1042 error_code = PG_ERROR_RSVD_MASK;
1043 goto do_fault;
1044 }
1045 ptep &= pde ^ PG_NX_MASK;
1046 if (pde & PG_PSE_MASK) {
1047 /* 2 MB page */
1048 page_size = 2048 * 1024;
1049 ptep ^= PG_NX_MASK;
1050 if ((ptep & PG_NX_MASK) && is_write1 == 2)
1051 goto do_fault_protect;
1052 if (is_user) {
1053 if (!(ptep & PG_USER_MASK))
1054 goto do_fault_protect;
1055 if (is_write && !(ptep & PG_RW_MASK))
1056 goto do_fault_protect;
1057 } else {
1058 if ((env->cr[0] & CR0_WP_MASK) &&
1059 is_write && !(ptep & PG_RW_MASK))
1060 goto do_fault_protect;
1061 }
1062 is_dirty = is_write && !(pde & PG_DIRTY_MASK);
1063 if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
1064 pde |= PG_ACCESSED_MASK;
1065 if (is_dirty)
1066 pde |= PG_DIRTY_MASK;
1067 stl_phys_notdirty(pde_addr, pde);
1068 }
1069 /* align to page_size */
1070 pte = pde & ((PHYS_ADDR_MASK & ~(page_size - 1)) | 0xfff);
1071 virt_addr = addr & ~(page_size - 1);
1072 } else {
1073 /* 4 KB page */
1074 if (!(pde & PG_ACCESSED_MASK)) {
1075 pde |= PG_ACCESSED_MASK;
1076 stl_phys_notdirty(pde_addr, pde);
1077 }
1078 pte_addr = ((pde & PHYS_ADDR_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
1079 env->a20_mask;
1080 pte = ldq_phys(pte_addr);
1081 if (!(pte & PG_PRESENT_MASK)) {
1082 error_code = 0;
1083 goto do_fault;
1084 }
1085 if (!(env->efer & MSR_EFER_NXE) && (pte & PG_NX_MASK)) {
1086 error_code = PG_ERROR_RSVD_MASK;
1087 goto do_fault;
1088 }
1089 /* combine pde and pte nx, user and rw protections */
1090 ptep &= pte ^ PG_NX_MASK;
1091 ptep ^= PG_NX_MASK;
1092 if ((ptep & PG_NX_MASK) && is_write1 == 2)
1093 goto do_fault_protect;
1094 if (is_user) {
1095 if (!(ptep & PG_USER_MASK))
1096 goto do_fault_protect;
1097 if (is_write && !(ptep & PG_RW_MASK))
1098 goto do_fault_protect;
1099 } else {
1100 if ((env->cr[0] & CR0_WP_MASK) &&
1101 is_write && !(ptep & PG_RW_MASK))
1102 goto do_fault_protect;
1103 }
1104 is_dirty = is_write && !(pte & PG_DIRTY_MASK);
1105 if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
1106 pte |= PG_ACCESSED_MASK;
1107 if (is_dirty)
1108 pte |= PG_DIRTY_MASK;
1109 stl_phys_notdirty(pte_addr, pte);
1110 }
1111 page_size = 4096;
1112 virt_addr = addr & ~0xfff;
1113 pte = pte & (PHYS_ADDR_MASK | 0xfff);
1114 }
1115 } else {
1116 uint32_t pde;
1117
1118 /* page directory entry */
1119 pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) &
1120 env->a20_mask;
1121 pde = ldl_phys(pde_addr);
1122 if (!(pde & PG_PRESENT_MASK)) {
1123 error_code = 0;
1124 goto do_fault;
1125 }
1126 /* if PSE bit is set, then we use a 4MB page */
1127 if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
1128 page_size = 4096 * 1024;
1129 if (is_user) {
1130 if (!(pde & PG_USER_MASK))
1131 goto do_fault_protect;
1132 if (is_write && !(pde & PG_RW_MASK))
1133 goto do_fault_protect;
1134 } else {
1135 if ((env->cr[0] & CR0_WP_MASK) &&
1136 is_write && !(pde & PG_RW_MASK))
1137 goto do_fault_protect;
1138 }
1139 is_dirty = is_write && !(pde & PG_DIRTY_MASK);
1140 if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
1141 pde |= PG_ACCESSED_MASK;
1142 if (is_dirty)
1143 pde |= PG_DIRTY_MASK;
1144 stl_phys_notdirty(pde_addr, pde);
1145 }
1146
1147 pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
1148 ptep = pte;
1149 virt_addr = addr & ~(page_size - 1);
1150 } else {
1151 if (!(pde & PG_ACCESSED_MASK)) {
1152 pde |= PG_ACCESSED_MASK;
1153 stl_phys_notdirty(pde_addr, pde);
1154 }
1155
1156 /* page directory entry */
1157 pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
1158 env->a20_mask;
1159 pte = ldl_phys(pte_addr);
1160 if (!(pte & PG_PRESENT_MASK)) {
1161 error_code = 0;
1162 goto do_fault;
1163 }
1164 /* combine pde and pte user and rw protections */
1165 ptep = pte & pde;
1166 if (is_user) {
1167 if (!(ptep & PG_USER_MASK))
1168 goto do_fault_protect;
1169 if (is_write && !(ptep & PG_RW_MASK))
1170 goto do_fault_protect;
1171 } else {
1172 if ((env->cr[0] & CR0_WP_MASK) &&
1173 is_write && !(ptep & PG_RW_MASK))
1174 goto do_fault_protect;
1175 }
1176 is_dirty = is_write && !(pte & PG_DIRTY_MASK);
1177 if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
1178 pte |= PG_ACCESSED_MASK;
1179 if (is_dirty)
1180 pte |= PG_DIRTY_MASK;
1181 stl_phys_notdirty(pte_addr, pte);
1182 }
1183 page_size = 4096;
1184 virt_addr = addr & ~0xfff;
1185 }
1186 }
1187 /* the page can be put in the TLB */
1188 prot = PAGE_READ;
1189 if (!(ptep & PG_NX_MASK))
1190 prot |= PAGE_EXEC;
1191 if (pte & PG_DIRTY_MASK) {
1192 /* only set write access if already dirty... otherwise wait
1193 for dirty access */
1194 if (is_user) {
1195 if (ptep & PG_RW_MASK)
1196 prot |= PAGE_WRITE;
1197 } else {
1198 if (!(env->cr[0] & CR0_WP_MASK) ||
1199 (ptep & PG_RW_MASK))
1200 prot |= PAGE_WRITE;
1201 }
1202 }
1203 do_mapping:
1204 pte = pte & env->a20_mask;
1205
1206 /* Even if 4MB pages, we map only one 4KB page in the cache to
1207 avoid filling it too fast */
1208 page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
1209 paddr = (pte & TARGET_PAGE_MASK) + page_offset;
1210 vaddr = virt_addr + page_offset;
1211
1212 ret = tlb_set_page_exec(env, vaddr, paddr, prot, mmu_idx, is_softmmu);
1213 return ret;
1214 do_fault_protect:
1215 error_code = PG_ERROR_P_MASK;
1216 do_fault:
1217 error_code |= (is_write << PG_ERROR_W_BIT);
1218 if (is_user)
1219 error_code |= PG_ERROR_U_MASK;
1220 if (is_write1 == 2 &&
1221 (env->efer & MSR_EFER_NXE) &&
1222 (env->cr[4] & CR4_PAE_MASK))
1223 error_code |= PG_ERROR_I_D_MASK;
1224 if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) {
1225 /* cr2 is not modified in case of exceptions */
1226 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
1227 addr);
1228 } else {
1229 env->cr[2] = addr;
1230 }
1231 env->error_code = error_code;
1232 env->exception_index = EXCP0E_PAGE;
1233 return 1;
1234}
1235
1236target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
1237{
1238 target_ulong pde_addr, pte_addr;
1239 uint64_t pte;
1240 target_phys_addr_t paddr;
1241 uint32_t page_offset;
1242 int page_size;
1243
1244 if (env->cr[4] & CR4_PAE_MASK) {
1245 target_ulong pdpe_addr;
1246 uint64_t pde, pdpe;
1247
1248#ifdef TARGET_X86_64
1249 if (env->hflags & HF_LMA_MASK) {
1250 uint64_t pml4e_addr, pml4e;
1251 int32_t sext;
1252
1253 /* test virtual address sign extension */
1254 sext = (int64_t)addr >> 47;
1255 if (sext != 0 && sext != -1)
1256 return -1;
1257
1258 pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
1259 env->a20_mask;
1260 pml4e = ldq_phys(pml4e_addr);
1261 if (!(pml4e & PG_PRESENT_MASK))
1262 return -1;
1263
1264 pdpe_addr = ((pml4e & ~0xfff) + (((addr >> 30) & 0x1ff) << 3)) &
1265 env->a20_mask;
1266 pdpe = ldq_phys(pdpe_addr);
1267 if (!(pdpe & PG_PRESENT_MASK))
1268 return -1;
1269 } else
1270#endif
1271 {
1272 pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
1273 env->a20_mask;
1274 pdpe = ldq_phys(pdpe_addr);
1275 if (!(pdpe & PG_PRESENT_MASK))
1276 return -1;
1277 }
1278
1279 pde_addr = ((pdpe & ~0xfff) + (((addr >> 21) & 0x1ff) << 3)) &
1280 env->a20_mask;
1281 pde = ldq_phys(pde_addr);
1282 if (!(pde & PG_PRESENT_MASK)) {
1283 return -1;
1284 }
1285 if (pde & PG_PSE_MASK) {
1286 /* 2 MB page */
1287 page_size = 2048 * 1024;
1288 pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
1289 } else {
1290 /* 4 KB page */
1291 pte_addr = ((pde & ~0xfff) + (((addr >> 12) & 0x1ff) << 3)) &
1292 env->a20_mask;
1293 page_size = 4096;
1294 pte = ldq_phys(pte_addr);
1295 }
1296 if (!(pte & PG_PRESENT_MASK))
1297 return -1;
1298 } else {
1299 uint32_t pde;
1300
1301 if (!(env->cr[0] & CR0_PG_MASK)) {
1302 pte = addr;
1303 page_size = 4096;
1304 } else {
1305 /* page directory entry */
1306 pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & env->a20_mask;
1307 pde = ldl_phys(pde_addr);
1308 if (!(pde & PG_PRESENT_MASK))
1309 return -1;
1310 if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
1311 pte = pde & ~0x003ff000; /* align to 4MB */
1312 page_size = 4096 * 1024;
1313 } else {
1314 /* page directory entry */
1315 pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask;
1316 pte = ldl_phys(pte_addr);
1317 if (!(pte & PG_PRESENT_MASK))
1318 return -1;
1319 page_size = 4096;
1320 }
1321 }
1322 pte = pte & env->a20_mask;
1323 }
1324
1325 page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
1326 paddr = (pte & TARGET_PAGE_MASK) + page_offset;
1327 return paddr;
1328}
1329#endif /* !CONFIG_USER_ONLY */
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette