VirtualBox

source: vbox/trunk/src/recompiler/target-i386/helper.c@45473

Last change on this file since 45473 was 41436, checked in by vboxsync, 12 years ago

REM,EM: A20 fixes.

  • Property svn:eol-style set to native
File size: 38.2 KB
/*
 *  i386 helpers (without register variable usage)
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

/*
 * Oracle LGPL Disclaimer: For the avoidance of doubt, except that if any license choice
 * other than GPL or LGPL is available it will apply instead, Oracle elects to use only
 * the Lesser General Public License version 2.1 (LGPLv2) at this time for any software where
 * a choice of LGPL license versions is made available with the language indicating
 * that LGPLv2 or any later version may be used, or where a choice of which version
 * of the LGPL is applied is otherwise unspecified.
 */

#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#ifndef VBOX
#include <inttypes.h>
#include <signal.h>
#endif /* !VBOX */

#include "cpu.h"
#include "exec-all.h"
#include "qemu-common.h"
#include "kvm.h"

//#define DEBUG_MMU
/* NOTE: must be called outside the CPU execute loop */
void cpu_reset(CPUX86State *env)
{
    int i;

    if (qemu_loglevel_mask(CPU_LOG_RESET)) {
        qemu_log("CPU Reset (CPU %d)\n", env->cpu_index);
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
    }

    memset(env, 0, offsetof(CPUX86State, breakpoints));

    tlb_flush(env, 1);

    env->old_exception = -1;

    /* init to reset state */

#ifdef CONFIG_SOFTMMU
    env->hflags |= HF_SOFTMMU_MASK;
#endif
    env->hflags2 |= HF2_GIF_MASK;

    cpu_x86_update_cr0(env, 0x60000010);
    env->a20_mask = ~0x0;
    env->smbase = 0x30000;

    env->idt.limit = 0xffff;
    env->gdt.limit = 0xffff;
    env->ldt.limit = 0xffff;
    env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
    env->tr.limit = 0xffff;
    env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);
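    /* System-descriptor types above: 2 is an LDT, 11 a busy 32-bit TSS. */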

    cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
                           DESC_R_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);

    env->eip = 0xfff0;
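    /* Reset vector: CS base 0xffff0000 plus IP 0xfff0 makes the first
       fetch happen at physical 0xfffffff0, as the x86 reset state requires. */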
#ifndef VBOX /* We'll get the right value from CPUM. */
    env->regs[R_EDX] = env->cpuid_version;
#endif

    env->eflags = 0x2;

    /* FPU init */
    for (i = 0; i < 8; i++)
        env->fptags[i] = 1;
    env->fpuc = 0x37f;

    env->mxcsr = 0x1f80;

    memset(env->dr, 0, sizeof(env->dr));
    env->dr[6] = DR6_FIXED_1;
    env->dr[7] = DR7_FIXED_1;
    cpu_breakpoint_remove_all(env, BP_CPU);
    cpu_watchpoint_remove_all(env, BP_CPU);

#ifndef VBOX
    env->mcg_status = 0;
#endif
}

void cpu_x86_close(CPUX86State *env)
{
#ifndef VBOX
    qemu_free(env);
#endif
}

/***********************************************************/
/* x86 debug */

static const char *cc_op_str[] = {
    "DYNAMIC",
    "EFLAGS",

    "MULB",
    "MULW",
    "MULL",
    "MULQ",

    "ADDB",
    "ADDW",
    "ADDL",
    "ADDQ",

    "ADCB",
    "ADCW",
    "ADCL",
    "ADCQ",

    "SUBB",
    "SUBW",
    "SUBL",
    "SUBQ",

    "SBBB",
    "SBBW",
    "SBBL",
    "SBBQ",

    "LOGICB",
    "LOGICW",
    "LOGICL",
    "LOGICQ",

    "INCB",
    "INCW",
    "INCL",
    "INCQ",

    "DECB",
    "DECW",
    "DECL",
    "DECQ",

    "SHLB",
    "SHLW",
    "SHLL",
    "SHLQ",

    "SARB",
    "SARW",
    "SARL",
    "SARQ",
};

static void
cpu_x86_dump_seg_cache(CPUState *env, FILE *f,
                       int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
                       const char *name, struct SegmentCache *sc)
{
#ifdef VBOX
# define cpu_fprintf(f, ...) RTLogPrintf(__VA_ARGS__)
#endif
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        cpu_fprintf(f, "%-3s=%04x %016" PRIx64 " %08x %08x", name,
                    sc->selector, sc->base, sc->limit, sc->flags);
    } else
#endif
    {
        cpu_fprintf(f, "%-3s=%04x %08x %08x %08x", name, sc->selector,
                    (uint32_t)sc->base, sc->limit, sc->flags);
    }

    if (!(env->hflags & HF_PE_MASK) || !(sc->flags & DESC_P_MASK))
        goto done;

    cpu_fprintf(f, " DPL=%d ", (sc->flags & DESC_DPL_MASK) >> DESC_DPL_SHIFT);
    if (sc->flags & DESC_S_MASK) {
        if (sc->flags & DESC_CS_MASK) {
            cpu_fprintf(f, (sc->flags & DESC_L_MASK) ? "CS64" :
                        ((sc->flags & DESC_B_MASK) ? "CS32" : "CS16"));
            cpu_fprintf(f, " [%c%c", (sc->flags & DESC_C_MASK) ? 'C' : '-',
                        (sc->flags & DESC_R_MASK) ? 'R' : '-');
        } else {
            cpu_fprintf(f, (sc->flags & DESC_B_MASK) ? "DS  " : "DS16");
            cpu_fprintf(f, " [%c%c", (sc->flags & DESC_E_MASK) ? 'E' : '-',
                        (sc->flags & DESC_W_MASK) ? 'W' : '-');
        }
        cpu_fprintf(f, "%c]", (sc->flags & DESC_A_MASK) ? 'A' : '-');
    } else {
        static const char *sys_type_name[2][16] = {
            { /* 32 bit mode */
                "Reserved", "TSS16-avl", "LDT", "TSS16-busy",
                "CallGate16", "TaskGate", "IntGate16", "TrapGate16",
                "Reserved", "TSS32-avl", "Reserved", "TSS32-busy",
                "CallGate32", "Reserved", "IntGate32", "TrapGate32"
            },
            { /* 64 bit mode */
                "<hiword>", "Reserved", "LDT", "Reserved", "Reserved",
                "Reserved", "Reserved", "Reserved", "Reserved",
                "TSS64-avl", "Reserved", "TSS64-busy", "CallGate64",
                "Reserved", "IntGate64", "TrapGate64"
            }
        };
        cpu_fprintf(f, "%s",
                    sys_type_name[(env->hflags & HF_LMA_MASK) ? 1 : 0]
                                 [(sc->flags & DESC_TYPE_MASK)
                                  >> DESC_TYPE_SHIFT]);
    }
 done:
    cpu_fprintf(f, "\n");
#ifdef VBOX
# undef cpu_fprintf
#endif
}

void cpu_dump_state(CPUState *env, FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
                    int flags)
{
    int eflags, i, nb;
    char cc_op_name[32];
    static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };

#ifdef VBOX
# define cpu_fprintf(f, ...) RTLogPrintf(__VA_ARGS__)
#endif
    cpu_synchronize_state(env);

    eflags = env->eflags;
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        cpu_fprintf(f,
                    "RAX=%016" PRIx64 " RBX=%016" PRIx64 " RCX=%016" PRIx64 " RDX=%016" PRIx64 "\n"
                    "RSI=%016" PRIx64 " RDI=%016" PRIx64 " RBP=%016" PRIx64 " RSP=%016" PRIx64 "\n"
                    "R8 =%016" PRIx64 " R9 =%016" PRIx64 " R10=%016" PRIx64 " R11=%016" PRIx64 "\n"
                    "R12=%016" PRIx64 " R13=%016" PRIx64 " R14=%016" PRIx64 " R15=%016" PRIx64 "\n"
                    "RIP=%016" PRIx64 " RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    env->regs[R_EAX],
                    env->regs[R_EBX],
                    env->regs[R_ECX],
                    env->regs[R_EDX],
                    env->regs[R_ESI],
                    env->regs[R_EDI],
                    env->regs[R_EBP],
                    env->regs[R_ESP],
                    env->regs[8],
                    env->regs[9],
                    env->regs[10],
                    env->regs[11],
                    env->regs[12],
                    env->regs[13],
                    env->regs[14],
                    env->regs[15],
                    env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    env->halted);
    } else
#endif
    {
        cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
                    "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
                    "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    (uint32_t)env->regs[R_EAX],
                    (uint32_t)env->regs[R_EBX],
                    (uint32_t)env->regs[R_ECX],
                    (uint32_t)env->regs[R_EDX],
                    (uint32_t)env->regs[R_ESI],
                    (uint32_t)env->regs[R_EDI],
                    (uint32_t)env->regs[R_EBP],
                    (uint32_t)env->regs[R_ESP],
                    (uint32_t)env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    env->halted);
    }

    for (i = 0; i < 6; i++) {
        cpu_x86_dump_seg_cache(env, f, cpu_fprintf, seg_name[i],
                               &env->segs[i]);
    }
    cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "LDT", &env->ldt);
    cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "TR", &env->tr);

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        cpu_fprintf(f, "GDT=     %016" PRIx64 " %08x\n",
                    env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %016" PRIx64 " %08x\n",
                    env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%016" PRIx64 " CR3=%016" PRIx64 " CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    env->cr[2],
                    env->cr[3],
                    (uint32_t)env->cr[4]);
        for (i = 0; i < 4; i++)
            cpu_fprintf(f, "DR%d=%016" PRIx64 " ", i, env->dr[i]);
        cpu_fprintf(f, "\nDR6=%016" PRIx64 " DR7=%016" PRIx64 "\n",
                    env->dr[6], env->dr[7]);
    } else
#endif
    {
        cpu_fprintf(f, "GDT=     %08x %08x\n",
                    (uint32_t)env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %08x %08x\n",
                    (uint32_t)env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    (uint32_t)env->cr[2],
                    (uint32_t)env->cr[3],
                    (uint32_t)env->cr[4]);
        for (i = 0; i < 4; i++)
            cpu_fprintf(f, "DR%d=%08x ", i, (uint32_t)env->dr[i]);
        cpu_fprintf(f, "\nDR6=%08x DR7=%08x\n",
                    (uint32_t)env->dr[6], (uint32_t)env->dr[7]);
    }
    if (flags & X86_DUMP_CCOP) {
        if ((unsigned)env->cc_op < CC_OP_NB)
            snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
        else
            snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK) {
            cpu_fprintf(f, "CCS=%016" PRIx64 " CCD=%016" PRIx64 " CCO=%-8s\n",
                        env->cc_src, env->cc_dst,
                        cc_op_name);
        } else
#endif
        {
            cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
                        (uint32_t)env->cc_src, (uint32_t)env->cc_dst,
                        cc_op_name);
        }
    }
    cpu_fprintf(f, "EFER=%016" PRIx64 "\n", env->efer);
    if (flags & X86_DUMP_FPU) {
        int fptag;
        fptag = 0;
        for (i = 0; i < 8; i++) {
            fptag |= ((!env->fptags[i]) << i);
        }
        cpu_fprintf(f, "FCW=%04x FSW=%04x [ST=%d] FTW=%02x MXCSR=%08x\n",
                    env->fpuc,
                    (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11,
                    env->fpstt,
                    fptag,
                    env->mxcsr);
        for (i = 0; i < 8; i++) {
#if defined(USE_X86LDOUBLE)
            union {
                long double d;
                struct {
                    uint64_t lower;
                    uint16_t upper;
                } l;
            } tmp;
            tmp.d = env->fpregs[i].d;
            cpu_fprintf(f, "FPR%d=%016" PRIx64 " %04x",
                        i, tmp.l.lower, tmp.l.upper);
#else
            cpu_fprintf(f, "FPR%d=%016" PRIx64,
                        i, env->fpregs[i].mmx.q);
#endif
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
        if (env->hflags & HF_CS64_MASK)
            nb = 16;
        else
            nb = 8;
        for (i = 0; i < nb; i++) {
            cpu_fprintf(f, "XMM%02d=%08x%08x%08x%08x",
                        i,
                        env->xmm_regs[i].XMM_L(3),
                        env->xmm_regs[i].XMM_L(2),
                        env->xmm_regs[i].XMM_L(1),
                        env->xmm_regs[i].XMM_L(0));
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
    }
#ifdef VBOX
# undef cpu_fprintf
#endif
}

/***********************************************************/
/* x86 mmu */
/* XXX: add PGE support */

void cpu_x86_set_a20(CPUX86State *env, int a20_state)
{
    a20_state = (a20_state != 0);
    if (a20_state != ((env->a20_mask >> 20) & 1)) {
#if defined(DEBUG_MMU)
        printf("A20 update: a20=%d\n", a20_state);
#endif
        /* if the cpu is currently executing code, we must unlink it and
           all the potentially executing TB */
        cpu_interrupt(env, CPU_INTERRUPT_EXITTB);

        /* when a20 is changed, all the MMU mappings are invalid, so
           we must flush everything */
        tlb_flush(env, 1);
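        /* a20_state is normalized to 0 or 1 above, so the expression below
           yields an all-ones mask when A20 is enabled and clears only
           bit 20 when it is disabled; translation code ANDs physical
           addresses with this mask. */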
        env->a20_mask = ~(1 << 20) | (a20_state << 20);
    }
}

void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
{
    int pe_state;

#if defined(DEBUG_MMU)
    printf("CR0 update: CR0=0x%08x\n", new_cr0);
#endif
    if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
        (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
        tlb_flush(env, 1);
    }

#ifdef TARGET_X86_64
    if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
        (env->efer & MSR_EFER_LME)) {
        /* enter in long mode */
        /* XXX: generate an exception */
        if (!(env->cr[4] & CR4_PAE_MASK))
            return;
        env->efer |= MSR_EFER_LMA;
        env->hflags |= HF_LMA_MASK;
    } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
               (env->efer & MSR_EFER_LMA)) {
        /* exit long mode */
        env->efer &= ~MSR_EFER_LMA;
        env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
        env->eip &= 0xffffffff;
    }
#endif
    env->cr[0] = new_cr0 | CR0_ET_MASK;

    /* update PE flag in hidden flags */
    pe_state = (env->cr[0] & CR0_PE_MASK);
    env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
    /* ensure that ADDSEG is always set in real mode */
    env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
    /* update FPU flags */
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
        ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
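    /* The shift above relies on CR0.MP/EM/TS being the consecutive bits
       1..3: shifting left by HF_MP_SHIFT - 1 moves CR0 bit 1 onto
       HF_MP_SHIFT, so all three flags land on their hflags slots at once. */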
#ifdef VBOX
    remR3ChangeCpuMode(env);
#endif
}

/* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in
   the PDPT */
void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
{
    env->cr[3] = new_cr3;
    if (env->cr[0] & CR0_PG_MASK) {
#if defined(DEBUG_MMU)
        printf("CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
#endif
        tlb_flush(env, 0);
    }
}

void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
{
#if defined(DEBUG_MMU)
    printf("CR4 update: CR4=%08x\n", (uint32_t)env->cr[4]);
#endif
    if ((new_cr4 & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK)) !=
        (env->cr[4] & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK))) {
        tlb_flush(env, 1);
    }
    /* SSE handling */
    if (!(env->cpuid_features & CPUID_SSE))
        new_cr4 &= ~CR4_OSFXSR_MASK;
    if (new_cr4 & CR4_OSFXSR_MASK)
        env->hflags |= HF_OSFXSR_MASK;
    else
        env->hflags &= ~HF_OSFXSR_MASK;

    env->cr[4] = new_cr4;
#ifdef VBOX
    remR3ChangeCpuMode(env);
#endif
}

#if defined(CONFIG_USER_ONLY)

int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write, int mmu_idx, int is_softmmu)
{
    /* user mode only emulation */
    is_write &= 1;
    env->cr[2] = addr;
    env->error_code = (is_write << PG_ERROR_W_BIT);
    env->error_code |= PG_ERROR_U_MASK;
    env->exception_index = EXCP0E_PAGE;
    return 1;
}

#else

/* XXX: This value should match the one returned by CPUID
 * and in exec.c */
# if defined(TARGET_X86_64)
# define PHYS_ADDR_MASK 0xfffffff000LL
# else
# define PHYS_ADDR_MASK 0xffffff000LL
# endif
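/* 0xfffffff000 keeps physical-address bits 12..39 (a 40-bit physical space);
   the PAE value keeps bits 12..35 (36 bits). Both drop the low 12
   attribute/offset bits of a page-table entry. */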

/* return value:
   -1 = cannot handle fault
   0  = nothing more to do
   1  = generate PF fault
*/
int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write1, int mmu_idx, int is_softmmu)
{
    uint64_t ptep, pte;
    target_ulong pde_addr, pte_addr;
    int error_code, is_dirty, prot, page_size, is_write, is_user;
    target_phys_addr_t paddr;
    uint32_t page_offset;
    target_ulong vaddr, virt_addr;

    is_user = mmu_idx == MMU_USER_IDX;
#if defined(DEBUG_MMU)
    printf("MMU fault: addr=" TARGET_FMT_lx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
           addr, is_write1, is_user, env->eip);
#endif
    is_write = is_write1 & 1;
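    /* is_write1 encodes the access type: 0 = read, 1 = write,
       2 = instruction fetch. The low bit is all the write/dirty checks
       below need; the value 2 is tested separately for NX faults. */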

    if (!(env->cr[0] & CR0_PG_MASK)) {
        pte = addr;
        virt_addr = addr & TARGET_PAGE_MASK;
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        page_size = 4096;
        goto do_mapping;
    }

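    /* PAE/long-mode walks use 8-byte entries: the PML4E is indexed by
       addr[47:39], the PDPE by addr[38:30], the PDE by addr[29:21] and the
       PTE by addr[20:12]; each "(... & 0x1ff) << 3" below extracts one
       9-bit index and scales it to a byte offset. */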
    if (env->cr[4] & CR4_PAE_MASK) {
        uint64_t pde, pdpe;
        target_ulong pdpe_addr;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1) {
                env->error_code = 0;
                env->exception_index = EXCP0D_GPF;
                return 1;
            }

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pml4e & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            if (!(pml4e & PG_ACCESSED_MASK)) {
                pml4e |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pml4e_addr, pml4e);
            }
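            /* Accumulate protections with NX inverted: XORing in PG_NX_MASK
               turns "no-execute" into "may-execute", so ANDing the levels
               together leaves execute permission only if no level set NX. */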
            ptep = pml4e ^ PG_NX_MASK;
            pdpe_addr = ((pml4e & PHYS_ADDR_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pdpe & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            ptep &= pdpe ^ PG_NX_MASK;
            if (!(pdpe & PG_ACCESSED_MASK)) {
                pdpe |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pdpe_addr, pdpe);
            }
        } else
#endif
        {
            /* XXX: load them when cr3 is loaded ? */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
        }

        pde_addr = ((pdpe & PHYS_ADDR_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        if (!(env->efer & MSR_EFER_NXE) && (pde & PG_NX_MASK)) {
            error_code = PG_ERROR_RSVD_MASK;
            goto do_fault;
        }
        ptep &= pde ^ PG_NX_MASK;
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
                goto do_fault_protect;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            /* align to page_size */
            pte = pde & ((PHYS_ADDR_MASK & ~(page_size - 1)) | 0xfff);
            virt_addr = addr & ~(page_size - 1);
        } else {
            /* 4 KB page */
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            pte_addr = ((pde & PHYS_ADDR_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            pte = ldq_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pte & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            /* combine pde and pte nx, user and rw protections */
            ptep &= pte ^ PG_NX_MASK;
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
                goto do_fault_protect;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
            pte = pte & (PHYS_ADDR_MASK | 0xfff);
        }
    } else {
        uint32_t pde;

        /* page directory entry */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) &
            env->a20_mask;
        pde = ldl_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        /* if PSE bit is set, then we use a 4MB page */
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            page_size = 4096 * 1024;
            if (is_user) {
                if (!(pde & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            pte = pde & ~((page_size - 1) & ~0xfff); /* align to page_size */
            ptep = pte;
            virt_addr = addr & ~(page_size - 1);
        } else {
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            /* page table entry */
            pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
                env->a20_mask;
            pte = ldl_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            /* combine pde and pte user and rw protections */
            ptep = pte & pde;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
        }
    }
    /* the page can be put in the TLB */
    prot = PAGE_READ;
    if (!(ptep & PG_NX_MASK))
        prot |= PAGE_EXEC;
    if (pte & PG_DIRTY_MASK) {
        /* only set write access if already dirty... otherwise wait
           for dirty access */
        if (is_user) {
            if (ptep & PG_RW_MASK)
                prot |= PAGE_WRITE;
        } else {
            if (!(env->cr[0] & CR0_WP_MASK) ||
                (ptep & PG_RW_MASK))
                prot |= PAGE_WRITE;
        }
    }
 do_mapping:
#ifndef VBOX
    pte = pte & env->a20_mask;
#endif

    /* Even if 4MB pages, we map only one 4KB page in the cache to
       avoid filling it too fast */
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
#ifdef VBOX
    paddr &= env->a20_mask;
#endif
    vaddr = virt_addr + page_offset;

    tlb_set_page(env, vaddr, paddr, prot, mmu_idx, page_size);
    return 0;
 do_fault_protect:
    error_code = PG_ERROR_P_MASK;
 do_fault:
    error_code |= (is_write << PG_ERROR_W_BIT);
    if (is_user)
        error_code |= PG_ERROR_U_MASK;
    if (is_write1 == 2 &&
        (env->efer & MSR_EFER_NXE) &&
        (env->cr[4] & CR4_PAE_MASK))
        error_code |= PG_ERROR_I_D_MASK;
    if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) {
        /* cr2 is not modified in case of exceptions */
        stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                 addr);
    } else {
        env->cr[2] = addr;
    }
    env->error_code = error_code;
    env->exception_index = EXCP0E_PAGE;
    return 1;
}

target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    target_ulong pde_addr, pte_addr;
    uint64_t pte;
    target_phys_addr_t paddr;
    uint32_t page_offset;
    int page_size;

    if (env->cr[4] & CR4_PAE_MASK) {
        target_ulong pdpe_addr;
        uint64_t pde, pdpe;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1)
                return -1;

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK))
                return -1;

            pdpe_addr = ((pml4e & ~0xfff) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        } else
#endif
        {
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        }

        pde_addr = ((pdpe & ~0xfff) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            return -1;
        }
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte = pde & ~((page_size - 1) & ~0xfff); /* align to page_size */
        } else {
            /* 4 KB page */
            pte_addr = ((pde & ~0xfff) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            page_size = 4096;
            pte = ldq_phys(pte_addr);
        }
        if (!(pte & PG_PRESENT_MASK))
            return -1;
    } else {
        uint32_t pde;

        if (!(env->cr[0] & CR0_PG_MASK)) {
            pte = addr;
            page_size = 4096;
        } else {
            /* page directory entry */
            pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & env->a20_mask;
            pde = ldl_phys(pde_addr);
            if (!(pde & PG_PRESENT_MASK))
                return -1;
            if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
                pte = pde & ~0x003ff000; /* align to 4MB */
                page_size = 4096 * 1024;
            } else {
                /* page table entry */
                pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask;
                pte = ldl_phys(pte_addr);
                if (!(pte & PG_PRESENT_MASK))
                    return -1;
                page_size = 4096;
            }
        }
        pte = pte & env->a20_mask;
    }

    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    return paddr;
}

void hw_breakpoint_insert(CPUState *env, int index)
{
    int type, err = 0;

    switch (hw_breakpoint_type(env->dr[7], index)) {
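    /* DR7 R/W field per breakpoint: 0 = instruction fetch, 1 = data write,
       2 = I/O access (not supported here), 3 = data read/write. */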
    case 0:
        if (hw_breakpoint_enabled(env->dr[7], index))
            err = cpu_breakpoint_insert(env, env->dr[index], BP_CPU,
                                        &env->cpu_breakpoint[index]);
        break;
    case 1:
        type = BP_CPU | BP_MEM_WRITE;
        goto insert_wp;
    case 2:
        /* No support for I/O watchpoints yet */
        break;
    case 3:
        type = BP_CPU | BP_MEM_ACCESS;
    insert_wp:
        err = cpu_watchpoint_insert(env, env->dr[index],
                                    hw_breakpoint_len(env->dr[7], index),
                                    type, &env->cpu_watchpoint[index]);
        break;
    }
    if (err)
        env->cpu_breakpoint[index] = NULL;
}

void hw_breakpoint_remove(CPUState *env, int index)
{
    if (!env->cpu_breakpoint[index])
        return;
    switch (hw_breakpoint_type(env->dr[7], index)) {
    case 0:
        if (hw_breakpoint_enabled(env->dr[7], index))
            cpu_breakpoint_remove_by_ref(env, env->cpu_breakpoint[index]);
        break;
    case 1:
    case 3:
        cpu_watchpoint_remove_by_ref(env, env->cpu_watchpoint[index]);
        break;
    case 2:
        /* No support for I/O watchpoints yet */
        break;
    }
}

int check_hw_breakpoints(CPUState *env, int force_dr6_update)
{
    target_ulong dr6;
    int reg, type;
    int hit_enabled = 0;

    dr6 = env->dr[6] & ~0xf;
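    /* Start from DR6 with the low four B0..B3 hit bits cleared; the loop
       below re-sets one bit for every debug register that matched. */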
    for (reg = 0; reg < 4; reg++) {
        type = hw_breakpoint_type(env->dr[7], reg);
        if ((type == 0 && env->dr[reg] == env->eip) ||
            ((type & 1) && env->cpu_watchpoint[reg] &&
             (env->cpu_watchpoint[reg]->flags & BP_WATCHPOINT_HIT))) {
            dr6 |= 1 << reg;
            if (hw_breakpoint_enabled(env->dr[7], reg))
                hit_enabled = 1;
        }
    }
    if (hit_enabled || force_dr6_update)
        env->dr[6] = dr6;
    return hit_enabled;
}

static CPUDebugExcpHandler *prev_debug_excp_handler;

void raise_exception_env(int exception_index, CPUState *env);

static void breakpoint_handler(CPUState *env)
{
    CPUBreakpoint *bp;

    if (env->watchpoint_hit) {
        if (env->watchpoint_hit->flags & BP_CPU) {
            env->watchpoint_hit = NULL;
            if (check_hw_breakpoints(env, 0))
                raise_exception_env(EXCP01_DB, env);
            else
                cpu_resume_from_signal(env, NULL);
        }
    } else {
        QTAILQ_FOREACH(bp, &env->breakpoints, entry)
            if (bp->pc == env->eip) {
                if (bp->flags & BP_CPU) {
                    check_hw_breakpoints(env, 1);
                    raise_exception_env(EXCP01_DB, env);
                }
                break;
            }
    }
    if (prev_debug_excp_handler)
        prev_debug_excp_handler(env);
}

#ifndef VBOX
/* This should come from sysemu.h - if we could include it here... */
void qemu_system_reset_request(void);

void cpu_inject_x86_mce(CPUState *cenv, int bank, uint64_t status,
                        uint64_t mcg_status, uint64_t addr, uint64_t misc)
{
    uint64_t mcg_cap = cenv->mcg_cap;
    unsigned bank_num = mcg_cap & 0xff;
    uint64_t *banks = cenv->mce_banks;
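    /* Each MCE bank occupies four consecutive slots in mce_banks, mirroring
       the MSR layout MCi_CTL, MCi_STATUS, MCi_ADDR, MCi_MISC; hence the
       "banks += 4 * bank" indexing and the banks[0..3] accesses below. */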

    if (bank >= bank_num || !(status & MCI_STATUS_VAL))
        return;

    /*
     * if MSR_MCG_CTL is not all 1s, the uncorrected error
     * reporting is disabled
     */
    if ((status & MCI_STATUS_UC) && (mcg_cap & MCG_CTL_P) &&
        cenv->mcg_ctl != ~(uint64_t)0)
        return;
    banks += 4 * bank;
    /*
     * if MSR_MCi_CTL is not all 1s, the uncorrected error
     * reporting is disabled for the bank
     */
    if ((status & MCI_STATUS_UC) && banks[0] != ~(uint64_t)0)
        return;
    if (status & MCI_STATUS_UC) {
        if ((cenv->mcg_status & MCG_STATUS_MCIP) ||
            !(cenv->cr[4] & CR4_MCE_MASK)) {
            fprintf(stderr, "injects mce exception while previous "
                    "one is in progress!\n");
            qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");
            qemu_system_reset_request();
            return;
        }
        if (banks[1] & MCI_STATUS_VAL)
            status |= MCI_STATUS_OVER;
        banks[2] = addr;
        banks[3] = misc;
        cenv->mcg_status = mcg_status;
        banks[1] = status;
        cpu_interrupt(cenv, CPU_INTERRUPT_MCE);
    } else if (!(banks[1] & MCI_STATUS_VAL)
               || !(banks[1] & MCI_STATUS_UC)) {
        if (banks[1] & MCI_STATUS_VAL)
            status |= MCI_STATUS_OVER;
        banks[2] = addr;
        banks[3] = misc;
        banks[1] = status;
    } else
        banks[1] |= MCI_STATUS_OVER;
}
#endif /* !VBOX */
#endif /* !CONFIG_USER_ONLY */

#ifndef VBOX

static void mce_init(CPUX86State *cenv)
{
    unsigned int bank, bank_num;

    if (((cenv->cpuid_version >> 8) & 0xf) >= 6
        && (cenv->cpuid_features & (CPUID_MCE | CPUID_MCA)) == (CPUID_MCE | CPUID_MCA)) {
        cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF;
        cenv->mcg_ctl = ~(uint64_t)0;
        bank_num = MCE_BANKS_DEF;
        for (bank = 0; bank < bank_num; bank++)
            cenv->mce_banks[bank * 4] = ~(uint64_t)0;
    }
}

int cpu_x86_get_descr_debug(CPUX86State *env, unsigned int selector,
                            target_ulong *base, unsigned int *limit,
                            unsigned int *flags)
{
    SegmentCache *dt;
    target_ulong ptr;
    uint32_t e1, e2;
    int index;

    if (selector & 0x4)
        dt = &env->ldt;
    else
        dt = &env->gdt;
    index = selector & ~7;
    ptr = dt->base + index;
    if ((index + 7) > dt->limit
        || cpu_memory_rw_debug(env, ptr, (uint8_t *)&e1, sizeof(e1), 0) != 0
        || cpu_memory_rw_debug(env, ptr + 4, (uint8_t *)&e2, sizeof(e2), 0) != 0)
        return 0;

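    /* e1/e2 are the low/high dwords of the 8-byte descriptor: the 32-bit
       base is scattered across both, and the 20-bit limit is scaled to
       4K-page units when the granularity bit (DESC_G_MASK) is set. */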
    *base = ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
    *limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK)
        *limit = (*limit << 12) | 0xfff;
    *flags = e2;

    return 1;
}

#endif /* !VBOX */

#ifndef VBOX
CPUX86State *cpu_x86_init(const char *cpu_model)
#else
CPUX86State *cpu_x86_init(CPUX86State *env, const char *cpu_model)
#endif
{
#ifndef VBOX
    CPUX86State *env;
#endif
    static int inited;

#ifndef VBOX
    env = qemu_mallocz(sizeof(CPUX86State));
#endif
    cpu_exec_init(env);
    env->cpu_model_str = cpu_model;

    /* init various static tables */
    if (!inited) {
        inited = 1;
        optimize_flags_init();
#ifndef CONFIG_USER_ONLY
        prev_debug_excp_handler =
            cpu_set_debug_excp_handler(breakpoint_handler);
#endif
    }
#ifndef VBOX
    if (cpu_x86_register(env, cpu_model) < 0) {
        cpu_x86_close(env);
        return NULL;
    }
    mce_init(env);
#endif

    qemu_init_vcpu(env);

    return env;
}

#ifndef VBOX
#if !defined(CONFIG_USER_ONLY)
void do_cpu_init(CPUState *env)
{
    int sipi = env->interrupt_request & CPU_INTERRUPT_SIPI;
    cpu_reset(env);
    env->interrupt_request = sipi;
    apic_init_reset(env->apic_state);
    env->halted = !cpu_is_bsp(env);
}

void do_cpu_sipi(CPUState *env)
{
    apic_sipi(env->apic_state);
}
#else
void do_cpu_init(CPUState *env)
{
}
void do_cpu_sipi(CPUState *env)
{
}
#endif
#endif /* !VBOX */