1 | /*
|
---|
2 | * i386 virtual CPU header
|
---|
3 | *
|
---|
4 | * Copyright (c) 2003 Fabrice Bellard
|
---|
5 | *
|
---|
6 | * This library is free software; you can redistribute it and/or
|
---|
7 | * modify it under the terms of the GNU Lesser General Public
|
---|
8 | * License as published by the Free Software Foundation; either
|
---|
9 | * version 2 of the License, or (at your option) any later version.
|
---|
10 | *
|
---|
11 | * This library is distributed in the hope that it will be useful,
|
---|
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of
|
---|
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
---|
14 | * Lesser General Public License for more details.
|
---|
15 | *
|
---|
16 | * You should have received a copy of the GNU Lesser General Public
|
---|
17 | * License along with this library; if not, write to the Free Software
|
---|
18 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
|
---|
19 | */
|
---|
20 |
|
---|
21 | /*
|
---|
22 | * Sun LGPL Disclaimer: For the avoidance of doubt, except that if any license choice
|
---|
23 | * other than GPL or LGPL is available it will apply instead, Sun elects to use only
|
---|
24 | * the Lesser General Public License version 2.1 (LGPLv2) at this time for any software where
|
---|
25 | * a choice of LGPL license versions is made available with the language indicating
|
---|
26 | * that LGPLv2 or any later version may be used, or where a choice of which version
|
---|
27 | * of the LGPL is applied is otherwise unspecified.
|
---|
28 | */
|
---|
29 | #ifndef CPU_I386_H
|
---|
30 | #define CPU_I386_H
|
---|
31 |
|
---|
32 | #include "config.h"
|
---|
33 |
|
---|
34 | #ifdef TARGET_X86_64
|
---|
35 | #define TARGET_LONG_BITS 64
|
---|
36 | #else
|
---|
37 | #define TARGET_LONG_BITS 32
|
---|
38 | #endif
|
---|
39 |
|
---|
40 | /* target supports implicit self modifying code */
|
---|
41 | #define TARGET_HAS_SMC
|
---|
42 | /* support for self modifying code even if the modified instruction is
|
---|
43 | close to the modifying instruction */
|
---|
44 | #define TARGET_HAS_PRECISE_SMC
|
---|
45 |
|
---|
46 | #define TARGET_HAS_ICE 1
|
---|
47 |
|
---|
48 | #ifdef TARGET_X86_64
|
---|
49 | #define ELF_MACHINE EM_X86_64
|
---|
50 | #else
|
---|
51 | #define ELF_MACHINE EM_386
|
---|
52 | #endif
|
---|
53 |
|
---|
54 | #include "cpu-defs.h"
|
---|
55 |
|
---|
56 | #include "softfloat.h"
|
---|
57 |
|
---|
58 | #if defined(VBOX)
|
---|
59 | # include <iprt/critsect.h>
|
---|
60 | # include <iprt/thread.h>
|
---|
61 | # include <iprt/assert.h>
|
---|
62 | # include <iprt/asm.h>
|
---|
63 | # include <VBox/vmm.h>
|
---|
64 | #endif /* VBOX */
|
---|
65 |
|
---|
66 | #define R_EAX 0
|
---|
67 | #define R_ECX 1
|
---|
68 | #define R_EDX 2
|
---|
69 | #define R_EBX 3
|
---|
70 | #define R_ESP 4
|
---|
71 | #define R_EBP 5
|
---|
72 | #define R_ESI 6
|
---|
73 | #define R_EDI 7
|
---|
74 |
|
---|
75 | #define R_AL 0
|
---|
76 | #define R_CL 1
|
---|
77 | #define R_DL 2
|
---|
78 | #define R_BL 3
|
---|
79 | #define R_AH 4
|
---|
80 | #define R_CH 5
|
---|
81 | #define R_DH 6
|
---|
82 | #define R_BH 7
|
---|
83 |
|
---|
84 | #define R_ES 0
|
---|
85 | #define R_CS 1
|
---|
86 | #define R_SS 2
|
---|
87 | #define R_DS 3
|
---|
88 | #define R_FS 4
|
---|
89 | #define R_GS 5
|
---|
90 |
|
---|
91 | /* segment descriptor fields */
|
---|
92 | #define DESC_G_MASK (1 << 23)
|
---|
93 | #define DESC_B_SHIFT 22
|
---|
94 | #define DESC_B_MASK (1 << DESC_B_SHIFT)
|
---|
95 | #define DESC_L_SHIFT 21 /* x86_64 only : 64 bit code segment */
|
---|
96 | #define DESC_L_MASK (1 << DESC_L_SHIFT)
|
---|
97 | #define DESC_AVL_MASK (1 << 20)
|
---|
98 | #define DESC_P_MASK (1 << 15)
|
---|
99 | #define DESC_DPL_SHIFT 13
|
---|
100 | #define DESC_S_MASK (1 << 12)
|
---|
101 | #define DESC_TYPE_SHIFT 8
|
---|
102 | #define DESC_A_MASK (1 << 8)
|
---|
103 |
|
---|
104 | #define DESC_CS_MASK (1 << 11) /* 1=code segment 0=data segment */
|
---|
105 | #define DESC_C_MASK (1 << 10) /* code: conforming */
|
---|
106 | #define DESC_R_MASK (1 << 9) /* code: readable */
|
---|
107 |
|
---|
108 | #define DESC_E_MASK (1 << 10) /* data: expansion direction */
|
---|
109 | #define DESC_W_MASK (1 << 9) /* data: writable */
|
---|
110 |
|
---|
111 | #define DESC_TSS_BUSY_MASK (1 << 9)
|
---|
112 |
|
---|
113 | /* eflags masks */
|
---|
114 | #define CC_C 0x0001
|
---|
115 | #define CC_P 0x0004
|
---|
116 | #define CC_A 0x0010
|
---|
117 | #define CC_Z 0x0040
|
---|
118 | #define CC_S 0x0080
|
---|
119 | #define CC_O 0x0800
|
---|
120 |
|
---|
121 | #define TF_SHIFT 8
|
---|
122 | #define IOPL_SHIFT 12
|
---|
123 | #define VM_SHIFT 17
|
---|
124 |
|
---|
125 | #define TF_MASK 0x00000100
|
---|
126 | #define IF_MASK 0x00000200
|
---|
127 | #define DF_MASK 0x00000400
|
---|
128 | #define IOPL_MASK 0x00003000
|
---|
129 | #define NT_MASK 0x00004000
|
---|
130 | #define RF_MASK 0x00010000
|
---|
131 | #define VM_MASK 0x00020000
|
---|
132 | #define AC_MASK 0x00040000
|
---|
133 | #define VIF_MASK 0x00080000
|
---|
134 | #define VIP_MASK 0x00100000
|
---|
135 | #define ID_MASK 0x00200000
|
---|
136 |
|
---|
137 | /* hidden flags - used internally by qemu to represent additionnal cpu
|
---|
138 | states. Only the CPL, INHIBIT_IRQ, SMM and SVMI are not redundant. We avoid
|
---|
139 | using the IOPL_MASK, TF_MASK and VM_MASK bit position to ease oring
|
---|
140 | with eflags. */
|
---|
141 | /* current cpl */
|
---|
142 | #define HF_CPL_SHIFT 0
|
---|
143 | /* true if soft mmu is being used */
|
---|
144 | #define HF_SOFTMMU_SHIFT 2
|
---|
145 | /* true if hardware interrupts must be disabled for next instruction */
|
---|
146 | #define HF_INHIBIT_IRQ_SHIFT 3
|
---|
147 | /* 16 or 32 segments */
|
---|
148 | #define HF_CS32_SHIFT 4
|
---|
149 | #define HF_SS32_SHIFT 5
|
---|
150 | /* zero base for DS, ES and SS : can be '0' only in 32 bit CS segment */
|
---|
151 | #define HF_ADDSEG_SHIFT 6
|
---|
152 | /* copy of CR0.PE (protected mode) */
|
---|
153 | #define HF_PE_SHIFT 7
|
---|
154 | #define HF_TF_SHIFT 8 /* must be same as eflags */
|
---|
155 | #define HF_MP_SHIFT 9 /* the order must be MP, EM, TS */
|
---|
156 | #define HF_EM_SHIFT 10
|
---|
157 | #define HF_TS_SHIFT 11
|
---|
158 | #define HF_IOPL_SHIFT 12 /* must be same as eflags */
|
---|
159 | #define HF_LMA_SHIFT 14 /* only used on x86_64: long mode active */
|
---|
160 | #define HF_CS64_SHIFT 15 /* only used on x86_64: 64 bit code segment */
|
---|
161 | #define HF_OSFXSR_SHIFT 16 /* CR4.OSFXSR */
|
---|
162 | #define HF_VM_SHIFT 17 /* must be same as eflags */
|
---|
163 | #define HF_HALTED_SHIFT 18 /* CPU halted */
|
---|
164 | #define HF_SMM_SHIFT 19 /* CPU in SMM mode */
|
---|
165 | #define HF_SVME_SHIFT 20 /* SVME enabled (copy of EFER.SVME) */
|
---|
166 | #define HF_SVMI_SHIFT 21 /* SVM intercepts are active */
|
---|
167 |
|
---|
168 | #define HF_CPL_MASK (3 << HF_CPL_SHIFT)
|
---|
169 | #define HF_SOFTMMU_MASK (1 << HF_SOFTMMU_SHIFT)
|
---|
170 | #define HF_INHIBIT_IRQ_MASK (1 << HF_INHIBIT_IRQ_SHIFT)
|
---|
171 | #define HF_CS32_MASK (1 << HF_CS32_SHIFT)
|
---|
172 | #define HF_SS32_MASK (1 << HF_SS32_SHIFT)
|
---|
173 | #define HF_ADDSEG_MASK (1 << HF_ADDSEG_SHIFT)
|
---|
174 | #define HF_PE_MASK (1 << HF_PE_SHIFT)
|
---|
175 | #define HF_TF_MASK (1 << HF_TF_SHIFT)
|
---|
176 | #define HF_MP_MASK (1 << HF_MP_SHIFT)
|
---|
177 | #define HF_EM_MASK (1 << HF_EM_SHIFT)
|
---|
178 | #define HF_TS_MASK (1 << HF_TS_SHIFT)
|
---|
179 | #define HF_LMA_MASK (1 << HF_LMA_SHIFT)
|
---|
180 | #define HF_CS64_MASK (1 << HF_CS64_SHIFT)
|
---|
181 | #define HF_OSFXSR_MASK (1 << HF_OSFXSR_SHIFT)
|
---|
182 | #define HF_HALTED_MASK (1 << HF_HALTED_SHIFT)
|
---|
183 | #define HF_SMM_MASK (1 << HF_SMM_SHIFT)
|
---|
184 | #define HF_SVME_MASK (1 << HF_SVME_SHIFT)
|
---|
185 | #define HF_SVMI_MASK (1 << HF_SVMI_SHIFT)
|
---|
186 |
|
---|
187 | /* hflags2 */
|
---|
188 |
|
---|
189 | #define HF2_GIF_SHIFT 0 /* if set CPU takes interrupts */
|
---|
190 | #define HF2_HIF_SHIFT 1 /* value of IF_MASK when entering SVM */
|
---|
191 | #define HF2_NMI_SHIFT 2 /* CPU serving NMI */
|
---|
192 | #define HF2_VINTR_SHIFT 3 /* value of V_INTR_MASKING bit */
|
---|
193 |
|
---|
194 | #define HF2_GIF_MASK (1 << HF2_GIF_SHIFT)
|
---|
195 | #define HF2_HIF_MASK (1 << HF2_HIF_SHIFT)
|
---|
196 | #define HF2_NMI_MASK (1 << HF2_NMI_SHIFT)
|
---|
197 | #define HF2_VINTR_MASK (1 << HF2_VINTR_SHIFT)
|
---|
198 |
|
---|
199 | #define CR0_PE_MASK (1 << 0)
|
---|
200 | #define CR0_MP_MASK (1 << 1)
|
---|
201 | #define CR0_EM_MASK (1 << 2)
|
---|
202 | #define CR0_TS_MASK (1 << 3)
|
---|
203 | #define CR0_ET_MASK (1 << 4)
|
---|
204 | #define CR0_NE_MASK (1 << 5)
|
---|
205 | #define CR0_WP_MASK (1 << 16)
|
---|
206 | #define CR0_AM_MASK (1 << 18)
|
---|
207 | #define CR0_PG_MASK (1 << 31)
|
---|
208 |
|
---|
209 | #define CR4_VME_MASK (1 << 0)
|
---|
210 | #define CR4_PVI_MASK (1 << 1)
|
---|
211 | #define CR4_TSD_MASK (1 << 2)
|
---|
212 | #define CR4_DE_MASK (1 << 3)
|
---|
213 | #define CR4_PSE_MASK (1 << 4)
|
---|
214 | #define CR4_PAE_MASK (1 << 5)
|
---|
215 | #define CR4_PGE_MASK (1 << 7)
|
---|
216 | #define CR4_PCE_MASK (1 << 8)
|
---|
217 | #define CR4_OSFXSR_MASK (1 << 9)
|
---|
218 | #define CR4_OSXMMEXCPT_MASK (1 << 10)
|
---|
219 |
|
---|
220 | #define PG_PRESENT_BIT 0
|
---|
221 | #define PG_RW_BIT 1
|
---|
222 | #define PG_USER_BIT 2
|
---|
223 | #define PG_PWT_BIT 3
|
---|
224 | #define PG_PCD_BIT 4
|
---|
225 | #define PG_ACCESSED_BIT 5
|
---|
226 | #define PG_DIRTY_BIT 6
|
---|
227 | #define PG_PSE_BIT 7
|
---|
228 | #define PG_GLOBAL_BIT 8
|
---|
229 | #define PG_NX_BIT 63
|
---|
230 |
|
---|
231 | #define PG_PRESENT_MASK (1 << PG_PRESENT_BIT)
|
---|
232 | #define PG_RW_MASK (1 << PG_RW_BIT)
|
---|
233 | #define PG_USER_MASK (1 << PG_USER_BIT)
|
---|
234 | #define PG_PWT_MASK (1 << PG_PWT_BIT)
|
---|
235 | #define PG_PCD_MASK (1 << PG_PCD_BIT)
|
---|
236 | #define PG_ACCESSED_MASK (1 << PG_ACCESSED_BIT)
|
---|
237 | #define PG_DIRTY_MASK (1 << PG_DIRTY_BIT)
|
---|
238 | #define PG_PSE_MASK (1 << PG_PSE_BIT)
|
---|
239 | #define PG_GLOBAL_MASK (1 << PG_GLOBAL_BIT)
|
---|
240 | #define PG_NX_MASK (1LL << PG_NX_BIT)
|
---|
241 |
|
---|
242 | #define PG_ERROR_W_BIT 1
|
---|
243 |
|
---|
244 | #define PG_ERROR_P_MASK 0x01
|
---|
245 | #define PG_ERROR_W_MASK (1 << PG_ERROR_W_BIT)
|
---|
246 | #define PG_ERROR_U_MASK 0x04
|
---|
247 | #define PG_ERROR_RSVD_MASK 0x08
|
---|
248 | #define PG_ERROR_I_D_MASK 0x10
|
---|
249 |
|
---|
250 | #define MSR_IA32_APICBASE 0x1b
|
---|
251 | #define MSR_IA32_APICBASE_BSP (1<<8)
|
---|
252 | #define MSR_IA32_APICBASE_ENABLE (1<<11)
|
---|
253 | #define MSR_IA32_APICBASE_BASE (0xfffff<<12)
|
---|
254 |
|
---|
/* VBox x86.h kludge: the host headers may already define these, so only
   define them when absent.  (The previous unconditional re-definitions
   below the guard defeated its purpose and have been removed; the
   values are the architectural ones and were identical.) */
#ifndef MSR_IA32_SYSENTER_CS
#define MSR_IA32_SYSENTER_CS            0x174
#define MSR_IA32_SYSENTER_ESP           0x175
#define MSR_IA32_SYSENTER_EIP           0x176
#endif
|
---|
264 |
|
---|
265 | #define MSR_MCG_CAP 0x179
|
---|
266 | #define MSR_MCG_STATUS 0x17a
|
---|
267 | #define MSR_MCG_CTL 0x17b
|
---|
268 |
|
---|
269 | #define MSR_IA32_PERF_STATUS 0x198
|
---|
270 |
|
---|
271 | #define MSR_PAT 0x277
|
---|
272 |
|
---|
273 | #define MSR_EFER 0xc0000080
|
---|
274 |
|
---|
275 | #define MSR_EFER_SCE (1 << 0)
|
---|
276 | #define MSR_EFER_LME (1 << 8)
|
---|
277 | #define MSR_EFER_LMA (1 << 10)
|
---|
278 | #define MSR_EFER_NXE (1 << 11)
|
---|
279 | #define MSR_EFER_SVME (1 << 12)
|
---|
280 | #define MSR_EFER_FFXSR (1 << 14)
|
---|
281 |
|
---|
282 | #ifdef VBOX
|
---|
283 | #define MSR_APIC_RANGE_START 0x800
|
---|
284 | #define MSR_APIC_RANGE_END 0x900
|
---|
285 | #endif
|
---|
286 |
|
---|
287 | #define MSR_STAR 0xc0000081
|
---|
288 | #define MSR_LSTAR 0xc0000082
|
---|
289 | #define MSR_CSTAR 0xc0000083
|
---|
290 | #define MSR_FMASK 0xc0000084
|
---|
291 | #define MSR_FSBASE 0xc0000100
|
---|
292 | #define MSR_GSBASE 0xc0000101
|
---|
293 | #define MSR_KERNELGSBASE 0xc0000102
|
---|
294 |
|
---|
295 | #define MSR_VM_HSAVE_PA 0xc0010117
|
---|
296 |
|
---|
297 | /* cpuid_features bits */
|
---|
298 | #define CPUID_FP87 (1 << 0)
|
---|
299 | #define CPUID_VME (1 << 1)
|
---|
300 | #define CPUID_DE (1 << 2)
|
---|
301 | #define CPUID_PSE (1 << 3)
|
---|
302 | #define CPUID_TSC (1 << 4)
|
---|
303 | #define CPUID_MSR (1 << 5)
|
---|
304 | #define CPUID_PAE (1 << 6)
|
---|
305 | #define CPUID_MCE (1 << 7)
|
---|
306 | #define CPUID_CX8 (1 << 8)
|
---|
307 | #define CPUID_APIC (1 << 9)
|
---|
308 | #define CPUID_SEP (1 << 11) /* sysenter/sysexit */
|
---|
309 | #define CPUID_MTRR (1 << 12)
|
---|
310 | #define CPUID_PGE (1 << 13)
|
---|
311 | #define CPUID_MCA (1 << 14)
|
---|
312 | #define CPUID_CMOV (1 << 15)
|
---|
313 | #define CPUID_PAT (1 << 16)
|
---|
314 | #define CPUID_PSE36 (1 << 17)
|
---|
315 | #define CPUID_CLFLUSH (1 << 19)
|
---|
316 | #define CPUID_DTS (1 << 21)
|
---|
317 | #define CPUID_ACPI (1 << 22)
|
---|
318 | #define CPUID_MMX (1 << 23)
|
---|
319 | #define CPUID_FXSR (1 << 24)
|
---|
320 | #define CPUID_SSE (1 << 25)
|
---|
321 | #define CPUID_SSE2 (1 << 26)
|
---|
322 | #define CPUID_SS (1 << 27)
|
---|
323 | #define CPUID_HT (1 << 28)
|
---|
324 | #define CPUID_TM (1 << 29)
|
---|
325 | #define CPUID_IA64 (1 << 30)
|
---|
326 | #define CPUID_PBE (1 << 31)
|
---|
327 |
|
---|
328 | #define CPUID_EXT_SSE3 (1 << 0)
|
---|
329 | #define CPUID_EXT_DTES64 (1 << 2)
|
---|
330 | #define CPUID_EXT_MONITOR (1 << 3)
|
---|
331 | #define CPUID_EXT_DSCPL (1 << 4)
|
---|
332 | #define CPUID_EXT_VMX (1 << 5)
|
---|
333 | #define CPUID_EXT_SMX (1 << 6)
|
---|
334 | #define CPUID_EXT_EST (1 << 7)
|
---|
335 | #define CPUID_EXT_TM2 (1 << 8)
|
---|
336 | #define CPUID_EXT_SSSE3 (1 << 9)
|
---|
337 | #define CPUID_EXT_CID (1 << 10)
|
---|
338 | #define CPUID_EXT_CX16 (1 << 13)
|
---|
339 | #define CPUID_EXT_XTPR (1 << 14)
|
---|
340 | #define CPUID_EXT_PDCM (1 << 15)
|
---|
341 | #define CPUID_EXT_DCA (1 << 18)
|
---|
342 | #define CPUID_EXT_SSE41 (1 << 19)
|
---|
343 | #define CPUID_EXT_SSE42 (1 << 20)
|
---|
344 | #define CPUID_EXT_X2APIC (1 << 21)
|
---|
345 | #define CPUID_EXT_MOVBE (1 << 22)
|
---|
346 | #define CPUID_EXT_POPCNT (1 << 23)
|
---|
347 | #define CPUID_EXT_XSAVE (1 << 26)
|
---|
348 | #define CPUID_EXT_OSXSAVE (1 << 27)
|
---|
349 |
|
---|
350 | #define CPUID_EXT2_SYSCALL (1 << 11)
|
---|
351 | #define CPUID_EXT2_MP (1 << 19)
|
---|
352 | #define CPUID_EXT2_NX (1 << 20)
|
---|
353 | #define CPUID_EXT2_MMXEXT (1 << 22)
|
---|
354 | #define CPUID_EXT2_FFXSR (1 << 25)
|
---|
355 | #define CPUID_EXT2_PDPE1GB (1 << 26)
|
---|
356 | #define CPUID_EXT2_RDTSCP (1 << 27)
|
---|
357 | #define CPUID_EXT2_LM (1 << 29)
|
---|
358 | #define CPUID_EXT2_3DNOWEXT (1 << 30)
|
---|
359 | #define CPUID_EXT2_3DNOW (1 << 31)
|
---|
360 |
|
---|
361 | #define CPUID_EXT3_LAHF_LM (1 << 0)
|
---|
362 | #define CPUID_EXT3_CMP_LEG (1 << 1)
|
---|
363 | #define CPUID_EXT3_SVM (1 << 2)
|
---|
364 | #define CPUID_EXT3_EXTAPIC (1 << 3)
|
---|
365 | #define CPUID_EXT3_CR8LEG (1 << 4)
|
---|
366 | #define CPUID_EXT3_ABM (1 << 5)
|
---|
367 | #define CPUID_EXT3_SSE4A (1 << 6)
|
---|
368 | #define CPUID_EXT3_MISALIGNSSE (1 << 7)
|
---|
369 | #define CPUID_EXT3_3DNOWPREFETCH (1 << 8)
|
---|
370 | #define CPUID_EXT3_OSVW (1 << 9)
|
---|
371 | #define CPUID_EXT3_IBS (1 << 10)
|
---|
372 | #define CPUID_EXT3_SKINIT (1 << 12)
|
---|
373 |
|
---|
374 | #define CPUID_VENDOR_INTEL_1 0x756e6547 /* "Genu" */
|
---|
375 | #define CPUID_VENDOR_INTEL_2 0x49656e69 /* "ineI" */
|
---|
376 | #define CPUID_VENDOR_INTEL_3 0x6c65746e /* "ntel" */
|
---|
377 |
|
---|
378 | #define CPUID_VENDOR_AMD_1 0x68747541 /* "Auth" */
|
---|
379 | #define CPUID_VENDOR_AMD_2 0x69746e65 /* "enti" */
|
---|
380 | #define CPUID_VENDOR_AMD_3 0x444d4163 /* "cAMD" */
|
---|
381 |
|
---|
382 | #define CPUID_MWAIT_IBE (1 << 1) /* Interrupts can exit capability */
|
---|
383 | #define CPUID_MWAIT_EMX (1 << 0) /* enumeration supported */
|
---|
384 |
|
---|
385 | #define EXCP00_DIVZ 0
|
---|
386 | #define EXCP01_SSTP 1
|
---|
387 | #define EXCP02_NMI 2
|
---|
388 | #define EXCP03_INT3 3
|
---|
389 | #define EXCP04_INTO 4
|
---|
390 | #define EXCP05_BOUND 5
|
---|
391 | #define EXCP06_ILLOP 6
|
---|
392 | #define EXCP07_PREX 7
|
---|
393 | #define EXCP08_DBLE 8
|
---|
394 | #define EXCP09_XERR 9
|
---|
395 | #define EXCP0A_TSS 10
|
---|
396 | #define EXCP0B_NOSEG 11
|
---|
397 | #define EXCP0C_STACK 12
|
---|
398 | #define EXCP0D_GPF 13
|
---|
399 | #define EXCP0E_PAGE 14
|
---|
400 | #define EXCP10_COPR 16
|
---|
401 | #define EXCP11_ALGN 17
|
---|
402 | #define EXCP12_MCHK 18
|
---|
403 |
|
---|
404 | #define EXCP_SYSCALL 0x100 /* only happens in user only emulation
|
---|
405 | for syscall instruction */
|
---|
406 |
|
---|
/* Lazy condition-code evaluation: instead of materializing eflags after
   every instruction, the emulator records in cc_op which operation last
   set the flags, with its operands kept in CC_SRC/CC_DST, and computes
   the flag bits only when they are actually read.  Each operation comes
   in B/W/L/Q variants for the 8/16/32/64-bit operand sizes. */
enum {
    CC_OP_DYNAMIC, /* must use dynamic code to get cc_op */
    CC_OP_EFLAGS, /* all cc are explicitly computed, CC_SRC = flags */

    CC_OP_MULB, /* modify all flags, C, O = (CC_SRC != 0) */
    CC_OP_MULW,
    CC_OP_MULL,
    CC_OP_MULQ,

    CC_OP_ADDB, /* modify all flags, CC_DST = res, CC_SRC = src1 */
    CC_OP_ADDW,
    CC_OP_ADDL,
    CC_OP_ADDQ,

    CC_OP_ADCB, /* modify all flags, CC_DST = res, CC_SRC = src1 */
    CC_OP_ADCW,
    CC_OP_ADCL,
    CC_OP_ADCQ,

    CC_OP_SUBB, /* modify all flags, CC_DST = res, CC_SRC = src1 */
    CC_OP_SUBW,
    CC_OP_SUBL,
    CC_OP_SUBQ,

    CC_OP_SBBB, /* modify all flags, CC_DST = res, CC_SRC = src1 */
    CC_OP_SBBW,
    CC_OP_SBBL,
    CC_OP_SBBQ,

    CC_OP_LOGICB, /* modify all flags, CC_DST = res */
    CC_OP_LOGICW,
    CC_OP_LOGICL,
    CC_OP_LOGICQ,

    CC_OP_INCB, /* modify all flags except C, CC_DST = res, CC_SRC = C */
    CC_OP_INCW,
    CC_OP_INCL,
    CC_OP_INCQ,

    CC_OP_DECB, /* modify all flags except C, CC_DST = res, CC_SRC = C */
    CC_OP_DECW,
    CC_OP_DECL,
    CC_OP_DECQ,

    CC_OP_SHLB, /* modify all flags, CC_DST = res, CC_SRC.msb = C */
    CC_OP_SHLW,
    CC_OP_SHLL,
    CC_OP_SHLQ,

    CC_OP_SARB, /* modify all flags, CC_DST = res, CC_SRC.lsb = C */
    CC_OP_SARW,
    CC_OP_SARL,
    CC_OP_SARQ,

    CC_OP_NB    /* number of cc_op values, not a real operation */
};
|
---|
463 |
|
---|
/* FPU stack element type: use softfloat's 80-bit extended precision
   type when available (FLOATX80), otherwise fall back to 64-bit
   doubles (loses the extra x87 precision). */
#ifdef FLOATX80
#define USE_X86LDOUBLE
#endif

#ifdef USE_X86LDOUBLE
typedef floatx80 CPU86_LDouble;
#else
typedef float64 CPU86_LDouble;
#endif
|
---|
473 |
|
---|
/* Cached (hidden) part of a segment register: the visible selector plus
   the base/limit/flags loaded from the descriptor.  'flags' holds the
   raw descriptor attribute bits, see the DESC_*_MASK defines above. */
typedef struct SegmentCache {
    uint32_t selector;
    target_ulong base;
    uint32_t limit;
    uint32_t flags;
#ifdef VBOX
    /** The new selector is saved here when we are unable to sync it before invoking the recompiled code. */
    uint32_t newselector;
#endif
} SegmentCache;
|
---|
484 |
|
---|
/* 128-bit SSE (XMM) register, viewable as bytes, words, dwords, qwords,
   or packed single/double floats.  Elements should be accessed via the
   endian-aware XMM_B/W/L/S/Q/D macros defined below. */
typedef union {
    uint8_t _b[16];
    uint16_t _w[8];
    uint32_t _l[4];
    uint64_t _q[2];
    float32 _s[4];
    float64 _d[2];
} XMMReg;
|
---|
493 |
|
---|
494 | typedef union {
|
---|
495 | uint8_t _b[8];
|
---|
496 | uint16_t _w[2];
|
---|
497 | uint32_t _l[1];
|
---|
498 | float32 _s[2];
|
---|
499 | uint64_t q;
|
---|
500 | } MMXReg;
|
---|
501 |
|
---|
502 | #ifdef WORDS_BIGENDIAN
|
---|
503 | #define XMM_B(n) _b[15 - (n)]
|
---|
504 | #define XMM_W(n) _w[7 - (n)]
|
---|
505 | #define XMM_L(n) _l[3 - (n)]
|
---|
506 | #define XMM_S(n) _s[3 - (n)]
|
---|
507 | #define XMM_Q(n) _q[1 - (n)]
|
---|
508 | #define XMM_D(n) _d[1 - (n)]
|
---|
509 |
|
---|
510 | #define MMX_B(n) _b[7 - (n)]
|
---|
511 | #define MMX_W(n) _w[3 - (n)]
|
---|
512 | #define MMX_L(n) _l[1 - (n)]
|
---|
513 | #define MMX_S(n) _s[1 - (n)]
|
---|
514 | #else
|
---|
515 | #define XMM_B(n) _b[n]
|
---|
516 | #define XMM_W(n) _w[n]
|
---|
517 | #define XMM_L(n) _l[n]
|
---|
518 | #define XMM_S(n) _s[n]
|
---|
519 | #define XMM_Q(n) _q[n]
|
---|
520 | #define XMM_D(n) _d[n]
|
---|
521 |
|
---|
522 | #define MMX_B(n) _b[n]
|
---|
523 | #define MMX_W(n) _w[n]
|
---|
524 | #define MMX_L(n) _l[n]
|
---|
525 | #define MMX_S(n) _s[n]
|
---|
526 | #endif
|
---|
527 | #define MMX_Q(n) q
|
---|
528 |
|
---|
529 | #ifdef TARGET_X86_64
|
---|
530 | #define CPU_NB_REGS 16
|
---|
531 | #else
|
---|
532 | #define CPU_NB_REGS 8
|
---|
533 | #endif
|
---|
534 |
|
---|
535 | #define NB_MMU_MODES 2
|
---|
536 |
|
---|
/* Complete emulated x86 CPU state: guest-visible registers plus the
   emulator-internal caches (lazy eflags, hidden segment flags) and,
   under VBOX, the hooks back into the VM.  NOTE(review): several
   alignment* pad fields suggest parts of the layout are offset/align
   sensitive — confirm before reordering members. */
typedef struct CPUX86State {
    /* standard registers */
    target_ulong regs[CPU_NB_REGS];
    target_ulong eip;
    target_ulong eflags; /* eflags register. During CPU emulation, CC
                        flags and DF are set to zero because they are
                        stored elsewhere */

    /* emulator internal eflags handling (see the CC_OP_* enum) */
    target_ulong cc_src;
    target_ulong cc_dst;
    uint32_t cc_op;
    int32_t df; /* D flag : 1 if D = 0, -1 if D = 1 */
    uint32_t hflags; /* TB flags, see HF_xxx constants. These flags
                        are known at translation time. */
    uint32_t hflags2; /* various other flags, see HF2_xxx constants. */

    /* segments */
    SegmentCache segs[6]; /* selector values */
    SegmentCache ldt;
    SegmentCache tr;
    SegmentCache gdt; /* only base and limit are used */
    SegmentCache idt; /* only base and limit are used */

    target_ulong cr[5]; /* NOTE: cr1 is unused */
    uint64_t a20_mask;

    /* FPU state */
    unsigned int fpstt; /* top of stack index */
    unsigned int fpus;
    unsigned int fpuc;
    uint8_t fptags[8];   /* 0 = valid, 1 = empty */
    union {
#ifdef USE_X86LDOUBLE
        CPU86_LDouble d __attribute__((aligned(16)));
#else
        CPU86_LDouble d;
#endif
        MMXReg mmx;    /* MMX registers alias the FPU stack */
    } fpregs[8];

    /* emulator internal variables */
    float_status fp_status;
#ifdef VBOX
    uint32_t alignment3[3]; /* force the long double to start a 16 byte line. */
#endif
    CPU86_LDouble ft0;  /* FPU scratch temporary */
#if defined(VBOX) && defined(RT_ARCH_X86) && !defined(RT_OS_DARWIN)
    uint32_t alignment4; /* long double is 12 byte, pad it to 16. */
#endif

    float_status mmx_status; /* for 3DNow! float ops */
    float_status sse_status;
    uint32_t mxcsr;
    XMMReg xmm_regs[CPU_NB_REGS];
    XMMReg xmm_t0;
    MMXReg mmx_t0;
    target_ulong cc_tmp; /* temporary for rcr/rcl */

    /* sysenter registers */
    uint32_t sysenter_cs;
    uint64_t sysenter_esp;
    uint64_t sysenter_eip;
#ifdef VBOX
    uint32_t alignment0;
#endif
    uint64_t efer;
    uint64_t star;

    /* SVM state (see MSR_VM_HSAVE_PA and the HF_SVME/HF_SVMI flags) */
    uint64_t vm_hsave;
    uint64_t vm_vmcb;
    uint64_t tsc_offset;
    uint64_t intercept;
    uint16_t intercept_cr_read;
    uint16_t intercept_cr_write;
    uint16_t intercept_dr_read;
    uint16_t intercept_dr_write;
    uint32_t intercept_exceptions;
    uint8_t v_tpr;

#ifdef TARGET_X86_64
    /* long-mode syscall/fs-gs base MSRs (MSR_LSTAR etc.) */
    target_ulong lstar;
    target_ulong cstar;
    target_ulong fmask;
    target_ulong kernelgsbase;
#endif

    uint64_t pat;

    /* exception/interrupt handling */
    int error_code;
    int exception_is_int;
    target_ulong exception_next_eip;
    target_ulong dr[8]; /* debug registers */
    uint32_t smbase;
    int old_exception;  /* exception in flight */


    CPU_COMMON

#ifdef VBOX
    /** cpu state flags. (see defines below) */
    uint32_t state;
    /** The VM handle. */
    PVM pVM;
    /** code buffer for instruction emulation */
    void *pvCodeBuffer;
    /** code buffer size */
    uint32_t cbCodeBuffer;
#endif /* VBOX */

    /* processor features (e.g. for CPUID insn) */
#ifndef VBOX /* remR3CpuId deals with these */
    uint32_t cpuid_level;
    uint32_t cpuid_vendor1;
    uint32_t cpuid_vendor2;
    uint32_t cpuid_vendor3;
    uint32_t cpuid_version;
#endif /* !VBOX */
    uint32_t cpuid_features;
    uint32_t cpuid_ext_features;
#ifndef VBOX
    uint32_t cpuid_xlevel;
    uint32_t cpuid_model[12];
#endif /* !VBOX */
    uint32_t cpuid_ext2_features;
    uint32_t cpuid_ext3_features;
    uint32_t cpuid_apic_id;

#ifndef VBOX
#ifdef USE_KQEMU
    int kqemu_enabled;
    int last_io_time;
#endif
    /* in order to simplify APIC support, we leave this pointer to the
       user */
    struct APICState *apic_state;
#else
    uint32_t alignment2[3];
#endif
} CPUX86State;
|
---|
678 |
|
---|
679 | #ifdef VBOX
|
---|
680 |
|
---|
/* Version 1.6 structure; just for loading the old saved state.  The
   layout is frozen (note the 32-bit 'base', unlike the current
   target_ulong SegmentCache) — do not change it. */
typedef struct SegmentCache_Ver16 {
    uint32_t selector;
    uint32_t base;
    uint32_t limit;
    uint32_t flags;
    /** The new selector is saved here when we are unable to sync it before invoking the recompiled code. */
    uint32_t newselector;
} SegmentCache_Ver16;
|
---|
690 |
|
---|
691 | #define CPU_NB_REGS_VER16 8
|
---|
692 |
|
---|
/* Version 1.6 structure; just for loading the old saved state.  The
   layout is frozen — do not change it.  NOTE(review): fields are
   32-bit where the current CPUX86State uses target_ulong/uint64_t;
   this appears to mirror the pre-x86_64 layout — confirm against the
   1.6 saved-state loader before touching. */
typedef struct CPUX86State_Ver16 {
#if TARGET_LONG_BITS > HOST_LONG_BITS
    /* temporaries if we cannot store them in host registers */
    uint32_t t0, t1, t2;
#endif

    /* standard registers */
    uint32_t regs[CPU_NB_REGS_VER16];
    uint32_t eip;
    uint32_t eflags; /* eflags register. During CPU emulation, CC
                        flags and DF are set to zero because they are
                        stored elsewhere */

    /* emulator internal eflags handling */
    uint32_t cc_src;
    uint32_t cc_dst;
    uint32_t cc_op;
    int32_t df; /* D flag : 1 if D = 0, -1 if D = 1 */
    uint32_t hflags; /* hidden flags, see HF_xxx constants */

    /* segments */
    SegmentCache_Ver16 segs[6]; /* selector values */
    SegmentCache_Ver16 ldt;
    SegmentCache_Ver16 tr;
    SegmentCache_Ver16 gdt; /* only base and limit are used */
    SegmentCache_Ver16 idt; /* only base and limit are used */

    uint32_t cr[5]; /* NOTE: cr1 is unused */
    uint32_t a20_mask;

    /* FPU state */
    unsigned int fpstt; /* top of stack index */
    unsigned int fpus;
    unsigned int fpuc;
    uint8_t fptags[8];   /* 0 = valid, 1 = empty */
    union {
#ifdef USE_X86LDOUBLE
        CPU86_LDouble d __attribute__((aligned(16)));
#else
        CPU86_LDouble d;
#endif
        MMXReg mmx;
    } fpregs[8];

    /* emulator internal variables */
    float_status fp_status;
#ifdef VBOX
    uint32_t alignment3[3]; /* force the long double to start a 16 byte line. */
#endif
    CPU86_LDouble ft0;
#if defined(VBOX) && defined(RT_ARCH_X86) && !defined(RT_OS_DARWIN)
    uint32_t alignment4; /* long double is 12 byte, pad it to 16. */
#endif
    union {
        float f;
        double d;
        int i32;
        int64_t i64;
    } fp_convert;

    float_status sse_status;
    uint32_t mxcsr;
    XMMReg xmm_regs[CPU_NB_REGS_VER16];
    XMMReg xmm_t0;
    MMXReg mmx_t0;

    /* sysenter registers */
    uint32_t sysenter_cs;
    uint32_t sysenter_esp;
    uint32_t sysenter_eip;
#ifdef VBOX
    uint32_t alignment0;
#endif
    uint64_t efer;
    uint64_t star;

    uint64_t pat;

    /* temporary data for USE_CODE_COPY mode */
#ifdef USE_CODE_COPY
    uint32_t tmp0;
    uint32_t saved_esp;
    int native_fp_regs; /* if true, the FPU state is in the native CPU regs */
#endif

    /* exception/interrupt handling */
    jmp_buf jmp_env;
} CPUX86State_Ver16;
|
---|
782 |
|
---|
/** CPUX86State state flags (values of CPUX86State::state)
 * @{ */
#define CPU_RAW_RING0            0x0002 /* Set after first time RawR0 is executed, never cleared. */
#define CPU_EMULATE_SINGLE_INSTR 0x0040 /* Execute a single instruction in emulation mode */
#define CPU_EMULATE_SINGLE_STEP  0x0080 /* go into single step mode */
#define CPU_RAW_HWACC            0x0100 /* Set after first time HWACC is executed, never cleared. */
/** @} */
#endif /* VBOX */
|
---|
791 |
|
---|
792 | #ifdef VBOX
|
---|
793 | CPUX86State *cpu_x86_init(CPUX86State *env, const char *cpu_model);
|
---|
794 | #else /* !VBOX */
|
---|
795 | CPUX86State *cpu_x86_init(const char *cpu_model);
|
---|
796 | #endif /* !VBOX */
|
---|
797 | int cpu_x86_exec(CPUX86State *s);
|
---|
798 | void cpu_x86_close(CPUX86State *s);
|
---|
799 | void x86_cpu_list (FILE *f, int (*cpu_fprintf)(FILE *f, const char *fmt,
|
---|
800 | ...));
|
---|
801 | int cpu_get_pic_interrupt(CPUX86State *s);
|
---|
802 | /* MSDOS compatibility mode FPU exception support */
|
---|
803 | void cpu_set_ferr(CPUX86State *s);
|
---|
804 |
|
---|
/* This function must always be used to load data in the segment
   cache: it synchronizes the hflags with the segment cache values.
   hflags cache the CS/SS default operand sizes (HF_CS32/HF_SS32/
   HF_CS64) and whether non-zero segment bases must be added
   (HF_ADDSEG), so they must be recomputed on every segment load. */
static inline void cpu_x86_load_seg_cache(CPUX86State *env,
                                          int seg_reg, unsigned int selector,
                                          target_ulong base,
                                          unsigned int limit,
                                          unsigned int flags)
{
    SegmentCache *sc;
    unsigned int new_hflags;

    sc = &env->segs[seg_reg];
    sc->selector = selector;
    sc->base = base;
    sc->limit = limit;
    sc->flags = flags;
#ifdef VBOX
    sc->newselector = 0; /* no pending unsynced selector (see SegmentCache) */
#endif

    /* update the hidden flags */
    {
        if (seg_reg == R_CS) {
#ifdef TARGET_X86_64
            if ((env->hflags & HF_LMA_MASK) && (flags & DESC_L_MASK)) {
                /* long mode: 64 bit code segment */
                env->hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
                env->hflags &= ~(HF_ADDSEG_MASK);
            } else
#endif
            {
                /* legacy / compatibility case: HF_CS32 mirrors the
                   descriptor's B bit; DESC_B_SHIFT > HF_CS32_SHIFT, so
                   a right shift moves the bit into position. */
                new_hflags = (env->segs[R_CS].flags & DESC_B_MASK)
                    >> (DESC_B_SHIFT - HF_CS32_SHIFT);
                env->hflags = (env->hflags & ~(HF_CS32_MASK | HF_CS64_MASK)) |
                    new_hflags;
            }
        }
        /* HF_SS32 likewise mirrors the B bit of the SS descriptor */
        new_hflags = (env->segs[R_SS].flags & DESC_B_MASK)
            >> (DESC_B_SHIFT - HF_SS32_SHIFT);
        if (env->hflags & HF_CS64_MASK) {
            /* zero base assumed for DS, ES and SS in long mode */
        } else if (!(env->cr[0] & CR0_PE_MASK) ||
                   (env->eflags & VM_MASK) ||
                   !(env->hflags & HF_CS32_MASK)) {
            /* XXX: try to avoid this test. The problem comes from the
               fact that in real mode or vm86 mode we only modify the
               'base' and 'selector' fields of the segment cache to go
               faster. A solution may be to force addseg to one in
               translate-i386.c. */
            new_hflags |= HF_ADDSEG_MASK;
        } else {
            /* protected 32-bit mode: segment bases only matter when at
               least one of DS/ES/SS has a non-zero base. */
            new_hflags |= ((env->segs[R_DS].base |
                            env->segs[R_ES].base |
                            env->segs[R_SS].base) != 0) <<
                HF_ADDSEG_SHIFT;
        }
        env->hflags = (env->hflags &
                       ~(HF_SS32_MASK | HF_ADDSEG_MASK)) | new_hflags;
    }
}
|
---|
866 |
|
---|
867 | /* wrapper, just in case memory mappings must be changed */
|
---|
868 | static inline void cpu_x86_set_cpl(CPUX86State *s, int cpl)
|
---|
869 | {
|
---|
870 | #if HF_CPL_MASK == 3
|
---|
871 | s->hflags = (s->hflags & ~HF_CPL_MASK) | cpl;
|
---|
872 | #else
|
---|
873 | #error HF_CPL_MASK is hardcoded
|
---|
874 | #endif
|
---|
875 | }
|
---|
876 |
|
---|
/* used for debug or cpu save/restore: split/assemble an 80-bit
   extended-precision float into 64-bit mantissa + 16-bit exponent */
void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f);
CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper);

/* the following helpers are only usable in user mode simulation as
   they can trigger unexpected exceptions */
void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector);
void cpu_x86_fsave(CPUX86State *s, uint8_t *ptr, int data32);
void cpu_x86_frstor(CPUX86State *s, uint8_t *ptr, int data32);
886 |
|
---|
/* you can call this signal handler from your SIGBUS and SIGSEGV
   signal handlers to inform the virtual CPU of exceptions. non zero
   is returned if the signal was handled by the virtual CPU. */
int cpu_x86_signal_handler(int host_signum, void *pinfo,
                           void *puc);
/* Gate the A20 address line (legacy real-mode address wraparound). */
void cpu_x86_set_a20(CPUX86State *env, int a20_state);

/* Current guest time-stamp counter value. */
uint64_t cpu_get_tsc(CPUX86State *env);

/* Local APIC accessors (implemented by the APIC device model). */
void cpu_set_apic_base(CPUX86State *env, uint64_t val);
uint64_t cpu_get_apic_base(CPUX86State *env);
void cpu_set_apic_tpr(CPUX86State *env, uint8_t val);
#ifndef NO_CPU_IO_DEFS
uint8_t cpu_get_apic_tpr(CPUX86State *env);
#endif
#ifdef VBOX
/* VBox forwards APIC MSR accesses to its own APIC implementation. */
uint64_t cpu_apic_rdmsr(CPUX86State *env, uint32_t reg);
void cpu_apic_wrmsr(CPUX86State *env, uint32_t reg, uint64_t value);
#endif
void cpu_smm_update(CPUX86State *env);
|
---|
907 |
|
---|
/* will be suppressed */
void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0);

/* used to debug: flag bits for the cpu dump routines */
#define X86_DUMP_FPU  0x0001 /* dump FPU state too */
#define X86_DUMP_CCOP 0x0002 /* dump qemu flag cache */
|
---|
914 |
|
---|
#ifdef USE_KQEMU
/* Fast timestamp for kqemu: executes RDTSC on the host and returns
   only the low 32 bits of the counter (the high half is read into
   'high' by the instruction but discarded).  Host-x86 only. */
static inline int cpu_get_time_fast(void)
{
    int low, high;
    asm volatile("rdtsc" : "=a" (low), "=d" (high));
    return low;
}
#endif
|
---|
923 |
|
---|
#ifdef VBOX
/* VBox-specific glue for raw-mode execution. */
void cpu_trap_raw(CPUX86State *env1);

/* in helper.c: guest virtual-address memory accessors */
uint8_t read_byte(CPUX86State *env1, target_ulong addr);
uint16_t read_word(CPUX86State *env1, target_ulong addr);
void write_byte(CPUX86State *env1, target_ulong addr, uint8_t val);
uint32_t read_dword(CPUX86State *env1, target_ulong addr);
void write_word(CPUX86State *env1, target_ulong addr, uint16_t val);
void write_dword(CPUX86State *env1, target_ulong addr, uint32_t val);
/* in helper.c */
int emulate_single_instr(CPUX86State *env1);
int get_ss_esp_from_tss_raw(CPUX86State *env1, uint32_t *ss_ptr, uint32_t *esp_ptr, int dpl);

/* Save/restore the host FPU state around raw-mode execution. */
void restore_raw_fp_state(CPUX86State *env, uint8_t *ptr);
void save_raw_fp_state(CPUX86State *env, uint8_t *ptr);

#endif
|
---|
942 |
|
---|
/* 4 KiB pages */
#define TARGET_PAGE_BITS 12

/* Map the target-independent CPU interface names used by the generic
   code (cpu-all.h etc.) onto the x86 implementations. */
#define CPUState CPUX86State
#define cpu_init cpu_x86_init
#define cpu_exec cpu_x86_exec
#define cpu_gen_code cpu_x86_gen_code
#define cpu_signal_handler cpu_x86_signal_handler
#define cpu_list x86_cpu_list

/* Version of the snapshot/savestate format for this CPU. */
#define CPU_SAVE_VERSION 7

/* MMU modes definitions: index 0 = kernel mapping, 1 = user mapping */
#define MMU_MODE0_SUFFIX _kernel
#define MMU_MODE1_SUFFIX _user
#define MMU_USER_IDX 1
|
---|
958 | static inline int cpu_mmu_index (CPUState *env)
|
---|
959 | {
|
---|
960 | return (env->hflags & HF_CPL_MASK) == 3 ? 1 : 0;
|
---|
961 | }
|
---|
962 |
|
---|
void optimize_flags_init(void);

/* Lazy EFLAGS evaluation: a pair of evaluator callbacks, presumably
   one table entry per CC_OP value — confirm in the translator. */
typedef struct CCTable {
    int (*compute_all)(void); /* return all the flags */
    int (*compute_c)(void); /* return the C flag */
} CCTable;

extern CCTable cc_table[];
|
---|
971 |
|
---|
#if defined(CONFIG_USER_ONLY)
/* Set up the child's registers after a clone()-style fork: install
   the new stack pointer when one was supplied, and make the syscall
   return 0 in the child (EAX = 0). */
static inline void cpu_clone_regs(CPUState *env, target_ulong newsp)
{
    if (newsp != 0) {
        env->regs[R_ESP] = newsp;
    }
    env->regs[R_EAX] = 0;
}
#endif
|
---|
980 |
|
---|
/* Recover EIP from a translation block: tb->pc is the linear address
   (base already added per the arithmetic here), so subtract cs_base. */
#define CPU_PC_FROM_TB(env, tb) env->eip = tb->pc - tb->cs_base

#include "cpu-all.h"

#include "svm.h"

#endif /* CPU_I386_H */
|
---|