VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/CPUMR0A.asm @ 20961

Last change on this file since 20961 was 20540, checked in by vboxsync, 15 years ago:

CPUMR0A.asm: More pushf;cli ... popf paranoia.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 11.6 KB
; $Id: CPUMR0A.asm 20540 2009-06-13 21:23:51Z vboxsync $
;; @file
; CPUM - Guest Context Assembly Routines.
;

;
; Copyright (C) 2006-2007 Sun Microsystems, Inc.
;
; This file is part of VirtualBox Open Source Edition (OSE), as
; available from http://www.virtualbox.org. This file is free software;
; you can redistribute it and/or modify it under the terms of the GNU
; General Public License (GPL) as published by the Free Software
; Foundation, in version 2 as it comes in the "COPYING" file of the
; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
;
; Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
; Clara, CA 95054 USA or visit http://www.sun.com if you need
; additional information or have any questions.
;

;*******************************************************************************
;*   Header Files                                                              *
;*******************************************************************************
%include "VBox/asmdefs.mac"
%include "VBox/vm.mac"
%include "VBox/err.mac"
%include "VBox/stam.mac"
%include "CPUMInternal.mac"
%include "VBox/x86.mac"
%include "VBox/cpum.mac"

%ifdef IN_RING3
 %error "The jump table doesn't link on leopard."
%endif


;*******************************************************************************
;*   External Symbols                                                          *
;*******************************************************************************
%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
extern NAME(SUPR0AbsIs64bit)
extern NAME(SUPR0Abs64bitKernelCS)
extern NAME(SUPR0Abs64bitKernelSS)
extern NAME(SUPR0Abs64bitKernelDS)
extern NAME(SUPR0AbsKernelCS)
%endif


;*******************************************************************************
;*   Global Variables                                                          *
;*******************************************************************************
%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
BEGINDATA
;;
; Store the SUPR0AbsIs64bit absolute value here so we can cmp/test without
; needing to clobber a register. (This trick doesn't quite work for PE btw.
; but that's not relevant atm.)
GLOBALNAME g_fCPUMIs64bitHost
    dd NAME(SUPR0AbsIs64bit)
%endif
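;
; How the routines below consume it (the loader presumably resolves
; NAME(SUPR0AbsIs64bit) to a non-zero value on 64-bit host kernels, which is
; what the memory compare relies on, no register clobbered):
;       cmp     byte [NAME(g_fCPUMIs64bitHost)], 0
;       jz      .legacy_mode            ; plain 32-bit host kernel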


BEGINCODE


;;
; Saves the host FPU/XMM state and restores the guest state.
;
; @returns  0
; @param    pCPUMCPU    x86:[esp+4] GCC:rdi MSC:rcx     CPUMCPU pointer
;
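; Rough C prototype, going by the @returns/@param notes above (a sketch; the
; DECLASM macro and the PCPUMCPU pointer type are assumed from the IPRT/CPUM
; headers):
;   DECLASM(int) cpumR0SaveHostRestoreGuestFPUState(PCPUMCPU pCPUMCPU);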
align 16
BEGINPROC cpumR0SaveHostRestoreGuestFPUState
%ifdef RT_ARCH_AMD64
 %ifdef RT_OS_WINDOWS
    mov     xDX, rcx
 %else
    mov     xDX, rdi
 %endif
%else
    mov     xDX, dword [esp + 4]
%endif
    pushf                               ; The darwin kernel can get upset or upset things if an
    cli                                 ; interrupt occurs while we're doing fxsave/fxrstor/cr0.

    ; Switch the state.
    or      dword [xDX + CPUMCPU.fUseFlags], (CPUM_USED_FPU | CPUM_USED_FPU_SINCE_REM)
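    ; CPUM_USED_FPU is the flag cpumR0SaveGuestRestoreHostFPUState and
    ; cpumR0RestoreHostFPUState test before doing anything; setting it here is
    ; what arms the later host-state restore.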

    mov     xAX, cr0                    ; Make sure it's safe to access the FPU state.
    mov     xCX, xAX                    ; save old CR0
    and     xAX, ~(X86_CR0_TS | X86_CR0_EM)
    mov     cr0, xAX                    ;; @todo optimize this.
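    ; Clearing CR0.TS and CR0.EM keeps the fxsave/fxrstor below from faulting:
    ; fxsave/fxrstor raise #NM when CR0.TS is set and #UD when CR0.EM is set.
    ; The original CR0 value (saved in xCX) is put back at .done.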

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
    cmp     byte [NAME(g_fCPUMIs64bitHost)], 0
    jz      .legacy_mode
    db      0xea                        ; jmp far .sixtyfourbit_mode
    dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
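    ; The db/dd pair above hand-assembles a direct far jump (opcode 0EAh,
    ; jmp ptr16:32) into the 64-bit kernel code segment; the target offset and
    ; the SUPR0Abs64bitKernelCS selector are absolute symbols fixed up at load
    ; time, presumably why the instruction isn't written as a jmp mnemonic.
    ; (The second dd emits two extra bytes, but they are never executed since
    ; the far jump is unconditional.) The same pattern recurs in the routines
    ; below.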
.legacy_mode:
%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL

    fxsave  [xDX + CPUMCPU.Host.fpu]    ; ASSUMES that all VT-x/AMD-V boxes sport fxsave/fxrstor (safe assumption)
    fxrstor [xDX + CPUMCPU.Guest.fpu]

.done:
    mov     cr0, xCX                    ; and restore old CR0 again ;; @todo optimize this.
    popf
    xor     eax, eax
    ret

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
ALIGNCODE(16)
BITS 64
.sixtyfourbit_mode:
    and     edx, 0ffffffffh
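    ; Writes to 32-bit registers only zero-extend while executing in 64-bit
    ; mode, so the upper half of rdx may still hold stale bits from the 32-bit
    ; caller; the and above clears them before rdx is used as a flat 64-bit
    ; address.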
    fxsave  [rdx + CPUMCPU.Host.fpu]
    fxrstor [rdx + CPUMCPU.Guest.fpu]
    jmp far [.fpret wrt rip]
.fpret:                                 ; 16:32 Pointer to .done.
    dd      .done, NAME(SUPR0AbsKernelCS)
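    ; The indirect far jump through the 16:32 pointer at .fpret switches back
    ; out of 64-bit mode into the regular kernel code segment (SUPR0AbsKernelCS)
    ; and resumes at .done above.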
BITS 32
%endif
ENDPROC cpumR0SaveHostRestoreGuestFPUState
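
; Pairing note (inferred from the CPUM_USED_FPU handling above and below): this
; routine marks the guest FPU state as loaded, and cpumR0SaveGuestRestoreHostFPUState
; further down is its counterpart; that one is a no-op unless CPUM_USED_FPU was
; set here.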


%ifndef RT_ARCH_AMD64
%ifdef VBOX_WITH_64_BITS_GUESTS
%ifndef VBOX_WITH_HYBRID_32BIT_KERNEL
;;
; Saves the host FPU/XMM state
;
; @returns  0
; @param    pCPUMCPU    x86:[esp+4] GCC:rdi MSC:rcx     CPUMCPU pointer
;
align 16
BEGINPROC cpumR0SaveHostFPUState
    mov     xDX, dword [esp + 4]
    pushf                               ; The darwin kernel can get upset or upset things if an
    cli                                 ; interrupt occurs while we're doing fxsave/fxrstor/cr0.

    ; Switch the state.
    or      dword [xDX + CPUMCPU.fUseFlags], (CPUM_USED_FPU | CPUM_USED_FPU_SINCE_REM)

    mov     xAX, cr0                    ; Make sure it's safe to access the FPU state.
    mov     xCX, xAX                    ; save old CR0
    and     xAX, ~(X86_CR0_TS | X86_CR0_EM)
    mov     cr0, xAX                    ;; @todo optimize this.

    fxsave  [xDX + CPUMCPU.Host.fpu]    ; ASSUMES that all VT-x/AMD-V boxes sport fxsave/fxrstor (safe assumption)

    mov     cr0, xCX                    ; and restore old CR0 again ;; @todo optimize this.
    popf
    xor     eax, eax
    ret
ENDPROC cpumR0SaveHostFPUState
%endif
%endif
%endif


;;
; Saves the guest FPU/XMM state and restores the host state.
;
; @returns  0
; @param    pCPUMCPU    x86:[esp+4] GCC:rdi MSC:rcx     CPUMCPU pointer
;
align 16
BEGINPROC cpumR0SaveGuestRestoreHostFPUState
%ifdef RT_ARCH_AMD64
 %ifdef RT_OS_WINDOWS
    mov     xDX, rcx
 %else
    mov     xDX, rdi
 %endif
%else
    mov     xDX, dword [esp + 4]
%endif

    ; Only restore FPU if guest has used it.
    ; Using fxrstor should ensure that we're not causing unwanted exceptions on the host.
    test    dword [xDX + CPUMCPU.fUseFlags], CPUM_USED_FPU
    jz      short .fpu_not_used

    pushf                               ; The darwin kernel can get upset or upset things if an
    cli                                 ; interrupt occurs while we're doing fxsave/fxrstor/cr0.

    mov     xAX, cr0                    ; Make sure it's safe to access the FPU state.
    mov     xCX, xAX                    ; save old CR0
    and     xAX, ~(X86_CR0_TS | X86_CR0_EM)
    mov     cr0, xAX                    ;; @todo optimize this.

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
    cmp     byte [NAME(g_fCPUMIs64bitHost)], 0
    jz      .legacy_mode
    db      0xea                        ; jmp far .sixtyfourbit_mode
    dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
.legacy_mode:
%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL

    fxsave  [xDX + CPUMCPU.Guest.fpu]   ; ASSUMES that all VT-x/AMD-V boxes sport fxsave/fxrstor (safe assumption)
    fxrstor [xDX + CPUMCPU.Host.fpu]

.done:
    mov     cr0, xCX                    ; and restore old CR0 again ;; @todo optimize this.
    and     dword [xDX + CPUMCPU.fUseFlags], ~CPUM_USED_FPU
    popf
.fpu_not_used:
    xor     eax, eax
    ret

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
ALIGNCODE(16)
BITS 64
.sixtyfourbit_mode:
    and     edx, 0ffffffffh
    fxsave  [rdx + CPUMCPU.Guest.fpu]
    fxrstor [rdx + CPUMCPU.Host.fpu]
    jmp far [.fpret wrt rip]
.fpret:                                 ; 16:32 Pointer to .done.
    dd      .done, NAME(SUPR0AbsKernelCS)
BITS 32
%endif
ENDPROC cpumR0SaveGuestRestoreHostFPUState


;;
; Sets the host's FPU/XMM state
;
; @returns  0
; @param    pCPUMCPU    x86:[esp+4] GCC:rdi MSC:rcx     CPUMCPU pointer
;
align 16
BEGINPROC cpumR0RestoreHostFPUState
%ifdef RT_ARCH_AMD64
 %ifdef RT_OS_WINDOWS
    mov     xDX, rcx
 %else
    mov     xDX, rdi
 %endif
%else
    mov     xDX, dword [esp + 4]
%endif

    ; Restore FPU if guest has used it.
    ; Using fxrstor should ensure that we're not causing unwanted exceptions on the host.
    test    dword [xDX + CPUMCPU.fUseFlags], CPUM_USED_FPU
    jz      short .fpu_not_used

    pushf                               ; The darwin kernel can get upset or upset things if an
    cli                                 ; interrupt occurs while we're doing fxsave/fxrstor/cr0.

    mov     xAX, cr0
    mov     xCX, xAX                    ; save old CR0
    and     xAX, ~(X86_CR0_TS | X86_CR0_EM)
    mov     cr0, xAX

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
    cmp     byte [NAME(g_fCPUMIs64bitHost)], 0
    jz      .legacy_mode
    db      0xea                        ; jmp far .sixtyfourbit_mode
    dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
.legacy_mode:
%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL

    fxrstor [xDX + CPUMCPU.Host.fpu]

.done:
    mov     cr0, xCX                    ; and restore old CR0 again
    and     dword [xDX + CPUMCPU.fUseFlags], ~CPUM_USED_FPU
    popf
.fpu_not_used:
    xor     eax, eax
    ret

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
ALIGNCODE(16)
BITS 64
.sixtyfourbit_mode:
    and     edx, 0ffffffffh
    fxrstor [rdx + CPUMCPU.Host.fpu]
    jmp far [.fpret wrt rip]
.fpret:                                 ; 16:32 Pointer to .done.
    dd      .done, NAME(SUPR0AbsKernelCS)
BITS 32
%endif
ENDPROC cpumR0RestoreHostFPUState


%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
;;
; DECLASM(void) cpumR0SaveDRx(uint64_t *pa4Regs);
;
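; Buffer layout implied by the stores below (a sketch; a4Regs is a hypothetical
; caller-side name): pa4Regs points at four consecutive uint64_t slots which
; receive DR0..DR3 at byte offsets 0, 8, 16 and 24, e.g.
;   uint64_t a4Regs[4];
;   cpumR0SaveDRx(&a4Regs[0]);          /* a4Regs[0]=DR0 ... a4Regs[3]=DR3 */
; Note that the legacy 32-bit path only writes the low dword of each slot.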
ALIGNCODE(16)
BEGINPROC cpumR0SaveDRx
%ifdef RT_ARCH_AMD64
 %ifdef ASM_CALL64_GCC
    mov     xCX, rdi
 %endif
%else
    mov     xCX, dword [esp + 4]
%endif
    pushf                               ; Just to be on the safe side.
    cli
%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
    cmp     byte [NAME(g_fCPUMIs64bitHost)], 0
    jz      .legacy_mode
    db      0xea                        ; jmp far .sixtyfourbit_mode
    dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
.legacy_mode:
%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL

    ;
    ; Do the job.
    ;
    mov     xAX, dr0
    mov     xDX, dr1
    mov     [xCX], xAX
    mov     [xCX + 8 * 1], xDX
    mov     xAX, dr2
    mov     xDX, dr3
    mov     [xCX + 8 * 2], xAX
    mov     [xCX + 8 * 3], xDX

.done:
    popf
    ret

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
ALIGNCODE(16)
BITS 64
.sixtyfourbit_mode:
    and     ecx, 0ffffffffh

    mov     rax, dr0
    mov     rdx, dr1
    mov     r8, dr2
    mov     r9, dr3
    mov     [rcx], rax
    mov     [rcx + 8 * 1], rdx
    mov     [rcx + 8 * 2], r8
    mov     [rcx + 8 * 3], r9
    jmp far [.fpret wrt rip]
.fpret:                                 ; 16:32 Pointer to .done.
    dd      .done, NAME(SUPR0AbsKernelCS)
BITS 32
%endif
ENDPROC cpumR0SaveDRx


;;
; DECLASM(void) cpumR0LoadDRx(uint64_t const *pa4Regs);
;
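; pa4Regs uses the same layout as for cpumR0SaveDRx above: four uint64_t slots
; holding the values to load into DR0..DR3 at byte offsets 0, 8, 16 and 24.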
ALIGNCODE(16)
BEGINPROC cpumR0LoadDRx
%ifdef RT_ARCH_AMD64
 %ifdef ASM_CALL64_GCC
    mov     xCX, rdi
 %endif
%else
    mov     xCX, dword [esp + 4]
%endif
    pushf                               ; Just to be on the safe side.
    cli
%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
    cmp     byte [NAME(g_fCPUMIs64bitHost)], 0
    jz      .legacy_mode
    db      0xea                        ; jmp far .sixtyfourbit_mode
    dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
.legacy_mode:
%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL

    ;
    ; Do the job.
    ;
    mov     xAX, [xCX]
    mov     xDX, [xCX + 8 * 1]
    mov     dr0, xAX
    mov     dr1, xDX
    mov     xAX, [xCX + 8 * 2]
    mov     xDX, [xCX + 8 * 3]
    mov     dr2, xAX
    mov     dr3, xDX

.done:
    popf
    ret

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
ALIGNCODE(16)
BITS 64
.sixtyfourbit_mode:
    and     ecx, 0ffffffffh

    mov     rax, [rcx]
    mov     rdx, [rcx + 8 * 1]
    mov     r8, [rcx + 8 * 2]
    mov     r9, [rcx + 8 * 3]
    mov     dr0, rax
    mov     dr1, rdx
    mov     dr2, r8
    mov     dr3, r9
    jmp far [.fpret wrt rip]
.fpret:                                 ; 16:32 Pointer to .done.
    dd      .done, NAME(SUPR0AbsKernelCS)
BITS 32
%endif
ENDPROC cpumR0LoadDRx

%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
