VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/CPUMR0A.asm@99208

Last change on this file since 99208 was 98103, checked in by vboxsync, 23 months ago

Copyright year updates by scm.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 11.7 KB
; $Id: CPUMR0A.asm 98103 2023-01-17 14:15:46Z vboxsync $
;; @file
; CPUM - Ring-0 Assembly Routines (supporting HM and IEM).
;

;
; Copyright (C) 2006-2023 Oracle and/or its affiliates.
;
; This file is part of VirtualBox base platform packages, as
; available from https://www.virtualbox.org.
;
; This program is free software; you can redistribute it and/or
; modify it under the terms of the GNU General Public License
; as published by the Free Software Foundation, in version 3 of the
; License.
;
; This program is distributed in the hope that it will be useful, but
; WITHOUT ANY WARRANTY; without even the implied warranty of
; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
; General Public License for more details.
;
; You should have received a copy of the GNU General Public License
; along with this program; if not, see <https://www.gnu.org/licenses>.
;
; SPDX-License-Identifier: GPL-3.0-only
;


;*******************************************************************************
;*  Header Files                                                               *
;*******************************************************************************
%define RT_ASM_WITH_SEH64
%include "iprt/asmdefs.mac"
%include "VBox/asmdefs.mac"
%include "VBox/vmm/vm.mac"
%include "VBox/err.mac"
%include "VBox/vmm/stam.mac"
%include "CPUMInternal.mac"
%include "iprt/x86.mac"
%include "VBox/vmm/cpum.mac"


BEGINCODE

;;
; Makes sure the EMTs have a FPU state associated with them on hosts where we're
; allowed to use it in ring-0 too.
;
; This ensures that we don't have to allocate the state lazily while trying to execute
; guest code with preemption disabled or worse.
;
; @cproto VMMR0_INT_DECL(void) CPUMR0RegisterVCpuThread(PVMCPU pVCpu);
;
BEGINPROC CPUMR0RegisterVCpuThread
        push    xBP
        SEH64_PUSH_xBP
        mov     xBP, xSP
        SEH64_SET_FRAME_xBP 0
SEH64_END_PROLOGUE

%ifdef VMM_R0_TOUCH_FPU
        movdqa  xmm0, xmm0              ; hope this is harmless.
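        ; The touch above is what (per the function description) gets this EMT an
        ; FPU state associated with it up front, so it does not have to be
        ; allocated lazily later while running guest code with preemption disabled.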
%endif

.return:
        xor     eax, eax                ; paranoia
        leave
        ret
ENDPROC CPUMR0RegisterVCpuThread
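
; (Usage sketch, inferred only from the @cproto and description above: ring-0 code
;  would call this once per EMT, e.g. "CPUMR0RegisterVCpuThread(pVCpu);"; there is
;  no return value to check since the @cproto returns void.)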


%ifdef VMM_R0_TOUCH_FPU
;;
; Touches the host FPU state.
;
; @uses nothing (well, maybe cr0)
;
 %ifndef RT_ASM_WITH_SEH64 ; workaround for yasm 1.3.0 bug (error: prologue -1 bytes, must be <256)
ALIGNCODE(16)
 %endif
BEGINPROC CPUMR0TouchHostFpu
        push    xBP
        SEH64_PUSH_xBP
        mov     xBP, xSP
        SEH64_SET_FRAME_xBP 0
SEH64_END_PROLOGUE

        movdqa  xmm0, xmm0              ; Hope this is harmless.

        leave
        ret
ENDPROC CPUMR0TouchHostFpu
%endif ; VMM_R0_TOUCH_FPU


;;
; Saves the host FPU/SSE/AVX state and restores the guest FPU/SSE/AVX state.
;
; @returns  VINF_SUCCESS (0) or VINF_CPUM_HOST_CR0_MODIFIED. (EAX)
; @param    pCpumCpu    x86:[ebp+8] gcc:rdi msc:rcx    CPUMCPU pointer
;
; @remarks  64-bit Windows drivers shouldn't use AVX registers without saving+loading:
;               https://msdn.microsoft.com/en-us/library/windows/hardware/ff545910%28v=vs.85%29.aspx?f=255&MSPPError=-2147217396
;           However the compiler docs have a different idea:
;               https://msdn.microsoft.com/en-us/library/9z1stfyw.aspx
;           We'll go with the former for now.
;
%ifndef RT_ASM_WITH_SEH64 ; workaround for yasm 1.3.0 bug (error: prologue -1 bytes, must be <256)
ALIGNCODE(16)
%endif
BEGINPROC cpumR0SaveHostRestoreGuestFPUState
        push    xBP
        SEH64_PUSH_xBP
        mov     xBP, xSP
        SEH64_SET_FRAME_xBP 0
SEH64_END_PROLOGUE

        ;
        ; Prologue - xAX+xDX must be free for XSAVE/XRSTOR input.
        ;
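        ; (XSAVE and XRSTOR take their state-component bitmap in EDX:EAX with the
        ;  save area as the memory operand, e.g. "xsave [mem]", which is why the
        ;  CPUMR0_SAVE_*/CPUMR0_LOAD_* macros used below need xAX and xDX free as
        ;  scratch.)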
%ifdef RT_ARCH_AMD64
 %ifdef RT_OS_WINDOWS
        mov     r11, rcx
 %else
        mov     r11, rdi
 %endif
 %define pCpumCpu   r11
 %define pXState    r10
%else
        push    ebx
        push    esi
        mov     ebx, dword [ebp + 8]
 %define pCpumCpu   ebx
 %define pXState    esi
%endif

        pushf                           ; The darwin kernel can get upset or upset things if an
        cli                             ; interrupt occurs while we're doing fxsave/fxrstor/cr0.

        ;
        ; Save the host state.
        ;
        test    dword [pCpumCpu + CPUMCPU.fUseFlags], CPUM_USED_FPU_HOST
        jnz     .already_saved_host

        CPUMRZ_TOUCH_FPU_CLEAR_CR0_FPU_TRAPS_SET_RC xCX, xAX, pCpumCpu ; xCX is the return value for VT-x; xAX is scratch.
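        ; (Going by the macro name and the @returns note above: the CR0.TS/CR0.EM
        ;  trap bits are cleared so the FPU state switching below won't fault, and
        ;  xCX is set to VINF_SUCCESS or VINF_CPUM_HOST_CR0_MODIFIED depending on
        ;  whether CR0 had to be changed.)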

        CPUMR0_SAVE_HOST

%ifdef VBOX_WITH_KERNEL_USING_XMM
        jmp     .load_guest
%endif
.already_saved_host:
%ifdef VBOX_WITH_KERNEL_USING_XMM
        ; If we didn't save the host state, we must save the non-volatile XMM registers.
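        ; (xmm6 thru xmm15 are the callee-saved/non-volatile XMM registers in the
        ;  Windows x64 calling convention, presumably the configuration that
        ;  VBOX_WITH_KERNEL_USING_XMM targets; xmm0 thru xmm5 are volatile and need
        ;  no saving here.)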
        lea     pXState, [pCpumCpu + CPUMCPU.Host.XState]
        stmxcsr [pXState + X86FXSTATE.MXCSR]
        movdqa  [pXState + X86FXSTATE.xmm6 ], xmm6
        movdqa  [pXState + X86FXSTATE.xmm7 ], xmm7
        movdqa  [pXState + X86FXSTATE.xmm8 ], xmm8
        movdqa  [pXState + X86FXSTATE.xmm9 ], xmm9
        movdqa  [pXState + X86FXSTATE.xmm10], xmm10
        movdqa  [pXState + X86FXSTATE.xmm11], xmm11
        movdqa  [pXState + X86FXSTATE.xmm12], xmm12
        movdqa  [pXState + X86FXSTATE.xmm13], xmm13
        movdqa  [pXState + X86FXSTATE.xmm14], xmm14
        movdqa  [pXState + X86FXSTATE.xmm15], xmm15

        ;
        ; Load the guest state.
        ;
.load_guest:
%endif
        CPUMR0_LOAD_GUEST

%ifdef VBOX_WITH_KERNEL_USING_XMM
        ; Restore the non-volatile xmm registers. ASSUMING 64-bit host.
        lea     pXState, [pCpumCpu + CPUMCPU.Host.XState]
        movdqa  xmm6,  [pXState + X86FXSTATE.xmm6]
        movdqa  xmm7,  [pXState + X86FXSTATE.xmm7]
        movdqa  xmm8,  [pXState + X86FXSTATE.xmm8]
        movdqa  xmm9,  [pXState + X86FXSTATE.xmm9]
        movdqa  xmm10, [pXState + X86FXSTATE.xmm10]
        movdqa  xmm11, [pXState + X86FXSTATE.xmm11]
        movdqa  xmm12, [pXState + X86FXSTATE.xmm12]
        movdqa  xmm13, [pXState + X86FXSTATE.xmm13]
        movdqa  xmm14, [pXState + X86FXSTATE.xmm14]
        movdqa  xmm15, [pXState + X86FXSTATE.xmm15]
        ldmxcsr [pXState + X86FXSTATE.MXCSR]
%endif

        or      dword [pCpumCpu + CPUMCPU.fUseFlags], (CPUM_USED_FPU_GUEST | CPUM_USED_FPU_SINCE_REM | CPUM_USED_FPU_HOST)
        mov     byte [pCpumCpu + CPUMCPU.Guest.fUsedFpuGuest], 1
        popf

        mov     eax, ecx
.return:
%ifdef RT_ARCH_X86
        pop     esi
        pop     ebx
%endif
        leave
        ret
ENDPROC cpumR0SaveHostRestoreGuestFPUState


;;
; Saves the guest FPU/SSE/AVX state and restores the host FPU/SSE/AVX state.
;
; @param    pCpumCpu    x86:[ebp+8] gcc:rdi msc:rcx    CPUMCPU pointer
;
; @remarks  64-bit Windows drivers shouldn't use AVX registers without saving+loading:
;               https://msdn.microsoft.com/en-us/library/windows/hardware/ff545910%28v=vs.85%29.aspx?f=255&MSPPError=-2147217396
;           However the compiler docs have a different idea:
;               https://msdn.microsoft.com/en-us/library/9z1stfyw.aspx
;           We'll go with the former for now.
;
%ifndef RT_ASM_WITH_SEH64 ; workaround for yasm 1.3.0 bug (error: prologue -1 bytes, must be <256)
ALIGNCODE(16)
%endif
BEGINPROC cpumR0SaveGuestRestoreHostFPUState
        push    xBP
        SEH64_PUSH_xBP
        mov     xBP, xSP
        SEH64_SET_FRAME_xBP 0
SEH64_END_PROLOGUE

        ;
        ; Prologue - xAX+xDX must be free for XSAVE/XRSTOR input.
        ;
%ifdef RT_ARCH_AMD64
 %ifdef RT_OS_WINDOWS
        mov     r11, rcx
 %else
        mov     r11, rdi
 %endif
 %define pCpumCpu   r11
 %define pXState    r10
%else
        push    ebx
        push    esi
        mov     ebx, dword [ebp + 8]
 %define pCpumCpu   ebx
 %define pXState    esi
%endif
        pushf                           ; The darwin kernel can get upset or upset things if an
        cli                             ; interrupt occurs while we're doing fxsave/fxrstor/cr0.

 %ifdef VBOX_WITH_KERNEL_USING_XMM
        ;
        ; Copy non-volatile XMM registers to the host state so we can use
        ; them while saving the guest state (we've gotta do this anyway).
        ;
        lea     pXState, [pCpumCpu + CPUMCPU.Host.XState]
        stmxcsr [pXState + X86FXSTATE.MXCSR]
        movdqa  [pXState + X86FXSTATE.xmm6], xmm6
        movdqa  [pXState + X86FXSTATE.xmm7], xmm7
        movdqa  [pXState + X86FXSTATE.xmm8], xmm8
        movdqa  [pXState + X86FXSTATE.xmm9], xmm9
        movdqa  [pXState + X86FXSTATE.xmm10], xmm10
        movdqa  [pXState + X86FXSTATE.xmm11], xmm11
        movdqa  [pXState + X86FXSTATE.xmm12], xmm12
        movdqa  [pXState + X86FXSTATE.xmm13], xmm13
        movdqa  [pXState + X86FXSTATE.xmm14], xmm14
        movdqa  [pXState + X86FXSTATE.xmm15], xmm15
 %endif

        ;
        ; Save the guest state if necessary.
        ;
        test    dword [pCpumCpu + CPUMCPU.fUseFlags], CPUM_USED_FPU_GUEST
        jz      .load_only_host

 %ifdef VBOX_WITH_KERNEL_USING_XMM
        ; Load the guest XMM register values we already saved in HMR0VMXStartVMWrapXMM.
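        ; (Reloading all sixteen registers and MXCSR first means the
        ;  CPUMR0_SAVE_GUEST below stores the actual guest values instead of
        ;  whatever ring-0 currently has in those registers.)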
        lea     pXState, [pCpumCpu + CPUMCPU.Guest.XState]
        movdqa  xmm0,  [pXState + X86FXSTATE.xmm0]
        movdqa  xmm1,  [pXState + X86FXSTATE.xmm1]
        movdqa  xmm2,  [pXState + X86FXSTATE.xmm2]
        movdqa  xmm3,  [pXState + X86FXSTATE.xmm3]
        movdqa  xmm4,  [pXState + X86FXSTATE.xmm4]
        movdqa  xmm5,  [pXState + X86FXSTATE.xmm5]
        movdqa  xmm6,  [pXState + X86FXSTATE.xmm6]
        movdqa  xmm7,  [pXState + X86FXSTATE.xmm7]
        movdqa  xmm8,  [pXState + X86FXSTATE.xmm8]
        movdqa  xmm9,  [pXState + X86FXSTATE.xmm9]
        movdqa  xmm10, [pXState + X86FXSTATE.xmm10]
        movdqa  xmm11, [pXState + X86FXSTATE.xmm11]
        movdqa  xmm12, [pXState + X86FXSTATE.xmm12]
        movdqa  xmm13, [pXState + X86FXSTATE.xmm13]
        movdqa  xmm14, [pXState + X86FXSTATE.xmm14]
        movdqa  xmm15, [pXState + X86FXSTATE.xmm15]
        ldmxcsr [pXState + X86FXSTATE.MXCSR]
 %endif
        CPUMR0_SAVE_GUEST

        ;
        ; Load the host state.
        ;
.load_only_host:
        CPUMR0_LOAD_HOST

        ; Restore the CR0 value we saved in cpumR0SaveHostRestoreGuestFPUState or
        ; in cpumRZSaveHostFPUState.
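        ; Going by the macro name, CR0 is only written back when that saved value
        ; has the TS or EM trap bits set, i.e. when the earlier save path actually
        ; had to clear them.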
        mov     xCX, [pCpumCpu + CPUMCPU.Host.cr0Fpu]
        CPUMRZ_RESTORE_CR0_IF_TS_OR_EM_SET xCX
        and     dword [pCpumCpu + CPUMCPU.fUseFlags], ~(CPUM_USED_FPU_GUEST | CPUM_USED_FPU_HOST)
        mov     byte [pCpumCpu + CPUMCPU.Guest.fUsedFpuGuest], 0

        popf
%ifdef RT_ARCH_X86
        pop     esi
        pop     ebx
%endif
        leave
        ret
%undef pCpumCpu
%undef pXState
ENDPROC cpumR0SaveGuestRestoreHostFPUState


%if ARCH_BITS == 32
 %ifdef VBOX_WITH_64_BITS_GUESTS
;;
; Restores the host's FPU/SSE/AVX state from pCpumCpu->Host.
;
; @param    pCpumCpu    x86:[ebp+8] gcc:rdi msc:rcx    CPUMCPU pointer
;
  %ifndef RT_ASM_WITH_SEH64 ; workaround for yasm 1.3.0 bug (error: prologue -1 bytes, must be <256)
ALIGNCODE(16)
  %endif
BEGINPROC cpumR0RestoreHostFPUState
        ;
        ; Prologue - xAX+xDX must be free for XSAVE/XRSTOR input.
        ;
        push    ebp
        mov     ebp, esp
        push    ebx
        push    esi
        mov     ebx, dword [ebp + 8]
  %define pCpumCpu   ebx
  %define pXState    esi

        ;
        ; Restore host CPU state.
        ;
        pushf                           ; The darwin kernel can get upset or upset things if an
        cli                             ; interrupt occurs while we're doing fxsave/fxrstor/cr0.

        CPUMR0_LOAD_HOST

        ; Restore the CR0 value we saved in cpumR0SaveHostRestoreGuestFPUState or
        ; in cpumRZSaveHostFPUState.
        ;; @todo What about XCR0?
        mov     xCX, [pCpumCpu + CPUMCPU.Host.cr0Fpu]
        CPUMRZ_RESTORE_CR0_IF_TS_OR_EM_SET xCX

        and     dword [pCpumCpu + CPUMCPU.fUseFlags], ~CPUM_USED_FPU_HOST
        popf

        pop     esi
        pop     ebx
        leave
        ret
  %undef pCpumCpu
  %undef pXState
ENDPROC cpumR0RestoreHostFPUState
 %endif ; VBOX_WITH_64_BITS_GUESTS
%endif ; ARCH_BITS == 32