VirtualBox

source: vbox/trunk/src/VBox/VMM/include/CPUMInternal.mac@97287

Last change on this file: r97287, checked in by vboxsync, 2 years ago:

VMM/cpum: Moved fExtrn up into the same cache line as rip, eflags, and crX.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 24.1 KB
; $Id: CPUMInternal.mac 97287 2022-10-24 23:20:42Z vboxsync $
;; @file
; CPUM - Internal header file (asm).
;

;
; Copyright (C) 2006-2022 Oracle and/or its affiliates.
;
; This file is part of VirtualBox base platform packages, as
; available from https://www.virtualbox.org.
;
; This program is free software; you can redistribute it and/or
; modify it under the terms of the GNU General Public License
; as published by the Free Software Foundation, in version 3 of the
; License.
;
; This program is distributed in the hope that it will be useful, but
; WITHOUT ANY WARRANTY; without even the implied warranty of
; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
; General Public License for more details.
;
; You should have received a copy of the GNU General Public License
; along with this program; if not, see <https://www.gnu.org/licenses>.
;
; SPDX-License-Identifier: GPL-3.0-only
;

%include "VBox/asmdefs.mac"
%include "VBox/vmm/cpum.mac"

;; Check sanity.
%ifdef VBOX_WITH_KERNEL_USING_XMM
 %ifndef IN_RING0
  %error "What? We've got code assuming VBOX_WITH_KERNEL_USING_XMM is only defined in ring-0!"
 %endif
%endif

;; For numeric expressions
%ifdef RT_ARCH_AMD64
 %define CPUM_IS_AMD64 1
%else
 %define CPUM_IS_AMD64 0
%endif


;;
; CPU info
struc CPUMINFO
    .cMsrRanges             resd 1      ; uint32_t
    .fMsrMask               resd 1      ; uint32_t
    .fMxCsrMask             resd 1      ; uint32_t
    .cCpuIdLeaves           resd 1      ; uint32_t
    .iFirstExtCpuIdLeaf     resd 1      ; uint32_t
    .enmUnknownCpuIdMethod  resd 1      ; CPUMUNKNOWNCPUID
    .DefCpuId               resb CPUMCPUID_size     ; CPUMCPUID
    .uScalableBusFreq       resq 1      ; uint64_t
    .paMsrRangesR3          RTR3PTR_RES 1           ; R3PTRTYPE(PCPUMMSRRANGE)
    .paCpuIdLeavesR3        RTR3PTR_RES 1           ; R3PTRTYPE(PCPUMCPUIDLEAF)
    .aCpuIdLeaves           resb 256*32
    .aMsrRanges             resb 8192*128
endstruc
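
; Illustrative usage sketch (not part of the original file): addressing
; CPUMINFO fields via the struc offsets above. The choice of rbx as the
; CPUMINFO pointer is an assumption made only for this example.
%if 0 ; Example only, excluded from the build.
        mov     ecx, [rbx + CPUMINFO.cCpuIdLeaves]      ; Number of CPUID leaves.
        mov     rax, [rbx + CPUMINFO.uScalableBusFreq]  ; Scalable bus frequency.
%endif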


%define CPUM_USED_FPU_HOST              RT_BIT(0)
%define CPUM_USED_FPU_GUEST             RT_BIT(10)
%define CPUM_USED_FPU_SINCE_REM         RT_BIT(1)
%define CPUM_USED_MANUAL_XMM_RESTORE    RT_BIT(2)
%define CPUM_USE_SYSENTER               RT_BIT(3)
%define CPUM_USE_SYSCALL                RT_BIT(4)
%define CPUM_USE_DEBUG_REGS_HOST        RT_BIT(5)
%define CPUM_USED_DEBUG_REGS_HOST       RT_BIT(6)
%define CPUM_USE_DEBUG_REGS_HYPER       RT_BIT(7)
%define CPUM_USED_DEBUG_REGS_HYPER      RT_BIT(8)
%define CPUM_USED_DEBUG_REGS_GUEST      RT_BIT(9)
%define CPUM_USE_FFXSR_LEAKY            RT_BIT(19)
%define CPUM_USE_SUPPORTS_LONGMODE      RT_BIT(20)
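
; Illustrative usage sketch (not part of the original file): testing one of
; the fUseFlags bits defined above. Assumes xDX points to a CPUMCPU, as in
; the CLEANFPU macro further down; the target label is a placeholder.
%if 0 ; Example only, excluded from the build.
        test    dword [xDX + CPUMCPU.fUseFlags], CPUM_USED_FPU_GUEST
        jnz     .guest_fpu_resident     ; Guest FPU state is loaded on the CPU.
%endif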


struc CPUM
    ;...
    .fHostUseFlags          resd 1

    ; CR4 masks
    .CR4.AndMask            resd 1
    .CR4.OrMask             resd 1
    .u8PortableCpuIdLevel   resb 1
    .fPendingRestore        resb 1

    alignb 8
    .fXStateGuestMask       resq 1
    .fXStateHostMask        resq 1

    alignb 64
    .HostFeatures           resb 48
    .GuestFeatures          resb 48
    .GuestInfo              resb CPUMINFO_size

    ; Patch manager saved state compatibility CPUID leaf arrays
    .aGuestCpuIdPatmStd     resb 16*6
    .aGuestCpuIdPatmExt     resb 16*10
    .aGuestCpuIdPatmCentaur resb 16*4

    alignb 8
    .cMsrWrites                 resq 1
    .cMsrWritesToIgnoredBits    resq 1
    .cMsrWritesRaiseGp          resq 1
    .cMsrWritesUnknown          resq 1
    .cMsrReads                  resq 1
    .cMsrReadsRaiseGp           resq 1
    .cMsrReadsUnknown           resq 1
endstruc

struc CPUMCPU
    ;
    ; Guest context state
    ;
    .Guest                  resq 0
    .Guest.eax              resq 1
    .Guest.ecx              resq 1
    .Guest.edx              resq 1
    .Guest.ebx              resq 1
    .Guest.esp              resq 1
    .Guest.ebp              resq 1
    .Guest.esi              resq 1
    .Guest.edi              resq 1
    .Guest.r8               resq 1
    .Guest.r9               resq 1
    .Guest.r10              resq 1
    .Guest.r11              resq 1
    .Guest.r12              resq 1
    .Guest.r13              resq 1
    .Guest.r14              resq 1
    .Guest.r15              resq 1
    .Guest.es.Sel           resw 1
    .Guest.es.PaddingSel    resw 1
    .Guest.es.ValidSel      resw 1
    .Guest.es.fFlags        resw 1
    .Guest.es.u64Base       resq 1
    .Guest.es.u32Limit      resd 1
    .Guest.es.Attr          resd 1
    .Guest.cs.Sel           resw 1
    .Guest.cs.PaddingSel    resw 1
    .Guest.cs.ValidSel      resw 1
    .Guest.cs.fFlags        resw 1
    .Guest.cs.u64Base       resq 1
    .Guest.cs.u32Limit      resd 1
    .Guest.cs.Attr          resd 1
    .Guest.ss.Sel           resw 1
    .Guest.ss.PaddingSel    resw 1
    .Guest.ss.ValidSel      resw 1
    .Guest.ss.fFlags        resw 1
    .Guest.ss.u64Base       resq 1
    .Guest.ss.u32Limit      resd 1
    .Guest.ss.Attr          resd 1
    .Guest.ds.Sel           resw 1
    .Guest.ds.PaddingSel    resw 1
    .Guest.ds.ValidSel      resw 1
    .Guest.ds.fFlags        resw 1
    .Guest.ds.u64Base       resq 1
    .Guest.ds.u32Limit      resd 1
    .Guest.ds.Attr          resd 1
    .Guest.fs.Sel           resw 1
    .Guest.fs.PaddingSel    resw 1
    .Guest.fs.ValidSel      resw 1
    .Guest.fs.fFlags        resw 1
    .Guest.fs.u64Base       resq 1
    .Guest.fs.u32Limit      resd 1
    .Guest.fs.Attr          resd 1
    .Guest.gs.Sel           resw 1
    .Guest.gs.PaddingSel    resw 1
    .Guest.gs.ValidSel      resw 1
    .Guest.gs.fFlags        resw 1
    .Guest.gs.u64Base       resq 1
    .Guest.gs.u32Limit      resd 1
    .Guest.gs.Attr          resd 1
    .Guest.ldtr.Sel         resw 1
    .Guest.ldtr.PaddingSel  resw 1
    .Guest.ldtr.ValidSel    resw 1
    .Guest.ldtr.fFlags      resw 1
    .Guest.ldtr.u64Base     resq 1
    .Guest.ldtr.u32Limit    resd 1
    .Guest.ldtr.Attr        resd 1
    .Guest.tr.Sel           resw 1
    .Guest.tr.PaddingSel    resw 1
    .Guest.tr.ValidSel      resw 1
    .Guest.tr.fFlags        resw 1
    .Guest.tr.u64Base       resq 1
    .Guest.tr.u32Limit      resd 1
    .Guest.tr.Attr          resd 1
    alignb 8
    .Guest.eip              resq 1
    .Guest.eflags           resq 1
    .Guest.fExtrn           resq 1
    .Guest.uRipInhibitInt   resq 1
    .Guest.cr0              resq 1
    .Guest.cr2              resq 1
    .Guest.cr3              resq 1
    .Guest.cr4              resq 1
    .Guest.dr               resq 8
    .Guest.gdtrPadding      resw 3
    .Guest.gdtr             resw 0
    .Guest.gdtr.cbGdt       resw 1
    .Guest.gdtr.pGdt        resq 1
    .Guest.idtrPadding      resw 3
    .Guest.idtr             resw 0
    .Guest.idtr.cbIdt       resw 1
    .Guest.idtr.pIdt        resq 1
    .Guest.SysEnter.cs      resb 8
    .Guest.SysEnter.eip     resb 8
    .Guest.SysEnter.esp     resb 8
    .Guest.msrEFER          resb 8
    .Guest.msrSTAR          resb 8
    .Guest.msrPAT           resb 8
    .Guest.msrLSTAR         resb 8
    .Guest.msrCSTAR         resb 8
    .Guest.msrSFMASK        resb 8
    .Guest.msrKERNELGSBASE  resb 8

    alignb 32
    .Guest.aPaePdpes        resq 4

    alignb 8
    .Guest.aXcr             resq 2
    .Guest.fXStateMask      resq 1
    .Guest.fUsedFpuGuest    resb 1
    alignb 8
    .Guest.aoffXState       resw 64
    alignb 256
    .Guest.abXState         resb 0x4000-0x300
    .Guest.XState           EQU .Guest.abXState

;;
    alignb 4096
    .Guest.hwvirt           resb 0
    .Guest.hwvirt.svm       resb 0
    .Guest.hwvirt.vmx       resb 0

    .Guest.hwvirt.svm.Vmcb                  EQU .Guest.hwvirt.svm
    .Guest.hwvirt.svm.abMsrBitmap           EQU (.Guest.hwvirt.svm.Vmcb + 0x1000)
    .Guest.hwvirt.svm.abIoBitmap            EQU (.Guest.hwvirt.svm.abMsrBitmap + 0x2000)
    .Guest.hwvirt.svm.uMsrHSavePa           EQU (.Guest.hwvirt.svm.abIoBitmap + 0x3000)       ; resq 1
    .Guest.hwvirt.svm.GCPhysVmcb            EQU (.Guest.hwvirt.svm.uMsrHSavePa + 8)           ; resq 1
    alignb 8
    .Guest.hwvirt.svm.HostState             EQU (.Guest.hwvirt.svm.GCPhysVmcb + 8)            ; resb 184
    .Guest.hwvirt.svm.uPrevPauseTick        EQU (.Guest.hwvirt.svm.HostState + 184)           ; resq 1
    .Guest.hwvirt.svm.cPauseFilter          EQU (.Guest.hwvirt.svm.uPrevPauseTick + 8)        ; resw 1
    .Guest.hwvirt.svm.cPauseFilterThreshold EQU (.Guest.hwvirt.svm.cPauseFilter + 2)          ; resw 1
    .Guest.hwvirt.svm.fInterceptEvents      EQU (.Guest.hwvirt.svm.cPauseFilterThreshold + 2) ; resb 1

    .Guest.hwvirt.vmx.Vmcs                  resb 0x1000
    .Guest.hwvirt.vmx.ShadowVmcs            resb 0x1000
    .Guest.hwvirt.vmx.abVmreadBitmap        resb 0x1000
    .Guest.hwvirt.vmx.abVmwriteBitmap       resb 0x1000
    .Guest.hwvirt.vmx.aEntryMsrLoadArea     resb 0x2000
    .Guest.hwvirt.vmx.aExitMsrStoreArea     resb 0x2000
    .Guest.hwvirt.vmx.aExitMsrLoadArea      resb 0x2000
    .Guest.hwvirt.vmx.abMsrBitmap           resb 0x1000
    .Guest.hwvirt.vmx.abIoBitmap            resb 0x1000+0x1000
    alignb 8
    .Guest.hwvirt.vmx.GCPhysVmxon           resq 1
    .Guest.hwvirt.vmx.GCPhysVmcs            resq 1
    .Guest.hwvirt.vmx.GCPhysShadowVmcs      resq 1
    .Guest.hwvirt.vmx.enmDiag               resd 1
    .Guest.hwvirt.vmx.enmAbort              resd 1
    .Guest.hwvirt.vmx.uDiagAux              resq 1
    .Guest.hwvirt.vmx.uAbortAux             resd 1
    .Guest.hwvirt.vmx.fInVmxRootMode        resb 1
    .Guest.hwvirt.vmx.fInVmxNonRootMode     resb 1
    .Guest.hwvirt.vmx.fInterceptEvents      resb 1
    .Guest.hwvirt.vmx.fNmiUnblockingIret    resb 1
    .Guest.hwvirt.vmx.uFirstPauseLoopTick   resq 1
    .Guest.hwvirt.vmx.uPrevPauseTick        resq 1
    .Guest.hwvirt.vmx.uEntryTick            resq 1
    .Guest.hwvirt.vmx.offVirtApicWrite      resw 1
    .Guest.hwvirt.vmx.fVirtNmiBlocking      resb 1
    alignb 8
    .Guest.hwvirt.vmx.Msrs                  resb 224

    alignb 8
    .Guest.hwvirt.enmHwvirt                 resd 1
    .Guest.hwvirt.fGif                      resb 1
    alignb 4
    .Guest.hwvirt.fSavedInhibit             resd 1
    alignb 64

    .GuestMsrs              resq 0
    .GuestMsrs.au64         resq 64

    ;
    ; Other stuff.
    ;
    .hNestedVmxPreemptTimer resq 1

    .fUseFlags              resd 1
    .fChanged               resd 1
    .u32RetCode             resd 1
    .fCpuIdApicFeatureVisible resb 1

    ;
    ; Host context state
    ;
    alignb 64
    .Host                   resb 0
    .Host.abXState          resb 0x4000-0x300
    .Host.XState            EQU .Host.abXState
    ;.Host.rax              resq 1 - scratch
    .Host.rbx               resq 1
    ;.Host.rcx              resq 1 - scratch
    ;.Host.rdx              resq 1 - scratch
    .Host.rdi               resq 1
    .Host.rsi               resq 1
    .Host.rbp               resq 1
    .Host.rsp               resq 1
    ;.Host.r8               resq 1 - scratch
    ;.Host.r9               resq 1 - scratch
    .Host.r10               resq 1
    .Host.r11               resq 1
    .Host.r12               resq 1
    .Host.r13               resq 1
    .Host.r14               resq 1
    .Host.r15               resq 1
    ;.Host.rip              resd 1 - scratch
    .Host.rflags            resq 1
    .Host.ss                resw 1
    .Host.ssPadding         resw 1
    .Host.gs                resw 1
    .Host.gsPadding         resw 1
    .Host.fs                resw 1
    .Host.fsPadding         resw 1
    .Host.es                resw 1
    .Host.esPadding         resw 1
    .Host.ds                resw 1
    .Host.dsPadding         resw 1
    .Host.cs                resw 1
    .Host.csPadding         resw 1

    .Host.cr0Fpu:
    .Host.cr0               resq 1
    ;.Host.cr2              resq 1 - scratch
    .Host.cr3               resq 1
    .Host.cr4               resq 1
    .Host.cr8               resq 1

    .Host.dr0               resq 1
    .Host.dr1               resq 1
    .Host.dr2               resq 1
    .Host.dr3               resq 1
    .Host.dr6               resq 1
    .Host.dr7               resq 1

    .Host.gdtr              resb 10     ; GDT limit + linear address
    .Host.gdtrPadding       resw 1
    .Host.idtr              resb 10     ; IDT limit + linear address
    .Host.idtrPadding       resw 1
    .Host.ldtr              resw 1
    .Host.ldtrPadding       resw 1
    .Host.tr                resw 1
    .Host.trPadding         resw 1

    .Host.SysEnter.cs       resq 1
    .Host.SysEnter.eip      resq 1
    .Host.SysEnter.esp      resq 1
    .Host.FSbase            resq 1
    .Host.GSbase            resq 1
    .Host.efer              resq 1
    alignb 8
    .Host.xcr0              resq 1
    .Host.fXStateMask       resq 1

    ;
    ; Hypervisor Context.
    ;
    alignb 64
    .Hyper                  resq 0
    .Hyper.dr               resq 8
    .Hyper.cr3              resq 1
    alignb 64

%ifdef VBOX_WITH_CRASHDUMP_MAGIC
    .aMagic                 resb 56
    .uMagic                 resq 1
%endif
endstruc
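
; Illustrative usage sketch (not part of the original file): reading guest
; state through the CPUMCPU offsets above. rsi as the CPUMCPU pointer is an
; assumption for the example; note that .Guest.eip is reserved as a full
; qword.
%if 0 ; Example only, excluded from the build.
        mov     rax, [rsi + CPUMCPU.Guest.cr3]          ; Current guest CR3.
        mov     rdx, [rsi + CPUMCPU.Guest.eip]          ; Guest instruction pointer (64-bit field).
%endif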



%if 0 ; Currently not used anywhere.
;;
; Macro for FXSAVE/FXRSTOR leaky behaviour on AMD CPUs, see cpumR3CheckLeakyFpu().
;
; Cleans the FPU state, if necessary, before restoring the FPU.
;
; This macro ASSUMES CR0.TS is not set!
;
; @param xDX  Pointer to CPUMCPU.
; @uses xAX, EFLAGS
;
; Changes here should also be reflected in CPUMRCA.asm's copy!
;
%macro CLEANFPU 0
        test    dword [xDX + CPUMCPU.fUseFlags], CPUM_USE_FFXSR_LEAKY
        jz      .nothing_to_clean

        xor     eax, eax
        fnstsw  ax                      ; FSW -> AX.
        test    eax, RT_BIT(7)          ; If FSW.ES (bit 7) is set, clear it to not cause FPU exceptions
                                        ; while clearing & loading the FPU bits in 'clean_fpu' below.
        jz      .clean_fpu
        fnclex

.clean_fpu:
        ffree   st7                     ; Clear FPU stack register 7's tag entry so the upcoming
                                        ; push (load) cannot overflow if a wraparound occurs.
        fild    dword [g_r32_Zero xWrtRIP] ; Explicit FPU load to overwrite FIP, FOP, FDP registers in the FPU.
.nothing_to_clean:
%endmacro
%endif ; Unused.


;;
; Makes sure we don't trap (#NM) accessing the FPU.
;
; In ring-0 this is a bit of work since we may have to try to convince the
; host kernel to do the work for us; also, we must report any CR0 changes
; back to HMR0VMX via the VINF_CPUM_HOST_CR0_MODIFIED status code.
;
; If we end up clearing CR0.TS/EM ourselves in ring-0, we'll save the original
; value in CPUMCPU.Host.cr0Fpu. If we don't, we'll store zero there. (See also
; CPUMRZ_RESTORE_CR0_IF_TS_OR_EM_SET.)
;
; In raw-mode we will always have to clear TS and it will be recalculated
; elsewhere and thus needs no saving.
;
; @param %1  Register to return the return status code in.
; @param %2  Temporary scratch register.
; @param %3  Ring-0 only, register pointing to the CPUMCPU structure
;            of the EMT we're on.
; @uses EFLAGS, CR0, %1, %2
;
%macro CPUMRZ_TOUCH_FPU_CLEAR_CR0_FPU_TRAPS_SET_RC 3
        ;
        ; ring-0 - slightly more complicated than the old raw-mode.
        ;
        xor     %1, %1                  ; 0 / VINF_SUCCESS. Wishing for no CR0 changes.
        mov     [%3 + CPUMCPU.Host.cr0Fpu], %1

        mov     %2, cr0
        test    %2, X86_CR0_TS | X86_CR0_EM ; Make sure it's safe to access the FPU state.
        jz      %%no_cr0_change

 %ifdef VMM_R0_TOUCH_FPU
        ; Touch the state and check that the kernel updated CR0 for us.
        movdqa  xmm0, xmm0
        mov     %2, cr0
        test    %2, X86_CR0_TS | X86_CR0_EM
        jz      %%cr0_changed
 %endif

        ; Save CR0 and clear the flags ourselves.
        mov     [%3 + CPUMCPU.Host.cr0Fpu], %2
        and     %2, ~(X86_CR0_TS | X86_CR0_EM)
        mov     cr0, %2

%%cr0_changed:
        mov     %1, VINF_CPUM_HOST_CR0_MODIFIED
%%no_cr0_change:
%endmacro
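
; Illustrative usage sketch (not part of the original file): a typical
; invocation. The register choices (eax for the status code, rcx as scratch,
; rdi as the CPUMCPU pointer) are assumptions made only for this example.
%if 0 ; Example only, excluded from the build.
        CPUMRZ_TOUCH_FPU_CLEAR_CR0_FPU_TRAPS_SET_RC eax, rcx, rdi
        ; eax is now VINF_SUCCESS or VINF_CPUM_HOST_CR0_MODIFIED; the latter
        ; must be propagated back to HMR0VMX per the comments above.
%endif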


;;
; Restore CR0 if CR0.TS or CR0.EM were non-zero in the original state.
;
; @param %1  The original state to restore (or zero).
;
%macro CPUMRZ_RESTORE_CR0_IF_TS_OR_EM_SET 1
        test    %1, X86_CR0_TS | X86_CR0_EM
        jz      %%skip_cr0_restore
        mov     cr0, %1
%%skip_cr0_restore:
%endmacro
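
; Illustrative usage sketch (not part of the original file): undoing the CR0
; change made by CPUMRZ_TOUCH_FPU_CLEAR_CR0_FPU_TRAPS_SET_RC, using the value
; it stashed in CPUMCPU.Host.cr0Fpu. rcx/rdi are example register choices.
%if 0 ; Example only, excluded from the build.
        mov     rcx, [rdi + CPUMCPU.Host.cr0Fpu]
        CPUMRZ_RESTORE_CR0_IF_TS_OR_EM_SET rcx  ; Writes CR0 only if TS/EM were set.
%endif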


;;
; Saves the host state.
;
; @uses rax, rdx
; @param pCpumCpu  Define for the register containing the CPUMCPU pointer.
; @param pXState   Define for the register containing the extended state pointer.
;
%macro CPUMR0_SAVE_HOST 0
        ;
        ; Load a couple of registers we'll use later in all branches.
        ;
        lea     pXState, [pCpumCpu + CPUMCPU.Host.XState]
        mov     eax, [pCpumCpu + CPUMCPU.Host.fXStateMask]

        ;
        ; XSAVE or FXSAVE?
        ;
        or      eax, eax
        jz      %%host_fxsave

        ; XSAVE
        mov     edx, [pCpumCpu + CPUMCPU.Host.fXStateMask + 4]
 %ifdef RT_ARCH_AMD64
        o64 xsave [pXState]
 %else
        xsave   [pXState]
 %endif
        jmp     %%host_done

        ; FXSAVE
%%host_fxsave:
 %ifdef RT_ARCH_AMD64
        o64 fxsave [pXState]            ; Use explicit REX prefix. See @bugref{6398}.
 %else
        fxsave  [pXState]
 %endif

%%host_done:
%endmacro ; CPUMR0_SAVE_HOST
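
; Illustrative usage sketch (not part of the original file): the macro expects
; pCpumCpu and pXState to be %defined as registers by the caller. The rdi/rdx
; choices below are assumptions for the example, not a prescribed convention.
%if 0 ; Example only, excluded from the build.
 %define pCpumCpu rdi
 %define pXState  rdx
        CPUMR0_SAVE_HOST                ; XSAVE/FXSAVE the host FPU/SSE/AVX state.
 %undef pCpumCpu
 %undef pXState
%endif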


;;
; Loads the host state.
;
; @uses rax, rdx
; @param pCpumCpu  Define for the register containing the CPUMCPU pointer.
; @param pXState   Define for the register containing the extended state pointer.
;
%macro CPUMR0_LOAD_HOST 0
        ;
        ; Load a couple of registers we'll use later in all branches.
        ;
        lea     pXState, [pCpumCpu + CPUMCPU.Host.XState]
        mov     eax, [pCpumCpu + CPUMCPU.Host.fXStateMask]

        ;
        ; XRSTOR or FXRSTOR?
        ;
        or      eax, eax
        jz      %%host_fxrstor

        ; XRSTOR
        mov     edx, [pCpumCpu + CPUMCPU.Host.fXStateMask + 4]
 %ifdef RT_ARCH_AMD64
        o64 xrstor [pXState]
 %else
        xrstor  [pXState]
 %endif
        jmp     %%host_done

        ; FXRSTOR
%%host_fxrstor:
 %ifdef RT_ARCH_AMD64
        o64 fxrstor [pXState]           ; Use explicit REX prefix. See @bugref{6398}.
 %else
        fxrstor [pXState]
 %endif

%%host_done:
%endmacro ; CPUMR0_LOAD_HOST



;; Macro for XSAVE/FXSAVE of the guest FPU state, which figures out whether to
; save the 32-bit or the 64-bit FPU state.
;
; @param %1  Pointer to CPUMCPU.
; @param %2  Pointer to XState.
; @param %3  Force AMD64.
; @param %4  The instruction to use (xsave or fxsave).
; @uses xAX, xDX, EFLAGS, 20h of stack.
;
%macro SAVE_32_OR_64_FPU 4
%if CPUM_IS_AMD64 || %3
        ; Save the guest FPU (32-bit or 64-bit), preserves existing broken state. See @bugref{7138}.
        test    dword [pCpumCpu + CPUMCPU.fUseFlags], CPUM_USE_SUPPORTS_LONGMODE
        jnz     short %%save_long_mode_guest
%endif
        %4      [pXState]
%if CPUM_IS_AMD64 || %3
        jmp     %%save_done_32bit_cs_ds

%%save_long_mode_guest:
        o64 %4  [pXState]

        xor     edx, edx
        cmp     dword [pXState + X86FXSTATE.FPUCS], 0
        jne     short %%save_done

        sub     rsp, 20h                ; Only need 1ch bytes but keep stack aligned otherwise we #GP(0).
        fnstenv [rsp]
        movzx   eax, word [rsp + 10h]
        mov     [pXState + X86FXSTATE.FPUCS], eax
        movzx   eax, word [rsp + 18h]
        add     rsp, 20h
        mov     [pXState + X86FXSTATE.FPUDS], eax
%endif
%%save_done_32bit_cs_ds:
        mov     edx, X86_FXSTATE_RSVD_32BIT_MAGIC
%%save_done:
        mov     dword [pXState + X86_OFF_FXSTATE_RSVD], edx
%endmacro ; SAVE_32_OR_64_FPU
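
; Note how CPUMR0_SAVE_GUEST below expands this helper: the first two
; arguments are passed for documentation, but the body references the
; pCpumCpu and pXState defines directly, so those must be in scope. A
; minimal expansion sketch (not part of the original file):
%if 0 ; Example only, excluded from the build.
        SAVE_32_OR_64_FPU pCpumCpu, pXState, 0, fxsave
%endif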


;;
; Save the guest state.
;
; @uses rax, rdx
; @param pCpumCpu  Define for the register containing the CPUMCPU pointer.
; @param pXState   Define for the register containing the extended state pointer.
;
%macro CPUMR0_SAVE_GUEST 0
        ;
        ; Load a couple of registers we'll use later in all branches.
        ;
 %ifdef IN_RING0
        lea     pXState, [pCpumCpu + CPUMCPU.Guest.XState]
 %else
  %error "Unsupported context!"
 %endif
        mov     eax, [pCpumCpu + CPUMCPU.Guest.fXStateMask]

        ;
        ; XSAVE or FXSAVE?
        ;
        or      eax, eax
        jz      %%guest_fxsave

        ; XSAVE
        mov     edx, [pCpumCpu + CPUMCPU.Guest.fXStateMask + 4]
 %ifdef VBOX_WITH_KERNEL_USING_XMM
        and     eax, ~CPUM_VOLATILE_XSAVE_GUEST_COMPONENTS ; Already saved in HMR0A.asm.
 %endif
        SAVE_32_OR_64_FPU pCpumCpu, pXState, 0, xsave
        jmp     %%guest_done

        ; FXSAVE
%%guest_fxsave:
        SAVE_32_OR_64_FPU pCpumCpu, pXState, 0, fxsave

%%guest_done:
%endmacro ; CPUMR0_SAVE_GUEST


;;
; Wrapper for selecting 32-bit or 64-bit XRSTOR/FXRSTOR according to what SAVE_32_OR_64_FPU did.
;
; @param %1  Pointer to CPUMCPU.
; @param %2  Pointer to XState.
; @param %3  Force AMD64.
; @param %4  The instruction to use (xrstor or fxrstor).
; @uses xAX, xDX, EFLAGS
;
%macro RESTORE_32_OR_64_FPU 4
%if CPUM_IS_AMD64 || %3
        ; Restore the guest FPU (32-bit or 64-bit), preserves existing broken state. See @bugref{7138}.
        test    dword [pCpumCpu + CPUMCPU.fUseFlags], CPUM_USE_SUPPORTS_LONGMODE
        jz      %%restore_32bit_fpu
        cmp     dword [pXState + X86_OFF_FXSTATE_RSVD], X86_FXSTATE_RSVD_32BIT_MAGIC
        jne     short %%restore_64bit_fpu
%%restore_32bit_fpu:
%endif
        %4      [pXState]
%if CPUM_IS_AMD64 || %3
        ; TODO: Restore XMM8-XMM15!
        jmp     short %%restore_fpu_done
%%restore_64bit_fpu:
        o64 %4  [pXState]
%%restore_fpu_done:
%endif
%endmacro ; RESTORE_32_OR_64_FPU


;;
; Loads the guest state.
;
; @uses rax, rdx
; @param pCpumCpu  Define for the register containing the CPUMCPU pointer.
; @param pXState   Define for the register containing the extended state pointer.
;
%macro CPUMR0_LOAD_GUEST 0
        ;
        ; Load a couple of registers we'll use later in all branches.
        ;
        lea     pXState, [pCpumCpu + CPUMCPU.Guest.XState]
        mov     eax, [pCpumCpu + CPUMCPU.Guest.fXStateMask]

        ;
        ; XRSTOR or FXRSTOR?
        ;
        or      eax, eax
        jz      %%guest_fxrstor

        ; XRSTOR
        mov     edx, [pCpumCpu + CPUMCPU.Guest.fXStateMask + 4]
 %ifdef VBOX_WITH_KERNEL_USING_XMM
        and     eax, ~CPUM_VOLATILE_XSAVE_GUEST_COMPONENTS ; Will be loaded by HMR0A.asm.
 %endif
        RESTORE_32_OR_64_FPU pCpumCpu, pXState, 0, xrstor
        jmp     %%guest_done

        ; FXRSTOR
%%guest_fxrstor:
        RESTORE_32_OR_64_FPU pCpumCpu, pXState, 0, fxrstor

%%guest_done:
%endmacro ; CPUMR0_LOAD_GUEST
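
; Illustrative usage sketch (not part of the original file): how the four
; save/load macros pair up around running guest code. The pCpumCpu/pXState
; register choices are assumptions made only for this example.
%if 0 ; Example only, excluded from the build.
 %define pCpumCpu rdi
 %define pXState  rdx
        CPUMR0_SAVE_HOST                ; Stash the host FPU/SSE/AVX state ...
        CPUMR0_LOAD_GUEST               ; ... and make the guest state resident.
        ; ... execute guest code ...
        CPUMR0_SAVE_GUEST               ; Stash the guest state again ...
        CPUMR0_LOAD_HOST                ; ... and bring back the host state.
 %undef pCpumCpu
 %undef pXState
%endif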