VirtualBox

source: vbox/trunk/src/VBox/VMM/include/CPUMInternal.mac @ 91305

Last change on this file since 91305 was 91305, checked in by vboxsync, 3 years ago:

VMM/CPUM,++: Moved the nested VT-x I/O permission bitmap allocations into CPUMCTX. bugref:10093

; $Id: CPUMInternal.mac 91305 2021-09-17 20:56:45Z vboxsync $
;; @file
; CPUM - Internal header file (asm).
;

;
; Copyright (C) 2006-2020 Oracle Corporation
;
; This file is part of VirtualBox Open Source Edition (OSE), as
; available from http://www.virtualbox.org. This file is free software;
; you can redistribute it and/or modify it under the terms of the GNU
; General Public License (GPL) as published by the Free Software
; Foundation, in version 2 as it comes in the "COPYING" file of the
; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
;

%include "VBox/asmdefs.mac"
%include "VBox/vmm/cpum.mac"

;; Check sanity.
%ifdef VBOX_WITH_KERNEL_USING_XMM
 %ifndef IN_RING0
  %error "What? We've got code assuming VBOX_WITH_KERNEL_USING_XMM is only defined in ring-0!"
 %endif
%endif

;; For numeric expressions
%ifdef RT_ARCH_AMD64
 %define CPUM_IS_AMD64 1
%else
 %define CPUM_IS_AMD64 0
%endif

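; Illustrative note (not from the original source): because CPUM_IS_AMD64 is
; always defined to 0 or 1, it can be used inside numeric %if expressions,
; which a plain %ifdef cannot; the FPU save/restore wrappers below rely on
; exactly this:
;       %if CPUM_IS_AMD64 || %3
;           ...                         ; 64-bit-only path
;       %endif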

;;
; CPU info
struc CPUMINFO
    .cMsrRanges             resd 1                  ; uint32_t
    .fMsrMask               resd 1                  ; uint32_t
    .fMxCsrMask             resd 1                  ; uint32_t
    .cCpuIdLeaves           resd 1                  ; uint32_t
    .iFirstExtCpuIdLeaf     resd 1                  ; uint32_t
    .enmUnknownCpuIdMethod  resd 1                  ; CPUMUNKNOWNCPUID
    .DefCpuId               resb CPUMCPUID_size     ; CPUMCPUID
    .uScalableBusFreq       resq 1                  ; uint64_t
    .paMsrRangesR3          RTR3PTR_RES 1           ; R3PTRTYPE(PCPUMMSRRANGE)
    .paCpuIdLeavesR3        RTR3PTR_RES 1           ; R3PTRTYPE(PCPUMCPUIDLEAF)
    .aCpuIdLeaves           resb 256*32
    .aMsrRanges             resb 8192*128
endstruc
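
; Illustrative sketch (not from the original source): NASM struc members are
; offset constants, so CPUMINFO fields are read relative to a base register.
; The register choices here are assumptions for illustration:
;       mov     ecx, [rbx + CPUMINFO.cMsrRanges]    ; uint32_t count
;       mov     rsi, [rbx + CPUMINFO.paMsrRangesR3] ; ring-3 pointer (64-bit host assumed)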


%define CPUM_USED_FPU_HOST              RT_BIT(0)
%define CPUM_USED_FPU_GUEST             RT_BIT(10)
%define CPUM_USED_FPU_SINCE_REM         RT_BIT(1)
%define CPUM_USED_MANUAL_XMM_RESTORE    RT_BIT(2)
%define CPUM_USE_SYSENTER               RT_BIT(3)
%define CPUM_USE_SYSCALL                RT_BIT(4)
%define CPUM_USE_DEBUG_REGS_HOST        RT_BIT(5)
%define CPUM_USED_DEBUG_REGS_HOST       RT_BIT(6)
%define CPUM_USE_DEBUG_REGS_HYPER       RT_BIT(7)
%define CPUM_USED_DEBUG_REGS_HYPER      RT_BIT(8)
%define CPUM_USED_DEBUG_REGS_GUEST      RT_BIT(9)
%define CPUM_USE_FFXSR_LEAKY            RT_BIT(19)
%define CPUM_USE_SUPPORTS_LONGMODE      RT_BIT(20)
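
; Illustrative sketch (not from the original source): these CPUM_USE*/CPUM_USED*
; bits live in CPUMCPU.fUseFlags and are tested with ordinary bit operations,
; exactly as the macros further down do, e.g.:
;       test    dword [pCpumCpu + CPUMCPU.fUseFlags], CPUM_USE_SUPPORTS_LONGMODE
;       jnz     short %%save_long_mode_guest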


%define CPUM_HANDLER_DS                 1
%define CPUM_HANDLER_ES                 2
%define CPUM_HANDLER_FS                 3
%define CPUM_HANDLER_GS                 4
%define CPUM_HANDLER_IRET               5
%define CPUM_HANDLER_TYPEMASK           0ffh
%define CPUM_HANDLER_CTXCORE_IN_EBP     RT_BIT(31)


struc CPUM
    ;...
    .fHostUseFlags              resd 1

    ; CR4 masks
    .CR4.AndMask                resd 1
    .CR4.OrMask                 resd 1
    .u8PortableCpuIdLevel       resb 1
    .fPendingRestore            resb 1

    alignb 8
    .fXStateGuestMask           resq 1
    .fXStateHostMask            resq 1

    alignb 64
    .HostFeatures               resb 48
    .GuestFeatures              resb 48
    .GuestInfo                  resb CPUMINFO_size

    ; Patch manager saved state compatibility CPUID leaf arrays
    .aGuestCpuIdPatmStd         resb 16*6
    .aGuestCpuIdPatmExt         resb 16*10
    .aGuestCpuIdPatmCentaur     resb 16*4

    alignb 8
    .cMsrWrites                 resq 1
    .cMsrWritesToIgnoredBits    resq 1
    .cMsrWritesRaiseGp          resq 1
    .cMsrWritesUnknown          resq 1
    .cMsrReads                  resq 1
    .cMsrReadsRaiseGp           resq 1
    .cMsrReadsUnknown           resq 1
endstruc

struc CPUMCPU
    ;
    ; Guest context state
    ; (Identical to the .Hyper chunk below and to CPUMCTX in cpum.mac.)
    ;
    .Guest                      resq 0
    .Guest.eax                  resq 1
    .Guest.ecx                  resq 1
    .Guest.edx                  resq 1
    .Guest.ebx                  resq 1
    .Guest.esp                  resq 1
    .Guest.ebp                  resq 1
    .Guest.esi                  resq 1
    .Guest.edi                  resq 1
    .Guest.r8                   resq 1
    .Guest.r9                   resq 1
    .Guest.r10                  resq 1
    .Guest.r11                  resq 1
    .Guest.r12                  resq 1
    .Guest.r13                  resq 1
    .Guest.r14                  resq 1
    .Guest.r15                  resq 1
    .Guest.es.Sel               resw 1
    .Guest.es.PaddingSel        resw 1
    .Guest.es.ValidSel          resw 1
    .Guest.es.fFlags            resw 1
    .Guest.es.u64Base           resq 1
    .Guest.es.u32Limit          resd 1
    .Guest.es.Attr              resd 1
    .Guest.cs.Sel               resw 1
    .Guest.cs.PaddingSel        resw 1
    .Guest.cs.ValidSel          resw 1
    .Guest.cs.fFlags            resw 1
    .Guest.cs.u64Base           resq 1
    .Guest.cs.u32Limit          resd 1
    .Guest.cs.Attr              resd 1
    .Guest.ss.Sel               resw 1
    .Guest.ss.PaddingSel        resw 1
    .Guest.ss.ValidSel          resw 1
    .Guest.ss.fFlags            resw 1
    .Guest.ss.u64Base           resq 1
    .Guest.ss.u32Limit          resd 1
    .Guest.ss.Attr              resd 1
    .Guest.ds.Sel               resw 1
    .Guest.ds.PaddingSel        resw 1
    .Guest.ds.ValidSel          resw 1
    .Guest.ds.fFlags            resw 1
    .Guest.ds.u64Base           resq 1
    .Guest.ds.u32Limit          resd 1
    .Guest.ds.Attr              resd 1
    .Guest.fs.Sel               resw 1
    .Guest.fs.PaddingSel        resw 1
    .Guest.fs.ValidSel          resw 1
    .Guest.fs.fFlags            resw 1
    .Guest.fs.u64Base           resq 1
    .Guest.fs.u32Limit          resd 1
    .Guest.fs.Attr              resd 1
    .Guest.gs.Sel               resw 1
    .Guest.gs.PaddingSel        resw 1
    .Guest.gs.ValidSel          resw 1
    .Guest.gs.fFlags            resw 1
    .Guest.gs.u64Base           resq 1
    .Guest.gs.u32Limit          resd 1
    .Guest.gs.Attr              resd 1
    .Guest.eip                  resq 1
    .Guest.eflags               resq 1
    .Guest.cr0                  resq 1
    .Guest.cr2                  resq 1
    .Guest.cr3                  resq 1
    .Guest.cr4                  resq 1
    .Guest.dr                   resq 8
    .Guest.gdtrPadding          resw 3
    .Guest.gdtr                 resw 0
    .Guest.gdtr.cbGdt           resw 1
    .Guest.gdtr.pGdt            resq 1
    .Guest.idtrPadding          resw 3
    .Guest.idtr                 resw 0
    .Guest.idtr.cbIdt           resw 1
    .Guest.idtr.pIdt            resq 1
    .Guest.ldtr.Sel             resw 1
    .Guest.ldtr.PaddingSel      resw 1
    .Guest.ldtr.ValidSel        resw 1
    .Guest.ldtr.fFlags          resw 1
    .Guest.ldtr.u64Base         resq 1
    .Guest.ldtr.u32Limit        resd 1
    .Guest.ldtr.Attr            resd 1
    .Guest.tr.Sel               resw 1
    .Guest.tr.PaddingSel        resw 1
    .Guest.tr.ValidSel          resw 1
    .Guest.tr.fFlags            resw 1
    .Guest.tr.u64Base           resq 1
    .Guest.tr.u32Limit          resd 1
    .Guest.tr.Attr              resd 1
    .Guest.SysEnter.cs          resb 8
    .Guest.SysEnter.eip         resb 8
    .Guest.SysEnter.esp         resb 8
    .Guest.msrEFER              resb 8
    .Guest.msrSTAR              resb 8
    .Guest.msrPAT               resb 8
    .Guest.msrLSTAR             resb 8
    .Guest.msrCSTAR             resb 8
    .Guest.msrSFMASK            resb 8
    .Guest.msrKERNELGSBASE      resb 8
    .Guest.uMsrPadding0         resb 8

    alignb 8
    .Guest.fExtrn               resq 1

    alignb 32
    .Guest.aPaePdpes            resq 4

    alignb 8
    .Guest.aXcr                 resq 2
    .Guest.fXStateMask          resq 1
    .Guest.fUsedFpuGuest        resb 1
    alignb 8
    .Guest.aoffXState           resw 64
    alignb 256
    .Guest.abXState             resb 0x4000-0x300
    .Guest.XState               EQU .Guest.abXState

;;
    alignb 4096
    .Guest.hwvirt               resb 0
    .Guest.hwvirt.svm           resb 0
    .Guest.hwvirt.vmx           resb 0

    .Guest.hwvirt.svm.Vmcb                   EQU .Guest.hwvirt.svm
    .Guest.hwvirt.svm.abMsrBitmap            EQU (.Guest.hwvirt.svm.Vmcb + 0x1000)
    .Guest.hwvirt.svm.abIoBitmap             EQU (.Guest.hwvirt.svm.abMsrBitmap + 0x2000)
    .Guest.hwvirt.svm.uMsrHSavePa            EQU (.Guest.hwvirt.svm.abIoBitmap + 0x3000)       ; resq 1
    .Guest.hwvirt.svm.GCPhysVmcb             EQU (.Guest.hwvirt.svm.uMsrHSavePa + 8)           ; resq 1
    alignb 8
    .Guest.hwvirt.svm.HostState              EQU (.Guest.hwvirt.svm.GCPhysVmcb + 8)            ; resb 184
    .Guest.hwvirt.svm.uPrevPauseTick         EQU (.Guest.hwvirt.svm.HostState + 184)           ; resq 1
    .Guest.hwvirt.svm.cPauseFilter           EQU (.Guest.hwvirt.svm.uPrevPauseTick + 8)        ; resw 1
    .Guest.hwvirt.svm.cPauseFilterThreshold  EQU (.Guest.hwvirt.svm.cPauseFilter + 2)          ; resw 1
    .Guest.hwvirt.svm.fInterceptEvents       EQU (.Guest.hwvirt.svm.cPauseFilterThreshold + 2) ; resb 1

    .Guest.hwvirt.vmx.Vmcs                   resb 0x1000
    .Guest.hwvirt.vmx.ShadowVmcs             resb 0x1000
    .Guest.hwvirt.vmx.abVmreadBitmap         resb 0x1000
    .Guest.hwvirt.vmx.abVmwriteBitmap        resb 0x1000
    .Guest.hwvirt.vmx.aEntryMsrLoadArea      resb 0x2000
    .Guest.hwvirt.vmx.aExitMsrStoreArea      resb 0x2000
    .Guest.hwvirt.vmx.aExitMsrLoadArea       resb 0x2000
    .Guest.hwvirt.vmx.abMsrBitmap            resb 0x1000
    .Guest.hwvirt.vmx.abIoBitmap             resb 0x1000+0x1000
    alignb 8
    .Guest.hwvirt.vmx.GCPhysVmxon            resq 1
    .Guest.hwvirt.vmx.GCPhysVmcs             resq 1
    .Guest.hwvirt.vmx.GCPhysShadowVmcs       resq 1
    .Guest.hwvirt.vmx.enmDiag                resd 1
    .Guest.hwvirt.vmx.enmAbort               resd 1
    .Guest.hwvirt.vmx.uDiagAux               resq 1
    .Guest.hwvirt.vmx.uAbortAux              resd 1
    .Guest.hwvirt.vmx.fInVmxRootMode         resb 1
    .Guest.hwvirt.vmx.fInVmxNonRootMode      resb 1
    .Guest.hwvirt.vmx.fInterceptEvents       resb 1
    .Guest.hwvirt.vmx.fNmiUnblockingIret     resb 1
    .Guest.hwvirt.vmx.pvVirtApicPageR0       resq 1
    .Guest.hwvirt.vmx.pvVirtApicPageR3       resq 1
    .Guest.hwvirt.vmx.uFirstPauseLoopTick    resq 1
    .Guest.hwvirt.vmx.uPrevPauseTick         resq 1
    .Guest.hwvirt.vmx.uEntryTick             resq 1
    .Guest.hwvirt.vmx.offVirtApicWrite       resw 1
    .Guest.hwvirt.vmx.fVirtNmiBlocking       resb 1
    alignb 8
    .Guest.hwvirt.vmx.Msrs                   resb 224
    .Guest.hwvirt.vmx.HCPhysVirtApicPage     resq 1

    alignb 8
    .Guest.hwvirt.enmHwvirt             resd 1
    .Guest.hwvirt.fGif                  resb 1
    alignb 8
    .Guest.hwvirt.fLocalForcedActions   resd 1
    alignb 64

    .GuestMsrs                  resq 0
    .GuestMsrs.au64             resq 64

    ;
    ; Other stuff.
    ;
    .hNestedVmxPreemptTimer     resq 1

    .fUseFlags                  resd 1
    .fChanged                   resd 1
    .u32RetCode                 resd 1

%ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
    .fApicDisVectors            resd 1
    .pvApicBase                 RTR0PTR_RES 1
    .fX2Apic                    resb 1
%else
    .abPadding3                 resb (4 + RTR0PTR_CB + 1)
%endif

    .fCpuIdApicFeatureVisible   resb 1

    ;
    ; Host context state
    ;
    alignb 64
    .Host                       resb 0
    .Host.abXState              resb 0x4000-0x300
    .Host.XState                EQU .Host.abXState
    ;.Host.rax                  resq 1 - scratch
    .Host.rbx                   resq 1
    ;.Host.rcx                  resq 1 - scratch
    ;.Host.rdx                  resq 1 - scratch
    .Host.rdi                   resq 1
    .Host.rsi                   resq 1
    .Host.rbp                   resq 1
    .Host.rsp                   resq 1
    ;.Host.r8                   resq 1 - scratch
    ;.Host.r9                   resq 1 - scratch
    .Host.r10                   resq 1
    .Host.r11                   resq 1
    .Host.r12                   resq 1
    .Host.r13                   resq 1
    .Host.r14                   resq 1
    .Host.r15                   resq 1
    ;.Host.rip                  resd 1 - scratch
    .Host.rflags                resq 1
    .Host.ss                    resw 1
    .Host.ssPadding             resw 1
    .Host.gs                    resw 1
    .Host.gsPadding             resw 1
    .Host.fs                    resw 1
    .Host.fsPadding             resw 1
    .Host.es                    resw 1
    .Host.esPadding             resw 1
    .Host.ds                    resw 1
    .Host.dsPadding             resw 1
    .Host.cs                    resw 1
    .Host.csPadding             resw 1

    .Host.cr0Fpu:
    .Host.cr0                   resq 1
    ;.Host.cr2                  resq 1 - scratch
    .Host.cr3                   resq 1
    .Host.cr4                   resq 1
    .Host.cr8                   resq 1

    .Host.dr0                   resq 1
    .Host.dr1                   resq 1
    .Host.dr2                   resq 1
    .Host.dr3                   resq 1
    .Host.dr6                   resq 1
    .Host.dr7                   resq 1

    .Host.gdtr                  resb 10         ; GDT limit + linear address
    .Host.gdtrPadding           resw 1
    .Host.idtr                  resb 10         ; IDT limit + linear address
    .Host.idtrPadding           resw 1
    .Host.ldtr                  resw 1
    .Host.ldtrPadding           resw 1
    .Host.tr                    resw 1
    .Host.trPadding             resw 1

    .Host.SysEnter.cs           resq 1
    .Host.SysEnter.eip          resq 1
    .Host.SysEnter.esp          resq 1
    .Host.FSbase                resq 1
    .Host.GSbase                resq 1
    .Host.efer                  resq 1
    alignb 8
    .Host.xcr0                  resq 1
    .Host.fXStateMask           resq 1

    ;
    ; Hypervisor Context.
    ;
    alignb 64
    .Hyper                      resq 0
    .Hyper.dr                   resq 8
    .Hyper.cr3                  resq 1
    alignb 64

%ifdef VBOX_WITH_CRASHDUMP_MAGIC
    .aMagic                     resb 56
    .uMagic                     resq 1
%endif
endstruc

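; Illustrative sketch (not from the original source): CPUMCPU member names are
; offsets from a CPUMCPU pointer, so the guest and host fields are addressed
; directly, as the macros below do; for instance:
;       lea     pXState, [pCpumCpu + CPUMCPU.Guest.XState]
;       mov     eax, [pCpumCpu + CPUMCPU.Guest.fXStateMask]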


%if 0 ; Currently not used anywhere.
;;
; Macro for FXSAVE/FXRSTOR leaky behaviour on AMD CPUs, see cpumR3CheckLeakyFpu().
;
; Cleans the FPU state, if necessary, before restoring the FPU.
;
; This macro ASSUMES CR0.TS is not set!
;
; @param xDX Pointer to CPUMCPU.
; @uses xAX, EFLAGS
;
; Changes here should also be reflected in CPUMRCA.asm's copy!
;
%macro CLEANFPU 0
    test    dword [xDX + CPUMCPU.fUseFlags], CPUM_USE_FFXSR_LEAKY
    jz      .nothing_to_clean

    xor     eax, eax
    fnstsw  ax                          ; FSW -> AX.
    test    eax, RT_BIT(7)              ; If FSW.ES (bit 7) is set, clear it so it doesn't cause FPU exceptions
                                        ; while clearing & loading the FPU bits in 'clean_fpu' below.
    jz      .clean_fpu
    fnclex

.clean_fpu:
    ffree   st7                         ; Clear FPU stack register 7's tag entry to prevent an overflow
                                        ; if a wraparound occurs on the upcoming push (load).
    fild    dword [g_r32_Zero xWrtRIP]  ; Explicit FPU load to overwrite FIP, FOP and FDP registers in the FPU.
.nothing_to_clean:
%endmacro
%endif ; Unused.


;;
; Makes sure we don't trap (#NM) accessing the FPU.
;
; In ring-0 this is a bit of work since we may have to try to convince the
; host kernel to do the work for us; we must also report any CR0 changes back
; to HMR0VMX via the VINF_CPUM_HOST_CR0_MODIFIED status code.
;
; If we end up clearing CR0.TS/EM ourselves in ring-0, we'll save the original
; value in CPUMCPU.Host.cr0Fpu. If we don't, we'll store zero there. (See also
; CPUMRZ_RESTORE_CR0_IF_TS_OR_EM_SET.)
;
; In raw-mode we will always have to clear TS; it will be recalculated
; elsewhere and thus needs no saving.
;
; @param %1 Register to return the return status code in.
; @param %2 Temporary scratch register.
; @param %3 Ring-0 only, register pointing to the CPUMCPU structure
;           of the EMT we're on.
; @uses EFLAGS, CR0, %1, %2
;
%macro CPUMRZ_TOUCH_FPU_CLEAR_CR0_FPU_TRAPS_SET_RC 3
    ;
    ; ring-0 - slightly more complicated than the old raw-mode.
    ;
    xor     %1, %1                      ; 0 / VINF_SUCCESS. Wishing for no CR0 changes.
    mov     [%3 + CPUMCPU.Host.cr0Fpu], %1

    mov     %2, cr0
    test    %2, X86_CR0_TS | X86_CR0_EM ; Make sure it's safe to access the FPU state.
    jz      %%no_cr0_change

 %ifdef VMM_R0_TOUCH_FPU
    ; Touch the state and check that the kernel updated CR0 for us.
    movdqa  xmm0, xmm0
    mov     %2, cr0
    test    %2, X86_CR0_TS | X86_CR0_EM
    jz      %%cr0_changed
 %endif

    ; Save CR0 and clear the flags ourselves.
    mov     [%3 + CPUMCPU.Host.cr0Fpu], %2
    and     %2, ~(X86_CR0_TS | X86_CR0_EM)
    mov     cr0, %2

%%cr0_changed:
    mov     %1, VINF_CPUM_HOST_CR0_MODIFIED
%%no_cr0_change:
%endmacro


;;
; Restore CR0 if CR0.TS or CR0.EM were non-zero in the original state.
;
; @param %1 The original state to restore (or zero).
;
%macro CPUMRZ_RESTORE_CR0_IF_TS_OR_EM_SET 1
    test    %1, X86_CR0_TS | X86_CR0_EM
    jz      %%skip_cr0_restore
    mov     cr0, %1
%%skip_cr0_restore:
%endmacro
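
; Illustrative usage sketch (not from the original source) of pairing the two
; macros above in ring-0; the register assignments are assumptions:
;       CPUMRZ_TOUCH_FPU_CLEAR_CR0_FPU_TRAPS_SET_RC eax, ecx, rdi ; rdi = pCpumCpu
;       ; ... safely access the FPU / extended state ...
;       mov     rcx, [rdi + CPUMCPU.Host.cr0Fpu]    ; saved CR0 value, or zero
;       CPUMRZ_RESTORE_CR0_IF_TS_OR_EM_SET rcx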


;;
; Saves the host state.
;
; @uses rax, rdx
; @param pCpumCpu Define for the register containing the CPUMCPU pointer.
; @param pXState Define for the register containing the extended state pointer.
;
%macro CPUMR0_SAVE_HOST 0
    ;
    ; Load a couple of registers we'll use later in all branches.
    ;
    lea     pXState, [pCpumCpu + CPUMCPU.Host.XState]
    mov     eax, [pCpumCpu + CPUMCPU.Host.fXStateMask]

    ;
    ; XSAVE or FXSAVE?
    ;
    or      eax, eax
    jz      %%host_fxsave

    ; XSAVE
    mov     edx, [pCpumCpu + CPUMCPU.Host.fXStateMask + 4]
 %ifdef RT_ARCH_AMD64
    o64 xsave [pXState]
 %else
    xsave   [pXState]
 %endif
    jmp     %%host_done

    ; FXSAVE
%%host_fxsave:
 %ifdef RT_ARCH_AMD64
    o64 fxsave [pXState]                ; Use explicit REX prefix. See @bugref{6398}.
 %else
    fxsave  [pXState]
 %endif

%%host_done:
%endmacro ; CPUMR0_SAVE_HOST
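
; Illustrative usage sketch (not from the original source): pCpumCpu and
; pXState are expected to be %define'd to concrete registers by the caller
; before the macro is instantiated; the register choices below are assumptions:
;       %define pCpumCpu    rdi
;       %define pXState     rsi
;       CPUMR0_SAVE_HOST
;       %undef pCpumCpu
;       %undef pXState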


;;
; Loads the host state.
;
; @uses rax, rdx
; @param pCpumCpu Define for the register containing the CPUMCPU pointer.
; @param pXState Define for the register containing the extended state pointer.
;
%macro CPUMR0_LOAD_HOST 0
    ;
    ; Load a couple of registers we'll use later in all branches.
    ;
    lea     pXState, [pCpumCpu + CPUMCPU.Host.XState]
    mov     eax, [pCpumCpu + CPUMCPU.Host.fXStateMask]

    ;
    ; XRSTOR or FXRSTOR?
    ;
    or      eax, eax
    jz      %%host_fxrstor

    ; XRSTOR
    mov     edx, [pCpumCpu + CPUMCPU.Host.fXStateMask + 4]
 %ifdef RT_ARCH_AMD64
    o64 xrstor [pXState]
 %else
    xrstor  [pXState]
 %endif
    jmp     %%host_done

    ; FXRSTOR
%%host_fxrstor:
 %ifdef RT_ARCH_AMD64
    o64 fxrstor [pXState]               ; Use explicit REX prefix. See @bugref{6398}.
 %else
    fxrstor [pXState]
 %endif

%%host_done:
%endmacro ; CPUMR0_LOAD_HOST



;;
; Macro for XSAVE/FXSAVE of the guest FPU state, which figures out whether to
; save the 32-bit or the 64-bit FPU state.
;
; @param %1 Pointer to CPUMCPU.
; @param %2 Pointer to XState.
; @param %3 Force AMD64.
; @param %4 The instruction to use (xsave or fxsave).
; @uses xAX, xDX, EFLAGS, 20h of stack.
;
%macro SAVE_32_OR_64_FPU 4
%if CPUM_IS_AMD64 || %3
    ; Save the guest FPU (32-bit or 64-bit), preserves existing broken state. See @bugref{7138}.
    test    dword [pCpumCpu + CPUMCPU.fUseFlags], CPUM_USE_SUPPORTS_LONGMODE
    jnz     short %%save_long_mode_guest
%endif
    %4      [pXState]
%if CPUM_IS_AMD64 || %3
    jmp     %%save_done_32bit_cs_ds

%%save_long_mode_guest:
    o64 %4  [pXState]

    xor     edx, edx
    cmp     dword [pXState + X86FXSTATE.FPUCS], 0
    jne     short %%save_done

    sub     rsp, 20h                    ; Only need 1ch bytes but keep the stack aligned, otherwise we #GP(0).
    fnstenv [rsp]
    movzx   eax, word [rsp + 10h]
    mov     [pXState + X86FXSTATE.FPUCS], eax
    movzx   eax, word [rsp + 18h]
    add     rsp, 20h
    mov     [pXState + X86FXSTATE.FPUDS], eax
%endif
%%save_done_32bit_cs_ds:
    mov     edx, X86_FXSTATE_RSVD_32BIT_MAGIC
%%save_done:
    mov     dword [pXState + X86_OFF_FXSTATE_RSVD], edx
%endmacro ; SAVE_32_OR_64_FPU
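
; Illustrative note (not from the original source): CPUMR0_SAVE_GUEST below
; instantiates this wrapper with the concrete save instruction as %4, e.g.
;       SAVE_32_OR_64_FPU pCpumCpu, pXState, 0, xsave
; The X86_FXSTATE_RSVD_32BIT_MAGIC marker written into the reserved field is
; what RESTORE_32_OR_64_FPU later checks to pick the matching restore variant.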


;;
; Saves the guest state.
;
; @uses rax, rdx
; @param pCpumCpu Define for the register containing the CPUMCPU pointer.
; @param pXState Define for the register containing the extended state pointer.
;
%macro CPUMR0_SAVE_GUEST 0
    ;
    ; Load a couple of registers we'll use later in all branches.
    ;
 %ifdef IN_RING0
    lea     pXState, [pCpumCpu + CPUMCPU.Guest.XState]
 %else
  %error "Unsupported context!"
 %endif
    mov     eax, [pCpumCpu + CPUMCPU.Guest.fXStateMask]

    ;
    ; XSAVE or FXSAVE?
    ;
    or      eax, eax
    jz      %%guest_fxsave

    ; XSAVE
    mov     edx, [pCpumCpu + CPUMCPU.Guest.fXStateMask + 4]
 %ifdef VBOX_WITH_KERNEL_USING_XMM
    and     eax, ~CPUM_VOLATILE_XSAVE_GUEST_COMPONENTS ; Already saved in HMR0A.asm.
 %endif
    SAVE_32_OR_64_FPU pCpumCpu, pXState, 0, xsave
    jmp     %%guest_done

    ; FXSAVE
%%guest_fxsave:
    SAVE_32_OR_64_FPU pCpumCpu, pXState, 0, fxsave

%%guest_done:
%endmacro ; CPUMR0_SAVE_GUEST


;;
; Wrapper for selecting 32-bit or 64-bit XRSTOR/FXRSTOR according to what SAVE_32_OR_64_FPU did.
;
; @param %1 Pointer to CPUMCPU.
; @param %2 Pointer to XState.
; @param %3 Force AMD64.
; @param %4 The instruction to use (xrstor or fxrstor).
; @uses xAX, xDX, EFLAGS
;
%macro RESTORE_32_OR_64_FPU 4
%if CPUM_IS_AMD64 || %3
    ; Restore the guest FPU (32-bit or 64-bit), preserves existing broken state. See @bugref{7138}.
    test    dword [pCpumCpu + CPUMCPU.fUseFlags], CPUM_USE_SUPPORTS_LONGMODE
    jz      %%restore_32bit_fpu
    cmp     dword [pXState + X86_OFF_FXSTATE_RSVD], X86_FXSTATE_RSVD_32BIT_MAGIC
    jne     short %%restore_64bit_fpu
%%restore_32bit_fpu:
%endif
    %4      [pXState]
%if CPUM_IS_AMD64 || %3
    ; TODO: Restore XMM8-XMM15!
    jmp     short %%restore_fpu_done
%%restore_64bit_fpu:
    o64 %4  [pXState]
%%restore_fpu_done:
%endif
%endmacro ; RESTORE_32_OR_64_FPU


;;
; Loads the guest state.
;
; @uses rax, rdx
; @param pCpumCpu Define for the register containing the CPUMCPU pointer.
; @param pXState Define for the register containing the extended state pointer.
;
%macro CPUMR0_LOAD_GUEST 0
    ;
    ; Load a couple of registers we'll use later in all branches.
    ;
    lea     pXState, [pCpumCpu + CPUMCPU.Guest.XState]
    mov     eax, [pCpumCpu + CPUMCPU.Guest.fXStateMask]

    ;
    ; XRSTOR or FXRSTOR?
    ;
    or      eax, eax
    jz      %%guest_fxrstor

    ; XRSTOR
    mov     edx, [pCpumCpu + CPUMCPU.Guest.fXStateMask + 4]
 %ifdef VBOX_WITH_KERNEL_USING_XMM
    and     eax, ~CPUM_VOLATILE_XSAVE_GUEST_COMPONENTS ; Will be loaded by HMR0A.asm.
 %endif
    RESTORE_32_OR_64_FPU pCpumCpu, pXState, 0, xrstor
    jmp     %%guest_done

    ; FXRSTOR
%%guest_fxrstor:
    RESTORE_32_OR_64_FPU pCpumCpu, pXState, 0, fxrstor

%%guest_done:
%endmacro ; CPUMR0_LOAD_GUEST
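
; Illustrative world-switch sketch (not from the original source) showing how
; the four macros above pair up, assuming pCpumCpu/pXState are defined as
; described:
;       CPUMR0_SAVE_HOST        ; stash the host FPU/extended state
;       CPUMR0_LOAD_GUEST       ; install the guest state
;       ; ... run guest code ...
;       CPUMR0_SAVE_GUEST       ; capture the guest's changes
;       CPUMR0_LOAD_HOST        ; restore the host state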