1; $Id: HMR0A.asm 83067 2020-02-13 04:39:07Z vboxsync $
2;; @file
3; HM - Ring-0 VMX, SVM world-switch and helper routines.
4;
5
6;
7; Copyright (C) 2006-2020 Oracle Corporation
8;
9; This file is part of VirtualBox Open Source Edition (OSE), as
10; available from http://www.virtualbox.org. This file is free software;
11; you can redistribute it and/or modify it under the terms of the GNU
12; General Public License (GPL) as published by the Free Software
13; Foundation, in version 2 as it comes in the "COPYING" file of the
14; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16;
17
18;*********************************************************************************************************************************
19;* Header Files *
20;*********************************************************************************************************************************
21%include "VBox/asmdefs.mac"
22%include "VBox/err.mac"
23%include "VBox/vmm/hm_vmx.mac"
24%include "VBox/vmm/cpum.mac"
25%include "VBox/vmm/vm.mac"
26%include "iprt/x86.mac"
27%include "HMInternal.mac"
28
29%ifdef RT_OS_OS2 ;; @todo fix OMF support in yasm and kick nasm out completely.
30 %macro vmwrite 2,
31 int3
32 %endmacro
33 %define vmlaunch int3
34 %define vmresume int3
35 %define vmsave int3
36 %define vmload int3
37 %define vmrun int3
38 %define clgi int3
39 %define stgi int3
40 %macro invlpga 2,
41 int3
42 %endmacro
43%endif
44
45;*********************************************************************************************************************************
46;* Defined Constants And Macros *
47;*********************************************************************************************************************************
48;; The offset of the XMM registers in X86FXSTATE.
49; Use define because I'm too lazy to convert the struct.
50%define XMM_OFF_IN_X86FXSTATE 160
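; (XMM0 starts at byte offset 160 in the FXSAVE/FXRSTOR image, which is the layout X86FXSTATE mirrors.)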
51
52;; Spectre filler for 32-bit mode.
53; Some user space address that points to a 4MB page boundary in the hope that it
54; will somehow make it less useful.
55%define SPECTRE_FILLER32 0x227fffff
56;; Spectre filler for 64-bit mode.
57; Chosen to be an invalid address (even with 5-level paging).
58%define SPECTRE_FILLER64 0x02204204207fffff
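; (The 02h in the top byte makes this non-canonical under both 4-level and 5-level paging.)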
59;; Spectre filler for the current CPU mode.
60%ifdef RT_ARCH_AMD64
61 %define SPECTRE_FILLER SPECTRE_FILLER64
62%else
63 %define SPECTRE_FILLER SPECTRE_FILLER32
64%endif
65
66;;
67; Determine whether we should skip restoring the GDTR, IDTR and TR across VMX non-root operation.
68;
69%ifdef RT_ARCH_AMD64
70 %define VMX_SKIP_GDTR
71 %define VMX_SKIP_TR
72 %define VBOX_SKIP_RESTORE_SEG
73 %ifdef RT_OS_DARWIN
74 ; Load the NULL selector into DS, ES, FS and GS on 64-bit darwin so we don't
75 ; risk loading a stale LDT value or something invalid.
76 %define HM_64_BIT_USE_NULL_SEL
77; Darwin (Mavericks) uses the IDTR limit to store the CPU ID, so we must always restore it.
78 ; See @bugref{6875}.
79 %else
80 %define VMX_SKIP_IDTR
81 %endif
82%endif
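; Note: VBOX_SKIP_RESTORE_SEG and HM_64_BIT_USE_NULL_SEL are consumed by MYPUSHSEGS64/MYPOPSEGS64 below,
; while the VMX_SKIP_* defines are consumed by RESTORE_STATE_VM64 and VMXR0StartVM64 further down.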
83
84;; @def MYPUSHAD
85; Macro generating an equivalent to PUSHAD instruction.
86
87;; @def MYPOPAD
88; Macro generating an equivalent to POPAD instruction.
89
90;; @def MYPUSHSEGS
91; Macro saving all segment registers on the stack.
92; @param 1 Full width register name.
93; @param 2 16-bit register name for \a 1.
94
95;; @def MYPOPSEGS
96; Macro restoring all segment registers from the stack.
97; @param 1 Full width register name.
98; @param 2 16-bit register name for \a 1.
99
100%ifdef ASM_CALL64_GCC
101 %macro MYPUSHAD64 0
102 push r15
103 push r14
104 push r13
105 push r12
106 push rbx
107 %endmacro
108 %macro MYPOPAD64 0
109 pop rbx
110 pop r12
111 pop r13
112 pop r14
113 pop r15
114 %endmacro
115
116%else ; ASM_CALL64_MSC
117 %macro MYPUSHAD64 0
118 push r15
119 push r14
120 push r13
121 push r12
122 push rbx
123 push rsi
124 push rdi
125 %endmacro
126 %macro MYPOPAD64 0
127 pop rdi
128 pop rsi
129 pop rbx
130 pop r12
131 pop r13
132 pop r14
133 pop r15
134 %endmacro
135%endif
136
137%ifdef VBOX_SKIP_RESTORE_SEG
138 %macro MYPUSHSEGS64 2
139 %endmacro
140
141 %macro MYPOPSEGS64 2
142 %endmacro
143%else ; !VBOX_SKIP_RESTORE_SEG
144 ; Trashes rax, rdx & rcx.
145 %macro MYPUSHSEGS64 2
146 %ifndef HM_64_BIT_USE_NULL_SEL
147 mov %2, es
148 push %1
149 mov %2, ds
150 push %1
151 %endif
152
153 ; Special case for FS; Windows and Linux either don't use it or restore it when leaving kernel mode;
154 ; Solaris OTOH doesn't, so we must save it.
155 mov ecx, MSR_K8_FS_BASE
156 rdmsr
157 push rdx
158 push rax
159 %ifndef HM_64_BIT_USE_NULL_SEL
160 push fs
161 %endif
162
163 ; Special case for GS; OSes typically use swapgs to reset the hidden base register for GS on entry into the kernel.
164 ; The same happens on exit.
165 mov ecx, MSR_K8_GS_BASE
166 rdmsr
167 push rdx
168 push rax
169 %ifndef HM_64_BIT_USE_NULL_SEL
170 push gs
171 %endif
172 %endmacro
173
174 ; Trashes rax, rdx & rcx.
175 %macro MYPOPSEGS64 2
176 ; Note: do not step through this code with a debugger!
177 %ifndef HM_64_BIT_USE_NULL_SEL
178 xor eax, eax
179 mov ds, ax
180 mov es, ax
181 mov fs, ax
182 mov gs, ax
183 %endif
184
185 %ifndef HM_64_BIT_USE_NULL_SEL
186 pop gs
187 %endif
188 pop rax
189 pop rdx
190 mov ecx, MSR_K8_GS_BASE
191 wrmsr
192
193 %ifndef HM_64_BIT_USE_NULL_SEL
194 pop fs
195 %endif
196 pop rax
197 pop rdx
198 mov ecx, MSR_K8_FS_BASE
199 wrmsr
200 ; Now it's safe to step again
201
202 %ifndef HM_64_BIT_USE_NULL_SEL
203 pop %1
204 mov ds, %2
205 pop %1
206 mov es, %2
207 %endif
208 %endmacro
209%endif ; VBOX_SKIP_RESTORE_SEG
210
211%macro MYPUSHAD32 0
212 pushad
213%endmacro
214%macro MYPOPAD32 0
215 popad
216%endmacro
217
218%macro MYPUSHSEGS32 2
219 push ds
220 push es
221 push fs
222 push gs
223%endmacro
224%macro MYPOPSEGS32 2
225 pop gs
226 pop fs
227 pop es
228 pop ds
229%endmacro
230
231%ifdef RT_ARCH_AMD64
232 %define MYPUSHAD MYPUSHAD64
233 %define MYPOPAD MYPOPAD64
234 %define MYPUSHSEGS MYPUSHSEGS64
235 %define MYPOPSEGS MYPOPSEGS64
236%else
237 %define MYPUSHAD MYPUSHAD32
238 %define MYPOPAD MYPOPAD32
239 %define MYPUSHSEGS MYPUSHSEGS32
240 %define MYPOPSEGS MYPOPSEGS32
241%endif
242
243;;
244; Creates an indirect branch prediction barrier on CPUs that need and support it.
245; @clobbers eax, edx, ecx
246; @param 1 How to address CPUMCTX.
247; @param 2 Which flag to test for (CPUMCTX_WSF_IBPB_ENTRY or CPUMCTX_WSF_IBPB_EXIT)
248%macro INDIRECT_BRANCH_PREDICTION_BARRIER 2
249 test byte [%1 + CPUMCTX.fWorldSwitcher], %2
250 jz %%no_indirect_branch_barrier
251 mov ecx, MSR_IA32_PRED_CMD
252 mov eax, MSR_IA32_PRED_CMD_F_IBPB
253 xor edx, edx
254 wrmsr
255%%no_indirect_branch_barrier:
256%endmacro
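; Example invocation, as done on the exit paths below (RESTORE_STATE_VM64 and SVMR0VMRun):
;     INDIRECT_BRANCH_PREDICTION_BARRIER xDI, CPUMCTX_WSF_IBPB_EXIT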
257
258;;
259; Creates an indirect branch prediction and L1D barrier on CPUs that need and support it.
260; @clobbers eax, edx, ecx
261; @param 1 How to address CPUMCTX.
262; @param 2 Which IBPB flag to test for (CPUMCTX_WSF_IBPB_ENTRY or CPUMCTX_WSF_IBPB_EXIT)
263; @param 3 Which FLUSH flag to test for (CPUMCTX_WSF_L1D_ENTRY)
264; @param 4 Which MDS flag to test for (CPUMCTX_WSF_MDS_ENTRY)
265%macro INDIRECT_BRANCH_PREDICTION_AND_L1_CACHE_BARRIER 4
266 ; Only one test+jmp when the mitigations are disabled for this CPU.
267 test byte [%1 + CPUMCTX.fWorldSwitcher], (%2 | %3 | %4)
268 jz %%no_barrier_needed
269
270 ; The eax:edx value is the same for both.
271 AssertCompile(MSR_IA32_PRED_CMD_F_IBPB == MSR_IA32_FLUSH_CMD_F_L1D)
272 mov eax, MSR_IA32_PRED_CMD_F_IBPB
273 xor edx, edx
274
275 ; Indirect branch barrier.
276 test byte [%1 + CPUMCTX.fWorldSwitcher], %2
277 jz %%no_indirect_branch_barrier
278 mov ecx, MSR_IA32_PRED_CMD
279 wrmsr
280%%no_indirect_branch_barrier:
281
282 ; Level 1 data cache flush.
283 test byte [%1 + CPUMCTX.fWorldSwitcher], %3
284 jz %%no_cache_flush_barrier
285 mov ecx, MSR_IA32_FLUSH_CMD
286 wrmsr
287 jmp %%no_mds_buffer_flushing ; MDS flushing is included in L1D_FLUSH
288%%no_cache_flush_barrier:
289
290 ; MDS buffer flushing.
291 test byte [%1 + CPUMCTX.fWorldSwitcher], %4
292 jz %%no_mds_buffer_flushing
293 sub xSP, xCB ; reserve stack space for the DS selector (same sequence as hmR0MdsClear below)
294 mov [xSP], ds
295 verw [xSP]
296 add xSP, xCB
297%%no_mds_buffer_flushing:
298
299%%no_barrier_needed:
300%endmacro
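; Example invocation, as done on the VMX entry path below (VMXR0StartVM64):
;     INDIRECT_BRANCH_PREDICTION_AND_L1_CACHE_BARRIER xSI, CPUMCTX_WSF_IBPB_ENTRY, CPUMCTX_WSF_L1D_ENTRY, CPUMCTX_WSF_MDS_ENTRY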
301
302
303;*********************************************************************************************************************************
304;* External Symbols *
305;*********************************************************************************************************************************
306%ifdef VBOX_WITH_KERNEL_USING_XMM
307extern NAME(CPUMIsGuestFPUStateActive)
308%endif
309
310
311BEGINCODE
312
313
314;;
315; Restores host-state fields.
316;
317; @returns VBox status code
318; @param f32RestoreHost x86: [ebp + 08h] msc: ecx gcc: edi RestoreHost flags.
319; @param pRestoreHost x86: [ebp + 0ch] msc: rdx gcc: rsi Pointer to the RestoreHost struct.
320;
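; Approximate C prototype, reconstructed from the parameter docs above (authoritative types are in the VMM headers):
;DECLASM(int) VMXRestoreHostState(uint32_t f32RestoreHost, PVMXRESTOREHOST pRestoreHost);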
321ALIGNCODE(16)
322BEGINPROC VMXRestoreHostState
323%ifdef RT_ARCH_AMD64
324 %ifndef ASM_CALL64_GCC
325 ; Use GCC's input registers since we'll be needing both rcx and rdx further
326 ; down with the wrmsr instruction. Use R10 and R11 for saving RDI and RSI
327 ; since the MSC calling convention requires the latter two to be preserved.
328 mov r10, rdi
329 mov r11, rsi
330 mov rdi, rcx
331 mov rsi, rdx
332 %endif
333
334 test edi, VMX_RESTORE_HOST_GDTR
335 jz .test_idtr
336 lgdt [rsi + VMXRESTOREHOST.HostGdtr]
337
338.test_idtr:
339 test edi, VMX_RESTORE_HOST_IDTR
340 jz .test_ds
341 lidt [rsi + VMXRESTOREHOST.HostIdtr]
342
343.test_ds:
344 test edi, VMX_RESTORE_HOST_SEL_DS
345 jz .test_es
346 mov ax, [rsi + VMXRESTOREHOST.uHostSelDS]
347 mov ds, eax
348
349.test_es:
350 test edi, VMX_RESTORE_HOST_SEL_ES
351 jz .test_tr
352 mov ax, [rsi + VMXRESTOREHOST.uHostSelES]
353 mov es, eax
354
355.test_tr:
356 test edi, VMX_RESTORE_HOST_SEL_TR
357 jz .test_fs
358 ; When restoring the TR, we must first clear the busy flag or we'll end up faulting.
359 mov dx, [rsi + VMXRESTOREHOST.uHostSelTR]
360 mov ax, dx
361 and eax, X86_SEL_MASK_OFF_RPL ; mask away TI and RPL bits leaving only the descriptor offset
362 test edi, VMX_RESTORE_HOST_GDT_READ_ONLY | VMX_RESTORE_HOST_GDT_NEED_WRITABLE
363 jnz .gdt_readonly
364 add rax, qword [rsi + VMXRESTOREHOST.HostGdtr + 2] ; xAX <- descriptor offset + GDTR.pGdt.
365 and dword [rax + 4], ~RT_BIT(9) ; clear the busy flag in TSS desc (bits 0-7=base, bit 9=busy bit)
366 ltr dx
367 jmp short .test_fs
368.gdt_readonly:
369 test edi, VMX_RESTORE_HOST_GDT_NEED_WRITABLE
370 jnz .gdt_readonly_need_writable
371 mov rcx, cr0
372 mov r9, rcx
373 add rax, qword [rsi + VMXRESTOREHOST.HostGdtr + 2] ; xAX <- descriptor offset + GDTR.pGdt.
374 and rcx, ~X86_CR0_WP
375 mov cr0, rcx
376 and dword [rax + 4], ~RT_BIT(9) ; clear the busy flag in TSS desc (bits 0-7=base, bit 9=busy bit)
377 ltr dx
378 mov cr0, r9
379 jmp short .test_fs
380.gdt_readonly_need_writable:
381 add rax, qword [rsi + VMXRESTOREHOST.HostGdtrRw + 2] ; xAX <- descriptor offset + GDTR.pGdtRw
382 and dword [rax + 4], ~RT_BIT(9) ; clear the busy flag in TSS desc (bits 0-7=base, bit 9=busy bit)
383 lgdt [rsi + VMXRESTOREHOST.HostGdtrRw]
384 ltr dx
385 lgdt [rsi + VMXRESTOREHOST.HostGdtr] ; load the original GDT
386
387.test_fs:
388 ;
389 ; When restoring the selector values for FS and GS, we'll temporarily trash
390 ; the base address (at least the high 32 bits, but quite possibly the
391 ; whole base address); the wrmsr will restore it correctly. (VT-x actually
392 ; restores the base correctly when leaving guest mode, but not the selector
393 ; value, so there is little problem with interrupts being enabled prior to
394 ; this restore job.)
395 ; We'll disable ints once for both FS and GS as that's probably faster.
396 ;
397 test edi, VMX_RESTORE_HOST_SEL_FS | VMX_RESTORE_HOST_SEL_GS
398 jz .restore_success
399 pushfq
400 cli ; (see above)
401
402 test edi, VMX_RESTORE_HOST_SEL_FS
403 jz .test_gs
404 mov ax, word [rsi + VMXRESTOREHOST.uHostSelFS]
405 mov fs, eax
406 mov eax, dword [rsi + VMXRESTOREHOST.uHostFSBase] ; uHostFSBase - Lo
407 mov edx, dword [rsi + VMXRESTOREHOST.uHostFSBase + 4h] ; uHostFSBase - Hi
408 mov ecx, MSR_K8_FS_BASE
409 wrmsr
410
411.test_gs:
412 test edi, VMX_RESTORE_HOST_SEL_GS
413 jz .restore_flags
414 mov ax, word [rsi + VMXRESTOREHOST.uHostSelGS]
415 mov gs, eax
416 mov eax, dword [rsi + VMXRESTOREHOST.uHostGSBase] ; uHostGSBase - Lo
417 mov edx, dword [rsi + VMXRESTOREHOST.uHostGSBase + 4h] ; uHostGSBase - Hi
418 mov ecx, MSR_K8_GS_BASE
419 wrmsr
420
421.restore_flags:
422 popfq
423
424.restore_success:
425 mov eax, VINF_SUCCESS
426 %ifndef ASM_CALL64_GCC
427 ; Restore RDI and RSI on MSC.
428 mov rdi, r10
429 mov rsi, r11
430 %endif
431%else ; RT_ARCH_X86
432 mov eax, VERR_NOT_IMPLEMENTED
433%endif
434 ret
435ENDPROC VMXRestoreHostState
436
437
438;;
439; Dispatches an NMI to the host.
440;
441ALIGNCODE(16)
442BEGINPROC VMXDispatchHostNmi
443 ; NMI is always vector 2. The IDT[2] IRQ handler cannot be anything else. See Intel spec. 6.3.1 "External Interrupts".
444 int 2
445 ret
446ENDPROC VMXDispatchHostNmi
447
448
449;;
450; Executes VMWRITE, 64-bit value.
451;
452; @returns VBox status code.
453; @param idxField x86: [ebp + 08h] msc: rcx gcc: rdi VMCS index.
454; @param u64Data x86: [ebp + 0ch] msc: rdx gcc: rsi VM field value.
455;
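; Approximate C prototype, mirroring the VMXReadVmcs64 declaration below:
;DECLASM(int) VMXWriteVmcs64(uint32_t idxField, uint64_t u64Data);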
456ALIGNCODE(16)
457BEGINPROC VMXWriteVmcs64
458%ifdef RT_ARCH_AMD64
459 %ifdef ASM_CALL64_GCC
460 and edi, 0ffffffffh
461 xor rax, rax
462 vmwrite rdi, rsi
463 %else
464 and ecx, 0ffffffffh
465 xor rax, rax
466 vmwrite rcx, rdx
467 %endif
468%else ; RT_ARCH_X86
469 mov ecx, [esp + 4] ; idxField
470 lea edx, [esp + 8] ; &u64Data
471 vmwrite ecx, [edx] ; low dword
472 jz .done
473 jc .done
474 inc ecx
475 xor eax, eax
476 vmwrite ecx, [edx + 4] ; high dword
477.done:
478%endif ; RT_ARCH_X86
479 jnc .valid_vmcs
480 mov eax, VERR_VMX_INVALID_VMCS_PTR
481 ret
482.valid_vmcs:
483 jnz .the_end
484 mov eax, VERR_VMX_INVALID_VMCS_FIELD
485.the_end:
486 ret
487ENDPROC VMXWriteVmcs64
488
489
490;;
491; Executes VMREAD, 64-bit value.
492;
493; @returns VBox status code.
494; @param idxField VMCS index.
495; @param pData Where to store VM field value.
496;
497;DECLASM(int) VMXReadVmcs64(uint32_t idxField, uint64_t *pData);
498ALIGNCODE(16)
499BEGINPROC VMXReadVmcs64
500%ifdef RT_ARCH_AMD64
501 %ifdef ASM_CALL64_GCC
502 and edi, 0ffffffffh
503 xor rax, rax
504 vmread [rsi], rdi
505 %else
506 and ecx, 0ffffffffh
507 xor rax, rax
508 vmread [rdx], rcx
509 %endif
510%else ; RT_ARCH_X86
511 mov ecx, [esp + 4] ; idxField
512 mov edx, [esp + 8] ; pData
513 vmread [edx], ecx ; low dword
514 jz .done
515 jc .done
516 inc ecx
517 xor eax, eax
518 vmread [edx + 4], ecx ; high dword
519.done:
520%endif ; RT_ARCH_X86
521 jnc .valid_vmcs
522 mov eax, VERR_VMX_INVALID_VMCS_PTR
523 ret
524.valid_vmcs:
525 jnz .the_end
526 mov eax, VERR_VMX_INVALID_VMCS_FIELD
527.the_end:
528 ret
529ENDPROC VMXReadVmcs64
530
531
532;;
533; Executes VMREAD, 32-bit value.
534;
535; @returns VBox status code.
536; @param idxField VMCS index.
537; @param pu32Data Where to store VM field value.
538;
539;DECLASM(int) VMXReadVmcs32(uint32_t idxField, uint32_t *pu32Data);
540ALIGNCODE(16)
541BEGINPROC VMXReadVmcs32
542%ifdef RT_ARCH_AMD64
543 %ifdef ASM_CALL64_GCC
544 and edi, 0ffffffffh
545 xor rax, rax
546 vmread r10, rdi
547 mov [rsi], r10d
548 %else
549 and ecx, 0ffffffffh
550 xor rax, rax
551 vmread r10, rcx
552 mov [rdx], r10d
553 %endif
554%else ; RT_ARCH_X86
555 mov ecx, [esp + 4] ; idxField
556 mov edx, [esp + 8] ; pu32Data
557 xor eax, eax
558 vmread [edx], ecx
559%endif ; RT_ARCH_X86
560 jnc .valid_vmcs
561 mov eax, VERR_VMX_INVALID_VMCS_PTR
562 ret
563.valid_vmcs:
564 jnz .the_end
565 mov eax, VERR_VMX_INVALID_VMCS_FIELD
566.the_end:
567 ret
568ENDPROC VMXReadVmcs32
569
570
571;;
572; Executes VMWRITE, 32-bit value.
573;
574; @returns VBox status code.
575; @param idxField VMCS index.
576; @param u32Data VM field value to write.
577;
578;DECLASM(int) VMXWriteVmcs32(uint32_t idxField, uint32_t u32Data);
579ALIGNCODE(16)
580BEGINPROC VMXWriteVmcs32
581%ifdef RT_ARCH_AMD64
582 %ifdef ASM_CALL64_GCC
583 and edi, 0ffffffffh
584 and esi, 0ffffffffh
585 xor rax, rax
586 vmwrite rdi, rsi
587 %else
588 and ecx, 0ffffffffh
589 and edx, 0ffffffffh
590 xor rax, rax
591 vmwrite rcx, rdx
592 %endif
593%else ; RT_ARCH_X86
594 mov ecx, [esp + 4] ; idxField
595 mov edx, [esp + 8] ; u32Data
596 xor eax, eax
597 vmwrite ecx, edx
598%endif ; RT_ARCH_X86
599 jnc .valid_vmcs
600 mov eax, VERR_VMX_INVALID_VMCS_PTR
601 ret
602.valid_vmcs:
603 jnz .the_end
604 mov eax, VERR_VMX_INVALID_VMCS_FIELD
605.the_end:
606 ret
607ENDPROC VMXWriteVmcs32
608
609
610;;
611; Executes VMXON.
612;
613; @returns VBox status code.
614; @param HCPhysVMXOn Physical address of VMXON structure.
615;
616;DECLASM(int) VMXEnable(RTHCPHYS HCPhysVMXOn);
617BEGINPROC VMXEnable
618%ifdef RT_ARCH_AMD64
619 xor rax, rax
620 %ifdef ASM_CALL64_GCC
621 push rdi
622 %else
623 push rcx
624 %endif
625 vmxon [rsp]
626%else ; RT_ARCH_X86
627 xor eax, eax
628 vmxon [esp + 4]
629%endif ; RT_ARCH_X86
630 jnc .good
631 mov eax, VERR_VMX_INVALID_VMXON_PTR
632 jmp .the_end
633
634.good:
635 jnz .the_end
636 mov eax, VERR_VMX_VMXON_FAILED
637
638.the_end:
639%ifdef RT_ARCH_AMD64
640 add rsp, 8
641%endif
642 ret
643ENDPROC VMXEnable
644
645
646;;
647; Executes VMXOFF.
648;
649;DECLASM(void) VMXDisable(void);
650BEGINPROC VMXDisable
651 vmxoff
652.the_end:
653 ret
654ENDPROC VMXDisable
655
656
657;;
658; Executes VMCLEAR.
659;
660; @returns VBox status code.
661; @param HCPhysVmcs Physical address of VM control structure.
662;
663;DECLASM(int) VMXClearVmcs(RTHCPHYS HCPhysVmcs);
664ALIGNCODE(16)
665BEGINPROC VMXClearVmcs
666%ifdef RT_ARCH_AMD64
667 xor rax, rax
668 %ifdef ASM_CALL64_GCC
669 push rdi
670 %else
671 push rcx
672 %endif
673 vmclear [rsp]
674%else ; RT_ARCH_X86
675 xor eax, eax
676 vmclear [esp + 4]
677%endif ; RT_ARCH_X86
678 jnc .the_end
679 mov eax, VERR_VMX_INVALID_VMCS_PTR
680.the_end:
681%ifdef RT_ARCH_AMD64
682 add rsp, 8
683%endif
684 ret
685ENDPROC VMXClearVmcs
686
687
688;;
689; Executes VMPTRLD.
690;
691; @returns VBox status code.
692; @param HCPhysVmcs Physical address of VMCS structure.
693;
694;DECLASM(int) VMXLoadVmcs(RTHCPHYS HCPhysVmcs);
695ALIGNCODE(16)
696BEGINPROC VMXLoadVmcs
697%ifdef RT_ARCH_AMD64
698 xor rax, rax
699 %ifdef ASM_CALL64_GCC
700 push rdi
701 %else
702 push rcx
703 %endif
704 vmptrld [rsp]
705%else
706 xor eax, eax
707 vmptrld [esp + 4]
708%endif
709 jnc .the_end
710 mov eax, VERR_VMX_INVALID_VMCS_PTR
711.the_end:
712%ifdef RT_ARCH_AMD64
713 add rsp, 8
714%endif
715 ret
716ENDPROC VMXLoadVmcs
717
718
719;;
720; Executes VMPTRST.
721;
722; @returns VBox status code.
723; @param pVMCS msc:rcx gcc:rdi x86:[esp + 04h] Address that will receive the current VMCS pointer.
724;
725;DECLASM(int) VMXGetCurrentVmcs(RTHCPHYS *pVMCS);
726BEGINPROC VMXGetCurrentVmcs
727%ifdef RT_OS_OS2
728 mov eax, VERR_NOT_SUPPORTED
729 ret
730%else
731 %ifdef RT_ARCH_AMD64
732 %ifdef ASM_CALL64_GCC
733 vmptrst qword [rdi]
734 %else
735 vmptrst qword [rcx]
736 %endif
737 %else
738 vmptrst qword [esp+04h]
739 %endif
740 xor eax, eax
741.the_end:
742 ret
743%endif
744ENDPROC VMXGetCurrentVmcs
745
746;;
747; Invalidate a page using INVEPT.
748;
749; @param enmTlbFlush msc:ecx gcc:edi x86:[esp+04] Type of flush.
750; @param pDescriptor msc:edx gcc:esi x86:[esp+08] Descriptor pointer.
751;
752;DECLASM(int) VMXR0InvEPT(VMXTLBFLUSHEPT enmTlbFlush, uint64_t *pDescriptor);
753BEGINPROC VMXR0InvEPT
754%ifdef RT_ARCH_AMD64
755 %ifdef ASM_CALL64_GCC
756 and edi, 0ffffffffh
757 xor rax, rax
758; invept rdi, qword [rsi]
759 DB 0x66, 0x0F, 0x38, 0x80, 0x3E
760 %else
761 and ecx, 0ffffffffh
762 xor rax, rax
763; invept rcx, qword [rdx]
764 DB 0x66, 0x0F, 0x38, 0x80, 0xA
765 %endif
766%else
767 mov ecx, [esp + 4]
768 mov edx, [esp + 8]
769 xor eax, eax
770; invept ecx, qword [edx]
771 DB 0x66, 0x0F, 0x38, 0x80, 0xA
772%endif
773 jnc .valid_vmcs
774 mov eax, VERR_VMX_INVALID_VMCS_PTR
775 ret
776.valid_vmcs:
777 jnz .the_end
778 mov eax, VERR_INVALID_PARAMETER
779.the_end:
780 ret
781ENDPROC VMXR0InvEPT
782
783
784;;
785; Invalidate a page using INVVPID.
786;
787; @param enmTlbFlush msc:ecx gcc:edi x86:[esp+04] Type of flush
788; @param pDescriptor msc:edx gcc:esi x86:[esp+08] Descriptor pointer
789;
790;DECLASM(int) VMXR0InvVPID(VMXTLBFLUSHVPID enmTlbFlush, uint64_t *pDescriptor);
791BEGINPROC VMXR0InvVPID
792%ifdef RT_ARCH_AMD64
793 %ifdef ASM_CALL64_GCC
794 and edi, 0ffffffffh
795 xor rax, rax
796; invvpid rdi, qword [rsi]
797 DB 0x66, 0x0F, 0x38, 0x81, 0x3E
798 %else
799 and ecx, 0ffffffffh
800 xor rax, rax
801; invvpid rcx, qword [rdx]
802 DB 0x66, 0x0F, 0x38, 0x81, 0xA
803 %endif
804%else
805 mov ecx, [esp + 4]
806 mov edx, [esp + 8]
807 xor eax, eax
808; invvpid ecx, qword [edx]
809 DB 0x66, 0x0F, 0x38, 0x81, 0xA
810%endif
811 jnc .valid_vmcs
812 mov eax, VERR_VMX_INVALID_VMCS_PTR
813 ret
814.valid_vmcs:
815 jnz .the_end
816 mov eax, VERR_INVALID_PARAMETER
817.the_end:
818 ret
819ENDPROC VMXR0InvVPID
820
821
822%if GC_ARCH_BITS == 64
823;;
824; Executes INVLPGA.
825;
826; @param pPageGC msc:rcx gcc:rdi x86:[esp+04] Virtual page to invalidate
827; @param uASID msc:rdx gcc:rsi x86:[esp+0C] Tagged TLB id
828;
829;DECLASM(void) SVMR0InvlpgA(RTGCPTR pPageGC, uint32_t uASID);
830BEGINPROC SVMR0InvlpgA
831%ifdef RT_ARCH_AMD64
832 %ifdef ASM_CALL64_GCC
833 mov rax, rdi
834 mov rcx, rsi
835 %else
836 mov rax, rcx
837 mov rcx, rdx
838 %endif
839%else
840 mov eax, [esp + 4]
841 mov ecx, [esp + 0Ch]
842%endif
843 invlpga [xAX], ecx
844 ret
845ENDPROC SVMR0InvlpgA
846
847%else ; GC_ARCH_BITS != 64
848;;
849; Executes INVLPGA
850;
851; @param pPageGC msc:ecx gcc:edi x86:[esp+04] Virtual page to invalidate
852; @param uASID msc:edx gcc:esi x86:[esp+08] Tagged TLB id
853;
854;DECLASM(void) SVMR0InvlpgA(RTGCPTR pPageGC, uint32_t uASID);
855BEGINPROC SVMR0InvlpgA
856%ifdef RT_ARCH_AMD64
857 %ifdef ASM_CALL64_GCC
858 movzx rax, edi
859 mov ecx, esi
860 %else
861 ; from http://www.cs.cmu.edu/~fp/courses/15213-s06/misc/asm64-handout.pdf:
862 ; "Perhaps unexpectedly, instructions that move or generate 32-bit register
863 ; values also set the upper 32 bits of the register to zero. Consequently
864 ; there is no need for an instruction movzlq."
865 mov eax, ecx
866 mov ecx, edx
867 %endif
868%else
869 mov eax, [esp + 4]
870 mov ecx, [esp + 8]
871%endif
872 invlpga [xAX], ecx
873 ret
874ENDPROC SVMR0InvlpgA
875
876%endif ; GC_ARCH_BITS != 64
877
878
879%ifdef VBOX_WITH_KERNEL_USING_XMM
880
881;;
882; Wrapper around vmx.pfnStartVM that preserves host XMM registers and
883; loads the guest ones when necessary.
884;
885; @cproto DECLASM(int) hmR0VMXStartVMWrapXMM(RTHCUINT fResume, PCPUMCTX pCtx, void *pvUnused, PVM pVM,
886; PVMCPU pVCpu, PFNHMVMXSTARTVM pfnStartVM);
887;
888; @returns eax
889;
890; @param fResumeVM msc:rcx
891; @param pCtx msc:rdx
892; @param pvUnused msc:r8
893; @param pVM msc:r9
894; @param pVCpu msc:[rbp+30h] The cross context virtual CPU structure of the calling EMT.
895; @param pfnStartVM msc:[rbp+38h]
896;
897; @remarks This is essentially the same code as hmR0SVMRunWrapXMM, only the parameters differ a little bit.
898;
899; @remarks Drivers shouldn't use AVX registers without saving+loading:
900; https://msdn.microsoft.com/en-us/library/windows/hardware/ff545910%28v=vs.85%29.aspx?f=255&MSPPError=-2147217396
901; However the compiler docs have a different idea:
902; https://msdn.microsoft.com/en-us/library/9z1stfyw.aspx
903; We'll go with the former for now.
904;
905; ASSUMING 64-bit and windows for now.
906;
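; Note: pVCpu is the fifth argument, so it is passed on the stack; the code below therefore copies it
; to [xSP + 020h] (the fifth-parameter slot just above the 32-byte MSC shadow space) before each call
; to pfnStartVM.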
907ALIGNCODE(16)
908BEGINPROC hmR0VMXStartVMWrapXMM
909 push xBP
910 mov xBP, xSP
911 sub xSP, 0b0h + 040h ; Don't bother optimizing the frame size.
912
913 ; Spill input parameters.
914 mov [xBP + 010h], rcx ; fResumeVM
915 mov [xBP + 018h], rdx ; pCtx
916 mov [xBP + 020h], r8 ; pvUnused
917 mov [xBP + 028h], r9 ; pVM
918
919 ; Ask CPUM whether we've started using the FPU yet.
920 mov rcx, [xBP + 30h] ; pVCpu
921 call NAME(CPUMIsGuestFPUStateActive)
922 test al, al
923 jnz .guest_fpu_state_active
924
925 ; No need to mess with XMM registers; just call the start routine and return.
926 mov r11, [xBP + 38h] ; pfnStartVM
927 mov r10, [xBP + 30h] ; pVCpu
928 mov [xSP + 020h], r10
929 mov rcx, [xBP + 010h] ; fResumeVM
930 mov rdx, [xBP + 018h] ; pCtx
931 mov r8, [xBP + 020h] ; pvUnused
932 mov r9, [xBP + 028h] ; pVM
933 call r11
934
935 leave
936 ret
937
938ALIGNCODE(8)
939.guest_fpu_state_active:
940 ; Save the non-volatile host XMM registers.
941 movdqa [rsp + 040h + 000h], xmm6
942 movdqa [rsp + 040h + 010h], xmm7
943 movdqa [rsp + 040h + 020h], xmm8
944 movdqa [rsp + 040h + 030h], xmm9
945 movdqa [rsp + 040h + 040h], xmm10
946 movdqa [rsp + 040h + 050h], xmm11
947 movdqa [rsp + 040h + 060h], xmm12
948 movdqa [rsp + 040h + 070h], xmm13
949 movdqa [rsp + 040h + 080h], xmm14
950 movdqa [rsp + 040h + 090h], xmm15
951 stmxcsr [rsp + 040h + 0a0h]
952
953 mov r10, [xBP + 018h] ; pCtx
954 mov eax, [r10 + CPUMCTX.fXStateMask]
955 test eax, eax
956 jz .guest_fpu_state_manually
957
958 ;
959 ; Using XSAVE to load the guest XMM, YMM and ZMM registers.
960 ;
961 and eax, CPUM_VOLATILE_XSAVE_GUEST_COMPONENTS
962 xor edx, edx
963 mov r10, [r10 + CPUMCTX.pXStateR0]
964 xrstor [r10]
965
966 ; Make the call (same as in the other case).
967 mov r11, [xBP + 38h] ; pfnStartVM
968 mov r10, [xBP + 30h] ; pVCpu
969 mov [xSP + 020h], r10
970 mov rcx, [xBP + 010h] ; fResumeVM
971 mov rdx, [xBP + 018h] ; pCtx
972 mov r8, [xBP + 020h] ; pvUnused
973 mov r9, [xBP + 028h] ; pVM
974 call r11
975
976 mov r11d, eax ; save return value (xsave below uses eax)
977
978 ; Save the guest XMM registers.
979 mov r10, [xBP + 018h] ; pCtx
980 mov eax, [r10 + CPUMCTX.fXStateMask]
981 and eax, CPUM_VOLATILE_XSAVE_GUEST_COMPONENTS
982 xor edx, edx
983 mov r10, [r10 + CPUMCTX.pXStateR0]
984 xsave [r10]
985
986 mov eax, r11d ; restore return value
987
988.restore_non_volatile_host_xmm_regs:
989 ; Load the non-volatile host XMM registers.
990 movdqa xmm6, [rsp + 040h + 000h]
991 movdqa xmm7, [rsp + 040h + 010h]
992 movdqa xmm8, [rsp + 040h + 020h]
993 movdqa xmm9, [rsp + 040h + 030h]
994 movdqa xmm10, [rsp + 040h + 040h]
995 movdqa xmm11, [rsp + 040h + 050h]
996 movdqa xmm12, [rsp + 040h + 060h]
997 movdqa xmm13, [rsp + 040h + 070h]
998 movdqa xmm14, [rsp + 040h + 080h]
999 movdqa xmm15, [rsp + 040h + 090h]
1000 ldmxcsr [rsp + 040h + 0a0h]
1001 leave
1002 ret
1003
1004 ;
1005 ; No XSAVE, load and save the guest XMM registers manually.
1006 ;
1007.guest_fpu_state_manually:
1008 ; Load the full guest XMM register state.
1009 mov r10, [r10 + CPUMCTX.pXStateR0]
1010 movdqa xmm0, [r10 + XMM_OFF_IN_X86FXSTATE + 000h]
1011 movdqa xmm1, [r10 + XMM_OFF_IN_X86FXSTATE + 010h]
1012 movdqa xmm2, [r10 + XMM_OFF_IN_X86FXSTATE + 020h]
1013 movdqa xmm3, [r10 + XMM_OFF_IN_X86FXSTATE + 030h]
1014 movdqa xmm4, [r10 + XMM_OFF_IN_X86FXSTATE + 040h]
1015 movdqa xmm5, [r10 + XMM_OFF_IN_X86FXSTATE + 050h]
1016 movdqa xmm6, [r10 + XMM_OFF_IN_X86FXSTATE + 060h]
1017 movdqa xmm7, [r10 + XMM_OFF_IN_X86FXSTATE + 070h]
1018 movdqa xmm8, [r10 + XMM_OFF_IN_X86FXSTATE + 080h]
1019 movdqa xmm9, [r10 + XMM_OFF_IN_X86FXSTATE + 090h]
1020 movdqa xmm10, [r10 + XMM_OFF_IN_X86FXSTATE + 0a0h]
1021 movdqa xmm11, [r10 + XMM_OFF_IN_X86FXSTATE + 0b0h]
1022 movdqa xmm12, [r10 + XMM_OFF_IN_X86FXSTATE + 0c0h]
1023 movdqa xmm13, [r10 + XMM_OFF_IN_X86FXSTATE + 0d0h]
1024 movdqa xmm14, [r10 + XMM_OFF_IN_X86FXSTATE + 0e0h]
1025 movdqa xmm15, [r10 + XMM_OFF_IN_X86FXSTATE + 0f0h]
1026 ldmxcsr [r10 + X86FXSTATE.MXCSR]
1027
1028 ; Make the call (same as in the other case).
1029 mov r11, [xBP + 38h] ; pfnStartVM
1030 mov r10, [xBP + 30h] ; pVCpu
1031 mov [xSP + 020h], r10
1032 mov rcx, [xBP + 010h] ; fResumeVM
1033 mov rdx, [xBP + 018h] ; pCtx
1034 mov r8, [xBP + 020h] ; pvUnused
1035 mov r9, [xBP + 028h] ; pVM
1036 call r11
1037
1038 ; Save the guest XMM registers.
1039 mov r10, [xBP + 018h] ; pCtx
1040 mov r10, [r10 + CPUMCTX.pXStateR0]
1041 stmxcsr [r10 + X86FXSTATE.MXCSR]
1042 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 000h], xmm0
1043 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 010h], xmm1
1044 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 020h], xmm2
1045 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 030h], xmm3
1046 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 040h], xmm4
1047 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 050h], xmm5
1048 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 060h], xmm6
1049 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 070h], xmm7
1050 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 080h], xmm8
1051 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 090h], xmm9
1052 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 0a0h], xmm10
1053 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 0b0h], xmm11
1054 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 0c0h], xmm12
1055 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 0d0h], xmm13
1056 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 0e0h], xmm14
1057 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 0f0h], xmm15
1058 jmp .restore_non_volatile_host_xmm_regs
1059ENDPROC hmR0VMXStartVMWrapXMM
1060
1061;;
1062; Wrapper around svm.pfnVMRun that preserves host XMM registers and
1063; loads the guest ones when necessary.
1064;
1065; @cproto DECLASM(int) hmR0SVMRunWrapXMM(RTHCPHYS HCPhysVmcbHost, RTHCPHYS HCPhysVmcb, PCPUMCTX pCtx, PVM pVM, PVMCPU pVCpu,
1066; PFNHMSVMVMRUN pfnVMRun);
1067;
1068; @returns eax
1069;
1070; @param HCPhysVmcbHost msc:rcx
1071; @param HCPhysVmcb msc:rdx
1072; @param pCtx msc:r8
1073; @param pVM msc:r9
1074; @param pVCpu msc:[rbp+30h] The cross context virtual CPU structure of the calling EMT.
1075; @param pfnVMRun msc:[rbp+38h]
1076;
1077; @remarks This is essentially the same code as hmR0VMXStartVMWrapXMM, only the parameters differ a little bit.
1078;
1079; @remarks Drivers shouldn't use AVX registers without saving+loading:
1080; https://msdn.microsoft.com/en-us/library/windows/hardware/ff545910%28v=vs.85%29.aspx?f=255&MSPPError=-2147217396
1081; However the compiler docs have a different idea:
1082; https://msdn.microsoft.com/en-us/library/9z1stfyw.aspx
1083; We'll go with the former for now.
1084;
1085; ASSUMING 64-bit and windows for now.
1086ALIGNCODE(16)
1087BEGINPROC hmR0SVMRunWrapXMM
1088 push xBP
1089 mov xBP, xSP
1090 sub xSP, 0b0h + 040h ; don't bother optimizing the frame size
1091
1092 ; Spill input parameters.
1093 mov [xBP + 010h], rcx ; HCPhysVmcbHost
1094 mov [xBP + 018h], rdx ; HCPhysVmcb
1095 mov [xBP + 020h], r8 ; pCtx
1096 mov [xBP + 028h], r9 ; pVM
1097
1098 ; Ask CPUM whether we've started using the FPU yet.
1099 mov rcx, [xBP + 30h] ; pVCpu
1100 call NAME(CPUMIsGuestFPUStateActive)
1101 test al, al
1102 jnz .guest_fpu_state_active
1103
1104 ; No need to mess with XMM registers; just call the start routine and return.
1105 mov r11, [xBP + 38h] ; pfnVMRun
1106 mov r10, [xBP + 30h] ; pVCpu
1107 mov [xSP + 020h], r10
1108 mov rcx, [xBP + 010h] ; HCPhysVmcbHost
1109 mov rdx, [xBP + 018h] ; HCPhysVmcb
1110 mov r8, [xBP + 020h] ; pCtx
1111 mov r9, [xBP + 028h] ; pVM
1112 call r11
1113
1114 leave
1115 ret
1116
1117ALIGNCODE(8)
1118.guest_fpu_state_active:
1119 ; Save the non-volatile host XMM registers.
1120 movdqa [rsp + 040h + 000h], xmm6
1121 movdqa [rsp + 040h + 010h], xmm7
1122 movdqa [rsp + 040h + 020h], xmm8
1123 movdqa [rsp + 040h + 030h], xmm9
1124 movdqa [rsp + 040h + 040h], xmm10
1125 movdqa [rsp + 040h + 050h], xmm11
1126 movdqa [rsp + 040h + 060h], xmm12
1127 movdqa [rsp + 040h + 070h], xmm13
1128 movdqa [rsp + 040h + 080h], xmm14
1129 movdqa [rsp + 040h + 090h], xmm15
1130 stmxcsr [rsp + 040h + 0a0h]
1131
1132 mov r10, [xBP + 020h] ; pCtx
1133 mov eax, [r10 + CPUMCTX.fXStateMask]
1134 test eax, eax
1135 jz .guest_fpu_state_manually
1136
1137 ;
1138 ; Using XSAVE.
1139 ;
1140 and eax, CPUM_VOLATILE_XSAVE_GUEST_COMPONENTS
1141 xor edx, edx
1142 mov r10, [r10 + CPUMCTX.pXStateR0]
1143 xrstor [r10]
1144
1145 ; Make the call (same as in the other case).
1146 mov r11, [xBP + 38h] ; pfnVMRun
1147 mov r10, [xBP + 30h] ; pVCpu
1148 mov [xSP + 020h], r10
1149 mov rcx, [xBP + 010h] ; HCPhysVmcbHost
1150 mov rdx, [xBP + 018h] ; HCPhysVmcb
1151 mov r8, [xBP + 020h] ; pCtx
1152 mov r9, [xBP + 028h] ; pVM
1153 call r11
1154
1155 mov r11d, eax ; save return value (xsave below uses eax)
1156
1157 ; Save the guest XMM registers.
1158 mov r10, [xBP + 020h] ; pCtx
1159 mov eax, [r10 + CPUMCTX.fXStateMask]
1160 and eax, CPUM_VOLATILE_XSAVE_GUEST_COMPONENTS
1161 xor edx, edx
1162 mov r10, [r10 + CPUMCTX.pXStateR0]
1163 xsave [r10]
1164
1165 mov eax, r11d ; restore return value
1166
1167.restore_non_volatile_host_xmm_regs:
1168 ; Load the non-volatile host XMM registers.
1169 movdqa xmm6, [rsp + 040h + 000h]
1170 movdqa xmm7, [rsp + 040h + 010h]
1171 movdqa xmm8, [rsp + 040h + 020h]
1172 movdqa xmm9, [rsp + 040h + 030h]
1173 movdqa xmm10, [rsp + 040h + 040h]
1174 movdqa xmm11, [rsp + 040h + 050h]
1175 movdqa xmm12, [rsp + 040h + 060h]
1176 movdqa xmm13, [rsp + 040h + 070h]
1177 movdqa xmm14, [rsp + 040h + 080h]
1178 movdqa xmm15, [rsp + 040h + 090h]
1179 ldmxcsr [rsp + 040h + 0a0h]
1180 leave
1181 ret
1182
1183 ;
1184 ; No XSAVE, load and save the guest XMM registers manually.
1185 ;
1186.guest_fpu_state_manually:
1187 ; Load the full guest XMM register state.
1188 mov r10, [r10 + CPUMCTX.pXStateR0]
1189 movdqa xmm0, [r10 + XMM_OFF_IN_X86FXSTATE + 000h]
1190 movdqa xmm1, [r10 + XMM_OFF_IN_X86FXSTATE + 010h]
1191 movdqa xmm2, [r10 + XMM_OFF_IN_X86FXSTATE + 020h]
1192 movdqa xmm3, [r10 + XMM_OFF_IN_X86FXSTATE + 030h]
1193 movdqa xmm4, [r10 + XMM_OFF_IN_X86FXSTATE + 040h]
1194 movdqa xmm5, [r10 + XMM_OFF_IN_X86FXSTATE + 050h]
1195 movdqa xmm6, [r10 + XMM_OFF_IN_X86FXSTATE + 060h]
1196 movdqa xmm7, [r10 + XMM_OFF_IN_X86FXSTATE + 070h]
1197 movdqa xmm8, [r10 + XMM_OFF_IN_X86FXSTATE + 080h]
1198 movdqa xmm9, [r10 + XMM_OFF_IN_X86FXSTATE + 090h]
1199 movdqa xmm10, [r10 + XMM_OFF_IN_X86FXSTATE + 0a0h]
1200 movdqa xmm11, [r10 + XMM_OFF_IN_X86FXSTATE + 0b0h]
1201 movdqa xmm12, [r10 + XMM_OFF_IN_X86FXSTATE + 0c0h]
1202 movdqa xmm13, [r10 + XMM_OFF_IN_X86FXSTATE + 0d0h]
1203 movdqa xmm14, [r10 + XMM_OFF_IN_X86FXSTATE + 0e0h]
1204 movdqa xmm15, [r10 + XMM_OFF_IN_X86FXSTATE + 0f0h]
1205 ldmxcsr [r10 + X86FXSTATE.MXCSR]
1206
1207 ; Make the call (same as in the other case).
1208 mov r11, [xBP + 38h] ; pfnVMRun
1209 mov r10, [xBP + 30h] ; pVCpu
1210 mov [xSP + 020h], r10
1211 mov rcx, [xBP + 010h] ; HCPhysVmcbHost
1212 mov rdx, [xBP + 018h] ; HCPhysVmcb
1213 mov r8, [xBP + 020h] ; pCtx
1214 mov r9, [xBP + 028h] ; pVM
1215 call r11
1216
1217 ; Save the guest XMM registers.
1218 mov r10, [xBP + 020h] ; pCtx
1219 mov r10, [r10 + CPUMCTX.pXStateR0]
1220 stmxcsr [r10 + X86FXSTATE.MXCSR]
1221 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 000h], xmm0
1222 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 010h], xmm1
1223 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 020h], xmm2
1224 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 030h], xmm3
1225 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 040h], xmm4
1226 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 050h], xmm5
1227 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 060h], xmm6
1228 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 070h], xmm7
1229 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 080h], xmm8
1230 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 090h], xmm9
1231 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 0a0h], xmm10
1232 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 0b0h], xmm11
1233 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 0c0h], xmm12
1234 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 0d0h], xmm13
1235 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 0e0h], xmm14
1236 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 0f0h], xmm15
1237 jmp .restore_non_volatile_host_xmm_regs
1238ENDPROC hmR0SVMRunWrapXMM
1239
1240%endif ; VBOX_WITH_KERNEL_USING_XMM
1241
1242
1243%ifdef RT_ARCH_AMD64
1244;; @def RESTORE_STATE_VM64
1245; Macro restoring essential host state and updating guest state
1246; for 64-bit host, 64-bit guest for VT-x.
1247;
1248%macro RESTORE_STATE_VM64 0
1249 ; Restore base and limit of the IDTR & GDTR.
1250 %ifndef VMX_SKIP_IDTR
1251 lidt [xSP]
1252 add xSP, xCB * 2
1253 %endif
1254 %ifndef VMX_SKIP_GDTR
1255 lgdt [xSP]
1256 add xSP, xCB * 2
1257 %endif
1258
1259 push xDI
1260 %ifndef VMX_SKIP_TR
1261 mov xDI, [xSP + xCB * 3] ; pCtx (*3 to skip the saved xDI, TR, LDTR)
1262 %else
1263 mov xDI, [xSP + xCB * 2] ; pCtx (*2 to skip the saved xDI, LDTR)
1264 %endif
1265
1266 mov qword [xDI + CPUMCTX.eax], rax
1267 mov rax, SPECTRE_FILLER64
1268 mov qword [xDI + CPUMCTX.ebx], rbx
1269 mov rbx, rax
1270 mov qword [xDI + CPUMCTX.ecx], rcx
1271 mov rcx, rax
1272 mov qword [xDI + CPUMCTX.edx], rdx
1273 mov rdx, rax
1274 mov qword [xDI + CPUMCTX.esi], rsi
1275 mov rsi, rax
1276 mov qword [xDI + CPUMCTX.ebp], rbp
1277 mov rbp, rax
1278 mov qword [xDI + CPUMCTX.r8], r8
1279 mov r8, rax
1280 mov qword [xDI + CPUMCTX.r9], r9
1281 mov r9, rax
1282 mov qword [xDI + CPUMCTX.r10], r10
1283 mov r10, rax
1284 mov qword [xDI + CPUMCTX.r11], r11
1285 mov r11, rax
1286 mov qword [xDI + CPUMCTX.r12], r12
1287 mov r12, rax
1288 mov qword [xDI + CPUMCTX.r13], r13
1289 mov r13, rax
1290 mov qword [xDI + CPUMCTX.r14], r14
1291 mov r14, rax
1292 mov qword [xDI + CPUMCTX.r15], r15
1293 mov r15, rax
1294 mov rax, cr2
1295 mov qword [xDI + CPUMCTX.cr2], rax
1296
1297 pop xAX ; The guest rdi we pushed above
1298 mov qword [xDI + CPUMCTX.edi], rax
1299
1300 ; Fight spectre.
1301 INDIRECT_BRANCH_PREDICTION_BARRIER xDI, CPUMCTX_WSF_IBPB_EXIT
1302
1303 %ifndef VMX_SKIP_TR
1304 ; Restore TSS selector; must mark it as not busy before using ltr!
1305 ; ASSUME that this is supposed to be 'BUSY' (saves 20-30 ticks on the T42p).
1306 ; @todo get rid of sgdt
1307 pop xBX ; Saved TR
1308 sub xSP, xCB * 2
1309 sgdt [xSP]
1310 mov xAX, xBX
1311 and eax, X86_SEL_MASK_OFF_RPL ; mask away TI and RPL bits leaving only the descriptor offset
1312 add xAX, [xSP + 2] ; eax <- GDTR.address + descriptor offset
1313 and dword [xAX + 4], ~RT_BIT(9) ; clear the busy flag in TSS desc (bits 0-7=base, bit 9=busy bit)
1314 ltr bx
1315 add xSP, xCB * 2
1316 %endif
1317
1318 pop xAX ; Saved LDTR
1319 cmp eax, 0
1320 je %%skip_ldt_write64
1321 lldt ax
1322
1323%%skip_ldt_write64:
1324 pop xSI ; pCtx (needed in rsi by the macros below)
1325
1326 ; Restore segment registers.
1327 MYPOPSEGS xAX, ax
1328
1329 ; Restore the host XCR0 if necessary.
1330 pop xCX
1331 test ecx, ecx
1332 jnz %%xcr0_after_skip
1333 pop xAX
1334 pop xDX
1335 xsetbv ; ecx is already zero.
1336%%xcr0_after_skip:
1337
1338 ; Restore general purpose registers.
1339 MYPOPAD
1340%endmacro
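; RESTORE_STATE_VM64 is expanded on all three exit paths of VMXR0StartVM64 below:
; .vmlaunch64_done, .vmxstart64_invalid_vmcs_ptr and .vmxstart64_start_failed.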
1341
1342
1343;;
1344; Prepares for and executes VMLAUNCH/VMRESUME (64 bits guest mode)
1345;
1346; @returns VBox status code
1347; @param fResume msc:rcx, gcc:rdi Whether to use vmlaunch/vmresume.
1348; @param pCtx msc:rdx, gcc:rsi Pointer to the guest-CPU context.
1349; @param pvUnused msc:r8, gcc:rdx Unused argument.
1350; @param pVM msc:r9, gcc:rcx The cross context VM structure.
1351; @param pVCpu msc:[ebp+30], gcc:r8 The cross context virtual CPU structure of the calling EMT.
1352;
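; Note on XCR0 handling: the entry code below pushes either the saved host XCR0 followed by a zero
; sentinel, or just a 3fh sentinel; RESTORE_STATE_VM64 pops that sentinel into ecx and only executes
; xsetbv when it is zero.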
1353ALIGNCODE(16)
1354BEGINPROC VMXR0StartVM64
1355 push xBP
1356 mov xBP, xSP
1357
1358 pushf
1359 cli
1360
1361 ; Save all general purpose host registers.
1362 MYPUSHAD
1363
1364 ; First we have to save some final CPU context registers.
1365 lea r10, [.vmlaunch64_done wrt rip]
1366 mov rax, VMX_VMCS_HOST_RIP ; return address (too difficult to continue after VMLAUNCH?)
1367 vmwrite rax, r10
1368 ; Note: ASSUMES success!
1369
1370 ;
1371 ; Unify the input parameter registers.
1372 ;
1373%ifdef ASM_CALL64_GCC
1374 ; fResume already in rdi
1375 ; pCtx already in rsi
1376 mov rbx, rdx ; pvUnused
1377%else
1378 mov rdi, rcx ; fResume
1379 mov rsi, rdx ; pCtx
1380 mov rbx, r8 ; pvUnused
1381%endif
1382
1383 ;
1384 ; Save the host XCR0 and load the guest one if necessary.
1385 ; Note! Trashes rdx and rcx.
1386 ;
1387%ifdef ASM_CALL64_MSC
1388 mov rax, [xBP + 30h] ; pVCpu
1389%else
1390 mov rax, r8 ; pVCpu
1391%endif
1392 test byte [xAX + VMCPU.hm + HMCPU.fLoadSaveGuestXcr0], 1
1393 jz .xcr0_before_skip
1394
1395 xor ecx, ecx
1396 xgetbv ; save the host one on the stack
1397 push xDX
1398 push xAX
1399
1400 mov eax, [xSI + CPUMCTX.aXcr] ; load the guest one
1401 mov edx, [xSI + CPUMCTX.aXcr + 4]
1402 xor ecx, ecx ; paranoia
1403 xsetbv
1404
1405 push 0 ; indicate that we must restore XCR0 (popped into ecx, thus 0)
1406 jmp .xcr0_before_done
1407
1408.xcr0_before_skip:
1409 push 3fh ; indicate that we need not restore XCR0
1410.xcr0_before_done:
1411
1412 ;
1413 ; Save segment registers.
1414 ; Note! Trashes rdx & rcx, so we moved it here (amd64 case).
1415 ;
1416 MYPUSHSEGS xAX, ax
1417
1418 ; Save the pCtx pointer.
1419 push xSI
1420
1421 ; Save host LDTR.
1422 xor eax, eax
1423 sldt ax
1424 push xAX
1425
1426%ifndef VMX_SKIP_TR
1427 ; The host TR limit is reset to 0x67; save & restore it manually.
1428 str eax
1429 push xAX
1430%endif
1431
1432%ifndef VMX_SKIP_GDTR
1433 ; VT-x only saves the base of the GDTR & IDTR and resets the limit to 0xffff; we must restore the limit correctly!
1434 sub xSP, xCB * 2
1435 sgdt [xSP]
1436%endif
1437%ifndef VMX_SKIP_IDTR
1438 sub xSP, xCB * 2
1439 sidt [xSP]
1440%endif
1441
1442 ; Load CR2 if necessary (may be expensive as writing CR2 is a synchronizing instruction).
1443 mov rbx, qword [xSI + CPUMCTX.cr2]
1444 mov rdx, cr2
1445 cmp rbx, rdx
1446 je .skip_cr2_write
1447 mov cr2, rbx
1448
1449.skip_cr2_write:
1450 mov eax, VMX_VMCS_HOST_RSP
1451 vmwrite xAX, xSP
1452 ; Note: ASSUMES success!
1453 ; Don't mess with ESP anymore!!!
1454
1455 ; Fight spectre and similar.
1456 INDIRECT_BRANCH_PREDICTION_AND_L1_CACHE_BARRIER xSI, CPUMCTX_WSF_IBPB_ENTRY, CPUMCTX_WSF_L1D_ENTRY, CPUMCTX_WSF_MDS_ENTRY
1457
1458 ; Load guest general purpose registers.
1459 mov rax, qword [xSI + CPUMCTX.eax]
1460 mov rbx, qword [xSI + CPUMCTX.ebx]
1461 mov rcx, qword [xSI + CPUMCTX.ecx]
1462 mov rdx, qword [xSI + CPUMCTX.edx]
1463 mov rbp, qword [xSI + CPUMCTX.ebp]
1464 mov r8, qword [xSI + CPUMCTX.r8]
1465 mov r9, qword [xSI + CPUMCTX.r9]
1466 mov r10, qword [xSI + CPUMCTX.r10]
1467 mov r11, qword [xSI + CPUMCTX.r11]
1468 mov r12, qword [xSI + CPUMCTX.r12]
1469 mov r13, qword [xSI + CPUMCTX.r13]
1470 mov r14, qword [xSI + CPUMCTX.r14]
1471 mov r15, qword [xSI + CPUMCTX.r15]
1472
1473 ; Resume or start VM?
1474 cmp xDI, 0 ; fResume
1475
1476 ; Load guest rdi & rsi.
1477 mov rdi, qword [xSI + CPUMCTX.edi]
1478 mov rsi, qword [xSI + CPUMCTX.esi]
1479
1480 je .vmlaunch64_launch
1481
1482 vmresume
1483 jc near .vmxstart64_invalid_vmcs_ptr
1484 jz near .vmxstart64_start_failed
1485 jmp .vmlaunch64_done ; here if vmresume detected a failure
1486
1487.vmlaunch64_launch:
1488 vmlaunch
1489 jc near .vmxstart64_invalid_vmcs_ptr
1490 jz near .vmxstart64_start_failed
1491 jmp .vmlaunch64_done ; here if vmlaunch detected a failure
1492
1493ALIGNCODE(16)
1494.vmlaunch64_done:
1495 RESTORE_STATE_VM64
1496 mov eax, VINF_SUCCESS
1497
1498.vmstart64_end:
1499 popf
1500 pop xBP
1501 ret
1502
1503.vmxstart64_invalid_vmcs_ptr:
1504 RESTORE_STATE_VM64
1505 mov eax, VERR_VMX_INVALID_VMCS_PTR_TO_START_VM
1506 jmp .vmstart64_end
1507
1508.vmxstart64_start_failed:
1509 RESTORE_STATE_VM64
1510 mov eax, VERR_VMX_UNABLE_TO_START_VM
1511 jmp .vmstart64_end
1512ENDPROC VMXR0StartVM64
1513%endif ; RT_ARCH_AMD64
1514
1515
1516;;
1517; Clears the MDS buffers using VERW.
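; (Same VERW-on-a-stacked-DS-selector sequence as the MDS branch of INDIRECT_BRANCH_PREDICTION_AND_L1_CACHE_BARRIER above.)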
1518ALIGNCODE(16)
1519BEGINPROC hmR0MdsClear
1520 sub xSP, xCB
1521 mov [xSP], ds
1522 verw [xSP]
1523 add xSP, xCB
1524 ret
1525ENDPROC hmR0MdsClear
1526
1527
1528%ifdef RT_ARCH_AMD64
1529;;
1530; Prepares for and executes VMRUN (32-bit and 64-bit guests).
1531;
1532; @returns VBox status code
1533; @param HCPhysVmcbHost msc:rcx,gcc:rdi Physical address of host VMCB.
1534; @param HCPhysVmcb msc:rdx,gcc:rsi Physical address of guest VMCB.
1535; @param pCtx msc:r8,gcc:rdx Pointer to the guest-CPU context.
1536; @param pVM msc:r9,gcc:rcx The cross context VM structure.
1537; @param pVCpu msc:[rsp+28],gcc:r8 The cross context virtual CPU structure of the calling EMT.
1538;
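; Note: the prologue below re-pushes the arguments to fake a cdecl frame so they remain addressable off
; rbp after the GPRs are loaded with guest values; the closing 'add rsp, 6 * xCB' drops those five saved
; parameters plus the fake return address.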
1539ALIGNCODE(16)
1540BEGINPROC SVMR0VMRun
1541 ; Fake a cdecl stack frame
1542 %ifdef ASM_CALL64_GCC
1543 push r8 ; pVCpu
1544 push rcx ; pVM
1545 push rdx ; pCtx
1546 push rsi ; HCPhysVmcb
1547 push rdi ; HCPhysVmcbHost
1548 %else
1549 mov rax, [rsp + 28h]
1550 push rax ; rbp + 30h pVCpu
1551 push r9 ; rbp + 28h pVM
1552 push r8 ; rbp + 20h pCtx
1553 push rdx ; rbp + 18h HCPhysVmcb
1554 push rcx ; rbp + 10h HCPhysVmcbHost
1555 %endif
1556 push 0 ; rbp + 08h "fake ret addr"
1557 push rbp ; rbp + 00h
1558 mov rbp, rsp
1559 pushf
1560
1561 ; Manual save and restore:
1562 ; - General purpose registers except RIP, RSP, RAX
1563 ;
1564 ; Trashed:
1565 ; - CR2 (we don't care)
1566 ; - LDTR (reset to 0)
1567 ; - DRx (presumably not changed at all)
1568 ; - DR7 (reset to 0x400)
1569
1570 ; Save all general purpose host registers.
1571 MYPUSHAD
1572
1573 ; Load pCtx into xSI.
1574 mov xSI, [rbp + xCB * 2 + RTHCPHYS_CB * 2]
1575
1576 ; Save the host XCR0 and load the guest one if necessary.
1577 mov rax, [xBP + 30h] ; pVCpu
1578 test byte [xAX + VMCPU.hm + HMCPU.fLoadSaveGuestXcr0], 1
1579 jz .xcr0_before_skip
1580
1581 xor ecx, ecx
1582 xgetbv ; save the host XCR0 on the stack
1583 push xDX
1584 push xAX
1585
1586 mov xSI, [xBP + xCB * 2 + RTHCPHYS_CB * 2] ; pCtx
1587 mov eax, [xSI + CPUMCTX.aXcr] ; load the guest XCR0
1588 mov edx, [xSI + CPUMCTX.aXcr + 4]
1589 xor ecx, ecx ; paranoia
1590 xsetbv
1591
1592 push 0 ; indicate that we must restore XCR0 (popped into ecx, thus 0)
1593 jmp .xcr0_before_done
1594
1595.xcr0_before_skip:
1596 push 3fh ; indicate that we need not restore XCR0
1597.xcr0_before_done:
1598
1599 ; Save the guest CPU-context pointer to simplify saving the GPRs afterwards.
1600 push rsi
1601
1602 ; Save host fs, gs, sysenter msr etc.
1603 mov rax, [rbp + xCB * 2] ; HCPhysVmcbHost (64 bits physical address; x86: take low dword only)
1604 push rax ; save for the vmload after vmrun
1605 vmsave
1606
1607 ; Fight spectre.
1608 INDIRECT_BRANCH_PREDICTION_BARRIER xSI, CPUMCTX_WSF_IBPB_ENTRY
1609
1610 ; Setup rax for VMLOAD.
1611 mov rax, [rbp + xCB * 2 + RTHCPHYS_CB] ; HCPhysVmcb (64 bits physical address; take low dword only)
1612
1613 ; Load guest general purpose registers (rax is loaded from the VMCB by VMRUN).
1614 mov rbx, qword [xSI + CPUMCTX.ebx]
1615 mov rcx, qword [xSI + CPUMCTX.ecx]
1616 mov rdx, qword [xSI + CPUMCTX.edx]
1617 mov rdi, qword [xSI + CPUMCTX.edi]
1618 mov rbp, qword [xSI + CPUMCTX.ebp]
1619 mov r8, qword [xSI + CPUMCTX.r8]
1620 mov r9, qword [xSI + CPUMCTX.r9]
1621 mov r10, qword [xSI + CPUMCTX.r10]
1622 mov r11, qword [xSI + CPUMCTX.r11]
1623 mov r12, qword [xSI + CPUMCTX.r12]
1624 mov r13, qword [xSI + CPUMCTX.r13]
1625 mov r14, qword [xSI + CPUMCTX.r14]
1626 mov r15, qword [xSI + CPUMCTX.r15]
1627 mov rsi, qword [xSI + CPUMCTX.esi]
1628
1629 ; Clear the global interrupt flag & execute sti to make sure external interrupts cause a world switch.
1630 clgi
1631 sti
1632
1633 ; Load guest FS, GS, Sysenter MSRs etc.
1634 vmload
1635
1636 ; Run the VM.
1637 vmrun
1638
1639 ; Save guest fs, gs, sysenter msr etc.
1640 vmsave
1641
1642 ; Load host fs, gs, sysenter msr etc.
1643 pop rax ; load HCPhysVmcbHost (pushed above)
1644 vmload
1645
1646 ; Set the global interrupt flag again, but execute cli to make sure IF=0.
1647 cli
1648 stgi
1649
1650 ; Pop the context pointer (pushed above) and save the guest GPRs (sans RSP and RAX).
1651 pop rax
1652
1653 mov qword [rax + CPUMCTX.ebx], rbx
1654 mov rbx, SPECTRE_FILLER64
1655 mov qword [rax + CPUMCTX.ecx], rcx
1656 mov rcx, rbx
1657 mov qword [rax + CPUMCTX.edx], rdx
1658 mov rdx, rbx
1659 mov qword [rax + CPUMCTX.esi], rsi
1660 mov rsi, rbx
1661 mov qword [rax + CPUMCTX.edi], rdi
1662 mov rdi, rbx
1663 mov qword [rax + CPUMCTX.ebp], rbp
1664 mov rbp, rbx
1665 mov qword [rax + CPUMCTX.r8], r8
1666 mov r8, rbx
1667 mov qword [rax + CPUMCTX.r9], r9
1668 mov r9, rbx
1669 mov qword [rax + CPUMCTX.r10], r10
1670 mov r10, rbx
1671 mov qword [rax + CPUMCTX.r11], r11
1672 mov r11, rbx
1673 mov qword [rax + CPUMCTX.r12], r12
1674 mov r12, rbx
1675 mov qword [rax + CPUMCTX.r13], r13
1676 mov r13, rbx
1677 mov qword [rax + CPUMCTX.r14], r14
1678 mov r14, rbx
1679 mov qword [rax + CPUMCTX.r15], r15
1680 mov r15, rbx
1681
1682 ; Fight spectre. Note! Trashes rax!
1683 INDIRECT_BRANCH_PREDICTION_BARRIER rax, CPUMCTX_WSF_IBPB_EXIT
1684
1685 ; Restore the host xcr0 if necessary.
1686 pop xCX
1687 test ecx, ecx
1688 jnz .xcr0_after_skip
1689 pop xAX
1690 pop xDX
1691 xsetbv ; ecx is already zero
1692.xcr0_after_skip:
1693
1694 ; Restore host general purpose registers.
1695 MYPOPAD
1696
1697 mov eax, VINF_SUCCESS
1698
1699 popf
1700 pop rbp
1701 add rsp, 6 * xCB
1702 ret
1703ENDPROC SVMR0VMRun
1704%endif ; RT_ARCH_AMD64
1705