VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/HMR0A.asm @ 66989

Last change on this file since 66989 was 66878, checked in by vboxsync, 7 years ago

CPUM,HM: Added CPUMRZFpuStateActualizeAvxForRead and fixed missing MXCSR availability for the SSE variant. Should save MXCSR when VBOX_WITH_KERNEL_USING_XMM is defined.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 60.2 KB
1; $Id: HMR0A.asm 66878 2017-05-12 12:40:17Z vboxsync $
2;; @file
3; HM - Ring-0 VMX, SVM world-switch and helper routines
4;
5
6;
7; Copyright (C) 2006-2016 Oracle Corporation
8;
9; This file is part of VirtualBox Open Source Edition (OSE), as
10; available from http://www.virtualbox.org. This file is free software;
11; you can redistribute it and/or modify it under the terms of the GNU
12; General Public License (GPL) as published by the Free Software
13; Foundation, in version 2 as it comes in the "COPYING" file of the
14; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16;
17
18;*********************************************************************************************************************************
19;* Header Files *
20;*********************************************************************************************************************************
21%include "VBox/asmdefs.mac"
22%include "VBox/err.mac"
23%include "VBox/vmm/hm_vmx.mac"
24%include "VBox/vmm/cpum.mac"
25%include "VBox/vmm/vm.mac"
26%include "iprt/x86.mac"
27%include "HMInternal.mac"
28
29%ifdef RT_OS_OS2 ;; @todo fix OMF support in yasm and kick nasm out completely.
30 %macro vmwrite 2,
31 int3
32 %endmacro
33 %define vmlaunch int3
34 %define vmresume int3
35 %define vmsave int3
36 %define vmload int3
37 %define vmrun int3
38 %define clgi int3
39 %define stgi int3
40 %macro invlpga 2,
41 int3
42 %endmacro
43%endif
44
45;*********************************************************************************************************************************
46;* Defined Constants And Macros *
47;*********************************************************************************************************************************
48;; The offset of the XMM registers in X86FXSTATE.
49; Use define because I'm too lazy to convert the struct.
50%define XMM_OFF_IN_X86FXSTATE 160
51
52;;
53; Determine whether to skip restoring the GDTR, IDTR and TR across VMX non-root operation
54;
55%ifdef RT_ARCH_AMD64
56 %define VMX_SKIP_GDTR
57 %define VMX_SKIP_TR
58 %define VBOX_SKIP_RESTORE_SEG
59 %ifdef RT_OS_DARWIN
60 ; Load the NULL selector into DS, ES, FS and GS on 64-bit darwin so we don't
61 ; risk loading a stale LDT value or something invalid.
62 %define HM_64_BIT_USE_NULL_SEL
63 ; Darwin (Mavericks) uses the IDTR limit to store the CPU Id, so we always need to restore it.
64 ; See @bugref{6875}.
65 %else
66 %define VMX_SKIP_IDTR
67 %endif
68%endif
69
70;; @def MYPUSHAD
71; Macro generating an equivalent to pushad
72
73;; @def MYPOPAD
74; Macro generating an equivalent to popad
75
76;; @def MYPUSHSEGS
77; Macro saving all segment registers on the stack.
78; @param 1 full width register name
79; @param 2 16-bit register name for \a 1.
80
81;; @def MYPOPSEGS
82; Macro restoring all segment registers from the stack.
83; @param 1 full width register name
84; @param 2 16-bit register name for \a 1.
85
86%ifdef ASM_CALL64_GCC
87 %macro MYPUSHAD64 0
88 push r15
89 push r14
90 push r13
91 push r12
92 push rbx
93 %endmacro
94 %macro MYPOPAD64 0
95 pop rbx
96 pop r12
97 pop r13
98 pop r14
99 pop r15
100 %endmacro
101
102%else ; ASM_CALL64_MSC
103 %macro MYPUSHAD64 0
104 push r15
105 push r14
106 push r13
107 push r12
108 push rbx
109 push rsi
110 push rdi
111 %endmacro
112 %macro MYPOPAD64 0
113 pop rdi
114 pop rsi
115 pop rbx
116 pop r12
117 pop r13
118 pop r14
119 pop r15
120 %endmacro
121%endif
122
123%ifdef VBOX_SKIP_RESTORE_SEG
124 %macro MYPUSHSEGS64 2
125 %endmacro
126
127 %macro MYPOPSEGS64 2
128 %endmacro
129%else ; !VBOX_SKIP_RESTORE_SEG
130 ; trashes rax, rdx & rcx
131 %macro MYPUSHSEGS64 2
132 %ifndef HM_64_BIT_USE_NULL_SEL
133 mov %2, es
134 push %1
135 mov %2, ds
136 push %1
137 %endif
138
139 ; Special case for FS: Windows and Linux either don't use it or restore it when leaving kernel mode; Solaris OTOH doesn't restore it, so we must save it.
140 mov ecx, MSR_K8_FS_BASE
141 rdmsr
142 push rdx
143 push rax
144 %ifndef HM_64_BIT_USE_NULL_SEL
145 push fs
146 %endif
147
148 ; Special case for GS: OSes typically use swapgs to reset the hidden base register for GS on entry into the kernel; the same happens on exit.
149 mov ecx, MSR_K8_GS_BASE
150 rdmsr
151 push rdx
152 push rax
153 %ifndef HM_64_BIT_USE_NULL_SEL
154 push gs
155 %endif
156 %endmacro
157
158 ; trashes rax, rdx & rcx
159 %macro MYPOPSEGS64 2
160 ; Note: do not step through this code with a debugger!
161 %ifndef HM_64_BIT_USE_NULL_SEL
162 xor eax, eax
163 mov ds, ax
164 mov es, ax
165 mov fs, ax
166 mov gs, ax
167 %endif
168
169 %ifndef HM_64_BIT_USE_NULL_SEL
170 pop gs
171 %endif
172 pop rax
173 pop rdx
174 mov ecx, MSR_K8_GS_BASE
175 wrmsr
176
177 %ifndef HM_64_BIT_USE_NULL_SEL
178 pop fs
179 %endif
180 pop rax
181 pop rdx
182 mov ecx, MSR_K8_FS_BASE
183 wrmsr
184 ; Now it's safe to step again
185
186 %ifndef HM_64_BIT_USE_NULL_SEL
187 pop %1
188 mov ds, %2
189 pop %1
190 mov es, %2
191 %endif
192 %endmacro
193%endif ; VBOX_SKIP_RESTORE_SEG
194
195%macro MYPUSHAD32 0
196 pushad
197%endmacro
198%macro MYPOPAD32 0
199 popad
200%endmacro
201
202%macro MYPUSHSEGS32 2
203 push ds
204 push es
205 push fs
206 push gs
207%endmacro
208%macro MYPOPSEGS32 2
209 pop gs
210 pop fs
211 pop es
212 pop ds
213%endmacro
214
215%ifdef RT_ARCH_AMD64
216 %define MYPUSHAD MYPUSHAD64
217 %define MYPOPAD MYPOPAD64
218 %define MYPUSHSEGS MYPUSHSEGS64
219 %define MYPOPSEGS MYPOPSEGS64
220%else
221 %define MYPUSHAD MYPUSHAD32
222 %define MYPOPAD MYPOPAD32
223 %define MYPUSHSEGS MYPUSHSEGS32
224 %define MYPOPSEGS MYPOPSEGS32
225%endif
226
227
228;*********************************************************************************************************************************
229;* External Symbols *
230;*********************************************************************************************************************************
231%ifdef VBOX_WITH_KERNEL_USING_XMM
232extern NAME(CPUMIsGuestFPUStateActive)
233%endif
234
235
236BEGINCODE
237
238
239;/**
240; * Restores host-state fields.
241; *
242; * @returns VBox status code
243; * @param f32RestoreHost x86: [ebp + 08h] msc: ecx gcc: edi RestoreHost flags.
244; * @param pRestoreHost x86: [ebp + 0ch] msc: rdx gcc: rsi Pointer to the RestoreHost struct.
245; */
246ALIGNCODE(16)
247BEGINPROC VMXRestoreHostState
248%ifdef RT_ARCH_AMD64
249 %ifndef ASM_CALL64_GCC
250 ; Use GCC's input registers since we'll be needing both rcx and rdx further
251 ; down with the wrmsr instruction. Use the R10 and R11 registers for saving
252 ; RDI and RSI since MSC preserves these two registers.
253 mov r10, rdi
254 mov r11, rsi
255 mov rdi, rcx
256 mov rsi, rdx
257 %endif
258
259 test edi, VMX_RESTORE_HOST_GDTR
260 jz .test_idtr
261 lgdt [rsi + VMXRESTOREHOST.HostGdtr]
262
263.test_idtr:
264 test edi, VMX_RESTORE_HOST_IDTR
265 jz .test_ds
266 lidt [rsi + VMXRESTOREHOST.HostIdtr]
267
268.test_ds:
269 test edi, VMX_RESTORE_HOST_SEL_DS
270 jz .test_es
271 mov ax, [rsi + VMXRESTOREHOST.uHostSelDS]
272 mov ds, eax
273
274.test_es:
275 test edi, VMX_RESTORE_HOST_SEL_ES
276 jz .test_tr
277 mov ax, [rsi + VMXRESTOREHOST.uHostSelES]
278 mov es, eax
279
280.test_tr:
281 test edi, VMX_RESTORE_HOST_SEL_TR
282 jz .test_fs
283 ; When restoring the TR, we must first clear the busy flag or we'll end up faulting.
284 mov dx, [rsi + VMXRESTOREHOST.uHostSelTR]
285 mov ax, dx
286 and eax, X86_SEL_MASK_OFF_RPL ; Mask away TI and RPL bits leaving only the descriptor offset.
287 add rax, qword [rsi + VMXRESTOREHOST.HostGdtr + 2] ; xAX <- descriptor offset + GDTR.pGdt.
288 test edi, VMX_RESTORE_HOST_GDT_READ_ONLY
289 jnz .gdt_readonly
290 and dword [rax + 4], ~RT_BIT(9) ; Clear the busy flag in TSS desc (bits 0-7=base, bit 9=busy bit).
291 ltr dx
292 jmp short .test_fs
293.gdt_readonly:
294 mov rcx, cr0
295 mov r9, rcx
296 and rcx, ~X86_CR0_WP
297 mov cr0, rcx
298 and dword [rax + 4], ~RT_BIT(9) ; Clear the busy flag in TSS desc (bits 0-7=base, bit 9=busy bit).
299 ltr dx
300 mov cr0, r9
301
302.test_fs:
303 ;
304 ; When restoring the selector values for FS and GS, we'll temporarily trash
305 ; the base address (at least the high 32 bits, but quite possibly the
306 ; whole base address), the wrmsr will restore it correctly. (VT-x actually
307 ; restores the base correctly when leaving guest mode, but not the selector
308 ; value, so there is little problem with interrupts being enabled prior to
309 ; this restore job.)
310 ; We'll disable ints once for both FS and GS as that's probably faster.
311 ;
312 test edi, VMX_RESTORE_HOST_SEL_FS | VMX_RESTORE_HOST_SEL_GS
313 jz .restore_success
314 pushfq
315 cli ; (see above)
316
317 test edi, VMX_RESTORE_HOST_SEL_FS
318 jz .test_gs
319 mov ax, word [rsi + VMXRESTOREHOST.uHostSelFS]
320 mov fs, eax
321 mov eax, dword [rsi + VMXRESTOREHOST.uHostFSBase] ; uHostFSBase - Lo
322 mov edx, dword [rsi + VMXRESTOREHOST.uHostFSBase + 4h] ; uHostFSBase - Hi
323 mov ecx, MSR_K8_FS_BASE
324 wrmsr
325
326.test_gs:
327 test edi, VMX_RESTORE_HOST_SEL_GS
328 jz .restore_flags
329 mov ax, word [rsi + VMXRESTOREHOST.uHostSelGS]
330 mov gs, eax
331 mov eax, dword [rsi + VMXRESTOREHOST.uHostGSBase] ; uHostGSBase - Lo
332 mov edx, dword [rsi + VMXRESTOREHOST.uHostGSBase + 4h] ; uHostGSBase - Hi
333 mov ecx, MSR_K8_GS_BASE
334 wrmsr
335
336.restore_flags:
337 popfq
338
339.restore_success:
340 mov eax, VINF_SUCCESS
341 %ifndef ASM_CALL64_GCC
342 ; Restore RDI and RSI on MSC.
343 mov rdi, r10
344 mov rsi, r11
345 %endif
346%else ; RT_ARCH_X86
347 mov eax, VERR_NOT_IMPLEMENTED
348%endif
349 ret
350ENDPROC VMXRestoreHostState
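;
; Illustrative caller-side sketch (C, commented out like the prototypes elsewhere in
; this file): shows how the f32RestoreHost flags and the VMXRESTOREHOST fields used by
; the routine above fit together. The prototype, the exact field spellings and the
; IPRT helpers (ASMGetFS, ASMRdMsr) are assumptions inferred from the code above.
;
;    VMXRESTOREHOST RestoreHost;
;    uint32_t       fFlags = 0;
;    RestoreHost.uHostSelFS  = ASMGetFS();                 /* assumed IPRT helper */
;    RestoreHost.uHostFSBase = ASMRdMsr(MSR_K8_FS_BASE);   /* assumed IPRT helper */
;    fFlags |= VMX_RESTORE_HOST_SEL_FS;
;    /* ... fill in GDTR/IDTR/TR/ES/DS/GS fields and flags as needed ... */
;    int rc = VMXRestoreHostState(fFlags, &RestoreHost);   /* VINF_SUCCESS on AMD64 */
;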
351
352
353;/**
354; * Dispatches an NMI to the host.
355; */
356ALIGNCODE(16)
357BEGINPROC VMXDispatchHostNmi
358 int 2 ; NMI is always vector 2. The IDT[2] IRQ handler cannot be anything else. See Intel spec. 6.3.1 "External Interrupts".
359 ret
360ENDPROC VMXDispatchHostNmi
361
362
363;/**
364; * Executes VMWRITE, 64-bit value.
365; *
366; * @returns VBox status code.
367; * @param idxField x86: [ebp + 08h] msc: rcx gcc: rdi VMCS index.
368; * @param u64Data x86: [ebp + 0ch] msc: rdx gcc: rsi VM field value.
369; */
370ALIGNCODE(16)
371BEGINPROC VMXWriteVmcs64
372%ifdef RT_ARCH_AMD64
373 %ifdef ASM_CALL64_GCC
374 and edi, 0ffffffffh
375 xor rax, rax
376 vmwrite rdi, rsi
377 %else
378 and ecx, 0ffffffffh
379 xor rax, rax
380 vmwrite rcx, rdx
381 %endif
382%else ; RT_ARCH_X86
383 mov ecx, [esp + 4] ; idxField
384 lea edx, [esp + 8] ; &u64Data
385 vmwrite ecx, [edx] ; low dword
386 jz .done
387 jc .done
388 inc ecx
389 xor eax, eax
390 vmwrite ecx, [edx + 4] ; high dword
391.done:
392%endif ; RT_ARCH_X86
393 jnc .valid_vmcs
394 mov eax, VERR_VMX_INVALID_VMCS_PTR
395 ret
396.valid_vmcs:
397 jnz .the_end
398 mov eax, VERR_VMX_INVALID_VMCS_FIELD
399.the_end:
400 ret
401ENDPROC VMXWriteVmcs64
402
403
404;/**
405; * Executes VMREAD, 64-bit value.
406; *
407; * @returns VBox status code.
408; * @param idxField VMCS index.
409; * @param pData Where to store VM field value.
410; */
411;DECLASM(int) VMXReadVmcs64(uint32_t idxField, uint64_t *pData);
412ALIGNCODE(16)
413BEGINPROC VMXReadVmcs64
414%ifdef RT_ARCH_AMD64
415 %ifdef ASM_CALL64_GCC
416 and edi, 0ffffffffh
417 xor rax, rax
418 vmread [rsi], rdi
419 %else
420 and ecx, 0ffffffffh
421 xor rax, rax
422 vmread [rdx], rcx
423 %endif
424%else ; RT_ARCH_X86
425 mov ecx, [esp + 4] ; idxField
426 mov edx, [esp + 8] ; pData
427 vmread [edx], ecx ; low dword
428 jz .done
429 jc .done
430 inc ecx
431 xor eax, eax
432 vmread [edx + 4], ecx ; high dword
433.done:
434%endif ; RT_ARCH_X86
435 jnc .valid_vmcs
436 mov eax, VERR_VMX_INVALID_VMCS_PTR
437 ret
438.valid_vmcs:
439 jnz .the_end
440 mov eax, VERR_VMX_INVALID_VMCS_FIELD
441.the_end:
442 ret
443ENDPROC VMXReadVmcs64
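;
; Caller-side sketch (C, commented out like the prototypes above) of the error contract
; implemented by VMXWriteVmcs64/VMXReadVmcs64: CF=1 maps to VERR_VMX_INVALID_VMCS_PTR,
; ZF=1 maps to VERR_VMX_INVALID_VMCS_FIELD. The VMCS field constant below is only an
; example.
;
;    uint64_t u64Rip;
;    int rc = VMXReadVmcs64(VMX_VMCS_GUEST_RIP /* example field */, &u64Rip);
;    if (RT_SUCCESS(rc))
;        rc = VMXWriteVmcs64(VMX_VMCS_GUEST_RIP, u64Rip + 2);
;    /* rc is VINF_SUCCESS, VERR_VMX_INVALID_VMCS_PTR or VERR_VMX_INVALID_VMCS_FIELD. */
;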
444
445
446;/**
447; * Executes VMREAD, 32-bit value.
448; *
449; * @returns VBox status code.
450; * @param idxField VMCS index.
451; * @param pu32Data Where to store VM field value.
452; */
453;DECLASM(int) VMXReadVmcs32(uint32_t idxField, uint32_t *pu32Data);
454ALIGNCODE(16)
455BEGINPROC VMXReadVmcs32
456%ifdef RT_ARCH_AMD64
457 %ifdef ASM_CALL64_GCC
458 and edi, 0ffffffffh
459 xor rax, rax
460 vmread r10, rdi
461 mov [rsi], r10d
462 %else
463 and ecx, 0ffffffffh
464 xor rax, rax
465 vmread r10, rcx
466 mov [rdx], r10d
467 %endif
468%else ; RT_ARCH_X86
469 mov ecx, [esp + 4] ; idxField
470 mov edx, [esp + 8] ; pu32Data
471 xor eax, eax
472 vmread [edx], ecx
473%endif ; RT_ARCH_X86
474 jnc .valid_vmcs
475 mov eax, VERR_VMX_INVALID_VMCS_PTR
476 ret
477.valid_vmcs:
478 jnz .the_end
479 mov eax, VERR_VMX_INVALID_VMCS_FIELD
480.the_end:
481 ret
482ENDPROC VMXReadVmcs32
483
484
485;/**
486; * Executes VMWRITE, 32-bit value.
487; *
488; * @returns VBox status code.
489; * @param idxField VMCS index.
490; @param u32Data The 32-bit value to write.
491; */
492;DECLASM(int) VMXWriteVmcs32(uint32_t idxField, uint32_t u32Data);
493ALIGNCODE(16)
494BEGINPROC VMXWriteVmcs32
495%ifdef RT_ARCH_AMD64
496 %ifdef ASM_CALL64_GCC
497 and edi, 0ffffffffh
498 and esi, 0ffffffffh
499 xor rax, rax
500 vmwrite rdi, rsi
501 %else
502 and ecx, 0ffffffffh
503 and edx, 0ffffffffh
504 xor rax, rax
505 vmwrite rcx, rdx
506 %endif
507%else ; RT_ARCH_X86
508 mov ecx, [esp + 4] ; idxField
509 mov edx, [esp + 8] ; u32Data
510 xor eax, eax
511 vmwrite ecx, edx
512%endif ; RT_ARCH_X86
513 jnc .valid_vmcs
514 mov eax, VERR_VMX_INVALID_VMCS_PTR
515 ret
516.valid_vmcs:
517 jnz .the_end
518 mov eax, VERR_VMX_INVALID_VMCS_FIELD
519.the_end:
520 ret
521ENDPROC VMXWriteVmcs32
522
523
524;/**
525; * Executes VMXON.
526; *
527; * @returns VBox status code.
528; * @param HCPhysVMXOn Physical address of VMXON structure.
529; */
530;DECLASM(int) VMXEnable(RTHCPHYS HCPhysVMXOn);
531BEGINPROC VMXEnable
532%ifdef RT_ARCH_AMD64
533 xor rax, rax
534 %ifdef ASM_CALL64_GCC
535 push rdi
536 %else
537 push rcx
538 %endif
539 vmxon [rsp]
540%else ; RT_ARCH_X86
541 xor eax, eax
542 vmxon [esp + 4]
543%endif ; RT_ARCH_X86
544 jnc .good
545 mov eax, VERR_VMX_INVALID_VMXON_PTR
546 jmp .the_end
547
548.good:
549 jnz .the_end
550 mov eax, VERR_VMX_VMXON_FAILED
551
552.the_end:
553%ifdef RT_ARCH_AMD64
554 add rsp, 8
555%endif
556 ret
557ENDPROC VMXEnable
558
559
560;/**
561; * Executes VMXOFF.
562; */
563;DECLASM(void) VMXDisable(void);
564BEGINPROC VMXDisable
565 vmxoff
566.the_end:
567 ret
568ENDPROC VMXDisable
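;
; Caller-side sketch (C, commented out) of the VMXON/VMXOFF bracket exposed by
; VMXEnable/VMXDisable. The VMXON region must be a page-aligned physical address with
; the VMCS revision identifier at offset 0; how it is allocated is assumed here.
;
;    int rc = VMXEnable(HCPhysVmxOnRegion);    /* VMXON  */
;    if (RT_SUCCESS(rc))
;    {
;        /* ... VMCS work ... */
;        VMXDisable();                          /* VMXOFF */
;    }
;    /* Failure codes: VERR_VMX_INVALID_VMXON_PTR (CF=1) or VERR_VMX_VMXON_FAILED (ZF=1). */
;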
569
570
571;/**
572; * Executes VMCLEAR.
573; *
574; * @returns VBox status code.
575; * @param HCPhysVmcs Physical address of VM control structure.
576; */
577;DECLASM(int) VMXClearVmcs(RTHCPHYS HCPhysVmcs);
578ALIGNCODE(16)
579BEGINPROC VMXClearVmcs
580%ifdef RT_ARCH_AMD64
581 xor rax, rax
582 %ifdef ASM_CALL64_GCC
583 push rdi
584 %else
585 push rcx
586 %endif
587 vmclear [rsp]
588%else ; RT_ARCH_X86
589 xor eax, eax
590 vmclear [esp + 4]
591%endif ; RT_ARCH_X86
592 jnc .the_end
593 mov eax, VERR_VMX_INVALID_VMCS_PTR
594.the_end:
595%ifdef RT_ARCH_AMD64
596 add rsp, 8
597%endif
598 ret
599ENDPROC VMXClearVmcs
600
601
602;/**
603; * Executes VMPTRLD.
604; *
605; * @returns VBox status code.
606; * @param HCPhysVmcs Physical address of VMCS structure.
607; */
608;DECLASM(int) VMXActivateVmcs(RTHCPHYS HCPhysVmcs);
609ALIGNCODE(16)
610BEGINPROC VMXActivateVmcs
611%ifdef RT_ARCH_AMD64
612 xor rax, rax
613 %ifdef ASM_CALL64_GCC
614 push rdi
615 %else
616 push rcx
617 %endif
618 vmptrld [rsp]
619%else
620 xor eax, eax
621 vmptrld [esp + 4]
622%endif
623 jnc .the_end
624 mov eax, VERR_VMX_INVALID_VMCS_PTR
625.the_end:
626%ifdef RT_ARCH_AMD64
627 add rsp, 8
628%endif
629 ret
630ENDPROC VMXActivateVmcs
631
632
633;/**
634; * Executes VMPTRST.
635; *
636; * @returns VBox status code.
637; @param pVMCS x86:[esp+04h] gcc:rdi msc:rcx Where to store the physical address of the current VMCS.
638; */
639;DECLASM(int) VMXGetActivatedVmcs(RTHCPHYS *pVMCS);
640BEGINPROC VMXGetActivatedVmcs
641%ifdef RT_OS_OS2
642 mov eax, VERR_NOT_SUPPORTED
643 ret
644%else
645 %ifdef RT_ARCH_AMD64
646 %ifdef ASM_CALL64_GCC
647 vmptrst qword [rdi]
648 %else
649 vmptrst qword [rcx]
650 %endif
651 %else
652 vmptrst qword [esp+04h]
653 %endif
654 xor eax, eax
655.the_end:
656 ret
657%endif
658ENDPROC VMXGetActivatedVmcs
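;
; Caller-side sketch (C, commented out) of the VMCS activation helpers above. HCPhysVmcs
; is assumed to be the page-aligned physical address of an initialized VMCS region.
;
;    int rc = VMXClearVmcs(HCPhysVmcs);        /* VMCLEAR - make it inactive and clear */
;    if (RT_SUCCESS(rc))
;        rc = VMXActivateVmcs(HCPhysVmcs);     /* VMPTRLD - make it current and active */
;    RTHCPHYS HCPhysCurrent;
;    VMXGetActivatedVmcs(&HCPhysCurrent);      /* VMPTRST - read back the current VMCS */
;    Assert(HCPhysCurrent == HCPhysVmcs);
;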
659
660;/**
661; * Invalidates EPT translations using INVEPT.
662; * @param enmFlush msc:ecx gcc:edi x86:[esp+04] Type of flush.
663; * @param pDescriptor msc:edx gcc:esi x86:[esp+08] Descriptor pointer.
664; */
665;DECLASM(int) VMXR0InvEPT(VMX_FLUSH enmFlush, uint64_t *pDescriptor);
666BEGINPROC VMXR0InvEPT
667%ifdef RT_ARCH_AMD64
668 %ifdef ASM_CALL64_GCC
669 and edi, 0ffffffffh
670 xor rax, rax
671; invept rdi, qword [rsi]
672 DB 0x66, 0x0F, 0x38, 0x80, 0x3E
673 %else
674 and ecx, 0ffffffffh
675 xor rax, rax
676; invept rcx, qword [rdx]
677 DB 0x66, 0x0F, 0x38, 0x80, 0xA
678 %endif
679%else
680 mov ecx, [esp + 4]
681 mov edx, [esp + 8]
682 xor eax, eax
683; invept ecx, qword [edx]
684 DB 0x66, 0x0F, 0x38, 0x80, 0xA
685%endif
686 jnc .valid_vmcs
687 mov eax, VERR_VMX_INVALID_VMCS_PTR
688 ret
689.valid_vmcs:
690 jnz .the_end
691 mov eax, VERR_INVALID_PARAMETER
692.the_end:
693 ret
694ENDPROC VMXR0InvEPT
695
696
697;/**
698; * Invalidates VPID-tagged TLB entries using INVVPID.
699; * @param enmFlush msc:ecx gcc:edi x86:[esp+04] Type of flush.
700; * @param pDescriptor msc:edx gcc:esi x86:[esp+08] Descriptor pointer.
701; */
702;DECLASM(int) VMXR0InvVPID(VMX_FLUSH enmFlush, uint64_t *pDescriptor);
703BEGINPROC VMXR0InvVPID
704%ifdef RT_ARCH_AMD64
705 %ifdef ASM_CALL64_GCC
706 and edi, 0ffffffffh
707 xor rax, rax
708; invvpid rdi, qword [rsi]
709 DB 0x66, 0x0F, 0x38, 0x81, 0x3E
710 %else
711 and ecx, 0ffffffffh
712 xor rax, rax
713; invvpid rcx, qword [rdx]
714 DB 0x66, 0x0F, 0x38, 0x81, 0xA
715 %endif
716%else
717 mov ecx, [esp + 4]
718 mov edx, [esp + 8]
719 xor eax, eax
720; invvpid ecx, qword [edx]
721 DB 0x66, 0x0F, 0x38, 0x81, 0xA
722%endif
723 jnc .valid_vmcs
724 mov eax, VERR_VMX_INVALID_VMCS_PTR
725 ret
726.valid_vmcs:
727 jnz .the_end
728 mov eax, VERR_INVALID_PARAMETER
729.the_end:
730 ret
731ENDPROC VMXR0InvVPID
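;
; Caller-side sketch (C, commented out) of the 128-bit descriptors the two routines
; above expect. The layout follows the Intel SDM; the flush-type and variable names
; below are illustrative assumptions, not copied from a VBox header.
;
;    uint64_t au64Desc[2];
;
;    /* INVEPT descriptor: { EPT pointer, reserved (0) }. */
;    au64Desc[0] = uEptPtr;
;    au64Desc[1] = 0;
;    rc = VMXR0InvEPT(enmEptFlush /* single/all context */, &au64Desc[0]);
;
;    /* INVVPID descriptor: { VPID in bits 0..15, linear address }. */
;    au64Desc[0] = uVpid;
;    au64Desc[1] = GCPtrPage;
;    rc = VMXR0InvVPID(enmVpidFlush, &au64Desc[0]);
;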
732
733
734%if GC_ARCH_BITS == 64
735;;
736; Executes INVLPGA
737;
738; @param pPageGC msc:rcx gcc:rdi x86:[esp+04] Virtual page to invalidate
739; @param uASID msc:rdx gcc:rsi x86:[esp+0C] Tagged TLB id
740;
741;DECLASM(void) SVMR0InvlpgA(RTGCPTR pPageGC, uint32_t uASID);
742BEGINPROC SVMR0InvlpgA
743%ifdef RT_ARCH_AMD64
744 %ifdef ASM_CALL64_GCC
745 mov rax, rdi
746 mov rcx, rsi
747 %else
748 mov rax, rcx
749 mov rcx, rdx
750 %endif
751%else
752 mov eax, [esp + 4]
753 mov ecx, [esp + 0Ch]
754%endif
755 invlpga [xAX], ecx
756 ret
757ENDPROC SVMR0InvlpgA
758
759%else ; GC_ARCH_BITS != 64
760;;
761; Executes INVLPGA
762;
763; @param pPageGC msc:ecx gcc:edi x86:[esp+04] Virtual page to invalidate
764; @param uASID msc:edx gcc:esi x86:[esp+08] Tagged TLB id
765;
766;DECLASM(void) SVMR0InvlpgA(RTGCPTR pPageGC, uint32_t uASID);
767BEGINPROC SVMR0InvlpgA
768%ifdef RT_ARCH_AMD64
769 %ifdef ASM_CALL64_GCC
770 movzx rax, edi
771 mov ecx, esi
772 %else
773 ; from http://www.cs.cmu.edu/~fp/courses/15213-s06/misc/asm64-handout.pdf:
774 ; ``Perhaps unexpectedly, instructions that move or generate 32-bit register
775 ; values also set the upper 32 bits of the register to zero. Consequently
776 ; there is no need for an instruction movzlq.''
777 mov eax, ecx
778 mov ecx, edx
779 %endif
780%else
781 mov eax, [esp + 4]
782 mov ecx, [esp + 8]
783%endif
784 invlpga [xAX], ecx
785 ret
786ENDPROC SVMR0InvlpgA
787
788%endif ; GC_ARCH_BITS != 64
789
790
791%ifdef VBOX_WITH_KERNEL_USING_XMM
792
793;;
794; Wrapper around vmx.pfnStartVM that preserves host XMM registers and
795; loads the guest ones when necessary.
796;
797; @cproto DECLASM(int) hmR0VMXStartVMWrapXMM(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu, PFNHMVMXSTARTVM pfnStartVM);
798;
799; @returns eax
800;
801; @param fResumeVM msc:rcx
802; @param pCtx msc:rdx
803; @param pVMCSCache msc:r8
804; @param pVM msc:r9
805; @param pVCpu msc:[rbp+30h] The cross context virtual CPU structure of the calling EMT.
806; @param pfnStartVM msc:[rbp+38h]
807;
808; @remarks This is essentially the same code as hmR0SVMRunWrapXMM, only the parameters differ a little bit.
809;
810; @remarks Drivers shouldn't use AVX registers without saving+loading:
811; https://msdn.microsoft.com/en-us/library/windows/hardware/ff545910%28v=vs.85%29.aspx?f=255&MSPPError=-2147217396
812; However the compiler docs have a different idea:
813; https://msdn.microsoft.com/en-us/library/9z1stfyw.aspx
814; We'll go with the former for now.
815;
816; ASSUMING 64-bit and Windows for now.
817;
818ALIGNCODE(16)
819BEGINPROC hmR0VMXStartVMWrapXMM
820 push xBP
821 mov xBP, xSP
822 sub xSP, 0b0h + 040h ; Don't bother optimizing the frame size.
823
824 ; spill input parameters.
825 mov [xBP + 010h], rcx ; fResumeVM
826 mov [xBP + 018h], rdx ; pCtx
827 mov [xBP + 020h], r8 ; pVMCSCache
828 mov [xBP + 028h], r9 ; pVM
829
830 ; Ask CPUM whether we've started using the FPU yet.
831 mov rcx, [xBP + 30h] ; pVCpu
832 call NAME(CPUMIsGuestFPUStateActive)
833 test al, al
834 jnz .guest_fpu_state_active
835
836 ; No need to mess with XMM registers, just call the start routine and return.
837 mov r11, [xBP + 38h] ; pfnStartVM
838 mov r10, [xBP + 30h] ; pVCpu
839 mov [xSP + 020h], r10
840 mov rcx, [xBP + 010h] ; fResumeVM
841 mov rdx, [xBP + 018h] ; pCtx
842 mov r8, [xBP + 020h] ; pVMCSCache
843 mov r9, [xBP + 028h] ; pVM
844 call r11
845
846 leave
847 ret
848
849ALIGNCODE(8)
850.guest_fpu_state_active:
851 ; Save the non-volatile host XMM registers.
852 movdqa [rsp + 040h + 000h], xmm6
853 movdqa [rsp + 040h + 010h], xmm7
854 movdqa [rsp + 040h + 020h], xmm8
855 movdqa [rsp + 040h + 030h], xmm9
856 movdqa [rsp + 040h + 040h], xmm10
857 movdqa [rsp + 040h + 050h], xmm11
858 movdqa [rsp + 040h + 060h], xmm12
859 movdqa [rsp + 040h + 070h], xmm13
860 movdqa [rsp + 040h + 080h], xmm14
861 movdqa [rsp + 040h + 090h], xmm15
862 stmxcsr [rsp + 040h + 0a0h]
863
864 mov r10, [xBP + 018h] ; pCtx
865 mov eax, [r10 + CPUMCTX.fXStateMask]
866 test eax, eax
867 jz .guest_fpu_state_manually
868
869 ;
870 ; Using XSAVE to load the guest XMM, YMM and ZMM registers.
871 ;
872 and eax, CPUM_VOLATILE_XSAVE_GUEST_COMPONENTS
873 xor edx, edx
874 mov r10, [r10 + CPUMCTX.pXStateR0]
875 xrstor [r10]
876
877 ; Make the call (same as in the other case ).
878 mov r11, [xBP + 38h] ; pfnStartVM
879 mov r10, [xBP + 30h] ; pVCpu
880 mov [xSP + 020h], r10
881 mov rcx, [xBP + 010h] ; fResumeVM
882 mov rdx, [xBP + 018h] ; pCtx
883 mov r8, [xBP + 020h] ; pVMCSCache
884 mov r9, [xBP + 028h] ; pVM
885 call r11
886
887 mov r11d, eax ; save return value (xsave below uses eax)
888
889 ; Save the guest XMM registers.
890 mov r10, [xBP + 018h] ; pCtx
891 mov eax, [r10 + CPUMCTX.fXStateMask]
892 and eax, CPUM_VOLATILE_XSAVE_GUEST_COMPONENTS
893 xor edx, edx
894 mov r10, [r10 + CPUMCTX.pXStateR0]
895 xsave [r10]
896
897 mov eax, r11d ; restore return value.
898
899.restore_non_volatile_host_xmm_regs:
900 ; Load the non-volatile host XMM registers.
901 movdqa xmm6, [rsp + 040h + 000h]
902 movdqa xmm7, [rsp + 040h + 010h]
903 movdqa xmm8, [rsp + 040h + 020h]
904 movdqa xmm9, [rsp + 040h + 030h]
905 movdqa xmm10, [rsp + 040h + 040h]
906 movdqa xmm11, [rsp + 040h + 050h]
907 movdqa xmm12, [rsp + 040h + 060h]
908 movdqa xmm13, [rsp + 040h + 070h]
909 movdqa xmm14, [rsp + 040h + 080h]
910 movdqa xmm15, [rsp + 040h + 090h]
911 ldmxcsr [rsp + 040h + 0a0h]
912 leave
913 ret
914
915 ;
916 ; No XSAVE, load and save the guest XMM registers manually.
917 ;
918.guest_fpu_state_manually:
919 ; Load the full guest XMM register state.
920 mov r10, [r10 + CPUMCTX.pXStateR0]
921 movdqa xmm0, [r10 + XMM_OFF_IN_X86FXSTATE + 000h]
922 movdqa xmm1, [r10 + XMM_OFF_IN_X86FXSTATE + 010h]
923 movdqa xmm2, [r10 + XMM_OFF_IN_X86FXSTATE + 020h]
924 movdqa xmm3, [r10 + XMM_OFF_IN_X86FXSTATE + 030h]
925 movdqa xmm4, [r10 + XMM_OFF_IN_X86FXSTATE + 040h]
926 movdqa xmm5, [r10 + XMM_OFF_IN_X86FXSTATE + 050h]
927 movdqa xmm6, [r10 + XMM_OFF_IN_X86FXSTATE + 060h]
928 movdqa xmm7, [r10 + XMM_OFF_IN_X86FXSTATE + 070h]
929 movdqa xmm8, [r10 + XMM_OFF_IN_X86FXSTATE + 080h]
930 movdqa xmm9, [r10 + XMM_OFF_IN_X86FXSTATE + 090h]
931 movdqa xmm10, [r10 + XMM_OFF_IN_X86FXSTATE + 0a0h]
932 movdqa xmm11, [r10 + XMM_OFF_IN_X86FXSTATE + 0b0h]
933 movdqa xmm12, [r10 + XMM_OFF_IN_X86FXSTATE + 0c0h]
934 movdqa xmm13, [r10 + XMM_OFF_IN_X86FXSTATE + 0d0h]
935 movdqa xmm14, [r10 + XMM_OFF_IN_X86FXSTATE + 0e0h]
936 movdqa xmm15, [r10 + XMM_OFF_IN_X86FXSTATE + 0f0h]
937 ldmxcsr [r10 + X86FXSTATE.MXCSR]
938
939 ; Make the call (same as in the other case ).
940 mov r11, [xBP + 38h] ; pfnStartVM
941 mov r10, [xBP + 30h] ; pVCpu
942 mov [xSP + 020h], r10
943 mov rcx, [xBP + 010h] ; fResumeVM
944 mov rdx, [xBP + 018h] ; pCtx
945 mov r8, [xBP + 020h] ; pVMCSCache
946 mov r9, [xBP + 028h] ; pVM
947 call r11
948
949 ; Save the guest XMM registers.
950 mov r10, [xBP + 018h] ; pCtx
951 mov r10, [r10 + CPUMCTX.pXStateR0]
952 stmxcsr [r10 + X86FXSTATE.MXCSR]
953 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 000h], xmm0
954 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 010h], xmm1
955 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 020h], xmm2
956 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 030h], xmm3
957 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 040h], xmm4
958 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 050h], xmm5
959 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 060h], xmm6
960 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 070h], xmm7
961 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 080h], xmm8
962 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 090h], xmm9
963 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 0a0h], xmm10
964 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 0b0h], xmm11
965 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 0c0h], xmm12
966 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 0d0h], xmm13
967 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 0e0h], xmm14
968 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 0f0h], xmm15
969 jmp .restore_non_volatile_host_xmm_regs
970ENDPROC hmR0VMXStartVMWrapXMM
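;
; Conceptual sketch (C, commented out) of the XSAVE path in the wrapper above: only the
; volatile guest state components are restored before the world-switch call and saved
; again afterwards, while the non-volatile host XMM registers stay on the stack. The
; intrinsic names are illustrative; the authoritative code is the assembly above.
;
;    uint64_t fMask = pCtx->fXStateMask & CPUM_VOLATILE_XSAVE_GUEST_COMPONENTS;
;    _xrstor64(pCtx->pXStateR0, fMask);     /* load guest XMM/YMM/ZMM components */
;    rc = pfnStartVM(fResumeVM, pCtx, pCache, pVM, pVCpu);
;    _xsave64(pCtx->pXStateR0, fMask);      /* save them back after the switch   */
;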
971
972;;
973; Wrapper around svm.pfnVMRun that preserves host XMM registers and
974; loads the guest ones when necessary.
975;
976; @cproto DECLASM(int) hmR0SVMRunWrapXMM(RTHCPHYS pVMCBHostPhys, RTHCPHYS pVMCBPhys, PCPUMCTX pCtx, PVM pVM, PVMCPU pVCpu, PFNHMSVMVMRUN pfnVMRun);
977;
978; @returns eax
979;
980; @param pVMCBHostPhys msc:rcx
981; @param pVMCBPhys msc:rdx
982; @param pCtx msc:r8
983; @param pVM msc:r9
984; @param pVCpu msc:[rbp+30h] The cross context virtual CPU structure of the calling EMT.
985; @param pfnVMRun msc:[rbp+38h]
986;
987; @remarks This is essentially the same code as hmR0VMXStartVMWrapXMM, only the parameters differ a little bit.
988;
989; @remarks Drivers shouldn't use AVX registers without saving+loading:
990; https://msdn.microsoft.com/en-us/library/windows/hardware/ff545910%28v=vs.85%29.aspx?f=255&MSPPError=-2147217396
991; However the compiler docs have a different idea:
992; https://msdn.microsoft.com/en-us/library/9z1stfyw.aspx
993; We'll go with the former for now.
994;
995; ASSUMING 64-bit and Windows for now.
996ALIGNCODE(16)
997BEGINPROC hmR0SVMRunWrapXMM
998 push xBP
999 mov xBP, xSP
1000 sub xSP, 0b0h + 040h ; Don't bother optimizing the frame size.
1001
1002 ; spill input parameters.
1003 mov [xBP + 010h], rcx ; pVMCBHostPhys
1004 mov [xBP + 018h], rdx ; pVMCBPhys
1005 mov [xBP + 020h], r8 ; pCtx
1006 mov [xBP + 028h], r9 ; pVM
1007
1008 ; Ask CPUM whether we've started using the FPU yet.
1009 mov rcx, [xBP + 30h] ; pVCpu
1010 call NAME(CPUMIsGuestFPUStateActive)
1011 test al, al
1012 jnz .guest_fpu_state_active
1013
1014 ; No need to mess with XMM registers, just call the start routine and return.
1015 mov r11, [xBP + 38h] ; pfnVMRun
1016 mov r10, [xBP + 30h] ; pVCpu
1017 mov [xSP + 020h], r10
1018 mov rcx, [xBP + 010h] ; pVMCBHostPhys
1019 mov rdx, [xBP + 018h] ; pVMCBPhys
1020 mov r8, [xBP + 020h] ; pCtx
1021 mov r9, [xBP + 028h] ; pVM
1022 call r11
1023
1024 leave
1025 ret
1026
1027ALIGNCODE(8)
1028.guest_fpu_state_active:
1029 ; Save the non-volatile host XMM registers.
1030 movdqa [rsp + 040h + 000h], xmm6
1031 movdqa [rsp + 040h + 010h], xmm7
1032 movdqa [rsp + 040h + 020h], xmm8
1033 movdqa [rsp + 040h + 030h], xmm9
1034 movdqa [rsp + 040h + 040h], xmm10
1035 movdqa [rsp + 040h + 050h], xmm11
1036 movdqa [rsp + 040h + 060h], xmm12
1037 movdqa [rsp + 040h + 070h], xmm13
1038 movdqa [rsp + 040h + 080h], xmm14
1039 movdqa [rsp + 040h + 090h], xmm15
1040 stmxcsr [rsp + 040h + 0a0h]
1041
1042 mov r10, [xBP + 020h] ; pCtx
1043 mov eax, [r10 + CPUMCTX.fXStateMask]
1044 test eax, eax
1045 jz .guest_fpu_state_manually
1046
1047 ;
1048 ; Using XSAVE.
1049 ;
1050 and eax, CPUM_VOLATILE_XSAVE_GUEST_COMPONENTS
1051 xor edx, edx
1052 mov r10, [r10 + CPUMCTX.pXStateR0]
1053 xrstor [r10]
1054
1055 ; Make the call (same as in the other case ).
1056 mov r11, [xBP + 38h] ; pfnVMRun
1057 mov r10, [xBP + 30h] ; pVCpu
1058 mov [xSP + 020h], r10
1059 mov rcx, [xBP + 010h] ; pVMCBHostPhys
1060 mov rdx, [xBP + 018h] ; pVMCBPhys
1061 mov r8, [xBP + 020h] ; pCtx
1062 mov r9, [xBP + 028h] ; pVM
1063 call r11
1064
1065 mov r11d, eax ; save return value (xsave below uses eax)
1066
1067 ; Save the guest XMM registers.
1068 mov r10, [xBP + 020h] ; pCtx
1069 mov eax, [r10 + CPUMCTX.fXStateMask]
1070 and eax, CPUM_VOLATILE_XSAVE_GUEST_COMPONENTS
1071 xor edx, edx
1072 mov r10, [r10 + CPUMCTX.pXStateR0]
1073 xsave [r10]
1074
1075 mov eax, r11d ; restore return value.
1076
1077.restore_non_volatile_host_xmm_regs:
1078 ; Load the non-volatile host XMM registers.
1079 movdqa xmm6, [rsp + 040h + 000h]
1080 movdqa xmm7, [rsp + 040h + 010h]
1081 movdqa xmm8, [rsp + 040h + 020h]
1082 movdqa xmm9, [rsp + 040h + 030h]
1083 movdqa xmm10, [rsp + 040h + 040h]
1084 movdqa xmm11, [rsp + 040h + 050h]
1085 movdqa xmm12, [rsp + 040h + 060h]
1086 movdqa xmm13, [rsp + 040h + 070h]
1087 movdqa xmm14, [rsp + 040h + 080h]
1088 movdqa xmm15, [rsp + 040h + 090h]
1089 ldmxcsr [rsp + 040h + 0a0h]
1090 leave
1091 ret
1092
1093 ;
1094 ; No XSAVE, load and save the guest XMM registers manually.
1095 ;
1096.guest_fpu_state_manually:
1097 ; Load the full guest XMM register state.
1098 mov r10, [r10 + CPUMCTX.pXStateR0]
1099 movdqa xmm0, [r10 + XMM_OFF_IN_X86FXSTATE + 000h]
1100 movdqa xmm1, [r10 + XMM_OFF_IN_X86FXSTATE + 010h]
1101 movdqa xmm2, [r10 + XMM_OFF_IN_X86FXSTATE + 020h]
1102 movdqa xmm3, [r10 + XMM_OFF_IN_X86FXSTATE + 030h]
1103 movdqa xmm4, [r10 + XMM_OFF_IN_X86FXSTATE + 040h]
1104 movdqa xmm5, [r10 + XMM_OFF_IN_X86FXSTATE + 050h]
1105 movdqa xmm6, [r10 + XMM_OFF_IN_X86FXSTATE + 060h]
1106 movdqa xmm7, [r10 + XMM_OFF_IN_X86FXSTATE + 070h]
1107 movdqa xmm8, [r10 + XMM_OFF_IN_X86FXSTATE + 080h]
1108 movdqa xmm9, [r10 + XMM_OFF_IN_X86FXSTATE + 090h]
1109 movdqa xmm10, [r10 + XMM_OFF_IN_X86FXSTATE + 0a0h]
1110 movdqa xmm11, [r10 + XMM_OFF_IN_X86FXSTATE + 0b0h]
1111 movdqa xmm12, [r10 + XMM_OFF_IN_X86FXSTATE + 0c0h]
1112 movdqa xmm13, [r10 + XMM_OFF_IN_X86FXSTATE + 0d0h]
1113 movdqa xmm14, [r10 + XMM_OFF_IN_X86FXSTATE + 0e0h]
1114 movdqa xmm15, [r10 + XMM_OFF_IN_X86FXSTATE + 0f0h]
1115 ldmxcsr [r10 + X86FXSTATE.MXCSR]
1116
1117 ; Make the call (same as in the other case ).
1118 mov r11, [xBP + 38h] ; pfnVMRun
1119 mov r10, [xBP + 30h] ; pVCpu
1120 mov [xSP + 020h], r10
1121 mov rcx, [xBP + 010h] ; pVMCBHostPhys
1122 mov rdx, [xBP + 018h] ; pVMCBPhys
1123 mov r8, [xBP + 020h] ; pCtx
1124 mov r9, [xBP + 028h] ; pVM
1125 call r11
1126
1127 ; Save the guest XMM registers.
1128 mov r10, [xBP + 020h] ; pCtx
1129 mov r10, [r10 + CPUMCTX.pXStateR0]
1130 stmxcsr [r10 + X86FXSTATE.MXCSR]
1131 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 000h], xmm0
1132 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 010h], xmm1
1133 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 020h], xmm2
1134 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 030h], xmm3
1135 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 040h], xmm4
1136 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 050h], xmm5
1137 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 060h], xmm6
1138 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 070h], xmm7
1139 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 080h], xmm8
1140 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 090h], xmm9
1141 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 0a0h], xmm10
1142 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 0b0h], xmm11
1143 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 0c0h], xmm12
1144 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 0d0h], xmm13
1145 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 0e0h], xmm14
1146 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 0f0h], xmm15
1147 jmp .restore_non_volatile_host_xmm_regs
1148ENDPROC hmR0SVMRunWrapXMM
1149
1150%endif ; VBOX_WITH_KERNEL_USING_XMM
1151
1152
1153;; @def RESTORE_STATE_VM32
1154; Macro restoring essential host state and updating guest state
1155; for a common (32-bit or 64-bit) host with a 32-bit guest, for VT-x.
1156%macro RESTORE_STATE_VM32 0
1157 ; Restore base and limit of the IDTR & GDTR.
1158 %ifndef VMX_SKIP_IDTR
1159 lidt [xSP]
1160 add xSP, xCB * 2
1161 %endif
1162 %ifndef VMX_SKIP_GDTR
1163 lgdt [xSP]
1164 add xSP, xCB * 2
1165 %endif
1166
1167 push xDI
1168 %ifndef VMX_SKIP_TR
1169 mov xDI, [xSP + xCB * 3] ; pCtx (*3 to skip the saved xDI, TR, LDTR).
1170 %else
1171 mov xDI, [xSP + xCB * 2] ; pCtx (*2 to skip the saved xDI, LDTR).
1172 %endif
1173
1174 mov [ss:xDI + CPUMCTX.eax], eax
1175 mov [ss:xDI + CPUMCTX.ebx], ebx
1176 mov [ss:xDI + CPUMCTX.ecx], ecx
1177 mov [ss:xDI + CPUMCTX.edx], edx
1178 mov [ss:xDI + CPUMCTX.esi], esi
1179 mov [ss:xDI + CPUMCTX.ebp], ebp
1180 mov xAX, cr2
1181 mov [ss:xDI + CPUMCTX.cr2], xAX
1182
1183 %ifdef RT_ARCH_AMD64
1184 pop xAX ; The guest edi we pushed above.
1185 mov dword [ss:xDI + CPUMCTX.edi], eax
1186 %else
1187 pop dword [ss:xDI + CPUMCTX.edi] ; The guest edi we pushed above.
1188 %endif
1189
1190 %ifndef VMX_SKIP_TR
1191 ; Restore TSS selector; must mark it as not busy before using ltr (!)
1192 ; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p)
1193 ; @todo get rid of sgdt
1194 pop xBX ; Saved TR
1195 sub xSP, xCB * 2
1196 sgdt [xSP]
1197 mov xAX, xBX
1198 and eax, X86_SEL_MASK_OFF_RPL ; Mask away TI and RPL bits leaving only the descriptor offset.
1199 add xAX, [xSP + 2] ; eax <- GDTR.address + descriptor offset.
1200 and dword [ss:xAX + 4], ~RT_BIT(9) ; Clear the busy flag in TSS desc (bits 0-7=base, bit 9=busy bit).
1201 ltr bx
1202 add xSP, xCB * 2
1203 %endif
1204
1205 pop xAX ; Saved LDTR
1206 %ifdef RT_ARCH_AMD64
1207 cmp eax, 0
1208 je %%skip_ldt_write32
1209 %endif
1210 lldt ax
1211
1212%%skip_ldt_write32:
1213 add xSP, xCB ; pCtx
1214
1215 %ifdef VMX_USE_CACHED_VMCS_ACCESSES
1216 pop xDX ; Saved pCache
1217
1218 ; Note! If we get here as a result of invalid VMCS pointer, all the following
1219 ; vmread's will fail (only eflags.cf=1 will be set) but that shouldn't cause any
1220 ; trouble, it will just be less efficient.
1221 mov ecx, [ss:xDX + VMCSCACHE.Read.cValidEntries]
1222 cmp ecx, 0 ; Can't happen
1223 je %%no_cached_read32
1224 jmp %%cached_read32
1225
1226ALIGN(16)
1227%%cached_read32:
1228 dec xCX
1229 mov eax, [ss:xDX + VMCSCACHE.Read.aField + xCX * 4]
1230 ; Note! This leaves the high 32 bits of the cache entry unmodified!!
1231 vmread [ss:xDX + VMCSCACHE.Read.aFieldVal + xCX * 8], xAX
1232 cmp xCX, 0
1233 jnz %%cached_read32
1234%%no_cached_read32:
1235 %endif
1236
1237 ; Restore segment registers.
1238 MYPOPSEGS xAX, ax
1239
1240 ; Restore the host XCR0 if necessary.
1241 pop xCX
1242 test ecx, ecx
1243 jnz %%xcr0_after_skip
1244 pop xAX
1245 pop xDX
1246 xsetbv ; ecx is already zero.
1247%%xcr0_after_skip:
1248
1249 ; Restore general purpose registers.
1250 MYPOPAD
1251%endmacro
1252
1253
1254;;
1255; Prepares for and executes VMLAUNCH/VMRESUME (32-bit guest mode)
1256;
1257; @returns VBox status code
1258; @param fResume x86:[ebp+8], msc:rcx,gcc:rdi Whether to use vmlaunch/vmresume.
1259; @param pCtx x86:[ebp+c], msc:rdx,gcc:rsi Pointer to the guest-CPU context.
1260; @param pCache x86:[ebp+10],msc:r8, gcc:rdx Pointer to the VMCS cache.
1261; @param pVM x86:[ebp+14],msc:r9, gcc:rcx The cross context VM structure.
1262; @param pVCpu x86:[ebp+18],msc:[ebp+30],gcc:r8 The cross context virtual CPU structure of the calling EMT.
1263;
1264ALIGNCODE(16)
1265BEGINPROC VMXR0StartVM32
1266 push xBP
1267 mov xBP, xSP
1268
1269 pushf
1270 cli
1271
1272 ;
1273 ; Save all general purpose host registers.
1274 ;
1275 MYPUSHAD
1276
1277 ;
1278 ; First we have to write some final guest CPU context registers.
1279 ;
1280 mov eax, VMX_VMCS_HOST_RIP
1281%ifdef RT_ARCH_AMD64
1282 lea r10, [.vmlaunch_done wrt rip]
1283 vmwrite rax, r10
1284%else
1285 mov ecx, .vmlaunch_done
1286 vmwrite eax, ecx
1287%endif
1288 ; Note: assumes success!
1289
1290 ;
1291 ; Unify input parameter registers.
1292 ;
1293%ifdef RT_ARCH_AMD64
1294 %ifdef ASM_CALL64_GCC
1295 ; fResume already in rdi
1296 ; pCtx already in rsi
1297 mov rbx, rdx ; pCache
1298 %else
1299 mov rdi, rcx ; fResume
1300 mov rsi, rdx ; pCtx
1301 mov rbx, r8 ; pCache
1302 %endif
1303%else
1304 mov edi, [ebp + 8] ; fResume
1305 mov esi, [ebp + 12] ; pCtx
1306 mov ebx, [ebp + 16] ; pCache
1307%endif
1308
1309 ;
1310 ; Save the host XCR0 and load the guest one if necessary.
1311 ; Note! Trashes rdx and rcx.
1312 ;
1313%ifdef ASM_CALL64_MSC
1314 mov rax, [xBP + 30h] ; pVCpu
1315%elifdef ASM_CALL64_GCC
1316 mov rax, r8 ; pVCpu
1317%else
1318 mov eax, [xBP + 18h] ; pVCpu
1319%endif
1320 test byte [xAX + VMCPU.hm + HMCPU.fLoadSaveGuestXcr0], 1
1321 jz .xcr0_before_skip
1322
1323 xor ecx, ecx
1324 xgetbv ; Save the host one on the stack.
1325 push xDX
1326 push xAX
1327
1328 mov eax, [xSI + CPUMCTX.aXcr] ; Load the guest one.
1329 mov edx, [xSI + CPUMCTX.aXcr + 4]
1330 xor ecx, ecx ; paranoia
1331 xsetbv
1332
1333 push 0 ; Indicate that we must restore XCR0 (popped into ecx, thus 0).
1334 jmp .xcr0_before_done
1335
1336.xcr0_before_skip:
1337 push 3fh ; indicate that we need not.
1338.xcr0_before_done:
1339
1340 ;
1341 ; Save segment registers.
1342 ; Note! Trashes rdx & rcx, so we moved it here (amd64 case).
1343 ;
1344 MYPUSHSEGS xAX, ax
1345
1346%ifdef VMX_USE_CACHED_VMCS_ACCESSES
1347 mov ecx, [xBX + VMCSCACHE.Write.cValidEntries]
1348 cmp ecx, 0
1349 je .no_cached_writes
1350 mov edx, ecx
1351 mov ecx, 0
1352 jmp .cached_write
1353
1354ALIGN(16)
1355.cached_write:
1356 mov eax, [xBX + VMCSCACHE.Write.aField + xCX * 4]
1357 vmwrite xAX, [xBX + VMCSCACHE.Write.aFieldVal + xCX * 8]
1358 inc xCX
1359 cmp xCX, xDX
1360 jl .cached_write
1361
1362 mov dword [xBX + VMCSCACHE.Write.cValidEntries], 0
1363.no_cached_writes:
1364
1365 ; Save the pCache pointer.
1366 push xBX
1367%endif
1368
1369 ; Save the pCtx pointer.
1370 push xSI
1371
1372 ; Save host LDTR.
1373 xor eax, eax
1374 sldt ax
1375 push xAX
1376
1377%ifndef VMX_SKIP_TR
1378 ; The host TR limit is reset to 0x67; save & restore it manually.
1379 str eax
1380 push xAX
1381%endif
1382
1383%ifndef VMX_SKIP_GDTR
1384 ; VT-x only saves the base of the GDTR & IDTR and resets the limit to 0xffff; we must restore the limit correctly!
1385 sub xSP, xCB * 2
1386 sgdt [xSP]
1387%endif
1388%ifndef VMX_SKIP_IDTR
1389 sub xSP, xCB * 2
1390 sidt [xSP]
1391%endif
1392
1393 ; Load CR2 if necessary (may be expensive as writing CR2 is a synchronizing instruction).
1394 mov xBX, [xSI + CPUMCTX.cr2]
1395 mov xDX, cr2
1396 cmp xBX, xDX
1397 je .skip_cr2_write32
1398 mov cr2, xBX
1399
1400.skip_cr2_write32:
1401 mov eax, VMX_VMCS_HOST_RSP
1402 vmwrite xAX, xSP
1403 ; Note: assumes success!
1404 ; Don't mess with ESP anymore!!!
1405
1406 ; Load guest general purpose registers.
1407 mov eax, [xSI + CPUMCTX.eax]
1408 mov ebx, [xSI + CPUMCTX.ebx]
1409 mov ecx, [xSI + CPUMCTX.ecx]
1410 mov edx, [xSI + CPUMCTX.edx]
1411 mov ebp, [xSI + CPUMCTX.ebp]
1412
1413 ; Resume or start VM?
1414 cmp xDI, 0 ; fResume
1415
1416 ; Load guest edi & esi.
1417 mov edi, [xSI + CPUMCTX.edi]
1418 mov esi, [xSI + CPUMCTX.esi]
1419
1420 je .vmlaunch_launch
1421
1422 vmresume
1423 jc near .vmxstart_invalid_vmcs_ptr
1424 jz near .vmxstart_start_failed
1425 jmp .vmlaunch_done; ; Here if vmresume detected a failure.
1426
1427.vmlaunch_launch:
1428 vmlaunch
1429 jc near .vmxstart_invalid_vmcs_ptr
1430 jz near .vmxstart_start_failed
1431 jmp .vmlaunch_done; ; Here if vmlaunch detected a failure.
1432
1433ALIGNCODE(16) ;; @todo YASM BUG - this alignment is wrong on darwin, it's 1 byte off.
1434.vmlaunch_done:
1435 RESTORE_STATE_VM32
1436 mov eax, VINF_SUCCESS
1437
1438.vmstart_end:
1439 popf
1440 pop xBP
1441 ret
1442
1443.vmxstart_invalid_vmcs_ptr:
1444 RESTORE_STATE_VM32
1445 mov eax, VERR_VMX_INVALID_VMCS_PTR_TO_START_VM
1446 jmp .vmstart_end
1447
1448.vmxstart_start_failed:
1449 RESTORE_STATE_VM32
1450 mov eax, VERR_VMX_UNABLE_TO_START_VM
1451 jmp .vmstart_end
1452
1453ENDPROC VMXR0StartVM32
1454
1455
1456%ifdef RT_ARCH_AMD64
1457;; @def RESTORE_STATE_VM64
1458; Macro restoring essential host state and updating guest state
1459; for 64-bit host, 64-bit guest for VT-x.
1460;
1461%macro RESTORE_STATE_VM64 0
1462 ; Restore base and limit of the IDTR & GDTR
1463 %ifndef VMX_SKIP_IDTR
1464 lidt [xSP]
1465 add xSP, xCB * 2
1466 %endif
1467 %ifndef VMX_SKIP_GDTR
1468 lgdt [xSP]
1469 add xSP, xCB * 2
1470 %endif
1471
1472 push xDI
1473 %ifndef VMX_SKIP_TR
1474 mov xDI, [xSP + xCB * 3] ; pCtx (*3 to skip the saved xDI, TR, LDTR)
1475 %else
1476 mov xDI, [xSP + xCB * 2] ; pCtx (*2 to skip the saved xDI, LDTR)
1477 %endif
1478
1479 mov qword [xDI + CPUMCTX.eax], rax
1480 mov qword [xDI + CPUMCTX.ebx], rbx
1481 mov qword [xDI + CPUMCTX.ecx], rcx
1482 mov qword [xDI + CPUMCTX.edx], rdx
1483 mov qword [xDI + CPUMCTX.esi], rsi
1484 mov qword [xDI + CPUMCTX.ebp], rbp
1485 mov qword [xDI + CPUMCTX.r8], r8
1486 mov qword [xDI + CPUMCTX.r9], r9
1487 mov qword [xDI + CPUMCTX.r10], r10
1488 mov qword [xDI + CPUMCTX.r11], r11
1489 mov qword [xDI + CPUMCTX.r12], r12
1490 mov qword [xDI + CPUMCTX.r13], r13
1491 mov qword [xDI + CPUMCTX.r14], r14
1492 mov qword [xDI + CPUMCTX.r15], r15
1493 mov rax, cr2
1494 mov qword [xDI + CPUMCTX.cr2], rax
1495
1496 pop xAX ; The guest rdi we pushed above
1497 mov qword [xDI + CPUMCTX.edi], rax
1498
1499 %ifndef VMX_SKIP_TR
1500 ; Restore TSS selector; must mark it as not busy before using ltr (!)
1501 ; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p).
1502 ; @todo get rid of sgdt
1503 pop xBX ; Saved TR
1504 sub xSP, xCB * 2
1505 sgdt [xSP]
1506 mov xAX, xBX
1507 and eax, X86_SEL_MASK_OFF_RPL ; Mask away TI and RPL bits leaving only the descriptor offset.
1508 add xAX, [xSP + 2] ; eax <- GDTR.address + descriptor offset.
1509 and dword [xAX + 4], ~RT_BIT(9) ; Clear the busy flag in TSS desc (bits 0-7=base, bit 9=busy bit).
1510 ltr bx
1511 add xSP, xCB * 2
1512 %endif
1513
1514 pop xAX ; Saved LDTR
1515 cmp eax, 0
1516 je %%skip_ldt_write64
1517 lldt ax
1518
1519%%skip_ldt_write64:
1520 pop xSI ; pCtx (needed in rsi by the macros below)
1521
1522 %ifdef VMX_USE_CACHED_VMCS_ACCESSES
1523 pop xDX ; Saved pCache
1524
1525 ; Note! If we get here as a result of invalid VMCS pointer, all the following
1526 ; vmread's will fail (only eflags.cf=1 will be set) but that shouldn't cause any
1527 ; trouble, it will just be less efficient.
1528 mov ecx, [xDX + VMCSCACHE.Read.cValidEntries]
1529 cmp ecx, 0 ; Can't happen
1530 je %%no_cached_read64
1531 jmp %%cached_read64
1532
1533ALIGN(16)
1534%%cached_read64:
1535 dec xCX
1536 mov eax, [xDX + VMCSCACHE.Read.aField + xCX * 4]
1537 vmread [xDX + VMCSCACHE.Read.aFieldVal + xCX * 8], xAX
1538 cmp xCX, 0
1539 jnz %%cached_read64
1540%%no_cached_read64:
1541 %endif
1542
1543 ; Restore segment registers.
1544 MYPOPSEGS xAX, ax
1545
1546 ; Restore the host XCR0 if necessary.
1547 pop xCX
1548 test ecx, ecx
1549 jnz %%xcr0_after_skip
1550 pop xAX
1551 pop xDX
1552 xsetbv ; ecx is already zero.
1553%%xcr0_after_skip:
1554
1555 ; Restore general purpose registers.
1556 MYPOPAD
1557%endmacro
1558
1559
1560;;
1561; Prepares for and executes VMLAUNCH/VMRESUME (64-bit guest mode)
1562;
1563; @returns VBox status code
1564; @param fResume msc:rcx, gcc:rdi Whether to use vmlaunch/vmresume.
1565; @param pCtx msc:rdx, gcc:rsi Pointer to the guest-CPU context.
1566; @param pCache msc:r8, gcc:rdx Pointer to the VMCS cache.
1567; @param pVM msc:r9, gcc:rcx The cross context VM structure.
1568; @param pVCpu msc:[ebp+30], gcc:r8 The cross context virtual CPU structure of the calling EMT.
1569;
1570ALIGNCODE(16)
1571BEGINPROC VMXR0StartVM64
1572 push xBP
1573 mov xBP, xSP
1574
1575 pushf
1576 cli
1577
1578 ; Save all general purpose host registers.
1579 MYPUSHAD
1580
1581 ; First we have to save some final CPU context registers.
1582 lea r10, [.vmlaunch64_done wrt rip]
1583 mov rax, VMX_VMCS_HOST_RIP ; Return address (too difficult to continue after VMLAUNCH?).
1584 vmwrite rax, r10
1585 ; Note: assumes success!
1586
1587 ;
1588 ; Unify the input parameter registers.
1589 ;
1590%ifdef ASM_CALL64_GCC
1591 ; fResume already in rdi
1592 ; pCtx already in rsi
1593 mov rbx, rdx ; pCache
1594%else
1595 mov rdi, rcx ; fResume
1596 mov rsi, rdx ; pCtx
1597 mov rbx, r8 ; pCache
1598%endif
1599
1600 ;
1601 ; Save the host XCR0 and load the guest one if necessary.
1602 ; Note! Trashes rdx and rcx.
1603 ;
1604%ifdef ASM_CALL64_MSC
1605 mov rax, [xBP + 30h] ; pVCpu
1606%else
1607 mov rax, r8 ; pVCpu
1608%endif
1609 test byte [xAX + VMCPU.hm + HMCPU.fLoadSaveGuestXcr0], 1
1610 jz .xcr0_before_skip
1611
1612 xor ecx, ecx
1613 xgetbv ; Save the host one on the stack.
1614 push xDX
1615 push xAX
1616
1617 mov eax, [xSI + CPUMCTX.aXcr] ; Load the guest one.
1618 mov edx, [xSI + CPUMCTX.aXcr + 4]
1619 xor ecx, ecx ; paranoia
1620 xsetbv
1621
1622 push 0 ; Indicate that we must restore XCR0 (popped into ecx, thus 0).
1623 jmp .xcr0_before_done
1624
1625.xcr0_before_skip:
1626 push 3fh ; indicate that we need not.
1627.xcr0_before_done:
1628
1629 ;
1630 ; Save segment registers.
1631 ; Note! Trashes rdx & rcx, so we moved it here (amd64 case).
1632 ;
1633 MYPUSHSEGS xAX, ax
1634
1635%ifdef VMX_USE_CACHED_VMCS_ACCESSES
1636 mov ecx, [xBX + VMCSCACHE.Write.cValidEntries]
1637 cmp ecx, 0
1638 je .no_cached_writes
1639 mov edx, ecx
1640 mov ecx, 0
1641 jmp .cached_write
1642
1643ALIGN(16)
1644.cached_write:
1645 mov eax, [xBX + VMCSCACHE.Write.aField + xCX * 4]
1646 vmwrite xAX, [xBX + VMCSCACHE.Write.aFieldVal + xCX * 8]
1647 inc xCX
1648 cmp xCX, xDX
1649 jl .cached_write
1650
1651 mov dword [xBX + VMCSCACHE.Write.cValidEntries], 0
1652.no_cached_writes:
1653
1654 ; Save the pCache pointer.
1655 push xBX
1656%endif
1657
1658 ; Save the pCtx pointer.
1659 push xSI
1660
1661 ; Save host LDTR.
1662 xor eax, eax
1663 sldt ax
1664 push xAX
1665
1666%ifndef VMX_SKIP_TR
1667 ; The host TR limit is reset to 0x67; save & restore it manually.
1668 str eax
1669 push xAX
1670%endif
1671
1672%ifndef VMX_SKIP_GDTR
1673 ; VT-x only saves the base of the GDTR & IDTR and resets the limit to 0xffff; we must restore the limit correctly!
1674 sub xSP, xCB * 2
1675 sgdt [xSP]
1676%endif
1677%ifndef VMX_SKIP_IDTR
1678 sub xSP, xCB * 2
1679 sidt [xSP]
1680%endif
1681
1682 ; Load CR2 if necessary (may be expensive as writing CR2 is a synchronizing instruction).
1683 mov rbx, qword [xSI + CPUMCTX.cr2]
1684 mov rdx, cr2
1685 cmp rbx, rdx
1686 je .skip_cr2_write
1687 mov cr2, rbx
1688
1689.skip_cr2_write:
1690 mov eax, VMX_VMCS_HOST_RSP
1691 vmwrite xAX, xSP
1692 ; Note: assumes success!
1693 ; Don't mess with ESP anymore!!!
1694
1695 ; Load guest general purpose registers.
1696 mov rax, qword [xSI + CPUMCTX.eax]
1697 mov rbx, qword [xSI + CPUMCTX.ebx]
1698 mov rcx, qword [xSI + CPUMCTX.ecx]
1699 mov rdx, qword [xSI + CPUMCTX.edx]
1700 mov rbp, qword [xSI + CPUMCTX.ebp]
1701 mov r8, qword [xSI + CPUMCTX.r8]
1702 mov r9, qword [xSI + CPUMCTX.r9]
1703 mov r10, qword [xSI + CPUMCTX.r10]
1704 mov r11, qword [xSI + CPUMCTX.r11]
1705 mov r12, qword [xSI + CPUMCTX.r12]
1706 mov r13, qword [xSI + CPUMCTX.r13]
1707 mov r14, qword [xSI + CPUMCTX.r14]
1708 mov r15, qword [xSI + CPUMCTX.r15]
1709
1710 ; Resume or start VM?
1711 cmp xDI, 0 ; fResume
1712
1713 ; Load guest rdi & rsi.
1714 mov rdi, qword [xSI + CPUMCTX.edi]
1715 mov rsi, qword [xSI + CPUMCTX.esi]
1716
1717 je .vmlaunch64_launch
1718
1719 vmresume
1720 jc near .vmxstart64_invalid_vmcs_ptr
1721 jz near .vmxstart64_start_failed
1722 jmp .vmlaunch64_done; ; Here if vmresume detected a failure.
1723
1724.vmlaunch64_launch:
1725 vmlaunch
1726 jc near .vmxstart64_invalid_vmcs_ptr
1727 jz near .vmxstart64_start_failed
1728 jmp .vmlaunch64_done; ; Here if vmlaunch detected a failure.
1729
1730ALIGNCODE(16)
1731.vmlaunch64_done:
1732 RESTORE_STATE_VM64
1733 mov eax, VINF_SUCCESS
1734
1735.vmstart64_end:
1736 popf
1737 pop xBP
1738 ret
1739
1740.vmxstart64_invalid_vmcs_ptr:
1741 RESTORE_STATE_VM64
1742 mov eax, VERR_VMX_INVALID_VMCS_PTR_TO_START_VM
1743 jmp .vmstart64_end
1744
1745.vmxstart64_start_failed:
1746 RESTORE_STATE_VM64
1747 mov eax, VERR_VMX_UNABLE_TO_START_VM
1748 jmp .vmstart64_end
1749ENDPROC VMXR0StartVM64
1750%endif ; RT_ARCH_AMD64
1751
1752
1753;;
1754; Prepares for and executes VMRUN (32-bit guests)
1755;
1756; @returns VBox status code
1757; @param pVMCBHostPhys Physical address of host VMCB.
1758; @param pVMCBPhys Physical address of guest VMCB.
1759; @param pCtx Pointer to the guest CPU-context.
1760; @param pVM msc:r9, gcc:rcx The cross context VM structure.
1761; @param pVCpu msc:[rsp+28],gcc:r8 The cross context virtual CPU structure of the calling EMT.
1762;
1763ALIGNCODE(16)
1764BEGINPROC SVMR0VMRun
1765%ifdef RT_ARCH_AMD64 ; fake a cdecl stack frame
1766 %ifdef ASM_CALL64_GCC
1767 push r8
1768 push rcx
1769 push rdx
1770 push rsi
1771 push rdi
1772 %else
1773 mov rax, [rsp + 28h]
1774 push rax ; pVCpu
1775 push r9 ; pVM
1776 push r8 ; pCtx
1777 push rdx ; HCPHYSGuestVMCB
1778 push rcx ; HCPhysHostVMCB
1779 %endif
1780 push 0
1781%endif
1782 push xBP
1783 mov xBP, xSP
1784 pushf
1785
1786 ;
1787 ; Save all general purpose host registers.
1788 ;
1789 MYPUSHAD
1790
1791 ;
1792 ; Load pCtx into xSI.
1793 ;
1794 mov xSI, [xBP + xCB * 2 + RTHCPHYS_CB * 2] ; pCtx
1795
1796 ;
1797 ; Save the host XCR0 and load the guest one if necessary.
1798 ;
1799 mov xAX, [xBP + xCB * 2 + RTHCPHYS_CB * 2 + xCB * 2] ; pVCpu
1800 test byte [xAX + VMCPU.hm + HMCPU.fLoadSaveGuestXcr0], 1
1801 jz .xcr0_before_skip
1802
1803 xor ecx, ecx
1804 xgetbv ; Save the host one on the stack.
1805 push xDX
1806 push xAX
1807
1808 mov xSI, [xBP + xCB * 2 + RTHCPHYS_CB * 2] ; pCtx
1809 mov eax, [xSI + CPUMCTX.aXcr] ; Load the guest one.
1810 mov edx, [xSI + CPUMCTX.aXcr + 4]
1811 xor ecx, ecx ; paranoia
1812 xsetbv
1813
1814 push 0 ; Indicate that we must restore XCR0 (popped into ecx, thus 0).
1815 jmp .xcr0_before_done
1816
1817.xcr0_before_skip:
1818 push 3fh ; indicate that we need not.
1819.xcr0_before_done:
1820
1821 ;
1822 ; Save guest CPU-context pointer for simplifying saving of the GPRs afterwards.
1823 ;
1824 push xSI
1825
1826 ; Save host fs, gs, sysenter msr etc.
1827 mov xAX, [xBP + xCB * 2] ; pVMCBHostPhys (64 bits physical address; x86: take low dword only)
1828 push xAX ; save for the vmload after vmrun
1829 vmsave
1830
1831 ; Setup xAX for VMLOAD.
1832 mov xAX, [xBP + xCB * 2 + RTHCPHYS_CB] ; pVMCBPhys (64 bits physical address; take low dword only)
1833
1834 ; Load guest general purpose registers.
1835 ; eax is loaded from the VMCB by VMRUN.
1836 mov ebx, [xSI + CPUMCTX.ebx]
1837 mov ecx, [xSI + CPUMCTX.ecx]
1838 mov edx, [xSI + CPUMCTX.edx]
1839 mov edi, [xSI + CPUMCTX.edi]
1840 mov ebp, [xSI + CPUMCTX.ebp]
1841 mov esi, [xSI + CPUMCTX.esi]
1842
1843 ; Clear the global interrupt flag & execute sti to make sure external interrupts cause a world switch.
1844 clgi
1845 sti
1846
1847 ; Load guest fs, gs, sysenter msr etc.
1848 vmload
1849 ; Run the VM.
1850 vmrun
1851
1852 ; eax is in the VMCB already; we can use it here.
1853
1854 ; Save guest fs, gs, sysenter msr etc.
1855 vmsave
1856
1857 ; Load host fs, gs, sysenter msr etc.
1858 pop xAX ; Pushed above
1859 vmload
1860
1861 ; Set the global interrupt flag again, but execute cli to make sure IF=0.
1862 cli
1863 stgi
1864
1865 ;
1866 ; Pop the context pointer (pushed above) and save the guest GPRs (sans RSP and RAX).
1867 ;
1868 pop xAX
1869
1870 mov [ss:xAX + CPUMCTX.ebx], ebx
1871 mov [ss:xAX + CPUMCTX.ecx], ecx
1872 mov [ss:xAX + CPUMCTX.edx], edx
1873 mov [ss:xAX + CPUMCTX.esi], esi
1874 mov [ss:xAX + CPUMCTX.edi], edi
1875 mov [ss:xAX + CPUMCTX.ebp], ebp
1876
1877 ;
1878 ; Restore the host xcr0 if necessary.
1879 ;
1880 pop xCX
1881 test ecx, ecx
1882 jnz .xcr0_after_skip
1883 pop xAX
1884 pop xDX
1885 xsetbv ; ecx is already zero.
1886.xcr0_after_skip:
1887
1888 ;
1889 ; Restore host general purpose registers.
1890 ;
1891 MYPOPAD
1892
1893 mov eax, VINF_SUCCESS
1894
1895 popf
1896 pop xBP
1897%ifdef RT_ARCH_AMD64
1898 add xSP, 6*xCB
1899%endif
1900 ret
1901ENDPROC SVMR0VMRun
1902
1903
1904%ifdef RT_ARCH_AMD64
1905;;
1906; Prepares for and executes VMRUN (64-bit guests)
1907;
1908; @returns VBox status code
1909; @param pVMCBHostPhys Physical address of host VMCB.
1910; @param pVMCBPhys Physical address of guest VMCB.
1911; @param pCtx Pointer to the guest-CPU context.
1912; @param pVM msc:r9, gcc:rcx The cross context VM structure.
1913; @param pVCpu msc:[rsp+28],gcc:r8 The cross context virtual CPU structure of the calling EMT.
1914;
1915ALIGNCODE(16)
1916BEGINPROC SVMR0VMRun64
1917 ; Fake a cdecl stack frame
1918 %ifdef ASM_CALL64_GCC
1919 push r8
1920 push rcx
1921 push rdx
1922 push rsi
1923 push rdi
1924 %else
1925 mov rax, [rsp + 28h]
1926 push rax ; rbp + 30h pVCpu
1927 push r9 ; rbp + 28h pVM
1928 push r8 ; rbp + 20h pCtx
1929 push rdx ; rbp + 18h HCPHYSGuestVMCB
1930 push rcx ; rbp + 10h HCPhysHostVMCB
1931 %endif
1932 push 0 ; rbp + 08h "fake ret addr"
1933 push rbp ; rbp + 00h
1934 mov rbp, rsp
1935 pushf
1936
1937 ; Manual save and restore:
1938 ; - General purpose registers except RIP, RSP, RAX
1939 ;
1940 ; Trashed:
1941 ; - CR2 (we don't care)
1942 ; - LDTR (reset to 0)
1943 ; - DRx (presumably not changed at all)
1944 ; - DR7 (reset to 0x400)
1945 ;
1946
1947 ;
1948 ; Save all general purpose host registers.
1949 ;
1950 MYPUSHAD
1951
1952 ;
1953 ; Load pCtx into xSI.
1954 ;
1955 mov xSI, [rbp + xCB * 2 + RTHCPHYS_CB * 2]
1956
1957 ;
1958 ; Save the host XCR0 and load the guest one if necessary.
1959 ;
1960 mov rax, [xBP + 30h] ; pVCpu
1961 test byte [xAX + VMCPU.hm + HMCPU.fLoadSaveGuestXcr0], 1
1962 jz .xcr0_before_skip
1963
1964 xor ecx, ecx
1965 xgetbv ; Save the host one on the stack.
1966 push xDX
1967 push xAX
1968
1969 mov xSI, [xBP + xCB * 2 + RTHCPHYS_CB * 2] ; pCtx
1970 mov eax, [xSI + CPUMCTX.aXcr] ; Load the guest one.
1971 mov edx, [xSI + CPUMCTX.aXcr + 4]
1972 xor ecx, ecx ; paranoia
1973 xsetbv
1974
1975 push 0 ; Indicate that we must restore XCR0 (popped into ecx, thus 0).
1976 jmp .xcr0_before_done
1977
1978.xcr0_before_skip:
1979 push 3fh ; indicate that we need not.
1980.xcr0_before_done:
1981
1982 ;
1983 ; Save guest CPU-context pointer for simplifying saving of the GPRs afterwards.
1984 ;
1985 push rsi
1986
1987 ;
1988 ; Save host fs, gs, sysenter msr etc.
1989 ;
1990 mov rax, [rbp + xCB * 2] ; pVMCBHostPhys (64 bits physical address; x86: take low dword only)
1991 push rax ; Save for the vmload after vmrun
1992 vmsave
1993
1994 ; Setup rax for VMLOAD.
1995 mov rax, [rbp + xCB * 2 + RTHCPHYS_CB] ; pVMCBPhys (64 bits physical address; take low dword only)
1996
1997 ; Load guest general purpose registers.
1998 ; rax is loaded from the VMCB by VMRUN.
1999 mov rbx, qword [xSI + CPUMCTX.ebx]
2000 mov rcx, qword [xSI + CPUMCTX.ecx]
2001 mov rdx, qword [xSI + CPUMCTX.edx]
2002 mov rdi, qword [xSI + CPUMCTX.edi]
2003 mov rbp, qword [xSI + CPUMCTX.ebp]
2004 mov r8, qword [xSI + CPUMCTX.r8]
2005 mov r9, qword [xSI + CPUMCTX.r9]
2006 mov r10, qword [xSI + CPUMCTX.r10]
2007 mov r11, qword [xSI + CPUMCTX.r11]
2008 mov r12, qword [xSI + CPUMCTX.r12]
2009 mov r13, qword [xSI + CPUMCTX.r13]
2010 mov r14, qword [xSI + CPUMCTX.r14]
2011 mov r15, qword [xSI + CPUMCTX.r15]
2012 mov rsi, qword [xSI + CPUMCTX.esi]
2013
2014 ; Clear the global interrupt flag & execute sti to make sure external interrupts cause a world switch.
2015 clgi
2016 sti
2017
2018 ; Load guest fs, gs, sysenter msr etc.
2019 vmload
2020 ; Run the VM.
2021 vmrun
2022
2023 ; rax is in the VMCB already; we can use it here.
2024
2025 ; Save guest fs, gs, sysenter msr etc.
2026 vmsave
2027
2028 ;
2029 ; Load host fs, gs, sysenter msr etc.
2030 ;
2031 pop rax ; pushed above
2032 vmload
2033
2034 ;
2035 ; Set the global interrupt flag again, but execute cli to make sure IF=0.
2036 ;
2037 cli
2038 stgi
2039
2040 ;
2041 ; Pop the context pointer (pushed above) and save the guest GPRs (sans RSP and RAX).
2042 ;
2043 pop rax
2044
2045 mov qword [rax + CPUMCTX.ebx], rbx
2046 mov qword [rax + CPUMCTX.ecx], rcx
2047 mov qword [rax + CPUMCTX.edx], rdx
2048 mov qword [rax + CPUMCTX.esi], rsi
2049 mov qword [rax + CPUMCTX.edi], rdi
2050 mov qword [rax + CPUMCTX.ebp], rbp
2051 mov qword [rax + CPUMCTX.r8], r8
2052 mov qword [rax + CPUMCTX.r9], r9
2053 mov qword [rax + CPUMCTX.r10], r10
2054 mov qword [rax + CPUMCTX.r11], r11
2055 mov qword [rax + CPUMCTX.r12], r12
2056 mov qword [rax + CPUMCTX.r13], r13
2057 mov qword [rax + CPUMCTX.r14], r14
2058 mov qword [rax + CPUMCTX.r15], r15
2059
2060 ;
2061 ; Restore the host xcr0 if necessary.
2062 ;
2063 pop xCX
2064 test ecx, ecx
2065 jnz .xcr0_after_skip
2066 pop xAX
2067 pop xDX
2068 xsetbv ; ecx is already zero.
2069.xcr0_after_skip:
2070
2071 ;
2072 ; Restore host general purpose registers.
2073 ;
2074 MYPOPAD
2075
2076 mov eax, VINF_SUCCESS
2077
2078 popf
2079 pop rbp
2080 add rsp, 6 * xCB
2081 ret
2082ENDPROC SVMR0VMRun64
2083%endif ; RT_ARCH_AMD64
2084