VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/HMR0A.asm@ 57493

Last change on this file since 57493 was 57493, checked in by vboxsync, 9 years ago

VMM/HM: Merge HMR0Mixed.mac into the main assembly file now that VBOX_WITH_HYBRID_32BIT_KERNEL is gone.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 58.9 KB
Line 
1; $Id: HMR0A.asm 57493 2015-08-21 11:54:00Z vboxsync $
2;; @file
3; HM - Ring-0 VMX, SVM world-switch and helper routines
4;
5
6;
7; Copyright (C) 2006-2015 Oracle Corporation
8;
9; This file is part of VirtualBox Open Source Edition (OSE), as
10; available from http://www.virtualbox.org. This file is free software;
11; you can redistribute it and/or modify it under the terms of the GNU
12; General Public License (GPL) as published by the Free Software
13; Foundation, in version 2 as it comes in the "COPYING" file of the
14; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16;
17
18;*********************************************************************************************************************************
19;* Header Files *
20;*********************************************************************************************************************************
21%include "VBox/asmdefs.mac"
22%include "VBox/err.mac"
23%include "VBox/vmm/hm_vmx.mac"
24%include "VBox/vmm/cpum.mac"
25%include "VBox/vmm/vm.mac"
26%include "iprt/x86.mac"
27%include "HMInternal.mac"
28
29%ifdef RT_OS_OS2 ;; @todo fix OMF support in yasm and kick nasm out completely.
 ; The OS/2 build's assembler cannot emit the VT-x/AMD-V instructions used
 ; below, so each one is stubbed out as an int3 breakpoint. These paths are
 ; never meant to execute on OS/2; hitting a stub traps to the debugger.
30 %macro vmwrite 2,
31 int3
32 %endmacro
 ; Operand-less instructions can be aliased with a plain %define.
33 %define vmlaunch int3
34 %define vmresume int3
35 %define vmsave int3
36 %define vmload int3
37 %define vmrun int3
38 %define clgi int3
39 %define stgi int3
 ; invlpga takes two operands, so like vmwrite it needs a macro form.
40 %macro invlpga 2,
41 int3
42 %endmacro
43%endif
44
45;*********************************************************************************************************************************
46;* Defined Constants And Macros *
47;*********************************************************************************************************************************
48;; The offset of the XMM registers in X86FXSTATE.
; XMM0 starts at byte 160 of the FXSAVE image; each register is 16 bytes.
49; Use define because I'm too lazy to convert the struct.
50%define XMM_OFF_IN_X86FXSTATE 160
51
52;;
53; Determine skipping restoring of GDTR, IDTR, TR across VMX non-root operation
54;
; On 64-bit hosts VT-x restores enough state on VM-exit that GDTR, TR and the
; data segment registers need no manual restore; Darwin is the exception for
; IDTR (see note below), so VMX_SKIP_IDTR is only defined elsewhere.
55%ifdef RT_ARCH_AMD64
56 %define VMX_SKIP_GDTR
57 %define VMX_SKIP_TR
58 %define VBOX_SKIP_RESTORE_SEG
59 %ifdef RT_OS_DARWIN
60 ; Load the NULL selector into DS, ES, FS and GS on 64-bit darwin so we don't
61 ; risk loading a stale LDT value or something invalid.
62 %define HM_64_BIT_USE_NULL_SEL
63 ; Darwin (Mavericks) uses IDTR limit to store the CPU Id so we need to restore it always.
64 ; See @bugref{6875}.
65 %else
66 %define VMX_SKIP_IDTR
67 %endif
68%endif
69
70;; @def MYPUSHAD
71; Macro generating an equivalent to pushad
72
73;; @def MYPOPAD
74; Macro generating an equivalent to popad
75
76;; @def MYPUSHSEGS
77; Macro saving all segment registers on the stack.
78; @param 1 full width register name
79; @param 2 16-bit register name for \a 1.
80
81;; @def MYPOPSEGS
82; Macro restoring all segment registers on the stack
83; @param 1 full width register name
84; @param 2 16-bit register name for \a 1.
85
86%ifdef ASM_CALL64_GCC
 ; SysV AMD64 (GCC): only rbx and r12-r15 are callee-saved here (rbp/rsp are
 ; handled by the callers' frames); rsi/rdi are argument registers and volatile.
87 %macro MYPUSHAD64 0
88 push r15
89 push r14
90 push r13
91 push r12
92 push rbx
93 %endmacro
 ; Pops must mirror MYPUSHAD64 in exact reverse order.
94 %macro MYPOPAD64 0
95 pop rbx
96 pop r12
97 pop r13
98 pop r14
99 pop r15
100 %endmacro
101
102%else ; ASM_CALL64_MSC
 ; Microsoft x64: rsi and rdi are additionally callee-saved, so they are
 ; pushed/popped on top of the SysV set above.
103 %macro MYPUSHAD64 0
104 push r15
105 push r14
106 push r13
107 push r12
108 push rbx
109 push rsi
110 push rdi
111 %endmacro
112 %macro MYPOPAD64 0
113 pop rdi
114 pop rsi
115 pop rbx
116 pop r12
117 pop r13
118 pop r14
119 pop r15
120 %endmacro
121%endif
122
123%ifdef VBOX_SKIP_RESTORE_SEG
 ; Segment save/restore is not needed on this configuration; keep the macros
 ; as empty placeholders so call sites stay identical.
124 %macro MYPUSHSEGS64 2
125 %endmacro
126
127 %macro MYPOPSEGS64 2
128 %endmacro
129%else ; !VBOX_SKIP_RESTORE_SEG
130 ; trashes, rax, rdx & rcx
 ; %1 = full-width scratch register, %2 = its 16-bit alias (see MYPUSHSEGS doc).
 ; Saves ES/DS selectors, then the FS and GS base MSRs (rdmsr returns the
 ; value in edx:eax, pushed as two qwords) and, unless the null-selector
 ; scheme is used, the FS/GS selectors themselves.
131 %macro MYPUSHSEGS64 2
132 %ifndef HM_64_BIT_USE_NULL_SEL
133 mov %2, es
134 push %1
135 mov %2, ds
136 push %1
137 %endif
138
139 ; Special case for FS; Windows and Linux either don't use it or restore it when leaving kernel mode, Solaris OTOH doesn't and we must save it.
140 mov ecx, MSR_K8_FS_BASE
141 rdmsr
142 push rdx
143 push rax
144 %ifndef HM_64_BIT_USE_NULL_SEL
145 push fs
146 %endif
147
148 ; Special case for GS; OSes typically use swapgs to reset the hidden base register for GS on entry into the kernel. The same happens on exit
149 mov ecx, MSR_K8_GS_BASE
150 rdmsr
151 push rdx
152 push rax
153 %ifndef HM_64_BIT_USE_NULL_SEL
154 push gs
155 %endif
156 %endmacro
157
158 ; trashes, rax, rdx & rcx
 ; Restores everything MYPUSHSEGS64 saved, in exact reverse order. Loading a
 ; selector into fs/gs clobbers the hidden base, which is why each selector
 ; pop is followed by a wrmsr that re-writes the saved base (edx:eax).
159 %macro MYPOPSEGS64 2
160 ; Note: do not step through this code with a debugger!
161 %ifndef HM_64_BIT_USE_NULL_SEL
162 xor eax, eax
163 mov ds, ax
164 mov es, ax
165 mov fs, ax
166 mov gs, ax
167 %endif
168
169 %ifndef HM_64_BIT_USE_NULL_SEL
170 pop gs
171 %endif
172 pop rax
173 pop rdx
174 mov ecx, MSR_K8_GS_BASE
175 wrmsr
176
177 %ifndef HM_64_BIT_USE_NULL_SEL
178 pop fs
179 %endif
180 pop rax
181 pop rdx
182 mov ecx, MSR_K8_FS_BASE
183 wrmsr
184 ; Now it's safe to step again
185
186 %ifndef HM_64_BIT_USE_NULL_SEL
187 pop %1
188 mov ds, %2
189 pop %1
190 mov es, %2
191 %endif
192 %endmacro
193%endif ; VBOX_SKIP_RESTORE_SEG
194
; 32-bit variants: pushad/popad cover all GPRs; the two macro parameters
; (scratch register pair) are unused here but kept for signature parity with
; the 64-bit versions.
195%macro MYPUSHAD32 0
196 pushad
197%endmacro
198%macro MYPOPAD32 0
199 popad
200%endmacro
201
202%macro MYPUSHSEGS32 2
203 push ds
204 push es
205 push fs
206 push gs
207%endmacro
; Pops mirror MYPUSHSEGS32 in reverse order.
208%macro MYPOPSEGS32 2
209 pop gs
210 pop fs
211 pop es
212 pop ds
213%endmacro
214
; Select the arch-appropriate implementation behind the generic MYPUSHAD /
; MYPOPAD / MYPUSHSEGS / MYPOPSEGS names used by the world-switch code.
215%ifdef RT_ARCH_AMD64
216 %define MYPUSHAD MYPUSHAD64
217 %define MYPOPAD MYPOPAD64
218 %define MYPUSHSEGS MYPUSHSEGS64
219 %define MYPOPSEGS MYPOPSEGS64
220%else
221 %define MYPUSHAD MYPUSHAD32
222 %define MYPOPAD MYPOPAD32
223 %define MYPUSHSEGS MYPUSHSEGS32
224 %define MYPOPSEGS MYPOPSEGS32
225%endif
226
227
228;*********************************************************************************************************************************
229;* External Symbols *
230;*********************************************************************************************************************************
231%ifdef VBOX_WITH_KERNEL_USING_XMM
232extern NAME(CPUMIsGuestFPUStateActive)
233%endif
234
235
236BEGINCODE
237
238
239;/**
240; * Restores host-state fields.
241; *
; * Restores only the pieces flagged in f32RestoreHost (GDTR, IDTR, DS, ES,
; * TR, FS, GS) from the RestoreHost structure. 64-bit only; the 32-bit
; * build returns VERR_NOT_IMPLEMENTED.
242; * @returns VBox status code
243; * @param f32RestoreHost x86: [ebp + 08h] msc: ecx gcc: edi RestoreHost flags.
244; * @param pRestoreHost x86: [ebp + 0ch] msc: rdx gcc: rsi Pointer to the RestoreHost struct.
245; */
246ALIGNCODE(16)
247BEGINPROC VMXRestoreHostState
248%ifdef RT_ARCH_AMD64
249 %ifndef ASM_CALL64_GCC
250 ; Use GCC's input registers since we'll be needing both rcx and rdx further
251 ; down with the wrmsr instruction. Use the R10 and R11 register for saving
252 ; RDI and RSI since MSC preserve the two latter registers.
253 mov r10, rdi
254 mov r11, rsi
255 mov rdi, rcx
256 mov rsi, rdx
257 %endif
258
 ; Each section below tests one flag bit and falls through to the next test
 ; when the bit is clear.
259 test edi, VMX_RESTORE_HOST_GDTR
260 jz .test_idtr
261 lgdt [rsi + VMXRESTOREHOST.HostGdtr]
262
263.test_idtr:
264 test edi, VMX_RESTORE_HOST_IDTR
265 jz .test_ds
266 lidt [rsi + VMXRESTOREHOST.HostIdtr]
267
268.test_ds:
269 test edi, VMX_RESTORE_HOST_SEL_DS
270 jz .test_es
271 mov ax, [rsi + VMXRESTOREHOST.uHostSelDS]
272 mov ds, eax
273
274.test_es:
275 test edi, VMX_RESTORE_HOST_SEL_ES
276 jz .test_tr
277 mov ax, [rsi + VMXRESTOREHOST.uHostSelES]
278 mov es, eax
279
280.test_tr:
281 test edi, VMX_RESTORE_HOST_SEL_TR
282 jz .test_fs
283 ; When restoring the TR, we must first clear the busy flag or we'll end up faulting.
284 mov dx, [rsi + VMXRESTOREHOST.uHostSelTR]
285 mov ax, dx
286 and eax, X86_SEL_MASK_OFF_RPL ; Mask away TI and RPL bits leaving only the descriptor offset.
287 add rax, qword [rsi + VMXRESTOREHOST.HostGdtr + 2] ; xAX <- descriptor offset + GDTR.pGdt.
288 test edi, VMX_RESTORE_HOST_GDT_READ_ONLY
289 jnz .gdt_readonly
290 and dword [rax + 4], ~RT_BIT(9) ; Clear the busy flag in TSS desc (bits 0-7=base, bit 9=busy bit).
291 ltr dx
292 jmp short .test_fs
293.gdt_readonly:
 ; GDT is mapped read-only: temporarily clear CR0.WP so the busy-bit write
 ; above does not fault, then restore the original CR0 afterwards.
294 mov rcx, cr0
295 mov r9, rcx
296 and rcx, ~X86_CR0_WP
297 mov cr0, rcx
298 and dword [rax + 4], ~RT_BIT(9) ; Clear the busy flag in TSS desc (bits 0-7=base, bit 9=busy bit).
299 ltr dx
300 mov cr0, r9
301
302.test_fs:
303 ;
304 ; When restoring the selector values for FS and GS, we'll temporarily trash
305 ; the base address (at least the high 32-bit bits, but quite possibly the
306 ; whole base address), the wrmsr will restore it correctly. (VT-x actually
307 ; restores the base correctly when leaving guest mode, but not the selector
308 ; value, so there is little problem with interrupts being enabled prior to
309 ; this restore job.)
310 ; We'll disable ints once for both FS and GS as that's probably faster.
311 ;
312 test edi, VMX_RESTORE_HOST_SEL_FS | VMX_RESTORE_HOST_SEL_GS
313 jz .restore_success
314 pushfq
315 cli ; (see above)
316
317 test edi, VMX_RESTORE_HOST_SEL_FS
318 jz .test_gs
319 mov ax, word [rsi + VMXRESTOREHOST.uHostSelFS]
320 mov fs, eax
 ; wrmsr takes the 64-bit base split across edx:eax.
321 mov eax, dword [rsi + VMXRESTOREHOST.uHostFSBase] ; uHostFSBase - Lo
322 mov edx, dword [rsi + VMXRESTOREHOST.uHostFSBase + 4h] ; uHostFSBase - Hi
323 mov ecx, MSR_K8_FS_BASE
324 wrmsr
325
326.test_gs:
327 test edi, VMX_RESTORE_HOST_SEL_GS
328 jz .restore_flags
329 mov ax, word [rsi + VMXRESTOREHOST.uHostSelGS]
330 mov gs, eax
331 mov eax, dword [rsi + VMXRESTOREHOST.uHostGSBase] ; uHostGSBase - Lo
332 mov edx, dword [rsi + VMXRESTOREHOST.uHostGSBase + 4h] ; uHostGSBase - Hi
333 mov ecx, MSR_K8_GS_BASE
334 wrmsr
335
336.restore_flags:
337 popfq
338
339.restore_success:
340 mov eax, VINF_SUCCESS
341 %ifndef ASM_CALL64_GCC
342 ; Restore RDI and RSI on MSC.
343 mov rdi, r10
344 mov rsi, r11
345 %endif
346%else ; RT_ARCH_X86
347 mov eax, VERR_NOT_IMPLEMENTED
348%endif
349 ret
350ENDPROC VMXRestoreHostState
351
352
353;/**
354; * Dispatches an NMI to the host.
; *
; * Invokes the host's NMI handler with a software 'int 2'; no arguments,
; * no return value beyond falling through to ret.
355; */
356ALIGNCODE(16)
357BEGINPROC VMXDispatchHostNmi
358 int 2 ; NMI is always vector 2. The IDT[2] IRQ handler cannot be anything else. See Intel spec. 6.3.1 "External Interrupts".
359 ret
360ENDPROC VMXDispatchHostNmi
361
362
363;/**
364; * Executes VMWRITE, 64-bit value.
365; *
366; * @returns VBox status code.
367; * @param idxField x86: [ebp + 08h] msc: rcx gcc: rdi VMCS index.
368; * @param u64Data x86: [ebp + 0ch] msc: rdx gcc: rsi VM field value.
; *
; * @remarks VMX instructions report failure via flags: CF=1 means no current
; * VMCS (VMfailInvalid), ZF=1 means an invalid field (VMfailValid);
; * see the jnc/jnz checks at the end.
369; */
370ALIGNCODE(16)
371BEGINPROC VMXWriteVmcs64
372%ifdef RT_ARCH_AMD64
373 %ifdef ASM_CALL64_GCC
 ; Zero-extend the 32-bit field index and preset eax = VINF_SUCCESS (0).
374 and edi, 0ffffffffh
375 xor rax, rax
376 vmwrite rdi, rsi
377 %else
378 and ecx, 0ffffffffh
379 xor rax, rax
380 vmwrite rcx, rdx
381 %endif
382%else ; RT_ARCH_X86
 ; 32-bit host: a 64-bit field must be written as two 32-bit halves; the
 ; high half lives at field index + 1. Bail out on failure of the first half.
383 mov ecx, [esp + 4] ; idxField
384 lea edx, [esp + 8] ; &u64Data
385 vmwrite ecx, [edx] ; low dword
386 jz .done
387 jc .done
388 inc ecx
389 xor eax, eax
390 vmwrite ecx, [edx + 4] ; high dword
391.done:
392%endif ; RT_ARCH_X86
393 jnc .valid_vmcs
394 mov eax, VERR_VMX_INVALID_VMCS_PTR
395 ret
396.valid_vmcs:
397 jnz .the_end
398 mov eax, VERR_VMX_INVALID_VMCS_FIELD
399.the_end:
400 ret
401ENDPROC VMXWriteVmcs64
402
403
404;/**
405; * Executes VMREAD, 64-bit value.
406; *
407; * @returns VBox status code.
408; * @param idxField VMCS index.
409; * @param pData Where to store VM field value.
; *
; * @remarks Failure reporting mirrors VMXWriteVmcs64: CF=1 -> invalid VMCS
; * pointer, ZF=1 -> invalid field.
410; */
411;DECLASM(int) VMXReadVmcs64(uint32_t idxField, uint64_t *pData);
412ALIGNCODE(16)
413BEGINPROC VMXReadVmcs64
414%ifdef RT_ARCH_AMD64
415 %ifdef ASM_CALL64_GCC
 ; Zero-extend the field index and preset eax = VINF_SUCCESS (0).
416 and edi, 0ffffffffh
417 xor rax, rax
418 vmread [rsi], rdi
419 %else
420 and ecx, 0ffffffffh
421 xor rax, rax
422 vmread [rdx], rcx
423 %endif
424%else ; RT_ARCH_X86
 ; 32-bit host: read the 64-bit field as two 32-bit halves (high half at
 ; field index + 1), bailing out if the first read fails.
425 mov ecx, [esp + 4] ; idxField
426 mov edx, [esp + 8] ; pData
427 vmread [edx], ecx ; low dword
428 jz .done
429 jc .done
430 inc ecx
431 xor eax, eax
432 vmread [edx + 4], ecx ; high dword
433.done:
434%endif ; RT_ARCH_X86
435 jnc .valid_vmcs
436 mov eax, VERR_VMX_INVALID_VMCS_PTR
437 ret
438.valid_vmcs:
439 jnz .the_end
440 mov eax, VERR_VMX_INVALID_VMCS_FIELD
441.the_end:
442 ret
443ENDPROC VMXReadVmcs64
444
445
446;/**
447; * Executes VMREAD, 32-bit value.
448; *
449; * @returns VBox status code.
450; * @param idxField VMCS index.
451; * @param pu32Data Where to store VM field value.
; *
; * @remarks CF=1 -> invalid VMCS pointer, ZF=1 -> invalid field (checked below).
452; */
453;DECLASM(int) VMXReadVmcs32(uint32_t idxField, uint32_t *pu32Data);
454ALIGNCODE(16)
455BEGINPROC VMXReadVmcs32
456%ifdef RT_ARCH_AMD64
457 %ifdef ASM_CALL64_GCC
 ; vmread needs a 64-bit destination in 64-bit mode; read into r10 and
 ; store only the low 32 bits at the caller's pointer.
458 and edi, 0ffffffffh
459 xor rax, rax
460 vmread r10, rdi
461 mov [rsi], r10d
462 %else
463 and ecx, 0ffffffffh
464 xor rax, rax
465 vmread r10, rcx
466 mov [rdx], r10d
467 %endif
468%else ; RT_ARCH_X86
469 mov ecx, [esp + 4] ; idxField
470 mov edx, [esp + 8] ; pu32Data
471 xor eax, eax
472 vmread [edx], ecx
473%endif ; RT_ARCH_X86
474 jnc .valid_vmcs
475 mov eax, VERR_VMX_INVALID_VMCS_PTR
476 ret
477.valid_vmcs:
478 jnz .the_end
479 mov eax, VERR_VMX_INVALID_VMCS_FIELD
480.the_end:
481 ret
482ENDPROC VMXReadVmcs32
483
484
485;/**
486; * Executes VMWRITE, 32-bit value.
487; *
488; * @returns VBox status code.
489; * @param idxField VMCS index.
490; * @param u32Data Where to store VM field value.
; *
; * @remarks CF=1 -> invalid VMCS pointer, ZF=1 -> invalid field (checked below).
491; */
492;DECLASM(int) VMXWriteVmcs32(uint32_t idxField, uint32_t u32Data);
493ALIGNCODE(16)
494BEGINPROC VMXWriteVmcs32
495%ifdef RT_ARCH_AMD64
496 %ifdef ASM_CALL64_GCC
 ; Zero-extend both 32-bit inputs and preset eax = VINF_SUCCESS (0).
497 and edi, 0ffffffffh
498 and esi, 0ffffffffh
499 xor rax, rax
500 vmwrite rdi, rsi
501 %else
502 and ecx, 0ffffffffh
503 and edx, 0ffffffffh
504 xor rax, rax
505 vmwrite rcx, rdx
506 %endif
507%else ; RT_ARCH_X86
508 mov ecx, [esp + 4] ; idxField
509 mov edx, [esp + 8] ; u32Data
510 xor eax, eax
511 vmwrite ecx, edx
512%endif ; RT_ARCH_X86
513 jnc .valid_vmcs
514 mov eax, VERR_VMX_INVALID_VMCS_PTR
515 ret
516.valid_vmcs:
517 jnz .the_end
518 mov eax, VERR_VMX_INVALID_VMCS_FIELD
519.the_end:
520 ret
521ENDPROC VMXWriteVmcs32
522
523
524;/**
525; * Executes VMXON.
526; *
527; * @returns VBox status code.
528; * @param HCPhysVMXOn Physical address of VMXON structure.
; *
; * @remarks vmxon takes a memory operand holding the 64-bit physical address;
; * on 64-bit hosts the argument register is pushed so [rsp] can be
; * used, and the stack is rebalanced at .the_end. CF=1 -> bad VMXON
; * pointer, ZF=1 -> VMXON failed.
529; */
530;DECLASM(int) VMXEnable(RTHCPHYS HCPhysVMXOn);
531BEGINPROC VMXEnable
532%ifdef RT_ARCH_AMD64
533 xor rax, rax
534 %ifdef ASM_CALL64_GCC
535 push rdi
536 %else
537 push rcx
538 %endif
539 vmxon [rsp]
540%else ; RT_ARCH_X86
541 xor eax, eax
542 vmxon [esp + 4]
543%endif ; RT_ARCH_X86
544 jnc .good
545 mov eax, VERR_VMX_INVALID_VMXON_PTR
546 jmp .the_end
547
548.good:
549 jnz .the_end
550 mov eax, VERR_VMX_VMXON_FAILED
551
552.the_end:
553%ifdef RT_ARCH_AMD64
554 add rsp, 8
555%endif
556 ret
557ENDPROC VMXEnable
558
559
560;/**
561; * Executes VMXOFF.
; * Leaves VMX root operation; no parameters, no return value.
562; */
563;DECLASM(void) VMXDisable(void);
564BEGINPROC VMXDisable
565 vmxoff
566.the_end:
567 ret
568ENDPROC VMXDisable
569
570
571;/**
572; * Executes VMCLEAR.
573; *
574; * @returns VBox status code.
575; * @param HCPhysVmcs Physical address of VM control structure.
; *
; * @remarks As with VMXEnable, 64-bit hosts push the physical address so the
; * memory-operand form [rsp] can be used; CF=1 -> invalid VMCS pointer.
576; */
577;DECLASM(int) VMXClearVmcs(RTHCPHYS HCPhysVmcs);
578ALIGNCODE(16)
579BEGINPROC VMXClearVmcs
580%ifdef RT_ARCH_AMD64
581 xor rax, rax
582 %ifdef ASM_CALL64_GCC
583 push rdi
584 %else
585 push rcx
586 %endif
587 vmclear [rsp]
588%else ; RT_ARCH_X86
589 xor eax, eax
590 vmclear [esp + 4]
591%endif ; RT_ARCH_X86
592 jnc .the_end
593 mov eax, VERR_VMX_INVALID_VMCS_PTR
594.the_end:
595%ifdef RT_ARCH_AMD64
596 add rsp, 8
597%endif
598 ret
599ENDPROC VMXClearVmcs
600
601
602;/**
603; * Executes VMPTRLD.
604; *
605; * @returns VBox status code.
606; * @param HCPhysVmcs Physical address of VMCS structure.
; *
; * @remarks Loads the given VMCS as the current one; structure is identical
; * to VMXClearVmcs above (push arg, memory-operand form, CF check).
607; */
608;DECLASM(int) VMXActivateVmcs(RTHCPHYS HCPhysVmcs);
609ALIGNCODE(16)
610BEGINPROC VMXActivateVmcs
611%ifdef RT_ARCH_AMD64
612 xor rax, rax
613 %ifdef ASM_CALL64_GCC
614 push rdi
615 %else
616 push rcx
617 %endif
618 vmptrld [rsp]
619%else
620 xor eax, eax
621 vmptrld [esp + 4]
622%endif
623 jnc .the_end
624 mov eax, VERR_VMX_INVALID_VMCS_PTR
625.the_end:
626%ifdef RT_ARCH_AMD64
627 add rsp, 8
628%endif
629 ret
630ENDPROC VMXActivateVmcs
631
632
633;/**
634; * Executes VMPTRST.
635; *
636; * @returns VBox status code.
637; * @param [esp + 04h] gcc:rdi msc:rcx Param 1 - First parameter - Address that will receive the current pointer.
638; */
639;DECLASM(int) VMXGetActivatedVmcs(RTHCPHYS *pVMCS);
640BEGINPROC VMXGetActivatedVmcs
641%ifdef RT_OS_OS2
642 mov eax, VERR_NOT_SUPPORTED
643 ret
644%else
645 %ifdef RT_ARCH_AMD64
646 %ifdef ASM_CALL64_GCC
647 vmptrst qword [rdi]
648 %else
649 vmptrst qword [rcx]
650 %endif
651 %else
652 vmptrst qword [esp+04h]
653 %endif
654 xor eax, eax
655.the_end:
656 ret
657%endif
658ENDPROC VMXGetActivatedVmcs
659
660;/**
661; * Invalidate a page using INVEPT.
662; @param enmFlush msc:ecx gcc:edi x86:[esp+04] Type of flush.
663; @param pDescriptor msc:edx gcc:esi x86:[esp+08] Descriptor pointer.
; @returns VBox status code (CF=1 -> invalid VMCS pointer, ZF=1 -> bad parameter).
; @remarks The instruction is hand-encoded with DB because contemporary
; assemblers lacked invept support; the commented mnemonic above each
; DB line shows the intended encoding.
664; */
665;DECLASM(int) VMXR0InvEPT(VMX_FLUSH enmFlush, uint64_t *pDescriptor);
666BEGINPROC VMXR0InvEPT
667%ifdef RT_ARCH_AMD64
668 %ifdef ASM_CALL64_GCC
669 and edi, 0ffffffffh
670 xor rax, rax
671; invept rdi, qword [rsi]
672 DB 0x66, 0x0F, 0x38, 0x80, 0x3E
673 %else
674 and ecx, 0ffffffffh
675 xor rax, rax
676; invept rcx, qword [rdx]
677 DB 0x66, 0x0F, 0x38, 0x80, 0xA
678 %endif
679%else
680 mov ecx, [esp + 4]
681 mov edx, [esp + 8]
682 xor eax, eax
683; invept ecx, qword [edx]
684 DB 0x66, 0x0F, 0x38, 0x80, 0xA
685%endif
686 jnc .valid_vmcs
687 mov eax, VERR_VMX_INVALID_VMCS_PTR
688 ret
689.valid_vmcs:
690 jnz .the_end
691 mov eax, VERR_INVALID_PARAMETER
692.the_end:
693 ret
694ENDPROC VMXR0InvEPT
695
696
697;/**
698; * Invalidate a page using invvpid
699; @param enmFlush msc:ecx gcc:edi x86:[esp+04] Type of flush
700; @param pDescriptor msc:edx gcc:esi x86:[esp+08] Descriptor pointer
; @returns VBox status code (CF=1 -> invalid VMCS pointer, ZF=1 -> bad parameter).
; @remarks Hand-encoded via DB like VMXR0InvEPT above (opcode 0F 38 81 instead
; of 0F 38 80); the commented mnemonics show the intended instruction.
701; */
702;DECLASM(int) VMXR0InvVPID(VMX_FLUSH enmFlush, uint64_t *pDescriptor);
703BEGINPROC VMXR0InvVPID
704%ifdef RT_ARCH_AMD64
705 %ifdef ASM_CALL64_GCC
706 and edi, 0ffffffffh
707 xor rax, rax
708; invvpid rdi, qword [rsi]
709 DB 0x66, 0x0F, 0x38, 0x81, 0x3E
710 %else
711 and ecx, 0ffffffffh
712 xor rax, rax
713; invvpid rcx, qword [rdx]
714 DB 0x66, 0x0F, 0x38, 0x81, 0xA
715 %endif
716%else
717 mov ecx, [esp + 4]
718 mov edx, [esp + 8]
719 xor eax, eax
720; invvpid ecx, qword [edx]
721 DB 0x66, 0x0F, 0x38, 0x81, 0xA
722%endif
723 jnc .valid_vmcs
724 mov eax, VERR_VMX_INVALID_VMCS_PTR
725 ret
726.valid_vmcs:
727 jnz .the_end
728 mov eax, VERR_INVALID_PARAMETER
729.the_end:
730 ret
731ENDPROC VMXR0InvVPID
732
733
; Two variants of SVMR0InvlpgA follow: with 64-bit guest context (RTGCPTR is
; 8 bytes, so the x86 stack layout differs) and with 32-bit guest context.
; invlpga invalidates the TLB mapping for the virtual page in rAX tagged with
; the ASID in ecx.
734%if GC_ARCH_BITS == 64
735;;
736; Executes INVLPGA
737;
738; @param pPageGC msc:rcx gcc:rdi x86:[esp+04] Virtual page to invalidate
739; @param uASID msc:rdx gcc:rsi x86:[esp+0C] Tagged TLB id
740;
741;DECLASM(void) SVMR0InvlpgA(RTGCPTR pPageGC, uint32_t uASID);
742BEGINPROC SVMR0InvlpgA
743%ifdef RT_ARCH_AMD64
744 %ifdef ASM_CALL64_GCC
745 mov rax, rdi
746 mov rcx, rsi
747 %else
748 mov rax, rcx
749 mov rcx, rdx
750 %endif
751%else
 ; 32-bit host, 64-bit RTGCPTR: the pointer occupies 8 stack bytes, hence
 ; uASID at [esp+0Ch]; only the low 32 bits of the page address are usable.
752 mov eax, [esp + 4]
753 mov ecx, [esp + 0Ch]
754%endif
755 invlpga [xAX], ecx
756 ret
757ENDPROC SVMR0InvlpgA
758
759%else ; GC_ARCH_BITS != 64
760;;
761; Executes INVLPGA
762;
763; @param pPageGC msc:ecx gcc:edi x86:[esp+04] Virtual page to invalidate
764; @param uASID msc:edx gcc:esi x86:[esp+08] Tagged TLB id
765;
766;DECLASM(void) SVMR0InvlpgA(RTGCPTR pPageGC, uint32_t uASID);
767BEGINPROC SVMR0InvlpgA
768%ifdef RT_ARCH_AMD64
769 %ifdef ASM_CALL64_GCC
770 movzx rax, edi
771 mov ecx, esi
772 %else
773 ; from http://www.cs.cmu.edu/~fp/courses/15213-s06/misc/asm64-handout.pdf:
774 ; ``Perhaps unexpectedly, instructions that move or generate 32-bit register
775 ; values also set the upper 32 bits of the register to zero. Consequently
776 ; there is no need for an instruction movzlq.''
777 mov eax, ecx
778 mov ecx, edx
779 %endif
780%else
781 mov eax, [esp + 4]
782 mov ecx, [esp + 8]
783%endif
784 invlpga [xAX], ecx
785 ret
786ENDPROC SVMR0InvlpgA
787
788%endif ; GC_ARCH_BITS != 64
789
790
791%ifdef VBOX_WITH_KERNEL_USING_XMM
792
793;;
794; Wrapper around vmx.pfnStartVM that preserves host XMM registers and
795; load the guest ones when necessary.
796;
797; @cproto DECLASM(int) HMR0VMXStartVMWrapXMM(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu, PFNHMVMXSTARTVM pfnStartVM);
798;
799; @returns eax
800;
801; @param fResumeVM msc:rcx
802; @param pCtx msc:rdx
803; @param pVMCSCache msc:r8
804; @param pVM msc:r9
805; @param pVCpu msc:[rbp+30h]
806; @param pfnStartVM msc:[rbp+38h]
807;
808; @remarks This is essentially the same code as HMR0SVMRunWrapXMM, only the parameters differ a little bit.
;
; Frame layout: 0a0h bytes at [rsp+40h] hold xmm6-xmm15 (Windows x64
; callee-saved); the low 40h bytes provide the shadow space plus the
; 5th-argument slot ([xSP+20h]) for the pfnStartVM call.
809;
810; ASSUMING 64-bit and windows for now.
811ALIGNCODE(16)
812BEGINPROC HMR0VMXStartVMWrapXMM
813 push xBP
814 mov xBP, xSP
815 sub xSP, 0a0h + 040h ; Don't bother optimizing the frame size.
816
817 ; spill input parameters.
818 mov [xBP + 010h], rcx ; fResumeVM
819 mov [xBP + 018h], rdx ; pCtx
820 mov [xBP + 020h], r8 ; pVMCSCache
821 mov [xBP + 028h], r9 ; pVM
822
823 ; Ask CPUM whether we've started using the FPU yet.
824 mov rcx, [xBP + 30h] ; pVCpu
825 call NAME(CPUMIsGuestFPUStateActive)
826 test al, al
827 jnz .guest_fpu_state_active
828
829 ; No need to mess with XMM registers just call the start routine and return.
830 mov r11, [xBP + 38h] ; pfnStartVM
831 mov r10, [xBP + 30h] ; pVCpu
832 mov [xSP + 020h], r10
833 mov rcx, [xBP + 010h] ; fResumeVM
834 mov rdx, [xBP + 018h] ; pCtx
835 mov r8, [xBP + 020h] ; pVMCSCache
836 mov r9, [xBP + 028h] ; pVM
837 call r11
838
839 leave
840 ret
841
842ALIGNCODE(8)
843.guest_fpu_state_active:
844 ; Save the non-volatile host XMM registers.
845 movdqa [rsp + 040h + 000h], xmm6
846 movdqa [rsp + 040h + 010h], xmm7
847 movdqa [rsp + 040h + 020h], xmm8
848 movdqa [rsp + 040h + 030h], xmm9
849 movdqa [rsp + 040h + 040h], xmm10
850 movdqa [rsp + 040h + 050h], xmm11
851 movdqa [rsp + 040h + 060h], xmm12
852 movdqa [rsp + 040h + 070h], xmm13
853 movdqa [rsp + 040h + 080h], xmm14
854 movdqa [rsp + 040h + 090h], xmm15
855
 ; fXStateMask != 0 selects the XSAVE/XRSTOR path; zero means legacy
 ; manual XMM load/save below.
856 mov r10, [xBP + 018h] ; pCtx
857 mov eax, [r10 + CPUMCTX.fXStateMask]
858 test eax, eax
859 jz .guest_fpu_state_manually
860
861 ;
862 ; Using XSAVE to load the guest XMM, YMM and ZMM registers.
863 ;
 ; xrstor takes the component mask in edx:eax; only the volatile guest
 ; components are restored.
864 and eax, CPUM_VOLATILE_XSAVE_GUEST_COMPONENTS
865 xor edx, edx
866 mov r10, [r10 + CPUMCTX.pXStateR0]
867 xrstor [r10]
868
869 ; Make the call (same as in the other case ).
870 mov r11, [xBP + 38h] ; pfnStartVM
871 mov r10, [xBP + 30h] ; pVCpu
872 mov [xSP + 020h], r10
873 mov rcx, [xBP + 010h] ; fResumeVM
874 mov rdx, [xBP + 018h] ; pCtx
875 mov r8, [xBP + 020h] ; pVMCSCache
876 mov r9, [xBP + 028h] ; pVM
877 call r11
878
879 mov r11d, eax ; save return value (xsave below uses eax)
880
881 ; Save the guest XMM registers.
882 mov r10, [xBP + 018h] ; pCtx
883 mov eax, [r10 + CPUMCTX.fXStateMask]
884 and eax, CPUM_VOLATILE_XSAVE_GUEST_COMPONENTS
885 xor edx, edx
886 mov r10, [r10 + CPUMCTX.pXStateR0]
887 xsave [r10]
888
889 mov eax, r11d ; restore return value.
890
891.restore_non_volatile_host_xmm_regs:
892 ; Load the non-volatile host XMM registers.
893 movdqa xmm6, [rsp + 040h + 000h]
894 movdqa xmm7, [rsp + 040h + 010h]
895 movdqa xmm8, [rsp + 040h + 020h]
896 movdqa xmm9, [rsp + 040h + 030h]
897 movdqa xmm10, [rsp + 040h + 040h]
898 movdqa xmm11, [rsp + 040h + 050h]
899 movdqa xmm12, [rsp + 040h + 060h]
900 movdqa xmm13, [rsp + 040h + 070h]
901 movdqa xmm14, [rsp + 040h + 080h]
902 movdqa xmm15, [rsp + 040h + 090h]
903 leave
904 ret
905
906 ;
907 ; No XSAVE, load and save the guest XMM registers manually.
908 ;
909.guest_fpu_state_manually:
910 ; Load the full guest XMM register state.
911 mov r10, [r10 + CPUMCTX.pXStateR0]
912 movdqa xmm0, [r10 + XMM_OFF_IN_X86FXSTATE + 000h]
913 movdqa xmm1, [r10 + XMM_OFF_IN_X86FXSTATE + 010h]
914 movdqa xmm2, [r10 + XMM_OFF_IN_X86FXSTATE + 020h]
915 movdqa xmm3, [r10 + XMM_OFF_IN_X86FXSTATE + 030h]
916 movdqa xmm4, [r10 + XMM_OFF_IN_X86FXSTATE + 040h]
917 movdqa xmm5, [r10 + XMM_OFF_IN_X86FXSTATE + 050h]
918 movdqa xmm6, [r10 + XMM_OFF_IN_X86FXSTATE + 060h]
919 movdqa xmm7, [r10 + XMM_OFF_IN_X86FXSTATE + 070h]
920 movdqa xmm8, [r10 + XMM_OFF_IN_X86FXSTATE + 080h]
921 movdqa xmm9, [r10 + XMM_OFF_IN_X86FXSTATE + 090h]
922 movdqa xmm10, [r10 + XMM_OFF_IN_X86FXSTATE + 0a0h]
923 movdqa xmm11, [r10 + XMM_OFF_IN_X86FXSTATE + 0b0h]
924 movdqa xmm12, [r10 + XMM_OFF_IN_X86FXSTATE + 0c0h]
925 movdqa xmm13, [r10 + XMM_OFF_IN_X86FXSTATE + 0d0h]
926 movdqa xmm14, [r10 + XMM_OFF_IN_X86FXSTATE + 0e0h]
927 movdqa xmm15, [r10 + XMM_OFF_IN_X86FXSTATE + 0f0h]
928
929 ; Make the call (same as in the other case ).
930 mov r11, [xBP + 38h] ; pfnStartVM
931 mov r10, [xBP + 30h] ; pVCpu
932 mov [xSP + 020h], r10
933 mov rcx, [xBP + 010h] ; fResumeVM
934 mov rdx, [xBP + 018h] ; pCtx
935 mov r8, [xBP + 020h] ; pVMCSCache
936 mov r9, [xBP + 028h] ; pVM
937 call r11
938
939 ; Save the guest XMM registers.
940 mov r10, [xBP + 018h] ; pCtx
941 mov r10, [r10 + CPUMCTX.pXStateR0]
942 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 000h], xmm0
943 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 010h], xmm1
944 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 020h], xmm2
945 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 030h], xmm3
946 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 040h], xmm4
947 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 050h], xmm5
948 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 060h], xmm6
949 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 070h], xmm7
950 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 080h], xmm8
951 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 090h], xmm9
952 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 0a0h], xmm10
953 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 0b0h], xmm11
954 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 0c0h], xmm12
955 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 0d0h], xmm13
956 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 0e0h], xmm14
957 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 0f0h], xmm15
958 jmp .restore_non_volatile_host_xmm_regs
959ENDPROC HMR0VMXStartVMWrapXMM
960
961;;
962; Wrapper around svm.pfnVMRun that preserves host XMM registers and
963; load the guest ones when necessary.
964;
965; @cproto DECLASM(int) HMR0SVMRunWrapXMM(RTHCPHYS pVMCBHostPhys, RTHCPHYS pVMCBPhys, PCPUMCTX pCtx, PVM pVM, PVMCPU pVCpu, PFNHMSVMVMRUN pfnVMRun);
966;
967; @returns eax
968;
969; @param pVMCBHostPhys msc:rcx
970; @param pVMCBPhys msc:rdx
971; @param pCtx msc:r8
972; @param pVM msc:r9
973; @param pVCpu msc:[rbp+30h]
974; @param pfnVMRun msc:[rbp+38h]
975;
976; @remarks This is essentially the same code as HMR0VMXStartVMWrapXMM, only the parameters differ a little bit.
;
; Frame layout: 0a0h bytes at [rsp+40h] hold xmm6-xmm15 (Windows x64
; callee-saved); the low 40h bytes provide the shadow space plus the
; 5th-argument slot ([xSP+20h]) for the pfnVMRun call. Note pCtx is the
; third argument here (r8 / [xBP+20h]), not the second as in the VMX wrapper.
977;
978; ASSUMING 64-bit and windows for now.
979ALIGNCODE(16)
980BEGINPROC HMR0SVMRunWrapXMM
981 push xBP
982 mov xBP, xSP
983 sub xSP, 0a0h + 040h ; Don't bother optimizing the frame size.
984
985 ; spill input parameters.
986 mov [xBP + 010h], rcx ; pVMCBHostPhys
987 mov [xBP + 018h], rdx ; pVMCBPhys
988 mov [xBP + 020h], r8 ; pCtx
989 mov [xBP + 028h], r9 ; pVM
990
991 ; Ask CPUM whether we've started using the FPU yet.
992 mov rcx, [xBP + 30h] ; pVCpu
993 call NAME(CPUMIsGuestFPUStateActive)
994 test al, al
995 jnz .guest_fpu_state_active
996
997 ; No need to mess with XMM registers just call the start routine and return.
998 mov r11, [xBP + 38h] ; pfnVMRun
999 mov r10, [xBP + 30h] ; pVCpu
1000 mov [xSP + 020h], r10
1001 mov rcx, [xBP + 010h] ; pVMCBHostPhys
1002 mov rdx, [xBP + 018h] ; pVMCBPhys
1003 mov r8, [xBP + 020h] ; pCtx
1004 mov r9, [xBP + 028h] ; pVM
1005 call r11
1006
1007 leave
1008 ret
1009
1010ALIGNCODE(8)
1011.guest_fpu_state_active:
1012 ; Save the non-volatile host XMM registers.
1013 movdqa [rsp + 040h + 000h], xmm6
1014 movdqa [rsp + 040h + 010h], xmm7
1015 movdqa [rsp + 040h + 020h], xmm8
1016 movdqa [rsp + 040h + 030h], xmm9
1017 movdqa [rsp + 040h + 040h], xmm10
1018 movdqa [rsp + 040h + 050h], xmm11
1019 movdqa [rsp + 040h + 060h], xmm12
1020 movdqa [rsp + 040h + 070h], xmm13
1021 movdqa [rsp + 040h + 080h], xmm14
1022 movdqa [rsp + 040h + 090h], xmm15
1023
 ; fXStateMask != 0 selects the XSAVE/XRSTOR path; zero means legacy
 ; manual XMM load/save below.
1024 mov r10, [xBP + 020h] ; pCtx
1025 mov eax, [r10 + CPUMCTX.fXStateMask]
1026 test eax, eax
1027 jz .guest_fpu_state_manually
1028
1029 ;
1030 ; Using XSAVE.
1031 ;
 ; xrstor takes the component mask in edx:eax; only the volatile guest
 ; components are restored.
1032 and eax, CPUM_VOLATILE_XSAVE_GUEST_COMPONENTS
1033 xor edx, edx
1034 mov r10, [r10 + CPUMCTX.pXStateR0]
1035 xrstor [r10]
1036
1037 ; Make the call (same as in the other case ).
1038 mov r11, [xBP + 38h] ; pfnVMRun
1039 mov r10, [xBP + 30h] ; pVCpu
1040 mov [xSP + 020h], r10
1041 mov rcx, [xBP + 010h] ; pVMCBHostPhys
1042 mov rdx, [xBP + 018h] ; pVMCBPhys
1043 mov r8, [xBP + 020h] ; pCtx
1044 mov r9, [xBP + 028h] ; pVM
1045 call r11
1046
1047 mov r11d, eax ; save return value (xsave below uses eax)
1048
1049 ; Save the guest XMM registers.
1050 mov r10, [xBP + 020h] ; pCtx
1051 mov eax, [r10 + CPUMCTX.fXStateMask]
1052 and eax, CPUM_VOLATILE_XSAVE_GUEST_COMPONENTS
1053 xor edx, edx
1054 mov r10, [r10 + CPUMCTX.pXStateR0]
1055 xsave [r10]
1056
1057 mov eax, r11d ; restore return value.
1058
1059.restore_non_volatile_host_xmm_regs:
1060 ; Load the non-volatile host XMM registers.
1061 movdqa xmm6, [rsp + 040h + 000h]
1062 movdqa xmm7, [rsp + 040h + 010h]
1063 movdqa xmm8, [rsp + 040h + 020h]
1064 movdqa xmm9, [rsp + 040h + 030h]
1065 movdqa xmm10, [rsp + 040h + 040h]
1066 movdqa xmm11, [rsp + 040h + 050h]
1067 movdqa xmm12, [rsp + 040h + 060h]
1068 movdqa xmm13, [rsp + 040h + 070h]
1069 movdqa xmm14, [rsp + 040h + 080h]
1070 movdqa xmm15, [rsp + 040h + 090h]
1071 leave
1072 ret
1073
1074 ;
1075 ; No XSAVE, load and save the guest XMM registers manually.
1076 ;
1077.guest_fpu_state_manually:
1078 ; Load the full guest XMM register state.
1079 mov r10, [r10 + CPUMCTX.pXStateR0]
1080 movdqa xmm0, [r10 + XMM_OFF_IN_X86FXSTATE + 000h]
1081 movdqa xmm1, [r10 + XMM_OFF_IN_X86FXSTATE + 010h]
1082 movdqa xmm2, [r10 + XMM_OFF_IN_X86FXSTATE + 020h]
1083 movdqa xmm3, [r10 + XMM_OFF_IN_X86FXSTATE + 030h]
1084 movdqa xmm4, [r10 + XMM_OFF_IN_X86FXSTATE + 040h]
1085 movdqa xmm5, [r10 + XMM_OFF_IN_X86FXSTATE + 050h]
1086 movdqa xmm6, [r10 + XMM_OFF_IN_X86FXSTATE + 060h]
1087 movdqa xmm7, [r10 + XMM_OFF_IN_X86FXSTATE + 070h]
1088 movdqa xmm8, [r10 + XMM_OFF_IN_X86FXSTATE + 080h]
1089 movdqa xmm9, [r10 + XMM_OFF_IN_X86FXSTATE + 090h]
1090 movdqa xmm10, [r10 + XMM_OFF_IN_X86FXSTATE + 0a0h]
1091 movdqa xmm11, [r10 + XMM_OFF_IN_X86FXSTATE + 0b0h]
1092 movdqa xmm12, [r10 + XMM_OFF_IN_X86FXSTATE + 0c0h]
1093 movdqa xmm13, [r10 + XMM_OFF_IN_X86FXSTATE + 0d0h]
1094 movdqa xmm14, [r10 + XMM_OFF_IN_X86FXSTATE + 0e0h]
1095 movdqa xmm15, [r10 + XMM_OFF_IN_X86FXSTATE + 0f0h]
1096
1097 ; Make the call (same as in the other case ).
1098 mov r11, [xBP + 38h] ; pfnVMRun
1099 mov r10, [xBP + 30h] ; pVCpu
1100 mov [xSP + 020h], r10
1101 mov rcx, [xBP + 010h] ; pVMCBHostPhys
1102 mov rdx, [xBP + 018h] ; pVMCBPhys
1103 mov r8, [xBP + 020h] ; pCtx
1104 mov r9, [xBP + 028h] ; pVM
1105 call r11
1106
1107 ; Save the guest XMM registers.
1108 mov r10, [xBP + 020h] ; pCtx
1109 mov r10, [r10 + CPUMCTX.pXStateR0]
1110 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 000h], xmm0
1111 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 010h], xmm1
1112 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 020h], xmm2
1113 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 030h], xmm3
1114 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 040h], xmm4
1115 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 050h], xmm5
1116 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 060h], xmm6
1117 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 070h], xmm7
1118 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 080h], xmm8
1119 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 090h], xmm9
1120 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 0a0h], xmm10
1121 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 0b0h], xmm11
1122 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 0c0h], xmm12
1123 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 0d0h], xmm13
1124 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 0e0h], xmm14
1125 movdqa [r10 + XMM_OFF_IN_X86FXSTATE + 0f0h], xmm15
1126 jmp .restore_non_volatile_host_xmm_regs
1127ENDPROC HMR0SVMRunWrapXMM
1128
1129%endif ; VBOX_WITH_KERNEL_USING_XMM
1130
1131
1132;; @def RESTORE_STATE_VM32
1133; Macro restoring essential host state and updating guest state
1134; for common host, 32-bit guest for VT-x.
;
; Expects the stack to hold, from the top down (as set up by VMXR0StartVM32):
;   IDTR image (unless VMX_SKIP_IDTR), GDTR image (unless VMX_SKIP_GDTR),
;   saved host TR (unless VMX_SKIP_TR), saved host LDTR, pCtx,
;   pCache (if VMX_USE_CACHED_VMCS_ACCESSES), the XCR0-restore indicator
;   (plus the saved host XCR0 edx:eax pair when the indicator is zero),
;   the host segment registers (MYPUSHSEGS) and the host GPRs (MYPUSHAD).
1135%macro RESTORE_STATE_VM32 0
1136 ; Restore base and limit of the IDTR & GDTR.
1137 %ifndef VMX_SKIP_IDTR
1138 lidt [xSP]
1139 add xSP, xCB * 2
1140 %endif
1141 %ifndef VMX_SKIP_GDTR
1142 lgdt [xSP]
1143 add xSP, xCB * 2
1144 %endif
1145
 ; Park the guest edi so xDI can be used to address pCtx while saving guest GPRs.
1146 push xDI
1147 %ifndef VMX_SKIP_TR
1148 mov xDI, [xSP + xCB * 3] ; pCtx (*3 to skip the saved xDI, TR, LDTR).
1149 %else
1150 mov xDI, [xSP + xCB * 2] ; pCtx (*2 to skip the saved xDI, LDTR).
1151 %endif
1152
 ; Save the guest GPRs (still live from the VM-exit) into pCtx.
1153 mov [ss:xDI + CPUMCTX.eax], eax
1154 mov [ss:xDI + CPUMCTX.ebx], ebx
1155 mov [ss:xDI + CPUMCTX.ecx], ecx
1156 mov [ss:xDI + CPUMCTX.edx], edx
1157 mov [ss:xDI + CPUMCTX.esi], esi
1158 mov [ss:xDI + CPUMCTX.ebp], ebp
1159 mov xAX, cr2
1160 mov [ss:xDI + CPUMCTX.cr2], xAX
1161
1162 %ifdef RT_ARCH_AMD64
1163 pop xAX ; The guest edi we pushed above.
1164 mov dword [ss:xDI + CPUMCTX.edi], eax
1165 %else
1166 pop dword [ss:xDI + CPUMCTX.edi] ; The guest edi we pushed above.
1167 %endif
1168
1169 %ifndef VMX_SKIP_TR
1170 ; Restore TSS selector; must mark it as not busy before using ltr (!)
1171 ; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p)
1172 ; @todo get rid of sgdt
1173 pop xBX ; Saved TR
1174 sub xSP, xCB * 2
1175 sgdt [xSP]
1176 mov xAX, xBX
1177 and eax, X86_SEL_MASK_OFF_RPL ; Mask away TI and RPL bits leaving only the descriptor offset.
1178 add xAX, [xSP + 2] ; eax <- GDTR.address + descriptor offset.
1179 and dword [ss:xAX + 4], ~RT_BIT(9) ; Clear the busy flag in TSS desc (bits 0-7=base, bit 9=busy bit).
1180 ltr bx
1181 add xSP, xCB * 2
1182 %endif
1183
1184 pop xAX ; Saved LDTR
1185 %ifdef RT_ARCH_AMD64
1186 cmp eax, 0
1187 je %%skip_ldt_write32
1188 %endif
1189 lldt ax
1190
1191%%skip_ldt_write32:
1192 add xSP, xCB ; pCtx
1193
1194 %ifdef VMX_USE_CACHED_VMCS_ACCESSES
1195 pop xDX ; Saved pCache
1196
1197 ; Note! If we get here as a result of invalid VMCS pointer, all the following
1198 ; vmread's will fail (only eflags.cf=1 will be set) but that shouldn't cause any
1199 ; trouble only just less efficient.
1200 mov ecx, [ss:xDX + VMCSCACHE.Read.cValidEntries]
1201 cmp ecx, 0 ; Can't happen
1202 je %%no_cached_read32
1203 jmp %%cached_read32
1204
1205ALIGN(16)
1206%%cached_read32:
 ; Walk the read-cache backwards, vmread'ing each field into its value slot.
1207 dec xCX
1208 mov eax, [ss:xDX + VMCSCACHE.Read.aField + xCX * 4]
1209 vmread [ss:xDX + VMCSCACHE.Read.aFieldVal + xCX * 8], xAX
1210 cmp xCX, 0
1211 jnz %%cached_read32
1212%%no_cached_read32:
1213 %endif
1214
1215 ; Restore segment registers.
1216 MYPOPSEGS xAX, ax
1217
1218 ; Restore the host XCR0 if necessary.
 ; A zero indicator means the saved host XCR0 pair (eax, edx) follows on the stack.
1219 pop xCX
1220 test ecx, ecx
1221 jnz %%xcr0_after_skip
1222 pop xAX
1223 pop xDX
1224 xsetbv ; ecx is already zero.
1225%%xcr0_after_skip:
1226
1227 ; Restore general purpose registers.
1228 MYPOPAD
1229%endmacro
1230
1231
1232;;
1233; Prepares for and executes VMLAUNCH/VMRESUME (32 bits guest mode)
1234;
1235; @returns VBox status code
1236; @param fResume x86:[ebp+8], msc:rcx,gcc:rdi Whether to use vmlaunch/vmresume.
1237; @param pCtx x86:[ebp+c], msc:rdx,gcc:rsi Pointer to the guest-CPU context.
1238; @param pCache x86:[ebp+10],msc:r8, gcc:rdx Pointer to the VMCS cache.
1239; @param pVM x86:[ebp+14],msc:r9, gcc:rcx Pointer to the cross context VM structure.
1240; @param pVCpu x86:[ebp+18],msc:[ebp+30],gcc:r8 Pointer to the cross context VMCPU structure.
1241;
1242ALIGNCODE(16)
1243BEGINPROC VMXR0StartVM32
1244 push xBP
1245 mov xBP, xSP
1246
 ; Save flags and disable interrupts; both are restored by the popf in .vmstart_end.
1247 pushf
1248 cli
1249
1250 ;
1251 ; Save all general purpose host registers.
1252 ;
1253 MYPUSHAD
1254
1255 ;
1256 ; First we have to write some final guest CPU context registers.
1257 ;
 ; HOST_RIP = .vmlaunch_done, i.e. where the CPU resumes us on the next VM-exit.
1258 mov eax, VMX_VMCS_HOST_RIP
1259%ifdef RT_ARCH_AMD64
1260 lea r10, [.vmlaunch_done wrt rip]
1261 vmwrite rax, r10
1262%else
1263 mov ecx, .vmlaunch_done
1264 vmwrite eax, ecx
1265%endif
1266 ; Note: assumes success!
1267
1268 ;
1269 ; Unify input parameter registers.
1270 ;
 ; After this: xDI = fResume, xSI = pCtx, xBX = pCache on every ABI.
1271%ifdef RT_ARCH_AMD64
1272 %ifdef ASM_CALL64_GCC
1273 ; fResume already in rdi
1274 ; pCtx already in rsi
1275 mov rbx, rdx ; pCache
1276 %else
1277 mov rdi, rcx ; fResume
1278 mov rsi, rdx ; pCtx
1279 mov rbx, r8 ; pCache
1280 %endif
1281%else
1282 mov edi, [ebp + 8] ; fResume
1283 mov esi, [ebp + 12] ; pCtx
1284 mov ebx, [ebp + 16] ; pCache
1285%endif
1286
1287 ;
1288 ; Save the host XCR0 and load the guest one if necessary.
1289 ; Note! Trashes rdx and rcx.
1290 ;
1291%ifdef ASM_CALL64_MSC
1292 mov rax, [xBP + 30h] ; pVCpu
1293%elifdef ASM_CALL64_GCC
1294 mov rax, r8 ; pVCpu
1295%else
1296 mov eax, [xBP + 18h] ; pVCpu
1297%endif
1298 test byte [xAX + VMCPU.hm + HMCPU.fLoadSaveGuestXcr0], 1
1299 jz .xcr0_before_skip
1300
1301 xor ecx, ecx
1302 xgetbv ; Save the host one on the stack.
1303 push xDX
1304 push xAX
1305
1306 mov eax, [xSI + CPUMCTX.aXcr] ; Load the guest one.
1307 mov edx, [xSI + CPUMCTX.aXcr + 4]
1308 xor ecx, ecx ; paranoia
1309 xsetbv
1310
1311 push 0 ; Indicate that we must restore XCR0 (popped into ecx, thus 0).
1312 jmp .xcr0_before_done
1313
1314.xcr0_before_skip:
1315 push 3fh ; indicate that we need not. (Any non-zero value would do.)
1316.xcr0_before_done:
1317
1318 ;
1319 ; Save segment registers.
1320 ; Note! Trashes rdx & rcx, so we moved it here (amd64 case).
1321 ;
1322 MYPUSHSEGS xAX, ax
1323
1324%ifdef VMX_USE_CACHED_VMCS_ACCESSES
 ; Flush the VMCS write-cache: vmwrite each queued field, then clear the count.
1325 mov ecx, [xBX + VMCSCACHE.Write.cValidEntries]
1326 cmp ecx, 0
1327 je .no_cached_writes
1328 mov edx, ecx
1329 mov ecx, 0
1330 jmp .cached_write
1331
1332ALIGN(16)
1333.cached_write:
1334 mov eax, [xBX + VMCSCACHE.Write.aField + xCX * 4]
1335 vmwrite xAX, [xBX + VMCSCACHE.Write.aFieldVal + xCX * 8]
1336 inc xCX
1337 cmp xCX, xDX
1338 jl .cached_write
1339
1340 mov dword [xBX + VMCSCACHE.Write.cValidEntries], 0
1341.no_cached_writes:
1342
1343 ; Save the pCache pointer.
1344 push xBX
1345%endif
1346
1347 ; Save the pCtx pointer.
1348 push xSI
1349
1350 ; Save host LDTR.
1351 xor eax, eax
1352 sldt ax
1353 push xAX
1354
1355%ifndef VMX_SKIP_TR
1356 ; The host TR limit is reset to 0x67; save & restore it manually.
1357 str eax
1358 push xAX
1359%endif
1360
1361%ifndef VMX_SKIP_GDTR
1362 ; VT-x only saves the base of the GDTR & IDTR and resets the limit to 0xffff; we must restore the limit correctly!
1363 sub xSP, xCB * 2
1364 sgdt [xSP]
1365%endif
1366%ifndef VMX_SKIP_IDTR
1367 sub xSP, xCB * 2
1368 sidt [xSP]
1369%endif
1370
1371 ; Load CR2 if necessary (may be expensive as writing CR2 is a synchronizing instruction).
1372 mov xBX, [xSI + CPUMCTX.cr2]
1373 mov xDX, cr2
1374 cmp xBX, xDX
1375 je .skip_cr2_write32
1376 mov cr2, xBX
1377
1378.skip_cr2_write32:
 ; HOST_RSP must reflect the final stack layout; RESTORE_STATE_VM32 unwinds it.
1379 mov eax, VMX_VMCS_HOST_RSP
1380 vmwrite xAX, xSP
1381 ; Note: assumes success!
1382 ; Don't mess with ESP anymore!!!
1383
1384 ; Load guest general purpose registers.
1385 mov eax, [xSI + CPUMCTX.eax]
1386 mov ebx, [xSI + CPUMCTX.ebx]
1387 mov ecx, [xSI + CPUMCTX.ecx]
1388 mov edx, [xSI + CPUMCTX.edx]
1389 mov ebp, [xSI + CPUMCTX.ebp]
1390
1391 ; Resume or start VM?
1392 cmp xDI, 0 ; fResume
1393 je .vmlaunch_launch
1394
1395 ; Load guest edi & esi.
1396 mov edi, [xSI + CPUMCTX.edi]
1397 mov esi, [xSI + CPUMCTX.esi]
1398
1399 vmresume
1400 jmp .vmlaunch_done; ; Here if vmresume detected a failure.
1401
1402.vmlaunch_launch:
1403 ; Load guest edi & esi.
1404 mov edi, [xSI + CPUMCTX.edi]
1405 mov esi, [xSI + CPUMCTX.esi]
1406
1407 vmlaunch
1408 jmp .vmlaunch_done; ; Here if vmlaunch detected a failure.
1409
1410ALIGNCODE(16) ;; @todo YASM BUG - this alignment is wrong on darwin, it's 1 byte off.
1411.vmlaunch_done:
 ; On failed vmlaunch/vmresume: CF=1 => invalid VMCS pointer, ZF=1 => VM-instruction error.
 ; On a normal VM-exit both flags are clear and we fall through to the success path.
1412 jc near .vmxstart_invalid_vmcs_ptr
1413 jz near .vmxstart_start_failed
1414
1415 RESTORE_STATE_VM32
1416 mov eax, VINF_SUCCESS
1417
1418.vmstart_end:
1419 popf
1420 pop xBP
1421 ret
1422
1423.vmxstart_invalid_vmcs_ptr:
1424 RESTORE_STATE_VM32
1425 mov eax, VERR_VMX_INVALID_VMCS_PTR_TO_START_VM
1426 jmp .vmstart_end
1427
1428.vmxstart_start_failed:
1429 RESTORE_STATE_VM32
1430 mov eax, VERR_VMX_UNABLE_TO_START_VM
1431 jmp .vmstart_end
1432
1433ENDPROC VMXR0StartVM32
1434
1435
1436%ifdef RT_ARCH_AMD64
1437;; @def RESTORE_STATE_VM64
1438; Macro restoring essential host state and updating guest state
1439; for 64-bit host, 64-bit guest for VT-x.
;
; Expects the stack to hold, from the top down (as set up by VMXR0StartVM64):
;   IDTR image (unless VMX_SKIP_IDTR), GDTR image (unless VMX_SKIP_GDTR),
;   saved host TR (unless VMX_SKIP_TR), saved host LDTR, pCtx,
;   pCache (if VMX_USE_CACHED_VMCS_ACCESSES), the XCR0-restore indicator
;   (plus the saved host XCR0 edx:eax pair when the indicator is zero),
;   the host segment registers (MYPUSHSEGS) and the host GPRs (MYPUSHAD).
1440;
1441%macro RESTORE_STATE_VM64 0
1442 ; Restore base and limit of the IDTR & GDTR
1443 %ifndef VMX_SKIP_IDTR
1444 lidt [xSP]
1445 add xSP, xCB * 2
1446 %endif
1447 %ifndef VMX_SKIP_GDTR
1448 lgdt [xSP]
1449 add xSP, xCB * 2
1450 %endif
1451
 ; Park the guest rdi so xDI can be used to address pCtx while saving guest GPRs.
1452 push xDI
1453 %ifndef VMX_SKIP_TR
1454 mov xDI, [xSP + xCB * 3] ; pCtx (*3 to skip the saved xDI, TR, LDTR)
1455 %else
1456 mov xDI, [xSP + xCB * 2] ; pCtx (*2 to skip the saved xDI, LDTR)
1457 %endif
1458
 ; Save the guest GPRs (still live from the VM-exit) into pCtx.
1459 mov qword [xDI + CPUMCTX.eax], rax
1460 mov qword [xDI + CPUMCTX.ebx], rbx
1461 mov qword [xDI + CPUMCTX.ecx], rcx
1462 mov qword [xDI + CPUMCTX.edx], rdx
1463 mov qword [xDI + CPUMCTX.esi], rsi
1464 mov qword [xDI + CPUMCTX.ebp], rbp
1465 mov qword [xDI + CPUMCTX.r8], r8
1466 mov qword [xDI + CPUMCTX.r9], r9
1467 mov qword [xDI + CPUMCTX.r10], r10
1468 mov qword [xDI + CPUMCTX.r11], r11
1469 mov qword [xDI + CPUMCTX.r12], r12
1470 mov qword [xDI + CPUMCTX.r13], r13
1471 mov qword [xDI + CPUMCTX.r14], r14
1472 mov qword [xDI + CPUMCTX.r15], r15
1473 mov rax, cr2
1474 mov qword [xDI + CPUMCTX.cr2], rax
1475
1476 pop xAX ; The guest rdi we pushed above
1477 mov qword [xDI + CPUMCTX.edi], rax
1478
1479 %ifndef VMX_SKIP_TR
1480 ; Restore TSS selector; must mark it as not busy before using ltr (!)
1481 ; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p).
1482 ; @todo get rid of sgdt
1483 pop xBX ; Saved TR
1484 sub xSP, xCB * 2
1485 sgdt [xSP]
1486 mov xAX, xBX
1487 and eax, X86_SEL_MASK_OFF_RPL ; Mask away TI and RPL bits leaving only the descriptor offset.
1488 add xAX, [xSP + 2] ; eax <- GDTR.address + descriptor offset.
1489 and dword [xAX + 4], ~RT_BIT(9) ; Clear the busy flag in TSS desc (bits 0-7=base, bit 9=busy bit).
1490 ltr bx
1491 add xSP, xCB * 2
1492 %endif
1493
1494 pop xAX ; Saved LDTR
1495 cmp eax, 0
1496 je %%skip_ldt_write64
1497 lldt ax
1498
1499%%skip_ldt_write64:
1500 pop xSI ; pCtx (needed in rsi by the macros below)
1501
1502 %ifdef VMX_USE_CACHED_VMCS_ACCESSES
1503 pop xDX ; Saved pCache
1504
1505 ; Note! If we get here as a result of invalid VMCS pointer, all the following
1506 ; vmread's will fail (only eflags.cf=1 will be set) but that shouldn't cause any
1507 ; trouble only just less efficient.
1508 mov ecx, [xDX + VMCSCACHE.Read.cValidEntries]
1509 cmp ecx, 0 ; Can't happen
1510 je %%no_cached_read64
1511 jmp %%cached_read64
1512
1513ALIGN(16)
1514%%cached_read64:
 ; Walk the read-cache backwards, vmread'ing each field into its value slot.
1515 dec xCX
1516 mov eax, [xDX + VMCSCACHE.Read.aField + xCX * 4]
1517 vmread [xDX + VMCSCACHE.Read.aFieldVal + xCX * 8], xAX
1518 cmp xCX, 0
1519 jnz %%cached_read64
1520%%no_cached_read64:
1521 %endif
1522
1523 ; Restore segment registers.
1524 MYPOPSEGS xAX, ax
1525
1526 ; Restore the host XCR0 if necessary.
 ; A zero indicator means the saved host XCR0 pair (rax, rdx) follows on the stack.
1527 pop xCX
1528 test ecx, ecx
1529 jnz %%xcr0_after_skip
1530 pop xAX
1531 pop xDX
1532 xsetbv ; ecx is already zero.
1533%%xcr0_after_skip:
1534
1535 ; Restore general purpose registers.
1536 MYPOPAD
1537%endmacro
1538
1539
1540;;
1541; Prepares for and executes VMLAUNCH/VMRESUME (64 bits guest mode)
1542;
1543; @returns VBox status code
1544; @param fResume msc:rcx, gcc:rdi Whether to use vmlaunch/vmresume.
1545; @param pCtx msc:rdx, gcc:rsi Pointer to the guest-CPU context.
1546; @param pCache msc:r8, gcc:rdx Pointer to the VMCS cache.
1547; @param pVM msc:r9, gcc:rcx Pointer to the cross context VM structure.
1548; @param pVCpu msc:[ebp+30], gcc:r8 Pointer to the cross context VMCPU structure.
1549;
1550ALIGNCODE(16)
1551BEGINPROC VMXR0StartVM64
1552 push xBP
1553 mov xBP, xSP
1554
 ; Save flags and disable interrupts; both are restored by the popf in .vmstart64_end.
1555 pushf
1556 cli
1557
1558 ; Save all general purpose host registers.
1559 MYPUSHAD
1560
1561 ; First we have to save some final CPU context registers.
 ; HOST_RIP = .vmlaunch64_done, i.e. where the CPU resumes us on the next VM-exit.
1562 lea r10, [.vmlaunch64_done wrt rip]
1563 mov rax, VMX_VMCS_HOST_RIP ; Return address (too difficult to continue after VMLAUNCH?).
1564 vmwrite rax, r10
1565 ; Note: assumes success!
1566
1567 ;
1568 ; Unify the input parameter registers.
1569 ;
 ; After this: rdi = fResume, rsi = pCtx, rbx = pCache on both ABIs.
1570%ifdef ASM_CALL64_GCC
1571 ; fResume already in rdi
1572 ; pCtx already in rsi
1573 mov rbx, rdx ; pCache
1574%else
1575 mov rdi, rcx ; fResume
1576 mov rsi, rdx ; pCtx
1577 mov rbx, r8 ; pCache
1578%endif
1579
1580 ;
1581 ; Save the host XCR0 and load the guest one if necessary.
1582 ; Note! Trashes rdx and rcx.
1583 ;
1584%ifdef ASM_CALL64_MSC
1585 mov rax, [xBP + 30h] ; pVCpu
1586%else
1587 mov rax, r8 ; pVCpu
1588%endif
1589 test byte [xAX + VMCPU.hm + HMCPU.fLoadSaveGuestXcr0], 1
1590 jz .xcr0_before_skip
1591
1592 xor ecx, ecx
1593 xgetbv ; Save the host one on the stack.
1594 push xDX
1595 push xAX
1596
1597 mov eax, [xSI + CPUMCTX.aXcr] ; Load the guest one.
1598 mov edx, [xSI + CPUMCTX.aXcr + 4]
1599 xor ecx, ecx ; paranoia
1600 xsetbv
1601
1602 push 0 ; Indicate that we must restore XCR0 (popped into ecx, thus 0).
1603 jmp .xcr0_before_done
1604
1605.xcr0_before_skip:
1606 push 3fh ; indicate that we need not. (Any non-zero value would do.)
1607.xcr0_before_done:
1608
1609 ;
1610 ; Save segment registers.
1611 ; Note! Trashes rdx & rcx, so we moved it here (amd64 case).
1612 ;
1613 MYPUSHSEGS xAX, ax
1614
1615%ifdef VMX_USE_CACHED_VMCS_ACCESSES
 ; Flush the VMCS write-cache: vmwrite each queued field, then clear the count.
1616 mov ecx, [xBX + VMCSCACHE.Write.cValidEntries]
1617 cmp ecx, 0
1618 je .no_cached_writes
1619 mov edx, ecx
1620 mov ecx, 0
1621 jmp .cached_write
1622
1623ALIGN(16)
1624.cached_write:
1625 mov eax, [xBX + VMCSCACHE.Write.aField + xCX * 4]
1626 vmwrite xAX, [xBX + VMCSCACHE.Write.aFieldVal + xCX * 8]
1627 inc xCX
1628 cmp xCX, xDX
1629 jl .cached_write
1630
1631 mov dword [xBX + VMCSCACHE.Write.cValidEntries], 0
1632.no_cached_writes:
1633
1634 ; Save the pCache pointer.
1635 push xBX
1636%endif
1637
1638 ; Save the pCtx pointer.
1639 push xSI
1640
1641 ; Save host LDTR.
1642 xor eax, eax
1643 sldt ax
1644 push xAX
1645
1646%ifndef VMX_SKIP_TR
1647 ; The host TR limit is reset to 0x67; save & restore it manually.
1648 str eax
1649 push xAX
1650%endif
1651
1652%ifndef VMX_SKIP_GDTR
1653 ; VT-x only saves the base of the GDTR & IDTR and resets the limit to 0xffff; we must restore the limit correctly!
1654 sub xSP, xCB * 2
1655 sgdt [xSP]
1656%endif
1657%ifndef VMX_SKIP_IDTR
1658 sub xSP, xCB * 2
1659 sidt [xSP]
1660%endif
1661
1662 ; Load CR2 if necessary (may be expensive as writing CR2 is a synchronizing instruction).
1663 mov rbx, qword [xSI + CPUMCTX.cr2]
1664 mov rdx, cr2
1665 cmp rbx, rdx
1666 je .skip_cr2_write
1667 mov cr2, rbx
1668
1669.skip_cr2_write:
 ; HOST_RSP must reflect the final stack layout; RESTORE_STATE_VM64 unwinds it.
1670 mov eax, VMX_VMCS_HOST_RSP
1671 vmwrite xAX, xSP
1672 ; Note: assumes success!
1673 ; Don't mess with ESP anymore!!!
1674
1675 ; Load guest general purpose registers.
1676 mov rax, qword [xSI + CPUMCTX.eax]
1677 mov rbx, qword [xSI + CPUMCTX.ebx]
1678 mov rcx, qword [xSI + CPUMCTX.ecx]
1679 mov rdx, qword [xSI + CPUMCTX.edx]
1680 mov rbp, qword [xSI + CPUMCTX.ebp]
1681 mov r8, qword [xSI + CPUMCTX.r8]
1682 mov r9, qword [xSI + CPUMCTX.r9]
1683 mov r10, qword [xSI + CPUMCTX.r10]
1684 mov r11, qword [xSI + CPUMCTX.r11]
1685 mov r12, qword [xSI + CPUMCTX.r12]
1686 mov r13, qword [xSI + CPUMCTX.r13]
1687 mov r14, qword [xSI + CPUMCTX.r14]
1688 mov r15, qword [xSI + CPUMCTX.r15]
1689
1690 ; Resume or start VM?
1691 cmp xDI, 0 ; fResume
1692 je .vmlaunch64_launch
1693
1694 ; Load guest rdi & rsi.
1695 mov rdi, qword [xSI + CPUMCTX.edi]
1696 mov rsi, qword [xSI + CPUMCTX.esi]
1697
1698 vmresume
1699 jmp .vmlaunch64_done; ; Here if vmresume detected a failure.
1700
1701.vmlaunch64_launch:
1702 ; Load guest rdi & rsi.
1703 mov rdi, qword [xSI + CPUMCTX.edi]
1704 mov rsi, qword [xSI + CPUMCTX.esi]
1705
1706 vmlaunch
1707 jmp .vmlaunch64_done; ; Here if vmlaunch detected a failure.
1708
1709ALIGNCODE(16)
1710.vmlaunch64_done:
 ; On failed vmlaunch/vmresume: CF=1 => invalid VMCS pointer, ZF=1 => VM-instruction error.
 ; On a normal VM-exit both flags are clear and we fall through to the success path.
1711 jc near .vmxstart64_invalid_vmcs_ptr
1712 jz near .vmxstart64_start_failed
1713
1714 RESTORE_STATE_VM64
1715 mov eax, VINF_SUCCESS
1716
1717.vmstart64_end:
1718 popf
1719 pop xBP
1720 ret
1721
1722.vmxstart64_invalid_vmcs_ptr:
1723 RESTORE_STATE_VM64
1724 mov eax, VERR_VMX_INVALID_VMCS_PTR_TO_START_VM
1725 jmp .vmstart64_end
1726
1727.vmxstart64_start_failed:
1728 RESTORE_STATE_VM64
1729 mov eax, VERR_VMX_UNABLE_TO_START_VM
1730 jmp .vmstart64_end
1731ENDPROC VMXR0StartVM64
1732%endif ; RT_ARCH_AMD64
1733
1734
1735;;
1736; Prepares for and executes VMRUN (32 bits guests)
1737;
1738; @returns VBox status code
1739; @param pVMCBHostPhys Physical address of host VMCB.
1740; @param pVMCBPhys Physical address of guest VMCB.
1741; @param pCtx Pointer to the guest CPU-context.
1742; @param pVM msc:r9, gcc:rcx Pointer to the cross context VM structure.
1743; @param pVCpu msc:[rsp+28],gcc:r8 Pointer to the cross context VMCPU structure.
1744;
1745ALIGNCODE(16)
1746BEGINPROC SVMR0VMRun
 ; On AMD64 the register arguments are pushed so the body below can address all
 ; five parameters uniformly via xBP, exactly like the x86 cdecl case.
1747%ifdef RT_ARCH_AMD64 ; fake a cdecl stack frame
1748 %ifdef ASM_CALL64_GCC
1749 push r8
1750 push rcx
1751 push rdx
1752 push rsi
1753 push rdi
1754 %else
1755 mov rax, [rsp + 28h]
1756 push rax ; pVCpu
1757 push r9 ; pVM
1758 push r8 ; pCtx
1759 push rdx ; HCPHYSGuestVMCB
1760 push rcx ; HCPhysHostVMCB
1761 %endif
1762 push 0 ; fake return address slot
1763%endif
1764 push xBP
1765 mov xBP, xSP
1766 pushf
1767
1768 ;
1769 ; Save all general purpose host registers.
1770 ;
1771 MYPUSHAD
1772
1773 ;
1774 ; Load pCtx into xSI.
1775 ;
1776 mov xSI, [xBP + xCB * 2 + RTHCPHYS_CB * 2] ; pCtx
1777
1778 ;
1779 ; Save the host XCR0 and load the guest one if necessary.
1780 ;
1781 mov xAX, [xBP + xCB * 2 + RTHCPHYS_CB * 2 + xCB * 2] ; pVCpu
1782 test byte [xAX + VMCPU.hm + HMCPU.fLoadSaveGuestXcr0], 1
1783 jz .xcr0_before_skip
1784
1785 xor ecx, ecx
1786 xgetbv ; Save the host one on the stack.
1787 push xDX
1788 push xAX
1789
1790 mov xSI, [xBP + xCB * 2 + RTHCPHYS_CB * 2] ; pCtx
1791 mov eax, [xSI + CPUMCTX.aXcr] ; Load the guest one.
1792 mov edx, [xSI + CPUMCTX.aXcr + 4]
1793 xor ecx, ecx ; paranoia
1794 xsetbv
1795
1796 push 0 ; Indicate that we must restore XCR0 (popped into ecx, thus 0).
1797 jmp .xcr0_before_done
1798
1799.xcr0_before_skip:
1800 push 3fh ; indicate that we need not. (Any non-zero value would do.)
1801.xcr0_before_done:
1802
1803 ;
1804 ; Save guest CPU-context pointer for simplifying saving of the GPRs afterwards.
1805 ;
1806 push xSI
1807
1808 ; Save host fs, gs, sysenter msr etc.
1809 mov xAX, [xBP + xCB * 2] ; pVMCBHostPhys (64 bits physical address; x86: take low dword only)
1810 push xAX ; save for the vmload after vmrun
1811 vmsave
1812
1813 ; Setup eax for VMLOAD.
1814 mov xAX, [xBP + xCB * 2 + RTHCPHYS_CB] ; pVMCBPhys (64 bits physical address; take low dword only)
1815
1816 ; Load guest general purpose registers.
1817 ; eax is loaded from the VMCB by VMRUN.
1818 mov ebx, [xSI + CPUMCTX.ebx]
1819 mov ecx, [xSI + CPUMCTX.ecx]
1820 mov edx, [xSI + CPUMCTX.edx]
1821 mov edi, [xSI + CPUMCTX.edi]
1822 mov ebp, [xSI + CPUMCTX.ebp] ; note: ebp no longer points at our frame from here on.
1823 mov esi, [xSI + CPUMCTX.esi]
1824
1825 ; Clear the global interrupt flag & execute sti to make sure external interrupts cause a world switch.
1826 clgi
1827 sti
1828
1829 ; Load guest fs, gs, sysenter msr etc.
1830 vmload
1831 ; Run the VM.
1832 vmrun
1833
1834 ; eax is in the VMCB already; we can use it here.
1835
1836 ; Save guest fs, gs, sysenter msr etc.
1837 vmsave
1838
1839 ; Load host fs, gs, sysenter msr etc.
1840 pop xAX ; Pushed above
1841 vmload
1842
1843 ; Set the global interrupt flag again, but execute cli to make sure IF=0.
1844 cli
1845 stgi
1846
1847 ;
1848 ; Pop the context pointer (pushed above) and save the guest GPRs (sans RSP and RAX).
1849 ;
1850 pop xAX
1851
1852 mov [ss:xAX + CPUMCTX.ebx], ebx
1853 mov [ss:xAX + CPUMCTX.ecx], ecx
1854 mov [ss:xAX + CPUMCTX.edx], edx
1855 mov [ss:xAX + CPUMCTX.esi], esi
1856 mov [ss:xAX + CPUMCTX.edi], edi
1857 mov [ss:xAX + CPUMCTX.ebp], ebp
1858
1859 ;
1860 ; Restore the host xcr0 if necessary.
1861 ; A zero indicator means the saved host XCR0 pair (eax, edx) follows on the stack.
1862 pop xCX
1863 test ecx, ecx
1864 jnz .xcr0_after_skip
1865 pop xAX
1866 pop xDX
1867 xsetbv ; ecx is already zero.
1868.xcr0_after_skip:
1869
1870 ;
1871 ; Restore host general purpose registers.
1872 ;
1873 MYPOPAD
1874
1875 mov eax, VINF_SUCCESS
1876
1877 popf
1878 pop xBP
 ; Drop the five pushed parameters plus the fake return address (AMD64 only).
1879%ifdef RT_ARCH_AMD64
1880 add xSP, 6*xCB
1881%endif
1882 ret
1883ENDPROC SVMR0VMRun
1884
1885
1886%ifdef RT_ARCH_AMD64
1887;;
1888; Prepares for and executes VMRUN (64 bits guests)
1889;
1890; @returns VBox status code
1891; @param pVMCBHostPhys Physical address of host VMCB.
1892; @param pVMCBPhys Physical address of guest VMCB.
1893; @param pCtx Pointer to the guest-CPU context.
1894; @param pVM msc:r9, gcc:rcx Pointer to the cross context VM structure.
1895; @param pVCpu msc:[rsp+28],gcc:r8 Pointer to the cross context VMCPU structure.
1896;
1897ALIGNCODE(16)
1898BEGINPROC SVMR0VMRun64
 ; Push the register arguments so the body can address all five parameters
 ; uniformly via rbp (offsets noted on each push below).
1899 ; Fake a cdecl stack frame
1900 %ifdef ASM_CALL64_GCC
1901 push r8
1902 push rcx
1903 push rdx
1904 push rsi
1905 push rdi
1906 %else
1907 mov rax, [rsp + 28h]
1908 push rax ; rbp + 30h pVCpu
1909 push r9 ; rbp + 28h pVM
1910 push r8 ; rbp + 20h pCtx
1911 push rdx ; rbp + 18h HCPHYSGuestVMCB
1912 push rcx ; rbp + 10h HCPhysHostVMCB
1913 %endif
1914 push 0 ; rbp + 08h "fake ret addr"
1915 push rbp ; rbp + 00h
1916 mov rbp, rsp
1917 pushf
1918
1919 ; Manual save and restore:
1920 ; - General purpose registers except RIP, RSP, RAX
1921 ;
1922 ; Trashed:
1923 ; - CR2 (we don't care)
1924 ; - LDTR (reset to 0)
1925 ; - DRx (presumably not changed at all)
1926 ; - DR7 (reset to 0x400)
1927 ;
1928
1929 ;
1930 ; Save all general purpose host registers.
1931 ;
1932 MYPUSHAD
1933
1934 ;
1935 ; Load pCtx into xSI.
1936 ;
1937 mov xSI, [rbp + xCB * 2 + RTHCPHYS_CB * 2]
1938
1939 ;
1940 ; Save the host XCR0 and load the guest one if necessary.
1941 ;
1942 mov rax, [xBP + 30h] ; pVCpu
1943 test byte [xAX + VMCPU.hm + HMCPU.fLoadSaveGuestXcr0], 1
1944 jz .xcr0_before_skip
1945
1946 xor ecx, ecx
1947 xgetbv ; Save the host one on the stack.
1948 push xDX
1949 push xAX
1950
1951 mov xSI, [xBP + xCB * 2 + RTHCPHYS_CB * 2] ; pCtx
1952 mov eax, [xSI + CPUMCTX.aXcr] ; Load the guest one.
1953 mov edx, [xSI + CPUMCTX.aXcr + 4]
1954 xor ecx, ecx ; paranoia
1955 xsetbv
1956
1957 push 0 ; Indicate that we must restore XCR0 (popped into ecx, thus 0).
1958 jmp .xcr0_before_done
1959
1960.xcr0_before_skip:
1961 push 3fh ; indicate that we need not. (Any non-zero value would do.)
1962.xcr0_before_done:
1963
1964 ;
1965 ; Save guest CPU-context pointer for simplifying saving of the GPRs afterwards.
1966 ;
1967 push rsi
1968
1969 ;
1970 ; Save host fs, gs, sysenter msr etc.
1971 ;
1972 mov rax, [rbp + xCB * 2] ; pVMCBHostPhys (64 bits physical address)
1973 push rax ; Save for the vmload after vmrun
1974 vmsave
1975
1976 ; Setup eax for VMLOAD.
1977 mov rax, [rbp + xCB * 2 + RTHCPHYS_CB] ; pVMCBPhys (64 bits physical address)
1978
1979 ; Load guest general purpose registers.
1980 ; rax is loaded from the VMCB by VMRUN.
1981 mov rbx, qword [xSI + CPUMCTX.ebx]
1982 mov rcx, qword [xSI + CPUMCTX.ecx]
1983 mov rdx, qword [xSI + CPUMCTX.edx]
1984 mov rdi, qword [xSI + CPUMCTX.edi]
1985 mov rbp, qword [xSI + CPUMCTX.ebp] ; note: rbp no longer points at our frame from here on.
1986 mov r8, qword [xSI + CPUMCTX.r8]
1987 mov r9, qword [xSI + CPUMCTX.r9]
1988 mov r10, qword [xSI + CPUMCTX.r10]
1989 mov r11, qword [xSI + CPUMCTX.r11]
1990 mov r12, qword [xSI + CPUMCTX.r12]
1991 mov r13, qword [xSI + CPUMCTX.r13]
1992 mov r14, qword [xSI + CPUMCTX.r14]
1993 mov r15, qword [xSI + CPUMCTX.r15]
1994 mov rsi, qword [xSI + CPUMCTX.esi]
1995
1996 ; Clear the global interrupt flag & execute sti to make sure external interrupts cause a world switch.
1997 clgi
1998 sti
1999
2000 ; Load guest fs, gs, sysenter msr etc.
2001 vmload
2002 ; Run the VM.
2003 vmrun
2004
2005 ; rax is in the VMCB already; we can use it here.
2006
2007 ; Save guest fs, gs, sysenter msr etc.
2008 vmsave
2009
2010 ;
2011 ; Load host fs, gs, sysenter msr etc.
2012 ;
2013 pop rax ; pushed above
2014 vmload
2015
2016 ;
2017 ; Set the global interrupt flag again, but execute cli to make sure IF=0.
2018 ;
2019 cli
2020 stgi
2021
2022 ;
2023 ; Pop the context pointer (pushed above) and save the guest GPRs (sans RSP and RAX).
2024 ;
2025 pop rax
2026
2027 mov qword [rax + CPUMCTX.ebx], rbx
2028 mov qword [rax + CPUMCTX.ecx], rcx
2029 mov qword [rax + CPUMCTX.edx], rdx
2030 mov qword [rax + CPUMCTX.esi], rsi
2031 mov qword [rax + CPUMCTX.edi], rdi
2032 mov qword [rax + CPUMCTX.ebp], rbp
2033 mov qword [rax + CPUMCTX.r8], r8
2034 mov qword [rax + CPUMCTX.r9], r9
2035 mov qword [rax + CPUMCTX.r10], r10
2036 mov qword [rax + CPUMCTX.r11], r11
2037 mov qword [rax + CPUMCTX.r12], r12
2038 mov qword [rax + CPUMCTX.r13], r13
2039 mov qword [rax + CPUMCTX.r14], r14
2040 mov qword [rax + CPUMCTX.r15], r15
2041
2042 ;
2043 ; Restore the host xcr0 if necessary.
2044 ; A zero indicator means the saved host XCR0 pair (rax, rdx) follows on the stack.
2045 pop xCX
2046 test ecx, ecx
2047 jnz .xcr0_after_skip
2048 pop xAX
2049 pop xDX
2050 xsetbv ; ecx is already zero.
2051.xcr0_after_skip:
2052
2053 ;
2054 ; Restore host general purpose registers.
2055 ;
2056 MYPOPAD
2057
2058 mov eax, VINF_SUCCESS
2059
2060 popf
2061 pop rbp
2062 add rsp, 6 * xCB ; drop the five pushed parameters and the fake return address
2063 ret
2064ENDPROC SVMR0VMRun64
2065%endif ; RT_ARCH_AMD64
2066
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette