VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/HWACCMR0A.asm@26224

Last change on this file since 26224 was 25413, checked in by vboxsync on 2009-12-15

Corrected return value in case of success

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 40.6 KB
1; $Id: HWACCMR0A.asm 25413 2009-12-15 15:58:50Z vboxsync $
2;; @file
3; VMXM - R0 vmx helpers
4;
5
6;
7; Copyright (C) 2006-2007 Sun Microsystems, Inc.
8;
9; This file is part of VirtualBox Open Source Edition (OSE), as
10; available from http://www.virtualbox.org. This file is free software;
11; you can redistribute it and/or modify it under the terms of the GNU
12; General Public License (GPL) as published by the Free Software
13; Foundation, in version 2 as it comes in the "COPYING" file of the
14; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16;
17; Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18; Clara, CA 95054 USA or visit http://www.sun.com if you need
19; additional information or have any questions.
20;
21
22;*******************************************************************************
23;* Header Files *
24;*******************************************************************************
25%include "VBox/asmdefs.mac"
26%include "VBox/err.mac"
27%include "VBox/hwacc_vmx.mac"
28%include "VBox/cpum.mac"
29%include "VBox/x86.mac"
30%include "../HWACCMInternal.mac"
31
32%ifdef RT_OS_OS2 ;; @todo fix OMF support in yasm and kick nasm out completely.
33 %macro vmwrite 2,
34 int3
35 %endmacro
36 %define vmlaunch int3
37 %define vmresume int3
38 %define vmsave int3
39 %define vmload int3
40 %define vmrun int3
41 %define clgi int3
42 %define stgi int3
43 %macro invlpga 2,
44 int3
45 %endmacro
46%endif
47
48;*******************************************************************************
49;* Defined Constants And Macros *
50;*******************************************************************************
51%ifdef RT_ARCH_AMD64
52 %define MAYBE_64_BIT
53%endif
54%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
55 %define MAYBE_64_BIT
56%else
57 %ifdef RT_OS_DARWIN
58 %ifdef RT_ARCH_AMD64
59 ;;
60 ; Load the NULL selector into DS, ES, FS and GS on 64-bit darwin so we don't
61 ; risk loading a stale LDT value or something invalid.
62 %define HWACCM_64_BIT_USE_NULL_SEL
63 %endif
64 %endif
65%endif
66
67;; The offset of the XMM registers in X86FXSTATE.
68; Use define because I'm too lazy to convert the struct.
69%define XMM_OFF_IN_X86FXSTATE 160
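; (Background: in the FXSAVE/FXRSTOR image the XMM registers start at byte offset 160,
;  i.e. a 32-byte header followed by 8 x 16 bytes of ST/MM register space.)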
70
71
72;; This is too risky wrt. stability, performance and correctness.
73;%define VBOX_WITH_DR6_EXPERIMENT 1
74
75;; @def MYPUSHAD
76; Macro generating an equivalent to pushad
77
78;; @def MYPOPAD
79; Macro generating an equivalent to popad
80
81;; @def MYPUSHSEGS
82; Macro saving all segment registers on the stack.
83; @param 1 full width register name
84 ; @param 2 16-bit register name for \a 1.
85
86;; @def MYPOPSEGS
87; Macro restoring all segment registers on the stack
88; @param 1 full width register name
89 ; @param 2 16-bit register name for \a 1.
90
91%ifdef MAYBE_64_BIT
92 ; Save a host MSR and load the corresponding guest MSR (trashes rdx & rcx)
93 %macro LOADGUESTMSR 2
94 mov rcx, %1
95 rdmsr
96 push rdx
97 push rax
98 mov edx, dword [xSI + %2 + 4]
99 mov eax, dword [xSI + %2]
100 wrmsr
101 %endmacro
102
103 ; Save a guest MSR and load the corresponding host MSR (trashes rdx & rcx)
104 ; Only really useful for the GS kernel base MSR, as that one can be changed behind our back (swapgs).
105 %macro LOADHOSTMSREX 2
106 mov rcx, %1
107 rdmsr
108 mov dword [xSI + %2], eax
109 mov dword [xSI + %2 + 4], edx
110 pop rax
111 pop rdx
112 wrmsr
113 %endmacro
114
115 ; Load the corresponding host MSR (trashes rdx & rcx)
116 %macro LOADHOSTMSR 1
117 mov rcx, %1
118 pop rax
119 pop rdx
120 wrmsr
121 %endmacro
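 ; Note: LOADGUESTMSR pushes the host MSR value (rdx:rax) on the stack and
 ; LOADHOSTMSR/LOADHOSTMSREX pop it again, so the two must be used in matching
 ; (LIFO) order.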
122%endif
123
124%ifdef ASM_CALL64_GCC
125 %macro MYPUSHAD64 0
126 push r15
127 push r14
128 push r13
129 push r12
130 push rbx
131 %endmacro
132 %macro MYPOPAD64 0
133 pop rbx
134 pop r12
135 pop r13
136 pop r14
137 pop r15
138 %endmacro
139
140%else ; ASM_CALL64_MSC
141 %macro MYPUSHAD64 0
142 push r15
143 push r14
144 push r13
145 push r12
146 push rbx
147 push rsi
148 push rdi
149 %endmacro
150 %macro MYPOPAD64 0
151 pop rdi
152 pop rsi
153 pop rbx
154 pop r12
155 pop r13
156 pop r14
157 pop r15
158 %endmacro
159%endif
160
161; trashes rax, rdx & rcx
162%macro MYPUSHSEGS64 2
163 %ifndef HWACCM_64_BIT_USE_NULL_SEL
164 mov %2, es
165 push %1
166 mov %2, ds
167 push %1
168 %endif
169
170 ; Special case for FS: Windows and Linux either don't use it or restore it when leaving kernel mode; Solaris OTOH doesn't restore it, so we must save and restore it ourselves.
171 mov ecx, MSR_K8_FS_BASE
172 rdmsr
173 push rdx
174 push rax
175 %ifndef HWACCM_64_BIT_USE_NULL_SEL
176 push fs
177 %endif
178
179 ; Special case for GS: OSes typically use swapgs to reset the hidden base register for GS on entry into the kernel; the same happens on exit.
180 mov ecx, MSR_K8_GS_BASE
181 rdmsr
182 push rdx
183 push rax
184 %ifndef HWACCM_64_BIT_USE_NULL_SEL
185 push gs
186 %endif
187%endmacro
188
189; trashes rax, rdx & rcx
190%macro MYPOPSEGS64 2
191 ; Note: do not step through this code with a debugger!
192 %ifndef HWACCM_64_BIT_USE_NULL_SEL
193 xor eax, eax
194 mov ds, ax
195 mov es, ax
196 mov fs, ax
197 mov gs, ax
198 %endif
199
200 %ifndef HWACCM_64_BIT_USE_NULL_SEL
201 pop gs
202 %endif
203 pop rax
204 pop rdx
205 mov ecx, MSR_K8_GS_BASE
206 wrmsr
207
208 %ifndef HWACCM_64_BIT_USE_NULL_SEL
209 pop fs
210 %endif
211 pop rax
212 pop rdx
213 mov ecx, MSR_K8_FS_BASE
214 wrmsr
215 ; Now it's safe to step again
216
217 %ifndef HWACCM_64_BIT_USE_NULL_SEL
218 pop %1
219 mov ds, %2
220 pop %1
221 mov es, %2
222 %endif
223%endmacro
224
225%macro MYPUSHAD32 0
226 pushad
227%endmacro
228%macro MYPOPAD32 0
229 popad
230%endmacro
231
232%macro MYPUSHSEGS32 2
233 push ds
234 push es
235 push fs
236 push gs
237%endmacro
238%macro MYPOPSEGS32 2
239 pop gs
240 pop fs
241 pop es
242 pop ds
243%endmacro
244
245
246;*******************************************************************************
247;* External Symbols *
248;*******************************************************************************
249%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
250extern NAME(SUPR0AbsIs64bit)
251extern NAME(SUPR0Abs64bitKernelCS)
252extern NAME(SUPR0Abs64bitKernelSS)
253extern NAME(SUPR0Abs64bitKernelDS)
254extern NAME(SUPR0AbsKernelCS)
255%endif
256%ifdef VBOX_WITH_KERNEL_USING_XMM
257extern NAME(CPUMIsGuestFPUStateActive)
258%endif
259
260
261;*******************************************************************************
262;* Global Variables *
263;*******************************************************************************
264%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
265BEGINDATA
266;;
267; Store the SUPR0AbsIs64bit absolute value here so we can cmp/test without
268; needing to clobber a register. (This trick doesn't quite work for PE btw.
269; but that's not relevant atm.)
270GLOBALNAME g_fVMXIs64bitHost
271 dd NAME(SUPR0AbsIs64bit)
272%endif
273
274
275BEGINCODE
276
277
278;/**
279; * Executes VMWRITE, 64-bit value.
280; *
281; * @returns VBox status code
282; * @param idxField x86: [ebp + 08h] msc: rcx gcc: rdi VMCS index
283; * @param u64Data x86: [ebp + 0ch] msc: rdx gcc: rsi VM field value
284; */
285ALIGNCODE(16)
286BEGINPROC VMXWriteVMCS64
287%ifdef RT_ARCH_AMD64
288 %ifdef ASM_CALL64_GCC
289 and edi, 0ffffffffh
290 xor rax, rax
291 vmwrite rdi, rsi
292 %else
293 and ecx, 0ffffffffh
294 xor rax, rax
295 vmwrite rcx, rdx
296 %endif
297%else ; RT_ARCH_X86
298 mov ecx, [esp + 4] ; idxField
299 lea edx, [esp + 8] ; &u64Data
300 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
301 cmp byte [NAME(g_fVMXIs64bitHost)], 0
302 jz .legacy_mode
303 db 0xea ; jmp far .sixtyfourbit_mode
304 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
305.legacy_mode:
306 %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
307 vmwrite ecx, [edx] ; low dword
308 jz .done
309 jc .done
310 inc ecx
311 xor eax, eax
312 vmwrite ecx, [edx + 4] ; high dword
313.done:
314%endif ; RT_ARCH_X86
315 jnc .valid_vmcs
316 mov eax, VERR_VMX_INVALID_VMCS_PTR
317 ret
318.valid_vmcs:
319 jnz .the_end
320 mov eax, VERR_VMX_INVALID_VMCS_FIELD
321.the_end:
322 ret
323
324%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
325ALIGNCODE(16)
326BITS 64
327.sixtyfourbit_mode:
328 and edx, 0ffffffffh
329 and ecx, 0ffffffffh
330 xor eax, eax
331 vmwrite rcx, [rdx]
332 mov r8d, VERR_VMX_INVALID_VMCS_FIELD
333 cmovz eax, r8d
334 mov r9d, VERR_VMX_INVALID_VMCS_PTR
335 cmovc eax, r9d
336 jmp far [.fpret wrt rip]
337.fpret: ; 16:32 Pointer to .the_end.
338 dd .the_end, NAME(SUPR0AbsKernelCS)
339BITS 32
340%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
341ENDPROC VMXWriteVMCS64
342
343
344;/**
345; * Executes VMREAD, 64-bit value
346; *
347; * @returns VBox status code
348; * @param idxField VMCS index
349; * @param pData Ptr to store VM field value
350; */
351;DECLASM(int) VMXReadVMCS64(uint32_t idxField, uint64_t *pData);
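; A minimal C usage sketch (hypothetical caller, illustrative names only):
;     uint64_t u64Val = 0;
;     int rc = VMXReadVMCS64(idxField, &u64Val);
;     if (RT_SUCCESS(rc))
;         /* u64Val now holds the VMCS field value. */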
352ALIGNCODE(16)
353BEGINPROC VMXReadVMCS64
354%ifdef RT_ARCH_AMD64
355 %ifdef ASM_CALL64_GCC
356 and edi, 0ffffffffh
357 xor rax, rax
358 vmread [rsi], rdi
359 %else
360 and ecx, 0ffffffffh
361 xor rax, rax
362 vmread [rdx], rcx
363 %endif
364%else ; RT_ARCH_X86
365 mov ecx, [esp + 4] ; idxField
366 mov edx, [esp + 8] ; pData
367 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
368 cmp byte [NAME(g_fVMXIs64bitHost)], 0
369 jz .legacy_mode
370 db 0xea ; jmp far .sixtyfourbit_mode
371 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
372.legacy_mode:
373 %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
374 vmread [edx], ecx ; low dword
375 jz .done
376 jc .done
377 inc ecx
378 xor eax, eax
379 vmread [edx + 4], ecx ; high dword
380.done:
381%endif ; RT_ARCH_X86
382 jnc .valid_vmcs
383 mov eax, VERR_VMX_INVALID_VMCS_PTR
384 ret
385.valid_vmcs:
386 jnz .the_end
387 mov eax, VERR_VMX_INVALID_VMCS_FIELD
388.the_end:
389 ret
390
391%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
392ALIGNCODE(16)
393BITS 64
394.sixtyfourbit_mode:
395 and edx, 0ffffffffh
396 and ecx, 0ffffffffh
397 xor eax, eax
398 vmread [rdx], rcx
399 mov r8d, VERR_VMX_INVALID_VMCS_FIELD
400 cmovz eax, r8d
401 mov r9d, VERR_VMX_INVALID_VMCS_PTR
402 cmovc eax, r9d
403 jmp far [.fpret wrt rip]
404.fpret: ; 16:32 Pointer to .the_end.
405 dd .the_end, NAME(SUPR0AbsKernelCS)
406BITS 32
407%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
408ENDPROC VMXReadVMCS64
409
410
411;/**
412; * Executes VMREAD, 32-bit value.
413; *
414; * @returns VBox status code
415; * @param idxField VMCS index
416; * @param pu32Data Ptr to store VM field value
417; */
418;DECLASM(int) VMXReadVMCS32(uint32_t idxField, uint32_t *pu32Data);
419ALIGNCODE(16)
420BEGINPROC VMXReadVMCS32
421%ifdef RT_ARCH_AMD64
422 %ifdef ASM_CALL64_GCC
423 and edi, 0ffffffffh
424 xor rax, rax
425 vmread r10, rdi
426 mov [rsi], r10d
427 %else
428 and ecx, 0ffffffffh
429 xor rax, rax
430 vmread r10, rcx
431 mov [rdx], r10d
432 %endif
433%else ; RT_ARCH_X86
434 mov ecx, [esp + 4] ; idxField
435 mov edx, [esp + 8] ; pu32Data
436 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
437 cmp byte [NAME(g_fVMXIs64bitHost)], 0
438 jz .legacy_mode
439 db 0xea ; jmp far .sixtyfourbit_mode
440 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
441.legacy_mode:
442 %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
443 xor eax, eax
444 vmread [edx], ecx
445%endif ; RT_ARCH_X86
446 jnc .valid_vmcs
447 mov eax, VERR_VMX_INVALID_VMCS_PTR
448 ret
449.valid_vmcs:
450 jnz .the_end
451 mov eax, VERR_VMX_INVALID_VMCS_FIELD
452.the_end:
453 ret
454
455%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
456ALIGNCODE(16)
457BITS 64
458.sixtyfourbit_mode:
459 and edx, 0ffffffffh
460 and ecx, 0ffffffffh
461 xor eax, eax
462 vmread r10, rcx
463 mov [rdx], r10d
464 mov r8d, VERR_VMX_INVALID_VMCS_FIELD
465 cmovz eax, r8d
466 mov r9d, VERR_VMX_INVALID_VMCS_PTR
467 cmovc eax, r9d
468 jmp far [.fpret wrt rip]
469.fpret: ; 16:32 Pointer to .the_end.
470 dd .the_end, NAME(SUPR0AbsKernelCS)
471BITS 32
472%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
473ENDPROC VMXReadVMCS32
474
475
476;/**
477; * Executes VMWRITE, 32-bit value.
478; *
479; * @returns VBox status code
480; * @param idxField VMCS index
481 ; * @param u32Data The 32-bit value to write to the VM field
482; */
483;DECLASM(int) VMXWriteVMCS32(uint32_t idxField, uint32_t u32Data);
484ALIGNCODE(16)
485BEGINPROC VMXWriteVMCS32
486%ifdef RT_ARCH_AMD64
487 %ifdef ASM_CALL64_GCC
488 and edi, 0ffffffffh
489 and esi, 0ffffffffh
490 xor rax, rax
491 vmwrite rdi, rsi
492 %else
493 and ecx, 0ffffffffh
494 and edx, 0ffffffffh
495 xor rax, rax
496 vmwrite rcx, rdx
497 %endif
498%else ; RT_ARCH_X86
499 mov ecx, [esp + 4] ; idxField
500 mov edx, [esp + 8] ; u32Data
501 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
502 cmp byte [NAME(g_fVMXIs64bitHost)], 0
503 jz .legacy_mode
504 db 0xea ; jmp far .sixtyfourbit_mode
505 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
506.legacy_mode:
507 %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
508 xor eax, eax
509 vmwrite ecx, edx
510%endif ; RT_ARCH_X86
511 jnc .valid_vmcs
512 mov eax, VERR_VMX_INVALID_VMCS_PTR
513 ret
514.valid_vmcs:
515 jnz .the_end
516 mov eax, VERR_VMX_INVALID_VMCS_FIELD
517.the_end:
518 ret
519
520%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
521ALIGNCODE(16)
522BITS 64
523.sixtyfourbit_mode:
524 and edx, 0ffffffffh
525 and ecx, 0ffffffffh
526 xor eax, eax
527 vmwrite rcx, rdx
528 mov r8d, VERR_VMX_INVALID_VMCS_FIELD
529 cmovz eax, r8d
530 mov r9d, VERR_VMX_INVALID_VMCS_PTR
531 cmovc eax, r9d
532 jmp far [.fpret wrt rip]
533.fpret: ; 16:32 Pointer to .the_end.
534 dd .the_end, NAME(SUPR0AbsKernelCS)
535BITS 32
536%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
537ENDPROC VMXWriteVMCS32
538
539
540;/**
541; * Executes VMXON
542; *
543; * @returns VBox status code
544; * @param HCPhysVMXOn Physical address of VMXON structure
545; */
546;DECLASM(int) VMXEnable(RTHCPHYS HCPhysVMXOn);
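; A minimal C usage sketch (hypothetical caller; assumes CR4.VMXE has already been
; set and HCPhysVmxOnRegion points to a properly initialized VMXON region):
;     int rc = VMXEnable(HCPhysVmxOnRegion);
;     if (RT_SUCCESS(rc))
;     {
;         /* ... VMX operation ... */
;         VMXDisable();
;     }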
547BEGINPROC VMXEnable
548%ifdef RT_ARCH_AMD64
549 xor rax, rax
550 %ifdef ASM_CALL64_GCC
551 push rdi
552 %else
553 push rcx
554 %endif
555 vmxon [rsp]
556%else ; RT_ARCH_X86
557 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
558 cmp byte [NAME(g_fVMXIs64bitHost)], 0
559 jz .legacy_mode
560 db 0xea ; jmp far .sixtyfourbit_mode
561 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
562.legacy_mode:
563 %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
564 xor eax, eax
565 vmxon [esp + 4]
566%endif ; RT_ARCH_X86
567 jnc .good
568 mov eax, VERR_VMX_INVALID_VMXON_PTR
569 jmp .the_end
570
571.good:
572 jnz .the_end
573 mov eax, VERR_VMX_GENERIC
574
575.the_end:
576%ifdef RT_ARCH_AMD64
577 add rsp, 8
578%endif
579 ret
580
581%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
582ALIGNCODE(16)
583BITS 64
584.sixtyfourbit_mode:
585 lea rdx, [rsp + 4] ; &HCPhysVMXOn.
586 and edx, 0ffffffffh
587 xor eax, eax
588 vmxon [rdx]
589 mov r8d, VERR_INVALID_PARAMETER
590 cmovz eax, r8d
591 mov r9d, VERR_VMX_INVALID_VMCS_PTR
592 cmovc eax, r9d
593 jmp far [.fpret wrt rip]
594.fpret: ; 16:32 Pointer to .the_end.
595 dd .the_end, NAME(SUPR0AbsKernelCS)
596BITS 32
597%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
598ENDPROC VMXEnable
599
600
601;/**
602; * Executes VMXOFF
603; */
604;DECLASM(void) VMXDisable(void);
605BEGINPROC VMXDisable
606%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
607 cmp byte [NAME(g_fVMXIs64bitHost)], 0
608 jz .legacy_mode
609 db 0xea ; jmp far .sixtyfourbit_mode
610 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
611.legacy_mode:
612%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
613 vmxoff
614.the_end:
615 ret
616
617%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
618ALIGNCODE(16)
619BITS 64
620.sixtyfourbit_mode:
621 vmxoff
622 jmp far [.fpret wrt rip]
623.fpret: ; 16:32 Pointer to .the_end.
624 dd .the_end, NAME(SUPR0AbsKernelCS)
625BITS 32
626%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
627ENDPROC VMXDisable
628
629
630;/**
631; * Executes VMCLEAR
632; *
633; * @returns VBox status code
634; * @param HCPhysVMCS Physical address of VM control structure
635; */
636;DECLASM(int) VMXClearVMCS(RTHCPHYS HCPhysVMCS);
637ALIGNCODE(16)
638BEGINPROC VMXClearVMCS
639%ifdef RT_ARCH_AMD64
640 xor rax, rax
641 %ifdef ASM_CALL64_GCC
642 push rdi
643 %else
644 push rcx
645 %endif
646 vmclear [rsp]
647%else ; RT_ARCH_X86
648 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
649 cmp byte [NAME(g_fVMXIs64bitHost)], 0
650 jz .legacy_mode
651 db 0xea ; jmp far .sixtyfourbit_mode
652 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
653.legacy_mode:
654 %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
655 xor eax, eax
656 vmclear [esp + 4]
657%endif ; RT_ARCH_X86
658 jnc .the_end
659 mov eax, VERR_VMX_INVALID_VMCS_PTR
660.the_end:
661%ifdef RT_ARCH_AMD64
662 add rsp, 8
663%endif
664 ret
665
666%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
667ALIGNCODE(16)
668BITS 64
669.sixtyfourbit_mode:
670 lea rdx, [rsp + 4] ; &HCPhysVMCS
671 and edx, 0ffffffffh
672 xor eax, eax
673 vmclear [rdx]
674 mov r9d, VERR_VMX_INVALID_VMCS_PTR
675 cmovc eax, r9d
676 jmp far [.fpret wrt rip]
677.fpret: ; 16:32 Pointer to .the_end.
678 dd .the_end, NAME(SUPR0AbsKernelCS)
679BITS 32
680%endif
681ENDPROC VMXClearVMCS
682
683
684;/**
685; * Executes VMPTRLD
686; *
687; * @returns VBox status code
688; * @param HCPhysVMCS Physical address of VMCS structure
689; */
690;DECLASM(int) VMXActivateVMCS(RTHCPHYS HCPhysVMCS);
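; A minimal C sketch of the usual VMCS setup order (hypothetical caller, illustrative
; names only): clear the region first, then make it the current VMCS.
;     int rc = VMXClearVMCS(HCPhysVMCS);          /* vmclear */
;     if (RT_SUCCESS(rc))
;         rc = VMXActivateVMCS(HCPhysVMCS);       /* vmptrld */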
691ALIGNCODE(16)
692BEGINPROC VMXActivateVMCS
693%ifdef RT_ARCH_AMD64
694 xor rax, rax
695 %ifdef ASM_CALL64_GCC
696 push rdi
697 %else
698 push rcx
699 %endif
700 vmptrld [rsp]
701%else
702 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
703 cmp byte [NAME(g_fVMXIs64bitHost)], 0
704 jz .legacy_mode
705 db 0xea ; jmp far .sixtyfourbit_mode
706 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
707.legacy_mode:
708 %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
709 xor eax, eax
710 vmptrld [esp + 4]
711%endif
712 jnc .the_end
713 mov eax, VERR_VMX_INVALID_VMCS_PTR
714.the_end:
715%ifdef RT_ARCH_AMD64
716 add rsp, 8
717%endif
718 ret
719
720%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
721ALIGNCODE(16)
722BITS 64
723.sixtyfourbit_mode:
724 lea rdx, [rsp + 4] ; &HCPhysVMCS
725 and edx, 0ffffffffh
726 xor eax, eax
727 vmptrld [rdx]
728 mov r9d, VERR_VMX_INVALID_VMCS_PTR
729 cmovc eax, r9d
730 jmp far [.fpret wrt rip]
731.fpret: ; 16:32 Pointer to .the_end.
732 dd .the_end, NAME(SUPR0AbsKernelCS)
733BITS 32
734%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
735ENDPROC VMXActivateVMCS
736
737
738;/**
739; * Executes VMPTRST
740; *
741; * @returns VBox status code
742 ; * @param pVMCS x86:[esp + 04h] gcc:rdi msc:rcx Address that will receive the current VMCS pointer
743; */
744;DECLASM(int) VMXGetActivateVMCS(RTHCPHYS *pVMCS);
745BEGINPROC VMXGetActivateVMCS
746%ifdef RT_OS_OS2
747 mov eax, VERR_NOT_SUPPORTED
748 ret
749%else
750 %ifdef RT_ARCH_AMD64
751 %ifdef ASM_CALL64_GCC
752 vmptrst qword [rdi]
753 %else
754 vmptrst qword [rcx]
755 %endif
756 %else
757 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
758 cmp byte [NAME(g_fVMXIs64bitHost)], 0
759 jz .legacy_mode
760 db 0xea ; jmp far .sixtyfourbit_mode
761 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
762.legacy_mode:
763 %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
764 vmptrst qword [esp+04h]
765 %endif
766 xor eax, eax
767.the_end:
768 ret
769
770 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
771ALIGNCODE(16)
772BITS 64
773.sixtyfourbit_mode:
774 lea rdx, [rsp + 4] ; &HCPhysVMCS
775 and edx, 0ffffffffh
776 vmptrst qword [rdx]
777 xor eax, eax
778 jmp far [.fpret wrt rip]
779.fpret: ; 16:32 Pointer to .the_end.
780 dd .the_end, NAME(SUPR0AbsKernelCS)
781BITS 32
782 %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
783%endif
784ENDPROC VMXGetActivateVMCS
785
786;/**
787 ; * Invalidates EPT-derived translations using INVEPT.
788; @param enmFlush msc:ecx gcc:edi x86:[esp+04] Type of flush
789; @param pDescriptor msc:edx gcc:esi x86:[esp+08] Descriptor pointer
790; */
791;DECLASM(int) VMXR0InvEPT(VMX_FLUSH enmFlush, uint64_t *pDescriptor);
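; A minimal C usage sketch (hypothetical caller; the descriptor layout is the
; architectural INVEPT one: bits 63:0 = EPT pointer, bits 127:64 reserved/zero):
;     uint64_t au64Desc[2] = { HCPhysEPTP, 0 };
;     int rc = VMXR0InvEPT(enmFlush, &au64Desc[0]);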
792BEGINPROC VMXR0InvEPT
793%ifdef RT_ARCH_AMD64
794 %ifdef ASM_CALL64_GCC
795 and edi, 0ffffffffh
796 xor rax, rax
797; invept rdi, qword [rsi]
798 DB 0x66, 0x0F, 0x38, 0x80, 0x3E
799 %else
800 and ecx, 0ffffffffh
801 xor rax, rax
802; invept rcx, qword [rdx]
803 DB 0x66, 0x0F, 0x38, 0x80, 0xA
804 %endif
805%else
806 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
807 cmp byte [NAME(g_fVMXIs64bitHost)], 0
808 jz .legacy_mode
809 db 0xea ; jmp far .sixtyfourbit_mode
810 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
811.legacy_mode:
812 %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
813 mov ecx, [esp + 4]
814 mov edx, [esp + 8]
815 xor eax, eax
816; invept ecx, qword [edx]
817 DB 0x66, 0x0F, 0x38, 0x80, 0xA
818%endif
819 jnc .valid_vmcs
820 mov eax, VERR_VMX_INVALID_VMCS_PTR
821 ret
822.valid_vmcs:
823 jnz .the_end
824 mov eax, VERR_INVALID_PARAMETER
825.the_end:
826 ret
827
828%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
829ALIGNCODE(16)
830BITS 64
831.sixtyfourbit_mode:
832 and esp, 0ffffffffh
833 mov ecx, [rsp + 4] ; enmFlush
834 mov edx, [rsp + 8] ; pDescriptor
835 xor eax, eax
836; invept rcx, qword [rdx]
837 DB 0x66, 0x0F, 0x38, 0x80, 0xA
838 mov r8d, VERR_INVALID_PARAMETER
839 cmovz eax, r8d
840 mov r9d, VERR_VMX_INVALID_VMCS_PTR
841 cmovc eax, r9d
842 jmp far [.fpret wrt rip]
843.fpret: ; 16:32 Pointer to .the_end.
844 dd .the_end, NAME(SUPR0AbsKernelCS)
845BITS 32
846%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
847ENDPROC VMXR0InvEPT
848
849
850;/**
851 ; * Invalidates VPID-tagged guest TLB entries using INVVPID.
852; @param enmFlush msc:ecx gcc:edi x86:[esp+04] Type of flush
853; @param pDescriptor msc:edx gcc:esi x86:[esp+08] Descriptor pointer
854; */
855;DECLASM(int) VMXR0InvVPID(VMX_FLUSH enmFlush, uint64_t *pDescriptor);
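; A minimal C usage sketch (hypothetical caller; architecturally the INVVPID
; descriptor holds the VPID in bits 15:0, zero in bits 63:16 and, for address
; based flushes, the linear address in bits 127:64):
;     uint64_t au64Desc[2] = { uVPID, GCPtrPage };
;     int rc = VMXR0InvVPID(enmFlush, &au64Desc[0]);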
856BEGINPROC VMXR0InvVPID
857%ifdef RT_ARCH_AMD64
858 %ifdef ASM_CALL64_GCC
859 and edi, 0ffffffffh
860 xor rax, rax
861 ;invvpid rdi, qword [rsi]
862 DB 0x66, 0x0F, 0x38, 0x81, 0x3E
863 %else
864 and ecx, 0ffffffffh
865 xor rax, rax
866; invvpid rcx, qword [rdx]
867 DB 0x66, 0x0F, 0x38, 0x81, 0xA
868 %endif
869%else
870 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
871 cmp byte [NAME(g_fVMXIs64bitHost)], 0
872 jz .legacy_mode
873 db 0xea ; jmp far .sixtyfourbit_mode
874 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
875.legacy_mode:
876 %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
877 mov ecx, [esp + 4]
878 mov edx, [esp + 8]
879 xor eax, eax
880; invvpid ecx, qword [edx]
881 DB 0x66, 0x0F, 0x38, 0x81, 0xA
882%endif
883 jnc .valid_vmcs
884 mov eax, VERR_VMX_INVALID_VMCS_PTR
885 ret
886.valid_vmcs:
887 jnz .the_end
888 mov eax, VERR_INVALID_PARAMETER
889.the_end:
890 ret
891
892%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
893ALIGNCODE(16)
894BITS 64
895.sixtyfourbit_mode:
896 and esp, 0ffffffffh
897 mov ecx, [rsp + 4] ; enmFlush
898 mov edx, [rsp + 8] ; pDescriptor
899 xor eax, eax
900; invvpid rcx, qword [rdx]
901 DB 0x66, 0x0F, 0x38, 0x81, 0xA
902 mov r8d, VERR_INVALID_PARAMETER
903 cmovz eax, r8d
904 mov r9d, VERR_VMX_INVALID_VMCS_PTR
905 cmovc eax, r9d
906 jmp far [.fpret wrt rip]
907.fpret: ; 16:32 Pointer to .the_end.
908 dd .the_end, NAME(SUPR0AbsKernelCS)
909BITS 32
910%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
911ENDPROC VMXR0InvVPID
912
913
914%if GC_ARCH_BITS == 64
915;;
916; Executes INVLPGA
917;
918; @param pPageGC msc:rcx gcc:rdi x86:[esp+04] Virtual page to invalidate
919; @param uASID msc:rdx gcc:rsi x86:[esp+0C] Tagged TLB id
920;
921;DECLASM(void) SVMR0InvlpgA(RTGCPTR pPageGC, uint32_t uASID);
922BEGINPROC SVMR0InvlpgA
923%ifdef RT_ARCH_AMD64
924 %ifdef ASM_CALL64_GCC
925 mov rax, rdi
926 mov rcx, rsi
927 %else
928 mov rax, rcx
929 mov rcx, rdx
930 %endif
931%else
932 mov eax, [esp + 4]
933 mov ecx, [esp + 0Ch]
934%endif
935 invlpga [xAX], ecx
936 ret
937ENDPROC SVMR0InvlpgA
938
939%else ; GC_ARCH_BITS != 64
940;;
941; Executes INVLPGA
942;
943; @param pPageGC msc:ecx gcc:edi x86:[esp+04] Virtual page to invalidate
944; @param uASID msc:edx gcc:esi x86:[esp+08] Tagged TLB id
945;
946;DECLASM(void) SVMR0InvlpgA(RTGCPTR pPageGC, uint32_t uASID);
947BEGINPROC SVMR0InvlpgA
948%ifdef RT_ARCH_AMD64
949 %ifdef ASM_CALL64_GCC
950 movzx rax, edi
951 mov ecx, esi
952 %else
953 ; from http://www.cs.cmu.edu/~fp/courses/15213-s06/misc/asm64-handout.pdf:
954 ; ``Perhaps unexpectedly, instructions that move or generate 32-bit register
955 ; values also set the upper 32 bits of the register to zero. Consequently
956 ; there is no need for an instruction movzlq.''
957 mov eax, ecx
958 mov ecx, edx
959 %endif
960%else
961 mov eax, [esp + 4]
962 mov ecx, [esp + 8]
963%endif
964 invlpga [xAX], ecx
965 ret
966ENDPROC SVMR0InvlpgA
967
968%endif ; GC_ARCH_BITS != 64
969
970%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
971
972;/**
973; * Gets 64-bit GDTR and IDTR on darwin.
974; * @param pGdtr Where to store the 64-bit GDTR.
975; * @param pIdtr Where to store the 64-bit IDTR.
976; */
977;DECLASM(void) hwaccmR0Get64bitGDTRandIDTR(PX86XDTR64 pGdtr, PX86XDTR64 pIdtr);
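; A minimal C usage sketch (hypothetical caller):
;     X86XDTR64 Gdtr64, Idtr64;
;     hwaccmR0Get64bitGDTRandIDTR(&Gdtr64, &Idtr64);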
978ALIGNCODE(16)
979BEGINPROC hwaccmR0Get64bitGDTRandIDTR
980 db 0xea ; jmp far .sixtyfourbit_mode
981 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
982.the_end:
983 ret
984
985ALIGNCODE(16)
986BITS 64
987.sixtyfourbit_mode:
988 and esp, 0ffffffffh
989 mov ecx, [rsp + 4] ; pGdtr
990 mov edx, [rsp + 8] ; pIdtr
991 sgdt [rcx]
992 sidt [rdx]
993 jmp far [.fpret wrt rip]
994.fpret: ; 16:32 Pointer to .the_end.
995 dd .the_end, NAME(SUPR0AbsKernelCS)
996BITS 32
997ENDPROC hwaccmR0Get64bitGDTRandIDTR
998
999
1000;/**
1001; * Gets 64-bit CR3 on darwin.
1002; * @returns CR3
1003; */
1004;DECLASM(uint64_t) hwaccmR0Get64bitCR3(void);
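; A minimal C usage sketch (the 64-bit result is returned in edx:eax, as set up below):
;     uint64_t u64HostCr3 = hwaccmR0Get64bitCR3();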
1005ALIGNCODE(16)
1006BEGINPROC hwaccmR0Get64bitCR3
1007 db 0xea ; jmp far .sixtyfourbit_mode
1008 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
1009.the_end:
1010 ret
1011
1012ALIGNCODE(16)
1013BITS 64
1014.sixtyfourbit_mode:
1015 mov rax, cr3
1016 mov rdx, rax
1017 shr rdx, 32
1018 jmp far [.fpret wrt rip]
1019.fpret: ; 16:32 Pointer to .the_end.
1020 dd .the_end, NAME(SUPR0AbsKernelCS)
1021BITS 32
1022ENDPROC hwaccmR0Get64bitCR3
1023
1024%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
1025
1026%ifdef VBOX_WITH_KERNEL_USING_XMM
1027
1028;;
1029; Wrapper around vmx.pfnStartVM that preserves host XMM registers and
1030; loads the guest ones when necessary.
1031;
1032; @cproto DECLASM(int) hwaccmR0VMXStartVMWrapXMM(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu, PFNHWACCMVMXSTARTVM pfnStartVM);
1033;
1034; @returns eax
1035;
1036; @param fResumeVM msc:rcx
1037; @param pCtx msc:rdx
1038; @param pVMCSCache msc:r8
1039; @param pVM msc:r9
1040; @param pVCpu msc:[rbp+30h]
1041; @param pfnStartVM msc:[rbp+38h]
1042;
1043; @remarks This is essentially the same code as hwaccmR0SVMRunWrapXMM, only the parameters differ a little bit.
1044;
1045; ASSUMING 64-bit and windows for now.
1046ALIGNCODE(16)
1047BEGINPROC hwaccmR0VMXStartVMWrapXMM
1048 push xBP
1049 mov xBP, xSP
1050 sub xSP, 0a0h + 040h ; Don't bother optimizing the frame size.
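 ; Frame layout: the 040h bytes cover the Win64 register home area and the 5th
 ; argument slot at [xSP + 20h] (pVCpu is stored there below); the 0a0h bytes at
 ; [xSP + 40h] hold the ten non-volatile host XMM registers (xmm6-xmm15).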
1051
1052 ; spill input parameters.
1053 mov [xBP + 010h], rcx ; fResumeVM
1054 mov [xBP + 018h], rdx ; pCtx
1055 mov [xBP + 020h], r8 ; pVMCSCache
1056 mov [xBP + 028h], r9 ; pVM
1057
1058 ; Ask CPUM whether we've started using the FPU yet.
1059 mov rcx, [xBP + 30h] ; pVCpu
1060 call NAME(CPUMIsGuestFPUStateActive)
1061 test al, al
1062 jnz .guest_fpu_state_active
1063
1064 ; No need to mess with XMM registers; just call the start routine and return.
1065 mov r11, [xBP + 38h] ; pfnStartVM
1066 mov r10, [xBP + 30h] ; pVCpu
1067 mov [xSP + 020h], r10
1068 mov rcx, [xBP + 010h] ; fResumeVM
1069 mov rdx, [xBP + 018h] ; pCtx
1070 mov r8, [xBP + 020h] ; pVMCSCache
1071 mov r9, [xBP + 028h] ; pVM
1072 call r11
1073
1074 leave
1075 ret
1076
1077ALIGNCODE(8)
1078.guest_fpu_state_active:
1079 ; Save the host XMM registers.
1080 movdqa [rsp + 040h + 000h], xmm6
1081 movdqa [rsp + 040h + 010h], xmm7
1082 movdqa [rsp + 040h + 020h], xmm8
1083 movdqa [rsp + 040h + 030h], xmm9
1084 movdqa [rsp + 040h + 040h], xmm10
1085 movdqa [rsp + 040h + 050h], xmm11
1086 movdqa [rsp + 040h + 060h], xmm12
1087 movdqa [rsp + 040h + 070h], xmm13
1088 movdqa [rsp + 040h + 080h], xmm14
1089 movdqa [rsp + 040h + 090h], xmm15
1090
1091 ; Load the full guest XMM register state.
1092 mov r10, [xBP + 018h] ; pCtx
1093 lea r10, [r10 + XMM_OFF_IN_X86FXSTATE]
1094 movdqa xmm0, [r10 + 000h]
1095 movdqa xmm1, [r10 + 010h]
1096 movdqa xmm2, [r10 + 020h]
1097 movdqa xmm3, [r10 + 030h]
1098 movdqa xmm4, [r10 + 040h]
1099 movdqa xmm5, [r10 + 050h]
1100 movdqa xmm6, [r10 + 060h]
1101 movdqa xmm7, [r10 + 070h]
1102 movdqa xmm8, [r10 + 080h]
1103 movdqa xmm9, [r10 + 090h]
1104 movdqa xmm10, [r10 + 0a0h]
1105 movdqa xmm11, [r10 + 0b0h]
1106 movdqa xmm12, [r10 + 0c0h]
1107 movdqa xmm13, [r10 + 0d0h]
1108 movdqa xmm14, [r10 + 0e0h]
1109 movdqa xmm15, [r10 + 0f0h]
1110
1111 ; Make the call (same as in the other case).
1112 mov r11, [xBP + 38h] ; pfnStartVM
1113 mov r10, [xBP + 30h] ; pVCpu
1114 mov [xSP + 020h], r10
1115 mov rcx, [xBP + 010h] ; fResumeVM
1116 mov rdx, [xBP + 018h] ; pCtx
1117 mov r8, [xBP + 020h] ; pVMCSCache
1118 mov r9, [xBP + 028h] ; pVM
1119 call r11
1120
1121 ; Save the guest XMM registers.
1122 mov r10, [xBP + 018h] ; pCtx
1123 lea r10, [r10 + XMM_OFF_IN_X86FXSTATE]
1124 movdqa [r10 + 000h], xmm0
1125 movdqa [r10 + 010h], xmm1
1126 movdqa [r10 + 020h], xmm2
1127 movdqa [r10 + 030h], xmm3
1128 movdqa [r10 + 040h], xmm4
1129 movdqa [r10 + 050h], xmm5
1130 movdqa [r10 + 060h], xmm6
1131 movdqa [r10 + 070h], xmm7
1132 movdqa [r10 + 080h], xmm8
1133 movdqa [r10 + 090h], xmm9
1134 movdqa [r10 + 0a0h], xmm10
1135 movdqa [r10 + 0b0h], xmm11
1136 movdqa [r10 + 0c0h], xmm12
1137 movdqa [r10 + 0d0h], xmm13
1138 movdqa [r10 + 0e0h], xmm14
1139 movdqa [r10 + 0f0h], xmm15
1140
1141 ; Load the host XMM registers.
1142 movdqa xmm6, [rsp + 040h + 000h]
1143 movdqa xmm7, [rsp + 040h + 010h]
1144 movdqa xmm8, [rsp + 040h + 020h]
1145 movdqa xmm9, [rsp + 040h + 030h]
1146 movdqa xmm10, [rsp + 040h + 040h]
1147 movdqa xmm11, [rsp + 040h + 050h]
1148 movdqa xmm12, [rsp + 040h + 060h]
1149 movdqa xmm13, [rsp + 040h + 070h]
1150 movdqa xmm14, [rsp + 040h + 080h]
1151 movdqa xmm15, [rsp + 040h + 090h]
1152 leave
1153 ret
1154ENDPROC hwaccmR0VMXStartVMWrapXMM
1155
1156;;
1157; Wrapper around svm.pfnVMRun that preserves host XMM registers and
1158; loads the guest ones when necessary.
1159;
1160; @cproto DECLASM(int) hwaccmR0SVMRunWrapXMM(RTHCPHYS pVMCBHostPhys, RTHCPHYS pVMCBPhys, PCPUMCTX pCtx, PVM pVM, PVMCPU pVCpu, PFNHWACCMSVMVMRUN pfnVMRun);
1161;
1162; @returns eax
1163;
1164; @param pVMCBHostPhys msc:rcx
1165; @param pVMCBPhys msc:rdx
1166; @param pCtx msc:r8
1167; @param pVM msc:r9
1168; @param pVCpu msc:[rbp+30h]
1169; @param pfnVMRun msc:[rbp+38h]
1170;
1171; @remarks This is essentially the same code as hwaccmR0VMXStartVMWrapXMM, only the parameters differ a little bit.
1172;
1173; ASSUMING 64-bit and windows for now.
1174ALIGNCODE(16)
1175BEGINPROC hwaccmR0SVMRunWrapXMM
1176 push xBP
1177 mov xBP, xSP
1178 sub xSP, 0a0h + 040h ; Don't bother optimizing the frame size.
1179
1180 ; spill input parameters.
1181 mov [xBP + 010h], rcx ; pVMCBHostPhys
1182 mov [xBP + 018h], rdx ; pVMCBPhys
1183 mov [xBP + 020h], r8 ; pCtx
1184 mov [xBP + 028h], r9 ; pVM
1185
1186 ; Ask CPUM whether we've started using the FPU yet.
1187 mov rcx, [xBP + 30h] ; pVCpu
1188 call NAME(CPUMIsGuestFPUStateActive)
1189 test al, al
1190 jnz .guest_fpu_state_active
1191
1192 ; No need to mess with XMM registers; just call the start routine and return.
1193 mov r11, [xBP + 38h] ; pfnVMRun
1194 mov r10, [xBP + 30h] ; pVCpu
1195 mov [xSP + 020h], r10
1196 mov rcx, [xBP + 010h] ; pVMCBHostPhys
1197 mov rdx, [xBP + 018h] ; pVMCBPhys
1198 mov r8, [xBP + 020h] ; pCtx
1199 mov r9, [xBP + 028h] ; pVM
1200 call r11
1201
1202 leave
1203 ret
1204
1205ALIGNCODE(8)
1206.guest_fpu_state_active:
1207 ; Save the host XMM registers.
1208 movdqa [rsp + 040h + 000h], xmm6
1209 movdqa [rsp + 040h + 010h], xmm7
1210 movdqa [rsp + 040h + 020h], xmm8
1211 movdqa [rsp + 040h + 030h], xmm9
1212 movdqa [rsp + 040h + 040h], xmm10
1213 movdqa [rsp + 040h + 050h], xmm11
1214 movdqa [rsp + 040h + 060h], xmm12
1215 movdqa [rsp + 040h + 070h], xmm13
1216 movdqa [rsp + 040h + 080h], xmm14
1217 movdqa [rsp + 040h + 090h], xmm15
1218
1219 ; Load the full guest XMM register state.
1220 mov r10, [xBP + 020h] ; pCtx
1221 lea r10, [r10 + XMM_OFF_IN_X86FXSTATE]
1222 movdqa xmm0, [r10 + 000h]
1223 movdqa xmm1, [r10 + 010h]
1224 movdqa xmm2, [r10 + 020h]
1225 movdqa xmm3, [r10 + 030h]
1226 movdqa xmm4, [r10 + 040h]
1227 movdqa xmm5, [r10 + 050h]
1228 movdqa xmm6, [r10 + 060h]
1229 movdqa xmm7, [r10 + 070h]
1230 movdqa xmm8, [r10 + 080h]
1231 movdqa xmm9, [r10 + 090h]
1232 movdqa xmm10, [r10 + 0a0h]
1233 movdqa xmm11, [r10 + 0b0h]
1234 movdqa xmm12, [r10 + 0c0h]
1235 movdqa xmm13, [r10 + 0d0h]
1236 movdqa xmm14, [r10 + 0e0h]
1237 movdqa xmm15, [r10 + 0f0h]
1238
1239 ; Make the call (same as in the other case).
1240 mov r11, [xBP + 38h] ; pfnVMRun
1241 mov r10, [xBP + 30h] ; pVCpu
1242 mov [xSP + 020h], r10
1243 mov rcx, [xBP + 010h] ; pVMCBHostPhys
1244 mov rdx, [xBP + 018h] ; pVMCBPhys
1245 mov r8, [xBP + 020h] ; pCtx
1246 mov r9, [xBP + 028h] ; pVM
1247 call r11
1248
1249 ; Save the guest XMM registers.
1250 mov r10, [xBP + 020h] ; pCtx
1251 lea r10, [r10 + XMM_OFF_IN_X86FXSTATE]
1252 movdqa [r10 + 000h], xmm0
1253 movdqa [r10 + 010h], xmm1
1254 movdqa [r10 + 020h], xmm2
1255 movdqa [r10 + 030h], xmm3
1256 movdqa [r10 + 040h], xmm4
1257 movdqa [r10 + 050h], xmm5
1258 movdqa [r10 + 060h], xmm6
1259 movdqa [r10 + 070h], xmm7
1260 movdqa [r10 + 080h], xmm8
1261 movdqa [r10 + 090h], xmm9
1262 movdqa [r10 + 0a0h], xmm10
1263 movdqa [r10 + 0b0h], xmm11
1264 movdqa [r10 + 0c0h], xmm12
1265 movdqa [r10 + 0d0h], xmm13
1266 movdqa [r10 + 0e0h], xmm14
1267 movdqa [r10 + 0f0h], xmm15
1268
1269 ; Load the host XMM registers.
1270 movdqa xmm6, [rsp + 040h + 000h]
1271 movdqa xmm7, [rsp + 040h + 010h]
1272 movdqa xmm8, [rsp + 040h + 020h]
1273 movdqa xmm9, [rsp + 040h + 030h]
1274 movdqa xmm10, [rsp + 040h + 040h]
1275 movdqa xmm11, [rsp + 040h + 050h]
1276 movdqa xmm12, [rsp + 040h + 060h]
1277 movdqa xmm13, [rsp + 040h + 070h]
1278 movdqa xmm14, [rsp + 040h + 080h]
1279 movdqa xmm15, [rsp + 040h + 090h]
1280 leave
1281 ret
1282ENDPROC hwaccmR0SVMRunWrapXMM
1283
1284%endif ; VBOX_WITH_KERNEL_USING_XMM
1285
1286;
1287; The default setup of the StartVM routines.
1288;
1289%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
1290 %define MY_NAME(name) name %+ _32
1291%else
1292 %define MY_NAME(name) name
1293%endif
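; For example, MY_NAME(VMXR0StartVM32) expands to VMXR0StartVM32_32 in the hybrid
; 32-bit kernel case and to plain VMXR0StartVM32 otherwise; the second pass at the
; end of this file redefines MY_NAME to append _64 instead.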
1294%ifdef RT_ARCH_AMD64
1295 %define MYPUSHAD MYPUSHAD64
1296 %define MYPOPAD MYPOPAD64
1297 %define MYPUSHSEGS MYPUSHSEGS64
1298 %define MYPOPSEGS MYPOPSEGS64
1299%else
1300 %define MYPUSHAD MYPUSHAD32
1301 %define MYPOPAD MYPOPAD32
1302 %define MYPUSHSEGS MYPUSHSEGS32
1303 %define MYPOPSEGS MYPOPSEGS32
1304%endif
1305
1306%include "HWACCMR0Mixed.mac"
1307
1308
1309%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
1310 ;
1311 ; Write the wrapper procedures.
1312 ;
1313 ; These routines are probably being too paranoid about selector
1314 ; restoring, but better safe than sorry...
1315 ;
1316
1317; DECLASM(int) VMXR0StartVM32(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache /*, PVM pVM, PVMCPU pVCpu*/);
1318ALIGNCODE(16)
1319BEGINPROC VMXR0StartVM32
1320 cmp byte [NAME(g_fVMXIs64bitHost)], 0
1321 je near NAME(VMXR0StartVM32_32)
1322
1323 ; stack frame
1324 push esi
1325 push edi
1326 push fs
1327 push gs
1328
1329 ; jmp far .thunk64
1330 db 0xea
1331 dd .thunk64, NAME(SUPR0Abs64bitKernelCS)
1332
1333ALIGNCODE(16)
1334BITS 64
1335.thunk64:
1336 sub esp, 20h
1337 mov edi, [rsp + 20h + 14h] ; fResume
1338 mov esi, [rsp + 20h + 18h] ; pCtx
1339 mov edx, [rsp + 20h + 1Ch] ; pCache
1340 call NAME(VMXR0StartVM32_64)
1341 add esp, 20h
1342 jmp far [.fpthunk32 wrt rip]
1343.fpthunk32: ; 16:32 Pointer to .thunk32.
1344 dd .thunk32, NAME(SUPR0AbsKernelCS)
1345
1346BITS 32
1347ALIGNCODE(16)
1348.thunk32:
1349 pop gs
1350 pop fs
1351 pop edi
1352 pop esi
1353 ret
1354ENDPROC VMXR0StartVM32
1355
1356
1357; DECLASM(int) VMXR0StartVM64(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache /*, PVM pVM, PVMCPU pVCpu*/);
1358ALIGNCODE(16)
1359BEGINPROC VMXR0StartVM64
1360 cmp byte [NAME(g_fVMXIs64bitHost)], 0
1361 je .not_in_long_mode
1362
1363 ; stack frame
1364 push esi
1365 push edi
1366 push fs
1367 push gs
1368
1369 ; jmp far .thunk64
1370 db 0xea
1371 dd .thunk64, NAME(SUPR0Abs64bitKernelCS)
1372
1373ALIGNCODE(16)
1374BITS 64
1375.thunk64:
1376 sub esp, 20h
1377 mov edi, [rsp + 20h + 14h] ; fResume
1378 mov esi, [rsp + 20h + 18h] ; pCtx
1379 mov edx, [rsp + 20h + 1Ch] ; pCache
1380 call NAME(VMXR0StartVM64_64)
1381 add esp, 20h
1382 jmp far [.fpthunk32 wrt rip]
1383.fpthunk32: ; 16:32 Pointer to .thunk32.
1384 dd .thunk32, NAME(SUPR0AbsKernelCS)
1385
1386BITS 32
1387ALIGNCODE(16)
1388.thunk32:
1389 pop gs
1390 pop fs
1391 pop edi
1392 pop esi
1393 ret
1394
1395.not_in_long_mode:
1396 mov eax, VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE
1397 ret
1398ENDPROC VMXR0StartVM64
1399
1400;DECLASM(int) SVMR0VMRun(RTHCPHYS pVMCBHostPhys, RTHCPHYS pVMCBPhys, PCPUMCTX pCtx /*, PVM pVM, PVMCPU pVCpu*/);
1401ALIGNCODE(16)
1402BEGINPROC SVMR0VMRun
1403 cmp byte [NAME(g_fVMXIs64bitHost)], 0
1404 je near NAME(SVMR0VMRun_32)
1405
1406 ; stack frame
1407 push esi
1408 push edi
1409 push fs
1410 push gs
1411
1412 ; jmp far .thunk64
1413 db 0xea
1414 dd .thunk64, NAME(SUPR0Abs64bitKernelCS)
1415
1416ALIGNCODE(16)
1417BITS 64
1418.thunk64:
1419 sub esp, 20h
1420 mov rdi, [rsp + 20h + 14h] ; pVMCBHostPhys
1421 mov rsi, [rsp + 20h + 1Ch] ; pVMCBPhys
1422 mov edx, [rsp + 20h + 24h] ; pCtx
1423 call NAME(SVMR0VMRun_64)
1424 add esp, 20h
1425 jmp far [.fpthunk32 wrt rip]
1426.fpthunk32: ; 16:32 Pointer to .thunk32.
1427 dd .thunk32, NAME(SUPR0AbsKernelCS)
1428
1429BITS 32
1430ALIGNCODE(16)
1431.thunk32:
1432 pop gs
1433 pop fs
1434 pop edi
1435 pop esi
1436 ret
1437ENDPROC SVMR0VMRun
1438
1439
1440; DECLASM(int) SVMR0VMRun64(RTHCPHYS pVMCBHostPhys, RTHCPHYS pVMCBPhys, PCPUMCTX pCtx /*, PVM pVM, PVMCPU pVCpu*/);
1441ALIGNCODE(16)
1442BEGINPROC SVMR0VMRun64
1443 cmp byte [NAME(g_fVMXIs64bitHost)], 0
1444 je .not_in_long_mode
1445
1446 ; stack frame
1447 push esi
1448 push edi
1449 push fs
1450 push gs
1451
1452 ; jmp far .thunk64
1453 db 0xea
1454 dd .thunk64, NAME(SUPR0Abs64bitKernelCS)
1455
1456ALIGNCODE(16)
1457BITS 64
1458.thunk64:
1459 sub esp, 20h
1460 mov rdi, [rsp + 20h + 14h] ; pVMCBHostPhys
1461 mov rsi, [rsp + 20h + 1Ch] ; pVMCBPhys
1462 mov edx, [rsp + 20h + 24h] ; pCtx
1463 call NAME(SVMR0VMRun64_64)
1464 add esp, 20h
1465 jmp far [.fpthunk32 wrt rip]
1466.fpthunk32: ; 16:32 Pointer to .thunk32.
1467 dd .thunk32, NAME(SUPR0AbsKernelCS)
1468
1469BITS 32
1470ALIGNCODE(16)
1471.thunk32:
1472 pop gs
1473 pop fs
1474 pop edi
1475 pop esi
1476 ret
1477
1478.not_in_long_mode:
1479 mov eax, VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE
1480 ret
1481ENDPROC SVMR0VMRun64
1482
1483 ;
1484 ; Do it a second time pretending we're a 64-bit host.
1485 ;
1486 ; This *HAS* to be done at the very end of the file to avoid restoring
1487 ; macros. So, add new code *BEFORE* this mess.
1488 ;
1489 BITS 64
1490 %undef RT_ARCH_X86
1491 %define RT_ARCH_AMD64
1492 %undef ASM_CALL64_MSC
1493 %define ASM_CALL64_GCC
1494 %define xS 8
1495 %define xSP rsp
1496 %define xBP rbp
1497 %define xAX rax
1498 %define xBX rbx
1499 %define xCX rcx
1500 %define xDX rdx
1501 %define xDI rdi
1502 %define xSI rsi
1503 %define MY_NAME(name) name %+ _64
1504 %define MYPUSHAD MYPUSHAD64
1505 %define MYPOPAD MYPOPAD64
1506 %define MYPUSHSEGS MYPUSHSEGS64
1507 %define MYPOPSEGS MYPOPSEGS64
1508
1509 %include "HWACCMR0Mixed.mac"
1510%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL