VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/HWACCMR0A.asm@ 20961

Last change on this file since 20961 was 18851, checked in by vboxsync, 16 years ago

HWACCMR0A.asm: Load the NULL selector into DS, ES, FS and GS on 64-bit darwin so we don't risk loading a stale and/or paged-out LDT value or something otherwise problematic.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 31.4 KB
Line 
1; $Id: HWACCMR0A.asm 18851 2009-04-08 17:31:06Z vboxsync $
2;; @file
3; VMXM - R0 vmx helpers
4;
5
6;
7; Copyright (C) 2006-2007 Sun Microsystems, Inc.
8;
9; This file is part of VirtualBox Open Source Edition (OSE), as
10; available from http://www.virtualbox.org. This file is free software;
11; you can redistribute it and/or modify it under the terms of the GNU
12; General Public License (GPL) as published by the Free Software
13; Foundation, in version 2 as it comes in the "COPYING" file of the
14; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16;
17; Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18; Clara, CA 95054 USA or visit http://www.sun.com if you need
19; additional information or have any questions.
20;
21
22;*******************************************************************************
23;* Header Files *
24;*******************************************************************************
25%include "VBox/asmdefs.mac"
26%include "VBox/err.mac"
27%include "VBox/hwacc_vmx.mac"
28%include "VBox/cpum.mac"
29%include "VBox/x86.mac"
30%include "../HWACCMInternal.mac"
31
32%ifdef RT_OS_OS2 ;; @todo fix OMF support in yasm and kick nasm out completely.
33 %macro vmwrite 2,
34 int3
35 %endmacro
36 %define vmlaunch int3
37 %define vmresume int3
38 %define vmsave int3
39 %define vmload int3
40 %define vmrun int3
41 %define clgi int3
42 %define stgi int3
43 %macro invlpga 2,
44 int3
45 %endmacro
46%endif
47
48;*******************************************************************************
49;* Defined Constants And Macros *
50;*******************************************************************************
51%ifdef RT_ARCH_AMD64
52 %define MAYBE_64_BIT
53%endif
54%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
55 %define MAYBE_64_BIT
56%else
57 %ifdef RT_OS_DARWIN
58 %ifdef RT_ARCH_AMD64
59 ;;
60 ; Load the NULL selector into DS, ES, FS and GS on 64-bit darwin so we don't
61 ; risk loading a stale LDT value or something invalid.
62 %define HWACCM_64_BIT_USE_NULL_SEL
63 %endif
64 %endif
65%endif
66
67
68;; This is too risky wrt. stability, performance and correctness.
69;%define VBOX_WITH_DR6_EXPERIMENT 1
70
71;; @def MYPUSHAD
72; Macro generating an equivalent to pushad
73
74;; @def MYPOPAD
75; Macro generating an equivalent to popad
76
77;; @def MYPUSHSEGS
78; Macro saving all segment registers on the stack.
79; @param 1 full width register name
80; @param 2 16-bit register name for \a 1.
81
82;; @def MYPOPSEGS
83; Macro restoring all segment registers from the stack.
84; @param 1 full width register name
85; @param 2 16-bit register name for \a 1.
86
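; Illustrative invocation (the real call sites are assumed to be in
; HWACCMR0Mixed.mac); the two parameters name the same register at different
; widths, e.g.:
;   MYPUSHSEGS xAX, ax   ; save segment registers (and the FS/GS base MSRs)
;   ...
;   MYPOPSEGS  xAX, ax   ; restore them in reverse order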
87%ifdef MAYBE_64_BIT
88 ; Save a host MSR and load the corresponding guest MSR (trashes rdx & rcx)
89 %macro LOADGUESTMSR 2
90 mov rcx, %1
91 rdmsr
92 push rdx
93 push rax
94 mov edx, dword [xSI + %2 + 4]
95 mov eax, dword [xSI + %2]
96 wrmsr
97 %endmacro
98
99 ; Save a guest MSR and load the corresponding host MSR (trashes rdx & rcx)
100 ; Only really useful for gs kernel base as that one can be changed behind our back (swapgs)
101 %macro LOADHOSTMSREX 2
102 mov rcx, %1
103 rdmsr
104 mov dword [xSI + %2], eax
105 mov dword [xSI + %2 + 4], edx
106 pop rax
107 pop rdx
108 wrmsr
109 %endmacro
110
111 ; Load the corresponding host MSR (trashes rdx & rcx)
112 %macro LOADHOSTMSR 1
113 mov rcx, %1
114 pop rax
115 pop rdx
116 wrmsr
117 %endmacro
118%endif
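; Illustrative pairing of the MSR macros above (the actual invocations are
; assumed to live in HWACCMR0Mixed.mac, and the CPUMCTX field name is an
; example only):
;   LOADGUESTMSR  MSR_K8_LSTAR, CPUMCTX.msrLSTAR  ; push host value, load guest value
;   ...                                           ; world switch
;   LOADHOSTMSREX MSR_K8_LSTAR, CPUMCTX.msrLSTAR  ; store guest value, pop host value
; Because each LOADGUESTMSR pushes rdx:rax, the matching LOADHOSTMSR(EX)
; invocations must be made in the reverse order of the LOADGUESTMSR ones.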
119
120%ifdef ASM_CALL64_GCC
121 %macro MYPUSHAD64 0
122 push r15
123 push r14
124 push r13
125 push r12
126 push rbx
127 %endmacro
128 %macro MYPOPAD64 0
129 pop rbx
130 pop r12
131 pop r13
132 pop r14
133 pop r15
134 %endmacro
135
136%else ; ASM_CALL64_MSC
137 %macro MYPUSHAD64 0
138 push r15
139 push r14
140 push r13
141 push r12
142 push rbx
143 push rsi
144 push rdi
145 %endmacro
146 %macro MYPOPAD64 0
147 pop rdi
148 pop rsi
149 pop rbx
150 pop r12
151 pop r13
152 pop r14
153 pop r15
154 %endmacro
155%endif
156
157; trashes rax, rdx & rcx
158%macro MYPUSHSEGS64 2
159 %ifndef HWACCM_64_BIT_USE_NULL_SEL
160 mov %2, es
161 push %1
162 mov %2, ds
163 push %1
164 %endif
165
166 ; Special case for FS: Windows and Linux either don't use it or restore it when leaving kernel mode; Solaris OTOH doesn't, so we must save it ourselves.
167 mov ecx, MSR_K8_FS_BASE
168 rdmsr
169 push rdx
170 push rax
171 %ifndef HWACCM_64_BIT_USE_NULL_SEL
172 push fs
173 %endif
174
175 ; Special case for GS: OSes typically use swapgs to reset the hidden base register for GS on entry into the kernel; the same happens on exit.
176 mov ecx, MSR_K8_GS_BASE
177 rdmsr
178 push rdx
179 push rax
180 %ifndef HWACCM_64_BIT_USE_NULL_SEL
181 push gs
182 %endif
183%endmacro
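; Stack layout produced by MYPUSHSEGS64 (top of stack last; the bracketed
; selector pushes are skipped when HWACCM_64_BIT_USE_NULL_SEL is defined):
;   [es] [ds] fs_base_hi fs_base_lo [fs] gs_base_hi gs_base_lo [gs]
; MYPOPSEGS64 below must therefore unwind in exactly the reverse order.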
184
185; trashes rax, rdx & rcx
186%macro MYPOPSEGS64 2
187 ; Note: do not step through this code with a debugger!
188 %ifndef HWACCM_64_BIT_USE_NULL_SEL
189 xor eax, eax
190 mov ds, ax
191 mov es, ax
192 mov fs, ax
193 mov gs, ax
194 %endif
195
196 %ifndef HWACCM_64_BIT_USE_NULL_SEL
197 pop gs
198 %endif
199 pop rax
200 pop rdx
201 mov ecx, MSR_K8_GS_BASE
202 wrmsr
203
204 %ifndef HWACCM_64_BIT_USE_NULL_SEL
205 pop fs
206 %endif
207 pop rax
208 pop rdx
209 mov ecx, MSR_K8_FS_BASE
210 wrmsr
211 ; Now it's safe to step again
212
213 %ifndef HWACCM_64_BIT_USE_NULL_SEL
214 pop %1
215 mov ds, %2
216 pop %1
217 mov es, %2
218 %endif
219%endmacro
220
221%macro MYPUSHAD32 0
222 pushad
223%endmacro
224%macro MYPOPAD32 0
225 popad
226%endmacro
227
228%macro MYPUSHSEGS32 2
229 push ds
230 push es
231 push fs
232 push gs
233%endmacro
234%macro MYPOPSEGS32 2
235 pop gs
236 pop fs
237 pop es
238 pop ds
239%endmacro
240
241
242;*******************************************************************************
243;* External Symbols *
244;*******************************************************************************
245%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
246extern NAME(SUPR0AbsIs64bit)
247extern NAME(SUPR0Abs64bitKernelCS)
248extern NAME(SUPR0Abs64bitKernelSS)
249extern NAME(SUPR0Abs64bitKernelDS)
250extern NAME(SUPR0AbsKernelCS)
251%endif
252
253
254;*******************************************************************************
255;* Global Variables *
256;*******************************************************************************
257%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
258BEGINDATA
259;;
260; Store the SUPR0AbsIs64bit absolute value here so we can cmp/test without
261; needing to clobber a register. (This trick doesn't quite work for PE btw.
262; but that's not relevant atm.)
263GLOBALNAME g_fVMXIs64bitHost
264 dd NAME(SUPR0AbsIs64bit)
265%endif
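;;
; How the flag is used below: each 32-bit entry point compares it against 0
; and, on a 64-bit darwin host, thunks into the 64-bit kernel code segment
; with a hand-assembled far jump (presumably because the assembler cannot
; express this form directly):
;   cmp   byte [NAME(g_fVMXIs64bitHost)], 0
;   jz    .legacy_mode
;   db    0xea                                            ; jmp far opcode
;   dd    .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS) ; 32-bit offset + selector
; The 64-bit code returns through "jmp far [.fpret wrt rip]" via a 16:32 pointer.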
266
267
268BEGINCODE
269
270
271;/**
272; * Executes VMWRITE, 64-bit value.
273; *
274; * @returns VBox status code
275; * @param idxField x86: [ebp + 08h] msc: rcx gcc: rdi VMCS index
276; * @param u64Data x86: [ebp + 0ch] msc: rdx gcc: rsi VM field value
277; */
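; Status convention used by this and the other VMX wrappers below: after a
; VMX instruction, CF=1 signals VMfailInvalid (no/invalid current VMCS, so
; VERR_VMX_INVALID_VMCS_PTR is returned) and ZF=1 signals VMfailValid (bad
; field, so VERR_VMX_INVALID_VMCS_FIELD is returned); with both flags clear
; the xor'd eax (VINF_SUCCESS) is returned unchanged.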
278ALIGNCODE(16)
279BEGINPROC VMXWriteVMCS64
280%ifdef RT_ARCH_AMD64
281 %ifdef ASM_CALL64_GCC
282 and edi, 0ffffffffh
283 xor rax, rax
284 vmwrite rdi, rsi
285 %else
286 and ecx, 0ffffffffh
287 xor rax, rax
288 vmwrite rcx, rdx
289 %endif
290%else ; RT_ARCH_X86
291 mov ecx, [esp + 4] ; idxField
292 lea edx, [esp + 8] ; &u64Data
293 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
294 cmp byte [NAME(g_fVMXIs64bitHost)], 0
295 jz .legacy_mode
296 db 0xea ; jmp far .sixtyfourbit_mode
297 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
298.legacy_mode:
299 %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
300 vmwrite ecx, [edx] ; low dword
301 jz .done
302 jc .done
303 inc ecx
304 xor eax, eax
305 vmwrite ecx, [edx + 4] ; high dword
306.done:
307%endif ; RT_ARCH_X86
308 jnc .valid_vmcs
309 mov eax, VERR_VMX_INVALID_VMCS_PTR
310 ret
311.valid_vmcs:
312 jnz .the_end
313 mov eax, VERR_VMX_INVALID_VMCS_FIELD
314.the_end:
315 ret
316
317%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
318ALIGNCODE(16)
319BITS 64
320.sixtyfourbit_mode:
321 and edx, 0ffffffffh
322 and ecx, 0ffffffffh
323 xor eax, eax
324 vmwrite rcx, [rdx]
325 mov r8d, VERR_VMX_INVALID_VMCS_FIELD
326 cmovz eax, r8d
327 mov r9d, VERR_VMX_INVALID_VMCS_PTR
328 cmovc eax, r9d
329 jmp far [.fpret wrt rip]
330.fpret: ; 16:32 Pointer to .the_end.
331 dd .the_end, NAME(SUPR0AbsKernelCS)
332BITS 32
333%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
334ENDPROC VMXWriteVMCS64
335
336
337;/**
338; * Executes VMREAD, 64-bit value
339; *
340; * @returns VBox status code
341; * @param idxField VMCS index
342; * @param pData Ptr to store VM field value
343; */
344;DECLASM(int) VMXReadVMCS64(uint32_t idxField, uint64_t *pData);
345ALIGNCODE(16)
346BEGINPROC VMXReadVMCS64
347%ifdef RT_ARCH_AMD64
348 %ifdef ASM_CALL64_GCC
349 and edi, 0ffffffffh
350 xor rax, rax
351 vmread [rsi], rdi
352 %else
353 and ecx, 0ffffffffh
354 xor rax, rax
355 vmread [rdx], rcx
356 %endif
357%else ; RT_ARCH_X86
358 mov ecx, [esp + 4] ; idxField
359 mov edx, [esp + 8] ; pData
360 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
361 cmp byte [NAME(g_fVMXIs64bitHost)], 0
362 jz .legacy_mode
363 db 0xea ; jmp far .sixtyfourbit_mode
364 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
365.legacy_mode:
366 %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
367 vmread [edx], ecx ; low dword
368 jz .done
369 jc .done
370 inc ecx
371 xor eax, eax
372 vmread [edx + 4], ecx ; high dword
373.done:
374%endif ; RT_ARCH_X86
375 jnc .valid_vmcs
376 mov eax, VERR_VMX_INVALID_VMCS_PTR
377 ret
378.valid_vmcs:
379 jnz .the_end
380 mov eax, VERR_VMX_INVALID_VMCS_FIELD
381.the_end:
382 ret
383
384%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
385ALIGNCODE(16)
386BITS 64
387.sixtyfourbit_mode:
388 and edx, 0ffffffffh
389 and ecx, 0ffffffffh
390 xor eax, eax
391 vmread [rdx], rcx
392 mov r8d, VERR_VMX_INVALID_VMCS_FIELD
393 cmovz eax, r8d
394 mov r9d, VERR_VMX_INVALID_VMCS_PTR
395 cmovc eax, r9d
396 jmp far [.fpret wrt rip]
397.fpret: ; 16:32 Pointer to .the_end.
398 dd .the_end, NAME(SUPR0AbsKernelCS)
399BITS 32
400%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
401ENDPROC VMXReadVMCS64
402
403
404;/**
405; * Executes VMREAD, 32-bit value.
406; *
407; * @returns VBox status code
408; * @param idxField VMCS index
409; * @param pu32Data Ptr to store VM field value
410; */
411;DECLASM(int) VMXReadVMCS32(uint32_t idxField, uint32_t *pu32Data);
412ALIGNCODE(16)
413BEGINPROC VMXReadVMCS32
414%ifdef RT_ARCH_AMD64
415 %ifdef ASM_CALL64_GCC
416 and edi, 0ffffffffh
417 xor rax, rax
418 vmread r10, rdi
419 mov [rsi], r10d
420 %else
421 and ecx, 0ffffffffh
422 xor rax, rax
423 vmread r10, rcx
424 mov [rdx], r10d
425 %endif
426%else ; RT_ARCH_X86
427 mov ecx, [esp + 4] ; idxField
428 mov edx, [esp + 8] ; pu32Data
429 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
430 cmp byte [NAME(g_fVMXIs64bitHost)], 0
431 jz .legacy_mode
432 db 0xea ; jmp far .sixtyfourbit_mode
433 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
434.legacy_mode:
435 %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
436 xor eax, eax
437 vmread [edx], ecx
438%endif ; RT_ARCH_X86
439 jnc .valid_vmcs
440 mov eax, VERR_VMX_INVALID_VMCS_PTR
441 ret
442.valid_vmcs:
443 jnz .the_end
444 mov eax, VERR_VMX_INVALID_VMCS_FIELD
445.the_end:
446 ret
447
448%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
449ALIGNCODE(16)
450BITS 64
451.sixtyfourbit_mode:
452 and edx, 0ffffffffh
453 and ecx, 0ffffffffh
454 xor eax, eax
455 vmread r10, rcx
456 mov [rdx], r10d
457 mov r8d, VERR_VMX_INVALID_VMCS_FIELD
458 cmovz eax, r8d
459 mov r9d, VERR_VMX_INVALID_VMCS_PTR
460 cmovc eax, r9d
461 jmp far [.fpret wrt rip]
462.fpret: ; 16:32 Pointer to .the_end.
463 dd .the_end, NAME(SUPR0AbsKernelCS)
464BITS 32
465%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
466ENDPROC VMXReadVMCS32
467
468
469;/**
470; * Executes VMWRITE, 32-bit value.
471; *
472; * @returns VBox status code
473; * @param idxField VMCS index
474; * @param u32Data 32-bit value to write into the VM field
475; */
476;DECLASM(int) VMXWriteVMCS32(uint32_t idxField, uint32_t u32Data);
477ALIGNCODE(16)
478BEGINPROC VMXWriteVMCS32
479%ifdef RT_ARCH_AMD64
480 %ifdef ASM_CALL64_GCC
481 and edi, 0ffffffffh
482 and esi, 0ffffffffh
483 xor rax, rax
484 vmwrite rdi, rsi
485 %else
486 and ecx, 0ffffffffh
487 and edx, 0ffffffffh
488 xor rax, rax
489 vmwrite rcx, rdx
490 %endif
491%else ; RT_ARCH_X86
492 mov ecx, [esp + 4] ; idxField
493 mov edx, [esp + 8] ; u32Data
494 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
495 cmp byte [NAME(g_fVMXIs64bitHost)], 0
496 jz .legacy_mode
497 db 0xea ; jmp far .sixtyfourbit_mode
498 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
499.legacy_mode:
500 %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
501 xor eax, eax
502 vmwrite ecx, edx
503%endif ; RT_ARCH_X86
504 jnc .valid_vmcs
505 mov eax, VERR_VMX_INVALID_VMCS_PTR
506 ret
507.valid_vmcs:
508 jnz .the_end
509 mov eax, VERR_VMX_INVALID_VMCS_FIELD
510.the_end:
511 ret
512
513%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
514ALIGNCODE(16)
515BITS 64
516.sixtyfourbit_mode:
517 and edx, 0ffffffffh
518 and ecx, 0ffffffffh
519 xor eax, eax
520 vmwrite rcx, rdx
521 mov r8d, VERR_VMX_INVALID_VMCS_FIELD
522 cmovz eax, r8d
523 mov r9d, VERR_VMX_INVALID_VMCS_PTR
524 cmovc eax, r9d
525 jmp far [.fpret wrt rip]
526.fpret: ; 16:32 Pointer to .the_end.
527 dd .the_end, NAME(SUPR0AbsKernelCS)
528BITS 32
529%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
530ENDPROC VMXWriteVMCS32
531
532
533;/**
534; * Executes VMXON
535; *
536; * @returns VBox status code
537; * @param HCPhysVMXOn Physical address of VMXON structure
538; */
539;DECLASM(int) VMXEnable(RTHCPHYS HCPhysVMXOn);
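; Note: the caller is assumed to have set CR4.VMXE and to have initialized
; the VMXON region with the VMCS revision identifier beforehand; otherwise
; VMXON faults or fails regardless of the physical address passed in here.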
540BEGINPROC VMXEnable
541%ifdef RT_ARCH_AMD64
542 xor rax, rax
543 %ifdef ASM_CALL64_GCC
544 push rdi
545 %else
546 push rcx
547 %endif
548 vmxon [rsp]
549%else ; RT_ARCH_X86
550 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
551 cmp byte [NAME(g_fVMXIs64bitHost)], 0
552 jz .legacy_mode
553 db 0xea ; jmp far .sixtyfourbit_mode
554 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
555.legacy_mode:
556 %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
557 xor eax, eax
558 vmxon [esp + 4]
559%endif ; RT_ARCH_X86
560 jnc .good
561 mov eax, VERR_VMX_INVALID_VMXON_PTR
562 jmp .the_end
563
564.good:
565 jnz .the_end
566 mov eax, VERR_VMX_GENERIC
567
568.the_end:
569%ifdef RT_ARCH_AMD64
570 add rsp, 8
571%endif
572 ret
573
574%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
575ALIGNCODE(16)
576BITS 64
577.sixtyfourbit_mode:
578 lea rdx, [rsp + 4] ; &HCPhysVMXOn.
579 and edx, 0ffffffffh
580 xor eax, eax
581 vmxon [rdx]
582 mov r8d, VERR_INVALID_PARAMETER
583 cmovz eax, r8d
584 mov r9d, VERR_VMX_INVALID_VMCS_PTR
585 cmovc eax, r9d
586 jmp far [.fpret wrt rip]
587.fpret: ; 16:32 Pointer to .the_end.
588 dd .the_end, NAME(SUPR0AbsKernelCS)
589BITS 32
590%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
591ENDPROC VMXEnable
592
593
594;/**
595; * Executes VMXOFF
596; */
597;DECLASM(void) VMXDisable(void);
598BEGINPROC VMXDisable
599%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
600 cmp byte [NAME(g_fVMXIs64bitHost)], 0
601 jz .legacy_mode
602 db 0xea ; jmp far .sixtyfourbit_mode
603 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
604.legacy_mode:
605%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
606 vmxoff
607.the_end:
608 ret
609
610%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
611ALIGNCODE(16)
612BITS 64
613.sixtyfourbit_mode:
614 vmxoff
615 jmp far [.fpret wrt rip]
616.fpret: ; 16:32 Pointer to .the_end.
617 dd .the_end, NAME(SUPR0AbsKernelCS)
618BITS 32
619%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
620ENDPROC VMXDisable
621
622
623;/**
624; * Executes VMCLEAR
625; *
626; * @returns VBox status code
627; * @param HCPhysVMCS Physical address of VM control structure
628; */
629;DECLASM(int) VMXClearVMCS(RTHCPHYS HCPhysVMCS);
630ALIGNCODE(16)
631BEGINPROC VMXClearVMCS
632%ifdef RT_ARCH_AMD64
633 xor rax, rax
634 %ifdef ASM_CALL64_GCC
635 push rdi
636 %else
637 push rcx
638 %endif
639 vmclear [rsp]
640%else ; RT_ARCH_X86
641 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
642 cmp byte [NAME(g_fVMXIs64bitHost)], 0
643 jz .legacy_mode
644 db 0xea ; jmp far .sixtyfourbit_mode
645 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
646.legacy_mode:
647 %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
648 xor eax, eax
649 vmclear [esp + 4]
650%endif ; RT_ARCH_X86
651 jnc .the_end
652 mov eax, VERR_VMX_INVALID_VMCS_PTR
653.the_end:
654%ifdef RT_ARCH_AMD64
655 add rsp, 8
656%endif
657 ret
658
659%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
660ALIGNCODE(16)
661BITS 64
662.sixtyfourbit_mode:
663 lea rdx, [rsp + 4] ; &HCPhysVMCS
664 and edx, 0ffffffffh
665 xor eax, eax
666 vmclear [rdx]
667 mov r9d, VERR_VMX_INVALID_VMCS_PTR
668 cmovc eax, r9d
669 jmp far [.fpret wrt rip]
670.fpret: ; 16:32 Pointer to .the_end.
671 dd .the_end, NAME(SUPR0AbsKernelCS)
672BITS 32
673%endif
674ENDPROC VMXClearVMCS
675
676
677;/**
678; * Executes VMPTRLD
679; *
680; * @returns VBox status code
681; * @param HCPhysVMCS Physical address of VMCS structure
682; */
683;DECLASM(int) VMXActivateVMCS(RTHCPHYS HCPhysVMCS);
684ALIGNCODE(16)
685BEGINPROC VMXActivateVMCS
686%ifdef RT_ARCH_AMD64
687 xor rax, rax
688 %ifdef ASM_CALL64_GCC
689 push rdi
690 %else
691 push rcx
692 %endif
693 vmptrld [rsp]
694%else
695 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
696 cmp byte [NAME(g_fVMXIs64bitHost)], 0
697 jz .legacy_mode
698 db 0xea ; jmp far .sixtyfourbit_mode
699 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
700.legacy_mode:
701 %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
702 xor eax, eax
703 vmptrld [esp + 4]
704%endif
705 jnc .the_end
706 mov eax, VERR_VMX_INVALID_VMCS_PTR
707.the_end:
708%ifdef RT_ARCH_AMD64
709 add rsp, 8
710%endif
711 ret
712
713%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
714ALIGNCODE(16)
715BITS 64
716.sixtyfourbit_mode:
717 lea rdx, [rsp + 4] ; &HCPhysVMCS
718 and edx, 0ffffffffh
719 xor eax, eax
720 vmptrld [rdx]
721 mov r9d, VERR_VMX_INVALID_VMCS_PTR
722 cmovc eax, r9d
723 jmp far [.fpret wrt rip]
724.fpret: ; 16:32 Pointer to .the_end.
725 dd .the_end, NAME(SUPR0AbsKernelCS)
726BITS 32
727%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
728ENDPROC VMXActivateVMCS
729
730
731;/**
732; * Executes VMPTRST
733; *
734; * @returns VBox status code
735; * @param pVMCS x86:[esp + 04h] gcc:rdi msc:rcx Address that will receive the current VMCS pointer
736; */
737;DECLASM(int) VMXGetActivateVMCS(RTHCPHYS *pVMCS);
738BEGINPROC VMXGetActivateVMCS
739%ifdef RT_OS_OS2
740 mov eax, VERR_NOT_SUPPORTED
741 ret
742%else
743 %ifdef RT_ARCH_AMD64
744 %ifdef ASM_CALL64_GCC
745 vmptrst qword [rdi]
746 %else
747 vmptrst qword [rcx]
748 %endif
749 %else
750 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
751 cmp byte [NAME(g_fVMXIs64bitHost)], 0
752 jz .legacy_mode
753 db 0xea ; jmp far .sixtyfourbit_mode
754 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
755.legacy_mode:
756 %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
757 vmptrst qword [esp+04h]
758 %endif
759 xor eax, eax
760.the_end:
761 ret
762
763 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
764ALIGNCODE(16)
765BITS 64
766.sixtyfourbit_mode:
767 lea rdx, [rsp + 4] ; &HCPhysVMCS
768 and edx, 0ffffffffh
769 vmptrst qword [rdx]
770 xor eax, eax
771 jmp far [.fpret wrt rip]
772.fpret: ; 16:32 Pointer to .the_end.
773 dd .the_end, NAME(SUPR0AbsKernelCS)
774BITS 32
775 %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
776%endif
777ENDPROC VMXGetActivateVMCS
778
779;/**
780; * Invalidate a page using invept
781; * @param enmFlush msc:ecx gcc:edi x86:[esp+04] Type of flush
782; * @param pDescriptor msc:edx gcc:esi x86:[esp+08] Descriptor pointer
783; */
784;DECLASM(int) VMXR0InvEPT(VMX_FLUSH enmFlush, uint64_t *pDescriptor);
785BEGINPROC VMXR0InvEPT
786%ifdef RT_ARCH_AMD64
787 %ifdef ASM_CALL64_GCC
788 and edi, 0ffffffffh
789 xor rax, rax
790; invept rdi, qword [rsi]
791 DB 0x66, 0x0F, 0x38, 0x80, 0x3E
792 %else
793 and ecx, 0ffffffffh
794 xor rax, rax
795; invept rcx, qword [rdx]
796 DB 0x66, 0x0F, 0x38, 0x80, 0xA
797 %endif
798%else
799 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
800 cmp byte [NAME(g_fVMXIs64bitHost)], 0
801 jz .legacy_mode
802 db 0xea ; jmp far .sixtyfourbit_mode
803 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
804.legacy_mode:
805 %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
806 mov eax, [esp + 4]
807 mov ecx, [esp + 8]
808; invept eax, qword [ecx]
809 DB 0x66, 0x0F, 0x38, 0x80, 0x1
810%endif
811 jnc .valid_vmcs
812 mov eax, VERR_VMX_INVALID_VMCS_PTR
813 ret
814.valid_vmcs:
815 jnz .the_end
816 mov eax, VERR_INVALID_PARAMETER
817.the_end:
818 ret
819
820%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
821ALIGNCODE(16)
822BITS 64
823.sixtyfourbit_mode:
824 and esp, 0ffffffffh
825 mov ecx, [rsp + 4] ; enmFlush
826 mov edx, [rsp + 8] ; pDescriptor
827 xor eax, eax
828; invept rcx, qword [rdx]
829 DB 0x66, 0x0F, 0x38, 0x80, 0xA
830 mov r8d, VERR_INVALID_PARAMETER
831 cmovz eax, r8d
832 mov r9d, VERR_VMX_INVALID_VMCS_PTR
833 cmovc eax, r9d
834 jmp far [.fpret wrt rip]
835.fpret: ; 16:32 Pointer to .the_end.
836 dd .the_end, NAME(SUPR0AbsKernelCS)
837BITS 32
838%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
839ENDPROC VMXR0InvEPT
840
841
842;/**
843; * Invalidate a page using invvpid
844; * @param enmFlush msc:ecx gcc:edi x86:[esp+04] Type of flush
845; * @param pDescriptor msc:edx gcc:esi x86:[esp+08] Descriptor pointer
846; */
847;DECLASM(int) VMXR0InvVPID(VMX_FLUSH enmFlush, uint64_t *pDescriptor);
848BEGINPROC VMXR0InvVPID
849%ifdef RT_ARCH_AMD64
850 %ifdef ASM_CALL64_GCC
851 and edi, 0ffffffffh
852 xor rax, rax
853 ;invvpid rdi, qword [rsi]
854 DB 0x66, 0x0F, 0x38, 0x81, 0x3E
855 %else
856 and ecx, 0ffffffffh
857 xor rax, rax
858; invvpid rcx, qword [rdx]
859 DB 0x66, 0x0F, 0x38, 0x81, 0xA
860 %endif
861%else
862 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
863 cmp byte [NAME(g_fVMXIs64bitHost)], 0
864 jz .legacy_mode
865 db 0xea ; jmp far .sixtyfourbit_mode
866 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
867.legacy_mode:
868 %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
869 mov eax, [esp + 4]
870 mov ecx, [esp + 8]
871; invvpid eax, qword [ecx]
872 DB 0x66, 0x0F, 0x38, 0x81, 0x1
873%endif
874 jnc .valid_vmcs
875 mov eax, VERR_VMX_INVALID_VMCS_PTR
876 ret
877.valid_vmcs:
878 jnz .the_end
879 mov eax, VERR_INVALID_PARAMETER
880.the_end:
881 ret
882
883%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
884ALIGNCODE(16)
885BITS 64
886.sixtyfourbit_mode:
887 and esp, 0ffffffffh
888 mov ecx, [rsp + 4] ; enmFlush
889 mov edx, [rsp + 8] ; pDescriptor
890 xor eax, eax
891; invvpid rcx, qword [rdx]
892 DB 0x66, 0x0F, 0x38, 0x81, 0xA
893 mov r8d, VERR_INVALID_PARAMETER
894 cmovz eax, r8d
895 mov r9d, VERR_VMX_INVALID_VMCS_PTR
896 cmovc eax, r9d
897 jmp far [.fpret wrt rip]
898.fpret: ; 16:32 Pointer to .the_end.
899 dd .the_end, NAME(SUPR0AbsKernelCS)
900BITS 32
901%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
902ENDPROC VMXR0InvVPID
903
904
905%if GC_ARCH_BITS == 64
906;;
907; Executes INVLPGA
908;
909; @param pPageGC msc:rcx gcc:rdi x86:[esp+04] Virtual page to invalidate
910; @param uASID msc:rdx gcc:rsi x86:[esp+0C] Tagged TLB id
911;
912;DECLASM(void) SVMR0InvlpgA(RTGCPTR pPageGC, uint32_t uASID);
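; INVLPGA invalidates the TLB mapping for the virtual address in rAX that is
; tagged with the ASID in ECX, so the code below only has to marshal the two
; arguments into those registers.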
913BEGINPROC SVMR0InvlpgA
914%ifdef RT_ARCH_AMD64
915 %ifdef ASM_CALL64_GCC
916 mov rax, rdi
917 mov rcx, rsi
918 %else
919 ; from http://www.cs.cmu.edu/~fp/courses/15213-s06/misc/asm64-handout.pdf:
920 ; ``Perhaps unexpectedly, instructions that move or generate 32-bit register
921 ; values also set the upper 32 bits of the register to zero. Consequently
922 ; there is no need for an instruction movzlq.''
923 mov eax, ecx
924 mov rcx, rdx
925 %endif
926%else
927 mov eax, [esp + 4]
928 mov ecx, [esp + 0Ch]
929%endif
930 invlpga [xAX], ecx
931 ret
932ENDPROC SVMR0InvlpgA
933
934%else ; GC_ARCH_BITS != 64
935;;
936; Executes INVLPGA
937;
938; @param pPageGC msc:ecx gcc:edi x86:[esp+04] Virtual page to invalidate
939; @param uASID msc:edx gcc:esi x86:[esp+08] Tagged TLB id
940;
941;DECLASM(void) SVMR0InvlpgA(RTGCPTR pPageGC, uint32_t uASID);
942BEGINPROC SVMR0InvlpgA
943%ifdef RT_ARCH_AMD64
944 %ifdef ASM_CALL64_GCC
945 movzx rax, edi
946 mov ecx, esi
947 %else
948 ; from http://www.cs.cmu.edu/~fp/courses/15213-s06/misc/asm64-handout.pdf:
949 ; ``Perhaps unexpectedly, instructions that move or generate 32-bit register
950 ; values also set the upper 32 bits of the register to zero. Consequently
951 ; there is no need for an instruction movzlq.''
952 mov eax, ecx
953 mov ecx, edx
954 %endif
955%else
956 mov eax, [esp + 4]
957 mov ecx, [esp + 8]
958%endif
959 invlpga [xAX], ecx
960 ret
961ENDPROC SVMR0InvlpgA
962
963%endif ; GC_ARCH_BITS != 64
964
965%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
966
967;/**
968; * Gets 64-bit GDTR and IDTR on darwin.
969; * @param pGdtr Where to store the 64-bit GDTR.
970; * @param pIdtr Where to store the 64-bit IDTR.
971; */
972;DECLASM(void) hwaccmR0Get64bitGDTRandIDTR(PX86XDTR64 pGdtr, PX86XDTR64 pIdtr);
973ALIGNCODE(16)
974BEGINPROC hwaccmR0Get64bitGDTRandIDTR
975 db 0xea ; jmp far .sixtyfourbit_mode
976 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
977.the_end:
978 ret
979
980ALIGNCODE(16)
981BITS 64
982.sixtyfourbit_mode:
983 and esp, 0ffffffffh
984 mov ecx, [rsp + 4] ; pGdtr
985 mov edx, [rsp + 8] ; pIdtr
986 sgdt [rcx]
987 sidt [rdx]
988 jmp far [.fpret wrt rip]
989.fpret: ; 16:32 Pointer to .the_end.
990 dd .the_end, NAME(SUPR0AbsKernelCS)
991BITS 32
992ENDPROC hwaccmR0Get64bitGDTRandIDTR
993
994
995;/**
996; * Gets 64-bit CR3 on darwin.
997; * @returns CR3
998; */
999;DECLASM(uint64_t) hwaccmR0Get64bitCR3(void);
1000ALIGNCODE(16)
1001BEGINPROC hwaccmR0Get64bitCR3
1002 db 0xea ; jmp far .sixtyfourbit_mode
1003 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
1004.the_end:
1005 ret
1006
1007ALIGNCODE(16)
1008BITS 64
1009.sixtyfourbit_mode:
1010 mov rax, cr3
1011 mov rdx, rax
1012 shr rdx, 32
1013 jmp far [.fpret wrt rip]
1014.fpret: ; 16:32 Pointer to .the_end.
1015 dd .the_end, NAME(SUPR0AbsKernelCS)
1016BITS 32
1017ENDPROC hwaccmR0Get64bitCR3
1018
1019%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
1020
1021
1022
1023;
1024; The default setup of the StartVM routines.
1025;
1026%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
1027 %define MY_NAME(name) name %+ _32
1028%else
1029 %define MY_NAME(name) name
1030%endif
1031%ifdef RT_ARCH_AMD64
1032 %define MYPUSHAD MYPUSHAD64
1033 %define MYPOPAD MYPOPAD64
1034 %define MYPUSHSEGS MYPUSHSEGS64
1035 %define MYPOPSEGS MYPOPSEGS64
1036%else
1037 %define MYPUSHAD MYPUSHAD32
1038 %define MYPOPAD MYPOPAD32
1039 %define MYPUSHSEGS MYPUSHSEGS32
1040 %define MYPOPSEGS MYPOPSEGS32
1041%endif
1042
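; HWACCMR0Mixed.mac is expected to provide the actual world-switch bodies
; (VMXR0StartVM32/VMXR0StartVM64 and SVMR0VMRun/SVMR0VMRun64) in terms of
; MY_NAME() and the MYPUSHAD/MYPOPAD/MYPUSHSEGS/MYPOPSEGS selections above;
; on hybrid 32-bit kernels it is included a second time at the end of this
; file to generate the _64 variants which the wrappers below dispatch to.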
1043%include "HWACCMR0Mixed.mac"
1044
1045
1046%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
1047 ;
1048 ; Write the wrapper procedures.
1049 ;
1050 ; These routines are probably being too paranoid about selector
1051 ; restoring, but better safe than sorry...
1052 ;
1053
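; Common shape of the wrappers below: save esi/edi/fs/gs, far-jump into the
; 64-bit kernel code segment, re-load the 32-bit stack arguments into
; edi/esi/edx (the second HWACCMR0Mixed.mac instantiation is built with
; ASM_CALL64_GCC, so those are its argument registers), call the _64 flavour
; and far-jump back to 32-bit code.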
1054; DECLASM(int) VMXR0StartVM32(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache /*, PVM pVM, PVMCPU pVCpu*/);
1055ALIGNCODE(16)
1056BEGINPROC VMXR0StartVM32
1057 cmp byte [NAME(g_fVMXIs64bitHost)], 0
1058 je near NAME(VMXR0StartVM32_32)
1059
1060 ; stack frame
1061 push esi
1062 push edi
1063 push fs
1064 push gs
1065
1066 ; jmp far .thunk64
1067 db 0xea
1068 dd .thunk64, NAME(SUPR0Abs64bitKernelCS)
1069
1070ALIGNCODE(16)
1071BITS 64
1072.thunk64:
1073 sub esp, 20h
1074 mov edi, [rsp + 20h + 14h] ; fResume
1075 mov esi, [rsp + 20h + 18h] ; pCtx
1076 mov edx, [rsp + 20h + 1Ch] ; pCache
1077 call NAME(VMXR0StartVM32_64)
1078 add esp, 20h
1079 jmp far [.fpthunk32 wrt rip]
1080.fpthunk32: ; 16:32 Pointer to .thunk32.
1081 dd .thunk32, NAME(SUPR0AbsKernelCS)
1082
1083BITS 32
1084ALIGNCODE(16)
1085.thunk32:
1086 pop gs
1087 pop fs
1088 pop edi
1089 pop esi
1090 ret
1091ENDPROC VMXR0StartVM32
1092
1093
1094; DECLASM(int) VMXR0StartVM64(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache /*, PVM pVM, PVMCPU pVCpu*/);
1095ALIGNCODE(16)
1096BEGINPROC VMXR0StartVM64
1097 cmp byte [NAME(g_fVMXIs64bitHost)], 0
1098 je .not_in_long_mode
1099
1100 ; stack frame
1101 push esi
1102 push edi
1103 push fs
1104 push gs
1105
1106 ; jmp far .thunk64
1107 db 0xea
1108 dd .thunk64, NAME(SUPR0Abs64bitKernelCS)
1109
1110ALIGNCODE(16)
1111BITS 64
1112.thunk64:
1113 sub esp, 20h
1114 mov edi, [rsp + 20h + 14h] ; fResume
1115 mov esi, [rsp + 20h + 18h] ; pCtx
1116 mov edx, [rsp + 20h + 1Ch] ; pCache
1117 call NAME(VMXR0StartVM64_64)
1118 add esp, 20h
1119 jmp far [.fpthunk32 wrt rip]
1120.fpthunk32: ; 16:32 Pointer to .thunk32.
1121 dd .thunk32, NAME(SUPR0AbsKernelCS)
1122
1123BITS 32
1124ALIGNCODE(16)
1125.thunk32:
1126 pop gs
1127 pop fs
1128 pop edi
1129 pop esi
1130 ret
1131
1132.not_in_long_mode:
1133 mov eax, VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE
1134 ret
1135ENDPROC VMXR0StartVM64
1136
1137;DECLASM(int) SVMR0VMRun(RTHCPHYS pVMCBHostPhys, RTHCPHYS pVMCBPhys, PCPUMCTX pCtx /*, PVM pVM, PVMCPU pVCpu*/);
1138ALIGNCODE(16)
1139BEGINPROC SVMR0VMRun
1140 cmp byte [NAME(g_fVMXIs64bitHost)], 0
1141 je near NAME(SVMR0VMRun_32)
1142
1143 ; stack frame
1144 push esi
1145 push edi
1146 push fs
1147 push gs
1148
1149 ; jmp far .thunk64
1150 db 0xea
1151 dd .thunk64, NAME(SUPR0Abs64bitKernelCS)
1152
1153ALIGNCODE(16)
1154BITS 64
1155.thunk64:
1156 sub esp, 20h
1157 mov rdi, [rsp + 20h + 14h] ; pVMCBHostPhys
1158 mov rsi, [rsp + 20h + 1Ch] ; pVMCBPhys
1159 mov edx, [rsp + 20h + 24h] ; pCtx
1160 call NAME(SVMR0VMRun_64)
1161 add esp, 20h
1162 jmp far [.fpthunk32 wrt rip]
1163.fpthunk32: ; 16:32 Pointer to .thunk32.
1164 dd .thunk32, NAME(SUPR0AbsKernelCS)
1165
1166BITS 32
1167ALIGNCODE(16)
1168.thunk32:
1169 pop gs
1170 pop fs
1171 pop edi
1172 pop esi
1173 ret
1174ENDPROC SVMR0VMRun
1175
1176
1177; DECLASM(int) SVMR0VMRun64(RTHCPHYS pVMCBHostPhys, RTHCPHYS pVMCBPhys, PCPUMCTX pCtx /*, PVM pVM, PVMCPU pVCpu*/);
1178ALIGNCODE(16)
1179BEGINPROC SVMR0VMRun64
1180 cmp byte [NAME(g_fVMXIs64bitHost)], 0
1181 je .not_in_long_mode
1182
1183 ; stack frame
1184 push esi
1185 push edi
1186 push fs
1187 push gs
1188
1189 ; jmp far .thunk64
1190 db 0xea
1191 dd .thunk64, NAME(SUPR0Abs64bitKernelCS)
1192
1193ALIGNCODE(16)
1194BITS 64
1195.thunk64:
1196 sub esp, 20h
1197 mov rdi, [rsp + 20h + 14h] ; pVMCBHostPhys
1198 mov rsi, [rsp + 20h + 1Ch] ; pVMCBPhys
1199 mov edx, [rsp + 20h + 24h] ; pCtx
1200 call NAME(SVMR0VMRun64_64)
1201 add esp, 20h
1202 jmp far [.fpthunk32 wrt rip]
1203.fpthunk32: ; 16:32 Pointer to .thunk32.
1204 dd .thunk32, NAME(SUPR0AbsKernelCS)
1205
1206BITS 32
1207ALIGNCODE(16)
1208.thunk32:
1209 pop gs
1210 pop fs
1211 pop edi
1212 pop esi
1213 ret
1214
1215.not_in_long_mode:
1216 mov eax, VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE
1217 ret
1218ENDPROC SVMR0VMRun64
1219
1220 ;
1221 ; Do it a second time pretending we're a 64-bit host.
1222 ;
1223 ; This *HAS* to be done at the very end of the file to avoid restoring
1224 ; macros. So, add new code *BEFORE* this mess.
1225 ;
1226 BITS 64
1227 %undef RT_ARCH_X86
1228 %define RT_ARCH_AMD64
1229 %undef ASM_CALL64_MSC
1230 %define ASM_CALL64_GCC
1231 %define xS 8
1232 %define xSP rsp
1233 %define xBP rbp
1234 %define xAX rax
1235 %define xBX rbx
1236 %define xCX rcx
1237 %define xDX rdx
1238 %define xDI rdi
1239 %define xSI rsi
1240 %define MY_NAME(name) name %+ _64
1241 %define MYPUSHAD MYPUSHAD64
1242 %define MYPOPAD MYPOPAD64
1243 %define MYPUSHSEGS MYPUSHSEGS64
1244 %define MYPOPSEGS MYPOPSEGS64
1245
1246 %include "HWACCMR0Mixed.mac"
1247%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL