VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/HWACCMR0A.asm@18768

Last change on this file since 18768 was 15415, checked in by vboxsync, 16 years ago

HWACCMR0.asm: trimmed down the three other Run wrappers.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 30.7 KB
1; $Id: HWACCMR0A.asm 15415 2008-12-13 05:29:54Z vboxsync $
2;; @file
3; VMXM - R0 vmx helpers
4;
5
6;
7; Copyright (C) 2006-2007 Sun Microsystems, Inc.
8;
9; This file is part of VirtualBox Open Source Edition (OSE), as
10; available from http://www.virtualbox.org. This file is free software;
11; you can redistribute it and/or modify it under the terms of the GNU
12; General Public License (GPL) as published by the Free Software
13; Foundation, in version 2 as it comes in the "COPYING" file of the
14; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16;
17; Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18; Clara, CA 95054 USA or visit http://www.sun.com if you need
19; additional information or have any questions.
20;
21
22;*******************************************************************************
23;* Header Files *
24;*******************************************************************************
25%include "VBox/asmdefs.mac"
26%include "VBox/err.mac"
27%include "VBox/hwacc_vmx.mac"
28%include "VBox/cpum.mac"
29%include "VBox/x86.mac"
30%include "../HWACCMInternal.mac"
31
32%ifdef RT_OS_OS2 ;; @todo fix OMF support in yasm and kick nasm out completely.
33 %macro vmwrite 2,
34 int3
35 %endmacro
36 %define vmlaunch int3
37 %define vmresume int3
38 %define vmsave int3
39 %define vmload int3
40 %define vmrun int3
41 %define clgi int3
42 %define stgi int3
43 %macro invlpga 2,
44 int3
45 %endmacro
46%endif
47
48;*******************************************************************************
49;* Defined Constants And Macros *
50;*******************************************************************************
51%ifdef RT_ARCH_AMD64
52 %define MAYBE_64_BIT
53%endif
54%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
55 %define MAYBE_64_BIT
56%endif
57
58
59;; This is too risky wrt. stability, performance and correctness.
60;%define VBOX_WITH_DR6_EXPERIMENT 1
61
62;; @def MYPUSHAD
63; Macro generating an equivalent to pushad
64
65;; @def MYPOPAD
66; Macro generating an equivalent to popad
67
68;; @def MYPUSHSEGS
69; Macro saving all segment registers on the stack.
70; @param 1 full width register name
71; @param 2 16-bit register name for \a 1.
72
73;; @def MYPOPSEGS
74; Macro restoring all segment registers from the stack.
75; @param 1 full width register name
76; @param 2 16-bit register name for \a 1.
77
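; A rough usage sketch (illustrative only, not code from this file): the four
; macros above are meant to bracket a world-switch routine roughly like this,
; with the xAX/ax pair standing in for the full-width/16-bit register names
; the segment macros take as parameters 1 and 2:
;       MYPUSHAD                    ; save host general purpose registers
;       MYPUSHSEGS xAX, ax          ; save host segment registers (and FS/GS bases)
;       ...                         ; load guest state and enter the guest
;       MYPOPSEGS  xAX, ax          ; restore host segment registers
;       MYPOPAD                     ; restore host general purpose registers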
78%ifdef MAYBE_64_BIT
79 ; Save a host MSR and load the corresponding guest MSR (trashes rdx & rcx)
80 %macro LOADGUESTMSR 2
81 mov rcx, %1
82 rdmsr
83 push rdx
84 push rax
85 mov edx, dword [xSI + %2 + 4]
86 mov eax, dword [xSI + %2]
87 wrmsr
88 %endmacro
89
90 ; Save a guest MSR and load the corresponding host MSR (trashes rdx & rcx)
91 ; Only really useful for gs kernel base as that one can be changed behind our back (swapgs)
92 %macro LOADHOSTMSREX 2
93 mov rcx, %1
94 rdmsr
95 mov dword [xSI + %2], eax
96 mov dword [xSI + %2 + 4], edx
97 pop rax
98 pop rdx
99 wrmsr
100 %endmacro
101
102 ; Load the corresponding host MSR (trashes rdx & rcx)
103 %macro LOADHOSTMSR 1
104 mov rcx, %1
105 pop rax
106 pop rdx
107 wrmsr
108 %endmacro
109%endif
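; Illustrative pairing of the three macros above (a sketch, not code from this
; file; the MSR_K8_* constant and the CPUMCTX field are picked here only as
; plausible examples). The host values live on the stack, so restores must run
; in the reverse order of the saves, and xSI must point to the guest context:
;       LOADGUESTMSR  MSR_K8_LSTAR, CPUMCTX.msrLSTAR    ; push host value, load guest value
;       ...                                             ; run the guest
;       LOADHOSTMSREX MSR_K8_LSTAR, CPUMCTX.msrLSTAR    ; save guest value, pop and restore host value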
110
111%ifdef ASM_CALL64_GCC
112 %macro MYPUSHAD64 0
113 push r15
114 push r14
115 push r13
116 push r12
117 push rbx
118 %endmacro
119 %macro MYPOPAD64 0
120 pop rbx
121 pop r12
122 pop r13
123 pop r14
124 pop r15
125 %endmacro
126
127%else ; ASM_CALL64_MSC
128 %macro MYPUSHAD64 0
129 push r15
130 push r14
131 push r13
132 push r12
133 push rbx
134 push rsi
135 push rdi
136 %endmacro
137 %macro MYPOPAD64 0
138 pop rdi
139 pop rsi
140 pop rbx
141 pop r12
142 pop r13
143 pop r14
144 pop r15
145 %endmacro
146%endif
147
148; trashes rax, rdx & rcx
149%macro MYPUSHSEGS64 2
150 mov %2, es
151 push %1
152 mov %2, ds
153 push %1
154
155 ; Special case for FS: Windows and Linux either don't use it or restore it when leaving kernel mode; Solaris OTOH doesn't, so we must save (and later restore) it ourselves.
156 mov ecx, MSR_K8_FS_BASE
157 rdmsr
158 push rdx
159 push rax
160 push fs
161
162 ; Special case for GS; OSes typically use swapgs to reset the hidden base register for GS on entry into the kernel. The same happens on exit
163 mov ecx, MSR_K8_GS_BASE
164 rdmsr
165 push rdx
166 push rax
167 push gs
168%endmacro
169
170; trashes rax, rdx & rcx
171%macro MYPOPSEGS64 2
172 ; Note: do not step through this code with a debugger!
173 pop gs
174 pop rax
175 pop rdx
176 mov ecx, MSR_K8_GS_BASE
177 wrmsr
178
179 pop fs
180 pop rax
181 pop rdx
182 mov ecx, MSR_K8_FS_BASE
183 wrmsr
184 ; Now it's safe to step again
185
186 pop %1
187 mov ds, %2
188 pop %1
189 mov es, %2
190%endmacro
191
192%macro MYPUSHAD32 0
193 pushad
194%endmacro
195%macro MYPOPAD32 0
196 popad
197%endmacro
198
199%macro MYPUSHSEGS32 2
200 push ds
201 push es
202 push fs
203 push gs
204%endmacro
205%macro MYPOPSEGS32 2
206 pop gs
207 pop fs
208 pop es
209 pop ds
210%endmacro
211
212
213;*******************************************************************************
214;* External Symbols *
215;*******************************************************************************
216%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
217extern NAME(SUPR0AbsIs64bit)
218extern NAME(SUPR0Abs64bitKernelCS)
219extern NAME(SUPR0Abs64bitKernelSS)
220extern NAME(SUPR0Abs64bitKernelDS)
221extern NAME(SUPR0AbsKernelCS)
222%endif
223
224
225;*******************************************************************************
226;* Global Variables *
227;*******************************************************************************
228%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
229BEGINDATA
230;;
231; Store the SUPR0AbsIs64bit absolute value here so we can cmp/test without
232; needing to clobber a register. (This trick doesn't quite work for PE btw.
233; but that's not relevant atm.)
234GLOBALNAME g_fVMXIs64bitHost
235 dd NAME(SUPR0AbsIs64bit)
236%endif
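; The 32-bit procedures below all use the same pattern to run the actual
; instruction in 64-bit mode when this flag is set; a condensed sketch of that
; thunk (the pieces all appear verbatim further down):
;       cmp     byte [NAME(g_fVMXIs64bitHost)], 0
;       jz      .legacy_mode                ; plain 32-bit host, run the 32-bit code
;       db      0xea                        ; far jmp opcode, ...
;       dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS) ; ... 16:32 target in the 64-bit kernel CS
; .legacy_mode:
;       ...                                 ; 32-bit implementation
; The 64-bit side then returns with `jmp far [.fpret wrt rip]' through a 16:32
; pointer back into the 32-bit kernel code selector (SUPR0AbsKernelCS).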
237
238
239BEGINCODE
240
241
242;/**
243; * Executes VMWRITE, 64-bit value.
244; *
245; * @returns VBox status code
246; * @param idxField x86: [ebp + 08h] msc: rcx gcc: rdi VMCS index
247; * @param u64Data x86: [ebp + 0ch] msc: rdx gcc: rsi VM field value
248; */
249ALIGNCODE(16)
250BEGINPROC VMXWriteVMCS64
251%ifdef RT_ARCH_AMD64
252 %ifdef ASM_CALL64_GCC
253 and edi, 0ffffffffh
254 xor rax, rax
255 vmwrite rdi, rsi
256 %else
257 and ecx, 0ffffffffh
258 xor rax, rax
259 vmwrite rcx, rdx
260 %endif
261%else ; RT_ARCH_X86
262 mov ecx, [esp + 4] ; idxField
263 lea edx, [esp + 8] ; &u64Data
264 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
265 cmp byte [NAME(g_fVMXIs64bitHost)], 0
266 jz .legacy_mode
267 db 0xea ; jmp far .sixtyfourbit_mode
268 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
269.legacy_mode:
270 %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
271 vmwrite ecx, [edx] ; low dword
272 jz .done
273 jc .done
274 inc ecx
275 xor eax, eax
276 vmwrite ecx, [edx + 4] ; high dword
277.done:
278%endif ; RT_ARCH_X86
279 jnc .valid_vmcs
280 mov eax, VERR_VMX_INVALID_VMCS_PTR
281 ret
282.valid_vmcs:
283 jnz .the_end
284 mov eax, VERR_VMX_INVALID_VMCS_FIELD
285.the_end:
286 ret
287
288%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
289ALIGNCODE(16)
290BITS 64
291.sixtyfourbit_mode:
292 and edx, 0ffffffffh
293 and ecx, 0ffffffffh
294 xor eax, eax
295 vmwrite rcx, [rdx]
296 mov r8d, VERR_VMX_INVALID_VMCS_FIELD
297 cmovz eax, r8d
298 mov r9d, VERR_VMX_INVALID_VMCS_PTR
299 cmovc eax, r9d
300 jmp far [.fpret wrt rip]
301.fpret: ; 16:32 Pointer to .the_end.
302 dd .the_end, NAME(SUPR0AbsKernelCS)
303BITS 32
304%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
305ENDPROC VMXWriteVMCS64
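; How the flag checks above turn VMX results into VBox statuses (a summary of
; the code in this routine and its siblings below):
;       CF set          -> VMfailInvalid (no current VMCS)  -> VERR_VMX_INVALID_VMCS_PTR
;       ZF set          -> VMfailValid (e.g. bad field)     -> VERR_VMX_INVALID_VMCS_FIELD
;       CF and ZF clear -> success, eax is already 0
; An illustrative 32-bit caller (0681Eh is the architectural VMCS encoding of
; the guest RIP field; any other field works the same way):
;       push    dword 0             ; u64Data, high dword
;       push    dword 0fff0h        ; u64Data, low dword
;       push    dword 0681Eh        ; idxField
;       call    NAME(VMXWriteVMCS64)
;       add     esp, 0ch
;       test    eax, eax            ; 0 == VINF_SUCCESS
;       jnz     .write_failed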
306
307
308;/**
309; * Executes VMREAD, 64-bit value
310; *
311; * @returns VBox status code
312; * @param idxField VMCS index
313; * @param pData Ptr to store VM field value
314; */
315;DECLASM(int) VMXReadVMCS64(uint32_t idxField, uint64_t *pData);
316ALIGNCODE(16)
317BEGINPROC VMXReadVMCS64
318%ifdef RT_ARCH_AMD64
319 %ifdef ASM_CALL64_GCC
320 and edi, 0ffffffffh
321 xor rax, rax
322 vmread [rsi], rdi
323 %else
324 and ecx, 0ffffffffh
325 xor rax, rax
326 vmread [rdx], rcx
327 %endif
328%else ; RT_ARCH_X86
329 mov ecx, [esp + 4] ; idxField
330 mov edx, [esp + 8] ; pData
331 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
332 cmp byte [NAME(g_fVMXIs64bitHost)], 0
333 jz .legacy_mode
334 db 0xea ; jmp far .sixtyfourbit_mode
335 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
336.legacy_mode:
337 %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
338 vmread [edx], ecx ; low dword
339 jz .done
340 jc .done
341 inc ecx
342 xor eax, eax
343 vmread [edx + 4], ecx ; high dword
344.done:
345%endif ; RT_ARCH_X86
346 jnc .valid_vmcs
347 mov eax, VERR_VMX_INVALID_VMCS_PTR
348 ret
349.valid_vmcs:
350 jnz .the_end
351 mov eax, VERR_VMX_INVALID_VMCS_FIELD
352.the_end:
353 ret
354
355%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
356ALIGNCODE(16)
357BITS 64
358.sixtyfourbit_mode:
359 and edx, 0ffffffffh
360 and ecx, 0ffffffffh
361 xor eax, eax
362 vmread [rdx], rcx
363 mov r8d, VERR_VMX_INVALID_VMCS_FIELD
364 cmovz eax, r8d
365 mov r9d, VERR_VMX_INVALID_VMCS_PTR
366 cmovc eax, r9d
367 jmp far [.fpret wrt rip]
368.fpret: ; 16:32 Pointer to .the_end.
369 dd .the_end, NAME(SUPR0AbsKernelCS)
370BITS 32
371%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
372ENDPROC VMXReadVMCS64
373
374
375;/**
376; * Executes VMREAD, 32-bit value.
377; *
378; * @returns VBox status code
379; * @param idxField VMCS index
380; * @param pu32Data Ptr to store VM field value
381; */
382;DECLASM(int) VMXReadVMCS32(uint32_t idxField, uint32_t *pu32Data);
383ALIGNCODE(16)
384BEGINPROC VMXReadVMCS32
385%ifdef RT_ARCH_AMD64
386 %ifdef ASM_CALL64_GCC
387 and edi, 0ffffffffh
388 xor rax, rax
389 vmread r10, rdi
390 mov [rsi], r10d
391 %else
392 and ecx, 0ffffffffh
393 xor rax, rax
394 vmread r10, rcx
395 mov [rdx], r10d
396 %endif
397%else ; RT_ARCH_X86
398 mov ecx, [esp + 4] ; idxField
399 mov edx, [esp + 8] ; pu32Data
400 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
401 cmp byte [NAME(g_fVMXIs64bitHost)], 0
402 jz .legacy_mode
403 db 0xea ; jmp far .sixtyfourbit_mode
404 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
405.legacy_mode:
406 %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
407 xor eax, eax
408 vmread [edx], ecx
409%endif ; RT_ARCH_X86
410 jnc .valid_vmcs
411 mov eax, VERR_VMX_INVALID_VMCS_PTR
412 ret
413.valid_vmcs:
414 jnz .the_end
415 mov eax, VERR_VMX_INVALID_VMCS_FIELD
416.the_end:
417 ret
418
419%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
420ALIGNCODE(16)
421BITS 64
422.sixtyfourbit_mode:
423 and edx, 0ffffffffh
424 and ecx, 0ffffffffh
425 xor eax, eax
426 vmread r10, rcx
427 mov [rdx], r10d
428 mov r8d, VERR_VMX_INVALID_VMCS_FIELD
429 cmovz eax, r8d
430 mov r9d, VERR_VMX_INVALID_VMCS_PTR
431 cmovc eax, r9d
432 jmp far [.fpret wrt rip]
433.fpret: ; 16:32 Pointer to .the_end.
434 dd .the_end, NAME(SUPR0AbsKernelCS)
435BITS 32
436%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
437ENDPROC VMXReadVMCS32
438
439
440;/**
441; * Executes VMWRITE, 32-bit value.
442; *
443; * @returns VBox status code
444; * @param idxField VMCS index
445; * @param u32Data The 32-bit value to write
446; */
447;DECLASM(int) VMXWriteVMCS32(uint32_t idxField, uint32_t u32Data);
448ALIGNCODE(16)
449BEGINPROC VMXWriteVMCS32
450%ifdef RT_ARCH_AMD64
451 %ifdef ASM_CALL64_GCC
452 and edi, 0ffffffffh
453 and esi, 0ffffffffh
454 xor rax, rax
455 vmwrite rdi, rsi
456 %else
457 and ecx, 0ffffffffh
458 and edx, 0ffffffffh
459 xor rax, rax
460 vmwrite rcx, rdx
461 %endif
462%else ; RT_ARCH_X86
463 mov ecx, [esp + 4] ; idxField
464 mov edx, [esp + 8] ; u32Data
465 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
466 cmp byte [NAME(g_fVMXIs64bitHost)], 0
467 jz .legacy_mode
468 db 0xea ; jmp far .sixtyfourbit_mode
469 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
470.legacy_mode:
471 %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
472 xor eax, eax
473 vmwrite ecx, edx
474%endif ; RT_ARCH_X86
475 jnc .valid_vmcs
476 mov eax, VERR_VMX_INVALID_VMCS_PTR
477 ret
478.valid_vmcs:
479 jnz .the_end
480 mov eax, VERR_VMX_INVALID_VMCS_FIELD
481.the_end:
482 ret
483
484%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
485ALIGNCODE(16)
486BITS 64
487.sixtyfourbit_mode:
488 and edx, 0ffffffffh
489 and ecx, 0ffffffffh
490 xor eax, eax
491 vmwrite rcx, rdx
492 mov r8d, VERR_VMX_INVALID_VMCS_FIELD
493 cmovz eax, r8d
494 mov r9d, VERR_VMX_INVALID_VMCS_PTR
495 cmovc eax, r9d
496 jmp far [.fpret wrt rip]
497.fpret: ; 16:32 Pointer to .the_end.
498 dd .the_end, NAME(SUPR0AbsKernelCS)
499BITS 32
500%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
501ENDPROC VMXWriteVMCS32
502
503
504;/**
505; * Executes VMXON
506; *
507; * @returns VBox status code
508; * @param HCPhysVMXOn Physical address of VMXON structure
509; */
510;DECLASM(int) VMXEnable(RTHCPHYS HCPhysVMXOn);
511BEGINPROC VMXEnable
512%ifdef RT_ARCH_AMD64
513 xor rax, rax
514 %ifdef ASM_CALL64_GCC
515 push rdi
516 %else
517 push rcx
518 %endif
519 vmxon [rsp]
520%else ; RT_ARCH_X86
521 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
522 cmp byte [NAME(g_fVMXIs64bitHost)], 0
523 jz .legacy_mode
524 db 0xea ; jmp far .sixtyfourbit_mode
525 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
526.legacy_mode:
527 %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
528 xor eax, eax
529 vmxon [esp + 4]
530%endif ; RT_ARCH_X86
531 jnc .good
532 mov eax, VERR_VMX_INVALID_VMXON_PTR
533 jmp .the_end
534
535.good:
536 jnz .the_end
537 mov eax, VERR_VMX_GENERIC
538
539.the_end:
540%ifdef RT_ARCH_AMD64
541 add rsp, 8
542%endif
543 ret
544
545%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
546ALIGNCODE(16)
547BITS 64
548.sixtyfourbit_mode:
549 lea rdx, [rsp + 4] ; &HCPhysVMXOn.
550 and edx, 0ffffffffh
551 xor eax, eax
552 vmxon [rdx]
553 mov r8d, VERR_INVALID_PARAMETER
554 cmovz eax, r8d
555 mov r9d, VERR_VMX_INVALID_VMCS_PTR
556 cmovc eax, r9d
557 jmp far [.fpret wrt rip]
558.fpret: ; 16:32 Pointer to .the_end.
559 dd .the_end, NAME(SUPR0AbsKernelCS)
560BITS 32
561%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
562ENDPROC VMXEnable
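; What a caller is assumed to have done before calling VMXEnable (architectural
; requirements summarised from the SDM, not code from this file): CR4.VMXE must
; be set, IA32_FEATURE_CONTROL must permit VMXON, and the first dword of the
; 4KB-aligned VMXON region must hold the VMCS revision id from IA32_VMX_BASIC.
; A rough preparation sketch, with ebx as a hypothetical virtual address of the
; VMXON region:
;       mov     ecx, 480h           ; IA32_VMX_BASIC
;       rdmsr
;       mov     [ebx], eax          ; store the VMCS revision id in the region
;       mov     eax, cr4
;       or      eax, 2000h          ; CR4.VMXE (bit 13)
;       mov     cr4, eax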
563
564
565;/**
566; * Executes VMXOFF
567; */
568;DECLASM(void) VMXDisable(void);
569BEGINPROC VMXDisable
570%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
571 cmp byte [NAME(g_fVMXIs64bitHost)], 0
572 jz .legacy_mode
573 db 0xea ; jmp far .sixtyfourbit_mode
574 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
575.legacy_mode:
576%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
577 vmxoff
578.the_end:
579 ret
580
581%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
582ALIGNCODE(16)
583BITS 64
584.sixtyfourbit_mode:
585 vmxoff
586 jmp far [.fpret wrt rip]
587.fpret: ; 16:32 Pointer to .the_end.
588 dd .the_end, NAME(SUPR0AbsKernelCS)
589BITS 32
590%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
591ENDPROC VMXDisable
592
593
594;/**
595; * Executes VMCLEAR
596; *
597; * @returns VBox status code
598; * @param HCPhysVMCS Physical address of VM control structure
599; */
600;DECLASM(int) VMXClearVMCS(RTHCPHYS HCPhysVMCS);
601ALIGNCODE(16)
602BEGINPROC VMXClearVMCS
603%ifdef RT_ARCH_AMD64
604 xor rax, rax
605 %ifdef ASM_CALL64_GCC
606 push rdi
607 %else
608 push rcx
609 %endif
610 vmclear [rsp]
611%else ; RT_ARCH_X86
612 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
613 cmp byte [NAME(g_fVMXIs64bitHost)], 0
614 jz .legacy_mode
615 db 0xea ; jmp far .sixtyfourbit_mode
616 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
617.legacy_mode:
618 %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
619 xor eax, eax
620 vmclear [esp + 4]
621%endif ; RT_ARCH_X86
622 jnc .the_end
623 mov eax, VERR_VMX_INVALID_VMCS_PTR
624.the_end:
625%ifdef RT_ARCH_AMD64
626 add rsp, 8
627%endif
628 ret
629
630%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
631ALIGNCODE(16)
632BITS 64
633.sixtyfourbit_mode:
634 lea rdx, [rsp + 4] ; &HCPhysVMCS
635 and edx, 0ffffffffh
636 xor eax, eax
637 vmclear [rdx]
638 mov r9d, VERR_VMX_INVALID_VMCS_PTR
639 cmovc eax, r9d
640 jmp far [.fpret wrt rip]
641.fpret: ; 16:32 Pointer to .the_end.
642 dd .the_end, NAME(SUPR0AbsKernelCS)
643BITS 32
644%endif
645ENDPROC VMXClearVMCS
646
647
648;/**
649; * Executes VMPTRLD
650; *
651; * @returns VBox status code
652; * @param HCPhysVMCS Physical address of VMCS structure
653; */
654;DECLASM(int) VMXActivateVMCS(RTHCPHYS HCPhysVMCS);
655ALIGNCODE(16)
656BEGINPROC VMXActivateVMCS
657%ifdef RT_ARCH_AMD64
658 xor rax, rax
659 %ifdef ASM_CALL64_GCC
660 push rdi
661 %else
662 push rcx
663 %endif
664 vmptrld [rsp]
665%else
666 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
667 cmp byte [NAME(g_fVMXIs64bitHost)], 0
668 jz .legacy_mode
669 db 0xea ; jmp far .sixtyfourbit_mode
670 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
671.legacy_mode:
672 %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
673 xor eax, eax
674 vmptrld [esp + 4]
675%endif
676 jnc .the_end
677 mov eax, VERR_VMX_INVALID_VMCS_PTR
678.the_end:
679%ifdef RT_ARCH_AMD64
680 add rsp, 8
681%endif
682 ret
683
684%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
685ALIGNCODE(16)
686BITS 64
687.sixtyfourbit_mode:
688 lea rdx, [rsp + 4] ; &HCPhysVMCS
689 and edx, 0ffffffffh
690 xor eax, eax
691 vmptrld [rdx]
692 mov r9d, VERR_VMX_INVALID_VMCS_PTR
693 cmovc eax, r9d
694 jmp far [.fpret wrt rip]
695.fpret: ; 16:32 Pointer to .the_end.
696 dd .the_end, NAME(SUPR0AbsKernelCS)
697BITS 32
698%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
699ENDPROC VMXActivateVMCS
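; Typical per-VMCS call order for the two helpers above (an illustrative
; sketch; the hypothetical 00001000h stands in for a real physical address):
;       push    dword 0                 ; HCPhysVMCS, high dword
;       push    dword 00001000h         ; HCPhysVMCS, low dword
;       call    NAME(VMXClearVMCS)      ; put the VMCS into the clear/inactive state
;       call    NAME(VMXActivateVMCS)   ; VMPTRLD: make it the current VMCS
;       add     esp, 8
; Once current, the VMCS is filled in with VMXWriteVMCS32/64 and entered with
; vmlaunch/vmresume; VMCLEAR is used again before migrating it to another CPU.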
700
701
702;/**
703; * Executes VMPTRST
704; *
705; * @returns VBox status code
706; * @param pVMCS x86:[esp + 04h] gcc:rdi msc:rcx Address that will receive the current VMCS pointer
707; */
708;DECLASM(int) VMXGetActivateVMCS(RTHCPHYS *pVMCS);
709BEGINPROC VMXGetActivateVMCS
710%ifdef RT_OS_OS2
711 mov eax, VERR_NOT_SUPPORTED
712 ret
713%else
714 %ifdef RT_ARCH_AMD64
715 %ifdef ASM_CALL64_GCC
716 vmptrst qword [rdi]
717 %else
718 vmptrst qword [rcx]
719 %endif
720 %else
721 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
722 cmp byte [NAME(g_fVMXIs64bitHost)], 0
723 jz .legacy_mode
724 db 0xea ; jmp far .sixtyfourbit_mode
725 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
726.legacy_mode:
727 %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
728 vmptrst qword [esp+04h]
729 %endif
730 xor eax, eax
731.the_end:
732 ret
733
734 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
735ALIGNCODE(16)
736BITS 64
737.sixtyfourbit_mode:
738 lea rdx, [rsp + 4] ; &HCPhysVMCS
739 and edx, 0ffffffffh
740 vmptrst qword [rdx]
741 xor eax, eax
742 jmp far [.fpret wrt rip]
743.fpret: ; 16:32 Pointer to .the_end.
744 dd .the_end, NAME(SUPR0AbsKernelCS)
745BITS 32
746 %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
747%endif
748ENDPROC VMXGetActivateVMCS
749
750;/**
751; * Invalidate a page using invept
752; @param enmFlush msc:ecx gcc:edi x86:[esp+04] Type of flush
753; @param pDescriptor msc:edx gcc:esi x86:[esp+08] Descriptor pointer
754; */
755;DECLASM(int) VMXR0InvEPT(VMX_FLUSH enmFlush, uint64_t *pDescriptor);
756BEGINPROC VMXR0InvEPT
757%ifdef RT_ARCH_AMD64
758 %ifdef ASM_CALL64_GCC
759 and edi, 0ffffffffh
760 xor rax, rax
761; invept rdi, qword [rsi]
762 DB 0x66, 0x0F, 0x38, 0x80, 0x3E
763 %else
764 and ecx, 0ffffffffh
765 xor rax, rax
766; invept rcx, qword [rdx]
767 DB 0x66, 0x0F, 0x38, 0x80, 0xA
768 %endif
769%else
770 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
771 cmp byte [NAME(g_fVMXIs64bitHost)], 0
772 jz .legacy_mode
773 db 0xea ; jmp far .sixtyfourbit_mode
774 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
775.legacy_mode:
776 %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
777 mov eax, [esp + 4]
778 mov ecx, [esp + 8]
779; invept eax, qword [ecx]
780 DB 0x66, 0x0F, 0x38, 0x80, 0x1
781%endif
782 jnc .valid_vmcs
783 mov eax, VERR_VMX_INVALID_VMCS_PTR
784 ret
785.valid_vmcs:
786 jnz .the_end
787 mov eax, VERR_INVALID_PARAMETER
788.the_end:
789 ret
790
791%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
792ALIGNCODE(16)
793BITS 64
794.sixtyfourbit_mode:
795 and esp, 0ffffffffh
796 mov ecx, [rsp + 4] ; enmFlush
797 mov edx, [rsp + 8] ; pDescriptor
798 xor eax, eax
799; invept rcx, qword [rdx]
800 DB 0x66, 0x0F, 0x38, 0x80, 0xA
801 mov r8d, VERR_INVALID_PARAMETER
802 cmovz eax, r8d
803 mov r9d, VERR_VMX_INVALID_VMCS_PTR
804 cmovc eax, r9d
805 jmp far [.fpret wrt rip]
806.fpret: ; 16:32 Pointer to .the_end.
807 dd .the_end, NAME(SUPR0AbsKernelCS)
808BITS 32
809%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
810ENDPROC VMXR0InvEPT
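; Layout of the 16-byte descriptor pDescriptor must point at (architectural,
; per the SDM, not something defined in this file):
;       dq      <EPT pointer>           ; bits 63:0   - the EPTP to flush
;       dq      0                       ; bits 127:64 - must be zero
; enmFlush selects the invalidation type: 1 flushes the single context named by
; the EPTP above, 2 flushes all contexts (the EPTP field is then ignored).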
811
812
813;/**
814; * Invalidate a page using invvpid
815; @param enmFlush msc:ecx gcc:edi x86:[esp+04] Type of flush
816; @param pDescriptor msc:edx gcc:esi x86:[esp+08] Descriptor pointer
817; */
818;DECLASM(int) VMXR0InvVPID(VMX_FLUSH enmFlush, uint64_t *pDescriptor);
819BEGINPROC VMXR0InvVPID
820%ifdef RT_ARCH_AMD64
821 %ifdef ASM_CALL64_GCC
822 and edi, 0ffffffffh
823 xor rax, rax
824 ;invvpid rdi, qword [rsi]
825 DB 0x66, 0x0F, 0x38, 0x81, 0x3E
826 %else
827 and ecx, 0ffffffffh
828 xor rax, rax
829; invvpid rcx, qword [rdx]
830 DB 0x66, 0x0F, 0x38, 0x81, 0xA
831 %endif
832%else
833 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
834 cmp byte [NAME(g_fVMXIs64bitHost)], 0
835 jz .legacy_mode
836 db 0xea ; jmp far .sixtyfourbit_mode
837 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
838.legacy_mode:
839 %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
840 mov eax, [esp + 4]
841 mov ecx, [esp + 8]
842; invvpid eax, qword [ecx]
843 DB 0x66, 0x0F, 0x38, 0x81, 0x1
844%endif
845 jnc .valid_vmcs
846 mov eax, VERR_VMX_INVALID_VMCS_PTR
847 ret
848.valid_vmcs:
849 jnz .the_end
850 mov eax, VERR_INVALID_PARAMETER
851.the_end:
852 ret
853
854%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
855ALIGNCODE(16)
856BITS 64
857.sixtyfourbit_mode:
858 and esp, 0ffffffffh
859 mov ecx, [rsp + 4] ; enmFlush
860 mov edx, [rsp + 8] ; pDescriptor
861 xor eax, eax
862; invvpid rcx, qword [rdx]
863 DB 0x66, 0x0F, 0x38, 0x81, 0xA
864 mov r8d, VERR_INVALID_PARAMETER
865 cmovz eax, r8d
866 mov r9d, VERR_VMX_INVALID_VMCS_PTR
867 cmovc eax, r9d
868 jmp far [.fpret wrt rip]
869.fpret: ; 16:32 Pointer to .the_end.
870 dd .the_end, NAME(SUPR0AbsKernelCS)
871BITS 32
872%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
873ENDPROC VMXR0InvVPID
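; Layout of the 16-byte invvpid descriptor (architectural, per the SDM):
;       dw      <VPID>                  ; bits 15:0   - the VPID to flush
;       dw      0, 0, 0                 ; bits 63:16  - must be zero
;       dq      <linear address>        ; bits 127:64 - used by type 0 only
; enmFlush selects the type: 0 = individual address, 1 = single context,
; 2 = all contexts, 3 = single context retaining global translations.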
874
875
876%if GC_ARCH_BITS == 64
877;;
878; Executes INVLPGA
879;
880; @param pPageGC msc:rcx gcc:rdi x86:[esp+04] Virtual page to invalidate
881; @param uASID msc:rdx gcc:rsi x86:[esp+0C] Tagged TLB id
882;
883;DECLASM(void) SVMR0InvlpgA(RTGCPTR pPageGC, uint32_t uASID);
884BEGINPROC SVMR0InvlpgA
885%ifdef RT_ARCH_AMD64
886 %ifdef ASM_CALL64_GCC
887 mov rax, rdi
888 mov rcx, rsi
889 %else
890 ; from http://www.cs.cmu.edu/~fp/courses/15213-s06/misc/asm64-handout.pdf:
891 ; ``Perhaps unexpectedly, instructions that move or generate 32-bit register
892 ; values also set the upper 32 bits of the register to zero. Consequently
893 ; there is no need for an instruction movzlq.''
894 mov eax, ecx
895 mov rcx, rdx
896 %endif
897%else
898 mov eax, [esp + 4]
899 mov ecx, [esp + 0Ch]
900%endif
901 invlpga [xAX], ecx
902 ret
903ENDPROC SVMR0InvlpgA
904
905%else ; GC_ARCH_BITS != 64
906;;
907; Executes INVLPGA
908;
909; @param pPageGC msc:ecx gcc:edi x86:[esp+04] Virtual page to invalidate
910; @param uASID msc:edx gcc:esi x86:[esp+08] Tagged TLB id
911;
912;DECLASM(void) SVMR0InvlpgA(RTGCPTR pPageGC, uint32_t uASID);
913BEGINPROC SVMR0InvlpgA
914%ifdef RT_ARCH_AMD64
915 %ifdef ASM_CALL64_GCC
916 movzx rax, edi
917 mov ecx, esi
918 %else
919 ; from http://www.cs.cmu.edu/~fp/courses/15213-s06/misc/asm64-handout.pdf:
920 ; ``Perhaps unexpectedly, instructions that move or generate 32-bit register
921 ; values also set the upper 32 bits of the register to zero. Consequently
922 ; there is no need for an instruction movzlq.''
923 mov eax, ecx
924 mov ecx, edx
925 %endif
926%else
927 mov eax, [esp + 4]
928 mov ecx, [esp + 8]
929%endif
930 invlpga [xAX], ecx
931 ret
932ENDPROC SVMR0InvlpgA
933
934%endif ; GC_ARCH_BITS != 64
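; invlpga takes the virtual address in rAX/EAX and the ASID in ECX, which is
; all the marshalling above does. The quoted handout applies as follows (a
; two-line illustration, not code from this file):
;       mov     rax, 0ffffffffffffffffh
;       mov     eax, ecx                ; writes bits 31:0 and clears bits 63:32 of rax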
935
936%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
937
938;/**
939; * Gets 64-bit GDTR and IDTR on darwin.
940; * @param pGdtr Where to store the 64-bit GDTR.
941; * @param pIdtr Where to store the 64-bit IDTR.
942; */
943;DECLASM(void) hwaccmR0Get64bitGDTRandIDTR(PX86XDTR64 pGdtr, PX86XDTR64 pIdtr);
944ALIGNCODE(16)
945BEGINPROC hwaccmR0Get64bitGDTRandIDTR
946 db 0xea ; jmp far .sixtyfourbit_mode
947 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
948.the_end:
949 ret
950
951ALIGNCODE(16)
952BITS 64
953.sixtyfourbit_mode:
954 and esp, 0ffffffffh
955 mov ecx, [rsp + 4] ; pGdtr
956 mov edx, [rsp + 8] ; pIdtr
957 sgdt [rcx]
958 sidt [rdx]
959 jmp far [.fpret wrt rip]
960.fpret: ; 16:32 Pointer to .the_end.
961 dd .the_end, NAME(SUPR0AbsKernelCS)
962BITS 32
963ENDPROC hwaccmR0Get64bitGDTRandIDTR
964
965
966;/**
967; * Gets 64-bit CR3 on darwin.
968; * @returns CR3
969; */
970;DECLASM(uint64_t) hwaccmR0Get64bitCR3(void);
971ALIGNCODE(16)
972BEGINPROC hwaccmR0Get64bitCR3
973 db 0xea ; jmp far .sixtyfourbit_mode
974 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
975.the_end:
976 ret
977
978ALIGNCODE(16)
979BITS 64
980.sixtyfourbit_mode:
981 mov rax, cr3
982 mov rdx, rax
983 shr rdx, 32
984 jmp far [.fpret wrt rip]
985.fpret: ; 16:32 Pointer to .the_end.
986 dd .the_end, NAME(SUPR0AbsKernelCS)
987BITS 32
988ENDPROC hwaccmR0Get64bitCR3
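; Note on the routine above: the mov/shr pair leaves the low half of CR3 in eax
; and the high half in edx, which is exactly the edx:eax pair a 32-bit C caller
; expects for the uint64_t return value of hwaccmR0Get64bitCR3.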
989
990%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
991
992
993
994;
995; The default setup of the StartVM routines.
996;
997%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
998 %define MY_NAME(name) name %+ _32
999%else
1000 %define MY_NAME(name) name
1001%endif
1002%ifdef RT_ARCH_AMD64
1003 %define MYPUSHAD MYPUSHAD64
1004 %define MYPOPAD MYPOPAD64
1005 %define MYPUSHSEGS MYPUSHSEGS64
1006 %define MYPOPSEGS MYPOPSEGS64
1007%else
1008 %define MYPUSHAD MYPUSHAD32
1009 %define MYPOPAD MYPOPAD32
1010 %define MYPUSHSEGS MYPUSHSEGS32
1011 %define MYPOPSEGS MYPOPSEGS32
1012%endif
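; Worked example of the two-pass trick (see the note at the very end of this
; file): with the defaults above, MY_NAME(VMXR0StartVM32) in HWACCMR0Mixed.mac
; expands to VMXR0StartVM32 (or VMXR0StartVM32_32 on a hybrid 32-bit kernel);
; after the redefinitions at the end of the file the same source expands to
; VMXR0StartVM32_64, which is what the 64-bit thunks below call.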
1013
1014%include "HWACCMR0Mixed.mac"
1015
1016
1017%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
1018 ;
1019 ; Write the wrapper procedures.
1020 ;
1021 ; These routines are probably being too paranoid about selector
1022 ; restoring, but better safe than sorry...
1023 ;
1024
1025; DECLASM(int) VMXR0StartVM32(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache /*, PVM pVM, PVMCPU pVCpu*/);
1026ALIGNCODE(16)
1027BEGINPROC VMXR0StartVM32
1028 cmp byte [NAME(g_fVMXIs64bitHost)], 0
1029 je near NAME(VMXR0StartVM32_32)
1030
1031 ; stack frame
1032 push esi
1033 push edi
1034 push fs
1035 push gs
1036
1037 ; jmp far .thunk64
1038 db 0xea
1039 dd .thunk64, NAME(SUPR0Abs64bitKernelCS)
1040
1041ALIGNCODE(16)
1042BITS 64
1043.thunk64:
1044 sub esp, 20h
1045 mov edi, [rsp + 20h + 14h] ; fResume
1046 mov esi, [rsp + 20h + 18h] ; pCtx
1047 mov edx, [rsp + 20h + 1Ch] ; pCache
1048 call NAME(VMXR0StartVM32_64)
1049 add esp, 20h
1050 jmp far [.fpthunk32 wrt rip]
1051.fpthunk32: ; 16:32 Pointer to .thunk32.
1052 dd .thunk32, NAME(SUPR0AbsKernelCS)
1053
1054BITS 32
1055ALIGNCODE(16)
1056.thunk32:
1057 pop gs
1058 pop fs
1059 pop edi
1060 pop esi
1061 ret
1062ENDPROC VMXR0StartVM32
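; Where the magic offsets in these thunks come from (worked out for the frame
; the wrappers build): on entry the first argument is at [esp + 4]; the four
; pushes (esi, edi, fs, gs) add 10h, so together with the return address the
; arguments start at [esp + 14h]; after `sub esp, 20h' they are at
; [rsp + 20h + 14h]. Hence for VMXR0StartVM32:
;       [rsp + 20h + 14h]   fResume
;       [rsp + 20h + 18h]   pCtx
;       [rsp + 20h + 1Ch]   pCache
; and for SVMR0VMRun, whose first two arguments are 64-bit physical addresses:
;       [rsp + 20h + 14h]   pVMCBHostPhys (8 bytes)
;       [rsp + 20h + 1Ch]   pVMCBPhys     (8 bytes)
;       [rsp + 20h + 24h]   pCtx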
1063
1064
1065; DECLASM(int) VMXR0StartVM64(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache /*, PVM pVM, PVMCPU pVCpu*/);
1066ALIGNCODE(16)
1067BEGINPROC VMXR0StartVM64
1068 cmp byte [NAME(g_fVMXIs64bitHost)], 0
1069 je .not_in_long_mode
1070
1071 ; stack frame
1072 push esi
1073 push edi
1074 push fs
1075 push gs
1076
1077 ; jmp far .thunk64
1078 db 0xea
1079 dd .thunk64, NAME(SUPR0Abs64bitKernelCS)
1080
1081ALIGNCODE(16)
1082BITS 64
1083.thunk64:
1084 sub esp, 20h
1085 mov edi, [rsp + 20h + 14h] ; fResume
1086 mov esi, [rsp + 20h + 18h] ; pCtx
1087 mov edx, [rsp + 20h + 1Ch] ; pCache
1088 call NAME(VMXR0StartVM64_64)
1089 add esp, 20h
1090 jmp far [.fpthunk32 wrt rip]
1091.fpthunk32: ; 16:32 Pointer to .thunk32.
1092 dd .thunk32, NAME(SUPR0AbsKernelCS)
1093
1094BITS 32
1095ALIGNCODE(16)
1096.thunk32:
1097 pop gs
1098 pop fs
1099 pop edi
1100 pop esi
1101 ret
1102
1103.not_in_long_mode:
1104 mov eax, VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE
1105 ret
1106ENDPROC VMXR0StartVM64
1107
1108;DECLASM(int) SVMR0VMRun(RTHCPHYS pVMCBHostPhys, RTHCPHYS pVMCBPhys, PCPUMCTX pCtx /*, PVM pVM, PVMCPU pVCpu*/);
1109ALIGNCODE(16)
1110BEGINPROC SVMR0VMRun
1111 cmp byte [NAME(g_fVMXIs64bitHost)], 0
1112 je near NAME(SVMR0VMRun_32)
1113
1114 ; stack frame
1115 push esi
1116 push edi
1117 push fs
1118 push gs
1119
1120 ; jmp far .thunk64
1121 db 0xea
1122 dd .thunk64, NAME(SUPR0Abs64bitKernelCS)
1123
1124ALIGNCODE(16)
1125BITS 64
1126.thunk64:
1127 sub esp, 20h
1128 mov rdi, [rsp + 20h + 14h] ; pVMCBHostPhys
1129 mov rsi, [rsp + 20h + 1Ch] ; pVMCBPhys
1130 mov edx, [rsp + 20h + 24h] ; pCtx
1131 call NAME(SVMR0VMRun_64)
1132 add esp, 20h
1133 jmp far [.fpthunk32 wrt rip]
1134.fpthunk32: ; 16:32 Pointer to .thunk32.
1135 dd .thunk32, NAME(SUPR0AbsKernelCS)
1136
1137BITS 32
1138ALIGNCODE(16)
1139.thunk32:
1140 pop gs
1141 pop fs
1142 pop edi
1143 pop esi
1144 ret
1145ENDPROC SVMR0VMRun
1146
1147
1148; DECLASM(int) SVMR0VMRun64(RTHCPHYS pVMCBHostPhys, RTHCPHYS pVMCBPhys, PCPUMCTX pCtx /*, PVM pVM, PVMCPU pVCpu*/);
1149ALIGNCODE(16)
1150BEGINPROC SVMR0VMRun64
1151 cmp byte [NAME(g_fVMXIs64bitHost)], 0
1152 je .not_in_long_mode
1153
1154 ; stack frame
1155 push esi
1156 push edi
1157 push fs
1158 push gs
1159
1160 ; jmp far .thunk64
1161 db 0xea
1162 dd .thunk64, NAME(SUPR0Abs64bitKernelCS)
1163
1164ALIGNCODE(16)
1165BITS 64
1166.thunk64:
1167 sub esp, 20h
1168 mov rdi, [rsp + 20h + 14h] ; pVMCBHostPhys
1169 mov rsi, [rsp + 20h + 1Ch] ; pVMCBPhys
1170 mov edx, [rsp + 20h + 24h] ; pCtx
1171 call NAME(SVMR0VMRun64_64)
1172 add esp, 20h
1173 jmp far [.fpthunk32 wrt rip]
1174.fpthunk32: ; 16:32 Pointer to .thunk32.
1175 dd .thunk32, NAME(SUPR0AbsKernelCS)
1176
1177BITS 32
1178ALIGNCODE(16)
1179.thunk32:
1180 pop gs
1181 pop fs
1182 pop edi
1183 pop esi
1184 ret
1185
1186.not_in_long_mode:
1187 mov eax, VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE
1188 ret
1189ENDPROC SVMR0VMRun64
1190
1191 ;
1192 ; Do it a second time pretending we're a 64-bit host.
1193 ;
1194 ; This *HAS* to be done at the very end of the file so that we don't have
1195 ; to restore the macros afterwards. So, add new code *BEFORE* this mess.
1196 ;
1197 BITS 64
1198 %undef RT_ARCH_X86
1199 %define RT_ARCH_AMD64
1200 %undef ASM_CALL64_MSC
1201 %define ASM_CALL64_GCC
1202 %define xS 8
1203 %define xSP rsp
1204 %define xBP rbp
1205 %define xAX rax
1206 %define xBX rbx
1207 %define xCX rcx
1208 %define xDX rdx
1209 %define xDI rdi
1210 %define xSI rsi
1211 %define MY_NAME(name) name %+ _64
1212 %define MYPUSHAD MYPUSHAD64
1213 %define MYPOPAD MYPOPAD64
1214 %define MYPUSHSEGS MYPUSHSEGS64
1215 %define MYPOPSEGS MYPOPSEGS64
1216
1217 %include "HWACCMR0Mixed.mac"
1218%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL