; $Id: HMR0A.asm 49739 2013-12-02 13:59:40Z vboxsync $
;; @file
; HM - Ring-0 VMX, SVM world-switch and helper routines
;

;
; Copyright (C) 2006-2013 Oracle Corporation
;
; This file is part of VirtualBox Open Source Edition (OSE), as
; available from http://www.virtualbox.org. This file is free software;
; you can redistribute it and/or modify it under the terms of the GNU
; General Public License (GPL) as published by the Free Software
; Foundation, in version 2 as it comes in the "COPYING" file of the
; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
;

;*******************************************************************************
;*  Header Files                                                               *
;*******************************************************************************
%include "VBox/asmdefs.mac"
%include "VBox/err.mac"
%include "VBox/vmm/hm_vmx.mac"
%include "VBox/vmm/cpum.mac"
%include "iprt/x86.mac"
%include "HMInternal.mac"

%ifdef RT_OS_OS2 ;; @todo fix OMF support in yasm and kick nasm out completely.
 %macro vmwrite 2,
    int3
 %endmacro
 %define vmlaunch int3
 %define vmresume int3
 %define vmsave int3
 %define vmload int3
 %define vmrun int3
 %define clgi int3
 %define stgi int3
 %macro invlpga 2,
    int3
 %endmacro
%endif

;*******************************************************************************
;*  Defined Constants And Macros                                               *
;*******************************************************************************
%ifdef RT_ARCH_AMD64
 %define MAYBE_64_BIT
%endif
%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
 %define MAYBE_64_BIT
%else
 %ifdef RT_OS_DARWIN
  %ifdef RT_ARCH_AMD64
   ;;
   ; Load the NULL selector into DS, ES, FS and GS on 64-bit darwin so we don't
   ; risk loading a stale LDT value or something invalid.
   %define HM_64_BIT_USE_NULL_SEL
  %endif
 %endif
%endif

%ifdef RT_ARCH_AMD64
 %define VBOX_SKIP_RESTORE_SEG
%endif

;; The offset of the XMM registers in X86FXSTATE.
; Use define because I'm too lazy to convert the struct.
%define XMM_OFF_IN_X86FXSTATE   160
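; (160 = 0A0h is the architectural offset of XMM0 within the 512-byte
; FXSAVE/FXRSTOR image; XMM1..XMM15 follow at 16-byte intervals, which is
; why the wrapper routines below step through the area in 010h increments.)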

;; @def MYPUSHAD
; Macro generating an equivalent to pushad

;; @def MYPOPAD
; Macro generating an equivalent to popad

;; @def MYPUSHSEGS
; Macro saving all segment registers on the stack.
; @param 1  full width register name
; @param 2  16-bit register name for \a 1.

;; @def MYPOPSEGS
; Macro restoring all segment registers from the stack.
; @param 1  full width register name
; @param 2  16-bit register name for \a 1.

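; Note: the 64-bit MYPUSHAD/MYPOPAD variants below deliberately save only the
; registers the respective calling convention treats as callee-saved (rbx and
; r12-r15 in the SysV AMD64 ABI, plus rsi and rdi in the Microsoft x64 one);
; the volatile registers need no preserving across the world switch.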
%ifdef ASM_CALL64_GCC
 %macro MYPUSHAD64 0
    push    r15
    push    r14
    push    r13
    push    r12
    push    rbx
 %endmacro
 %macro MYPOPAD64 0
    pop     rbx
    pop     r12
    pop     r13
    pop     r14
    pop     r15
 %endmacro

%else ; ASM_CALL64_MSC
 %macro MYPUSHAD64 0
    push    r15
    push    r14
    push    r13
    push    r12
    push    rbx
    push    rsi
    push    rdi
 %endmacro
 %macro MYPOPAD64 0
    pop     rdi
    pop     rsi
    pop     rbx
    pop     r12
    pop     r13
    pop     r14
    pop     r15
 %endmacro
%endif

%ifdef VBOX_SKIP_RESTORE_SEG
 %macro MYPUSHSEGS64 2
 %endmacro

 %macro MYPOPSEGS64 2
 %endmacro
%else ; !VBOX_SKIP_RESTORE_SEG
 ; trashes rax, rdx & rcx
 %macro MYPUSHSEGS64 2
  %ifndef HM_64_BIT_USE_NULL_SEL
    mov     %2, es
    push    %1
    mov     %2, ds
    push    %1
  %endif

    ; Special case for FS: Windows and Linux either don't use it or restore it
    ; when leaving kernel mode; Solaris OTOH doesn't, so we must save and
    ; restore it ourselves.
    mov     ecx, MSR_K8_FS_BASE
    rdmsr
    push    rdx
    push    rax
  %ifndef HM_64_BIT_USE_NULL_SEL
    push    fs
  %endif

    ; Special case for GS: OSes typically use swapgs to reset the hidden base
    ; register for GS on entry into the kernel.  The same happens on exit.
    mov     ecx, MSR_K8_GS_BASE
    rdmsr
    push    rdx
    push    rax
  %ifndef HM_64_BIT_USE_NULL_SEL
    push    gs
  %endif
 %endmacro

 ; trashes rax, rdx & rcx
 %macro MYPOPSEGS64 2
    ; Note: do not step through this code with a debugger!
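    ; (Loading a segment register also reloads its hidden base, so between the
    ; selector loads below and the wrmsr that restores MSR_K8_GS_BASE, the
    ; kernel's GS base is gone; a debug trap taken in that window would run
    ; kernel code with a bogus GS base and would most likely take the box down.)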
  %ifndef HM_64_BIT_USE_NULL_SEL
    xor     eax, eax
    mov     ds, ax
    mov     es, ax
    mov     fs, ax
    mov     gs, ax
  %endif

  %ifndef HM_64_BIT_USE_NULL_SEL
    pop     gs
  %endif
    pop     rax
    pop     rdx
    mov     ecx, MSR_K8_GS_BASE
    wrmsr

  %ifndef HM_64_BIT_USE_NULL_SEL
    pop     fs
  %endif
    pop     rax
    pop     rdx
    mov     ecx, MSR_K8_FS_BASE
    wrmsr
    ; Now it's safe to step again.

  %ifndef HM_64_BIT_USE_NULL_SEL
    pop     %1
    mov     ds, %2
    pop     %1
    mov     es, %2
  %endif
 %endmacro
%endif ; VBOX_SKIP_RESTORE_SEG

%macro MYPUSHAD32 0
    pushad
%endmacro
%macro MYPOPAD32 0
    popad
%endmacro

%macro MYPUSHSEGS32 2
    push    ds
    push    es
    push    fs
    push    gs
%endmacro
%macro MYPOPSEGS32 2
    pop     gs
    pop     fs
    pop     es
    pop     ds
%endmacro


;*******************************************************************************
;*  External Symbols                                                           *
;*******************************************************************************
%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
extern NAME(SUPR0AbsIs64bit)
extern NAME(SUPR0Abs64bitKernelCS)
extern NAME(SUPR0Abs64bitKernelSS)
extern NAME(SUPR0Abs64bitKernelDS)
extern NAME(SUPR0AbsKernelCS)
%endif
%ifdef VBOX_WITH_KERNEL_USING_XMM
extern NAME(CPUMIsGuestFPUStateActive)
%endif


;*******************************************************************************
;*  Global Variables                                                           *
;*******************************************************************************
%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
BEGINDATA
;;
; Store the SUPR0AbsIs64bit absolute value here so we can cmp/test without
; needing to clobber a register. (This trick doesn't quite work for PE btw.
; but that's not relevant atm.)
GLOBALNAME g_fVMXIs64bitHost
    dd      NAME(SUPR0AbsIs64bit)
%endif


BEGINCODE

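; Note on the VBOX_WITH_HYBRID_32BIT_KERNEL thunking used throughout this file:
; when the 32-bit darwin kernel runs on a 64-bit capable machine, the routines
; below test g_fVMXIs64bitHost and, if set, switch into long mode with a far
; jump (hand-assembled as 'db 0xea' followed by a 16:32 pointer selecting the
; 64-bit kernel code segment SUPR0Abs64bitKernelCS), do the real work in the
; BITS 64 tail of the routine, and return through 'jmp far [.fpret wrt rip]'
; back into the 32-bit code segment.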
;/**
; * Restores host-state fields.
; *
; * @returns VBox status code
; * @param   f32RestoreHost  x86: [ebp + 08h]  msc: ecx  gcc: edi  RestoreHost flags.
; * @param   pRestoreHost    x86: [ebp + 0ch]  msc: rdx  gcc: rsi  Pointer to the RestoreHost struct.
; */
ALIGNCODE(16)
BEGINPROC VMXRestoreHostState
%ifdef RT_ARCH_AMD64
 %ifndef ASM_CALL64_GCC
    ; Use GCC's input registers since we'll be needing both rcx and rdx further
    ; down with the wrmsr instruction.  Use the R10 and R11 registers for
    ; saving RDI and RSI since MSC preserves the latter two registers.
    mov     r10, rdi
    mov     r11, rsi
    mov     rdi, rcx
    mov     rsi, rdx
 %endif

    test    edi, VMX_RESTORE_HOST_GDTR
    jz      .test_idtr
    lgdt    [rsi + VMXRESTOREHOST.HostGdtr]

.test_idtr:
    test    edi, VMX_RESTORE_HOST_IDTR
    jz      .test_ds
    lidt    [rsi + VMXRESTOREHOST.HostIdtr]

.test_ds:
    test    edi, VMX_RESTORE_HOST_SEL_DS
    jz      .test_es
    mov     ax, [rsi + VMXRESTOREHOST.uHostSelDS]
    mov     ds, eax

.test_es:
    test    edi, VMX_RESTORE_HOST_SEL_ES
    jz      .test_tr
    mov     ax, [rsi + VMXRESTOREHOST.uHostSelES]
    mov     es, eax

.test_tr:
    test    edi, VMX_RESTORE_HOST_SEL_TR
    jz      .test_fs
    ; When restoring the TR, we must first clear the busy flag or we'll end up faulting.
    mov     dx, [rsi + VMXRESTOREHOST.uHostSelTR]
    mov     ax, dx
    and     eax, X86_SEL_MASK_OFF_RPL                       ; Mask away TI and RPL bits leaving only the descriptor offset.
    add     rax, qword [rsi + VMXRESTOREHOST.HostGdtr + 2]  ; xAX <- descriptor offset + GDTR.pGdt.
    and     dword [rax + 4], ~RT_BIT(9)                     ; Clear the busy flag in TSS desc (bits 0-7=base, bit 9=busy bit).
    ltr     dx

.test_fs:
    ;
    ; When restoring the selector values for FS and GS, we'll temporarily trash
    ; the base address (at least the high 32 bits, but quite possibly the whole
    ; base address); the wrmsr will restore it correctly.  (VT-x actually
    ; restores the base correctly when leaving guest mode, but not the selector
    ; value, so there is little problem with interrupts being enabled prior to
    ; this restore job.)
    ; We'll disable ints once for both FS and GS as that's probably faster.
    ;
    test    edi, VMX_RESTORE_HOST_SEL_FS | VMX_RESTORE_HOST_SEL_GS
    jz      .restore_success
    pushfq
    cli                             ; (see above)

    test    edi, VMX_RESTORE_HOST_SEL_FS
    jz      .test_gs
    mov     ax, word [rsi + VMXRESTOREHOST.uHostSelFS]
    mov     fs, eax
    mov     eax, dword [rsi + VMXRESTOREHOST.uHostFSBase]       ; uHostFSBase - Lo
    mov     edx, dword [rsi + VMXRESTOREHOST.uHostFSBase + 4h]  ; uHostFSBase - Hi
    mov     ecx, MSR_K8_FS_BASE
    wrmsr

.test_gs:
    test    edi, VMX_RESTORE_HOST_SEL_GS
    jz      .restore_flags
    mov     ax, word [rsi + VMXRESTOREHOST.uHostSelGS]
    mov     gs, eax
    mov     eax, dword [rsi + VMXRESTOREHOST.uHostGSBase]       ; uHostGSBase - Lo
    mov     edx, dword [rsi + VMXRESTOREHOST.uHostGSBase + 4h]  ; uHostGSBase - Hi
    mov     ecx, MSR_K8_GS_BASE
    wrmsr

.restore_flags:
    popfq

.restore_success:
    mov     eax, VINF_SUCCESS
 %ifndef ASM_CALL64_GCC
    ; Restore RDI and RSI on MSC.
    mov     rdi, r10
    mov     rsi, r11
 %endif
%else  ; RT_ARCH_X86
    mov     eax, VERR_NOT_IMPLEMENTED
%endif
    ret
ENDPROC VMXRestoreHostState


;/**
; * Dispatches an NMI to the host.
; */
ALIGNCODE(16)
BEGINPROC VMXDispatchHostNmi
    int 2   ; NMI is always vector 2. The IDT[2] IRQ handler cannot be anything else. See Intel spec. 6.3.1 "External Interrupts".
    ret
ENDPROC VMXDispatchHostNmi


;/**
; * Executes VMWRITE, 64-bit value.
; *
; * @returns VBox status code
; * @param   idxField   x86: [ebp + 08h]  msc: rcx  gcc: rdi  VMCS index
; * @param   u64Data    x86: [ebp + 0ch]  msc: rdx  gcc: rsi  VM field value
; */
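; Note: the VMX instructions report failure through RFLAGS rather than a
; return value: CF=1 means VMfailInvalid (no current VMCS or an invalid VMCS
; pointer), ZF=1 means VMfailValid (the VMCS is fine but, e.g., the field
; encoding isn't), and both clear means success.  The jnc/jnz sequences in
; this and the following routines translate that convention into VBox status
; codes.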
ALIGNCODE(16)
BEGINPROC VMXWriteVmcs64
%ifdef RT_ARCH_AMD64
 %ifdef ASM_CALL64_GCC
    and     edi, 0ffffffffh
    xor     rax, rax
    vmwrite rdi, rsi
 %else
    and     ecx, 0ffffffffh
    xor     rax, rax
    vmwrite rcx, rdx
 %endif
%else ; RT_ARCH_X86
    mov     ecx, [esp + 4]          ; idxField
    lea     edx, [esp + 8]          ; &u64Data
 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
    cmp     byte [NAME(g_fVMXIs64bitHost)], 0
    jz      .legacy_mode
    db      0xea                    ; jmp far .sixtyfourbit_mode
    dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
.legacy_mode:
 %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
    vmwrite ecx, [edx]              ; low dword
    jz      .done
    jc      .done
    inc     ecx
    xor     eax, eax
    vmwrite ecx, [edx + 4]          ; high dword
.done:
%endif ; RT_ARCH_X86
    jnc     .valid_vmcs
    mov     eax, VERR_VMX_INVALID_VMCS_PTR
    ret
.valid_vmcs:
    jnz     .the_end
    mov     eax, VERR_VMX_INVALID_VMCS_FIELD
.the_end:
    ret

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
ALIGNCODE(16)
BITS 64
.sixtyfourbit_mode:
    and     edx, 0ffffffffh
    and     ecx, 0ffffffffh
    xor     eax, eax
    vmwrite rcx, [rdx]
    mov     r8d, VERR_VMX_INVALID_VMCS_FIELD
    cmovz   eax, r8d
    mov     r9d, VERR_VMX_INVALID_VMCS_PTR
    cmovc   eax, r9d
    jmp far [.fpret wrt rip]
.fpret:                             ; 16:32 Pointer to .the_end.
    dd      .the_end, NAME(SUPR0AbsKernelCS)
BITS 32
%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
ENDPROC VMXWriteVmcs64


;/**
; * Executes VMREAD, 64-bit value
; *
; * @returns VBox status code
; * @param   idxField   VMCS index
; * @param   pData      Where to store the VM field value
; */
;DECLASM(int) VMXReadVmcs64(uint32_t idxField, uint64_t *pData);
ALIGNCODE(16)
BEGINPROC VMXReadVmcs64
%ifdef RT_ARCH_AMD64
 %ifdef ASM_CALL64_GCC
    and     edi, 0ffffffffh
    xor     rax, rax
    vmread  [rsi], rdi
 %else
    and     ecx, 0ffffffffh
    xor     rax, rax
    vmread  [rdx], rcx
 %endif
%else ; RT_ARCH_X86
    mov     ecx, [esp + 4]          ; idxField
    mov     edx, [esp + 8]          ; pData
 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
    cmp     byte [NAME(g_fVMXIs64bitHost)], 0
    jz      .legacy_mode
    db      0xea                    ; jmp far .sixtyfourbit_mode
    dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
.legacy_mode:
 %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
    vmread  [edx], ecx              ; low dword
    jz      .done
    jc      .done
    inc     ecx
    xor     eax, eax
    vmread  [edx + 4], ecx          ; high dword
.done:
%endif ; RT_ARCH_X86
    jnc     .valid_vmcs
    mov     eax, VERR_VMX_INVALID_VMCS_PTR
    ret
.valid_vmcs:
    jnz     .the_end
    mov     eax, VERR_VMX_INVALID_VMCS_FIELD
.the_end:
    ret

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
ALIGNCODE(16)
BITS 64
.sixtyfourbit_mode:
    and     edx, 0ffffffffh
    and     ecx, 0ffffffffh
    xor     eax, eax
    vmread  [rdx], rcx
    mov     r8d, VERR_VMX_INVALID_VMCS_FIELD
    cmovz   eax, r8d
    mov     r9d, VERR_VMX_INVALID_VMCS_PTR
    cmovc   eax, r9d
    jmp far [.fpret wrt rip]
.fpret:                             ; 16:32 Pointer to .the_end.
    dd      .the_end, NAME(SUPR0AbsKernelCS)
BITS 32
%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
ENDPROC VMXReadVmcs64


;/**
; * Executes VMREAD, 32-bit value.
; *
; * @returns VBox status code
; * @param   idxField   VMCS index
; * @param   pu32Data   Where to store the VM field value
; */
;DECLASM(int) VMXReadVmcs32(uint32_t idxField, uint32_t *pu32Data);
ALIGNCODE(16)
BEGINPROC VMXReadVmcs32
%ifdef RT_ARCH_AMD64
 %ifdef ASM_CALL64_GCC
    and     edi, 0ffffffffh
    xor     rax, rax
    vmread  r10, rdi
    mov     [rsi], r10d
 %else
    and     ecx, 0ffffffffh
    xor     rax, rax
    vmread  r10, rcx
    mov     [rdx], r10d
 %endif
%else ; RT_ARCH_X86
    mov     ecx, [esp + 4]          ; idxField
    mov     edx, [esp + 8]          ; pu32Data
 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
    cmp     byte [NAME(g_fVMXIs64bitHost)], 0
    jz      .legacy_mode
    db      0xea                    ; jmp far .sixtyfourbit_mode
    dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
.legacy_mode:
 %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
    xor     eax, eax
    vmread  [edx], ecx
%endif ; RT_ARCH_X86
    jnc     .valid_vmcs
    mov     eax, VERR_VMX_INVALID_VMCS_PTR
    ret
.valid_vmcs:
    jnz     .the_end
    mov     eax, VERR_VMX_INVALID_VMCS_FIELD
.the_end:
    ret

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
ALIGNCODE(16)
BITS 64
.sixtyfourbit_mode:
    and     edx, 0ffffffffh
    and     ecx, 0ffffffffh
    xor     eax, eax
    vmread  r10, rcx
    mov     [rdx], r10d
    mov     r8d, VERR_VMX_INVALID_VMCS_FIELD
    cmovz   eax, r8d
    mov     r9d, VERR_VMX_INVALID_VMCS_PTR
    cmovc   eax, r9d
    jmp far [.fpret wrt rip]
.fpret:                             ; 16:32 Pointer to .the_end.
    dd      .the_end, NAME(SUPR0AbsKernelCS)
BITS 32
%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
ENDPROC VMXReadVmcs32


;/**
; * Executes VMWRITE, 32-bit value.
; *
; * @returns VBox status code
; * @param   idxField   VMCS index
; * @param   u32Data    The 32-bit value to write
; */
;DECLASM(int) VMXWriteVmcs32(uint32_t idxField, uint32_t u32Data);
ALIGNCODE(16)
BEGINPROC VMXWriteVmcs32
%ifdef RT_ARCH_AMD64
 %ifdef ASM_CALL64_GCC
    and     edi, 0ffffffffh
    and     esi, 0ffffffffh
    xor     rax, rax
    vmwrite rdi, rsi
 %else
    and     ecx, 0ffffffffh
    and     edx, 0ffffffffh
    xor     rax, rax
    vmwrite rcx, rdx
 %endif
%else ; RT_ARCH_X86
    mov     ecx, [esp + 4]          ; idxField
    mov     edx, [esp + 8]          ; u32Data
 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
    cmp     byte [NAME(g_fVMXIs64bitHost)], 0
    jz      .legacy_mode
    db      0xea                    ; jmp far .sixtyfourbit_mode
    dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
.legacy_mode:
 %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
    xor     eax, eax
    vmwrite ecx, edx
%endif ; RT_ARCH_X86
    jnc     .valid_vmcs
    mov     eax, VERR_VMX_INVALID_VMCS_PTR
    ret
.valid_vmcs:
    jnz     .the_end
    mov     eax, VERR_VMX_INVALID_VMCS_FIELD
.the_end:
    ret

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
ALIGNCODE(16)
BITS 64
.sixtyfourbit_mode:
    and     edx, 0ffffffffh
    and     ecx, 0ffffffffh
    xor     eax, eax
    vmwrite rcx, rdx
    mov     r8d, VERR_VMX_INVALID_VMCS_FIELD
    cmovz   eax, r8d
    mov     r9d, VERR_VMX_INVALID_VMCS_PTR
    cmovc   eax, r9d
    jmp far [.fpret wrt rip]
.fpret:                             ; 16:32 Pointer to .the_end.
    dd      .the_end, NAME(SUPR0AbsKernelCS)
BITS 32
%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
ENDPROC VMXWriteVmcs32


;/**
; * Executes VMXON
; *
; * @returns VBox status code
; * @param   HCPhysVMXOn   Physical address of VMXON structure
; */
;DECLASM(int) VMXEnable(RTHCPHYS HCPhysVMXOn);
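; Note: vmxon (like vmclear and vmptrld below) takes its 64-bit physical
; address operand from memory, so on AMD64 the register argument is pushed
; onto the stack and addressed via [rsp]; the x86 version can use the
; argument directly where it already lives on the stack.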
BEGINPROC VMXEnable
%ifdef RT_ARCH_AMD64
    xor     rax, rax
 %ifdef ASM_CALL64_GCC
    push    rdi
 %else
    push    rcx
 %endif
    vmxon   [rsp]
%else ; RT_ARCH_X86
 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
    cmp     byte [NAME(g_fVMXIs64bitHost)], 0
    jz      .legacy_mode
    db      0xea                    ; jmp far .sixtyfourbit_mode
    dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
.legacy_mode:
 %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
    xor     eax, eax
    vmxon   [esp + 4]
%endif ; RT_ARCH_X86
    jnc     .good
    mov     eax, VERR_VMX_INVALID_VMXON_PTR
    jmp     .the_end

.good:
    jnz     .the_end
    mov     eax, VERR_VMX_VMXON_FAILED

.the_end:
%ifdef RT_ARCH_AMD64
    add     rsp, 8
%endif
    ret

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
ALIGNCODE(16)
BITS 64
.sixtyfourbit_mode:
    lea     rdx, [rsp + 4]          ; &HCPhysVMXOn.
    and     edx, 0ffffffffh
    xor     eax, eax
    vmxon   [rdx]
    mov     r8d, VERR_VMX_VMXON_FAILED
    cmovz   eax, r8d
    mov     r9d, VERR_VMX_INVALID_VMXON_PTR
    cmovc   eax, r9d
    jmp far [.fpret wrt rip]
.fpret:                             ; 16:32 Pointer to .the_end.
    dd      .the_end, NAME(SUPR0AbsKernelCS)
BITS 32
%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
ENDPROC VMXEnable


;/**
; * Executes VMXOFF
; */
;DECLASM(void) VMXDisable(void);
BEGINPROC VMXDisable
%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
    cmp     byte [NAME(g_fVMXIs64bitHost)], 0
    jz      .legacy_mode
    db      0xea                    ; jmp far .sixtyfourbit_mode
    dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
.legacy_mode:
%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
    vmxoff
.the_end:
    ret

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
ALIGNCODE(16)
BITS 64
.sixtyfourbit_mode:
    vmxoff
    jmp far [.fpret wrt rip]
.fpret:                             ; 16:32 Pointer to .the_end.
    dd      .the_end, NAME(SUPR0AbsKernelCS)
BITS 32
%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
ENDPROC VMXDisable


;/**
; * Executes VMCLEAR
; *
; * @returns VBox status code
; * @param   HCPhysVmcs   Physical address of VM control structure
; */
;DECLASM(int) VMXClearVmcs(RTHCPHYS HCPhysVmcs);
ALIGNCODE(16)
BEGINPROC VMXClearVmcs
%ifdef RT_ARCH_AMD64
    xor     rax, rax
 %ifdef ASM_CALL64_GCC
    push    rdi
 %else
    push    rcx
 %endif
    vmclear [rsp]
%else ; RT_ARCH_X86
 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
    cmp     byte [NAME(g_fVMXIs64bitHost)], 0
    jz      .legacy_mode
    db      0xea                    ; jmp far .sixtyfourbit_mode
    dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
.legacy_mode:
 %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
    xor     eax, eax
    vmclear [esp + 4]
%endif ; RT_ARCH_X86
    jnc     .the_end
    mov     eax, VERR_VMX_INVALID_VMCS_PTR
.the_end:
%ifdef RT_ARCH_AMD64
    add     rsp, 8
%endif
    ret

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
ALIGNCODE(16)
BITS 64
.sixtyfourbit_mode:
    lea     rdx, [rsp + 4]          ; &HCPhysVmcs
    and     edx, 0ffffffffh
    xor     eax, eax
    vmclear [rdx]
    mov     r9d, VERR_VMX_INVALID_VMCS_PTR
    cmovc   eax, r9d
    jmp far [.fpret wrt rip]
.fpret:                             ; 16:32 Pointer to .the_end.
    dd      .the_end, NAME(SUPR0AbsKernelCS)
BITS 32
%endif
ENDPROC VMXClearVmcs


;/**
; * Executes VMPTRLD
; *
; * @returns VBox status code
; * @param   HCPhysVmcs   Physical address of VMCS structure
; */
;DECLASM(int) VMXActivateVmcs(RTHCPHYS HCPhysVmcs);
ALIGNCODE(16)
BEGINPROC VMXActivateVmcs
%ifdef RT_ARCH_AMD64
    xor     rax, rax
 %ifdef ASM_CALL64_GCC
    push    rdi
 %else
    push    rcx
 %endif
    vmptrld [rsp]
%else
 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
    cmp     byte [NAME(g_fVMXIs64bitHost)], 0
    jz      .legacy_mode
    db      0xea                    ; jmp far .sixtyfourbit_mode
    dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
.legacy_mode:
 %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
    xor     eax, eax
    vmptrld [esp + 4]
%endif
    jnc     .the_end
    mov     eax, VERR_VMX_INVALID_VMCS_PTR
.the_end:
%ifdef RT_ARCH_AMD64
    add     rsp, 8
%endif
    ret

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
ALIGNCODE(16)
BITS 64
.sixtyfourbit_mode:
    lea     rdx, [rsp + 4]          ; &HCPhysVmcs
    and     edx, 0ffffffffh
    xor     eax, eax
    vmptrld [rdx]
    mov     r9d, VERR_VMX_INVALID_VMCS_PTR
    cmovc   eax, r9d
    jmp far [.fpret wrt rip]
.fpret:                             ; 16:32 Pointer to .the_end.
    dd      .the_end, NAME(SUPR0AbsKernelCS)
BITS 32
%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
ENDPROC VMXActivateVmcs


;/**
; * Executes VMPTRST
; *
; * @returns VBox status code
; * @param   pVMCS   [esp + 04h]  gcc: rdi  msc: rcx   Address that will receive the current VMCS pointer
; */
;DECLASM(int) VMXGetActivatedVmcs(RTHCPHYS *pVMCS);
BEGINPROC VMXGetActivatedVmcs
%ifdef RT_OS_OS2
    mov     eax, VERR_NOT_SUPPORTED
    ret
%else
 %ifdef RT_ARCH_AMD64
  %ifdef ASM_CALL64_GCC
    vmptrst qword [rdi]
  %else
    vmptrst qword [rcx]
  %endif
 %else
  %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
    cmp     byte [NAME(g_fVMXIs64bitHost)], 0
    jz      .legacy_mode
    db      0xea                    ; jmp far .sixtyfourbit_mode
    dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
.legacy_mode:
  %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
    vmptrst qword [esp+04h]
 %endif
    xor     eax, eax
.the_end:
    ret

 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
ALIGNCODE(16)
BITS 64
.sixtyfourbit_mode:
    lea     rdx, [rsp + 4]          ; &HCPhysVmcs
    and     edx, 0ffffffffh
    vmptrst qword [rdx]
    xor     eax, eax
    jmp far [.fpret wrt rip]
.fpret:                             ; 16:32 Pointer to .the_end.
    dd      .the_end, NAME(SUPR0AbsKernelCS)
BITS 32
 %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
%endif
ENDPROC VMXGetActivatedVmcs

;/**
; * Invalidates EPT translations using INVEPT.
; *
; * @param   enmFlush      msc: ecx  gcc: edi  x86: [esp+04]  Type of flush
; * @param   pDescriptor   msc: edx  gcc: esi  x86: [esp+08]  Descriptor pointer
; */
;DECLASM(int) VMXR0InvEPT(VMX_FLUSH enmFlush, uint64_t *pDescriptor);
BEGINPROC VMXR0InvEPT
%ifdef RT_ARCH_AMD64
 %ifdef ASM_CALL64_GCC
    and     edi, 0ffffffffh
    xor     rax, rax
;    invept  rdi, qword [rsi]
    DB      0x66, 0x0F, 0x38, 0x80, 0x3E
 %else
    and     ecx, 0ffffffffh
    xor     rax, rax
;    invept  rcx, qword [rdx]
    DB      0x66, 0x0F, 0x38, 0x80, 0xA
 %endif
%else
 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
    cmp     byte [NAME(g_fVMXIs64bitHost)], 0
    jz      .legacy_mode
    db      0xea                    ; jmp far .sixtyfourbit_mode
    dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
.legacy_mode:
 %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
    mov     ecx, [esp + 4]
    mov     edx, [esp + 8]
    xor     eax, eax
;    invept  ecx, qword [edx]
    DB      0x66, 0x0F, 0x38, 0x80, 0xA
%endif
    jnc     .valid_vmcs
    mov     eax, VERR_VMX_INVALID_VMCS_PTR
    ret
.valid_vmcs:
    jnz     .the_end
    mov     eax, VERR_INVALID_PARAMETER
.the_end:
    ret

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
ALIGNCODE(16)
BITS 64
.sixtyfourbit_mode:
    and     esp, 0ffffffffh
    mov     ecx, [rsp + 4]          ; enmFlush
    mov     edx, [rsp + 8]          ; pDescriptor
    xor     eax, eax
;    invept  rcx, qword [rdx]
    DB      0x66, 0x0F, 0x38, 0x80, 0xA
    mov     r8d, VERR_INVALID_PARAMETER
    cmovz   eax, r8d
    mov     r9d, VERR_VMX_INVALID_VMCS_PTR
    cmovc   eax, r9d
    jmp far [.fpret wrt rip]
.fpret:                             ; 16:32 Pointer to .the_end.
    dd      .the_end, NAME(SUPR0AbsKernelCS)
BITS 32
%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
ENDPROC VMXR0InvEPT


;/**
; * Invalidates VPID-tagged TLB entries using INVVPID.
; *
; * @param   enmFlush      msc: ecx  gcc: edi  x86: [esp+04]  Type of flush
; * @param   pDescriptor   msc: edx  gcc: esi  x86: [esp+08]  Descriptor pointer
; */
;DECLASM(int) VMXR0InvVPID(VMX_FLUSH enmFlush, uint64_t *pDescriptor);
BEGINPROC VMXR0InvVPID
%ifdef RT_ARCH_AMD64
 %ifdef ASM_CALL64_GCC
    and     edi, 0ffffffffh
    xor     rax, rax
;    invvpid rdi, qword [rsi]
    DB      0x66, 0x0F, 0x38, 0x81, 0x3E
 %else
    and     ecx, 0ffffffffh
    xor     rax, rax
;    invvpid rcx, qword [rdx]
    DB      0x66, 0x0F, 0x38, 0x81, 0xA
 %endif
%else
 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
    cmp     byte [NAME(g_fVMXIs64bitHost)], 0
    jz      .legacy_mode
    db      0xea                    ; jmp far .sixtyfourbit_mode
    dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
.legacy_mode:
 %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
    mov     ecx, [esp + 4]
    mov     edx, [esp + 8]
    xor     eax, eax
;    invvpid ecx, qword [edx]
    DB      0x66, 0x0F, 0x38, 0x81, 0xA
%endif
    jnc     .valid_vmcs
    mov     eax, VERR_VMX_INVALID_VMCS_PTR
    ret
.valid_vmcs:
    jnz     .the_end
    mov     eax, VERR_INVALID_PARAMETER
.the_end:
    ret

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
ALIGNCODE(16)
BITS 64
.sixtyfourbit_mode:
    and     esp, 0ffffffffh
    mov     ecx, [rsp + 4]          ; enmFlush
    mov     edx, [rsp + 8]          ; pDescriptor
    xor     eax, eax
;    invvpid rcx, qword [rdx]
    DB      0x66, 0x0F, 0x38, 0x81, 0xA
    mov     r8d, VERR_INVALID_PARAMETER
    cmovz   eax, r8d
    mov     r9d, VERR_VMX_INVALID_VMCS_PTR
    cmovc   eax, r9d
    jmp far [.fpret wrt rip]
.fpret:                             ; 16:32 Pointer to .the_end.
    dd      .the_end, NAME(SUPR0AbsKernelCS)
BITS 32
%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
ENDPROC VMXR0InvVPID


%if GC_ARCH_BITS == 64
;;
; Executes INVLPGA
;
; @param   pPageGC   msc: rcx  gcc: rdi  x86: [esp+04]  Virtual page to invalidate
; @param   uASID     msc: rdx  gcc: rsi  x86: [esp+0C]  Tagged TLB id
;
;DECLASM(void) SVMR0InvlpgA(RTGCPTR pPageGC, uint32_t uASID);
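; (INVLPGA is AMD-V's tagged TLB invalidation: it flushes the TLB mapping for
; the virtual address in rAX within the address space identified by the ASID
; in ECX, hence the register shuffling below.)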
BEGINPROC SVMR0InvlpgA
%ifdef RT_ARCH_AMD64
 %ifdef ASM_CALL64_GCC
    mov     rax, rdi
    mov     rcx, rsi
 %else
    mov     rax, rcx
    mov     rcx, rdx
 %endif
%else
    mov     eax, [esp + 4]
    mov     ecx, [esp + 0Ch]
%endif
    invlpga [xAX], ecx
    ret
ENDPROC SVMR0InvlpgA

%else ; GC_ARCH_BITS != 64
;;
; Executes INVLPGA
;
; @param   pPageGC   msc: ecx  gcc: edi  x86: [esp+04]  Virtual page to invalidate
; @param   uASID     msc: edx  gcc: esi  x86: [esp+08]  Tagged TLB id
;
;DECLASM(void) SVMR0InvlpgA(RTGCPTR pPageGC, uint32_t uASID);
BEGINPROC SVMR0InvlpgA
%ifdef RT_ARCH_AMD64
 %ifdef ASM_CALL64_GCC
    movzx   rax, edi
    mov     ecx, esi
 %else
    ; from http://www.cs.cmu.edu/~fp/courses/15213-s06/misc/asm64-handout.pdf:
    ; ``Perhaps unexpectedly, instructions that move or generate 32-bit register
    ;   values also set the upper 32 bits of the register to zero. Consequently
    ;   there is no need for an instruction movzlq.''
    mov     eax, ecx
    mov     ecx, edx
 %endif
%else
    mov     eax, [esp + 4]
    mov     ecx, [esp + 8]
%endif
    invlpga [xAX], ecx
    ret
ENDPROC SVMR0InvlpgA

%endif ; GC_ARCH_BITS != 64

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL

;/**
; * Gets 64-bit GDTR and IDTR on darwin.
; * @param   pGdtr   Where to store the 64-bit GDTR.
; * @param   pIdtr   Where to store the 64-bit IDTR.
; */
;DECLASM(void) HMR0Get64bitGdtrAndIdtr(PX86XDTR64 pGdtr, PX86XDTR64 pIdtr);
ALIGNCODE(16)
BEGINPROC HMR0Get64bitGdtrAndIdtr
    db      0xea                    ; jmp far .sixtyfourbit_mode
    dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
.the_end:
    ret

ALIGNCODE(16)
BITS 64
.sixtyfourbit_mode:
    and     esp, 0ffffffffh
    mov     ecx, [rsp + 4]          ; pGdtr
    mov     edx, [rsp + 8]          ; pIdtr
    sgdt    [rcx]
    sidt    [rdx]
    jmp far [.fpret wrt rip]
.fpret:                             ; 16:32 Pointer to .the_end.
    dd      .the_end, NAME(SUPR0AbsKernelCS)
BITS 32
ENDPROC HMR0Get64bitGdtrAndIdtr


;/**
; * Gets 64-bit CR3 on darwin.
; * @returns CR3
; */
;DECLASM(uint64_t) HMR0Get64bitCR3(void);
ALIGNCODE(16)
BEGINPROC HMR0Get64bitCR3
    db      0xea                    ; jmp far .sixtyfourbit_mode
    dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
.the_end:
    ret

ALIGNCODE(16)
BITS 64
.sixtyfourbit_mode:
    mov     rax, cr3
    mov     rdx, rax
    shr     rdx, 32
    jmp far [.fpret wrt rip]
.fpret:                             ; 16:32 Pointer to .the_end.
    dd      .the_end, NAME(SUPR0AbsKernelCS)
BITS 32
ENDPROC HMR0Get64bitCR3

%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL

%ifdef VBOX_WITH_KERNEL_USING_XMM

;;
; Wrapper around vmx.pfnStartVM that preserves the host XMM registers and
; loads the guest ones when necessary.
;
; @cproto DECLASM(int) HMR0VMXStartVMWrapXMM(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu, PFNHMVMXSTARTVM pfnStartVM);
;
; @returns eax
;
; @param   fResumeVM    msc: rcx
; @param   pCtx         msc: rdx
; @param   pVMCSCache   msc: r8
; @param   pVM          msc: r9
; @param   pVCpu        msc: [rbp+30h]
; @param   pfnStartVM   msc: [rbp+38h]
;
; @remarks This is essentially the same code as HMR0SVMRunWrapXMM, only the parameters differ a little bit.
;
; ASSUMING 64-bit and windows for now.
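; Note: in the Microsoft x64 calling convention xmm6-xmm15 are callee-saved
; (nonvolatile) while xmm0-xmm5 are scratch; that is why only xmm6 and up are
; spilled to the stack frame below, yet the full xmm0-xmm15 set is swapped
; with the guest state around the call.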
ALIGNCODE(16)
BEGINPROC HMR0VMXStartVMWrapXMM
    push    xBP
    mov     xBP, xSP
    sub     xSP, 0a0h + 040h        ; Don't bother optimizing the frame size.

    ; Spill the input parameters.
    mov     [xBP + 010h], rcx       ; fResumeVM
    mov     [xBP + 018h], rdx       ; pCtx
    mov     [xBP + 020h], r8        ; pVMCSCache
    mov     [xBP + 028h], r9        ; pVM
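    ; (The spill slots at [xBP + 10h..28h] are the 32-byte register "home area"
    ; the Windows x64 convention reserves above the return address; similarly,
    ; pVCpu is handed on to the callee below by storing it at [xSP + 20h], the
    ; stack slot for a fifth argument.)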

    ; Ask CPUM whether we've started using the FPU yet.
    mov     rcx, [xBP + 30h]        ; pVCpu
    call    NAME(CPUMIsGuestFPUStateActive)
    test    al, al
    jnz     .guest_fpu_state_active

    ; No need to mess with XMM registers; just call the start routine and return.
    mov     r11, [xBP + 38h]        ; pfnStartVM
    mov     r10, [xBP + 30h]        ; pVCpu
    mov     [xSP + 020h], r10
    mov     rcx, [xBP + 010h]       ; fResumeVM
    mov     rdx, [xBP + 018h]       ; pCtx
    mov     r8,  [xBP + 020h]       ; pVMCSCache
    mov     r9,  [xBP + 028h]       ; pVM
    call    r11

    leave
    ret

ALIGNCODE(8)
.guest_fpu_state_active:
    ; Save the host XMM registers.
    movdqa  [rsp + 040h + 000h], xmm6
    movdqa  [rsp + 040h + 010h], xmm7
    movdqa  [rsp + 040h + 020h], xmm8
    movdqa  [rsp + 040h + 030h], xmm9
    movdqa  [rsp + 040h + 040h], xmm10
    movdqa  [rsp + 040h + 050h], xmm11
    movdqa  [rsp + 040h + 060h], xmm12
    movdqa  [rsp + 040h + 070h], xmm13
    movdqa  [rsp + 040h + 080h], xmm14
    movdqa  [rsp + 040h + 090h], xmm15

    ; Load the full guest XMM register state.
    mov     r10, [xBP + 018h]       ; pCtx
    lea     r10, [r10 + XMM_OFF_IN_X86FXSTATE]
    movdqa  xmm0,  [r10 + 000h]
    movdqa  xmm1,  [r10 + 010h]
    movdqa  xmm2,  [r10 + 020h]
    movdqa  xmm3,  [r10 + 030h]
    movdqa  xmm4,  [r10 + 040h]
    movdqa  xmm5,  [r10 + 050h]
    movdqa  xmm6,  [r10 + 060h]
    movdqa  xmm7,  [r10 + 070h]
    movdqa  xmm8,  [r10 + 080h]
    movdqa  xmm9,  [r10 + 090h]
    movdqa  xmm10, [r10 + 0a0h]
    movdqa  xmm11, [r10 + 0b0h]
    movdqa  xmm12, [r10 + 0c0h]
    movdqa  xmm13, [r10 + 0d0h]
    movdqa  xmm14, [r10 + 0e0h]
    movdqa  xmm15, [r10 + 0f0h]

    ; Make the call (same as in the other case).
    mov     r11, [xBP + 38h]        ; pfnStartVM
    mov     r10, [xBP + 30h]        ; pVCpu
    mov     [xSP + 020h], r10
    mov     rcx, [xBP + 010h]       ; fResumeVM
    mov     rdx, [xBP + 018h]       ; pCtx
    mov     r8,  [xBP + 020h]       ; pVMCSCache
    mov     r9,  [xBP + 028h]       ; pVM
    call    r11

    ; Save the guest XMM registers.
    mov     r10, [xBP + 018h]       ; pCtx
    lea     r10, [r10 + XMM_OFF_IN_X86FXSTATE]
    movdqa  [r10 + 000h], xmm0
    movdqa  [r10 + 010h], xmm1
    movdqa  [r10 + 020h], xmm2
    movdqa  [r10 + 030h], xmm3
    movdqa  [r10 + 040h], xmm4
    movdqa  [r10 + 050h], xmm5
    movdqa  [r10 + 060h], xmm6
    movdqa  [r10 + 070h], xmm7
    movdqa  [r10 + 080h], xmm8
    movdqa  [r10 + 090h], xmm9
    movdqa  [r10 + 0a0h], xmm10
    movdqa  [r10 + 0b0h], xmm11
    movdqa  [r10 + 0c0h], xmm12
    movdqa  [r10 + 0d0h], xmm13
    movdqa  [r10 + 0e0h], xmm14
    movdqa  [r10 + 0f0h], xmm15

    ; Load the host XMM registers.
    movdqa  xmm6,  [rsp + 040h + 000h]
    movdqa  xmm7,  [rsp + 040h + 010h]
    movdqa  xmm8,  [rsp + 040h + 020h]
    movdqa  xmm9,  [rsp + 040h + 030h]
    movdqa  xmm10, [rsp + 040h + 040h]
    movdqa  xmm11, [rsp + 040h + 050h]
    movdqa  xmm12, [rsp + 040h + 060h]
    movdqa  xmm13, [rsp + 040h + 070h]
    movdqa  xmm14, [rsp + 040h + 080h]
    movdqa  xmm15, [rsp + 040h + 090h]
    leave
    ret
ENDPROC HMR0VMXStartVMWrapXMM

;;
; Wrapper around svm.pfnVMRun that preserves the host XMM registers and
; loads the guest ones when necessary.
;
; @cproto DECLASM(int) HMR0SVMRunWrapXMM(RTHCPHYS pVMCBHostPhys, RTHCPHYS pVMCBPhys, PCPUMCTX pCtx, PVM pVM, PVMCPU pVCpu, PFNHMSVMVMRUN pfnVMRun);
;
; @returns eax
;
; @param   pVMCBHostPhys   msc: rcx
; @param   pVMCBPhys       msc: rdx
; @param   pCtx            msc: r8
; @param   pVM             msc: r9
; @param   pVCpu           msc: [rbp+30h]
; @param   pfnVMRun        msc: [rbp+38h]
;
; @remarks This is essentially the same code as HMR0VMXStartVMWrapXMM, only the parameters differ a little bit.
;
; ASSUMING 64-bit and windows for now.
ALIGNCODE(16)
BEGINPROC HMR0SVMRunWrapXMM
    push    xBP
    mov     xBP, xSP
    sub     xSP, 0a0h + 040h        ; Don't bother optimizing the frame size.

    ; Spill the input parameters.
    mov     [xBP + 010h], rcx       ; pVMCBHostPhys
    mov     [xBP + 018h], rdx       ; pVMCBPhys
    mov     [xBP + 020h], r8        ; pCtx
    mov     [xBP + 028h], r9        ; pVM

    ; Ask CPUM whether we've started using the FPU yet.
    mov     rcx, [xBP + 30h]        ; pVCpu
    call    NAME(CPUMIsGuestFPUStateActive)
    test    al, al
    jnz     .guest_fpu_state_active

    ; No need to mess with XMM registers; just call the start routine and return.
    mov     r11, [xBP + 38h]        ; pfnVMRun
    mov     r10, [xBP + 30h]        ; pVCpu
    mov     [xSP + 020h], r10
    mov     rcx, [xBP + 010h]       ; pVMCBHostPhys
    mov     rdx, [xBP + 018h]       ; pVMCBPhys
    mov     r8,  [xBP + 020h]       ; pCtx
    mov     r9,  [xBP + 028h]       ; pVM
    call    r11

    leave
    ret

ALIGNCODE(8)
.guest_fpu_state_active:
    ; Save the host XMM registers.
    movdqa  [rsp + 040h + 000h], xmm6
    movdqa  [rsp + 040h + 010h], xmm7
    movdqa  [rsp + 040h + 020h], xmm8
    movdqa  [rsp + 040h + 030h], xmm9
    movdqa  [rsp + 040h + 040h], xmm10
    movdqa  [rsp + 040h + 050h], xmm11
    movdqa  [rsp + 040h + 060h], xmm12
    movdqa  [rsp + 040h + 070h], xmm13
    movdqa  [rsp + 040h + 080h], xmm14
    movdqa  [rsp + 040h + 090h], xmm15

    ; Load the full guest XMM register state.
    mov     r10, [xBP + 020h]       ; pCtx
    lea     r10, [r10 + XMM_OFF_IN_X86FXSTATE]
    movdqa  xmm0,  [r10 + 000h]
    movdqa  xmm1,  [r10 + 010h]
    movdqa  xmm2,  [r10 + 020h]
    movdqa  xmm3,  [r10 + 030h]
    movdqa  xmm4,  [r10 + 040h]
    movdqa  xmm5,  [r10 + 050h]
    movdqa  xmm6,  [r10 + 060h]
    movdqa  xmm7,  [r10 + 070h]
    movdqa  xmm8,  [r10 + 080h]
    movdqa  xmm9,  [r10 + 090h]
    movdqa  xmm10, [r10 + 0a0h]
    movdqa  xmm11, [r10 + 0b0h]
    movdqa  xmm12, [r10 + 0c0h]
    movdqa  xmm13, [r10 + 0d0h]
    movdqa  xmm14, [r10 + 0e0h]
    movdqa  xmm15, [r10 + 0f0h]

    ; Make the call (same as in the other case).
    mov     r11, [xBP + 38h]        ; pfnVMRun
    mov     r10, [xBP + 30h]        ; pVCpu
    mov     [xSP + 020h], r10
    mov     rcx, [xBP + 010h]       ; pVMCBHostPhys
    mov     rdx, [xBP + 018h]       ; pVMCBPhys
    mov     r8,  [xBP + 020h]       ; pCtx
    mov     r9,  [xBP + 028h]       ; pVM
    call    r11

    ; Save the guest XMM registers.
    mov     r10, [xBP + 020h]       ; pCtx
    lea     r10, [r10 + XMM_OFF_IN_X86FXSTATE]
    movdqa  [r10 + 000h], xmm0
    movdqa  [r10 + 010h], xmm1
    movdqa  [r10 + 020h], xmm2
    movdqa  [r10 + 030h], xmm3
    movdqa  [r10 + 040h], xmm4
    movdqa  [r10 + 050h], xmm5
    movdqa  [r10 + 060h], xmm6
    movdqa  [r10 + 070h], xmm7
    movdqa  [r10 + 080h], xmm8
    movdqa  [r10 + 090h], xmm9
    movdqa  [r10 + 0a0h], xmm10
    movdqa  [r10 + 0b0h], xmm11
    movdqa  [r10 + 0c0h], xmm12
    movdqa  [r10 + 0d0h], xmm13
    movdqa  [r10 + 0e0h], xmm14
    movdqa  [r10 + 0f0h], xmm15

    ; Load the host XMM registers.
    movdqa  xmm6,  [rsp + 040h + 000h]
    movdqa  xmm7,  [rsp + 040h + 010h]
    movdqa  xmm8,  [rsp + 040h + 020h]
    movdqa  xmm9,  [rsp + 040h + 030h]
    movdqa  xmm10, [rsp + 040h + 040h]
    movdqa  xmm11, [rsp + 040h + 050h]
    movdqa  xmm12, [rsp + 040h + 060h]
    movdqa  xmm13, [rsp + 040h + 070h]
    movdqa  xmm14, [rsp + 040h + 080h]
    movdqa  xmm15, [rsp + 040h + 090h]
    leave
    ret
ENDPROC HMR0SVMRunWrapXMM

%endif ; VBOX_WITH_KERNEL_USING_XMM

;
; The default setup of the StartVM routines.
;
%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
 %define MY_NAME(name)  name %+ _32
%else
 %define MY_NAME(name)  name
%endif
%ifdef RT_ARCH_AMD64
 %define MYPUSHAD       MYPUSHAD64
 %define MYPOPAD        MYPOPAD64
 %define MYPUSHSEGS     MYPUSHSEGS64
 %define MYPOPSEGS      MYPOPSEGS64
%else
 %define MYPUSHAD       MYPUSHAD32
 %define MYPOPAD        MYPOPAD32
 %define MYPUSHSEGS     MYPUSHSEGS32
 %define MYPOPSEGS      MYPOPSEGS32
%endif

%include "HMR0Mixed.mac"


%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
;
; Write the wrapper procedures.
;
; These routines are probably being too paranoid about selector
; restoring, but better safe than sorry...
;

; DECLASM(int) VMXR0StartVM32(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache /*, PVM pVM, PVMCPU pVCpu*/);
ALIGNCODE(16)
BEGINPROC VMXR0StartVM32
    cmp     byte [NAME(g_fVMXIs64bitHost)], 0
    je      near NAME(VMXR0StartVM32_32)

    ; stack frame
    push    esi
    push    edi
    push    fs
    push    gs

    ; jmp far .thunk64
    db      0xea
    dd      .thunk64, NAME(SUPR0Abs64bitKernelCS)

ALIGNCODE(16)
BITS 64
.thunk64:
    sub     esp, 20h
    mov     edi, [rsp + 20h + 14h]  ; fResume
    mov     esi, [rsp + 20h + 18h]  ; pCtx
    mov     edx, [rsp + 20h + 1Ch]  ; pCache
    call    NAME(VMXR0StartVM32_64)
    add     esp, 20h
    jmp far [.fpthunk32 wrt rip]
.fpthunk32:                         ; 16:32 Pointer to .thunk32.
    dd      .thunk32, NAME(SUPR0AbsKernelCS)

BITS 32
ALIGNCODE(16)
.thunk32:
    pop     gs
    pop     fs
    pop     edi
    pop     esi
    ret
ENDPROC VMXR0StartVM32


; DECLASM(int) VMXR0StartVM64(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache /*, PVM pVM, PVMCPU pVCpu*/);
ALIGNCODE(16)
BEGINPROC VMXR0StartVM64
    cmp     byte [NAME(g_fVMXIs64bitHost)], 0
    je      .not_in_long_mode

    ; stack frame
    push    esi
    push    edi
    push    fs
    push    gs

    ; jmp far .thunk64
    db      0xea
    dd      .thunk64, NAME(SUPR0Abs64bitKernelCS)

ALIGNCODE(16)
BITS 64
.thunk64:
    sub     esp, 20h
    mov     edi, [rsp + 20h + 14h]  ; fResume
    mov     esi, [rsp + 20h + 18h]  ; pCtx
    mov     edx, [rsp + 20h + 1Ch]  ; pCache
    call    NAME(VMXR0StartVM64_64)
    add     esp, 20h
    jmp far [.fpthunk32 wrt rip]
.fpthunk32:                         ; 16:32 Pointer to .thunk32.
    dd      .thunk32, NAME(SUPR0AbsKernelCS)

BITS 32
ALIGNCODE(16)
.thunk32:
    pop     gs
    pop     fs
    pop     edi
    pop     esi
    ret

.not_in_long_mode:
    mov     eax, VERR_PGM_UNSUPPORTED_HOST_PAGING_MODE
    ret
ENDPROC VMXR0StartVM64

;DECLASM(int) SVMR0VMRun(RTHCPHYS pVMCBHostPhys, RTHCPHYS pVMCBPhys, PCPUMCTX pCtx /*, PVM pVM, PVMCPU pVCpu*/);
ALIGNCODE(16)
BEGINPROC SVMR0VMRun
    cmp     byte [NAME(g_fVMXIs64bitHost)], 0
    je      near NAME(SVMR0VMRun_32)

    ; stack frame
    push    esi
    push    edi
    push    fs
    push    gs

    ; jmp far .thunk64
    db      0xea
    dd      .thunk64, NAME(SUPR0Abs64bitKernelCS)

ALIGNCODE(16)
BITS 64
.thunk64:
    sub     esp, 20h
    mov     rdi, [rsp + 20h + 14h]  ; pVMCBHostPhys
    mov     rsi, [rsp + 20h + 1Ch]  ; pVMCBPhys
    mov     edx, [rsp + 20h + 24h]  ; pCtx
    call    NAME(SVMR0VMRun_64)
    add     esp, 20h
    jmp far [.fpthunk32 wrt rip]
.fpthunk32:                         ; 16:32 Pointer to .thunk32.
    dd      .thunk32, NAME(SUPR0AbsKernelCS)

BITS 32
ALIGNCODE(16)
.thunk32:
    pop     gs
    pop     fs
    pop     edi
    pop     esi
    ret
ENDPROC SVMR0VMRun


; DECLASM(int) SVMR0VMRun64(RTHCPHYS pVMCBHostPhys, RTHCPHYS pVMCBPhys, PCPUMCTX pCtx /*, PVM pVM, PVMCPU pVCpu*/);
ALIGNCODE(16)
BEGINPROC SVMR0VMRun64
    cmp     byte [NAME(g_fVMXIs64bitHost)], 0
    je      .not_in_long_mode

    ; stack frame
    push    esi
    push    edi
    push    fs
    push    gs

    ; jmp far .thunk64
    db      0xea
    dd      .thunk64, NAME(SUPR0Abs64bitKernelCS)

ALIGNCODE(16)
BITS 64
.thunk64:
    sub     esp, 20h
    mov     rdi, [rsp + 20h + 14h]  ; pVMCBHostPhys
    mov     rsi, [rsp + 20h + 1Ch]  ; pVMCBPhys
    mov     edx, [rsp + 20h + 24h]  ; pCtx
    call    NAME(SVMR0VMRun64_64)
    add     esp, 20h
    jmp far [.fpthunk32 wrt rip]
.fpthunk32:                         ; 16:32 Pointer to .thunk32.
    dd      .thunk32, NAME(SUPR0AbsKernelCS)

BITS 32
ALIGNCODE(16)
.thunk32:
    pop     gs
    pop     fs
    pop     edi
    pop     esi
    ret

.not_in_long_mode:
    mov     eax, VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE
    ret
ENDPROC SVMR0VMRun64

;
; Do it a second time pretending we're a 64-bit host.
;
; This *HAS* to be done at the very end of the file to avoid restoring
; macros. So, add new code *BEFORE* this mess.
;
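; Note: the %define block below re-targets the helper macros at their 64-bit
; variants and redefines MY_NAME to append a _64 suffix, so including
; HMR0Mixed.mac a second time assembles 64-bit copies of the StartVM/VMRun
; routines (VMXR0StartVM32_64, SVMR0VMRun_64, etc.) that the thunks above
; far-call into.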
BITS 64
%undef  RT_ARCH_X86
%define RT_ARCH_AMD64
%undef  ASM_CALL64_MSC
%define ASM_CALL64_GCC
%define xCB             8
%define xSP             rsp
%define xBP             rbp
%define xAX             rax
%define xBX             rbx
%define xCX             rcx
%define xDX             rdx
%define xDI             rdi
%define xSI             rsi
%define MY_NAME(name)   name %+ _64
%define MYPUSHAD        MYPUSHAD64
%define MYPOPAD         MYPOPAD64
%define MYPUSHSEGS      MYPUSHSEGS64
%define MYPOPSEGS       MYPOPSEGS64

%include "HMR0Mixed.mac"
%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL