; $Id: HMR0A.asm 48964 2013-10-08 08:42:56Z vboxsync $
;; @file
; HM - Ring-0 VMX, SVM world-switch and helper routines
;

;
; Copyright (C) 2006-2013 Oracle Corporation
;
; This file is part of VirtualBox Open Source Edition (OSE), as
; available from http://www.virtualbox.org. This file is free software;
; you can redistribute it and/or modify it under the terms of the GNU
; General Public License (GPL) as published by the Free Software
; Foundation, in version 2 as it comes in the "COPYING" file of the
; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
;

;*******************************************************************************
;*   Header Files                                                              *
;*******************************************************************************
%include "VBox/asmdefs.mac"
%include "VBox/err.mac"
%include "VBox/vmm/hm_vmx.mac"
%include "VBox/vmm/cpum.mac"
%include "iprt/x86.mac"
%include "HMInternal.mac"

%ifdef RT_OS_OS2 ;; @todo fix OMF support in yasm and kick nasm out completely.
 %macro vmwrite 2,
    int3
 %endmacro
 %define vmlaunch int3
 %define vmresume int3
 %define vmsave int3
 %define vmload int3
 %define vmrun int3
 %define clgi int3
 %define stgi int3
 %macro invlpga 2,
    int3
 %endmacro
%endif

;*******************************************************************************
;*   Defined Constants And Macros                                              *
;*******************************************************************************
%ifdef RT_ARCH_AMD64
 %define MAYBE_64_BIT
%endif
%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
 %define MAYBE_64_BIT
%else
 %ifdef RT_OS_DARWIN
  %ifdef RT_ARCH_AMD64
   ;;
   ; Load the NULL selector into DS, ES, FS and GS on 64-bit darwin so we don't
   ; risk loading a stale LDT value or something invalid.
   %define HM_64_BIT_USE_NULL_SEL
  %endif
 %endif
%endif

%ifdef RT_ARCH_AMD64
 %define VBOX_SKIP_RESTORE_SEG
%endif

;; The offset of the XMM registers in X86FXSTATE.
; Use define because I'm too lazy to convert the struct.
%define XMM_OFF_IN_X86FXSTATE   160
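; (In the FXSAVE image the XMM registers start at byte offset 160: a 32-byte
; header followed by 8 x 16 bytes of x87/MMX register state.)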

;; @def MYPUSHAD
; Macro generating an equivalent to pushad

;; @def MYPOPAD
; Macro generating an equivalent to popad

;; @def MYPUSHSEGS
; Macro saving all segment registers on the stack.
; @param 1  Full width register name.
; @param 2  16-bit register name for \a 1.

;; @def MYPOPSEGS
; Macro restoring all segment registers from the stack.
; @param 1  Full width register name.
; @param 2  16-bit register name for \a 1.

%ifdef MAYBE_64_BIT
 ; Save a host and load the corresponding guest MSR (trashes rdx & rcx).
 %macro LOADGUESTMSR 2
    mov     rcx, %1
    rdmsr
    push    rdx
    push    rax
    mov     edx, dword [xSI + %2 + 4]
    mov     eax, dword [xSI + %2]
    wrmsr
 %endmacro

 ; Save a guest and load the corresponding host MSR (trashes rdx & rcx).
 ; Only really useful for the GS kernel base, as that one can be changed behind our back (swapgs).
 %macro LOADHOSTMSREX 2
    mov     rcx, %1
    rdmsr
    mov     dword [xSI + %2], eax
    mov     dword [xSI + %2 + 4], edx
    pop     rax
    pop     rdx
    wrmsr
 %endmacro

 ; Load the corresponding host MSR (trashes rdx & rcx).
 %macro LOADHOSTMSR 1
    mov     rcx, %1
    pop     rax
    pop     rdx
    wrmsr
 %endmacro
%endif
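
; A hedged usage sketch (the MSR constant and CPUMCTX field below are
; illustrative, not taken from this file):
;       LOADGUESTMSR  MSR_K8_LSTAR, CPUMCTX.msrLSTAR   ; push host value, load guest value
;       ...
;       LOADHOSTMSREX MSR_K8_LSTAR, CPUMCTX.msrLSTAR   ; save guest value, pop host value
; The pop-based macros must be invoked in the reverse (LIFO) order of the
; corresponding LOADGUESTMSR pushes.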

%ifdef ASM_CALL64_GCC
 %macro MYPUSHAD64 0
    push    r15
    push    r14
    push    r13
    push    r12
    push    rbx
 %endmacro
 %macro MYPOPAD64 0
    pop     rbx
    pop     r12
    pop     r13
    pop     r14
    pop     r15
 %endmacro

%else ; ASM_CALL64_MSC
 %macro MYPUSHAD64 0
    push    r15
    push    r14
    push    r13
    push    r12
    push    rbx
    push    rsi
    push    rdi
 %endmacro
 %macro MYPOPAD64 0
    pop     rdi
    pop     rsi
    pop     rbx
    pop     r12
    pop     r13
    pop     r14
    pop     r15
 %endmacro
%endif
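
; Note: both variants push only the callee-saved GPRs of the respective ABI --
; rbx and r12-r15 for the SysV/GCC convention, plus rsi and rdi for the
; Microsoft x64 convention (rbp is preserved by the callers' own frames).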

%ifdef VBOX_SKIP_RESTORE_SEG
 %macro MYPUSHSEGS64 2
 %endmacro

 %macro MYPOPSEGS64 2
 %endmacro
%else ; !VBOX_SKIP_RESTORE_SEG
 ; Trashes rax, rdx & rcx.
 %macro MYPUSHSEGS64 2
  %ifndef HM_64_BIT_USE_NULL_SEL
    mov     %2, es
    push    %1
    mov     %2, ds
    push    %1
  %endif

    ; Special case for FS: Windows and Linux either don't use it or restore it
    ; when leaving kernel mode; Solaris OTOH doesn't restore it, so we must save it.
    mov     ecx, MSR_K8_FS_BASE
    rdmsr
    push    rdx
    push    rax
  %ifndef HM_64_BIT_USE_NULL_SEL
    push    fs
  %endif

    ; Special case for GS: OSes typically use swapgs to reset the hidden base
    ; register for GS on entry into the kernel; the same happens on exit.
    mov     ecx, MSR_K8_GS_BASE
    rdmsr
    push    rdx
    push    rax
  %ifndef HM_64_BIT_USE_NULL_SEL
    push    gs
  %endif
 %endmacro

 ; Trashes rax, rdx & rcx.
 %macro MYPOPSEGS64 2
    ; Note: do not step through this code with a debugger!
  %ifndef HM_64_BIT_USE_NULL_SEL
    xor     eax, eax
    mov     ds, ax
    mov     es, ax
    mov     fs, ax
    mov     gs, ax
  %endif

  %ifndef HM_64_BIT_USE_NULL_SEL
    pop     gs
  %endif
    pop     rax
    pop     rdx
    mov     ecx, MSR_K8_GS_BASE
    wrmsr

  %ifndef HM_64_BIT_USE_NULL_SEL
    pop     fs
  %endif
    pop     rax
    pop     rdx
    mov     ecx, MSR_K8_FS_BASE
    wrmsr
    ; Now it's safe to step again.

  %ifndef HM_64_BIT_USE_NULL_SEL
    pop     %1
    mov     ds, %2
    pop     %1
    mov     es, %2
  %endif
 %endmacro
%endif ; VBOX_SKIP_RESTORE_SEG
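
; Hedged usage note: both macros take a full-width scratch register and its
; 16-bit alias (e.g. MYPUSHSEGS xAX, ax ... MYPOPSEGS xAX, ax) and, like the
; MSR macros above, must be paired in LIFO order.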

%macro MYPUSHAD32 0
    pushad
%endmacro
%macro MYPOPAD32 0
    popad
%endmacro

%macro MYPUSHSEGS32 2
    push    ds
    push    es
    push    fs
    push    gs
%endmacro
%macro MYPOPSEGS32 2
    pop     gs
    pop     fs
    pop     es
    pop     ds
%endmacro


;*******************************************************************************
;*   External Symbols                                                          *
;*******************************************************************************
%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
extern NAME(SUPR0AbsIs64bit)
extern NAME(SUPR0Abs64bitKernelCS)
extern NAME(SUPR0Abs64bitKernelSS)
extern NAME(SUPR0Abs64bitKernelDS)
extern NAME(SUPR0AbsKernelCS)
%endif
%ifdef VBOX_WITH_KERNEL_USING_XMM
extern NAME(CPUMIsGuestFPUStateActive)
%endif


;*******************************************************************************
;*   Global Variables                                                          *
;*******************************************************************************
%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
BEGINDATA
;;
; Store the SUPR0AbsIs64bit absolute value here so we can cmp/test without
; needing to clobber a register. (This trick doesn't quite work for PE btw.
; but that's not relevant atm.)
GLOBALNAME g_fVMXIs64bitHost
    dd      NAME(SUPR0AbsIs64bit)
%endif


BEGINCODE


;/**
; * Restores host-state fields.
; *
; * @returns VBox status code
; * @param   f32RestoreHost   x86: [ebp + 08h]  msc: ecx  gcc: edi   RestoreHost flags.
; * @param   pRestoreHost     x86: [ebp + 0ch]  msc: rdx  gcc: rsi   Pointer to the RestoreHost struct.
; */
ALIGNCODE(16)
BEGINPROC VMXRestoreHostState
%ifdef RT_ARCH_AMD64
 %ifndef ASM_CALL64_GCC
    ; Use GCC's input registers since we'll be needing both rcx and rdx further
    ; down with the wrmsr instruction. Use R10 and R11 for saving RDI and RSI
    ; since MSC preserves the latter two registers.
    mov     r10, rdi
    mov     r11, rsi
    mov     rdi, rcx
    mov     rsi, rdx
 %endif

    test    edi, VMX_RESTORE_HOST_GDTR
    jz      .test_idtr
    lgdt    [rsi + VMXRESTOREHOST.HostGdtr]

.test_idtr:
    test    edi, VMX_RESTORE_HOST_IDTR
    jz      .test_ds
    lidt    [rsi + VMXRESTOREHOST.HostIdtr]

.test_ds:
    test    edi, VMX_RESTORE_HOST_SEL_DS
    jz      .test_es
    mov     ax, [rsi + VMXRESTOREHOST.uHostSelDS]
    mov     ds, eax

.test_es:
    test    edi, VMX_RESTORE_HOST_SEL_ES
    jz      .test_tr
    mov     ax, [rsi + VMXRESTOREHOST.uHostSelES]
    mov     es, eax

.test_tr:
    test    edi, VMX_RESTORE_HOST_SEL_TR
    jz      .test_fs
    ; When restoring the TR, we must first clear the busy flag or we'll end up faulting.
    mov     dx, [rsi + VMXRESTOREHOST.uHostSelTR]
    mov     ax, dx
    and     eax, X86_SEL_MASK_OFF_RPL                       ; Mask away TI and RPL bits leaving only the descriptor offset.
    add     rax, qword [rsi + VMXRESTOREHOST.HostGdtr + 2]  ; xAX <- descriptor offset + GDTR.pGdt.
    and     dword [rax + 4], ~RT_BIT(9)                     ; Clear the busy flag in TSS desc (bits 0-7=base, bit 9=busy bit).
    ltr     dx

.test_fs:
    ;
    ; When restoring the selector values for FS and GS, we'll temporarily trash
    ; the base address (at least the high 32 bits, but quite possibly the whole
    ; base address); the wrmsr will restore it correctly. (VT-x actually
    ; restores the base correctly when leaving guest mode, but not the selector
    ; value, so there is little problem with interrupts being enabled prior to
    ; this restore job.)
    ; We'll disable ints once for both FS and GS as that's probably faster.
    ;
    test    edi, VMX_RESTORE_HOST_SEL_FS | VMX_RESTORE_HOST_SEL_GS
    jz      .restore_success
    pushfq
    cli                                                     ; (see above)

    test    edi, VMX_RESTORE_HOST_SEL_FS
    jz      .test_gs
    mov     ax, word [rsi + VMXRESTOREHOST.uHostSelFS]
    mov     fs, eax
    mov     eax, dword [rsi + VMXRESTOREHOST.uHostFSBase]      ; uHostFSBase - Lo
    mov     edx, dword [rsi + VMXRESTOREHOST.uHostFSBase + 4h] ; uHostFSBase - Hi
    mov     ecx, MSR_K8_FS_BASE
    wrmsr

.test_gs:
    test    edi, VMX_RESTORE_HOST_SEL_GS
    jz      .restore_flags
    mov     ax, word [rsi + VMXRESTOREHOST.uHostSelGS]
    mov     gs, eax
    mov     eax, dword [rsi + VMXRESTOREHOST.uHostGSBase]      ; uHostGSBase - Lo
    mov     edx, dword [rsi + VMXRESTOREHOST.uHostGSBase + 4h] ; uHostGSBase - Hi
    mov     ecx, MSR_K8_GS_BASE
    wrmsr

.restore_flags:
    popfq

.restore_success:
    mov     eax, VINF_SUCCESS
 %ifndef ASM_CALL64_GCC
    ; Restore RDI and RSI on MSC.
    mov     rdi, r10
    mov     rsi, r11
 %endif
%else  ; RT_ARCH_X86
    mov     eax, VERR_NOT_IMPLEMENTED
%endif
    ret
ENDPROC VMXRestoreHostState
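
; (A plausible caller sketch -- the exact field names are assumptions: the C
; code would invoke this as
;     VMXRestoreHostState(pVCpu->hm.s.vmx.fRestoreHostFlags, &pVCpu->hm.s.vmx.RestoreHost);
; after a VM-exit, to undo the host state that VT-x does not restore itself.)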


;/**
; * Dispatches an NMI to the host.
; */
ALIGNCODE(16)
BEGINPROC VMXDispatchHostNmi
    int 2   ; NMI is always vector 2. The IDT[2] IRQ handler cannot be anything else. See Intel spec. 6.3.1 "External Interrupts".
    ret
ENDPROC VMXDispatchHostNmi


;/**
; * Executes VMWRITE, 64-bit value.
; *
; * @returns VBox status code
; * @param   idxField   x86: [ebp + 08h]  msc: rcx  gcc: rdi   VMCS index
; * @param   u64Data    x86: [ebp + 0ch]  msc: rdx  gcc: rsi   VM field value
; */
ALIGNCODE(16)
BEGINPROC VMXWriteVmcs64
%ifdef RT_ARCH_AMD64
 %ifdef ASM_CALL64_GCC
    and     edi, 0ffffffffh
    xor     rax, rax
    vmwrite rdi, rsi
 %else
    and     ecx, 0ffffffffh
    xor     rax, rax
    vmwrite rcx, rdx
 %endif
%else  ; RT_ARCH_X86
    mov     ecx, [esp + 4]          ; idxField
    lea     edx, [esp + 8]          ; &u64Data
 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
    cmp     byte [NAME(g_fVMXIs64bitHost)], 0
    jz      .legacy_mode
    db      0xea                    ; jmp far .sixtyfourbit_mode
    dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
.legacy_mode:
 %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
    vmwrite ecx, [edx]              ; low dword
    jz      .done
    jc      .done
    inc     ecx
    xor     eax, eax
    vmwrite ecx, [edx + 4]          ; high dword
.done:
%endif ; RT_ARCH_X86
    jnc     .valid_vmcs
    mov     eax, VERR_VMX_INVALID_VMCS_PTR
    ret
.valid_vmcs:
    jnz     .the_end
    mov     eax, VERR_VMX_INVALID_VMCS_FIELD
.the_end:
    ret

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
ALIGNCODE(16)
BITS 64
.sixtyfourbit_mode:
    and     edx, 0ffffffffh
    and     ecx, 0ffffffffh
    xor     eax, eax
    vmwrite rcx, [rdx]
    mov     r8d, VERR_VMX_INVALID_VMCS_FIELD
    cmovz   eax, r8d
    mov     r9d, VERR_VMX_INVALID_VMCS_PTR
    cmovc   eax, r9d
    jmp far [.fpret wrt rip]
.fpret:                             ; 16:32 Pointer to .the_end.
    dd      .the_end, NAME(SUPR0AbsKernelCS)
BITS 32
%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
ENDPROC VMXWriteVmcs64
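
; Hypothetical C-level usage sketch (the field constant is an assumption from
; hm_vmx.h, not defined in this file):
;     rc = VMXWriteVmcs64(VMX_VMCS_GUEST_RIP, pCtx->rip);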


;/**
; * Executes VMREAD, 64-bit value.
; *
; * @returns VBox status code
; * @param   idxField   VMCS index
; * @param   pData      Where to store the VM field value
; */
;DECLASM(int) VMXReadVmcs64(uint32_t idxField, uint64_t *pData);
ALIGNCODE(16)
BEGINPROC VMXReadVmcs64
%ifdef RT_ARCH_AMD64
 %ifdef ASM_CALL64_GCC
    and     edi, 0ffffffffh
    xor     rax, rax
    vmread  [rsi], rdi
 %else
    and     ecx, 0ffffffffh
    xor     rax, rax
    vmread  [rdx], rcx
 %endif
%else  ; RT_ARCH_X86
    mov     ecx, [esp + 4]          ; idxField
    mov     edx, [esp + 8]          ; pData
 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
    cmp     byte [NAME(g_fVMXIs64bitHost)], 0
    jz      .legacy_mode
    db      0xea                    ; jmp far .sixtyfourbit_mode
    dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
.legacy_mode:
 %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
    vmread  [edx], ecx              ; low dword
    jz      .done
    jc      .done
    inc     ecx
    xor     eax, eax
    vmread  [edx + 4], ecx          ; high dword
.done:
%endif ; RT_ARCH_X86
    jnc     .valid_vmcs
    mov     eax, VERR_VMX_INVALID_VMCS_PTR
    ret
.valid_vmcs:
    jnz     .the_end
    mov     eax, VERR_VMX_INVALID_VMCS_FIELD
.the_end:
    ret

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
ALIGNCODE(16)
BITS 64
.sixtyfourbit_mode:
    and     edx, 0ffffffffh
    and     ecx, 0ffffffffh
    xor     eax, eax
    vmread  [rdx], rcx
    mov     r8d, VERR_VMX_INVALID_VMCS_FIELD
    cmovz   eax, r8d
    mov     r9d, VERR_VMX_INVALID_VMCS_PTR
    cmovc   eax, r9d
    jmp far [.fpret wrt rip]
.fpret:                             ; 16:32 Pointer to .the_end.
    dd      .the_end, NAME(SUPR0AbsKernelCS)
BITS 32
%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
ENDPROC VMXReadVmcs64


;/**
; * Executes VMREAD, 32-bit value.
; *
; * @returns VBox status code
; * @param   idxField   VMCS index
; * @param   pu32Data   Where to store the VM field value
; */
;DECLASM(int) VMXReadVmcs32(uint32_t idxField, uint32_t *pu32Data);
ALIGNCODE(16)
BEGINPROC VMXReadVmcs32
%ifdef RT_ARCH_AMD64
 %ifdef ASM_CALL64_GCC
    and     edi, 0ffffffffh
    xor     rax, rax
    vmread  r10, rdi
    mov     [rsi], r10d
 %else
    and     ecx, 0ffffffffh
    xor     rax, rax
    vmread  r10, rcx
    mov     [rdx], r10d
 %endif
%else  ; RT_ARCH_X86
    mov     ecx, [esp + 4]          ; idxField
    mov     edx, [esp + 8]          ; pu32Data
 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
    cmp     byte [NAME(g_fVMXIs64bitHost)], 0
    jz      .legacy_mode
    db      0xea                    ; jmp far .sixtyfourbit_mode
    dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
.legacy_mode:
 %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
    xor     eax, eax
    vmread  [edx], ecx
%endif ; RT_ARCH_X86
    jnc     .valid_vmcs
    mov     eax, VERR_VMX_INVALID_VMCS_PTR
    ret
.valid_vmcs:
    jnz     .the_end
    mov     eax, VERR_VMX_INVALID_VMCS_FIELD
.the_end:
    ret

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
ALIGNCODE(16)
BITS 64
.sixtyfourbit_mode:
    and     edx, 0ffffffffh
    and     ecx, 0ffffffffh
    xor     eax, eax
    vmread  r10, rcx
    mov     [rdx], r10d
    mov     r8d, VERR_VMX_INVALID_VMCS_FIELD
    cmovz   eax, r8d
    mov     r9d, VERR_VMX_INVALID_VMCS_PTR
    cmovc   eax, r9d
    jmp far [.fpret wrt rip]
.fpret:                             ; 16:32 Pointer to .the_end.
    dd      .the_end, NAME(SUPR0AbsKernelCS)
BITS 32
%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
ENDPROC VMXReadVmcs32


;/**
; * Executes VMWRITE, 32-bit value.
; *
; * @returns VBox status code
; * @param   idxField   VMCS index
; * @param   u32Data    The 32-bit value to write
; */
;DECLASM(int) VMXWriteVmcs32(uint32_t idxField, uint32_t u32Data);
ALIGNCODE(16)
BEGINPROC VMXWriteVmcs32
%ifdef RT_ARCH_AMD64
 %ifdef ASM_CALL64_GCC
    and     edi, 0ffffffffh
    and     esi, 0ffffffffh
    xor     rax, rax
    vmwrite rdi, rsi
 %else
    and     ecx, 0ffffffffh
    and     edx, 0ffffffffh
    xor     rax, rax
    vmwrite rcx, rdx
 %endif
%else  ; RT_ARCH_X86
    mov     ecx, [esp + 4]          ; idxField
    mov     edx, [esp + 8]          ; u32Data
 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
    cmp     byte [NAME(g_fVMXIs64bitHost)], 0
    jz      .legacy_mode
    db      0xea                    ; jmp far .sixtyfourbit_mode
    dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
.legacy_mode:
 %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
    xor     eax, eax
    vmwrite ecx, edx
%endif ; RT_ARCH_X86
    jnc     .valid_vmcs
    mov     eax, VERR_VMX_INVALID_VMCS_PTR
    ret
.valid_vmcs:
    jnz     .the_end
    mov     eax, VERR_VMX_INVALID_VMCS_FIELD
.the_end:
    ret

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
ALIGNCODE(16)
BITS 64
.sixtyfourbit_mode:
    and     edx, 0ffffffffh
    and     ecx, 0ffffffffh
    xor     eax, eax
    vmwrite rcx, rdx
    mov     r8d, VERR_VMX_INVALID_VMCS_FIELD
    cmovz   eax, r8d
    mov     r9d, VERR_VMX_INVALID_VMCS_PTR
    cmovc   eax, r9d
    jmp far [.fpret wrt rip]
.fpret:                             ; 16:32 Pointer to .the_end.
    dd      .the_end, NAME(SUPR0AbsKernelCS)
BITS 32
%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
ENDPROC VMXWriteVmcs32


;/**
; * Executes VMXON
; *
; * @returns VBox status code
; * @param   HCPhysVMXOn   Physical address of VMXON structure
; */
;DECLASM(int) VMXEnable(RTHCPHYS HCPhysVMXOn);
BEGINPROC VMXEnable
%ifdef RT_ARCH_AMD64
    xor     rax, rax
 %ifdef ASM_CALL64_GCC
    push    rdi
 %else
    push    rcx
 %endif
    vmxon   [rsp]
%else  ; RT_ARCH_X86
 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
    cmp     byte [NAME(g_fVMXIs64bitHost)], 0
    jz      .legacy_mode
    db      0xea                    ; jmp far .sixtyfourbit_mode
    dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
.legacy_mode:
 %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
    xor     eax, eax
    vmxon   [esp + 4]
%endif ; RT_ARCH_X86
    jnc     .good
    mov     eax, VERR_VMX_INVALID_VMXON_PTR
    jmp     .the_end

.good:
    jnz     .the_end
    mov     eax, VERR_VMX_VMXON_FAILED

.the_end:
%ifdef RT_ARCH_AMD64
    add     rsp, 8
%endif
    ret

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
ALIGNCODE(16)
BITS 64
.sixtyfourbit_mode:
    lea     rdx, [rsp + 4]          ; &HCPhysVMXOn.
    and     edx, 0ffffffffh
    xor     eax, eax
    vmxon   [rdx]
    mov     r8d, VERR_VMX_VMXON_FAILED
    cmovz   eax, r8d
    mov     r9d, VERR_VMX_INVALID_VMXON_PTR
    cmovc   eax, r9d
    jmp far [.fpret wrt rip]
.fpret:                             ; 16:32 Pointer to .the_end.
    dd      .the_end, NAME(SUPR0AbsKernelCS)
BITS 32
%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
ENDPROC VMXEnable
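
; Note: the caller is expected to have set CR4.VMXE and to pass a 4KB-aligned
; VMXON region initialized with the VMCS revision identifier; otherwise VMXON
; faults or fails (see Intel SDM, "Enter VMX Operation").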


;/**
; * Executes VMXOFF
; */
;DECLASM(void) VMXDisable(void);
BEGINPROC VMXDisable
%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
    cmp     byte [NAME(g_fVMXIs64bitHost)], 0
    jz      .legacy_mode
    db      0xea                    ; jmp far .sixtyfourbit_mode
    dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
.legacy_mode:
%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
    vmxoff
.the_end:
    ret

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
ALIGNCODE(16)
BITS 64
.sixtyfourbit_mode:
    vmxoff
    jmp far [.fpret wrt rip]
.fpret:                             ; 16:32 Pointer to .the_end.
    dd      .the_end, NAME(SUPR0AbsKernelCS)
BITS 32
%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
ENDPROC VMXDisable



;/**
; * Executes VMCLEAR
; *
; * @returns VBox status code
; * @param   HCPhysVmcs   Physical address of VM control structure
; */
;DECLASM(int) VMXClearVmcs(RTHCPHYS HCPhysVmcs);
ALIGNCODE(16)
BEGINPROC VMXClearVmcs
%ifdef RT_ARCH_AMD64
    xor     rax, rax
 %ifdef ASM_CALL64_GCC
    push    rdi
 %else
    push    rcx
 %endif
    vmclear [rsp]
%else  ; RT_ARCH_X86
 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
    cmp     byte [NAME(g_fVMXIs64bitHost)], 0
    jz      .legacy_mode
    db      0xea                    ; jmp far .sixtyfourbit_mode
    dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
.legacy_mode:
 %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
    xor     eax, eax
    vmclear [esp + 4]
%endif ; RT_ARCH_X86
    jnc     .the_end
    mov     eax, VERR_VMX_INVALID_VMCS_PTR
.the_end:
%ifdef RT_ARCH_AMD64
    add     rsp, 8
%endif
    ret

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
ALIGNCODE(16)
BITS 64
.sixtyfourbit_mode:
    lea     rdx, [rsp + 4]          ; &HCPhysVmcs
    and     edx, 0ffffffffh
    xor     eax, eax
    vmclear [rdx]
    mov     r9d, VERR_VMX_INVALID_VMCS_PTR
    cmovc   eax, r9d
    jmp far [.fpret wrt rip]
.fpret:                             ; 16:32 Pointer to .the_end.
    dd      .the_end, NAME(SUPR0AbsKernelCS)
BITS 32
%endif
ENDPROC VMXClearVmcs


;/**
; * Executes VMPTRLD
; *
; * @returns VBox status code
; * @param   HCPhysVmcs   Physical address of VMCS structure
; */
;DECLASM(int) VMXActivateVmcs(RTHCPHYS HCPhysVmcs);
ALIGNCODE(16)
BEGINPROC VMXActivateVmcs
%ifdef RT_ARCH_AMD64
    xor     rax, rax
 %ifdef ASM_CALL64_GCC
    push    rdi
 %else
    push    rcx
 %endif
    vmptrld [rsp]
%else
 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
    cmp     byte [NAME(g_fVMXIs64bitHost)], 0
    jz      .legacy_mode
    db      0xea                    ; jmp far .sixtyfourbit_mode
    dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
.legacy_mode:
 %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
    xor     eax, eax
    vmptrld [esp + 4]
%endif
    jnc     .the_end
    mov     eax, VERR_VMX_INVALID_VMCS_PTR
.the_end:
%ifdef RT_ARCH_AMD64
    add     rsp, 8
%endif
    ret

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
ALIGNCODE(16)
BITS 64
.sixtyfourbit_mode:
    lea     rdx, [rsp + 4]          ; &HCPhysVmcs
    and     edx, 0ffffffffh
    xor     eax, eax
    vmptrld [rdx]
    mov     r9d, VERR_VMX_INVALID_VMCS_PTR
    cmovc   eax, r9d
    jmp far [.fpret wrt rip]
.fpret:                             ; 16:32 Pointer to .the_end.
    dd      .the_end, NAME(SUPR0AbsKernelCS)
BITS 32
%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
ENDPROC VMXActivateVmcs


;/**
; * Executes VMPTRST
; *
; * @returns VBox status code
; * @param   pVMCS   x86: [esp + 04h]  gcc: rdi  msc: rcx   Where to store the physical address of the current VMCS.
; */
;DECLASM(int) VMXGetActivatedVmcs(RTHCPHYS *pVMCS);
BEGINPROC VMXGetActivatedVmcs
%ifdef RT_OS_OS2
    mov     eax, VERR_NOT_SUPPORTED
    ret
%else
 %ifdef RT_ARCH_AMD64
  %ifdef ASM_CALL64_GCC
    vmptrst qword [rdi]
  %else
    vmptrst qword [rcx]
  %endif
 %else
  %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
    cmp     byte [NAME(g_fVMXIs64bitHost)], 0
    jz      .legacy_mode
    db      0xea                    ; jmp far .sixtyfourbit_mode
    dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
.legacy_mode:
  %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
    vmptrst qword [esp + 04h]
 %endif
    xor     eax, eax
.the_end:
    ret

 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
ALIGNCODE(16)
BITS 64
.sixtyfourbit_mode:
    lea     rdx, [rsp + 4]          ; &HCPhysVmcs
    and     edx, 0ffffffffh
    vmptrst qword [rdx]
    xor     eax, eax
    jmp far [.fpret wrt rip]
.fpret:                             ; 16:32 Pointer to .the_end.
    dd      .the_end, NAME(SUPR0AbsKernelCS)
BITS 32
 %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
%endif
ENDPROC VMXGetActivatedVmcs

;/**
; * Invalidate a page using invept.
; *
; * @param   enmFlush      msc:ecx  gcc:edi  x86:[esp+04]  Type of flush.
; * @param   pDescriptor   msc:edx  gcc:esi  x86:[esp+08]  Descriptor pointer.
; */
;DECLASM(int) VMXR0InvEPT(VMX_FLUSH enmFlush, uint64_t *pDescriptor);
BEGINPROC VMXR0InvEPT
%ifdef RT_ARCH_AMD64
 %ifdef ASM_CALL64_GCC
    and     edi, 0ffffffffh
    xor     rax, rax
    ; invept rdi, qword [rsi]
    DB      0x66, 0x0F, 0x38, 0x80, 0x3E
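    ; (invept must be hand-assembled because older assemblers don't know it:
    ; 0x66 0x0F 0x38 0x80 is the opcode and 0x3E the ModRM byte for
    ; (rdi, [rsi]); the 0x0A variants below encode (rcx/ecx, [rdx/edx]).)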
 %else
    and     ecx, 0ffffffffh
    xor     rax, rax
    ; invept rcx, qword [rdx]
    DB      0x66, 0x0F, 0x38, 0x80, 0xA
 %endif
%else
 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
    cmp     byte [NAME(g_fVMXIs64bitHost)], 0
    jz      .legacy_mode
    db      0xea                    ; jmp far .sixtyfourbit_mode
    dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
.legacy_mode:
 %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
    mov     ecx, [esp + 4]
    mov     edx, [esp + 8]
    xor     eax, eax
    ; invept ecx, qword [edx]
    DB      0x66, 0x0F, 0x38, 0x80, 0xA
%endif
    jnc     .valid_vmcs
    mov     eax, VERR_VMX_INVALID_VMCS_PTR
    ret
.valid_vmcs:
    jnz     .the_end
    mov     eax, VERR_INVALID_PARAMETER
.the_end:
    ret

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
ALIGNCODE(16)
BITS 64
.sixtyfourbit_mode:
    and     esp, 0ffffffffh
    mov     ecx, [rsp + 4]          ; enmFlush
    mov     edx, [rsp + 8]          ; pDescriptor
    xor     eax, eax
    ; invept rcx, qword [rdx]
    DB      0x66, 0x0F, 0x38, 0x80, 0xA
    mov     r8d, VERR_INVALID_PARAMETER
    cmovz   eax, r8d
    mov     r9d, VERR_VMX_INVALID_VMCS_PTR
    cmovc   eax, r9d
    jmp far [.fpret wrt rip]
.fpret:                             ; 16:32 Pointer to .the_end.
    dd      .the_end, NAME(SUPR0AbsKernelCS)
BITS 32
%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
ENDPROC VMXR0InvEPT


;/**
; * Invalidate a page using invvpid.
; *
; * @param   enmFlush      msc:ecx  gcc:edi  x86:[esp+04]  Type of flush.
; * @param   pDescriptor   msc:edx  gcc:esi  x86:[esp+08]  Descriptor pointer.
; */
;DECLASM(int) VMXR0InvVPID(VMX_FLUSH enmFlush, uint64_t *pDescriptor);
BEGINPROC VMXR0InvVPID
%ifdef RT_ARCH_AMD64
 %ifdef ASM_CALL64_GCC
    and     edi, 0ffffffffh
    xor     rax, rax
    ; invvpid rdi, qword [rsi]
    DB      0x66, 0x0F, 0x38, 0x81, 0x3E
 %else
    and     ecx, 0ffffffffh
    xor     rax, rax
    ; invvpid rcx, qword [rdx]
    DB      0x66, 0x0F, 0x38, 0x81, 0xA
 %endif
%else
 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
    cmp     byte [NAME(g_fVMXIs64bitHost)], 0
    jz      .legacy_mode
    db      0xea                    ; jmp far .sixtyfourbit_mode
    dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
.legacy_mode:
 %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
    mov     ecx, [esp + 4]
    mov     edx, [esp + 8]
    xor     eax, eax
    ; invvpid ecx, qword [edx]
    DB      0x66, 0x0F, 0x38, 0x81, 0xA
%endif
    jnc     .valid_vmcs
    mov     eax, VERR_VMX_INVALID_VMCS_PTR
    ret
.valid_vmcs:
    jnz     .the_end
    mov     eax, VERR_INVALID_PARAMETER
.the_end:
    ret

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
ALIGNCODE(16)
BITS 64
.sixtyfourbit_mode:
    and     esp, 0ffffffffh
    mov     ecx, [rsp + 4]          ; enmFlush
    mov     edx, [rsp + 8]          ; pDescriptor
    xor     eax, eax
    ; invvpid rcx, qword [rdx]
    DB      0x66, 0x0F, 0x38, 0x81, 0xA
    mov     r8d, VERR_INVALID_PARAMETER
    cmovz   eax, r8d
    mov     r9d, VERR_VMX_INVALID_VMCS_PTR
    cmovc   eax, r9d
    jmp far [.fpret wrt rip]
.fpret:                             ; 16:32 Pointer to .the_end.
    dd      .the_end, NAME(SUPR0AbsKernelCS)
BITS 32
%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
ENDPROC VMXR0InvVPID


%if GC_ARCH_BITS == 64
;;
; Executes INVLPGA
;
; @param   pPageGC   msc:rcx  gcc:rdi  x86:[esp+04]  Virtual page to invalidate
; @param   uASID     msc:rdx  gcc:rsi  x86:[esp+0C]  Tagged TLB id
;
;DECLASM(void) SVMR0InvlpgA(RTGCPTR pPageGC, uint32_t uASID);
BEGINPROC SVMR0InvlpgA
%ifdef RT_ARCH_AMD64
 %ifdef ASM_CALL64_GCC
    mov     rax, rdi
    mov     rcx, rsi
 %else
    mov     rax, rcx
    mov     rcx, rdx
 %endif
%else
    mov     eax, [esp + 4]
    mov     ecx, [esp + 0Ch]
%endif
    invlpga [xAX], ecx
    ret
ENDPROC SVMR0InvlpgA

%else ; GC_ARCH_BITS != 64
;;
; Executes INVLPGA
;
; @param   pPageGC   msc:ecx  gcc:edi  x86:[esp+04]  Virtual page to invalidate
; @param   uASID     msc:edx  gcc:esi  x86:[esp+08]  Tagged TLB id
;
;DECLASM(void) SVMR0InvlpgA(RTGCPTR pPageGC, uint32_t uASID);
BEGINPROC SVMR0InvlpgA
%ifdef RT_ARCH_AMD64
 %ifdef ASM_CALL64_GCC
    movzx   rax, edi
    mov     ecx, esi
 %else
    ; from http://www.cs.cmu.edu/~fp/courses/15213-s06/misc/asm64-handout.pdf:
    ; ``Perhaps unexpectedly, instructions that move or generate 32-bit register
    ;   values also set the upper 32 bits of the register to zero. Consequently
    ;   there is no need for an instruction movzlq.''
    mov     eax, ecx
    mov     ecx, edx
 %endif
%else
    mov     eax, [esp + 4]
    mov     ecx, [esp + 8]
%endif
    invlpga [xAX], ecx
    ret
ENDPROC SVMR0InvlpgA

%endif ; GC_ARCH_BITS != 64

%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL

;/**
; * Gets 64-bit GDTR and IDTR on darwin.
; * @param   pGdtr   Where to store the 64-bit GDTR.
; * @param   pIdtr   Where to store the 64-bit IDTR.
; */
;DECLASM(void) HMR0Get64bitGdtrAndIdtr(PX86XDTR64 pGdtr, PX86XDTR64 pIdtr);
ALIGNCODE(16)
BEGINPROC HMR0Get64bitGdtrAndIdtr
    db      0xea                    ; jmp far .sixtyfourbit_mode
    dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
.the_end:
    ret

ALIGNCODE(16)
BITS 64
.sixtyfourbit_mode:
    and     esp, 0ffffffffh
    mov     ecx, [rsp + 4]          ; pGdtr
    mov     edx, [rsp + 8]          ; pIdtr
    sgdt    [rcx]
    sidt    [rdx]
    jmp far [.fpret wrt rip]
.fpret:                             ; 16:32 Pointer to .the_end.
    dd      .the_end, NAME(SUPR0AbsKernelCS)
BITS 32
ENDPROC HMR0Get64bitGdtrAndIdtr


;/**
; * Gets 64-bit CR3 on darwin.
; * @returns CR3
; */
;DECLASM(uint64_t) HMR0Get64bitCR3(void);
ALIGNCODE(16)
BEGINPROC HMR0Get64bitCR3
    db      0xea                    ; jmp far .sixtyfourbit_mode
    dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
.the_end:
    ret

ALIGNCODE(16)
BITS 64
.sixtyfourbit_mode:
    mov     rax, cr3
    mov     rdx, rax
    shr     rdx, 32
    jmp far [.fpret wrt rip]
.fpret:                             ; 16:32 Pointer to .the_end.
    dd      .the_end, NAME(SUPR0AbsKernelCS)
BITS 32
ENDPROC HMR0Get64bitCR3

%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL

%ifdef VBOX_WITH_KERNEL_USING_XMM

;;
; Wrapper around vmx.pfnStartVM that preserves host XMM registers and
; loads the guest ones when necessary.
;
; @cproto DECLASM(int) HMR0VMXStartVMWrapXMM(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu, PFNHMVMXSTARTVM pfnStartVM);
;
; @returns eax
;
; @param   fResumeVM    msc:rcx
; @param   pCtx         msc:rdx
; @param   pVMCSCache   msc:r8
; @param   pVM          msc:r9
; @param   pVCpu        msc:[rbp+30h]
; @param   pfnStartVM   msc:[rbp+38h]
;
; @remarks This is essentially the same code as HMR0SVMRunWrapXMM, only the parameters differ a little bit.
;
; ASSUMING 64-bit and windows for now.
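; (Win64 ABI note: XMM6-XMM15 are callee-saved while XMM0-XMM5 are volatile,
; which is why only XMM6-XMM15 are spilled into the stack frame below.)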
ALIGNCODE(16)
BEGINPROC HMR0VMXStartVMWrapXMM
    push    xBP
    mov     xBP, xSP
    sub     xSP, 0a0h + 040h        ; Don't bother optimizing the frame size.

    ; Spill input parameters.
    mov     [xBP + 010h], rcx       ; fResumeVM
    mov     [xBP + 018h], rdx       ; pCtx
    mov     [xBP + 020h], r8        ; pVMCSCache
    mov     [xBP + 028h], r9        ; pVM

    ; Ask CPUM whether we've started using the FPU yet.
    mov     rcx, [xBP + 30h]        ; pVCpu
    call    NAME(CPUMIsGuestFPUStateActive)
    test    al, al
    jnz     .guest_fpu_state_active

    ; No need to mess with XMM registers; just call the start routine and return.
    mov     r11, [xBP + 38h]        ; pfnStartVM
    mov     r10, [xBP + 30h]        ; pVCpu
    mov     [xSP + 020h], r10
    mov     rcx, [xBP + 010h]       ; fResumeVM
    mov     rdx, [xBP + 018h]       ; pCtx
    mov     r8,  [xBP + 020h]       ; pVMCSCache
    mov     r9,  [xBP + 028h]       ; pVM
    call    r11

    leave
    ret

ALIGNCODE(8)
.guest_fpu_state_active:
    ; Save the host XMM registers.
    movdqa  [rsp + 040h + 000h], xmm6
    movdqa  [rsp + 040h + 010h], xmm7
    movdqa  [rsp + 040h + 020h], xmm8
    movdqa  [rsp + 040h + 030h], xmm9
    movdqa  [rsp + 040h + 040h], xmm10
    movdqa  [rsp + 040h + 050h], xmm11
    movdqa  [rsp + 040h + 060h], xmm12
    movdqa  [rsp + 040h + 070h], xmm13
    movdqa  [rsp + 040h + 080h], xmm14
    movdqa  [rsp + 040h + 090h], xmm15

    ; Load the full guest XMM register state.
    mov     r10, [xBP + 018h]       ; pCtx
    lea     r10, [r10 + XMM_OFF_IN_X86FXSTATE]
    movdqa  xmm0,  [r10 + 000h]
    movdqa  xmm1,  [r10 + 010h]
    movdqa  xmm2,  [r10 + 020h]
    movdqa  xmm3,  [r10 + 030h]
    movdqa  xmm4,  [r10 + 040h]
    movdqa  xmm5,  [r10 + 050h]
    movdqa  xmm6,  [r10 + 060h]
    movdqa  xmm7,  [r10 + 070h]
    movdqa  xmm8,  [r10 + 080h]
    movdqa  xmm9,  [r10 + 090h]
    movdqa  xmm10, [r10 + 0a0h]
    movdqa  xmm11, [r10 + 0b0h]
    movdqa  xmm12, [r10 + 0c0h]
    movdqa  xmm13, [r10 + 0d0h]
    movdqa  xmm14, [r10 + 0e0h]
    movdqa  xmm15, [r10 + 0f0h]

    ; Make the call (same as in the other case).
    mov     r11, [xBP + 38h]        ; pfnStartVM
    mov     r10, [xBP + 30h]        ; pVCpu
    mov     [xSP + 020h], r10
    mov     rcx, [xBP + 010h]       ; fResumeVM
    mov     rdx, [xBP + 018h]       ; pCtx
    mov     r8,  [xBP + 020h]       ; pVMCSCache
    mov     r9,  [xBP + 028h]       ; pVM
    call    r11

    ; Save the guest XMM registers.
    mov     r10, [xBP + 018h]       ; pCtx
    lea     r10, [r10 + XMM_OFF_IN_X86FXSTATE]
    movdqa  [r10 + 000h], xmm0
    movdqa  [r10 + 010h], xmm1
    movdqa  [r10 + 020h], xmm2
    movdqa  [r10 + 030h], xmm3
    movdqa  [r10 + 040h], xmm4
    movdqa  [r10 + 050h], xmm5
    movdqa  [r10 + 060h], xmm6
    movdqa  [r10 + 070h], xmm7
    movdqa  [r10 + 080h], xmm8
    movdqa  [r10 + 090h], xmm9
    movdqa  [r10 + 0a0h], xmm10
    movdqa  [r10 + 0b0h], xmm11
    movdqa  [r10 + 0c0h], xmm12
    movdqa  [r10 + 0d0h], xmm13
    movdqa  [r10 + 0e0h], xmm14
    movdqa  [r10 + 0f0h], xmm15

    ; Load the host XMM registers.
    movdqa  xmm6,  [rsp + 040h + 000h]
    movdqa  xmm7,  [rsp + 040h + 010h]
    movdqa  xmm8,  [rsp + 040h + 020h]
    movdqa  xmm9,  [rsp + 040h + 030h]
    movdqa  xmm10, [rsp + 040h + 040h]
    movdqa  xmm11, [rsp + 040h + 050h]
    movdqa  xmm12, [rsp + 040h + 060h]
    movdqa  xmm13, [rsp + 040h + 070h]
    movdqa  xmm14, [rsp + 040h + 080h]
    movdqa  xmm15, [rsp + 040h + 090h]
    leave
    ret
ENDPROC HMR0VMXStartVMWrapXMM

;;
; Wrapper around svm.pfnVMRun that preserves host XMM registers and
; loads the guest ones when necessary.
;
; @cproto DECLASM(int) HMR0SVMRunWrapXMM(RTHCPHYS pVMCBHostPhys, RTHCPHYS pVMCBPhys, PCPUMCTX pCtx, PVM pVM, PVMCPU pVCpu, PFNHMSVMVMRUN pfnVMRun);
;
; @returns eax
;
; @param   pVMCBHostPhys   msc:rcx
; @param   pVMCBPhys       msc:rdx
; @param   pCtx            msc:r8
; @param   pVM             msc:r9
; @param   pVCpu           msc:[rbp+30h]
; @param   pfnVMRun        msc:[rbp+38h]
;
; @remarks This is essentially the same code as HMR0VMXStartVMWrapXMM, only the parameters differ a little bit.
;
; ASSUMING 64-bit and windows for now.
ALIGNCODE(16)
BEGINPROC HMR0SVMRunWrapXMM
    push    xBP
    mov     xBP, xSP
    sub     xSP, 0a0h + 040h        ; Don't bother optimizing the frame size.

    ; Spill input parameters.
    mov     [xBP + 010h], rcx       ; pVMCBHostPhys
    mov     [xBP + 018h], rdx       ; pVMCBPhys
    mov     [xBP + 020h], r8        ; pCtx
    mov     [xBP + 028h], r9        ; pVM

    ; Ask CPUM whether we've started using the FPU yet.
    mov     rcx, [xBP + 30h]        ; pVCpu
    call    NAME(CPUMIsGuestFPUStateActive)
    test    al, al
    jnz     .guest_fpu_state_active

    ; No need to mess with XMM registers; just call the start routine and return.
    mov     r11, [xBP + 38h]        ; pfnVMRun
    mov     r10, [xBP + 30h]        ; pVCpu
    mov     [xSP + 020h], r10
    mov     rcx, [xBP + 010h]       ; pVMCBHostPhys
    mov     rdx, [xBP + 018h]       ; pVMCBPhys
    mov     r8,  [xBP + 020h]       ; pCtx
    mov     r9,  [xBP + 028h]       ; pVM
    call    r11

    leave
    ret

ALIGNCODE(8)
.guest_fpu_state_active:
    ; Save the host XMM registers.
    movdqa  [rsp + 040h + 000h], xmm6
    movdqa  [rsp + 040h + 010h], xmm7
    movdqa  [rsp + 040h + 020h], xmm8
    movdqa  [rsp + 040h + 030h], xmm9
    movdqa  [rsp + 040h + 040h], xmm10
    movdqa  [rsp + 040h + 050h], xmm11
    movdqa  [rsp + 040h + 060h], xmm12
    movdqa  [rsp + 040h + 070h], xmm13
    movdqa  [rsp + 040h + 080h], xmm14
    movdqa  [rsp + 040h + 090h], xmm15

    ; Load the full guest XMM register state.
    mov     r10, [xBP + 020h]       ; pCtx
    lea     r10, [r10 + XMM_OFF_IN_X86FXSTATE]
    movdqa  xmm0,  [r10 + 000h]
    movdqa  xmm1,  [r10 + 010h]
    movdqa  xmm2,  [r10 + 020h]
    movdqa  xmm3,  [r10 + 030h]
    movdqa  xmm4,  [r10 + 040h]
    movdqa  xmm5,  [r10 + 050h]
    movdqa  xmm6,  [r10 + 060h]
    movdqa  xmm7,  [r10 + 070h]
    movdqa  xmm8,  [r10 + 080h]
    movdqa  xmm9,  [r10 + 090h]
    movdqa  xmm10, [r10 + 0a0h]
    movdqa  xmm11, [r10 + 0b0h]
    movdqa  xmm12, [r10 + 0c0h]
    movdqa  xmm13, [r10 + 0d0h]
    movdqa  xmm14, [r10 + 0e0h]
    movdqa  xmm15, [r10 + 0f0h]

    ; Make the call (same as in the other case).
    mov     r11, [xBP + 38h]        ; pfnVMRun
    mov     r10, [xBP + 30h]        ; pVCpu
    mov     [xSP + 020h], r10
    mov     rcx, [xBP + 010h]       ; pVMCBHostPhys
    mov     rdx, [xBP + 018h]       ; pVMCBPhys
    mov     r8,  [xBP + 020h]       ; pCtx
    mov     r9,  [xBP + 028h]       ; pVM
    call    r11

    ; Save the guest XMM registers.
    mov     r10, [xBP + 020h]       ; pCtx
    lea     r10, [r10 + XMM_OFF_IN_X86FXSTATE]
    movdqa  [r10 + 000h], xmm0
    movdqa  [r10 + 010h], xmm1
    movdqa  [r10 + 020h], xmm2
    movdqa  [r10 + 030h], xmm3
    movdqa  [r10 + 040h], xmm4
    movdqa  [r10 + 050h], xmm5
    movdqa  [r10 + 060h], xmm6
    movdqa  [r10 + 070h], xmm7
    movdqa  [r10 + 080h], xmm8
    movdqa  [r10 + 090h], xmm9
    movdqa  [r10 + 0a0h], xmm10
    movdqa  [r10 + 0b0h], xmm11
    movdqa  [r10 + 0c0h], xmm12
    movdqa  [r10 + 0d0h], xmm13
    movdqa  [r10 + 0e0h], xmm14
    movdqa  [r10 + 0f0h], xmm15

    ; Load the host XMM registers.
    movdqa  xmm6,  [rsp + 040h + 000h]
    movdqa  xmm7,  [rsp + 040h + 010h]
    movdqa  xmm8,  [rsp + 040h + 020h]
    movdqa  xmm9,  [rsp + 040h + 030h]
    movdqa  xmm10, [rsp + 040h + 040h]
    movdqa  xmm11, [rsp + 040h + 050h]
    movdqa  xmm12, [rsp + 040h + 060h]
    movdqa  xmm13, [rsp + 040h + 070h]
    movdqa  xmm14, [rsp + 040h + 080h]
    movdqa  xmm15, [rsp + 040h + 090h]
    leave
    ret
ENDPROC HMR0SVMRunWrapXMM

%endif ; VBOX_WITH_KERNEL_USING_XMM

;
; The default setup of the StartVM routines.
;
%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
 %define MY_NAME(name)   name %+ _32
%else
 %define MY_NAME(name)   name
%endif
%ifdef RT_ARCH_AMD64
 %define MYPUSHAD   MYPUSHAD64
 %define MYPOPAD    MYPOPAD64
 %define MYPUSHSEGS MYPUSHSEGS64
 %define MYPOPSEGS  MYPOPSEGS64
%else
 %define MYPUSHAD   MYPUSHAD32
 %define MYPOPAD    MYPOPAD32
 %define MYPUSHSEGS MYPUSHSEGS32
 %define MYPOPSEGS  MYPOPSEGS32
%endif
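
; HMR0Mixed.mac expands into the actual world-switch bodies (the
; VMLAUNCH/VMRESUME and VMRUN paths) using the MY* macro bindings selected
; above.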

%include "HMR0Mixed.mac"


%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
;
; Write the wrapper procedures.
;
; These routines are probably being too paranoid about selector
; restoring, but better safe than sorry...
;

; DECLASM(int) VMXR0StartVM32(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache /*, PVM pVM, PVMCPU pVCpu*/);
ALIGNCODE(16)
BEGINPROC VMXR0StartVM32
    cmp     byte [NAME(g_fVMXIs64bitHost)], 0
    je      near NAME(VMXR0StartVM32_32)

    ; stack frame
    push    esi
    push    edi
    push    fs
    push    gs

    ; jmp far .thunk64
    db      0xea
    dd      .thunk64, NAME(SUPR0Abs64bitKernelCS)

ALIGNCODE(16)
BITS 64
.thunk64:
    sub     esp, 20h
    mov     edi, [rsp + 20h + 14h]  ; fResume
    mov     esi, [rsp + 20h + 18h]  ; pCtx
    mov     edx, [rsp + 20h + 1Ch]  ; pCache
    call    NAME(VMXR0StartVM32_64)
    add     esp, 20h
    jmp far [.fpthunk32 wrt rip]
.fpthunk32:                         ; 16:32 Pointer to .thunk32.
    dd      .thunk32, NAME(SUPR0AbsKernelCS)

BITS 32
ALIGNCODE(16)
.thunk32:
    pop     gs
    pop     fs
    pop     edi
    pop     esi
    ret
ENDPROC VMXR0StartVM32


; DECLASM(int) VMXR0StartVM64(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache /*, PVM pVM, PVMCPU pVCpu*/);
ALIGNCODE(16)
BEGINPROC VMXR0StartVM64
    cmp     byte [NAME(g_fVMXIs64bitHost)], 0
    je      .not_in_long_mode

    ; stack frame
    push    esi
    push    edi
    push    fs
    push    gs

    ; jmp far .thunk64
    db      0xea
    dd      .thunk64, NAME(SUPR0Abs64bitKernelCS)

ALIGNCODE(16)
BITS 64
.thunk64:
    sub     esp, 20h
    mov     edi, [rsp + 20h + 14h]  ; fResume
    mov     esi, [rsp + 20h + 18h]  ; pCtx
    mov     edx, [rsp + 20h + 1Ch]  ; pCache
    call    NAME(VMXR0StartVM64_64)
    add     esp, 20h
    jmp far [.fpthunk32 wrt rip]
.fpthunk32:                         ; 16:32 Pointer to .thunk32.
    dd      .thunk32, NAME(SUPR0AbsKernelCS)

BITS 32
ALIGNCODE(16)
.thunk32:
    pop     gs
    pop     fs
    pop     edi
    pop     esi
    ret

.not_in_long_mode:
    mov     eax, VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE
    ret
ENDPROC VMXR0StartVM64

;DECLASM(int) SVMR0VMRun(RTHCPHYS pVMCBHostPhys, RTHCPHYS pVMCBPhys, PCPUMCTX pCtx /*, PVM pVM, PVMCPU pVCpu*/);
ALIGNCODE(16)
BEGINPROC SVMR0VMRun
    cmp     byte [NAME(g_fVMXIs64bitHost)], 0
    je      near NAME(SVMR0VMRun_32)

    ; stack frame
    push    esi
    push    edi
    push    fs
    push    gs

    ; jmp far .thunk64
    db      0xea
    dd      .thunk64, NAME(SUPR0Abs64bitKernelCS)

ALIGNCODE(16)
BITS 64
.thunk64:
    sub     esp, 20h
    mov     rdi, [rsp + 20h + 14h]  ; pVMCBHostPhys
    mov     rsi, [rsp + 20h + 1Ch]  ; pVMCBPhys
    mov     edx, [rsp + 20h + 24h]  ; pCtx
    call    NAME(SVMR0VMRun_64)
    add     esp, 20h
    jmp far [.fpthunk32 wrt rip]
.fpthunk32:                         ; 16:32 Pointer to .thunk32.
    dd      .thunk32, NAME(SUPR0AbsKernelCS)

BITS 32
ALIGNCODE(16)
.thunk32:
    pop     gs
    pop     fs
    pop     edi
    pop     esi
    ret
ENDPROC SVMR0VMRun


; DECLASM(int) SVMR0VMRun64(RTHCPHYS pVMCBHostPhys, RTHCPHYS pVMCBPhys, PCPUMCTX pCtx /*, PVM pVM, PVMCPU pVCpu*/);
ALIGNCODE(16)
BEGINPROC SVMR0VMRun64
    cmp     byte [NAME(g_fVMXIs64bitHost)], 0
    je      .not_in_long_mode

    ; stack frame
    push    esi
    push    edi
    push    fs
    push    gs

    ; jmp far .thunk64
    db      0xea
    dd      .thunk64, NAME(SUPR0Abs64bitKernelCS)

ALIGNCODE(16)
BITS 64
.thunk64:
    sub     esp, 20h
    mov     rdi, [rsp + 20h + 14h]  ; pVMCBHostPhys
    mov     rsi, [rsp + 20h + 1Ch]  ; pVMCBPhys
    mov     edx, [rsp + 20h + 24h]  ; pCtx
    call    NAME(SVMR0VMRun64_64)
    add     esp, 20h
    jmp far [.fpthunk32 wrt rip]
.fpthunk32:                         ; 16:32 Pointer to .thunk32.
    dd      .thunk32, NAME(SUPR0AbsKernelCS)

BITS 32
ALIGNCODE(16)
.thunk32:
    pop     gs
    pop     fs
    pop     edi
    pop     esi
    ret

.not_in_long_mode:
    mov     eax, VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE
    ret
ENDPROC SVMR0VMRun64

;
; Do it a second time pretending we're a 64-bit host.
;
; This *HAS* to be done at the very end of the file to avoid restoring
; macros. So, add new code *BEFORE* this mess.
;
BITS 64
%undef  RT_ARCH_X86
%define RT_ARCH_AMD64
%undef  ASM_CALL64_MSC
%define ASM_CALL64_GCC
%define xCB 8
%define xSP rsp
%define xBP rbp
%define xAX rax
%define xBX rbx
%define xCX rcx
%define xDX rdx
%define xDI rdi
%define xSI rsi
%define MY_NAME(name)   name %+ _64
%define MYPUSHAD        MYPUSHAD64
%define MYPOPAD         MYPOPAD64
%define MYPUSHSEGS      MYPUSHSEGS64
%define MYPOPSEGS       MYPOPSEGS64

%include "HMR0Mixed.mac"
%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL