; $Id: HMR0Mixed.mac 49726 2013-11-29 14:10:53Z vboxsync $
;; @file
; HM - Ring-0 Host 32/64, Guest 32/64 world-switch routines
;
; Darwin uses this to build two versions in the hybrid case.
; Included by HMR0A.asm with RT_ARCH_AMD64 defined or undefined.
;

;
; Copyright (C) 2006-2013 Oracle Corporation
;
; This file is part of VirtualBox Open Source Edition (OSE), as
; available from http://www.virtualbox.org. This file is free software;
; you can redistribute it and/or modify it under the terms of the GNU
; General Public License (GPL) as published by the Free Software
; Foundation, in version 2 as it comes in the "COPYING" file of the
; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
;
%ifdef RT_ARCH_AMD64
 ;;
 ; Keep these macro definitions in this file, as it gets included and compiled
 ; with RT_ARCH_AMD64 once and RT_ARCH_X86 once.
 %define VMX_SKIP_GDTR
 %ifdef RT_OS_DARWIN
  ; Darwin (Mavericks) uses the IDTR limit to store the CPU ID, so we always need to restore it.
  ; See @bugref{6875}.
 %elifdef RT_OS_WINDOWS
  ; Windows 8.1 RTM also seems to be using the IDTR limit for something. See @bugref{6956}.
  ;; @todo Figure out what exactly it does and try to restrict it to specific Windows versions.
 %else
  %define VMX_SKIP_IDTR
 %endif
 %define VMX_SKIP_TR
%endif
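
; Note: when one of the VMX_SKIP_* symbols above is defined, the corresponding
;       save (sgdt/sidt/str) before VM-entry and restore (lgdt/lidt/ltr) after
;       VM-exit is simply omitted by the code below.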

;; @def RESTORE_STATE_VM32
; Macro restoring essential host state and updating guest state
; for common host, 32-bit guest for VT-x.
%macro RESTORE_STATE_VM32 0
    ; Restore base and limit of the IDTR & GDTR.
 %ifndef VMX_SKIP_IDTR
    lidt [xSP]
    add xSP, xCB * 2
 %endif
 %ifndef VMX_SKIP_GDTR
    lgdt [xSP]
    add xSP, xCB * 2
 %endif

    push xDI
 %ifndef VMX_SKIP_TR
    mov xDI, [xSP + xCB * 3]                    ; pCtx (*3 to skip the saved xDI, TR, LDTR).
 %else
    mov xDI, [xSP + xCB * 2]                    ; pCtx (*2 to skip the saved xDI, LDTR).
 %endif

    mov [ss:xDI + CPUMCTX.eax], eax
    mov [ss:xDI + CPUMCTX.ebx], ebx
    mov [ss:xDI + CPUMCTX.ecx], ecx
    mov [ss:xDI + CPUMCTX.edx], edx
    mov [ss:xDI + CPUMCTX.esi], esi
    mov [ss:xDI + CPUMCTX.ebp], ebp
    mov xAX, cr2
    mov [ss:xDI + CPUMCTX.cr2], xAX

 %ifdef RT_ARCH_AMD64
    pop xAX                                     ; The guest edi we pushed above.
    mov dword [ss:xDI + CPUMCTX.edi], eax
 %else
    pop dword [ss:xDI + CPUMCTX.edi]            ; The guest edi we pushed above.
 %endif

 %ifndef VMX_SKIP_TR
    ; Restore TSS selector; must mark it as not busy before using ltr (!)
    ; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p)
    ; @todo get rid of sgdt
    pop xBX                                     ; Saved TR
    sub xSP, xCB * 2
    sgdt [xSP]
    mov xAX, xBX
    and eax, X86_SEL_MASK_OFF_RPL               ; Mask away TI and RPL bits leaving only the descriptor offset.
    add xAX, [xSP + 2]                          ; eax <- GDTR.address + descriptor offset.
    and dword [ss:xAX + 4], ~RT_BIT(9)          ; Clear the busy flag in TSS desc (bits 0-7=base, bit 9=busy bit).
    ltr bx
    add xSP, xCB * 2
 %endif

    pop xAX                                     ; Saved LDTR
 %ifdef RT_ARCH_AMD64
    cmp eax, 0
    je %%skip_ldt_write32
 %endif
    lldt ax

%%skip_ldt_write32:
    add xSP, xCB                                ; pCtx

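    ; When cached VMCS accesses are in use, read back the VMCS fields listed in
    ; the read cache (VMCSCACHE.Read.aField) into their value slots (aFieldVal).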
 %ifdef VMX_USE_CACHED_VMCS_ACCESSES
    pop xDX                                     ; Saved pCache

    ; Note! If we get here as a result of an invalid VMCS pointer, all the following
    ;       vmreads will fail (only eflags.cf=1 will be set), but that shouldn't cause
    ;       any trouble; it is just less efficient.
    mov ecx, [ss:xDX + VMCSCACHE.Read.cValidEntries]
    cmp ecx, 0                                  ; Can't happen
    je %%no_cached_read32
    jmp %%cached_read32

ALIGN(16)
%%cached_read32:
    dec xCX
    mov eax, [ss:xDX + VMCSCACHE.Read.aField + xCX * 4]
    vmread [ss:xDX + VMCSCACHE.Read.aFieldVal + xCX * 8], xAX
    cmp xCX, 0
    jnz %%cached_read32
%%no_cached_read32:
 %endif

    ; Restore segment registers.
    MYPOPSEGS xAX, ax

    ; Restore general purpose registers.
    MYPOPAD
%endmacro


;/**
; * Prepares for and executes VMLAUNCH/VMRESUME (32-bit guest mode).
; *
; * @returns VBox status code
; * @param   fResume    x86:[ebp+8], msc:rcx,gcc:rdi     Whether to use vmlaunch/vmresume.
; * @param   pCtx       x86:[ebp+c], msc:rdx,gcc:rsi     Pointer to the guest-CPU context.
; * @param   pCache     x86:[esp+10],msc:r8, gcc:rdx     Pointer to the VMCS cache.
; */
ALIGNCODE(16)
BEGINPROC MY_NAME(VMXR0StartVM32)
    push xBP
    mov xBP, xSP

    pushf
    cli

    ; Save all general purpose host registers.
    MYPUSHAD

    ; First we have to save some final CPU context registers.
    mov eax, VMX_VMCS_HOST_RIP
%ifdef RT_ARCH_AMD64
    lea r10, [.vmlaunch_done wrt rip]
    vmwrite rax, r10
%else
    mov ecx, .vmlaunch_done
    vmwrite eax, ecx
%endif
    ; Note: assumes success!
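    ; (On every VM-exit the CPU loads RIP from the VMX_VMCS_HOST_RIP field written
    ; above, so execution resumes at .vmlaunch_done no matter how the guest exits.)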

    ; Save guest-CPU context pointer.
%ifdef RT_ARCH_AMD64
 %ifdef ASM_CALL64_GCC
    ; fResume already in rdi
    ; pCtx already in rsi
    mov rbx, rdx                                ; pCache
 %else
    mov rdi, rcx                                ; fResume
    mov rsi, rdx                                ; pCtx
    mov rbx, r8                                 ; pCache
 %endif
%else
    mov edi, [ebp + 8]                          ; fResume
    mov esi, [ebp + 12]                         ; pCtx
    mov ebx, [ebp + 16]                         ; pCache
%endif

    ; Save segment registers.
    ; Note: MYPUSHSEGS trashes rdx & rcx, so we moved it here (msvc amd64 case).
    MYPUSHSEGS xAX, ax

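    ; When cached VMCS accesses are in use, flush the queued writes below: each
    ; VMCSCACHE.Write aField/aFieldVal pair is written to the current VMCS with
    ; vmwrite and the valid-entry count is then reset.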
%ifdef VMX_USE_CACHED_VMCS_ACCESSES
    mov ecx, [xBX + VMCSCACHE.Write.cValidEntries]
    cmp ecx, 0
    je .no_cached_writes
    mov edx, ecx
    mov ecx, 0
    jmp .cached_write

ALIGN(16)
.cached_write:
    mov eax, [xBX + VMCSCACHE.Write.aField + xCX * 4]
    vmwrite xAX, [xBX + VMCSCACHE.Write.aFieldVal + xCX * 8]
    inc xCX
    cmp xCX, xDX
    jl .cached_write

    mov dword [xBX + VMCSCACHE.Write.cValidEntries], 0
.no_cached_writes:

    ; Save the pCache pointer.
    push xBX
%endif

    ; Save the pCtx pointer.
    push xSI

    ; Save host LDTR.
    xor eax, eax
    sldt ax
    push xAX

%ifndef VMX_SKIP_TR
    ; The host TR limit is reset to 0x67; save & restore it manually.
    str eax
    push xAX
%endif

%ifndef VMX_SKIP_GDTR
    ; VT-x only saves the base of the GDTR & IDTR and resets the limit to 0xffff; we must restore the limit correctly!
    sub xSP, xCB * 2
    sgdt [xSP]
%endif
%ifndef VMX_SKIP_IDTR
    sub xSP, xCB * 2
    sidt [xSP]
%endif

    ; Load CR2 if necessary (may be expensive as writing CR2 is a synchronizing instruction).
    mov xBX, [xSI + CPUMCTX.cr2]
    mov xDX, cr2
    cmp xBX, xDX
    je .skip_cr2_write32
    mov cr2, xBX

.skip_cr2_write32:
    mov eax, VMX_VMCS_HOST_RSP
    vmwrite xAX, xSP
    ; Note: assumes success!
    ; Don't mess with ESP anymore!!!
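    ; (On VM-exit the CPU loads RSP from VMX_VMCS_HOST_RSP, so the stack must stay
    ; exactly as laid out above: RESTORE_STATE_VM32 finds the IDTR/GDTR images,
    ; the saved TR/LDTR and the pCtx/pCache pointers at fixed offsets from it.)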

    ; Load guest general purpose registers.
    mov eax, [xSI + CPUMCTX.eax]
    mov ebx, [xSI + CPUMCTX.ebx]
    mov ecx, [xSI + CPUMCTX.ecx]
    mov edx, [xSI + CPUMCTX.edx]
    mov ebp, [xSI + CPUMCTX.ebp]

    ; Resume or start VM?
    cmp xDI, 0                                  ; fResume
    je .vmlaunch_launch

    ; Load guest edi & esi.
    mov edi, [xSI + CPUMCTX.edi]
    mov esi, [xSI + CPUMCTX.esi]

    vmresume
    jmp .vmlaunch_done                          ; Here if vmresume detected a failure.

.vmlaunch_launch:
    ; Load guest edi & esi.
    mov edi, [xSI + CPUMCTX.edi]
    mov esi, [xSI + CPUMCTX.esi]

    vmlaunch
    jmp .vmlaunch_done                          ; Here if vmlaunch detected a failure.

ALIGNCODE(16) ;; @todo YASM BUG - this alignment is wrong on darwin, it's 1 byte off.
.vmlaunch_done:
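    ; VM-entry failure convention: CF=1 (VMfailInvalid) means the current VMCS
    ; pointer is invalid; ZF=1 (VMfailValid) means VM-entry failed and the error
    ; code is in the VM-instruction error field of the VMCS.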
    jc near .vmxstart_invalid_vmcs_ptr
    jz near .vmxstart_start_failed

    RESTORE_STATE_VM32
    mov eax, VINF_SUCCESS

.vmstart_end:
    popf
    pop xBP
    ret

.vmxstart_invalid_vmcs_ptr:
    RESTORE_STATE_VM32
    mov eax, VERR_VMX_INVALID_VMCS_PTR_TO_START_VM
    jmp .vmstart_end

.vmxstart_start_failed:
    RESTORE_STATE_VM32
    mov eax, VERR_VMX_UNABLE_TO_START_VM
    jmp .vmstart_end

ENDPROC MY_NAME(VMXR0StartVM32)


%ifdef RT_ARCH_AMD64
;; @def RESTORE_STATE_VM64
; Macro restoring essential host state and updating guest state
; for 64-bit host, 64-bit guest for VT-x.
;
%macro RESTORE_STATE_VM64 0
    ; Restore base and limit of the IDTR & GDTR.
 %ifndef VMX_SKIP_IDTR
    lidt [xSP]
    add xSP, xCB * 2
 %endif
 %ifndef VMX_SKIP_GDTR
    lgdt [xSP]
    add xSP, xCB * 2
 %endif

    push xDI
 %ifndef VMX_SKIP_TR
    mov xDI, [xSP + xCB * 3]                    ; pCtx (*3 to skip the saved xDI, TR, LDTR)
 %else
    mov xDI, [xSP + xCB * 2]                    ; pCtx (*2 to skip the saved xDI, LDTR)
 %endif

    mov qword [xDI + CPUMCTX.eax], rax
    mov qword [xDI + CPUMCTX.ebx], rbx
    mov qword [xDI + CPUMCTX.ecx], rcx
    mov qword [xDI + CPUMCTX.edx], rdx
    mov qword [xDI + CPUMCTX.esi], rsi
    mov qword [xDI + CPUMCTX.ebp], rbp
    mov qword [xDI + CPUMCTX.r8],  r8
    mov qword [xDI + CPUMCTX.r9],  r9
    mov qword [xDI + CPUMCTX.r10], r10
    mov qword [xDI + CPUMCTX.r11], r11
    mov qword [xDI + CPUMCTX.r12], r12
    mov qword [xDI + CPUMCTX.r13], r13
    mov qword [xDI + CPUMCTX.r14], r14
    mov qword [xDI + CPUMCTX.r15], r15
    mov rax, cr2
    mov qword [xDI + CPUMCTX.cr2], rax

    pop xAX                                     ; The guest rdi we pushed above
    mov qword [xDI + CPUMCTX.edi], rax

 %ifndef VMX_SKIP_TR
    ; Restore TSS selector; must mark it as not busy before using ltr (!)
    ; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p).
    ; @todo get rid of sgdt
    pop xBX                                     ; Saved TR
    sub xSP, xCB * 2
    sgdt [xSP]
    mov xAX, xBX
    and eax, X86_SEL_MASK_OFF_RPL               ; Mask away TI and RPL bits leaving only the descriptor offset.
    add xAX, [xSP + 2]                          ; eax <- GDTR.address + descriptor offset.
    and dword [xAX + 4], ~RT_BIT(9)             ; Clear the busy flag in TSS desc (bits 0-7=base, bit 9=busy bit).
    ltr bx
    add xSP, xCB * 2
 %endif

    pop xAX                                     ; Saved LDTR
    cmp eax, 0
    je %%skip_ldt_write64
    lldt ax

%%skip_ldt_write64:
    pop xSI                                     ; pCtx (needed in rsi by the macros below)

 %ifdef VMX_USE_CACHED_VMCS_ACCESSES
    pop xDX                                     ; Saved pCache

    ; Note! If we get here as a result of an invalid VMCS pointer, all the following
    ;       vmreads will fail (only eflags.cf=1 will be set), but that shouldn't cause
    ;       any trouble; it is just less efficient.
    mov ecx, [xDX + VMCSCACHE.Read.cValidEntries]
    cmp ecx, 0                                  ; Can't happen
    je %%no_cached_read64
    jmp %%cached_read64

ALIGN(16)
%%cached_read64:
    dec xCX
    mov eax, [xDX + VMCSCACHE.Read.aField + xCX * 4]
    vmread [xDX + VMCSCACHE.Read.aFieldVal + xCX * 8], xAX
    cmp xCX, 0
    jnz %%cached_read64
%%no_cached_read64:
 %endif

    ; Restore segment registers.
    MYPOPSEGS xAX, ax

    ; Restore general purpose registers.
    MYPOPAD
%endmacro


;/**
; * Prepares for and executes VMLAUNCH/VMRESUME (64-bit guest mode).
; *
; * @returns VBox status code
; * @param   fResume    msc:rcx, gcc:rdi     Whether to use vmlaunch/vmresume.
; * @param   pCtx       msc:rdx, gcc:rsi     Pointer to the guest-CPU context.
; * @param   pCache     msc:r8,  gcc:rdx     Pointer to the VMCS cache.
; */
ALIGNCODE(16)
BEGINPROC MY_NAME(VMXR0StartVM64)
    push xBP
    mov xBP, xSP

    pushf
    cli

    ; Save all general purpose host registers.
    MYPUSHAD

    ; First we have to save some final CPU context registers.
    lea r10, [.vmlaunch64_done wrt rip]
    mov rax, VMX_VMCS_HOST_RIP                  ; Return address (too difficult to continue after VMLAUNCH?).
    vmwrite rax, r10
    ; Note: assumes success!
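    ; (As in the 32-bit case, the CPU loads RIP from VMX_VMCS_HOST_RIP on VM-exit,
    ; so execution resumes at .vmlaunch64_done.)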

    ; Save guest-CPU context pointer.
%ifdef ASM_CALL64_GCC
    ; fResume already in rdi
    ; pCtx already in rsi
    mov rbx, rdx                                ; pCache
%else
    mov rdi, rcx                                ; fResume
    mov rsi, rdx                                ; pCtx
    mov rbx, r8                                 ; pCache
%endif

    ; Save segment registers.
    ; Note: MYPUSHSEGS trashes rdx & rcx, so we moved it here (msvc amd64 case).
    MYPUSHSEGS xAX, ax

%ifdef VMX_USE_CACHED_VMCS_ACCESSES
    mov ecx, [xBX + VMCSCACHE.Write.cValidEntries]
    cmp ecx, 0
    je .no_cached_writes
    mov edx, ecx
    mov ecx, 0
    jmp .cached_write

ALIGN(16)
.cached_write:
    mov eax, [xBX + VMCSCACHE.Write.aField + xCX * 4]
    vmwrite xAX, [xBX + VMCSCACHE.Write.aFieldVal + xCX * 8]
    inc xCX
    cmp xCX, xDX
    jl .cached_write

    mov dword [xBX + VMCSCACHE.Write.cValidEntries], 0
.no_cached_writes:

    ; Save the pCache pointer.
    push xBX
%endif

    ; Save the pCtx pointer.
    push xSI

    ; Save host LDTR.
    xor eax, eax
    sldt ax
    push xAX

%ifndef VMX_SKIP_TR
    ; The host TR limit is reset to 0x67; save & restore it manually.
    str eax
    push xAX
%endif

%ifndef VMX_SKIP_GDTR
    ; VT-x only saves the base of the GDTR & IDTR and resets the limit to 0xffff; we must restore the limit correctly!
    sub xSP, xCB * 2
    sgdt [xSP]
%endif
%ifndef VMX_SKIP_IDTR
    sub xSP, xCB * 2
    sidt [xSP]
%endif

    ; Load CR2 if necessary (may be expensive as writing CR2 is a synchronizing instruction).
    mov rbx, qword [xSI + CPUMCTX.cr2]
    mov rdx, cr2
    cmp rbx, rdx
    je .skip_cr2_write
    mov cr2, rbx

.skip_cr2_write:
    mov eax, VMX_VMCS_HOST_RSP
    vmwrite xAX, xSP
    ; Note: assumes success!
    ; Don't mess with ESP anymore!!!
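    ; (Same as the 32-bit path: RSP is reloaded from VMX_VMCS_HOST_RSP on VM-exit
    ; and RESTORE_STATE_VM64 relies on the stack layout built up above.)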

    ; Load guest general purpose registers.
    mov rax, qword [xSI + CPUMCTX.eax]
    mov rbx, qword [xSI + CPUMCTX.ebx]
    mov rcx, qword [xSI + CPUMCTX.ecx]
    mov rdx, qword [xSI + CPUMCTX.edx]
    mov rbp, qword [xSI + CPUMCTX.ebp]
    mov r8,  qword [xSI + CPUMCTX.r8]
    mov r9,  qword [xSI + CPUMCTX.r9]
    mov r10, qword [xSI + CPUMCTX.r10]
    mov r11, qword [xSI + CPUMCTX.r11]
    mov r12, qword [xSI + CPUMCTX.r12]
    mov r13, qword [xSI + CPUMCTX.r13]
    mov r14, qword [xSI + CPUMCTX.r14]
    mov r15, qword [xSI + CPUMCTX.r15]

    ; Resume or start VM?
    cmp xDI, 0                                  ; fResume
    je .vmlaunch64_launch

    ; Load guest rdi & rsi.
    mov rdi, qword [xSI + CPUMCTX.edi]
    mov rsi, qword [xSI + CPUMCTX.esi]

    vmresume
    jmp .vmlaunch64_done                        ; Here if vmresume detected a failure.

.vmlaunch64_launch:
    ; Load guest rdi & rsi.
    mov rdi, qword [xSI + CPUMCTX.edi]
    mov rsi, qword [xSI + CPUMCTX.esi]

    vmlaunch
    jmp .vmlaunch64_done                        ; Here if vmlaunch detected a failure.

ALIGNCODE(16)
.vmlaunch64_done:
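    ; Same failure convention as above: CF=1 = invalid VMCS pointer, ZF=1 = VM-entry
    ; failed with an error code in the VM-instruction error field.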
    jc near .vmxstart64_invalid_vmcs_ptr
    jz near .vmxstart64_start_failed

    RESTORE_STATE_VM64
    mov eax, VINF_SUCCESS

.vmstart64_end:
    popf
    pop xBP
    ret

.vmxstart64_invalid_vmcs_ptr:
    RESTORE_STATE_VM64
    mov eax, VERR_VMX_INVALID_VMCS_PTR_TO_START_VM
    jmp .vmstart64_end

.vmxstart64_start_failed:
    RESTORE_STATE_VM64
    mov eax, VERR_VMX_UNABLE_TO_START_VM
    jmp .vmstart64_end
ENDPROC MY_NAME(VMXR0StartVM64)
%endif ; RT_ARCH_AMD64


;/**
; * Prepares for and executes VMRUN (32-bit guests).
; *
; * @returns VBox status code
; * @param   pVMCBHostPhys   Physical address of the host VMCB.
; * @param   pVMCBPhys       Physical address of the guest VMCB.
; * @param   pCtx            Pointer to the guest CPU-context.
; */
ALIGNCODE(16)
BEGINPROC MY_NAME(SVMR0VMRun)
%ifdef RT_ARCH_AMD64 ; Fake a cdecl stack frame.
 %ifdef ASM_CALL64_GCC
    push rdx
    push rsi
    push rdi
 %else
    push r8
    push rdx
    push rcx
 %endif
    push 0
%endif
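    ; The pushes above (register args plus a dummy return-address slot) replicate
    ; the 32-bit cdecl layout, so the [xBP + xCB * 2 + ...] argument offsets below
    ; work the same on both architectures; the frame is dropped again before 'ret'.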
    push xBP
    mov xBP, xSP
    pushf

    ; Save all general purpose host registers.
    MYPUSHAD

    ; Save guest CPU-context pointer.
    mov xSI, [xBP + xCB * 2 + RTHCPHYS_CB * 2]  ; pCtx
    push xSI                                    ; push for saving the state at the end

    ; Save host fs, gs, sysenter msr etc.
    mov xAX, [xBP + xCB * 2]                    ; pVMCBHostPhys (64 bits physical address; x86: take low dword only)
    push xAX                                    ; save for the vmload after vmrun
    vmsave
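    ; (vmsave stores the hidden state of FS, GS, TR and LDTR together with the
    ; SYSENTER and, in long mode, the KERNEL_GS_BASE/STAR-family MSRs into the VMCB
    ; addressed by rAX; the matching vmload after vmrun restores the host values.)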

    ; Setup eax for VMLOAD.
    mov xAX, [xBP + xCB * 2 + RTHCPHYS_CB]      ; pVMCBPhys (64 bits physical address; take low dword only)

    ; Load guest general purpose registers.
    ; eax is loaded from the VMCB by VMRUN.
    mov ebx, [xSI + CPUMCTX.ebx]
    mov ecx, [xSI + CPUMCTX.ecx]
    mov edx, [xSI + CPUMCTX.edx]
    mov edi, [xSI + CPUMCTX.edi]
    mov ebp, [xSI + CPUMCTX.ebp]
    mov esi, [xSI + CPUMCTX.esi]

    ; Clear the global interrupt flag & execute sti to make sure external interrupts cause a world switch.
    clgi
    sti

    ; Load guest fs, gs, sysenter msr etc.
    vmload
    ; Run the VM.
    vmrun

    ; eax is in the VMCB already; we can use it here.
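    ; (#VMEXIT resumes here: VMRUN/#VMEXIT only switch rAX, rSP, RIP and RFLAGS,
    ; so the remaining GPRs still hold guest values and are written back to pCtx below.)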

    ; Save guest fs, gs, sysenter msr etc.
    vmsave

    ; Load host fs, gs, sysenter msr etc.
    pop xAX                                     ; Pushed above
    vmload

    ; Set the global interrupt flag again, but execute cli to make sure IF=0.
    cli
    stgi

    pop xAX                                     ; pCtx

    mov [ss:xAX + CPUMCTX.ebx], ebx
    mov [ss:xAX + CPUMCTX.ecx], ecx
    mov [ss:xAX + CPUMCTX.edx], edx
    mov [ss:xAX + CPUMCTX.esi], esi
    mov [ss:xAX + CPUMCTX.edi], edi
    mov [ss:xAX + CPUMCTX.ebp], ebp

    ; Restore host general purpose registers.
    MYPOPAD

    mov eax, VINF_SUCCESS

    popf
    pop xBP
%ifdef RT_ARCH_AMD64
    add xSP, 4 * xCB
%endif
    ret
ENDPROC MY_NAME(SVMR0VMRun)

%ifdef RT_ARCH_AMD64
;/**
; * Prepares for and executes VMRUN (64-bit guests).
; *
; * @returns VBox status code
; * @param   pVMCBHostPhys   Physical address of the host VMCB.
; * @param   pVMCBPhys       Physical address of the guest VMCB.
; * @param   pCtx            Pointer to the guest-CPU context.
; */
ALIGNCODE(16)
BEGINPROC MY_NAME(SVMR0VMRun64)
    ; Fake a cdecl stack frame.
 %ifdef ASM_CALL64_GCC
    push rdx
    push rsi
    push rdi
 %else
    push r8
    push rdx
    push rcx
 %endif
    push 0
    push rbp
    mov rbp, rsp
    pushf

    ; Manual save and restore:
    ;  - General purpose registers except RIP, RSP, RAX
    ;
    ; Trashed:
    ;  - CR2  (we don't care)
    ;  - LDTR (reset to 0)
    ;  - DRx  (presumably not changed at all)
    ;  - DR7  (reset to 0x400)
    ;

    ; Save all general purpose host registers.
    MYPUSHAD

    ; Save guest CPU-context pointer.
    mov rsi, [rbp + xCB * 2 + RTHCPHYS_CB * 2]  ; pCtx
    push rsi                                    ; push for saving the state at the end

    ; Save host fs, gs, sysenter msr etc.
    mov rax, [rbp + xCB * 2]                    ; pVMCBHostPhys (64 bits physical address; x86: take low dword only)
    push rax                                    ; Save for the vmload after vmrun
    vmsave

    ; Setup rax for VMLOAD.
    mov rax, [rbp + xCB * 2 + RTHCPHYS_CB]      ; pVMCBPhys (64 bits physical address; take low dword only)

    ; Load guest general purpose registers.
    ; rax is loaded from the VMCB by VMRUN.
    mov rbx, qword [xSI + CPUMCTX.ebx]
    mov rcx, qword [xSI + CPUMCTX.ecx]
    mov rdx, qword [xSI + CPUMCTX.edx]
    mov rdi, qword [xSI + CPUMCTX.edi]
    mov rbp, qword [xSI + CPUMCTX.ebp]
    mov r8,  qword [xSI + CPUMCTX.r8]
    mov r9,  qword [xSI + CPUMCTX.r9]
    mov r10, qword [xSI + CPUMCTX.r10]
    mov r11, qword [xSI + CPUMCTX.r11]
    mov r12, qword [xSI + CPUMCTX.r12]
    mov r13, qword [xSI + CPUMCTX.r13]
    mov r14, qword [xSI + CPUMCTX.r14]
    mov r15, qword [xSI + CPUMCTX.r15]
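    ; rsi must be loaded last: it is the pCtx pointer used as the base for all the loads above.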
    mov rsi, qword [xSI + CPUMCTX.esi]

    ; Clear the global interrupt flag & execute sti to make sure external interrupts cause a world switch.
    clgi
    sti

    ; Load guest fs, gs, sysenter msr etc.
    vmload
    ; Run the VM.
    vmrun

    ; rax is in the VMCB already; we can use it here.

    ; Save guest fs, gs, sysenter msr etc.
    vmsave

    ; Load host fs, gs, sysenter msr etc.
    pop rax                                     ; pushed above
    vmload

    ; Set the global interrupt flag again, but execute cli to make sure IF=0.
    cli
    stgi

    pop rax                                     ; pCtx

    mov qword [rax + CPUMCTX.ebx], rbx
    mov qword [rax + CPUMCTX.ecx], rcx
    mov qword [rax + CPUMCTX.edx], rdx
    mov qword [rax + CPUMCTX.esi], rsi
    mov qword [rax + CPUMCTX.edi], rdi
    mov qword [rax + CPUMCTX.ebp], rbp
    mov qword [rax + CPUMCTX.r8],  r8
    mov qword [rax + CPUMCTX.r9],  r9
    mov qword [rax + CPUMCTX.r10], r10
    mov qword [rax + CPUMCTX.r11], r11
    mov qword [rax + CPUMCTX.r12], r12
    mov qword [rax + CPUMCTX.r13], r13
    mov qword [rax + CPUMCTX.r14], r14
    mov qword [rax + CPUMCTX.r15], r15

    ; Restore host general purpose registers.
    MYPOPAD

    mov eax, VINF_SUCCESS

    popf
    pop rbp
    add rsp, 4 * xCB
    ret
ENDPROC MY_NAME(SVMR0VMRun64)
%endif ; RT_ARCH_AMD64
