VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/HMR0Mixed.mac@ 48318

Last change on this file was r48318, checked in by vboxsync on 2013-09-05:

VMM/HM: Fix incorrect jump causing panic while restoring GS in VT-x.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 21.5 KB
; $Id: HMR0Mixed.mac 48318 2013-09-05 17:15:49Z vboxsync $
;; @file
; HM - Ring-0 Host 32/64, Guest 32/64 world-switch routines
;
; Darwin uses this to build two versions in the hybrid case.
; Included by HMR0A.asm with RT_ARCH_AMD64 defined or undefined.
;
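;
; A rough sketch (illustration only; the exact mechanics live in HMR0A.asm) of
; the double-include pattern described above:
;
;   %define RT_ARCH_AMD64
;   %include "HMR0Mixed.mac"    ; assembles the 64-bit variants
;   %undef  RT_ARCH_AMD64
;   %include "HMR0Mixed.mac"    ; assembles the 32-bit variants
;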

;
; Copyright (C) 2006-2013 Oracle Corporation
;
; This file is part of VirtualBox Open Source Edition (OSE), as
; available from http://www.virtualbox.org. This file is free software;
; you can redistribute it and/or modify it under the terms of the GNU
; General Public License (GPL) as published by the Free Software
; Foundation, in version 2 as it comes in the "COPYING" file of the
; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
;

%ifdef RT_ARCH_AMD64
 ;;
 ; Keep these macro definitions in this file as it gets included and compiled
 ; with RT_ARCH_AMD64 once and RT_ARCH_X86 once.
 %define VMX_SKIP_GDTR
 %ifndef RT_OS_DARWIN
  ; Darwin (Mavericks) uses the IDTR limit to store the CPU ID, so we always need to restore it. See @bugref{6875}.
  %define VMX_SKIP_IDTR
 %endif
 %define VMX_SKIP_TR
%endif

;; @def RESTORESTATEVM32
; Macro restoring essential host state and updating guest state
; for common host, 32-bit guest for VT-x.
;
; @param 1 Jump label suffix 1.
; @param 2 Jump label suffix 2.
; @param 3 Jump label suffix 3.
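;
; Illustration (a sketch; the real invocations appear at the end of
; VMXR0StartVM32 below): each expansion must be given its own suffixes so the
; local labels built from them (.skipldtwrite32%1, .no_cached_read32%2,
; .cached_read32%3) stay unique when the macro is expanded more than once in a
; single procedure:
;
;   RESTORESTATEVM32 A, B, C    ; success path
;   RESTORESTATEVM32 D, E, F    ; invalid VMCS pointer path
;   RESTORESTATEVM32 G, H, I    ; VMLAUNCH/VMRESUME failure path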
%macro RESTORESTATEVM32 3
    ; Restore base and limit of the IDTR & GDTR.
 %ifndef VMX_SKIP_IDTR
    lidt [xSP]
    add xSP, xCB * 2
 %endif
 %ifndef VMX_SKIP_GDTR
    lgdt [xSP]
    add xSP, xCB * 2
 %endif

    push xDI
 %ifndef VMX_SKIP_TR
    mov xDI, [xSP + xCB * 3] ; pCtx (*3 to skip the saved xDI, TR, LDTR).
 %else
    mov xDI, [xSP + xCB * 2] ; pCtx (*2 to skip the saved xDI, LDTR).
 %endif

    mov [ss:xDI + CPUMCTX.eax], eax
    mov [ss:xDI + CPUMCTX.ebx], ebx
    mov [ss:xDI + CPUMCTX.ecx], ecx
    mov [ss:xDI + CPUMCTX.edx], edx
    mov [ss:xDI + CPUMCTX.esi], esi
    mov [ss:xDI + CPUMCTX.ebp], ebp
    mov xAX, cr2
    mov [ss:xDI + CPUMCTX.cr2], xAX

 %ifdef RT_ARCH_AMD64
    pop xAX ; The guest edi we pushed above.
    mov dword [ss:xDI + CPUMCTX.edi], eax
 %else
    pop dword [ss:xDI + CPUMCTX.edi] ; The guest edi we pushed above.
 %endif

 %ifndef VMX_SKIP_TR
    ; Restore TSS selector; must mark it as not busy before using ltr (!)
    ; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p)
    ; @todo get rid of sgdt
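    ; (Architectural note: ltr raises #GP if the referenced TSS descriptor is
    ; already marked busy, which is why the busy bit is cleared in the GDT
    ; entry before TR is reloaded below.)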
    pop xBX ; Saved TR
    sub xSP, xCB * 2
    sgdt [xSP]
    mov xAX, xBX
    and al, 0F8h ; Mask away TI and RPL bits, get descriptor offset.
    add xAX, [xSP + 2] ; eax <- GDTR.address + descriptor offset.
    and dword [ss:xAX + 4], ~0200h ; Clear busy flag (2nd type2 bit).
    ltr bx
    add xSP, xCB * 2
 %endif

    pop xAX ; Saved LDTR
 %ifdef RT_ARCH_AMD64
    cmp eax, 0
    je .skipldtwrite32%1
 %endif
    lldt ax

.skipldtwrite32%1:
    add xSP, xCB ; pCtx

 %ifdef VMX_USE_CACHED_VMCS_ACCESSES
    pop xDX ; Saved pCache

    ; Note! If we get here as a result of an invalid VMCS pointer, all the following
    ; vmread instructions will fail (only eflags.cf=1 will be set), but that shouldn't
    ; cause any trouble; it is merely less efficient.
    mov ecx, [ss:xDX + VMCSCACHE.Read.cValidEntries]
    cmp ecx, 0 ; Can't happen
    je .no_cached_read32%2
    jmp .cached_read32%3

ALIGN(16)
.cached_read32%3:
    dec xCX
    mov eax, [ss:xDX + VMCSCACHE.Read.aField + xCX * 4]
    vmread [ss:xDX + VMCSCACHE.Read.aFieldVal + xCX * 8], xAX
    cmp xCX, 0
    jnz .cached_read32%3
.no_cached_read32%2:
 %endif

    ; Restore segment registers.
    MYPOPSEGS xAX, ax

    ; Restore general purpose registers.
    MYPOPAD
%endmacro


;/**
; * Prepares for and executes VMLAUNCH/VMRESUME (32-bit guest mode)
; *
; * @returns VBox status code
; * @param fResume x86:[ebp+8], msc:rcx, gcc:rdi vmlaunch/vmresume
; * @param pCtx x86:[ebp+c], msc:rdx, gcc:rsi Guest context
; * @param pCache x86:[esp+10], msc:r8, gcc:rdx VMCS cache
; */
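;
; For reference (derived from the parameter notes above and the prologue below),
; the three arguments arrive as follows:
;
;                    fResume        pCtx           pCache
;   x86 stack:       [ebp + 08h]    [ebp + 0Ch]    [ebp + 10h]
;   amd64, MSC:      rcx            rdx            r8
;   amd64, GCC:      rdi            rsi            rdx
;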
ALIGNCODE(16)
BEGINPROC MY_NAME(VMXR0StartVM32)
    push xBP
    mov xBP, xSP

    pushf
    cli

    ; Save all general purpose host registers.
    MYPUSHAD

    ; First we have to save some final CPU context registers.
    mov eax, VMX_VMCS_HOST_RIP
%ifdef RT_ARCH_AMD64
    lea r10, [.vmlaunch_done wrt rip]
    vmwrite rax, r10
%else
    mov ecx, .vmlaunch_done
    vmwrite eax, ecx
%endif
    ; Note: assumes success!

    ; Save the Guest CPU context pointer.
%ifdef RT_ARCH_AMD64
 %ifdef ASM_CALL64_GCC
    ; fResume already in rdi
    ; pCtx already in rsi
    mov rbx, rdx ; pCache
 %else
    mov rdi, rcx ; fResume
    mov rsi, rdx ; pCtx
    mov rbx, r8 ; pCache
 %endif
%else
    mov edi, [ebp + 8] ; fResume
    mov esi, [ebp + 12] ; pCtx
    mov ebx, [ebp + 16] ; pCache
%endif

    ; Save segment registers.
    ; Note: MYPUSHSEGS trashes rdx & rcx, so we moved it here (msvc amd64 case).
    MYPUSHSEGS xAX, ax

%ifdef VMX_USE_CACHED_VMCS_ACCESSES
    mov ecx, [xBX + VMCSCACHE.Write.cValidEntries]
    cmp ecx, 0
    je .no_cached_writes
    mov edx, ecx
    mov ecx, 0
    jmp .cached_write

ALIGN(16)
.cached_write:
    mov eax, [xBX + VMCSCACHE.Write.aField + xCX * 4]
    vmwrite xAX, [xBX + VMCSCACHE.Write.aFieldVal + xCX * 8]
    inc xCX
    cmp xCX, xDX
    jl .cached_write

    mov dword [xBX + VMCSCACHE.Write.cValidEntries], 0
.no_cached_writes:

    ; Save the pCache pointer.
    push xBX
%endif

    ; Save the pCtx pointer.
    push xSI

    ; Save LDTR.
    xor eax, eax
    sldt ax
    push xAX

%ifndef VMX_SKIP_TR
    ; The TR limit is reset to 0x67; restore it manually.
    str eax
    push xAX
%endif

%ifndef VMX_SKIP_GDTR
    ; VT-x only saves the base of the GDTR & IDTR and resets the limit to 0xffff; we must restore the limit correctly!
    sub xSP, xCB * 2
    sgdt [xSP]
%endif
%ifndef VMX_SKIP_IDTR
    sub xSP, xCB * 2
    sidt [xSP]
%endif

    ; Load CR2 if necessary (may be expensive as writing CR2 is a synchronizing instruction).
    mov xBX, [xSI + CPUMCTX.cr2]
    mov xDX, cr2
    cmp xBX, xDX
    je .skipcr2write32
    mov cr2, xBX

.skipcr2write32:
    mov eax, VMX_VMCS_HOST_RSP
    vmwrite xAX, xSP
    ; Note: assumes success!
    ; Don't mess with ESP anymore!!!
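    ; (The xSP value just written to VMX_VMCS_HOST_RSP is the stack pointer the
    ; CPU reloads on every VM-exit, so the stack layout must stay exactly as it
    ; is now for the exit path to find the state saved above.)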

    ; Load Guest's general purpose registers.
    mov eax, [xSI + CPUMCTX.eax]
    mov ebx, [xSI + CPUMCTX.ebx]
    mov ecx, [xSI + CPUMCTX.ecx]
    mov edx, [xSI + CPUMCTX.edx]
    mov ebp, [xSI + CPUMCTX.ebp]

    ; Resume or start?
    cmp xDI, 0 ; fResume
    je .vmlaunch_launch

    ; Restore edi & esi.
    mov edi, [xSI + CPUMCTX.edi]
    mov esi, [xSI + CPUMCTX.esi]

    vmresume
    jmp .vmlaunch_done ; Here if vmresume detected a failure.

.vmlaunch_launch:
    ; Restore edi & esi.
    mov edi, [xSI + CPUMCTX.edi]
    mov esi, [xSI + CPUMCTX.esi]

    vmlaunch
    jmp .vmlaunch_done ; Here if vmlaunch detected a failure.

ALIGNCODE(16) ;; @todo YASM BUG - this alignment is wrong on darwin, it's 1 byte off.
.vmlaunch_done:
    jc near .vmxstart_invalid_vmcs_ptr
    jz near .vmxstart_start_failed
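    ; (CF=1 above means VMfailInvalid, i.e. VMLAUNCH/VMRESUME executed without a
    ; valid current VMCS pointer; ZF=1 means VMfailValid, i.e. the VMCS is valid
    ; but the instruction failed and the error code is in the VM-instruction
    ; error field.)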

    RESTORESTATEVM32 A, B, C
    mov eax, VINF_SUCCESS

.vmstart_end:
    popf
    pop xBP
    ret

.vmxstart_invalid_vmcs_ptr:
    RESTORESTATEVM32 D, E, F
    mov eax, VERR_VMX_INVALID_VMCS_PTR_TO_START_VM
    jmp .vmstart_end

.vmxstart_start_failed:
    RESTORESTATEVM32 G, H, I
    mov eax, VERR_VMX_UNABLE_TO_START_VM
    jmp .vmstart_end

ENDPROC MY_NAME(VMXR0StartVM32)


%ifdef RT_ARCH_AMD64
;; @def RESTORESTATEVM64
; Macro restoring essential host state and updating guest state
; for 64-bit host, 64-bit guest for VT-x.
;
; @param 1 Jump label suffix 1.
; @param 2 Jump label suffix 2.
; @param 3 Jump label suffix 3.
%macro RESTORESTATEVM64 3
    ; Restore base and limit of the IDTR & GDTR
 %ifndef VMX_SKIP_IDTR
    lidt [xSP]
    add xSP, xCB * 2
 %endif
 %ifndef VMX_SKIP_GDTR
    lgdt [xSP]
    add xSP, xCB * 2
 %endif

    push xDI
 %ifndef VMX_SKIP_TR
    mov xDI, [xSP + xCB * 3] ; pCtx (*3 to skip the saved xDI, TR, LDTR)
 %else
    mov xDI, [xSP + xCB * 2] ; pCtx (*2 to skip the saved xDI, LDTR)
 %endif

    mov qword [xDI + CPUMCTX.eax], rax
    mov qword [xDI + CPUMCTX.ebx], rbx
    mov qword [xDI + CPUMCTX.ecx], rcx
    mov qword [xDI + CPUMCTX.edx], rdx
    mov qword [xDI + CPUMCTX.esi], rsi
    mov qword [xDI + CPUMCTX.ebp], rbp
    mov qword [xDI + CPUMCTX.r8], r8
    mov qword [xDI + CPUMCTX.r9], r9
    mov qword [xDI + CPUMCTX.r10], r10
    mov qword [xDI + CPUMCTX.r11], r11
    mov qword [xDI + CPUMCTX.r12], r12
    mov qword [xDI + CPUMCTX.r13], r13
    mov qword [xDI + CPUMCTX.r14], r14
    mov qword [xDI + CPUMCTX.r15], r15
    mov rax, cr2
    mov qword [xDI + CPUMCTX.cr2], rax

    pop xAX ; The guest edi we pushed above
    mov qword [xDI + CPUMCTX.edi], rax

 %ifndef VMX_SKIP_TR
    ; Restore TSS selector; must mark it as not busy before using ltr (!)
    ; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p).
    ; @todo get rid of sgdt
    pop xBX ; Saved TR
    sub xSP, xCB * 2
    sgdt [xSP]
    mov xAX, xBX
    and al, 0F8h ; Mask away TI and RPL bits, get descriptor offset.
    add xAX, [xSP + 2] ; eax <- GDTR.address + descriptor offset.
    and dword [xAX + 4], ~0200h ; Clear busy flag (2nd type2 bit).
    ltr bx
    add xSP, xCB * 2
 %endif

    pop xAX ; Saved LDTR
    cmp eax, 0
    je .skipldtwrite64%1
    lldt ax

.skipldtwrite64%1:
    pop xSI ; pCtx (needed in rsi by the macros below)

 %ifndef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
    ; Save the guest MSRs and load the host MSRs.
    LOADHOSTMSREX MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
    LOADHOSTMSREX MSR_K8_SF_MASK, CPUMCTX.msrSFMASK
    LOADHOSTMSREX MSR_K6_STAR, CPUMCTX.msrSTAR
    LOADHOSTMSREX MSR_K8_LSTAR, CPUMCTX.msrLSTAR
 %endif

 %ifdef VMX_USE_CACHED_VMCS_ACCESSES
    pop xDX ; Saved pCache

    ; Note! If we get here as a result of an invalid VMCS pointer, all the following
    ; vmread instructions will fail (only eflags.cf=1 will be set), but that shouldn't
    ; cause any trouble; it is merely less efficient.
    mov ecx, [xDX + VMCSCACHE.Read.cValidEntries]
    cmp ecx, 0 ; Can't happen
    je .no_cached_read64%2
    jmp .cached_read64%3

ALIGN(16)
.cached_read64%3:
    dec xCX
    mov eax, [xDX + VMCSCACHE.Read.aField + xCX * 4]
    vmread [xDX + VMCSCACHE.Read.aFieldVal + xCX * 8], xAX
    cmp xCX, 0
    jnz .cached_read64%3
.no_cached_read64%2:
 %endif

    ; Restore segment registers.
    MYPOPSEGS xAX, ax

    ; Restore general purpose registers.
    MYPOPAD
%endmacro


;/**
; * Prepares for and executes VMLAUNCH/VMRESUME (64-bit guest mode)
; *
; * @returns VBox status code
; * @param fResume msc:rcx, gcc:rdi vmlaunch/vmresume
; * @param pCtx msc:rdx, gcc:rsi Guest context
; * @param pCache msc:r8, gcc:rdx VMCS cache
; */
ALIGNCODE(16)
BEGINPROC MY_NAME(VMXR0StartVM64)
    push xBP
    mov xBP, xSP

    pushf
    cli

    ; Save all general purpose host registers.
    MYPUSHAD

    ; First we have to save some final CPU context registers.
    lea r10, [.vmlaunch64_done wrt rip]
    mov rax, VMX_VMCS_HOST_RIP ; Return address (too difficult to continue after VMLAUNCH?).
    vmwrite rax, r10
    ; Note: assumes success!

    ; Save the Guest CPU context pointer.
%ifdef ASM_CALL64_GCC
    ; fResume already in rdi
    ; pCtx already in rsi
    mov rbx, rdx ; pCache
%else
    mov rdi, rcx ; fResume
    mov rsi, rdx ; pCtx
    mov rbx, r8 ; pCache
%endif

    ; Save segment registers.
    ; Note: MYPUSHSEGS trashes rdx & rcx, so we moved it here (msvc amd64 case).
    MYPUSHSEGS xAX, ax

%ifdef VMX_USE_CACHED_VMCS_ACCESSES
    mov ecx, [xBX + VMCSCACHE.Write.cValidEntries]
    cmp ecx, 0
    je .no_cached_writes
    mov edx, ecx
    mov ecx, 0
    jmp .cached_write

ALIGN(16)
.cached_write:
    mov eax, [xBX + VMCSCACHE.Write.aField + xCX * 4]
    vmwrite xAX, [xBX + VMCSCACHE.Write.aFieldVal + xCX * 8]
    inc xCX
    cmp xCX, xDX
    jl .cached_write

    mov dword [xBX + VMCSCACHE.Write.cValidEntries], 0
.no_cached_writes:

    ; Save the pCache pointer.
    push xBX
%endif

%ifndef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
    ; Save the host MSRs and load the guest MSRs.
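    ; (These syscall-related MSRs are not guest/host fields in the VMCS, so when
    ; the automatic MSR-load/store areas are not used they have to be swapped by
    ; hand here and swapped back via LOADHOSTMSREX in RESTORESTATEVM64.)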
    LOADGUESTMSR MSR_K8_LSTAR, CPUMCTX.msrLSTAR
    LOADGUESTMSR MSR_K6_STAR, CPUMCTX.msrSTAR
    LOADGUESTMSR MSR_K8_SF_MASK, CPUMCTX.msrSFMASK
    LOADGUESTMSR MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
%endif

    ; Save the pCtx pointer.
    push xSI

    ; Save LDTR.
    xor eax, eax
    sldt ax
    push xAX

%ifndef VMX_SKIP_TR
    ; The TR limit is reset to 0x67; restore it manually.
    str eax
    push xAX
%endif

    ; VT-x only saves the base of the GDTR & IDTR and resets the limit to 0xffff; we must restore the limit correctly!
%ifndef VMX_SKIP_GDTR
    sub xSP, xCB * 2
    sgdt [xSP]
%endif
%ifndef VMX_SKIP_IDTR
    sub xSP, xCB * 2
    sidt [xSP]
%endif

    ; Load CR2 if necessary (may be expensive as writing CR2 is a synchronizing instruction).
    mov rbx, qword [xSI + CPUMCTX.cr2]
    mov rdx, cr2
    cmp rbx, rdx
    je .skipcr2write
    mov cr2, rbx

.skipcr2write:
    mov eax, VMX_VMCS_HOST_RSP
    vmwrite xAX, xSP
    ; Note: assumes success!
    ; Don't mess with ESP anymore!!!

    ; Restore Guest's general purpose registers.
    mov rax, qword [xSI + CPUMCTX.eax]
    mov rbx, qword [xSI + CPUMCTX.ebx]
    mov rcx, qword [xSI + CPUMCTX.ecx]
    mov rdx, qword [xSI + CPUMCTX.edx]
    mov rbp, qword [xSI + CPUMCTX.ebp]
    mov r8, qword [xSI + CPUMCTX.r8]
    mov r9, qword [xSI + CPUMCTX.r9]
    mov r10, qword [xSI + CPUMCTX.r10]
    mov r11, qword [xSI + CPUMCTX.r11]
    mov r12, qword [xSI + CPUMCTX.r12]
    mov r13, qword [xSI + CPUMCTX.r13]
    mov r14, qword [xSI + CPUMCTX.r14]
    mov r15, qword [xSI + CPUMCTX.r15]

    ; Resume or start?
    cmp xDI, 0 ; fResume
    je .vmlaunch64_launch

    ; Restore rdi & rsi.
    mov rdi, qword [xSI + CPUMCTX.edi]
    mov rsi, qword [xSI + CPUMCTX.esi]

    vmresume
    jmp .vmlaunch64_done ; Here if vmresume detected a failure.

.vmlaunch64_launch:
    ; Restore rdi & rsi.
    mov rdi, qword [xSI + CPUMCTX.edi]
    mov rsi, qword [xSI + CPUMCTX.esi]

    vmlaunch
    jmp .vmlaunch64_done ; Here if vmlaunch detected a failure.

ALIGNCODE(16)
.vmlaunch64_done:
    jc near .vmxstart64_invalid_vmcs_ptr
    jz near .vmxstart64_start_failed

    RESTORESTATEVM64 a, b, c
    mov eax, VINF_SUCCESS

.vmstart64_end:
    popf
    pop xBP
    ret

.vmxstart64_invalid_vmcs_ptr:
    RESTORESTATEVM64 d, e, f
    mov eax, VERR_VMX_INVALID_VMCS_PTR_TO_START_VM
    jmp .vmstart64_end

.vmxstart64_start_failed:
    RESTORESTATEVM64 g, h, i
    mov eax, VERR_VMX_UNABLE_TO_START_VM
    jmp .vmstart64_end
ENDPROC MY_NAME(VMXR0StartVM64)
%endif ; RT_ARCH_AMD64


;/**
; * Prepares for and executes VMRUN (32-bit guests)
; *
; * @returns VBox status code
; * @param pVMCBHostPhys Physical address of host VMCB
; * @param pVMCBPhys Physical address of guest VMCB
; * @param pCtx Guest context
; */
ALIGNCODE(16)
BEGINPROC MY_NAME(SVMR0VMRun)
%ifdef RT_ARCH_AMD64 ; fake a cdecl stack frame
 %ifdef ASM_CALL64_GCC
    push rdx
    push rsi
    push rdi
 %else
    push r8
    push rdx
    push rcx
 %endif
    push 0
%endif
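    ; (On AMD64 the register arguments have just been pushed in reverse order,
    ; with the extra 'push 0' standing in for the return-address slot, so the
    ; [xBP + xCB * 2 + ...] argument offsets used below work for both the real
    ; 32-bit cdecl frame and the faked 64-bit one.)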
    push xBP
    mov xBP, xSP
    pushf

    ; Save all general purpose host registers.
    MYPUSHAD

    ; Save the Guest CPU context pointer.
    mov xSI, [xBP + xCB * 2 + RTHCPHYS_CB * 2] ; pCtx
    push xSI ; push for saving the state at the end

    ; Save host fs, gs, sysenter msr etc.
    mov xAX, [xBP + xCB * 2] ; pVMCBHostPhys (64-bit physical address; x86: take low dword only)
    push xAX ; save for the vmload after vmrun
    vmsave

    ; Set up eax for VMLOAD.
    mov xAX, [xBP + xCB * 2 + RTHCPHYS_CB] ; pVMCBPhys (64-bit physical address; take low dword only)

    ; Restore Guest's general purpose registers.
    ; eax is loaded from the VMCB by VMRUN.
    mov ebx, [xSI + CPUMCTX.ebx]
    mov ecx, [xSI + CPUMCTX.ecx]
    mov edx, [xSI + CPUMCTX.edx]
    mov edi, [xSI + CPUMCTX.edi]
    mov ebp, [xSI + CPUMCTX.ebp]
    mov esi, [xSI + CPUMCTX.esi]

    ; Clear the global interrupt flag & execute sti to make sure external interrupts cause a world switch.
    clgi
    sti

    ; Load guest fs, gs, sysenter msr etc.
    vmload
    ; Run the VM.
    vmrun

    ; eax is in the VMCB already; we can use it here.

    ; Save guest fs, gs, sysenter msr etc.
    vmsave

    ; Load host fs, gs, sysenter msr etc.
    pop xAX ; Pushed above
    vmload

    ; Set the global interrupt flag again, but execute cli to make sure IF=0.
    cli
    stgi

    pop xAX ; pCtx

    mov [ss:xAX + CPUMCTX.ebx], ebx
    mov [ss:xAX + CPUMCTX.ecx], ecx
    mov [ss:xAX + CPUMCTX.edx], edx
    mov [ss:xAX + CPUMCTX.esi], esi
    mov [ss:xAX + CPUMCTX.edi], edi
    mov [ss:xAX + CPUMCTX.ebp], ebp

    ; Restore general purpose registers.
    MYPOPAD

    mov eax, VINF_SUCCESS

    popf
    pop xBP
%ifdef RT_ARCH_AMD64
    add xSP, 4*xCB
%endif
    ret
ENDPROC MY_NAME(SVMR0VMRun)

%ifdef RT_ARCH_AMD64
;/**
; * Prepares for and executes VMRUN (64-bit guests)
; *
; * @returns VBox status code
; * @param pVMCBHostPhys Physical address of host VMCB
; * @param pVMCBPhys Physical address of guest VMCB
; * @param pCtx Guest context
; */
ALIGNCODE(16)
BEGINPROC MY_NAME(SVMR0VMRun64)
    ; Fake a cdecl stack frame
 %ifdef ASM_CALL64_GCC
    push rdx
    push rsi
    push rdi
 %else
    push r8
    push rdx
    push rcx
 %endif
    push 0
    push rbp
    mov rbp, rsp
    pushf

    ; Manual save and restore:
    ; - General purpose registers except RIP, RSP, RAX
    ;
    ; Trashed:
    ; - CR2 (we don't care)
    ; - LDTR (reset to 0)
    ; - DRx (presumably not changed at all)
    ; - DR7 (reset to 0x400)
    ;

    ; Save all general purpose host registers.
    MYPUSHAD

    ; Save the Guest CPU context pointer.
    mov rsi, [rbp + xCB * 2 + RTHCPHYS_CB * 2] ; pCtx
    push rsi ; push for saving the state at the end

    ; Save host fs, gs, sysenter msr etc.
    mov rax, [rbp + xCB * 2] ; pVMCBHostPhys (64-bit physical address; x86: take low dword only)
    push rax ; Save for the vmload after vmrun
    vmsave

    ; Set up rax for VMLOAD.
    mov rax, [rbp + xCB * 2 + RTHCPHYS_CB] ; pVMCBPhys (64-bit physical address; take low dword only)

    ; Restore Guest's general purpose registers.
    ; rax is loaded from the VMCB by VMRUN.
    mov rbx, qword [xSI + CPUMCTX.ebx]
    mov rcx, qword [xSI + CPUMCTX.ecx]
    mov rdx, qword [xSI + CPUMCTX.edx]
    mov rdi, qword [xSI + CPUMCTX.edi]
    mov rbp, qword [xSI + CPUMCTX.ebp]
    mov r8, qword [xSI + CPUMCTX.r8]
    mov r9, qword [xSI + CPUMCTX.r9]
    mov r10, qword [xSI + CPUMCTX.r10]
    mov r11, qword [xSI + CPUMCTX.r11]
    mov r12, qword [xSI + CPUMCTX.r12]
    mov r13, qword [xSI + CPUMCTX.r13]
    mov r14, qword [xSI + CPUMCTX.r14]
    mov r15, qword [xSI + CPUMCTX.r15]
    mov rsi, qword [xSI + CPUMCTX.esi]

    ; Clear the global interrupt flag & execute sti to make sure external interrupts cause a world switch.
    clgi
    sti

    ; Load guest fs, gs, sysenter msr etc.
    vmload
    ; Run the VM.
    vmrun

    ; rax is in the VMCB already; we can use it here.

    ; Save guest fs, gs, sysenter msr etc.
    vmsave

    ; Load host fs, gs, sysenter msr etc.
    pop rax ; pushed above
    vmload

    ; Set the global interrupt flag again, but execute cli to make sure IF=0.
    cli
    stgi

    pop rax ; pCtx

    mov qword [rax + CPUMCTX.ebx], rbx
    mov qword [rax + CPUMCTX.ecx], rcx
    mov qword [rax + CPUMCTX.edx], rdx
    mov qword [rax + CPUMCTX.esi], rsi
    mov qword [rax + CPUMCTX.edi], rdi
    mov qword [rax + CPUMCTX.ebp], rbp
    mov qword [rax + CPUMCTX.r8], r8
    mov qword [rax + CPUMCTX.r9], r9
    mov qword [rax + CPUMCTX.r10], r10
    mov qword [rax + CPUMCTX.r11], r11
    mov qword [rax + CPUMCTX.r12], r12
    mov qword [rax + CPUMCTX.r13], r13
    mov qword [rax + CPUMCTX.r14], r14
    mov qword [rax + CPUMCTX.r15], r15

    ; Restore general purpose registers.
    MYPOPAD

    mov eax, VINF_SUCCESS

    popf
    pop rbp
    add rsp, 4 * xCB
    ret
ENDPROC MY_NAME(SVMR0VMRun64)
%endif ; RT_ARCH_AMD64
