VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/HMR0Mixed.mac@ 48935

Last change on this file since 48935 was 48618, checked in by vboxsync, 11 years ago

nit.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 21.8 KB
; $Id: HMR0Mixed.mac 48618 2013-09-20 16:40:05Z vboxsync $
;; @file
; HM - Ring-0 Host 32/64, Guest 32/64 world-switch routines
;
; Darwin uses this to build two versions in the hybrid case.
; Included by HMR0A.asm with RT_ARCH_AMD64 defined or undefined.
;

;
; Copyright (C) 2006-2013 Oracle Corporation
;
; This file is part of VirtualBox Open Source Edition (OSE), as
; available from http://www.virtualbox.org. This file is free software;
; you can redistribute it and/or modify it under the terms of the GNU
; General Public License (GPL) as published by the Free Software
; Foundation, in version 2 as it comes in the "COPYING" file of the
; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
;

%ifdef RT_ARCH_AMD64
 ;;
 ; Keep these macro definitions in this file as it gets included and compiled
 ; with RT_ARCH_AMD64 once and RT_ARCH_X86 once.
 %define VMX_SKIP_GDTR
 %ifdef RT_OS_DARWIN
 ; Darwin (Mavericks) uses the IDTR limit to store the CPU ID, so we need to restore it always.
 ; See @bugref{6875}.
 %elifdef RT_OS_WINDOWS
 ; Windows 8.1 RTM also seems to be using the IDTR limit for something. See @bugref{6956}.
 ;; @todo figure out what exactly it does and try to restrict it more.
 %else
 %define VMX_SKIP_IDTR
 %endif
 %define VMX_SKIP_TR
%endif
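;
; Illustrative sketch (not part of the build): when VMX_SKIP_IDTR is *not*
; defined, the world-switch code below brackets VM-entry roughly like this,
; because VT-x only restores the IDTR/GDTR base and forces the limit to 0xffff
; on VM-exit:
;
;       sub     xSP, xCB * 2
;       sidt    [xSP]           ; save the host IDTR (limit + base) on the stack
;       ; ... vmlaunch/vmresume, guest runs, VM-exit lands at HOST_RIP ...
;       lidt    [xSP]           ; reload the host IDTR, including the limit
;       add     xSP, xCB * 2
;
; The VMX_SKIP_* defines above simply omit the corresponding save/restore pair.
;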

;; @def RESTORESTATEVM32
; Macro restoring essential host state and updating guest state
; for common host, 32-bit guest for VT-x.
;
; @param 1 Jump label suffix 1.
; @param 2 Jump label suffix 2.
; @param 3 Jump label suffix 3.
%macro RESTORESTATEVM32 3
 ; Restore base and limit of the IDTR & GDTR.
 %ifndef VMX_SKIP_IDTR
 lidt [xSP]
 add xSP, xCB * 2
 %endif
 %ifndef VMX_SKIP_GDTR
 lgdt [xSP]
 add xSP, xCB * 2
 %endif

 push xDI
 %ifndef VMX_SKIP_TR
 mov xDI, [xSP + xCB * 3] ; pCtx (*3 to skip the saved xDI, TR, LDTR).
 %else
 mov xDI, [xSP + xCB * 2] ; pCtx (*2 to skip the saved xDI, LDTR).
 %endif

 mov [ss:xDI + CPUMCTX.eax], eax
 mov [ss:xDI + CPUMCTX.ebx], ebx
 mov [ss:xDI + CPUMCTX.ecx], ecx
 mov [ss:xDI + CPUMCTX.edx], edx
 mov [ss:xDI + CPUMCTX.esi], esi
 mov [ss:xDI + CPUMCTX.ebp], ebp
 mov xAX, cr2
 mov [ss:xDI + CPUMCTX.cr2], xAX

 %ifdef RT_ARCH_AMD64
 pop xAX ; The guest edi we pushed above.
 mov dword [ss:xDI + CPUMCTX.edi], eax
 %else
 pop dword [ss:xDI + CPUMCTX.edi] ; The guest edi we pushed above.
 %endif

 %ifndef VMX_SKIP_TR
 ; Restore TSS selector; must mark it as not busy before using ltr (!)
 ; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p)
 ; @todo get rid of sgdt
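 ; Why the busy flag must be cleared first: VM-exit leaves TR with a limit of
 ; 0x67, so TR is reloaded from its GDT descriptor below to get the real limit
 ; back. ltr, however, raises #GP on a TSS descriptor that is already marked
 ; busy, hence the busy bit is cleared in the GDT entry before the ltr.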
 pop xBX ; Saved TR
 sub xSP, xCB * 2
 sgdt [xSP]
 mov xAX, xBX
 and eax, X86_SEL_MASK_OFF_RPL ; Mask away TI and RPL bits leaving only the descriptor offset.
 add xAX, [xSP + 2] ; eax <- GDTR.address + descriptor offset.
 and dword [ss:xAX + 4], ~RT_BIT(9) ; Clear the busy flag in TSS desc (bits 0-7=base, bit 9=busy bit).
 ltr bx
 add xSP, xCB * 2
 %endif

 pop xAX ; Saved LDTR
 %ifdef RT_ARCH_AMD64
 cmp eax, 0
 je .skipldtwrite32%1
 %endif
 lldt ax

.skipldtwrite32%1:
 add xSP, xCB ; pCtx

 %ifdef VMX_USE_CACHED_VMCS_ACCESSES
 pop xDX ; Saved pCache

 ; Note! If we get here as a result of an invalid VMCS pointer, all the following
 ; vmreads will fail (only eflags.cf=1 will be set), but that shouldn't cause any
 ; trouble, just be slightly less efficient.
 mov ecx, [ss:xDX + VMCSCACHE.Read.cValidEntries]
 cmp ecx, 0 ; Can't happen
 je .no_cached_read32%2
 jmp .cached_read32%3

ALIGN(16)
.cached_read32%3:
 dec xCX
 mov eax, [ss:xDX + VMCSCACHE.Read.aField + xCX * 4]
 vmread [ss:xDX + VMCSCACHE.Read.aFieldVal + xCX * 8], xAX
 cmp xCX, 0
 jnz .cached_read32%3
.no_cached_read32%2:
 %endif

 ; Restore segment registers.
 MYPOPSEGS xAX, ax

 ; Restore general purpose registers.
 MYPOPAD
%endmacro
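
; Note: RESTORESTATEVM32 is expanded once per exit path further down, with
; distinct label suffixes so that the .skipldtwrite32 / .cached_read32 /
; .no_cached_read32 labels do not collide between expansions, e.g.:
;
;       RESTORESTATEVM32 A, B, C        ; success path
;       RESTORESTATEVM32 D, E, F        ; invalid VMCS pointer path
;       RESTORESTATEVM32 G, H, I        ; VM-start failed path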


;/**
; * Prepares for and executes VMLAUNCH/VMRESUME (32-bit guest mode)
; *
; * @returns VBox status code
; * @param fResume x86:[ebp+8], msc:rcx,gcc:rdi vmlaunch/vmresume
; * @param pCtx x86:[ebp+c], msc:rdx,gcc:rsi Guest context
; * @param pCache x86:[esp+10],msc:r8, gcc:rdx VMCS cache
; */
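; Note: MY_NAME() comes from the including HMR0A.asm (see the file header) and
; presumably decorates the symbol so the two instantiations of this file (with
; and without RT_ARCH_AMD64) get distinct names. On x86 the three arguments are
; read from the stack frame as listed above, while on AMD64 they arrive in
; registers per the MSC or GCC calling convention.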
ALIGNCODE(16)
BEGINPROC MY_NAME(VMXR0StartVM32)
 push xBP
 mov xBP, xSP

 pushf
 cli

 ; Save all general purpose host registers.
 MYPUSHAD

 ; First we have to save some final CPU context registers.
 mov eax, VMX_VMCS_HOST_RIP
%ifdef RT_ARCH_AMD64
 lea r10, [.vmlaunch_done wrt rip]
 vmwrite rax, r10
%else
 mov ecx, .vmlaunch_done
 vmwrite eax, ecx
%endif
 ; Note: assumes success!
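 ; (VMX_VMCS_HOST_RIP is the VMCS field holding the address the CPU jumps to on
 ; VM-exit; writing .vmlaunch_done here is what makes execution resume at that
 ; label below once the guest exits.)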

 ; Save the Guest CPU context pointer.
%ifdef RT_ARCH_AMD64
 %ifdef ASM_CALL64_GCC
 ; fResume already in rdi
 ; pCtx already in rsi
 mov rbx, rdx ; pCache
 %else
 mov rdi, rcx ; fResume
 mov rsi, rdx ; pCtx
 mov rbx, r8 ; pCache
 %endif
%else
 mov edi, [ebp + 8] ; fResume
 mov esi, [ebp + 12] ; pCtx
 mov ebx, [ebp + 16] ; pCache
%endif

 ; Save segment registers.
 ; Note: MYPUSHSEGS trashes rdx & rcx, so we moved it here (msvc amd64 case).
 MYPUSHSEGS xAX, ax

%ifdef VMX_USE_CACHED_VMCS_ACCESSES
 mov ecx, [xBX + VMCSCACHE.Write.cValidEntries]
 cmp ecx, 0
 je .no_cached_writes
 mov edx, ecx
 mov ecx, 0
 jmp .cached_write

ALIGN(16)
.cached_write:
 mov eax, [xBX + VMCSCACHE.Write.aField + xCX * 4]
 vmwrite xAX, [xBX + VMCSCACHE.Write.aFieldVal + xCX * 8]
 inc xCX
 cmp xCX, xDX
 jl .cached_write

 mov dword [xBX + VMCSCACHE.Write.cValidEntries], 0
.no_cached_writes:

 ; Save the pCache pointer.
 push xBX
%endif

 ; Save the pCtx pointer.
 push xSI

 ; Save LDTR.
 xor eax, eax
 sldt ax
 push xAX

%ifndef VMX_SKIP_TR
 ; The TR limit is reset to 0x67; restore it manually.
 str eax
 push xAX
%endif

%ifndef VMX_SKIP_GDTR
 ; VT-x only saves the base of the GDTR & IDTR and resets the limit to 0xffff; we must restore the limit correctly!
 sub xSP, xCB * 2
 sgdt [xSP]
%endif
%ifndef VMX_SKIP_IDTR
 sub xSP, xCB * 2
 sidt [xSP]
%endif
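
 ; At this point the stack holds, from the top down: the IDTR and GDTR images
 ; (unless skipped), the saved TR (unless skipped), the saved LDTR, pCtx and,
 ; with VMX_USE_CACHED_VMCS_ACCESSES, pCache. RESTORESTATEVM32 unwinds them in
 ; exactly this order after the VM-exit.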

 ; Load CR2 if necessary (may be expensive as writing CR2 is a synchronizing instruction).
 mov xBX, [xSI + CPUMCTX.cr2]
 mov xDX, cr2
 cmp xBX, xDX
 je .skipcr2write32
 mov cr2, xBX

.skipcr2write32:
 mov eax, VMX_VMCS_HOST_RSP
 vmwrite xAX, xSP
 ; Note: assumes success!
 ; Don't mess with ESP anymore!!!
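 ; (The value just written to VMX_VMCS_HOST_RSP is what the CPU loads into RSP
 ; on VM-exit; any further push or pop here would make the stack disagree with
 ; the layout RESTORESTATEVM32 expects to find.)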

 ; Load Guest's general purpose registers.
 mov eax, [xSI + CPUMCTX.eax]
 mov ebx, [xSI + CPUMCTX.ebx]
 mov ecx, [xSI + CPUMCTX.ecx]
 mov edx, [xSI + CPUMCTX.edx]
 mov ebp, [xSI + CPUMCTX.ebp]

 ; Resume or start?
 cmp xDI, 0 ; fResume
 je .vmlaunch_launch

 ; Restore edi & esi.
 mov edi, [xSI + CPUMCTX.edi]
 mov esi, [xSI + CPUMCTX.esi]

 vmresume
 jmp .vmlaunch_done ; Here if vmresume detected a failure.

.vmlaunch_launch:
 ; Restore edi & esi.
 mov edi, [xSI + CPUMCTX.edi]
 mov esi, [xSI + CPUMCTX.esi]

 vmlaunch
 jmp .vmlaunch_done ; Here if vmlaunch detected a failure.

ALIGNCODE(16) ;; @todo YASM BUG - this alignment is wrong on darwin, it's 1 byte off.
.vmlaunch_done:
 jc near .vmxstart_invalid_vmcs_ptr
 jz near .vmxstart_start_failed

 RESTORESTATEVM32 A, B, C
 mov eax, VINF_SUCCESS

.vmstart_end:
 popf
 pop xBP
 ret

.vmxstart_invalid_vmcs_ptr:
 RESTORESTATEVM32 D, E, F
 mov eax, VERR_VMX_INVALID_VMCS_PTR_TO_START_VM
 jmp .vmstart_end

.vmxstart_start_failed:
 RESTORESTATEVM32 G, H, I
 mov eax, VERR_VMX_UNABLE_TO_START_VM
 jmp .vmstart_end

ENDPROC MY_NAME(VMXR0StartVM32)


%ifdef RT_ARCH_AMD64
;; @def RESTORESTATEVM64
; Macro restoring essential host state and updating guest state
; for 64-bit host, 64-bit guest for VT-x.
;
; @param 1 Jump label suffix 1.
; @param 2 Jump label suffix 2.
; @param 3 Jump label suffix 3.
%macro RESTORESTATEVM64 3
 ; Restore base and limit of the IDTR & GDTR
 %ifndef VMX_SKIP_IDTR
 lidt [xSP]
 add xSP, xCB * 2
 %endif
 %ifndef VMX_SKIP_GDTR
 lgdt [xSP]
 add xSP, xCB * 2
 %endif

 push xDI
 %ifndef VMX_SKIP_TR
 mov xDI, [xSP + xCB * 3] ; pCtx (*3 to skip the saved xDI, TR, LDTR)
 %else
 mov xDI, [xSP + xCB * 2] ; pCtx (*2 to skip the saved xDI, LDTR)
 %endif

 mov qword [xDI + CPUMCTX.eax], rax
 mov qword [xDI + CPUMCTX.ebx], rbx
 mov qword [xDI + CPUMCTX.ecx], rcx
 mov qword [xDI + CPUMCTX.edx], rdx
 mov qword [xDI + CPUMCTX.esi], rsi
 mov qword [xDI + CPUMCTX.ebp], rbp
 mov qword [xDI + CPUMCTX.r8], r8
 mov qword [xDI + CPUMCTX.r9], r9
 mov qword [xDI + CPUMCTX.r10], r10
 mov qword [xDI + CPUMCTX.r11], r11
 mov qword [xDI + CPUMCTX.r12], r12
 mov qword [xDI + CPUMCTX.r13], r13
 mov qword [xDI + CPUMCTX.r14], r14
 mov qword [xDI + CPUMCTX.r15], r15
 mov rax, cr2
 mov qword [xDI + CPUMCTX.cr2], rax

 pop xAX ; The guest edi we pushed above
 mov qword [xDI + CPUMCTX.edi], rax

 %ifndef VMX_SKIP_TR
 ; Restore TSS selector; must mark it as not busy before using ltr (!)
 ; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p).
 ; @todo get rid of sgdt
 pop xBX ; Saved TR
 sub xSP, xCB * 2
 sgdt [xSP]
 mov xAX, xBX
 and eax, X86_SEL_MASK_OFF_RPL ; Mask away TI and RPL bits leaving only the descriptor offset.
 add xAX, [xSP + 2] ; eax <- GDTR.address + descriptor offset.
 and dword [xAX + 4], ~RT_BIT(9) ; Clear the busy flag in TSS desc (bits 0-7=base, bit 9=busy bit).
 ltr bx
 add xSP, xCB * 2
 %endif

 pop xAX ; Saved LDTR
 cmp eax, 0
 je .skipldtwrite64%1
 lldt ax

.skipldtwrite64%1:
 pop xSI ; pCtx (needed in rsi by the macros below)

 %ifndef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
 ; Save the guest MSRs and load the host MSRs.
 LOADHOSTMSREX MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
 LOADHOSTMSREX MSR_K8_SF_MASK, CPUMCTX.msrSFMASK
 LOADHOSTMSREX MSR_K6_STAR, CPUMCTX.msrSTAR
 LOADHOSTMSREX MSR_K8_LSTAR, CPUMCTX.msrLSTAR
 %endif
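 ; Note: LOADHOSTMSREX comes from the including HMR0A.asm; per the comment
 ; above it saves the guest value of each syscall-related MSR into the given
 ; CPUMCTX field and reloads the host value that the matching LOADGUESTMSR in
 ; VMXR0StartVM64 saved before VM-entry. The MSRs are listed in the reverse
 ; order of that code, presumably because the host values are kept on the stack.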

 %ifdef VMX_USE_CACHED_VMCS_ACCESSES
 pop xDX ; Saved pCache

 ; Note! If we get here as a result of an invalid VMCS pointer, all the following
 ; vmreads will fail (only eflags.cf=1 will be set), but that shouldn't cause any
 ; trouble, just be slightly less efficient.
 mov ecx, [xDX + VMCSCACHE.Read.cValidEntries]
 cmp ecx, 0 ; Can't happen
 je .no_cached_read64%2
 jmp .cached_read64%3

ALIGN(16)
.cached_read64%3:
 dec xCX
 mov eax, [xDX + VMCSCACHE.Read.aField + xCX * 4]
 vmread [xDX + VMCSCACHE.Read.aFieldVal + xCX * 8], xAX
 cmp xCX, 0
 jnz .cached_read64%3
.no_cached_read64%2:
 %endif

 ; Restore segment registers.
 MYPOPSEGS xAX, ax

 ; Restore general purpose registers.
 MYPOPAD
%endmacro


;/**
; * Prepares for and executes VMLAUNCH/VMRESUME (64-bit guest mode)
; *
; * @returns VBox status code
; * @param fResume msc:rcx, gcc:rdi vmlaunch/vmresume
; * @param pCtx msc:rdx, gcc:rsi Guest context
; * @param pCache msc:r8, gcc:rdx VMCS cache
; */
ALIGNCODE(16)
BEGINPROC MY_NAME(VMXR0StartVM64)
 push xBP
 mov xBP, xSP

 pushf
 cli

 ; Save all general purpose host registers.
 MYPUSHAD

 ; First we have to save some final CPU context registers.
 lea r10, [.vmlaunch64_done wrt rip]
 mov rax, VMX_VMCS_HOST_RIP ; Return address (too difficult to continue after VMLAUNCH?).
 vmwrite rax, r10
 ; Note: assumes success!

 ; Save the Guest CPU context pointer.
%ifdef ASM_CALL64_GCC
 ; fResume already in rdi
 ; pCtx already in rsi
 mov rbx, rdx ; pCache
%else
 mov rdi, rcx ; fResume
 mov rsi, rdx ; pCtx
 mov rbx, r8 ; pCache
%endif

 ; Save segment registers.
 ; Note: MYPUSHSEGS trashes rdx & rcx, so we moved it here (msvc amd64 case).
 MYPUSHSEGS xAX, ax

%ifdef VMX_USE_CACHED_VMCS_ACCESSES
 mov ecx, [xBX + VMCSCACHE.Write.cValidEntries]
 cmp ecx, 0
 je .no_cached_writes
 mov edx, ecx
 mov ecx, 0
 jmp .cached_write

ALIGN(16)
.cached_write:
 mov eax, [xBX + VMCSCACHE.Write.aField + xCX * 4]
 vmwrite xAX, [xBX + VMCSCACHE.Write.aFieldVal + xCX * 8]
 inc xCX
 cmp xCX, xDX
 jl .cached_write

 mov dword [xBX + VMCSCACHE.Write.cValidEntries], 0
.no_cached_writes:

 ; Save the pCache pointer.
 push xBX
%endif

%ifndef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
 ; Save the host MSRs and load the guest MSRs.
 LOADGUESTMSR MSR_K8_LSTAR, CPUMCTX.msrLSTAR
 LOADGUESTMSR MSR_K6_STAR, CPUMCTX.msrSTAR
 LOADGUESTMSR MSR_K8_SF_MASK, CPUMCTX.msrSFMASK
 LOADGUESTMSR MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
%endif

 ; Save the pCtx pointer.
 push xSI

 ; Save LDTR.
 xor eax, eax
 sldt ax
 push xAX

%ifndef VMX_SKIP_TR
 ; The TR limit is reset to 0x67; restore it manually.
 str eax
 push xAX
%endif

 ; VT-x only saves the base of the GDTR & IDTR and resets the limit to 0xffff; we must restore the limit correctly!
%ifndef VMX_SKIP_GDTR
 sub xSP, xCB * 2
 sgdt [xSP]
%endif
%ifndef VMX_SKIP_IDTR
 sub xSP, xCB * 2
 sidt [xSP]
%endif

 ; Load CR2 if necessary (may be expensive as writing CR2 is a synchronizing instruction).
 mov rbx, qword [xSI + CPUMCTX.cr2]
 mov rdx, cr2
 cmp rbx, rdx
 je .skipcr2write
 mov cr2, rbx

.skipcr2write:
 mov eax, VMX_VMCS_HOST_RSP
 vmwrite xAX, xSP
 ; Note: assumes success!
 ; Don't mess with ESP anymore!!!

 ; Restore Guest's general purpose registers.
 mov rax, qword [xSI + CPUMCTX.eax]
 mov rbx, qword [xSI + CPUMCTX.ebx]
 mov rcx, qword [xSI + CPUMCTX.ecx]
 mov rdx, qword [xSI + CPUMCTX.edx]
 mov rbp, qword [xSI + CPUMCTX.ebp]
 mov r8, qword [xSI + CPUMCTX.r8]
 mov r9, qword [xSI + CPUMCTX.r9]
 mov r10, qword [xSI + CPUMCTX.r10]
 mov r11, qword [xSI + CPUMCTX.r11]
 mov r12, qword [xSI + CPUMCTX.r12]
 mov r13, qword [xSI + CPUMCTX.r13]
 mov r14, qword [xSI + CPUMCTX.r14]
 mov r15, qword [xSI + CPUMCTX.r15]

 ; Resume or start?
 cmp xDI, 0 ; fResume
 je .vmlaunch64_launch

 ; Restore rdi & rsi.
 mov rdi, qword [xSI + CPUMCTX.edi]
 mov rsi, qword [xSI + CPUMCTX.esi]

 vmresume
 jmp .vmlaunch64_done ; Here if vmresume detected a failure.

.vmlaunch64_launch:
 ; Restore rdi & rsi.
 mov rdi, qword [xSI + CPUMCTX.edi]
 mov rsi, qword [xSI + CPUMCTX.esi]

 vmlaunch
 jmp .vmlaunch64_done ; Here if vmlaunch detected a failure.

ALIGNCODE(16)
.vmlaunch64_done:
 jc near .vmxstart64_invalid_vmcs_ptr
 jz near .vmxstart64_start_failed

 RESTORESTATEVM64 a, b, c
 mov eax, VINF_SUCCESS

.vmstart64_end:
 popf
 pop xBP
 ret

.vmxstart64_invalid_vmcs_ptr:
 RESTORESTATEVM64 d, e, f
 mov eax, VERR_VMX_INVALID_VMCS_PTR_TO_START_VM
 jmp .vmstart64_end

.vmxstart64_start_failed:
 RESTORESTATEVM64 g, h, i
 mov eax, VERR_VMX_UNABLE_TO_START_VM
 jmp .vmstart64_end
ENDPROC MY_NAME(VMXR0StartVM64)
%endif ; RT_ARCH_AMD64


;/**
; * Prepares for and executes VMRUN (32-bit guests)
; *
; * @returns VBox status code
; * @param pVMCBHostPhys Physical address of host VMCB
; * @param pVMCBPhys Physical address of guest VMCB
; * @param pCtx Guest context
; */
ALIGNCODE(16)
BEGINPROC MY_NAME(SVMR0VMRun)
%ifdef RT_ARCH_AMD64 ; fake a cdecl stack frame
 %ifdef ASM_CALL64_GCC
 push rdx
 push rsi
 push rdi
 %else
 push r8
 push rdx
 push rcx
 %endif
 push 0
%endif
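 ; Note: on AMD64 the register arguments were just pushed in reverse order,
 ; plus a zero standing in for the return-address slot, so the code below can
 ; address all three parameters via [xBP + xCB * 2 + ...] exactly like the
 ; 32-bit cdecl build does.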
 push xBP
 mov xBP, xSP
 pushf

 ; Save all general purpose host registers.
 MYPUSHAD

 ; Save the Guest CPU context pointer.
 mov xSI, [xBP + xCB * 2 + RTHCPHYS_CB * 2] ; pCtx
 push xSI ; push for saving the state at the end

 ; Save host fs, gs, sysenter msr etc.
 mov xAX, [xBP + xCB * 2] ; pVMCBHostPhys (64-bit physical address; x86: take low dword only)
 push xAX ; save for the vmload after vmrun
 vmsave

 ; Setup eax for VMLOAD.
 mov xAX, [xBP + xCB * 2 + RTHCPHYS_CB] ; pVMCBPhys (64-bit physical address; take low dword only)

 ; Restore Guest's general purpose registers.
 ; eax is loaded from the VMCB by VMRUN.
 mov ebx, [xSI + CPUMCTX.ebx]
 mov ecx, [xSI + CPUMCTX.ecx]
 mov edx, [xSI + CPUMCTX.edx]
 mov edi, [xSI + CPUMCTX.edi]
 mov ebp, [xSI + CPUMCTX.ebp]
 mov esi, [xSI + CPUMCTX.esi]

 ; Clear the global interrupt flag & execute sti to make sure external interrupts cause a world switch.
 clgi
 sti
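 ; (clgi clears the AMD-V global interrupt flag, so interrupts and NMIs are
 ; held pending and not delivered to the host while its state is only partially
 ; switched; host delivery is re-enabled by the stgi after the VMRUN below.)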

 ; Load guest fs, gs, sysenter msr etc.
 vmload
 ; Run the VM.
 vmrun

 ; eax is in the VMCB already; we can use it here.

 ; Save guest fs, gs, sysenter msr etc.
 vmsave

 ; Load host fs, gs, sysenter msr etc.
 pop xAX ; Pushed above
 vmload

 ; Set the global interrupt flag again, but execute cli to make sure IF=0.
 cli
 stgi

 pop xAX ; pCtx

 mov [ss:xAX + CPUMCTX.ebx], ebx
 mov [ss:xAX + CPUMCTX.ecx], ecx
 mov [ss:xAX + CPUMCTX.edx], edx
 mov [ss:xAX + CPUMCTX.esi], esi
 mov [ss:xAX + CPUMCTX.edi], edi
 mov [ss:xAX + CPUMCTX.ebp], ebp

 ; Restore general purpose registers.
 MYPOPAD

 mov eax, VINF_SUCCESS

 popf
 pop xBP
%ifdef RT_ARCH_AMD64
 add xSP, 4*xCB
%endif
 ret
ENDPROC MY_NAME(SVMR0VMRun)

%ifdef RT_ARCH_AMD64
;/**
; * Prepares for and executes VMRUN (64-bit guests)
; *
; * @returns VBox status code
; * @param pVMCBHostPhys Physical address of host VMCB
; * @param pVMCBPhys Physical address of guest VMCB
; * @param pCtx Guest context
; */
ALIGNCODE(16)
BEGINPROC MY_NAME(SVMR0VMRun64)
 ; Fake a cdecl stack frame
 %ifdef ASM_CALL64_GCC
 push rdx
 push rsi
 push rdi
 %else
 push r8
 push rdx
 push rcx
 %endif
 push 0
 push rbp
 mov rbp, rsp
 pushf

 ; Manual save and restore:
 ; - General purpose registers except RIP, RSP, RAX
 ;
 ; Trashed:
 ; - CR2 (we don't care)
 ; - LDTR (reset to 0)
 ; - DRx (presumably not changed at all)
 ; - DR7 (reset to 0x400)
 ;

 ; Save all general purpose host registers.
 MYPUSHAD

 ; Save the Guest CPU context pointer.
 mov rsi, [rbp + xCB * 2 + RTHCPHYS_CB * 2] ; pCtx
 push rsi ; push for saving the state at the end

 ; Save host fs, gs, sysenter msr etc.
 mov rax, [rbp + xCB * 2] ; pVMCBHostPhys (64-bit physical address; take low dword only)
 push rax ; Save for the vmload after vmrun
 vmsave

 ; Setup rax for VMLOAD.
 mov rax, [rbp + xCB * 2 + RTHCPHYS_CB] ; pVMCBPhys (64-bit physical address; take low dword only)

 ; Restore Guest's general purpose registers.
 ; rax is loaded from the VMCB by VMRUN.
 mov rbx, qword [xSI + CPUMCTX.ebx]
 mov rcx, qword [xSI + CPUMCTX.ecx]
 mov rdx, qword [xSI + CPUMCTX.edx]
 mov rdi, qword [xSI + CPUMCTX.edi]
 mov rbp, qword [xSI + CPUMCTX.ebp]
 mov r8, qword [xSI + CPUMCTX.r8]
 mov r9, qword [xSI + CPUMCTX.r9]
 mov r10, qword [xSI + CPUMCTX.r10]
 mov r11, qword [xSI + CPUMCTX.r11]
 mov r12, qword [xSI + CPUMCTX.r12]
 mov r13, qword [xSI + CPUMCTX.r13]
 mov r14, qword [xSI + CPUMCTX.r14]
 mov r15, qword [xSI + CPUMCTX.r15]
 mov rsi, qword [xSI + CPUMCTX.esi]
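 ; (rsi is loaded last because xSI still holds the pCtx pointer that all of the
 ; guest register loads above go through.)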

 ; Clear the global interrupt flag & execute sti to make sure external interrupts cause a world switch.
 clgi
 sti

 ; Load guest fs, gs, sysenter msr etc.
 vmload
 ; Run the VM.
 vmrun

 ; rax is in the VMCB already; we can use it here.

 ; Save guest fs, gs, sysenter msr etc.
 vmsave

 ; Load host fs, gs, sysenter msr etc.
 pop rax ; pushed above
 vmload

 ; Set the global interrupt flag again, but execute cli to make sure IF=0.
 cli
 stgi

 pop rax ; pCtx

 mov qword [rax + CPUMCTX.ebx], rbx
 mov qword [rax + CPUMCTX.ecx], rcx
 mov qword [rax + CPUMCTX.edx], rdx
 mov qword [rax + CPUMCTX.esi], rsi
 mov qword [rax + CPUMCTX.edi], rdi
 mov qword [rax + CPUMCTX.ebp], rbp
 mov qword [rax + CPUMCTX.r8], r8
 mov qword [rax + CPUMCTX.r9], r9
 mov qword [rax + CPUMCTX.r10], r10
 mov qword [rax + CPUMCTX.r11], r11
 mov qword [rax + CPUMCTX.r12], r12
 mov qword [rax + CPUMCTX.r13], r13
 mov qword [rax + CPUMCTX.r14], r14
 mov qword [rax + CPUMCTX.r15], r15

 ; Restore general purpose registers.
 MYPOPAD

 mov eax, VINF_SUCCESS

 popf
 pop rbp
 add rsp, 4 * xCB
 ret
ENDPROC MY_NAME(SVMR0VMRun64)
%endif ; RT_ARCH_AMD64