VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/HWACCMR0Mixed.mac@ 42156

Last change on this file since 42156 was 42156, checked in by vboxsync, 12 years ago

VMM/VMMR0: HWVMXR0: Use MSR auto load/store areas in the VMCS. Added IA32_TSC_AUX for auto load/store. Cleanups.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 25.4 KB
; $Id: HWACCMR0Mixed.mac 42156 2012-07-16 06:59:45Z vboxsync $
;; @file
; HWACCMR0Mixed.mac - Stuff that darwin needs to build two versions of.
;
; Included by HWACCMR0A.asm with RT_ARCH_AMD64 defined or undefined.
;

;
; Copyright (C) 2006-2007 Oracle Corporation
;
; This file is part of VirtualBox Open Source Edition (OSE), as
; available from http://www.virtualbox.org. This file is free software;
; you can redistribute it and/or modify it under the terms of the GNU
; General Public License (GPL) as published by the Free Software
; Foundation, in version 2 as it comes in the "COPYING" file of the
; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
;


;/**
; * Prepares for and executes VMLAUNCH/VMRESUME (32-bit guest mode)
; *
; * @returns VBox status code
; * @param fResume    x86:[ebp+8],  msc:rcx, gcc:rdi    Whether to use vmlaunch (0) or vmresume (non-zero)
; * @param pCtx       x86:[ebp+c],  msc:rdx, gcc:rsi    Guest context
; * @param pCache     x86:[esp+10], msc:r8,  gcc:rdx    VMCS cache
; */
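;
; For orientation, a hedged sketch of the C-level declaration this entry point is
; expected to correspond to. This is an illustration only -- the authoritative
; prototype lives in the HWACCM headers, and the exact type names used below are
; assumptions inferred from the parameter list above:
;
;   /* Illustrative prototype sketch, not the authoritative declaration. */
;   DECLASM(int) VMXR0StartVM32(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache);
;
; A ring-0 caller would pass fResume = 0 for the first entry (VMLAUNCH) and
; non-zero for subsequent entries (VMRESUME).
;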
ALIGNCODE(16)
BEGINPROC MY_NAME(VMXR0StartVM32)
    push xBP
    mov xBP, xSP

    pushf
    cli

    ;/* Save all general purpose host registers. */
    MYPUSHAD

    ;/* First we have to save some final CPU context registers. */
    mov eax, VMX_VMCS_HOST_RIP
%ifdef RT_ARCH_AMD64
    lea r10, [.vmlaunch_done wrt rip]
    vmwrite rax, r10
%else
    mov ecx, .vmlaunch_done
    vmwrite eax, ecx
%endif
    ;/* Note: assumes success... */

    ;/* Manual save and restore:
    ; * - General purpose registers except RIP, RSP
    ; *
    ; * Trashed:
    ; * - CR2 (we don't care)
    ; * - LDTR (reset to 0)
    ; * - DRx (presumably not changed at all)
    ; * - DR7 (reset to 0x400)
    ; * - EFLAGS (reset to RT_BIT(1); not relevant)
    ; *
    ; */

    ;/* Save the Guest CPU context pointer. */
%ifdef RT_ARCH_AMD64
 %ifdef ASM_CALL64_GCC
    ; fResume already in rdi
    ; pCtx already in rsi
    mov rbx, rdx        ; pCache
 %else
    mov rdi, rcx        ; fResume
    mov rsi, rdx        ; pCtx
    mov rbx, r8         ; pCache
 %endif
%else
    mov edi, [ebp + 8]  ; fResume
    mov esi, [ebp + 12] ; pCtx
    mov ebx, [ebp + 16] ; pCache
%endif

    ;/* Save segment registers */
    ; Note: MYPUSHSEGS trashes rdx & rcx, so we moved it here (msvc amd64 case)
    MYPUSHSEGS xAX, ax

%ifdef VMX_USE_CACHED_VMCS_ACCESSES
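    ; The loop below flushes the VMWRITEs queued in the VMCS write cache
    ; (VMCSCACHE.Write.aField[] / aFieldVal[]) into the current VMCS before we
    ; enter the guest, and then marks the cache as empty again. The exact cache
    ; semantics are defined by VMCSCACHE in the HWACCM headers; this summary is
    ; an inference from the code below.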
    mov ecx, [xBX + VMCSCACHE.Write.cValidEntries]
    cmp ecx, 0
    je .no_cached_writes
    mov edx, ecx
    mov ecx, 0
    jmp .cached_write

ALIGN(16)
.cached_write:
    mov eax, [xBX + VMCSCACHE.Write.aField + xCX*4]
    vmwrite xAX, [xBX + VMCSCACHE.Write.aFieldVal + xCX*8]
    inc xCX
    cmp xCX, xDX
    jl .cached_write

    mov dword [xBX + VMCSCACHE.Write.cValidEntries], 0
.no_cached_writes:

    ; Save the pCache pointer
    push xBX
%endif

    ; Save the pCtx pointer
    push xSI

    ; Save LDTR
    xor eax, eax
    sldt ax
    push xAX

    ; The TR limit is reset to 0x67; restore it manually
    str eax
    push xAX

    ; VMX only saves the base of the GDTR & IDTR and resets the limit to 0xffff; we must restore the limit correctly!
    sub xSP, xS*2
    sgdt [xSP]

    sub xSP, xS*2
    sidt [xSP]
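    ; A sketch of the host stack layout at this point, derived from the pushes
    ; above (assuming xS is the native word size defined by the including file:
    ; 4 bytes on x86, 8 on amd64); the pCache slot is only present when
    ; VMX_USE_CACHED_VMCS_ACCESSES is defined:
    ;   [xSP + 0]      IDTR image (stored by sidt, 2*xS bytes reserved)
    ;   [xSP + 2*xS]   GDTR image (stored by sgdt, 2*xS bytes reserved)
    ;   [xSP + 4*xS]   saved TR selector
    ;   [xSP + 5*xS]   saved LDTR selector
    ;   [xSP + 6*xS]   pCtx
    ;   [xSP + 7*xS]   pCache (optional, see above)
    ; followed by the segment registers saved by MYPUSHSEGS, the host GPRs saved
    ; by MYPUSHAD, the saved EFLAGS and the saved xBP. This xSP value is what is
    ; written to VMX_VMCS_HOST_RSP below.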

%ifdef VBOX_WITH_DR6_EXPERIMENT
    ; Restore DR6 - experiment, not safe!
    mov xBX, [xSI + CPUMCTX.dr6]
    mov dr6, xBX
%endif

    ; Restore CR2
    mov ebx, [xSI + CPUMCTX.cr2]
    mov cr2, xBX

    mov eax, VMX_VMCS_HOST_RSP
    vmwrite xAX, xSP
    ;/* Note: assumes success... */
    ;/* Don't mess with ESP anymore!! */

    ;/* Restore Guest's general purpose registers. */
    mov eax, [xSI + CPUMCTX.eax]
    mov ebx, [xSI + CPUMCTX.ebx]
    mov ecx, [xSI + CPUMCTX.ecx]
    mov edx, [xSI + CPUMCTX.edx]
    mov ebp, [xSI + CPUMCTX.ebp]

    ; resume or start?
    cmp xDI, 0 ; fResume
    je .vmlauch_lauch

    ;/* Restore edi & esi. */
    mov edi, [xSI + CPUMCTX.edi]
    mov esi, [xSI + CPUMCTX.esi]

    vmresume
    jmp .vmlaunch_done;     ;/* here if vmresume detected a failure. */

.vmlauch_lauch:
    ;/* Restore edi & esi. */
    mov edi, [xSI + CPUMCTX.edi]
    mov esi, [xSI + CPUMCTX.esi]

    vmlaunch
    jmp .vmlaunch_done;     ;/* here if vmlaunch detected a failure. */

ALIGNCODE(16) ;; @todo YASM BUG - this alignment is wrong on darwin, it's 1 byte off.
.vmlaunch_done:
    jc near .vmxstart_invalid_vmxon_ptr
    jz near .vmxstart_start_failed

    ; Restore base and limit of the IDTR & GDTR
    lidt [xSP]
    add xSP, xS*2
    lgdt [xSP]
    add xSP, xS*2

    push xDI
    mov xDI, [xSP + xS * 3] ; pCtx (*3 to skip the saved LDTR + TR)

    mov [ss:xDI + CPUMCTX.eax], eax
    mov [ss:xDI + CPUMCTX.ebx], ebx
    mov [ss:xDI + CPUMCTX.ecx], ecx
    mov [ss:xDI + CPUMCTX.edx], edx
    mov [ss:xDI + CPUMCTX.esi], esi
    mov [ss:xDI + CPUMCTX.ebp], ebp
%ifdef RT_ARCH_AMD64
    pop xAX ; the guest edi we pushed above
    mov dword [ss:xDI + CPUMCTX.edi], eax
%else
    pop dword [ss:xDI + CPUMCTX.edi] ; the guest edi we pushed above
%endif

%ifdef VBOX_WITH_DR6_EXPERIMENT
    ; Save DR6 - experiment, not safe!
    mov xAX, dr6
    mov [ss:xDI + CPUMCTX.dr6], xAX
%endif

    ; Restore TSS selector; must mark it as not busy before using ltr (!)
    ; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p)
    ; @todo get rid of sgdt
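    ; Background on the masking below: the TR selector's low three bits are the
    ; TI flag and RPL, so masking with 0F8h leaves the byte offset of the TSS
    ; descriptor within the GDT. sgdt stores a 16-bit limit followed by the base,
    ; hence the base is read from [xSP + 2]. In the descriptor, the dword at
    ; offset 4 holds the type field in bits 8..11, and bit 9 (0200h) is the
    ; 'busy' bit that must be cleared before ltr will accept the selector again.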
    pop xBX ; saved TR
    sub xSP, xS*2
    sgdt [xSP]
    mov xAX, xBX
    and al, 0F8h ; mask away TI and RPL bits, get descriptor offset.
    add xAX, [xSP + 2] ; eax <- GDTR.address + descriptor offset.
    and dword [ss:xAX + 4], ~0200h ; clear busy flag (2nd type2 bit)
    ltr bx
    add xSP, xS*2

    pop xAX ; saved LDTR
    lldt ax

    add xSP, xS ; pCtx

%ifdef VMX_USE_CACHED_VMCS_ACCESSES
    pop xDX ; saved pCache

    mov ecx, [ss:xDX + VMCSCACHE.Read.cValidEntries]
    cmp ecx, 0 ; can't happen
    je .no_cached_reads
    jmp .cached_read

ALIGN(16)
.cached_read:
    dec xCX
    mov eax, [ss:xDX + VMCSCACHE.Read.aField + xCX*4]
    vmread [ss:xDX + VMCSCACHE.Read.aFieldVal + xCX*8], xAX
    cmp xCX, 0
    jnz .cached_read
.no_cached_reads:

    ; Save CR2 for EPT
    mov xAX, cr2
    mov [ss:xDX + VMCSCACHE.cr2], xAX
%endif

    ; Restore segment registers
    MYPOPSEGS xAX, ax

    ; Restore general purpose registers
    MYPOPAD

    mov eax, VINF_SUCCESS

.vmstart_end:
    popf
    pop xBP
    ret


.vmxstart_invalid_vmxon_ptr:
    ; Restore base and limit of the IDTR & GDTR
    lidt [xSP]
    add xSP, xS*2
    lgdt [xSP]
    add xSP, xS*2

    ; Restore TSS selector; must mark it as not busy before using ltr (!)
    ; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p)
    ; @todo get rid of sgdt
    pop xBX ; saved TR
    sub xSP, xS*2
    sgdt [xSP]
    mov xAX, xBX
    and al, 0F8h ; mask away TI and RPL bits, get descriptor offset.
    add xAX, [xSP + 2] ; eax <- GDTR.address + descriptor offset.
    and dword [ss:xAX + 4], ~0200h ; clear busy flag (2nd type2 bit)
    ltr bx
    add xSP, xS*2

    pop xAX ; saved LDTR
    lldt ax

%ifdef VMX_USE_CACHED_VMCS_ACCESSES
    add xSP, xS*2 ; pCtx + pCache
%else
    add xSP, xS ; pCtx
%endif

    ; Restore segment registers
    MYPOPSEGS xAX, ax

    ; Restore all general purpose host registers.
    MYPOPAD
    mov eax, VERR_VMX_INVALID_VMXON_PTR
    jmp .vmstart_end

.vmxstart_start_failed:
    ; Restore base and limit of the IDTR & GDTR
    lidt [xSP]
    add xSP, xS*2
    lgdt [xSP]
    add xSP, xS*2

    ; Restore TSS selector; must mark it as not busy before using ltr (!)
    ; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p)
    ; @todo get rid of sgdt
    pop xBX ; saved TR
    sub xSP, xS*2
    sgdt [xSP]
    mov xAX, xBX
    and al, 0F8h ; mask away TI and RPL bits, get descriptor offset.
    add xAX, [xSP + 2] ; eax <- GDTR.address + descriptor offset.
    and dword [ss:xAX + 4], ~0200h ; clear busy flag (2nd type2 bit)
    ltr bx
    add xSP, xS*2

    pop xAX ; saved LDTR
    lldt ax

%ifdef VMX_USE_CACHED_VMCS_ACCESSES
    add xSP, xS*2 ; pCtx + pCache
%else
    add xSP, xS ; pCtx
%endif

    ; Restore segment registers
    MYPOPSEGS xAX, ax

    ; Restore all general purpose host registers.
    MYPOPAD
    mov eax, VERR_VMX_UNABLE_TO_START_VM
    jmp .vmstart_end

ENDPROC MY_NAME(VMXR0StartVM32)

%ifdef RT_ARCH_AMD64
;/**
; * Prepares for and executes VMLAUNCH/VMRESUME (64-bit guest mode)
; *
; * @returns VBox status code
; * @param fResume    msc:rcx, gcc:rdi    Whether to use vmlaunch (0) or vmresume (non-zero)
; * @param pCtx       msc:rdx, gcc:rsi    Guest context
; * @param pCache     msc:r8,  gcc:rdx    VMCS cache
; */
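;
; As with the 32-bit variant above, a hedged prototype sketch (illustrative only,
; not the authoritative declaration from the HWACCM headers):
;
;   DECLASM(int) VMXR0StartVM64(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache);
;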
ALIGNCODE(16)
BEGINPROC MY_NAME(VMXR0StartVM64)
    push xBP
    mov xBP, xSP

    pushf
    cli

    ;/* Save all general purpose host registers. */
    MYPUSHAD

    ;/* First we have to save some final CPU context registers. */
    lea r10, [.vmlaunch64_done wrt rip]
    mov rax, VMX_VMCS_HOST_RIP  ;/* return address (too difficult to continue after VMLAUNCH?) */
    vmwrite rax, r10
    ;/* Note: assumes success... */

    ;/* Manual save and restore:
    ; * - General purpose registers except RIP, RSP
    ; *
    ; * Trashed:
    ; * - CR2 (we don't care)
    ; * - LDTR (reset to 0)
    ; * - DRx (presumably not changed at all)
    ; * - DR7 (reset to 0x400)
    ; * - EFLAGS (reset to RT_BIT(1); not relevant)
    ; *
    ; */

    ;/* Save the Guest CPU context pointer. */
%ifdef ASM_CALL64_GCC
    ; fResume already in rdi
    ; pCtx already in rsi
    mov rbx, rdx        ; pCache
%else
    mov rdi, rcx        ; fResume
    mov rsi, rdx        ; pCtx
    mov rbx, r8         ; pCache
%endif

    ;/* Save segment registers */
    ; Note: MYPUSHSEGS trashes rdx & rcx, so we moved it here (msvc amd64 case)
    MYPUSHSEGS xAX, ax

%ifdef VMX_USE_CACHED_VMCS_ACCESSES
    mov ecx, [xBX + VMCSCACHE.Write.cValidEntries]
    cmp ecx, 0
    je .no_cached_writes
    mov edx, ecx
    mov ecx, 0
    jmp .cached_write

ALIGN(16)
.cached_write:
    mov eax, [xBX + VMCSCACHE.Write.aField + xCX*4]
    vmwrite xAX, [xBX + VMCSCACHE.Write.aFieldVal + xCX*8]
    inc xCX
    cmp xCX, xDX
    jl .cached_write

    mov dword [xBX + VMCSCACHE.Write.cValidEntries], 0
.no_cached_writes:

    ; Save the pCache pointer
    push xBX
%endif

%ifndef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
    ; Save the host MSRs and load the guest MSRs
    LOADGUESTMSR MSR_K8_LSTAR, CPUMCTX.msrLSTAR
%if 0 ; not supported on Intel CPUs
    LOADGUESTMSR MSR_K8_CSTAR, CPUMCTX.msrCSTAR
%endif
    LOADGUESTMSR MSR_K6_STAR, CPUMCTX.msrSTAR
    LOADGUESTMSR MSR_K8_SF_MASK, CPUMCTX.msrSFMASK
%endif
    ; Kernel GS Base is special, we need to manually load/store it, see @bugref{6208}
    LOADGUESTMSR MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE

    ; Save the pCtx pointer
    push xSI

    ; Save LDTR
    xor eax, eax
    sldt ax
    push xAX

    ; The TR limit is reset to 0x67; restore it manually
    str eax
    push xAX

    ; VMX only saves the base of the GDTR & IDTR and resets the limit to 0xffff; we must restore the limit correctly!
    sub xSP, xS*2
    sgdt [xSP]

    sub xSP, xS*2
    sidt [xSP]

%ifdef VBOX_WITH_DR6_EXPERIMENT
    ; Restore DR6 - experiment, not safe!
    mov xBX, [xSI + CPUMCTX.dr6]
    mov dr6, xBX
%endif

    ; Restore CR2
    mov rbx, qword [xSI + CPUMCTX.cr2]
    mov cr2, rbx

    mov eax, VMX_VMCS_HOST_RSP
    vmwrite xAX, xSP
    ;/* Note: assumes success... */
    ;/* Don't mess with ESP anymore!! */

    ;/* Restore Guest's general purpose registers. */
    mov rax, qword [xSI + CPUMCTX.eax]
    mov rbx, qword [xSI + CPUMCTX.ebx]
    mov rcx, qword [xSI + CPUMCTX.ecx]
    mov rdx, qword [xSI + CPUMCTX.edx]
    mov rbp, qword [xSI + CPUMCTX.ebp]
    mov r8, qword [xSI + CPUMCTX.r8]
    mov r9, qword [xSI + CPUMCTX.r9]
    mov r10, qword [xSI + CPUMCTX.r10]
    mov r11, qword [xSI + CPUMCTX.r11]
    mov r12, qword [xSI + CPUMCTX.r12]
    mov r13, qword [xSI + CPUMCTX.r13]
    mov r14, qword [xSI + CPUMCTX.r14]
    mov r15, qword [xSI + CPUMCTX.r15]

    ; resume or start?
    cmp xDI, 0 ; fResume
    je .vmlauch64_lauch

    ;/* Restore edi & esi. */
    mov rdi, qword [xSI + CPUMCTX.edi]
    mov rsi, qword [xSI + CPUMCTX.esi]

    vmresume
    jmp .vmlaunch64_done;     ;/* here if vmresume detected a failure. */

.vmlauch64_lauch:
    ;/* Restore rdi & rsi. */
    mov rdi, qword [xSI + CPUMCTX.edi]
    mov rsi, qword [xSI + CPUMCTX.esi]

    vmlaunch
    jmp .vmlaunch64_done;     ;/* here if vmlaunch detected a failure. */

ALIGNCODE(16)
.vmlaunch64_done:
    jc near .vmxstart64_invalid_vmxon_ptr
    jz near .vmxstart64_start_failed

    ; Restore base and limit of the IDTR & GDTR
    lidt [xSP]
    add xSP, xS*2
    lgdt [xSP]
    add xSP, xS*2

    push xDI
    mov xDI, [xSP + xS * 3] ; pCtx (*3 to skip the saved LDTR + TR)

    mov qword [xDI + CPUMCTX.eax], rax
    mov qword [xDI + CPUMCTX.ebx], rbx
    mov qword [xDI + CPUMCTX.ecx], rcx
    mov qword [xDI + CPUMCTX.edx], rdx
    mov qword [xDI + CPUMCTX.esi], rsi
    mov qword [xDI + CPUMCTX.ebp], rbp
    mov qword [xDI + CPUMCTX.r8], r8
    mov qword [xDI + CPUMCTX.r9], r9
    mov qword [xDI + CPUMCTX.r10], r10
    mov qword [xDI + CPUMCTX.r11], r11
    mov qword [xDI + CPUMCTX.r12], r12
    mov qword [xDI + CPUMCTX.r13], r13
    mov qword [xDI + CPUMCTX.r14], r14
    mov qword [xDI + CPUMCTX.r15], r15

    pop xAX ; the guest edi we pushed above
    mov qword [xDI + CPUMCTX.edi], rax

%ifdef VBOX_WITH_DR6_EXPERIMENT
    ; Save DR6 - experiment, not safe!
    mov xAX, dr6
    mov [xDI + CPUMCTX.dr6], xAX
%endif

    ; Restore TSS selector; must mark it as not busy before using ltr (!)
    ; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p)
    ; @todo get rid of sgdt
    pop xBX ; saved TR
    sub xSP, xS*2
    sgdt [xSP]
    mov xAX, xBX
    and al, 0F8h ; mask away TI and RPL bits, get descriptor offset.
    add xAX, [xSP + 2] ; eax <- GDTR.address + descriptor offset.
    and dword [xAX + 4], ~0200h ; clear busy flag (2nd type2 bit)
    ltr bx
    add xSP, xS*2

    pop xAX ; saved LDTR
    lldt ax

    pop xSI ; pCtx (needed in rsi by the macros below)

    ; Kernel GS Base is special, we need to manually load/store it, see @bugref{6208}.
    LOADHOSTMSREX MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
%ifndef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
    ; Save the guest MSRs and load the host MSRs
    LOADHOSTMSREX MSR_K8_SF_MASK, CPUMCTX.msrSFMASK
    LOADHOSTMSREX MSR_K6_STAR, CPUMCTX.msrSTAR
%if 0 ; not supported on Intel CPUs
    LOADHOSTMSREX MSR_K8_CSTAR, CPUMCTX.msrCSTAR
%endif
    LOADHOSTMSREX MSR_K8_LSTAR, CPUMCTX.msrLSTAR
%endif

%ifdef VMX_USE_CACHED_VMCS_ACCESSES
    pop xDX ; saved pCache

    mov ecx, [xDX + VMCSCACHE.Read.cValidEntries]
    cmp ecx, 0 ; can't happen
    je .no_cached_reads
    jmp .cached_read

ALIGN(16)
.cached_read:
    dec xCX
    mov eax, [xDX + VMCSCACHE.Read.aField + xCX*4]
    vmread [xDX + VMCSCACHE.Read.aFieldVal + xCX*8], xAX
    cmp xCX, 0
    jnz .cached_read
.no_cached_reads:

    ; Save CR2 for EPT
    mov xAX, cr2
    mov [xDX + VMCSCACHE.cr2], xAX
%endif

    ; Restore segment registers
    MYPOPSEGS xAX, ax

    ; Restore general purpose registers
    MYPOPAD

    mov eax, VINF_SUCCESS

.vmstart64_end:
    popf
    pop xBP
    ret


.vmxstart64_invalid_vmxon_ptr:
    ; Restore base and limit of the IDTR & GDTR
    lidt [xSP]
    add xSP, xS*2
    lgdt [xSP]
    add xSP, xS*2

    ; Restore TSS selector; must mark it as not busy before using ltr (!)
    ; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p)
    ; @todo get rid of sgdt
    pop xBX ; saved TR
    sub xSP, xS*2
    sgdt [xSP]
    mov xAX, xBX
    and al, 0F8h ; mask away TI and RPL bits, get descriptor offset.
    add xAX, [xSP + 2] ; eax <- GDTR.address + descriptor offset.
    and dword [xAX + 4], ~0200h ; clear busy flag (2nd type2 bit)
    ltr bx
    add xSP, xS*2

    pop xAX ; saved LDTR
    lldt ax

    pop xSI ; pCtx (needed in rsi by the macros below)

    ; Kernel GS base is special, we need to manually load/store it. See @bugref{6208}.
    LOADHOSTMSREX MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
%ifndef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
    ; Load the host MSRs
    LOADHOSTMSR MSR_K8_SF_MASK
    LOADHOSTMSR MSR_K6_STAR
%if 0 ; not supported on Intel CPUs
    LOADHOSTMSR MSR_K8_CSTAR
%endif
    LOADHOSTMSR MSR_K8_LSTAR
%endif

%ifdef VMX_USE_CACHED_VMCS_ACCESSES
    add xSP, xS ; pCache
%endif

    ; Restore segment registers
    MYPOPSEGS xAX, ax

    ; Restore all general purpose host registers.
    MYPOPAD
    mov eax, VERR_VMX_INVALID_VMXON_PTR
    jmp .vmstart64_end

.vmxstart64_start_failed:
    ; Restore base and limit of the IDTR & GDTR
    lidt [xSP]
    add xSP, xS*2
    lgdt [xSP]
    add xSP, xS*2

    ; Restore TSS selector; must mark it as not busy before using ltr (!)
    ; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p)
    ; @todo get rid of sgdt
    pop xBX ; saved TR
    sub xSP, xS*2
    sgdt [xSP]
    mov xAX, xBX
    and al, 0F8h ; mask away TI and RPL bits, get descriptor offset.
    add xAX, [xSP + 2] ; eax <- GDTR.address + descriptor offset.
    and dword [xAX + 4], ~0200h ; clear busy flag (2nd type2 bit)
    ltr bx
    add xSP, xS*2

    pop xAX ; saved LDTR
    lldt ax

    pop xSI ; pCtx (needed in rsi by the macros below)

    ; Kernel GS base is special, load it manually. See @bugref{6208}.
    LOADHOSTMSREX MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
%ifndef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
    ; Load the host MSRs
    LOADHOSTMSR MSR_K8_SF_MASK
    LOADHOSTMSR MSR_K6_STAR
%if 0 ; not supported on Intel CPUs
    LOADHOSTMSR MSR_K8_CSTAR
%endif
    LOADHOSTMSR MSR_K8_LSTAR
%endif

%ifdef VMX_USE_CACHED_VMCS_ACCESSES
    add xSP, xS ; pCache
%endif

    ; Restore segment registers
    MYPOPSEGS xAX, ax

    ; Restore all general purpose host registers.
    MYPOPAD
    mov eax, VERR_VMX_UNABLE_TO_START_VM
    jmp .vmstart64_end
ENDPROC MY_NAME(VMXR0StartVM64)
%endif ; RT_ARCH_AMD64


;/**
; * Prepares for and executes VMRUN (32-bit guests)
; *
; * @returns VBox status code
; * @param pVMCBHostPhys    Physical address of host VMCB
; * @param pVMCBPhys        Physical address of guest VMCB
; * @param pCtx             Guest context
; */
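;
; A hedged sketch of the corresponding C-level declaration (illustrative only; the
; authoritative prototype lives in the HWACCM headers, and the parameter names are
; taken from the comments in the body below):
;
;   DECLASM(int) SVMR0VMRun(RTHCPHYS pVMCBHostPhys, RTHCPHYS pVMCBPhys, PCPUMCTX pCtx);
;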
ALIGNCODE(16)
BEGINPROC MY_NAME(SVMR0VMRun)
%ifdef RT_ARCH_AMD64 ; fake a cdecl stack frame
 %ifdef ASM_CALL64_GCC
    push rdx
    push rsi
    push rdi
 %else
    push r8
    push rdx
    push rcx
 %endif
    push 0
%endif
    push xBP
    mov xBP, xSP
    pushf

    ;/* Manual save and restore:
    ; * - General purpose registers except RIP, RSP, RAX
    ; *
    ; * Trashed:
    ; * - CR2 (we don't care)
    ; * - LDTR (reset to 0)
    ; * - DRx (presumably not changed at all)
    ; * - DR7 (reset to 0x400)
    ; */

    ;/* Save all general purpose host registers. */
    MYPUSHAD

    ;/* Save the Guest CPU context pointer. */
    mov xSI, [xBP + xS*2 + RTHCPHYS_CB*2] ; pCtx
    push xSI ; push for saving the state at the end

    ; save host fs, gs, sysenter msr etc
    mov xAX, [xBP + xS*2] ; pVMCBHostPhys (64 bits physical address; x86: take low dword only)
    push xAX ; save for the vmload after vmrun
    vmsave

    ; setup eax for VMLOAD
    mov xAX, [xBP + xS*2 + RTHCPHYS_CB] ; pVMCBPhys (64 bits physical address; take low dword only)

    ;/* Restore Guest's general purpose registers. */
    ;/* EAX is loaded from the VMCB by VMRUN */
    mov ebx, [xSI + CPUMCTX.ebx]
    mov ecx, [xSI + CPUMCTX.ecx]
    mov edx, [xSI + CPUMCTX.edx]
    mov edi, [xSI + CPUMCTX.edi]
    mov ebp, [xSI + CPUMCTX.ebp]
    mov esi, [xSI + CPUMCTX.esi]

    ; Clear the global interrupt flag & execute sti to make sure external interrupts cause a world switch
    clgi
    sti

    ; load guest fs, gs, sysenter msr etc
    vmload
    ; run the VM
    vmrun

    ;/* EAX is in the VMCB already; we can use it here. */

    ; save guest fs, gs, sysenter msr etc
    vmsave

    ; load host fs, gs, sysenter msr etc
    pop xAX ; pushed above
    vmload

    ; Set the global interrupt flag again, but execute cli to make sure IF=0.
    cli
    stgi

    pop xAX ; pCtx

    mov [ss:xAX + CPUMCTX.ebx], ebx
    mov [ss:xAX + CPUMCTX.ecx], ecx
    mov [ss:xAX + CPUMCTX.edx], edx
    mov [ss:xAX + CPUMCTX.esi], esi
    mov [ss:xAX + CPUMCTX.edi], edi
    mov [ss:xAX + CPUMCTX.ebp], ebp

    ; Restore general purpose registers
    MYPOPAD

    mov eax, VINF_SUCCESS

    popf
    pop xBP
%ifdef RT_ARCH_AMD64
    add xSP, 4*xS
%endif
    ret
ENDPROC MY_NAME(SVMR0VMRun)

%ifdef RT_ARCH_AMD64
;/**
; * Prepares for and executes VMRUN (64-bit guests)
; *
; * @returns VBox status code
; * @param pVMCBHostPhys    Physical address of host VMCB
; * @param pVMCBPhys        Physical address of guest VMCB
; * @param pCtx             Guest context
; */
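;
; As for the 32-bit variant above, a hedged prototype sketch (illustrative only):
;
;   DECLASM(int) SVMR0VMRun64(RTHCPHYS pVMCBHostPhys, RTHCPHYS pVMCBPhys, PCPUMCTX pCtx);
;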
ALIGNCODE(16)
BEGINPROC MY_NAME(SVMR0VMRun64)
    ; fake a cdecl stack frame
 %ifdef ASM_CALL64_GCC
    push rdx
    push rsi
    push rdi
 %else
    push r8
    push rdx
    push rcx
 %endif
    push 0
    push rbp
    mov rbp, rsp
    pushf

    ;/* Manual save and restore:
    ; * - General purpose registers except RIP, RSP, RAX
    ; *
    ; * Trashed:
    ; * - CR2 (we don't care)
    ; * - LDTR (reset to 0)
    ; * - DRx (presumably not changed at all)
    ; * - DR7 (reset to 0x400)
    ; */

    ;/* Save all general purpose host registers. */
    MYPUSHAD

    ;/* Save the Guest CPU context pointer. */
    mov rsi, [rbp + xS*2 + RTHCPHYS_CB*2] ; pCtx
    push rsi ; push for saving the state at the end

    ; save host fs, gs, sysenter msr etc
    mov rax, [rbp + xS*2] ; pVMCBHostPhys (64 bits physical address; x86: take low dword only)
    push rax ; save for the vmload after vmrun
    vmsave

    ; setup eax for VMLOAD
    mov rax, [rbp + xS*2 + RTHCPHYS_CB] ; pVMCBPhys (64 bits physical address; take low dword only)

    ;/* Restore Guest's general purpose registers. */
    ;/* RAX is loaded from the VMCB by VMRUN */
    mov rbx, qword [xSI + CPUMCTX.ebx]
    mov rcx, qword [xSI + CPUMCTX.ecx]
    mov rdx, qword [xSI + CPUMCTX.edx]
    mov rdi, qword [xSI + CPUMCTX.edi]
    mov rbp, qword [xSI + CPUMCTX.ebp]
    mov r8, qword [xSI + CPUMCTX.r8]
    mov r9, qword [xSI + CPUMCTX.r9]
    mov r10, qword [xSI + CPUMCTX.r10]
    mov r11, qword [xSI + CPUMCTX.r11]
    mov r12, qword [xSI + CPUMCTX.r12]
    mov r13, qword [xSI + CPUMCTX.r13]
    mov r14, qword [xSI + CPUMCTX.r14]
    mov r15, qword [xSI + CPUMCTX.r15]
    mov rsi, qword [xSI + CPUMCTX.esi]

    ; Clear the global interrupt flag & execute sti to make sure external interrupts cause a world switch
    clgi
    sti

    ; load guest fs, gs, sysenter msr etc
    vmload
    ; run the VM
    vmrun

    ;/* RAX is in the VMCB already; we can use it here. */

    ; save guest fs, gs, sysenter msr etc
    vmsave

    ; load host fs, gs, sysenter msr etc
    pop rax ; pushed above
    vmload

    ; Set the global interrupt flag again, but execute cli to make sure IF=0.
    cli
    stgi

    pop rax ; pCtx

    mov qword [rax + CPUMCTX.ebx], rbx
    mov qword [rax + CPUMCTX.ecx], rcx
    mov qword [rax + CPUMCTX.edx], rdx
    mov qword [rax + CPUMCTX.esi], rsi
    mov qword [rax + CPUMCTX.edi], rdi
    mov qword [rax + CPUMCTX.ebp], rbp
    mov qword [rax + CPUMCTX.r8], r8
    mov qword [rax + CPUMCTX.r9], r9
    mov qword [rax + CPUMCTX.r10], r10
    mov qword [rax + CPUMCTX.r11], r11
    mov qword [rax + CPUMCTX.r12], r12
    mov qword [rax + CPUMCTX.r13], r13
    mov qword [rax + CPUMCTX.r14], r14
    mov qword [rax + CPUMCTX.r15], r15

    ; Restore general purpose registers
    MYPOPAD

    mov eax, VINF_SUCCESS

    popf
    pop rbp
    add rsp, 4*xS
    ret
ENDPROC MY_NAME(SVMR0VMRun64)
%endif ; RT_ARCH_AMD64