VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/HWACCMR0Mixed.mac@29250

Last change on this file since 29250 was 28800, checked in by vboxsync, 14 years ago

Automated rebranding to Oracle copyright/license strings via filemuncher

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 23.5 KB
; $Id: HWACCMR0Mixed.mac 28800 2010-04-27 08:22:32Z vboxsync $
;; @file
; HWACCMR0Mixed.mac - Stuff that darwin needs to build two versions of.
;
; Included by HWACCMR0A.asm with RT_ARCH_AMD64 defined or undefined.
;

;
; Copyright (C) 2006-2007 Oracle Corporation
;
; This file is part of VirtualBox Open Source Edition (OSE), as
; available from http://www.virtualbox.org. This file is free software;
; you can redistribute it and/or modify it under the terms of the GNU
; General Public License (GPL) as published by the Free Software
; Foundation, in version 2 as it comes in the "COPYING" file of the
; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
;


;/**
; * Prepares for and executes VMLAUNCH/VMRESUME (32-bit guest mode)
; *
; * @returns VBox status code
; * @param    fResume    x86:[ebp+8], msc:rcx, gcc:rdi     vmlaunch/vmresume
; * @param    pCtx       x86:[ebp+c], msc:rdx, gcc:rsi     Guest context
; * @param    pCache     x86:[esp+10], msc:r8, gcc:rdx     VMCS cache
; */
ALIGNCODE(16)
BEGINPROC MY_NAME(VMXR0StartVM32)
    push xBP
    mov xBP, xSP

    pushf
    cli

    ;/* Save all general purpose host registers. */
    MYPUSHAD

    ;/* First we have to save some final CPU context registers. */
    mov eax, VMX_VMCS_HOST_RIP
%ifdef RT_ARCH_AMD64
    lea r10, [.vmlaunch_done wrt rip]
    vmwrite rax, r10
%else
    mov ecx, .vmlaunch_done
    vmwrite eax, ecx
%endif
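    ; The VMCS host-state RIP field now holds the address of .vmlaunch_done
    ; below; that is where the CPU resumes host execution on every VM exit.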
    ;/* Note: assumes success... */

    ;/* Manual save and restore:
    ; *  - General purpose registers except RIP, RSP
    ; *
    ; * Trashed:
    ; *  - CR2 (we don't care)
    ; *  - LDTR (reset to 0)
    ; *  - DRx (presumably not changed at all)
    ; *  - DR7 (reset to 0x400)
    ; *  - EFLAGS (reset to RT_BIT(1); not relevant)
    ; *
    ; */

    ;/* Save the Guest CPU context pointer. */
%ifdef RT_ARCH_AMD64
 %ifdef ASM_CALL64_GCC
    ; fResume already in rdi
    ; pCtx already in rsi
    mov rbx, rdx        ; pCache
 %else
    mov rdi, rcx        ; fResume
    mov rsi, rdx        ; pCtx
    mov rbx, r8         ; pCache
 %endif
%else
    mov edi, [ebp + 8]  ; fResume
    mov esi, [ebp + 12] ; pCtx
    mov ebx, [ebp + 16] ; pCache
%endif

    ;/* Save segment registers */
    ; Note: MYPUSHSEGS trashes rdx & rcx, so we moved it here (msvc amd64 case)
    MYPUSHSEGS xAX, ax

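    ; Flush any VMWRITEs queued in the VMCS write cache: the loop below walks
    ; the Write.aField[]/Write.aFieldVal[] pairs, writes each one into the
    ; current VMCS, and finally clears the valid-entry count.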
%ifdef VMX_USE_CACHED_VMCS_ACCESSES
    mov ecx, [xBX + VMCSCACHE.Write.cValidEntries]
    cmp ecx, 0
    je .no_cached_writes
    mov edx, ecx
    mov ecx, 0
    jmp .cached_write

ALIGN(16)
.cached_write:
    mov eax, [xBX + VMCSCACHE.Write.aField + xCX*4]
    vmwrite xAX, [xBX + VMCSCACHE.Write.aFieldVal + xCX*8]
    inc xCX
    cmp xCX, xDX
    jl .cached_write

    mov dword [xBX + VMCSCACHE.Write.cValidEntries], 0
.no_cached_writes:

    ; Save the pCache pointer
    push xBX
%endif

    ; Save the pCtx pointer
    push xSI

    ; Save LDTR
    xor eax, eax
    sldt ax
    push xAX

    ; The TR limit is reset to 0x67; restore it manually
    str eax
    push xAX

    ; VMX only saves the base of the GDTR & IDTR and resets the limit to 0xffff; we must restore the limit correctly!
    sub xSP, xS*2
    sgdt [xSP]

    sub xSP, xS*2
    sidt [xSP]

%ifdef VBOX_WITH_DR6_EXPERIMENT
    ; Restore DR6 - experiment, not safe!
    mov xBX, [xSI + CPUMCTX.dr6]
    mov dr6, xBX
%endif

    ; Restore CR2
    mov ebx, [xSI + CPUMCTX.cr2]
    mov cr2, xBX

    mov eax, VMX_VMCS_HOST_RSP
    vmwrite xAX, xSP
    ;/* Note: assumes success... */
    ;/* Don't mess with ESP anymore!! */

    ;/* Restore Guest's general purpose registers. */
    mov eax, [xSI + CPUMCTX.eax]
    mov ebx, [xSI + CPUMCTX.ebx]
    mov ecx, [xSI + CPUMCTX.ecx]
    mov edx, [xSI + CPUMCTX.edx]
    mov ebp, [xSI + CPUMCTX.ebp]

    ; resume or start?
    cmp xDI, 0          ; fResume
    je .vmlauch_lauch

    ;/* Restore edi & esi. */
    mov edi, [xSI + CPUMCTX.edi]
    mov esi, [xSI + CPUMCTX.esi]

    vmresume
    jmp .vmlaunch_done; ;/* here if vmresume detected a failure. */

.vmlauch_lauch:
    ;/* Restore edi & esi. */
    mov edi, [xSI + CPUMCTX.edi]
    mov esi, [xSI + CPUMCTX.esi]

    vmlaunch
    jmp .vmlaunch_done; ;/* here if vmlaunch detected a failure. */

ALIGNCODE(16) ;; @todo YASM BUG - this alignment is wrong on darwin, it's 1 byte off.
.vmlaunch_done:
    jc near .vmxstart_invalid_vmxon_ptr
    jz near .vmxstart_start_failed
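    ; (The two conditional jumps above follow the VMX error convention: CF=1
    ;  means VMfailInvalid, i.e. no/invalid current VMCS pointer, and ZF=1
    ;  means VMfailValid with the reason in the VM-instruction error field.
    ;  A genuine VM exit clears RFLAGS, so both branches fall through then.)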

    ; Restore base and limit of the IDTR & GDTR
    lidt [xSP]
    add xSP, xS*2
    lgdt [xSP]
    add xSP, xS*2

    push xDI
    mov xDI, [xSP + xS * 3] ; pCtx (*3 to skip the saved LDTR + TR)

    mov [ss:xDI + CPUMCTX.eax], eax
    mov [ss:xDI + CPUMCTX.ebx], ebx
    mov [ss:xDI + CPUMCTX.ecx], ecx
    mov [ss:xDI + CPUMCTX.edx], edx
    mov [ss:xDI + CPUMCTX.esi], esi
    mov [ss:xDI + CPUMCTX.ebp], ebp
%ifdef RT_ARCH_AMD64
    pop xAX             ; the guest edi we pushed above
    mov dword [ss:xDI + CPUMCTX.edi], eax
%else
    pop dword [ss:xDI + CPUMCTX.edi] ; the guest edi we pushed above
%endif

%ifdef VBOX_WITH_DR6_EXPERIMENT
    ; Save DR6 - experiment, not safe!
    mov xAX, dr6
    mov [ss:xDI + CPUMCTX.dr6], xAX
%endif

    ; Restore TSS selector; must mark it as not busy before using ltr (!)
    ; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p)
    ; @todo get rid of sgdt
    pop xBX             ; saved TR
%ifndef RT_ARCH_AMD64
    sub xSP, xS*2
    sgdt [xSP]
    mov eax, ebx
    and al, 0F8h        ; mask away TI and RPL bits, get descriptor offset.
    add xAX, [xSP + 2]  ; eax <- GDTR.address + descriptor offset.
    and dword [ss:xAX + 4], ~0200h ; clear busy flag (2nd type2 bit)
    ltr bx
    add xSP, xS*2
%endif

    pop xAX             ; saved LDTR
    lldt ax

    add xSP, xS         ; pCtx

%ifdef VMX_USE_CACHED_VMCS_ACCESSES
    pop xDX             ; saved pCache

    mov ecx, [ss:xDX + VMCSCACHE.Read.cValidEntries]
    cmp ecx, 0          ; can't happen
    je .no_cached_reads
    jmp .cached_read

ALIGN(16)
.cached_read:
    dec xCX
    mov eax, [ss:xDX + VMCSCACHE.Read.aField + xCX*4]
    vmread [ss:xDX + VMCSCACHE.Read.aFieldVal + xCX*8], xAX
    cmp xCX, 0
    jnz .cached_read
.no_cached_reads:

    ; Save CR2 for EPT
    mov xAX, cr2
    mov [ss:xDX + VMCSCACHE.cr2], xAX
%endif

    ; Restore segment registers
    MYPOPSEGS xAX, ax

    ; Restore general purpose registers
    MYPOPAD

    mov eax, VINF_SUCCESS

.vmstart_end:
    popf
    pop xBP
    ret


.vmxstart_invalid_vmxon_ptr:
    ; Restore base and limit of the IDTR & GDTR
    lidt [xSP]
    add xSP, xS*2
    lgdt [xSP]
    add xSP, xS*2

    ; Restore TSS selector; must mark it as not busy before using ltr (!)
    ; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p)
    ; @todo get rid of sgdt
    pop xBX             ; saved TR
%ifndef RT_ARCH_AMD64
    sub xSP, xS*2
    sgdt [xSP]
    mov eax, ebx
    and al, 0F8h        ; mask away TI and RPL bits, get descriptor offset.
    add eax, [xSP + 2]  ; eax <- GDTR.address + descriptor offset.
    and dword [ss:eax + 4], ~0200h ; clear busy flag (2nd type2 bit)
    ltr bx
    add xSP, xS*2
%endif

    pop xAX             ; saved LDTR
    lldt ax

%ifdef VMX_USE_CACHED_VMCS_ACCESSES
    add xSP, xS*2       ; pCtx + pCache
%else
    add xSP, xS         ; pCtx
%endif

    ; Restore segment registers
    MYPOPSEGS xAX, ax

    ; Restore all general purpose host registers.
    MYPOPAD
    mov eax, VERR_VMX_INVALID_VMXON_PTR
    jmp .vmstart_end

.vmxstart_start_failed:
    ; Restore base and limit of the IDTR & GDTR
    lidt [xSP]
    add xSP, xS*2
    lgdt [xSP]
    add xSP, xS*2

    ; Restore TSS selector; must mark it as not busy before using ltr (!)
    ; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p)
    ; @todo get rid of sgdt
    pop xBX             ; saved TR
%ifndef RT_ARCH_AMD64
    sub xSP, xS*2
    sgdt [xSP]
    mov eax, ebx
    and al, 0F8h        ; mask away TI and RPL bits, get descriptor offset.
    add eax, [xSP + 2]  ; eax <- GDTR.address + descriptor offset.
    and dword [ss:eax + 4], ~0200h ; clear busy flag (2nd type2 bit)
    ltr bx
    add xSP, xS*2
%endif

    pop xAX             ; saved LDTR
    lldt ax

%ifdef VMX_USE_CACHED_VMCS_ACCESSES
    add xSP, xS*2       ; pCtx + pCache
%else
    add xSP, xS         ; pCtx
%endif

    ; Restore segment registers
    MYPOPSEGS xAX, ax

    ; Restore all general purpose host registers.
    MYPOPAD
    mov eax, VERR_VMX_UNABLE_TO_START_VM
    jmp .vmstart_end

ENDPROC MY_NAME(VMXR0StartVM32)

%ifdef RT_ARCH_AMD64
;/**
; * Prepares for and executes VMLAUNCH/VMRESUME (64-bit guest mode)
; *
; * @returns VBox status code
; * @param    fResume    msc:rcx, gcc:rdi     vmlaunch/vmresume
; * @param    pCtx       msc:rdx, gcc:rsi     Guest context
; * @param    pCache     msc:r8,  gcc:rdx     VMCS cache
; */
ALIGNCODE(16)
BEGINPROC MY_NAME(VMXR0StartVM64)
    push xBP
    mov xBP, xSP

    pushf
    cli

    ;/* Save all general purpose host registers. */
    MYPUSHAD

    ;/* First we have to save some final CPU context registers. */
    lea r10, [.vmlaunch64_done wrt rip]
    mov rax, VMX_VMCS_HOST_RIP ;/* return address (too difficult to continue after VMLAUNCH?) */
    vmwrite rax, r10
    ;/* Note: assumes success... */

    ;/* Manual save and restore:
    ; *  - General purpose registers except RIP, RSP
    ; *
    ; * Trashed:
    ; *  - CR2 (we don't care)
    ; *  - LDTR (reset to 0)
    ; *  - DRx (presumably not changed at all)
    ; *  - DR7 (reset to 0x400)
    ; *  - EFLAGS (reset to RT_BIT(1); not relevant)
    ; *
    ; */

    ;/* Save the Guest CPU context pointer. */
%ifdef ASM_CALL64_GCC
    ; fResume already in rdi
    ; pCtx already in rsi
    mov rbx, rdx        ; pCache
%else
    mov rdi, rcx        ; fResume
    mov rsi, rdx        ; pCtx
    mov rbx, r8         ; pCache
%endif
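    ; From this point on the calling convention no longer matters:
    ; rdi = fResume, rsi = pCtx and rbx = pCache on both GCC and MSC builds.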

    ;/* Save segment registers */
    ; Note: MYPUSHSEGS trashes rdx & rcx, so we moved it here (msvc amd64 case)
    MYPUSHSEGS xAX, ax

%ifdef VMX_USE_CACHED_VMCS_ACCESSES
    mov ecx, [xBX + VMCSCACHE.Write.cValidEntries]
    cmp ecx, 0
    je .no_cached_writes
    mov edx, ecx
    mov ecx, 0
    jmp .cached_write

ALIGN(16)
.cached_write:
    mov eax, [xBX + VMCSCACHE.Write.aField + xCX*4]
    vmwrite xAX, [xBX + VMCSCACHE.Write.aFieldVal + xCX*8]
    inc xCX
    cmp xCX, xDX
    jl .cached_write

    mov dword [xBX + VMCSCACHE.Write.cValidEntries], 0
.no_cached_writes:

    ; Save the pCache pointer
    push xBX
%endif

    ; Save the host LSTAR, CSTAR, SFMASK & KERNEL_GSBASE MSRs and restore the guest MSRs
    ;; @todo use the automatic load feature for MSRs
    LOADGUESTMSR MSR_K8_LSTAR, CPUMCTX.msrLSTAR
%if 0  ; not supported on Intel CPUs
    LOADGUESTMSR MSR_K8_CSTAR, CPUMCTX.msrCSTAR
%endif
    LOADGUESTMSR MSR_K6_STAR, CPUMCTX.msrSTAR
    LOADGUESTMSR MSR_K8_SF_MASK, CPUMCTX.msrSFMASK
    LOADGUESTMSR MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
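    ; These syscall-related MSRs are not switched by VMX itself, hence the
    ; manual swap: LOADGUESTMSR (presumably defined by the including
    ; HWACCMR0A.asm) saves the host value and loads the guest value from pCtx,
    ; and the LOADHOSTMSR/LOADHOSTMSREX calls after the VM exit undo this.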

    ; Save the pCtx pointer
    push xSI

    ; Save LDTR
    xor eax, eax
    sldt ax
    push xAX

    ; VMX only saves the base of the GDTR & IDTR and resets the limit to 0xffff; we must restore the limit correctly!
    sub xSP, xS*2
    sgdt [xSP]

    sub xSP, xS*2
    sidt [xSP]

%ifdef VBOX_WITH_DR6_EXPERIMENT
    ; Restore DR6 - experiment, not safe!
    mov xBX, [xSI + CPUMCTX.dr6]
    mov dr6, xBX
%endif

    ; Restore CR2
    mov rbx, qword [xSI + CPUMCTX.cr2]
    mov cr2, rbx

    mov eax, VMX_VMCS_HOST_RSP
    vmwrite xAX, xSP
    ;/* Note: assumes success... */
    ;/* Don't mess with ESP anymore!! */

    ;/* Restore Guest's general purpose registers. */
    mov rax, qword [xSI + CPUMCTX.eax]
    mov rbx, qword [xSI + CPUMCTX.ebx]
    mov rcx, qword [xSI + CPUMCTX.ecx]
    mov rdx, qword [xSI + CPUMCTX.edx]
    mov rbp, qword [xSI + CPUMCTX.ebp]
    mov r8,  qword [xSI + CPUMCTX.r8]
    mov r9,  qword [xSI + CPUMCTX.r9]
    mov r10, qword [xSI + CPUMCTX.r10]
    mov r11, qword [xSI + CPUMCTX.r11]
    mov r12, qword [xSI + CPUMCTX.r12]
    mov r13, qword [xSI + CPUMCTX.r13]
    mov r14, qword [xSI + CPUMCTX.r14]
    mov r15, qword [xSI + CPUMCTX.r15]

    ; resume or start?
    cmp xDI, 0          ; fResume
    je .vmlauch64_lauch

    ;/* Restore rdi & rsi. */
    mov rdi, qword [xSI + CPUMCTX.edi]
    mov rsi, qword [xSI + CPUMCTX.esi]

    vmresume
    jmp .vmlaunch64_done; ;/* here if vmresume detected a failure. */

.vmlauch64_lauch:
    ;/* Restore rdi & rsi. */
    mov rdi, qword [xSI + CPUMCTX.edi]
    mov rsi, qword [xSI + CPUMCTX.esi]

    vmlaunch
    jmp .vmlaunch64_done; ;/* here if vmlaunch detected a failure. */

ALIGNCODE(16)
.vmlaunch64_done:
    jc near .vmxstart64_invalid_vmxon_ptr
    jz near .vmxstart64_start_failed

    ; Restore base and limit of the IDTR & GDTR
    lidt [xSP]
    add xSP, xS*2
    lgdt [xSP]
    add xSP, xS*2

    push xDI
    mov xDI, [xSP + xS * 2] ; pCtx (*2 to skip the saved LDTR)

    mov qword [xDI + CPUMCTX.eax], rax
    mov qword [xDI + CPUMCTX.ebx], rbx
    mov qword [xDI + CPUMCTX.ecx], rcx
    mov qword [xDI + CPUMCTX.edx], rdx
    mov qword [xDI + CPUMCTX.esi], rsi
    mov qword [xDI + CPUMCTX.ebp], rbp
    mov qword [xDI + CPUMCTX.r8],  r8
    mov qword [xDI + CPUMCTX.r9],  r9
    mov qword [xDI + CPUMCTX.r10], r10
    mov qword [xDI + CPUMCTX.r11], r11
    mov qword [xDI + CPUMCTX.r12], r12
    mov qword [xDI + CPUMCTX.r13], r13
    mov qword [xDI + CPUMCTX.r14], r14
    mov qword [xDI + CPUMCTX.r15], r15

    pop xAX             ; the guest edi we pushed above
    mov qword [xDI + CPUMCTX.edi], rax

%ifdef VBOX_WITH_DR6_EXPERIMENT
    ; Save DR6 - experiment, not safe!
    mov xAX, dr6
    mov [xDI + CPUMCTX.dr6], xAX
%endif

    pop xAX             ; saved LDTR
    lldt ax

    pop xSI             ; pCtx (needed in rsi by the macros below)

    ; Restore the host LSTAR, CSTAR, SFMASK & KERNEL_GSBASE MSRs
    ;; @todo use the automatic load feature for MSRs
    LOADHOSTMSREX MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
    LOADHOSTMSREX MSR_K8_SF_MASK, CPUMCTX.msrSFMASK
    LOADHOSTMSREX MSR_K6_STAR, CPUMCTX.msrSTAR
%if 0  ; not supported on Intel CPUs
    LOADHOSTMSREX MSR_K8_CSTAR, CPUMCTX.msrCSTAR
%endif
    LOADHOSTMSREX MSR_K8_LSTAR, CPUMCTX.msrLSTAR

%ifdef VMX_USE_CACHED_VMCS_ACCESSES
    pop xDX             ; saved pCache

    mov ecx, [xDX + VMCSCACHE.Read.cValidEntries]
    cmp ecx, 0          ; can't happen
    je .no_cached_reads
    jmp .cached_read

ALIGN(16)
.cached_read:
    dec xCX
    mov eax, [xDX + VMCSCACHE.Read.aField + xCX*4]
    vmread [xDX + VMCSCACHE.Read.aFieldVal + xCX*8], xAX
    cmp xCX, 0
    jnz .cached_read
.no_cached_reads:

    ; Save CR2 for EPT
    mov xAX, cr2
    mov [xDX + VMCSCACHE.cr2], xAX
%endif

    ; Restore segment registers
    MYPOPSEGS xAX, ax

    ; Restore general purpose registers
    MYPOPAD

    mov eax, VINF_SUCCESS

.vmstart64_end:
    popf
    pop xBP
    ret


.vmxstart64_invalid_vmxon_ptr:
    ; Restore base and limit of the IDTR & GDTR
    lidt [xSP]
    add xSP, xS*2
    lgdt [xSP]
    add xSP, xS*2

    pop xAX             ; saved LDTR
    lldt ax

    pop xSI             ; pCtx (needed in rsi by the macros below)

    ; Restore the host LSTAR, CSTAR, SFMASK & KERNEL_GSBASE MSRs
    ;; @todo use the automatic load feature for MSRs
    LOADHOSTMSREX MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
    LOADHOSTMSR MSR_K8_SF_MASK
    LOADHOSTMSR MSR_K6_STAR
%if 0  ; not supported on Intel CPUs
    LOADHOSTMSR MSR_K8_CSTAR
%endif
    LOADHOSTMSR MSR_K8_LSTAR

%ifdef VMX_USE_CACHED_VMCS_ACCESSES
    add xSP, xS         ; pCache
%endif

    ; Restore segment registers
    MYPOPSEGS xAX, ax

    ; Restore all general purpose host registers.
    MYPOPAD
    mov eax, VERR_VMX_INVALID_VMXON_PTR
    jmp .vmstart64_end

.vmxstart64_start_failed:
    ; Restore base and limit of the IDTR & GDTR
    lidt [xSP]
    add xSP, xS*2
    lgdt [xSP]
    add xSP, xS*2

    pop xAX             ; saved LDTR
    lldt ax

    pop xSI             ; pCtx (needed in rsi by the macros below)

    ; Restore the host LSTAR, CSTAR, SFMASK & KERNEL_GSBASE MSRs
    ;; @todo use the automatic load feature for MSRs
    LOADHOSTMSREX MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
    LOADHOSTMSR MSR_K8_SF_MASK
    LOADHOSTMSR MSR_K6_STAR
%if 0  ; not supported on Intel CPUs
    LOADHOSTMSR MSR_K8_CSTAR
%endif
    LOADHOSTMSR MSR_K8_LSTAR

%ifdef VMX_USE_CACHED_VMCS_ACCESSES
    add xSP, xS         ; pCache
%endif

    ; Restore segment registers
    MYPOPSEGS xAX, ax

    ; Restore all general purpose host registers.
    MYPOPAD
    mov eax, VERR_VMX_UNABLE_TO_START_VM
    jmp .vmstart64_end
ENDPROC MY_NAME(VMXR0StartVM64)
%endif ; RT_ARCH_AMD64


;/**
; * Prepares for and executes VMRUN (32-bit guests)
; *
; * @returns VBox status code
; * @param    pVMCBHostPhys    Physical address of host VMCB
; * @param    pVMCBPhys        Physical address of guest VMCB
; * @param    pCtx             Guest context
; */
ALIGNCODE(16)
BEGINPROC MY_NAME(SVMR0VMRun)
%ifdef RT_ARCH_AMD64 ; fake a cdecl stack frame
 %ifdef ASM_CALL64_GCC
    push rdx
    push rsi
    push rdi
 %else
    push r8
    push rdx
    push rcx
 %endif
    push 0
%endif
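    ; The pushes above recreate the x86 cdecl layout on AMD64: the three
    ; register arguments are spilled in reverse order and the 'push 0' stands
    ; in for the return-address slot, so the [xBP + xS*2 + ...] argument
    ; accesses below work unchanged on both architectures.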
    push xBP
    mov xBP, xSP
    pushf

    ;/* Manual save and restore:
    ; *  - General purpose registers except RIP, RSP, RAX
    ; *
    ; * Trashed:
    ; *  - CR2 (we don't care)
    ; *  - LDTR (reset to 0)
    ; *  - DRx (presumably not changed at all)
    ; *  - DR7 (reset to 0x400)
    ; */

    ;/* Save all general purpose host registers. */
    MYPUSHAD

    ;/* Save the Guest CPU context pointer. */
    mov xSI, [xBP + xS*2 + RTHCPHYS_CB*2]  ; pCtx
    push xSI            ; push for saving the state at the end

    ; save host fs, gs, sysenter msr etc
    mov xAX, [xBP + xS*2]                  ; pVMCBHostPhys (64 bits physical address; x86: take low dword only)
    push xAX            ; save for the vmload after vmrun
    vmsave

    ; setup eax for VMLOAD
    mov xAX, [xBP + xS*2 + RTHCPHYS_CB]    ; pVMCBPhys (64 bits physical address; take low dword only)

    ;/* Restore Guest's general purpose registers. */
    ;/* EAX is loaded from the VMCB by VMRUN */
    mov ebx, [xSI + CPUMCTX.ebx]
    mov ecx, [xSI + CPUMCTX.ecx]
    mov edx, [xSI + CPUMCTX.edx]
    mov edi, [xSI + CPUMCTX.edi]
    mov ebp, [xSI + CPUMCTX.ebp]
    mov esi, [xSI + CPUMCTX.esi]

    ; Clear the global interrupt flag & execute sti to make sure external interrupts cause a world switch
    clgi
    sti

    ; load guest fs, gs, sysenter msr etc
    vmload
    ; run the VM
    vmrun

    ;/* EAX is in the VMCB already; we can use it here. */

    ; save guest fs, gs, sysenter msr etc
    vmsave

    ; load host fs, gs, sysenter msr etc
    pop xAX             ; pushed above
    vmload

    ; Set the global interrupt flag again, but execute cli to make sure IF=0.
    cli
    stgi

    pop xAX             ; pCtx

    mov [ss:xAX + CPUMCTX.ebx], ebx
    mov [ss:xAX + CPUMCTX.ecx], ecx
    mov [ss:xAX + CPUMCTX.edx], edx
    mov [ss:xAX + CPUMCTX.esi], esi
    mov [ss:xAX + CPUMCTX.edi], edi
    mov [ss:xAX + CPUMCTX.ebp], ebp

    ; Restore general purpose registers
    MYPOPAD

    mov eax, VINF_SUCCESS

    popf
    pop xBP
%ifdef RT_ARCH_AMD64
    add xSP, 4*xS
%endif
    ret
ENDPROC MY_NAME(SVMR0VMRun)

%ifdef RT_ARCH_AMD64
;/**
; * Prepares for and executes VMRUN (64-bit guests)
; *
; * @returns VBox status code
; * @param    pVMCBHostPhys    Physical address of host VMCB
; * @param    pVMCBPhys        Physical address of guest VMCB
; * @param    pCtx             Guest context
; */
ALIGNCODE(16)
BEGINPROC MY_NAME(SVMR0VMRun64)
    ; fake a cdecl stack frame
 %ifdef ASM_CALL64_GCC
    push rdx
    push rsi
    push rdi
 %else
    push r8
    push rdx
    push rcx
 %endif
    push 0
    push rbp
    mov rbp, rsp
    pushf

    ;/* Manual save and restore:
    ; *  - General purpose registers except RIP, RSP, RAX
    ; *
    ; * Trashed:
    ; *  - CR2 (we don't care)
    ; *  - LDTR (reset to 0)
    ; *  - DRx (presumably not changed at all)
    ; *  - DR7 (reset to 0x400)
    ; */

    ;/* Save all general purpose host registers. */
    MYPUSHAD

    ;/* Save the Guest CPU context pointer. */
    mov rsi, [rbp + xS*2 + RTHCPHYS_CB*2]  ; pCtx
    push rsi            ; push for saving the state at the end

    ; save host fs, gs, sysenter msr etc
    mov rax, [rbp + xS*2]                  ; pVMCBHostPhys (64 bits physical address; x86: take low dword only)
    push rax            ; save for the vmload after vmrun
    vmsave

    ; set up rax for VMLOAD
    mov rax, [rbp + xS*2 + RTHCPHYS_CB]    ; pVMCBPhys (64 bits physical address; take low dword only)

    ;/* Restore Guest's general purpose registers. */
    ;/* RAX is loaded from the VMCB by VMRUN */
    mov rbx, qword [xSI + CPUMCTX.ebx]
    mov rcx, qword [xSI + CPUMCTX.ecx]
    mov rdx, qword [xSI + CPUMCTX.edx]
    mov rdi, qword [xSI + CPUMCTX.edi]
    mov rbp, qword [xSI + CPUMCTX.ebp]
    mov r8,  qword [xSI + CPUMCTX.r8]
    mov r9,  qword [xSI + CPUMCTX.r9]
    mov r10, qword [xSI + CPUMCTX.r10]
    mov r11, qword [xSI + CPUMCTX.r11]
    mov r12, qword [xSI + CPUMCTX.r12]
    mov r13, qword [xSI + CPUMCTX.r13]
    mov r14, qword [xSI + CPUMCTX.r14]
    mov r15, qword [xSI + CPUMCTX.r15]
    mov rsi, qword [xSI + CPUMCTX.esi]
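    ; rsi is loaded last on purpose: it doubles as the pCtx pointer (xSI) for
    ; all the guest-register fetches above, so it may only be overwritten once
    ; every other register has been read through it.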

    ; Clear the global interrupt flag & execute sti to make sure external interrupts cause a world switch
    clgi
    sti

    ; load guest fs, gs, sysenter msr etc
    vmload
    ; run the VM
    vmrun

    ;/* RAX is in the VMCB already; we can use it here. */

    ; save guest fs, gs, sysenter msr etc
    vmsave

    ; load host fs, gs, sysenter msr etc
    pop rax             ; pushed above
    vmload

    ; Set the global interrupt flag again, but execute cli to make sure IF=0.
    cli
    stgi

    pop rax             ; pCtx

    mov qword [rax + CPUMCTX.ebx], rbx
    mov qword [rax + CPUMCTX.ecx], rcx
    mov qword [rax + CPUMCTX.edx], rdx
    mov qword [rax + CPUMCTX.esi], rsi
    mov qword [rax + CPUMCTX.edi], rdi
    mov qword [rax + CPUMCTX.ebp], rbp
    mov qword [rax + CPUMCTX.r8],  r8
    mov qword [rax + CPUMCTX.r9],  r9
    mov qword [rax + CPUMCTX.r10], r10
    mov qword [rax + CPUMCTX.r11], r11
    mov qword [rax + CPUMCTX.r12], r12
    mov qword [rax + CPUMCTX.r13], r13
    mov qword [rax + CPUMCTX.r14], r14
    mov qword [rax + CPUMCTX.r15], r15

    ; Restore general purpose registers
    MYPOPAD

    mov eax, VINF_SUCCESS

    popf
    pop rbp
    add rsp, 4*xS
    ret
ENDPROC MY_NAME(SVMR0VMRun64)
%endif ; RT_ARCH_AMD64
