source: vbox/trunk/src/VBox/VMM/VMMSwitcher/LegacyandAMD64.mac @ 46814

Last change on this file since 46814 was 46099, checked in by vboxsync, 12 years ago:

VMM/VMMR0: Tidying of the assembly code.
1; $Id: LegacyandAMD64.mac 46099 2013-05-15 14:23:49Z vboxsync $
2;; @file
3; VMM - World Switchers, 32-bit to AMD64 intermediate context.
4;
5; This is used for running 64-bit guests on 32-bit hosts, not
6; normal raw-mode. All the code involved is contained in this
7; file.
8;
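; Rough flow, as implemented below: vmmR0ToRawMode saves the 32-bit host
; state, switches to the intermediate memory context, enables PAE and long
; mode from the identity-mapped IDEnterTarget code, continues in 64-bit mode
; at ICEnterTarget, dispatches to one of the HM64ON32OP_* helpers (VT-x/AMD-V
; world switch, guest FPU/debug state save), and finally returns through
; vmmRCToHostAsm, which drops back to 32-bit mode and restores the host.
;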
9
10;
11; Copyright (C) 2006-2013 Oracle Corporation
12;
13; This file is part of VirtualBox Open Source Edition (OSE), as
14; available from http://www.virtualbox.org. This file is free software;
15; you can redistribute it and/or modify it under the terms of the GNU
16; General Public License (GPL) as published by the Free Software
17; Foundation, in version 2 as it comes in the "COPYING" file of the
18; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
19; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
20;
21
22
23;*******************************************************************************
24;* Defined Constants And Macros *
25;*******************************************************************************
26;; @note These values are from the HM64ON32OP enum in hm.h.
27%define HM64ON32OP_VMXRCStartVM64 1
28%define HM64ON32OP_SVMRCVMRun64 2
29%define HM64ON32OP_HMRCSaveGuestFPU64 3
30%define HM64ON32OP_HMRCSaveGuestDebug64 4
31%define HM64ON32OP_HMRCTestSwitcher64 5
32
33;; Stubs for making OS/2 compile (though it will not work).
34%ifdef RT_OS_OS2 ;; @todo fix OMF support in yasm and kick nasm out completely.
35 %macro vmwrite 2,
36 int3
37 %endmacro
38 %define vmlaunch int3
39 %define vmresume int3
40 %define vmsave int3
41 %define vmload int3
42 %define vmrun int3
43 %define clgi int3
44 %define stgi int3
45 %macro invlpga 2,
46 int3
47 %endmacro
48%endif
49
50;; Debug options
51;%define DEBUG_STUFF 1
52;%define STRICT_IF 1
53
54
55;*******************************************************************************
56;* Header Files *
57;*******************************************************************************
58%include "VBox/asmdefs.mac"
59%include "iprt/x86.mac"
60%include "VBox/err.mac"
61%include "VBox/apic.mac"
62
63%include "VBox/vmm/cpum.mac"
64%include "VBox/vmm/stam.mac"
65%include "VBox/vmm/vm.mac"
66%include "VBox/vmm/hm_vmx.mac"
67%include "CPUMInternal.mac"
68%include "HMInternal.mac"
69%include "VMMSwitcher.mac"
70
71
72;
73; Start the fixup records
74; We collect the fixups in the .data section as we go along
75; It is therefore VITAL that no-one is using the .data section
76; for anything else between 'Start' and 'End'.
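; Each FIXUP invocation below adds a record here; the placeholder values it
; leaves in the code (typically 0ffffffffh) are patched by the relocation
; code (see NAME(Relocate) referenced at the end of this file).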
77;
78BEGINDATA
79GLOBALNAME Fixups
80
81
82
83BEGINCODE
84GLOBALNAME Start
85
86BITS 32
87
88;;
89; The C interface.
90; @param [esp + 04h] Param 1 - VM handle
91; @param [esp + 08h] Param 2 - Offset from VM::CPUM to the CPUMCPU
92; structure for the calling EMT.
93;
94BEGINPROC vmmR0ToRawMode
95%ifdef DEBUG_STUFF
96 COM32_S_NEWLINE
97 COM32_S_CHAR '^'
98%endif
99
100%ifdef VBOX_WITH_STATISTICS
101 ;
102 ; Switcher stats.
103 ;
104 FIXUP FIX_HC_VM_OFF, 1, VM.StatSwitcherToGC
105 mov edx, 0ffffffffh
106 STAM_PROFILE_ADV_START edx
107%endif
108
109 push ebp
110 mov ebp, [esp + 12] ; CPUMCPU offset
111
112 ; turn off interrupts
113 pushf
114 cli
115
116 ;
117 ; Call worker.
118 ;
119 FIXUP FIX_HC_CPUM_OFF, 1, 0
120 mov edx, 0ffffffffh
121 push cs ; allow for far return and restore cs correctly.
122 call NAME(vmmR0ToRawModeAsm)
123
124%ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
125 CPUM_FROM_CPUMCPU(edx)
126 ; Restore blocked Local APIC NMI vectors
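 ; (bit 0 = LINT0, bit 1 = LINT1, bit 2 = perf counter, bit 3 = thermal,
 ; matching the flags set by the masking code in vmmR0ToRawModeAsm; each
 ; shr below moves one flag into CF and jnc skips the corresponding unmask)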
127 mov ecx, [edx + CPUM.fApicDisVectors]
128 mov edx, [edx + CPUM.pvApicBase]
129 shr ecx, 1
130 jnc gth_nolint0
131 and dword [edx + APIC_REG_LVT_LINT0], ~APIC_REG_LVT_MASKED
132gth_nolint0:
133 shr ecx, 1
134 jnc gth_nolint1
135 and dword [edx + APIC_REG_LVT_LINT1], ~APIC_REG_LVT_MASKED
136gth_nolint1:
137 shr ecx, 1
138 jnc gth_nopc
139 and dword [edx + APIC_REG_LVT_PC], ~APIC_REG_LVT_MASKED
140gth_nopc:
141 shr ecx, 1
142 jnc gth_notherm
143 and dword [edx + APIC_REG_LVT_THMR], ~APIC_REG_LVT_MASKED
144gth_notherm:
145%endif
146
147 ; restore original flags
148 popf
149 pop ebp
150
151%ifdef VBOX_WITH_STATISTICS
152 ;
153 ; Switcher stats.
154 ;
155 FIXUP FIX_HC_VM_OFF, 1, VM.StatSwitcherToHC
156 mov edx, 0ffffffffh
157 STAM_PROFILE_ADV_STOP edx
158%endif
159
160 ret
161
162ENDPROC vmmR0ToRawMode
163
164; *****************************************************************************
165; vmmR0ToRawModeAsm
166;
167; Phase one of the switch from host to guest context (host MMU context)
168;
169; INPUT:
170; - edx virtual address of CPUM structure (valid in host context)
171; - ebp offset of the CPUMCPU structure relative to CPUM.
172;
173; USES/DESTROYS:
174; - eax, ecx, edx, esi
175;
176; ASSUMPTION:
177; - current CS and DS selectors are wide open
178;
179; *****************************************************************************
180ALIGNCODE(16)
181BEGINPROC vmmR0ToRawModeAsm
182 ;;
183 ;; Save CPU host context
184 ;; Skip eax, edx and ecx as these are not preserved over calls.
185 ;;
186 CPUMCPU_FROM_CPUM_WITH_OFFSET edx, ebp
187%ifdef VBOX_WITH_CRASHDUMP_MAGIC
188 ; phys address of scratch page
189 mov eax, dword [edx + CPUMCPU.Guest.dr + 4*8]
190 mov cr2, eax
191
192 mov dword [edx + CPUMCPU.Guest.dr + 4*8], 1
193%endif
194
195 ; general registers.
196 mov [edx + CPUMCPU.Host.ebx], ebx
197 mov [edx + CPUMCPU.Host.edi], edi
198 mov [edx + CPUMCPU.Host.esi], esi
199 mov [edx + CPUMCPU.Host.esp], esp
200 mov [edx + CPUMCPU.Host.ebp], ebp
201 ; selectors.
202 mov [edx + CPUMCPU.Host.ds], ds
203 mov [edx + CPUMCPU.Host.es], es
204 mov [edx + CPUMCPU.Host.fs], fs
205 mov [edx + CPUMCPU.Host.gs], gs
206 mov [edx + CPUMCPU.Host.ss], ss
207 ; special registers.
208 DEBUG32_S_CHAR('s')
209 DEBUG32_S_CHAR(';')
210 sldt [edx + CPUMCPU.Host.ldtr]
211 sidt [edx + CPUMCPU.Host.idtr]
212 sgdt [edx + CPUMCPU.Host.gdtr]
213 str [edx + CPUMCPU.Host.tr]
214
215%ifdef VBOX_WITH_CRASHDUMP_MAGIC
216 mov dword [edx + CPUMCPU.Guest.dr + 4*8], 2
217%endif
218
219%ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
220 DEBUG32_S_CHAR('f')
221 DEBUG32_S_CHAR(';')
222 CPUM_FROM_CPUMCPU_WITH_OFFSET edx, ebp
223 mov ebx, [edx + CPUM.pvApicBase]
224 or ebx, ebx
225 jz htg_noapic
226 mov eax, [ebx + APIC_REG_LVT_LINT0]
227 mov ecx, eax
228 and ecx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
229 cmp ecx, APIC_REG_LVT_MODE_NMI
230 jne htg_nolint0
231 or edi, 0x01
232 or eax, APIC_REG_LVT_MASKED
233 mov [ebx + APIC_REG_LVT_LINT0], eax
234 mov eax, [ebx + APIC_REG_LVT_LINT0] ; write completion
235htg_nolint0:
236 mov eax, [ebx + APIC_REG_LVT_LINT1]
237 mov ecx, eax
238 and ecx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
239 cmp ecx, APIC_REG_LVT_MODE_NMI
240 jne htg_nolint1
241 or edi, 0x02
242 or eax, APIC_REG_LVT_MASKED
243 mov [ebx + APIC_REG_LVT_LINT1], eax
244 mov eax, [ebx + APIC_REG_LVT_LINT1] ; write completion
245htg_nolint1:
246 mov eax, [ebx + APIC_REG_LVT_PC]
247 mov ecx, eax
248 and ecx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
249 cmp ecx, APIC_REG_LVT_MODE_NMI
250 jne htg_nopc
251 or edi, 0x04
252 or eax, APIC_REG_LVT_MASKED
253 mov [ebx + APIC_REG_LVT_PC], eax
254 mov eax, [ebx + APIC_REG_LVT_PC] ; write completion
255htg_nopc:
256 mov eax, [ebx + APIC_REG_VERSION]
257 shr eax, 16
258 cmp al, 5
259 jb htg_notherm
260 mov eax, [ebx + APIC_REG_LVT_THMR]
261 mov ecx, eax
262 and ecx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
263 cmp ecx, APIC_REG_LVT_MODE_NMI
264 jne htg_notherm
265 or edi, 0x08
266 or eax, APIC_REG_LVT_MASKED
267 mov [ebx + APIC_REG_LVT_THMR], eax
268 mov eax, [ebx + APIC_REG_LVT_THMR] ; write completion
269htg_notherm:
270 mov [edx + CPUM.fApicDisVectors], edi
271htg_noapic:
272 CPUMCPU_FROM_CPUM_WITH_OFFSET edx, ebp
273%endif
274
275 ; control registers.
276 mov eax, cr0
277 mov [edx + CPUMCPU.Host.cr0], eax
278 ; Skip cr2; assume the host OS doesn't stuff things in cr2. (safe)
279 mov eax, cr3
280 mov [edx + CPUMCPU.Host.cr3], eax
281 mov eax, cr4
282 mov [edx + CPUMCPU.Host.cr4], eax
283 DEBUG32_S_CHAR('c')
284 DEBUG32_S_CHAR(';')
285
286 ; save the host EFER msr
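 ; (rdmsr takes the MSR index in ecx and returns the value in edx:eax, so
 ; park the CPUMCPU pointer in ebx while reading it)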
287 mov ebx, edx
288 mov ecx, MSR_K6_EFER
289 rdmsr
290 mov [ebx + CPUMCPU.Host.efer], eax
291 mov [ebx + CPUMCPU.Host.efer + 4], edx
292 mov edx, ebx
293 DEBUG32_S_CHAR('e')
294 DEBUG32_S_CHAR(';')
295
296%ifdef VBOX_WITH_CRASHDUMP_MAGIC
297 mov dword [edx + CPUMCPU.Guest.dr + 4*8], 3
298%endif
299
300 ; Load the new GDT so we can do a far jump after going into 64-bit mode.
301 lgdt [edx + CPUMCPU.Hyper.gdtr]
302
303 DEBUG32_S_CHAR('g')
304 DEBUG32_S_CHAR('!')
305%ifdef VBOX_WITH_CRASHDUMP_MAGIC
306 mov dword [edx + CPUMCPU.Guest.dr + 4*8], 4
307%endif
308
309 ;;
310 ;; Load Intermediate memory context.
311 ;;
312 FIXUP SWITCHER_FIX_INTER_CR3_HC, 1
313 mov eax, 0ffffffffh
314 mov cr3, eax
315 DEBUG32_CHAR('?')
316
317 ;;
318 ;; Jump to identity mapped location
319 ;;
320 FIXUP FIX_HC_2_ID_NEAR_REL, 1, NAME(IDEnterTarget) - NAME(Start)
321 jmp near NAME(IDEnterTarget)
322
323
324 ; We're now on identity mapped pages!
325ALIGNCODE(16)
326GLOBALNAME IDEnterTarget
327 DEBUG32_CHAR('1')
328
329 ; 1. Disable paging.
330 mov ebx, cr0
331 and ebx, ~X86_CR0_PG
332 mov cr0, ebx
333 DEBUG32_CHAR('2')
334
335%ifdef VBOX_WITH_CRASHDUMP_MAGIC
336 mov eax, cr2
337 mov dword [eax], 3
338%endif
339
340 ; 2. Enable PAE.
341 mov ecx, cr4
342 or ecx, X86_CR4_PAE
343 mov cr4, ecx
344
345 ; 3. Load long mode intermediate CR3.
346 FIXUP FIX_INTER_AMD64_CR3, 1
347 mov ecx, 0ffffffffh
348 mov cr3, ecx
349 DEBUG32_CHAR('3')
350
351%ifdef VBOX_WITH_CRASHDUMP_MAGIC
352 mov eax, cr2
353 mov dword [eax], 4
354%endif
355
356 ; 4. Enable long mode.
357 mov esi, edx
358 mov ecx, MSR_K6_EFER
359 rdmsr
360 FIXUP FIX_EFER_OR_MASK, 1
361 or eax, 0ffffffffh
362 and eax, ~(MSR_K6_EFER_FFXSR) ; turn off fast fxsave/fxrstor (skipping xmm regs)
363 wrmsr
364 mov edx, esi
365 DEBUG32_CHAR('4')
366
367%ifdef VBOX_WITH_CRASHDUMP_MAGIC
368 mov eax, cr2
369 mov dword [eax], 5
370%endif
371
372 ; 5. Enable paging.
373 or ebx, X86_CR0_PG
374 ; Disable ring 0 write protection too
375 and ebx, ~X86_CR0_WRITE_PROTECT
376 mov cr0, ebx
377 DEBUG32_CHAR('5')
378
379 ; Jump from compatibility mode to 64-bit mode.
380 FIXUP FIX_ID_FAR32_TO_64BIT_MODE, 1, NAME(IDEnter64Mode) - NAME(Start)
381 jmp 0ffffh:0fffffffeh
382
383 ;
384 ; We're in 64-bit mode (ds, ss, es, fs, gs are all bogus).
385BITS 64
386ALIGNCODE(16)
387NAME(IDEnter64Mode):
388 DEBUG64_CHAR('6')
389 jmp [NAME(pICEnterTarget) wrt rip]
390
391; 64-bit jump target
392NAME(pICEnterTarget):
393FIXUP FIX_HC_64BIT_NOCHECK, 0, NAME(ICEnterTarget) - NAME(Start)
394dq 0ffffffffffffffffh
395
396; 64-bit pCpum address.
397NAME(pCpumIC):
398FIXUP FIX_GC_64_BIT_CPUM_OFF, 0, 0
399dq 0ffffffffffffffffh
400
401%ifdef VBOX_WITH_CRASHDUMP_MAGIC
402NAME(pMarker):
403db 'Switch_marker'
404%endif
405
406 ;
407 ; When we arrive here we're in 64-bit mode in the intermediate context
408 ;
409ALIGNCODE(16)
410GLOBALNAME ICEnterTarget
411 ; Load CPUM pointer into rdx
412 mov rdx, [NAME(pCpumIC) wrt rip]
413 CPUMCPU_FROM_CPUM_WITH_OFFSET edx, ebp
414
415 mov rax, cs
416 mov ds, rax
417 mov es, rax
418
419 ; Invalidate fs & gs
420 mov rax, 0
421 mov fs, rax
422 mov gs, rax
423
424%ifdef VBOX_WITH_CRASHDUMP_MAGIC
425 mov dword [rdx + CPUMCPU.Guest.dr + 4*8], 5
426%endif
427
428 ; Setup stack.
429 DEBUG64_CHAR('7')
430 mov rsp, 0
431 mov eax, [rdx + CPUMCPU.Hyper.ss.Sel]
432 mov ss, ax
433 mov esp, [rdx + CPUMCPU.Hyper.esp]
434
435%ifdef VBOX_WITH_CRASHDUMP_MAGIC
436 mov dword [rdx + CPUMCPU.Guest.dr + 4*8], 6
437%endif
438
439
440 ; load the hypervisor function address
441 mov r9, [rdx + CPUMCPU.Hyper.eip]
442 DEBUG64_S_CHAR('8')
443
444 ; Check if we need to restore the guest FPU state
445 mov esi, [rdx + CPUMCPU.fUseFlags] ; esi == use flags.
446 test esi, CPUM_SYNC_FPU_STATE
447 jz near gth_fpu_no
448
449%ifdef VBOX_WITH_CRASHDUMP_MAGIC
450 mov dword [rdx + CPUMCPU.Guest.dr + 4*8], 7
451%endif
452
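 ; Temporarily clear CR0.TS and CR0.EM so the fxrstor below cannot raise
 ; #NM or #UD.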
453 mov rax, cr0
454 mov rcx, rax ; save old CR0
455 and rax, ~(X86_CR0_TS | X86_CR0_EM)
456 mov cr0, rax
457 fxrstor [rdx + CPUMCPU.Guest.fpu]
458 mov cr0, rcx ; and restore old CR0 again
459
460 and dword [rdx + CPUMCPU.fUseFlags], ~CPUM_SYNC_FPU_STATE
461
462gth_fpu_no:
463 ; Check if we need to restore the guest debug state
464 test esi, CPUM_SYNC_DEBUG_STATE
465 jz near gth_debug_no
466
467%ifdef VBOX_WITH_CRASHDUMP_MAGIC
468 mov dword [rdx + CPUMCPU.Guest.dr + 4*8], 8
469%endif
470
471 mov rax, qword [rdx + CPUMCPU.Guest.dr + 0*8]
472 mov dr0, rax
473 mov rax, qword [rdx + CPUMCPU.Guest.dr + 1*8]
474 mov dr1, rax
475 mov rax, qword [rdx + CPUMCPU.Guest.dr + 2*8]
476 mov dr2, rax
477 mov rax, qword [rdx + CPUMCPU.Guest.dr + 3*8]
478 mov dr3, rax
479 mov rax, qword [rdx + CPUMCPU.Guest.dr + 6*8]
480 mov dr6, rax ; not required for AMD-V
481
482 and dword [rdx + CPUMCPU.fUseFlags], ~CPUM_SYNC_DEBUG_STATE
483
484gth_debug_no:
485
486%ifdef VBOX_WITH_CRASHDUMP_MAGIC
487 mov dword [rdx + CPUMCPU.Guest.dr + 4*8], 9
488%endif
489
490 ; parameter for all helper functions (pCtx)
491 DEBUG64_CHAR('9')
492 lea rsi, [rdx + CPUMCPU.Guest.fpu]
493 lea rax, [gth_return wrt rip]
494 push rax ; return address
495
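 ; r9 holds the HM64ON32OP_* operation code (loaded from CPUMCPU.Hyper.eip
 ; above); each helper returns to gth_return through the pushed return
 ; address with its status code in eax.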
496 cmp r9d, HM64ON32OP_VMXRCStartVM64
497 jz NAME(VMXRCStartVM64)
498 cmp r9d, HM64ON32OP_SVMRCVMRun64
499 jz NAME(SVMRCVMRun64)
500 cmp r9d, HM64ON32OP_HMRCSaveGuestFPU64
501 jz NAME(HMRCSaveGuestFPU64)
502 cmp r9d, HM64ON32OP_HMRCSaveGuestDebug64
503 jz NAME(HMRCSaveGuestDebug64)
504 cmp r9d, HM64ON32OP_HMRCTestSwitcher64
505 jz NAME(HMRCTestSwitcher64)
506 mov eax, VERR_HM_INVALID_HM64ON32OP
507gth_return:
508 DEBUG64_CHAR('r')
509
510 ; Load CPUM pointer into rdx
511 mov rdx, [NAME(pCpumIC) wrt rip]
512 CPUMCPU_FROM_CPUM_WITH_OFFSET edx, ebp
513
514%ifdef VBOX_WITH_CRASHDUMP_MAGIC
515 mov dword [rdx + CPUMCPU.Guest.dr + 4*8], 10
516%endif
517
518 ; Save the return code
519 mov dword [rdx + CPUMCPU.u32RetCode], eax
520
521 ; now let's switch back
522 jmp NAME(vmmRCToHostAsm) ; rax = returncode.
523
524ENDPROC vmmR0ToRawModeAsm
525
526
527
528
529;
530;
531; HM code (used to be HMRCA.asm at one point).
532; HM code (used to be HMRCA.asm at one point).
533; HM code (used to be HMRCA.asm at one point).
534;
535;
536
537
538
539; Load the corresponding guest MSR (trashes rdx & rcx)
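; (wrmsr takes the MSR index in ecx and the value in edx:eax, hence the high
; dword goes into edx and the low dword into eax; rdmsr in SAVEGUESTMSR below
; returns the value the same way)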
540%macro LOADGUESTMSR 2
541 mov rcx, %1
542 mov edx, dword [rsi + %2 + 4]
543 mov eax, dword [rsi + %2]
544 wrmsr
545%endmacro
546
547; Save a guest MSR (trashes rdx & rcx)
548; Only really useful for gs kernel base as that one can be changed behind our back (swapgs)
549%macro SAVEGUESTMSR 2
550 mov rcx, %1
551 rdmsr
552 mov dword [rsi + %2], eax
553 mov dword [rsi + %2 + 4], edx
554%endmacro
555
556;; @def MYPUSHSEGS
557; Macro saving all segment registers on the stack.
558; @param 1 full width register name
559%macro MYPUSHSEGS 1
560 mov %1, es
561 push %1
562 mov %1, ds
563 push %1
564%endmacro
565
566;; @def MYPOPSEGS
567; Macro restoring all segment registers on the stack
568; @param 1 full width register name
569%macro MYPOPSEGS 1
570 pop %1
571 mov ds, %1
572 pop %1
573 mov es, %1
574%endmacro
575
576
577;/**
578 ; * Prepares for and executes VMLAUNCH/VMRESUME (64-bit guest mode)
579; *
580; * @returns VBox status code
581; * @param HCPhysCpuPage VMXON physical address [rsp+8]
582; * @param HCPhysVmcs VMCS physical address [rsp+16]
583; * @param pCache VMCS cache [rsp+24]
584; * @param pCtx Guest context (rsi)
585; */
586BEGINPROC VMXRCStartVM64
587 push rbp
588 mov rbp, rsp
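 ; After the push rbp / mov rbp, rsp above the stack parameters are at
 ; [rbp + 16], [rbp + 24] and [rbp + 32], hence the [rbp + 8 + 8],
 ; [rbp + 16 + 8] and [rbp + 24 + 8] addressing used below.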
589
590 ; Make sure VT-x instructions are allowed.
591 mov rax, cr4
592 or rax, X86_CR4_VMXE
593 mov cr4, rax
594
595 ; Enter VMX Root Mode.
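 ; (VT-x instructions report failure through RFLAGS: CF set means
 ; VMfailInvalid, ZF set means VMfailValid, hence the jnc/jnz checks after
 ; vmxon and vmptrld below.)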
596 vmxon [rbp + 8 + 8]
597 jnc .vmxon_success
598 mov rax, VERR_VMX_INVALID_VMXON_PTR
599 jmp .vmstart64_vmxon_failed
600
601.vmxon_success:
602 jnz .vmxon_success2
603 mov rax, VERR_VMX_VMXON_FAILED
604 jmp .vmstart64_vmxon_failed
605
606.vmxon_success2:
607 ; Activate the VMCS pointer
608 vmptrld [rbp + 16 + 8]
609 jnc .vmptrld_success
610 mov rax, VERR_VMX_INVALID_VMCS_PTR
611 jmp .vmstart64_vmxoff_end
612
613.vmptrld_success:
614 jnz .vmptrld_success2
615 mov rax, VERR_VMX_VMPTRLD_FAILED
616 jmp .vmstart64_vmxoff_end
617
618.vmptrld_success2:
619
620 ; Save the VMCS pointer on the stack
621 push qword [rbp + 16 + 8];
622
623 ; Save segment registers.
624 MYPUSHSEGS rax
625
626%ifdef VMX_USE_CACHED_VMCS_ACCESSES
627 ; Flush the VMCS write cache first (before any other vmreads/vmwrites!).
628 mov rbx, [rbp + 24 + 8] ; pCache
629
630 %ifdef VBOX_WITH_CRASHDUMP_MAGIC
631 mov qword [rbx + VMCSCACHE.uPos], 2
632 %endif
633
634 %ifdef DEBUG
635 mov rax, [rbp + 8 + 8] ; HCPhysCpuPage
636 mov [rbx + VMCSCACHE.TestIn.HCPhysCpuPage], rax
637 mov rax, [rbp + 16 + 8] ; HCPhysVmcs
638 mov [rbx + VMCSCACHE.TestIn.HCPhysVmcs], rax
639 mov [rbx + VMCSCACHE.TestIn.pCache], rbx
640 mov [rbx + VMCSCACHE.TestIn.pCtx], rsi
641 %endif
642
643 mov ecx, [rbx + VMCSCACHE.Write.cValidEntries]
644 cmp ecx, 0
645 je .no_cached_writes
646 mov rdx, rcx
647 mov rcx, 0
648 jmp .cached_write
649
650ALIGN(16)
651.cached_write:
652 mov eax, [rbx + VMCSCACHE.Write.aField + rcx*4]
653 vmwrite rax, qword [rbx + VMCSCACHE.Write.aFieldVal + rcx*8]
654 inc rcx
655 cmp rcx, rdx
656 jl .cached_write
657
658 mov dword [rbx + VMCSCACHE.Write.cValidEntries], 0
659.no_cached_writes:
660
661 %ifdef VBOX_WITH_CRASHDUMP_MAGIC
662 mov qword [rbx + VMCSCACHE.uPos], 3
663 %endif
664 ; Save the pCache pointer.
665 push rbx
666%endif
667
668 ; Save the host state that's relevant in the temporary 64-bit mode.
669 mov rdx, cr0
670 mov eax, VMX_VMCS_HOST_CR0
671 vmwrite rax, rdx
672
673 mov rdx, cr3
674 mov eax, VMX_VMCS_HOST_CR3
675 vmwrite rax, rdx
676
677 mov rdx, cr4
678 mov eax, VMX_VMCS_HOST_CR4
679 vmwrite rax, rdx
680
681 mov rdx, cs
682 mov eax, VMX_VMCS_HOST_FIELD_CS
683 vmwrite rax, rdx
684
685 mov rdx, ss
686 mov eax, VMX_VMCS_HOST_FIELD_SS
687 vmwrite rax, rdx
688
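 ; (sgdt stores a 2-byte limit followed by the 8-byte base when executed in
 ; 64-bit mode, so the GDT base is read from [rsp + 2])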
689 sub rsp, 8*2
690 sgdt [rsp]
691 mov eax, VMX_VMCS_HOST_GDTR_BASE
692 vmwrite rax, [rsp+2]
693 add rsp, 8*2
694
695%ifdef VBOX_WITH_CRASHDUMP_MAGIC
696 mov qword [rbx + VMCSCACHE.uPos], 4
697%endif
698
699 ; Hopefully we can ignore TR (we restore it anyway on the way back to 32-bit mode).
700
701 ; First we have to save some final CPU context registers.
702 lea rdx, [.vmlaunch64_done wrt rip]
703 mov rax, VMX_VMCS_HOST_RIP ; Return address (too difficult to continue after VMLAUNCH?).
704 vmwrite rax, rdx
705 ; Note: assumes success!
706
707 ; Manual save and restore:
708 ; - General purpose registers except RIP, RSP
709 ;
710 ; Trashed:
711 ; - CR2 (we don't care)
712 ; - LDTR (reset to 0)
713 ; - DRx (presumably not changed at all)
714 ; - DR7 (reset to 0x400)
715 ; - EFLAGS (reset to RT_BIT(1); not relevant)
716
717%ifndef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
718 ; Load the guest LSTAR, CSTAR, SFMASK & KERNEL_GSBASE MSRs.
719 LOADGUESTMSR MSR_K8_LSTAR, CPUMCTX.msrLSTAR
720 LOADGUESTMSR MSR_K6_STAR, CPUMCTX.msrSTAR
721 LOADGUESTMSR MSR_K8_SF_MASK, CPUMCTX.msrSFMASK
722 LOADGUESTMSR MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
723%else
724%ifdef VBOX_WITH_OLD_VTX_CODE
725 ; The KERNEL_GS_BASE MSR does not work reliably with auto load/store. See @bugref{6208}
726 LOADGUESTMSR MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
727%endif
728%endif
729
730%ifdef VBOX_WITH_CRASHDUMP_MAGIC
731 mov qword [rbx + VMCSCACHE.uPos], 5
732%endif
733
734 ; Save the pCtx pointer
735 push rsi
736
737 ; Load CR2 if necessary (may be expensive as writing CR2 is a synchronizing instruction).
738 mov rbx, qword [rsi + CPUMCTX.cr2]
739 mov rdx, cr2
740 cmp rdx, rbx
741 je .skipcr2write64
742 mov cr2, rbx
743
744.skipcr2write64:
745 mov eax, VMX_VMCS_HOST_RSP
746 vmwrite rax, rsp
747 ; Note: assumes success!
748 ; Don't mess with ESP anymore!!!
749
750 ; Save Guest's general purpose registers.
751 mov rax, qword [rsi + CPUMCTX.eax]
752 mov rbx, qword [rsi + CPUMCTX.ebx]
753 mov rcx, qword [rsi + CPUMCTX.ecx]
754 mov rdx, qword [rsi + CPUMCTX.edx]
755 mov rbp, qword [rsi + CPUMCTX.ebp]
756 mov r8, qword [rsi + CPUMCTX.r8]
757 mov r9, qword [rsi + CPUMCTX.r9]
758 mov r10, qword [rsi + CPUMCTX.r10]
759 mov r11, qword [rsi + CPUMCTX.r11]
760 mov r12, qword [rsi + CPUMCTX.r12]
761 mov r13, qword [rsi + CPUMCTX.r13]
762 mov r14, qword [rsi + CPUMCTX.r14]
763 mov r15, qword [rsi + CPUMCTX.r15]
764
765 ; Save rdi & rsi.
766 mov rdi, qword [rsi + CPUMCTX.edi]
767 mov rsi, qword [rsi + CPUMCTX.esi]
768
769 vmlaunch
770 jmp .vmlaunch64_done ; Here if vmlaunch detected a failure.
771
772ALIGNCODE(16)
773.vmlaunch64_done:
774 jc near .vmstart64_invalid_vmcs_ptr
775 jz near .vmstart64_start_failed
776
777 push rdi
778 mov rdi, [rsp + 8] ; pCtx
779
780 mov qword [rdi + CPUMCTX.eax], rax
781 mov qword [rdi + CPUMCTX.ebx], rbx
782 mov qword [rdi + CPUMCTX.ecx], rcx
783 mov qword [rdi + CPUMCTX.edx], rdx
784 mov qword [rdi + CPUMCTX.esi], rsi
785 mov qword [rdi + CPUMCTX.ebp], rbp
786 mov qword [rdi + CPUMCTX.r8], r8
787 mov qword [rdi + CPUMCTX.r9], r9
788 mov qword [rdi + CPUMCTX.r10], r10
789 mov qword [rdi + CPUMCTX.r11], r11
790 mov qword [rdi + CPUMCTX.r12], r12
791 mov qword [rdi + CPUMCTX.r13], r13
792 mov qword [rdi + CPUMCTX.r14], r14
793 mov qword [rdi + CPUMCTX.r15], r15
794%ifndef VBOX_WITH_OLD_VTX_CODE
795 mov rax, cr2
796 mov qword [rdi + CPUMCTX.cr2], rax
797%endif
798
799 pop rax ; The guest edi we pushed above
800 mov qword [rdi + CPUMCTX.edi], rax
801
802 pop rsi ; pCtx (needed in rsi by the macros below)
803
804%ifndef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
805 SAVEGUESTMSR MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
806 SAVEGUESTMSR MSR_K8_SF_MASK, CPUMCTX.msrSFMASK
807 SAVEGUESTMSR MSR_K6_STAR, CPUMCTX.msrSTAR
808 SAVEGUESTMSR MSR_K8_LSTAR, CPUMCTX.msrLSTAR
809%else
810%ifdef VBOX_WITH_OLD_VTX_CODE
811 ; The KERNEL_GS_BASE MSR does not work reliably with auto load/store. See @bugref{6208}
812 SAVEGUESTMSR MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
813%endif
814%endif
815
816%ifdef VMX_USE_CACHED_VMCS_ACCESSES
817 pop rdi ; Saved pCache
818
819 %ifdef VBOX_WITH_CRASHDUMP_MAGIC
820 mov dword [rdi + VMCSCACHE.uPos], 7
821 %endif
822 %ifdef DEBUG
823 mov [rdi + VMCSCACHE.TestOut.pCache], rdi
824 mov [rdi + VMCSCACHE.TestOut.pCtx], rsi
825 mov rax, cr8
826 mov [rdi + VMCSCACHE.TestOut.cr8], rax
827 %endif
828
829 mov ecx, [rdi + VMCSCACHE.Read.cValidEntries]
830 cmp ecx, 0 ; Can't happen
831 je .no_cached_reads
832 jmp .cached_read
833
834ALIGN(16)
835.cached_read:
836 dec rcx
837 mov eax, [rdi + VMCSCACHE.Read.aField + rcx*4]
838 vmread qword [rdi + VMCSCACHE.Read.aFieldVal + rcx*8], rax
839 cmp rcx, 0
840 jnz .cached_read
841.no_cached_reads:
842
843 %ifdef VBOX_WITH_OLD_VTX_CODE
844 ; Restore CR2 into VMCS-cache field (for EPT).
845 mov rax, cr2
846 mov [rdi + VMCSCACHE.cr2], rax
847 %endif
848 %ifdef VBOX_WITH_CRASHDUMP_MAGIC
849 mov dword [rdi + VMCSCACHE.uPos], 8
850 %endif
851%endif
852
853 ; Restore segment registers.
854 MYPOPSEGS rax
855
856 mov eax, VINF_SUCCESS
857
858%ifdef VBOX_WITH_CRASHDUMP_MAGIC
859 mov dword [rdi + VMCSCACHE.uPos], 9
860%endif
861.vmstart64_end:
862
863%ifdef VMX_USE_CACHED_VMCS_ACCESSES
864 %ifdef DEBUG
865 mov rdx, [rsp] ; HCPhysVmcs
866 mov [rdi + VMCSCACHE.TestOut.HCPhysVmcs], rdx
867 %endif
868%endif
869
870 ; Write back the data and disable the VMCS.
871 vmclear qword [rsp] ; Pushed pVMCS
872 add rsp, 8
873
874.vmstart64_vmxoff_end:
875 ; Disable VMX root mode.
876 vmxoff
877.vmstart64_vmxon_failed:
878%ifdef VMX_USE_CACHED_VMCS_ACCESSES
879 %ifdef DEBUG
880 cmp eax, VINF_SUCCESS
881 jne .skip_flags_save
882
883 pushf
884 pop rdx
885 mov [rdi + VMCSCACHE.TestOut.eflags], rdx
886 %ifdef VBOX_WITH_CRASHDUMP_MAGIC
887 mov dword [rdi + VMCSCACHE.uPos], 12
888 %endif
889.skip_flags_save:
890 %endif
891%endif
892 pop rbp
893 ret
894
895
896.vmstart64_invalid_vmcs_ptr:
897 pop rsi ; pCtx (needed in rsi by the macros below)
898
899%ifdef VMX_USE_CACHED_VMCS_ACCESSES
900 pop rdi ; pCache
901 %ifdef VBOX_WITH_CRASHDUMP_MAGIC
902 mov dword [rdi + VMCSCACHE.uPos], 10
903 %endif
904
905 %ifdef DEBUG
906 mov [rdi + VMCSCACHE.TestOut.pCache], rdi
907 mov [rdi + VMCSCACHE.TestOut.pCtx], rsi
908 %endif
909%endif
910
911 ; Restore segment registers.
912 MYPOPSEGS rax
913
914 ; Restore all general purpose host registers.
915 mov eax, VERR_VMX_INVALID_VMCS_PTR_TO_START_VM
916 jmp .vmstart64_end
917
918.vmstart64_start_failed:
919 pop rsi ; pCtx (needed in rsi by the macros below)
920
921%ifdef VMX_USE_CACHED_VMCS_ACCESSES
922 pop rdi ; pCache
923
924 %ifdef DEBUG
925 mov [rdi + VMCSCACHE.TestOut.pCache], rdi
926 mov [rdi + VMCSCACHE.TestOut.pCtx], rsi
927 %endif
928 %ifdef VBOX_WITH_CRASHDUMP_MAGIC
929 mov dword [rdi + VMCSCACHE.uPos], 11
930 %endif
931%endif
932
933 ; Restore segment registers.
934 MYPOPSEGS rax
935
936 ; Restore all general purpose host registers.
937 mov eax, VERR_VMX_UNABLE_TO_START_VM
938 jmp .vmstart64_end
939ENDPROC VMXRCStartVM64
940
941
942;/**
943 ; * Prepares for and executes VMRUN (64-bit guests)
944; *
945; * @returns VBox status code
946 ; * @param pVMCBHostPhys Physical address of host VMCB (rsp+8)
947 ; * @param pVMCBPhys Physical address of guest VMCB (rsp+16)
948; * @param pCtx Guest context (rsi)
949; */
950BEGINPROC SVMRCVMRun64
951 push rbp
952 mov rbp, rsp
953 pushf
954
955 ; Manual save and restore:
956 ; - General purpose registers except RIP, RSP, RAX
957 ;
958 ; Trashed:
959 ; - CR2 (we don't care)
960 ; - LDTR (reset to 0)
961 ; - DRx (presumably not changed at all)
962 ; - DR7 (reset to 0x400)
963
964 ; Save the Guest CPU context pointer.
965 push rsi ; Push for saving the state at the end
966
967 ; Save host fs, gs, sysenter msr etc
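 ; (vmsave and vmload below take the physical address of a VMCB in rax)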
968 mov rax, [rbp + 8 + 8] ; pVMCBHostPhys (64 bits physical address)
969 push rax ; Save for the vmload after vmrun
970 vmsave
971
972 ; Setup eax for VMLOAD
973 mov rax, [rbp + 8 + 8 + RTHCPHYS_CB] ; pVMCBPhys (64 bits physical address)
974
975 ; Restore Guest's general purpose registers.
976 ; rax is loaded from the VMCB by VMRUN.
977 mov rbx, qword [rsi + CPUMCTX.ebx]
978 mov rcx, qword [rsi + CPUMCTX.ecx]
979 mov rdx, qword [rsi + CPUMCTX.edx]
980 mov rdi, qword [rsi + CPUMCTX.edi]
981 mov rbp, qword [rsi + CPUMCTX.ebp]
982 mov r8, qword [rsi + CPUMCTX.r8]
983 mov r9, qword [rsi + CPUMCTX.r9]
984 mov r10, qword [rsi + CPUMCTX.r10]
985 mov r11, qword [rsi + CPUMCTX.r11]
986 mov r12, qword [rsi + CPUMCTX.r12]
987 mov r13, qword [rsi + CPUMCTX.r13]
988 mov r14, qword [rsi + CPUMCTX.r14]
989 mov r15, qword [rsi + CPUMCTX.r15]
990 mov rsi, qword [rsi + CPUMCTX.esi]
991
992 ; Clear the global interrupt flag & execute sti to make sure external interrupts cause a world switch.
993 clgi
994 sti
995
996 ; Load guest fs, gs, sysenter msr etc
997 vmload
998 ; Run the VM
999 vmrun
1000
1001 ; rax is in the VMCB already; we can use it here.
1002
1003 ; Save guest fs, gs, sysenter msr etc.
1004 vmsave
1005
1006 ; Load host fs, gs, sysenter msr etc.
1007 pop rax ; Pushed above
1008 vmload
1009
1010 ; Set the global interrupt flag again, but execute cli to make sure IF=0.
1011 cli
1012 stgi
1013
1014 pop rax ; pCtx
1015
1016 mov qword [rax + CPUMCTX.ebx], rbx
1017 mov qword [rax + CPUMCTX.ecx], rcx
1018 mov qword [rax + CPUMCTX.edx], rdx
1019 mov qword [rax + CPUMCTX.esi], rsi
1020 mov qword [rax + CPUMCTX.edi], rdi
1021 mov qword [rax + CPUMCTX.ebp], rbp
1022 mov qword [rax + CPUMCTX.r8], r8
1023 mov qword [rax + CPUMCTX.r9], r9
1024 mov qword [rax + CPUMCTX.r10], r10
1025 mov qword [rax + CPUMCTX.r11], r11
1026 mov qword [rax + CPUMCTX.r12], r12
1027 mov qword [rax + CPUMCTX.r13], r13
1028 mov qword [rax + CPUMCTX.r14], r14
1029 mov qword [rax + CPUMCTX.r15], r15
1030
1031 mov eax, VINF_SUCCESS
1032
1033 popf
1034 pop rbp
1035 ret
1036ENDPROC SVMRCVMRun64
1037
1038;/**
1039; * Saves the guest FPU context
1040; *
1041; * @returns VBox status code
1042; * @param pCtx Guest context [rsi]
1043; */
1044BEGINPROC HMRCSaveGuestFPU64
1045 mov rax, cr0
1046 mov rcx, rax ; save old CR0
1047 and rax, ~(X86_CR0_TS | X86_CR0_EM)
1048 mov cr0, rax
1049
1050 fxsave [rsi + CPUMCTX.fpu]
1051
1052 mov cr0, rcx ; and restore old CR0 again
1053
1054 mov eax, VINF_SUCCESS
1055 ret
1056ENDPROC HMRCSaveGuestFPU64
1057
1058;/**
1059; * Saves the guest debug context (DR0-3, DR6)
1060; *
1061; * @returns VBox status code
1062; * @param pCtx Guest context [rsi]
1063; */
1064BEGINPROC HMRCSaveGuestDebug64
1065 mov rax, dr0
1066 mov qword [rsi + CPUMCTX.dr + 0*8], rax
1067 mov rax, dr1
1068 mov qword [rsi + CPUMCTX.dr + 1*8], rax
1069 mov rax, dr2
1070 mov qword [rsi + CPUMCTX.dr + 2*8], rax
1071 mov rax, dr3
1072 mov qword [rsi + CPUMCTX.dr + 3*8], rax
1073 mov rax, dr6
1074 mov qword [rsi + CPUMCTX.dr + 6*8], rax
1075 mov eax, VINF_SUCCESS
1076 ret
1077ENDPROC HMRCSaveGuestDebug64
1078
1079;/**
1080; * Dummy callback handler
1081; *
1082; * @returns VBox status code
1083; * @param param1 Parameter 1 [rsp+8]
1084; * @param param2 Parameter 2 [rsp+12]
1085; * @param param3 Parameter 3 [rsp+16]
1086; * @param param4 Parameter 4 [rsp+20]
1087; * @param param5 Parameter 5 [rsp+24]
1088; * @param pCtx Guest context [rsi]
1089; */
1090BEGINPROC HMRCTestSwitcher64
1091 mov eax, [rsp+8]
1092 ret
1093ENDPROC HMRCTestSwitcher64
1094
1095
1096
1097
1098;
1099;
1100; Back to switcher code.
1101; Back to switcher code.
1102; Back to switcher code.
1103;
1104;
1105
1106
1107
1108;;
1109 ; Trampoline for doing a call when starting the hypervisor execution.
1110;
1111; Push any arguments to the routine.
1112; Push the argument frame size (cArg * 4).
1113; Push the call target (_cdecl convention).
1114; Push the address of this routine.
1115;
1116;
1117BITS 64
1118ALIGNCODE(16)
1119BEGINPROC vmmRCCallTrampoline
1120%ifdef DEBUG_STUFF
1121 COM64_S_CHAR 'c'
1122 COM64_S_CHAR 't'
1123 COM64_S_CHAR '!'
1124%endif
1125 int3
1126ENDPROC vmmRCCallTrampoline
1127
1128
1129;;
1130; The C interface.
1131;
1132BITS 64
1133ALIGNCODE(16)
1134BEGINPROC vmmRCToHost
1135%ifdef DEBUG_STUFF
1136 push rsi
1137 COM_NEWLINE
1138 COM_CHAR 'b'
1139 COM_CHAR 'a'
1140 COM_CHAR 'c'
1141 COM_CHAR 'k'
1142 COM_CHAR '!'
1143 COM_NEWLINE
1144 pop rsi
1145%endif
1146 int3
1147ENDPROC vmmRCToHost
1148
1149;;
1150; vmmRCToHostAsm
1151;
1152; This is an alternative entry point which we'll be using
1153 ; when we have saved the guest state already or we haven't
1154; been messing with the guest at all.
1155;
1156; @param eax Return code.
1157; @uses eax, edx, ecx (or it may use them in the future)
1158;
1159BITS 64
1160ALIGNCODE(16)
1161BEGINPROC vmmRCToHostAsm
1162NAME(vmmRCToHostAsmNoReturn):
1163 ;; We're still in the intermediate memory context!
1164
1165 ;;
1166 ;; Switch to compatibility mode, placing ourselves in identity mapped code.
1167 ;;
1168 jmp far [NAME(fpIDEnterTarget) wrt rip]
1169
1170; 16:32 Pointer to IDEnterTarget.
1171NAME(fpIDEnterTarget):
1172 FIXUP FIX_ID_32BIT, 0, NAME(IDExitTarget) - NAME(Start)
1173dd 0
1174 FIXUP FIX_HYPER_CS, 0
1175dd 0
1176
1177 ; We're now on identity mapped pages!
1178ALIGNCODE(16)
1179GLOBALNAME IDExitTarget
1180BITS 32
1181 DEBUG32_CHAR('1')
1182
1183 ; 1. Deactivate long mode by turning off paging.
1184 mov ebx, cr0
1185 and ebx, ~X86_CR0_PG
1186 mov cr0, ebx
1187 DEBUG32_CHAR('2')
1188
1189 ; 2. Load intermediate page table.
1190 FIXUP SWITCHER_FIX_INTER_CR3_HC, 1
1191 mov edx, 0ffffffffh
1192 mov cr3, edx
1193 DEBUG32_CHAR('3')
1194
1195 ; 3. Disable long mode.
1196 mov ecx, MSR_K6_EFER
1197 rdmsr
1198 DEBUG32_CHAR('5')
1199 and eax, ~(MSR_K6_EFER_LME)
1200 wrmsr
1201 DEBUG32_CHAR('6')
1202
1203%ifndef NEED_PAE_ON_HOST
1204 ; 3b. Disable PAE.
1205 mov eax, cr4
1206 and eax, ~X86_CR4_PAE
1207 mov cr4, eax
1208 DEBUG32_CHAR('7')
1209%endif
1210
1211 ; 4. Enable paging.
1212 or ebx, X86_CR0_PG
1213 mov cr0, ebx
1214 jmp short just_a_jump
1215just_a_jump:
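 ; (the short jump above, right after toggling CR0.PG, is the traditional
 ; way of making sure the following instructions are fetched under the new
 ; paging mode)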
1216 DEBUG32_CHAR('8')
1217
1218 ;;
1219 ;; 5. Jump to guest code mapping of the code and load the Hypervisor CS.
1220 ;;
1221 FIXUP FIX_ID_2_HC_NEAR_REL, 1, NAME(ICExitTarget) - NAME(Start)
1222 jmp near NAME(ICExitTarget)
1223
1224 ;;
1225 ;; When we arrive at this label we're at the
1226 ;; intermediate mapping of the switching code.
1227 ;;
1228BITS 32
1229ALIGNCODE(16)
1230GLOBALNAME ICExitTarget
1231 DEBUG32_CHAR('8')
1232
1233 ; load the hypervisor data selector into ds & es
1234 FIXUP FIX_HYPER_DS, 1
1235 mov eax, 0ffffh
1236 mov ds, eax
1237 mov es, eax
1238
1239 FIXUP FIX_GC_CPUM_OFF, 1, 0
1240 mov edx, 0ffffffffh
1241 CPUMCPU_FROM_CPUM_WITH_OFFSET edx, ebp
1242 mov esi, [edx + CPUMCPU.Host.cr3]
1243 mov cr3, esi
1244
1245 ;; now we're in host memory context, let's restore regs
1246 FIXUP FIX_HC_CPUM_OFF, 1, 0
1247 mov edx, 0ffffffffh
1248 CPUMCPU_FROM_CPUM_WITH_OFFSET edx, ebp
1249
1250 ; restore the host EFER
1251 mov ebx, edx
1252 mov ecx, MSR_K6_EFER
1253 mov eax, [ebx + CPUMCPU.Host.efer]
1254 mov edx, [ebx + CPUMCPU.Host.efer + 4]
1255 wrmsr
1256 mov edx, ebx
1257
1258 ; activate host gdt and idt
1259 lgdt [edx + CPUMCPU.Host.gdtr]
1260 DEBUG32_CHAR('0')
1261 lidt [edx + CPUMCPU.Host.idtr]
1262 DEBUG32_CHAR('1')
1263
1264 ; Restore TSS selector; must mark it as not busy before using ltr (!)
1265 ; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p)
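 ; (ltr raises #GP if the referenced TSS descriptor is already marked busy)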
1266 movzx eax, word [edx + CPUMCPU.Host.tr] ; eax <- TR
1267 and al, 0F8h ; mask away TI and RPL bits, get descriptor offset.
1268 add eax, [edx + CPUMCPU.Host.gdtr + 2] ; eax <- GDTR.address + descriptor offset.
1269 and dword [eax + 4], ~0200h ; clear busy flag (2nd type2 bit)
1270 ltr word [edx + CPUMCPU.Host.tr]
1271
1272 ; activate ldt
1273 DEBUG32_CHAR('2')
1274 lldt [edx + CPUMCPU.Host.ldtr]
1275
1276 ; Restore segment registers
1277 mov eax, [edx + CPUMCPU.Host.ds]
1278 mov ds, eax
1279 mov eax, [edx + CPUMCPU.Host.es]
1280 mov es, eax
1281 mov eax, [edx + CPUMCPU.Host.fs]
1282 mov fs, eax
1283 mov eax, [edx + CPUMCPU.Host.gs]
1284 mov gs, eax
1285 ; restore stack
1286 lss esp, [edx + CPUMCPU.Host.esp]
1287
1288 ; Control registers.
1289 mov ecx, [edx + CPUMCPU.Host.cr4]
1290 mov cr4, ecx
1291 mov ecx, [edx + CPUMCPU.Host.cr0]
1292 mov cr0, ecx
1293 ;mov ecx, [edx + CPUMCPU.Host.cr2] ; assumes this is a waste of time.
1294 ;mov cr2, ecx
1295
1296 ; restore general registers.
1297 mov edi, [edx + CPUMCPU.Host.edi]
1298 mov esi, [edx + CPUMCPU.Host.esi]
1299 mov ebx, [edx + CPUMCPU.Host.ebx]
1300 mov ebp, [edx + CPUMCPU.Host.ebp]
1301
1302 ; store the return code in eax
1303 mov eax, [edx + CPUMCPU.u32RetCode]
1304 retf
1305ENDPROC vmmRCToHostAsm
1306
1307
1308GLOBALNAME End
1309;
1310; The description string (in the text section).
1311;
1312NAME(Description):
1313 db SWITCHER_DESCRIPTION
1314 db 0
1315
1316extern NAME(Relocate)
1317
1318;
1319; End the fixup records.
1320;
1321BEGINDATA
1322 db FIX_THE_END ; final entry.
1323GLOBALNAME FixupsEnd
1324
1325;;
1326; The switcher definition structure.
1327ALIGNDATA(16)
1328GLOBALNAME Def
1329 istruc VMMSWITCHERDEF
1330 at VMMSWITCHERDEF.pvCode, RTCCPTR_DEF NAME(Start)
1331 at VMMSWITCHERDEF.pvFixups, RTCCPTR_DEF NAME(Fixups)
1332 at VMMSWITCHERDEF.pszDesc, RTCCPTR_DEF NAME(Description)
1333 at VMMSWITCHERDEF.pfnRelocate, RTCCPTR_DEF NAME(Relocate)
1334 at VMMSWITCHERDEF.enmType, dd SWITCHER_TYPE
1335 at VMMSWITCHERDEF.cbCode, dd NAME(End) - NAME(Start)
1336 at VMMSWITCHERDEF.offR0ToRawMode, dd NAME(vmmR0ToRawMode) - NAME(Start)
1337 at VMMSWITCHERDEF.offRCToHost, dd NAME(vmmRCToHost) - NAME(Start)
1338 at VMMSWITCHERDEF.offRCCallTrampoline, dd NAME(vmmRCCallTrampoline) - NAME(Start)
1339 at VMMSWITCHERDEF.offRCToHostAsm, dd NAME(vmmRCToHostAsm) - NAME(Start)
1340 at VMMSWITCHERDEF.offRCToHostAsmNoReturn, dd NAME(vmmRCToHostAsmNoReturn) - NAME(Start)
1341 ; disasm help
1342 at VMMSWITCHERDEF.offHCCode0, dd 0
1343 at VMMSWITCHERDEF.cbHCCode0, dd NAME(IDEnterTarget) - NAME(Start)
1344 at VMMSWITCHERDEF.offHCCode1, dd NAME(ICExitTarget) - NAME(Start)
1345 at VMMSWITCHERDEF.cbHCCode1, dd NAME(End) - NAME(ICExitTarget)
1346 at VMMSWITCHERDEF.offIDCode0, dd NAME(IDEnterTarget) - NAME(Start)
1347 at VMMSWITCHERDEF.cbIDCode0, dd NAME(ICEnterTarget) - NAME(IDEnterTarget)
1348 at VMMSWITCHERDEF.offIDCode1, dd NAME(IDExitTarget) - NAME(Start)
1349 at VMMSWITCHERDEF.cbIDCode1, dd NAME(ICExitTarget) - NAME(Start)
1350 at VMMSWITCHERDEF.offGCCode, dd 0
1351 at VMMSWITCHERDEF.cbGCCode, dd 0
1352
1353 iend
1354