
source: vbox/trunk/src/VBox/VMM/VMMSwitcher/LegacyandAMD64.mac @ 77481

Last change on this file since 77481 was 77481, checked in by vboxsync, 6 years ago:

VMM/IEM: Nested VMX: bugref:9180 Renamed VMCSCACHE to VMXVMCSBATCHCACHE to better reflect its nature and use. This is a generic cache that uses array/linear lookup for VMCS fields and is currently used solely for the 64-bit-on-32-bit host case.

1; $Id: LegacyandAMD64.mac 77481 2019-02-27 12:59:58Z vboxsync $
2;; @file
3; VMM - World Switchers, 32-bit to AMD64 intermediate context.
4;
5; This is used for running 64-bit guests on 32-bit hosts, not
6; normal raw-mode. All the code involved is contained in this
7; file.
8;
9
10;
11; Copyright (C) 2006-2019 Oracle Corporation
12;
13; This file is part of VirtualBox Open Source Edition (OSE), as
14; available from http://www.virtualbox.org. This file is free software;
15; you can redistribute it and/or modify it under the terms of the GNU
16; General Public License (GPL) as published by the Free Software
17; Foundation, in version 2 as it comes in the "COPYING" file of the
18; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
19; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
20;
21
22
23;*******************************************************************************
24;* Defined Constants And Macros *
25;*******************************************************************************
26;; @note These values are from the HM64ON32OP enum in hm.h.
27%define HM64ON32OP_VMXRCStartVM64 1
28%define HM64ON32OP_SVMRCVMRun64 2
29%define HM64ON32OP_HMRCSaveGuestFPU64 3
30%define HM64ON32OP_HMRCSaveGuestDebug64 4
31%define HM64ON32OP_HMRCTestSwitcher64 5
32
33;;
34; This macro is used for storing a debug code in a CMOS location.
35;
36 ; If we triple fault or something, the debug code can be retrieved and we
37 ; might have a clue as to where the problem occurred. The code is currently
38 ; using CMOS register 3 in the 2nd bank as this _seems_ to be unused on my
39 ; Extreme4 X79 ASRock mainboard.
40;
41; @param %1 The debug code (byte)
42; @note Trashes AL.
43;
44%macro DEBUG_CMOS_TRASH_AL 1
45%ifdef VBOX_WITH_64ON32_CMOS_DEBUG
46 mov al, 3
47 out 72h, al
48 mov al, %1
49 out 73h, al
50 in al, 73h
51%endif
52%endmacro
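 ; A minimal usage sketch (only active when VBOX_WITH_64ON32_CMOS_DEBUG is defined;
 ; the value 42h below is just an example):
 ;       DEBUG_CMOS_TRASH_AL 42h     ; leaves 42h in CMOS bank 2, register 3
 ; Port 72h selects the register in the upper CMOS bank and port 73h is the matching
 ; data port; the final 'in' reads the value straight back, presumably to make sure
 ; the write has taken effect before continuing.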
53
54;;
55; Version of DEBUG_CMOS_TRASH_AL that saves and restores RAX on the stack and
56; therefore doesn't trash any registers.
57;
58%macro DEBUG_CMOS_STACK64 1
59%ifdef VBOX_WITH_64ON32_CMOS_DEBUG
60 push rax
61 DEBUG_CMOS_TRASH_AL %1
62 pop rax
63%endif
64%endmacro
65
66;;
67; 32-bit version of the above that saves and restores EAX on the stack and
68; therefore doesn't trash any registers.
69;
70%macro DEBUG_CMOS_STACK32 1
71%ifdef VBOX_WITH_64ON32_CMOS_DEBUG
72 push eax
73 DEBUG_CMOS_TRASH_AL %1
74 pop eax
75%endif
76%endmacro
77
78
79;; Stubs for making OS/2 compile (though, not work).
80%ifdef RT_OS_OS2 ;; @todo fix OMF support in yasm and kick nasm out completely.
81 %macro vmwrite 2,
82 int3
83 %endmacro
84 %define vmlaunch int3
85 %define vmresume int3
86 %define vmsave int3
87 %define vmload int3
88 %define vmrun int3
89 %define clgi int3
90 %define stgi int3
91 %macro invlpga 2,
92 int3
93 %endmacro
94%endif
95
96;; Debug options
97;%define DEBUG_STUFF 1
98;%define STRICT_IF 1
99
100
101;*******************************************************************************
102;* Header Files *
103;*******************************************************************************
104%include "VBox/asmdefs.mac"
105%include "iprt/x86.mac"
106%include "VBox/err.mac"
107%include "VBox/apic.mac"
108
109%include "VBox/vmm/cpum.mac"
110%include "VBox/vmm/stam.mac"
111%include "VBox/vmm/vm.mac"
112%include "VBox/vmm/hm_vmx.mac"
113%include "CPUMInternal.mac"
114%include "HMInternal.mac"
115%include "VMMSwitcher.mac"
116
117
118;
119; Start the fixup records
120; We collect the fixups in the .data section as we go along
121; It is therefore VITAL that no-one is using the .data section
122; for anything else between 'Start' and 'End'.
123;
124BEGINDATA
125GLOBALNAME Fixups
126
127
128
129BEGINCODE
130GLOBALNAME Start
131
132BITS 32
133
134;;
135; The C interface.
136; @param [esp + 04h] Param 1 - VM handle
137; @param [esp + 08h] Param 2 - Offset from VM::CPUM to the CPUMCPU
138; structure for the calling EMT.
139;
140BEGINPROC vmmR0ToRawMode
141%ifdef DEBUG_STUFF
142 COM32_S_NEWLINE
143 COM32_S_CHAR '^'
144%endif
145
146%ifdef VBOX_WITH_STATISTICS
147 ;
148 ; Switcher stats.
149 ;
150 FIXUP FIX_HC_VM_OFF, 1, VM.StatSwitcherToGC
151 mov edx, 0ffffffffh
152 STAM_PROFILE_ADV_START edx
153%endif
154
155 push ebp
156 mov ebp, [esp + 12] ; CPUMCPU offset
157
158 ; turn off interrupts
159 pushf
160 cli
161 ;DEBUG_CMOS_STACK32 10h
162
163 ;
164 ; Call worker.
165 ;
166 FIXUP FIX_HC_CPUM_OFF, 1, 0
167 mov edx, 0ffffffffh
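 ; (The 0ffffffffh above is only a placeholder; the FIXUP record causes the switcher
 ; relocation code to patch in the real address when the module is installed. The
 ; same FIXUP + 0ffffffffh pattern recurs throughout this file.)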
168 push cs ; allow for far return and restore cs correctly.
169 call NAME(vmmR0ToRawModeAsm)
170
171%ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
172 ; Restore blocked Local APIC NMI vectors
173 ; Do this here to ensure the host CS is already restored
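 ; fApicDisVectors bit layout, as established by the masking code in
 ; vmmR0ToRawModeAsm further down:
 ;   bit 0 = LINT0, bit 1 = LINT1, bit 2 = perf counter LVT,
 ;   bit 3 = thermal LVT, bit 4 = CMCI LVT.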
174 mov ecx, [edx + CPUMCPU.fApicDisVectors]
175 test ecx, ecx
176 jz gth_apic_done
177 cmp byte [edx + CPUMCPU.fX2Apic], 1
178 je gth_x2apic
179
180 ; Legacy xAPIC mode:
181 mov edx, [edx + CPUMCPU.pvApicBase]
182 shr ecx, 1
183 jnc gth_nolint0
184 and dword [edx + APIC_REG_LVT_LINT0], ~APIC_REG_LVT_MASKED
185gth_nolint0:
186 shr ecx, 1
187 jnc gth_nolint1
188 and dword [edx + APIC_REG_LVT_LINT1], ~APIC_REG_LVT_MASKED
189gth_nolint1:
190 shr ecx, 1
191 jnc gth_nopc
192 and dword [edx + APIC_REG_LVT_PC], ~APIC_REG_LVT_MASKED
193gth_nopc:
194 shr ecx, 1
195 jnc gth_notherm
196 and dword [edx + APIC_REG_LVT_THMR], ~APIC_REG_LVT_MASKED
197gth_notherm:
198 shr ecx, 1
199 jnc gth_nocmci
200 and dword [edx + APIC_REG_LVT_CMCI], ~APIC_REG_LVT_MASKED
201gth_nocmci:
202 jmp gth_apic_done
203
204 ; x2APIC mode:
205gth_x2apic:
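 ; In x2APIC mode the LVT registers are MSRs rather than MMIO: the MSR index is
 ; MSR_IA32_X2APIC_START (800h) plus the xAPIC MMIO offset divided by 16, which is
 ; what the '>> 4' in the constants below computes.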
206 ;DEBUG_CMOS_STACK32 7ch
207 push eax ; save eax
208 push ebx ; save it for fApicDisVectors
209 push edx ; save edx just in case.
210 mov ebx, ecx ; ebx = fApicDisVectors, ecx free for MSR use
211 shr ebx, 1
212 jnc gth_x2_nolint0
213 mov ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_LINT0 >> 4)
214 rdmsr
215 and eax, ~APIC_REG_LVT_MASKED
216 wrmsr
217gth_x2_nolint0:
218 shr ebx, 1
219 jnc gth_x2_nolint1
220 mov ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_LINT1 >> 4)
221 rdmsr
222 and eax, ~APIC_REG_LVT_MASKED
223 wrmsr
224gth_x2_nolint1:
225 shr ebx, 1
226 jnc gth_x2_nopc
227 mov ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_PC >> 4)
228 rdmsr
229 and eax, ~APIC_REG_LVT_MASKED
230 wrmsr
231gth_x2_nopc:
232 shr ebx, 1
233 jnc gth_x2_notherm
234 mov ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_THMR >> 4)
235 rdmsr
236 and eax, ~APIC_REG_LVT_MASKED
237 wrmsr
238gth_x2_notherm:
239 shr ebx, 1
240 jnc gth_x2_nocmci
241 mov ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_CMCI >> 4)
242 rdmsr
243 and eax, ~APIC_REG_LVT_MASKED
244 wrmsr
245gth_x2_nocmci:
246 pop edx
247 pop ebx
248 pop eax
249
250gth_apic_done:
251%endif
252
253 ; restore original flags
254 ;DEBUG_CMOS_STACK32 7eh
255 popf
256 pop ebp
257
258%ifdef VBOX_WITH_STATISTICS
259 ;
260 ; Switcher stats.
261 ;
262 FIXUP FIX_HC_VM_OFF, 1, VM.StatSwitcherToHC
263 mov edx, 0ffffffffh
264 STAM_PROFILE_ADV_STOP edx
265%endif
266
267 ;DEBUG_CMOS_STACK32 7fh
268 ret
269
270ENDPROC vmmR0ToRawMode
271
272; *****************************************************************************
273; vmmR0ToRawModeAsm
274;
275; Phase one of the switch from host to guest context (host MMU context)
276;
277; INPUT:
278; - edx virtual address of CPUM structure (valid in host context)
279; - ebp offset of the CPUMCPU structure relative to CPUM.
280;
281; USES/DESTROYS:
282; - eax, ecx, edx, esi
283;
284; ASSUMPTION:
285; - current CS and DS selectors are wide open
286;
287; *****************************************************************************
288ALIGNCODE(16)
289BEGINPROC vmmR0ToRawModeAsm
290 ;;
291 ;; Save CPU host context
292 ;; Skip eax, edx and ecx as these are not preserved over calls.
293 ;;
294 CPUMCPU_FROM_CPUM_WITH_OFFSET edx, ebp
295%ifdef VBOX_WITH_CRASHDUMP_MAGIC
296 ; phys address of scratch page
297 mov eax, dword [edx + CPUMCPU.Guest.dr + 4*8]
298 mov cr2, eax
299
300 mov dword [edx + CPUMCPU.Guest.dr + 4*8], 1
301%endif
302
303 ; general registers.
304 mov [edx + CPUMCPU.Host.ebx], ebx
305 mov [edx + CPUMCPU.Host.edi], edi
306 mov [edx + CPUMCPU.Host.esi], esi
307 mov [edx + CPUMCPU.Host.esp], esp
308 mov [edx + CPUMCPU.Host.ebp], ebp ; offCpumCpu!
309 ; selectors.
310 mov [edx + CPUMCPU.Host.ds], ds
311 mov [edx + CPUMCPU.Host.es], es
312 mov [edx + CPUMCPU.Host.fs], fs
313 mov [edx + CPUMCPU.Host.gs], gs
314 mov [edx + CPUMCPU.Host.ss], ss
315 ; special registers.
316 DEBUG32_S_CHAR('s')
317 DEBUG32_S_CHAR(';')
318 sldt [edx + CPUMCPU.Host.ldtr]
319 sidt [edx + CPUMCPU.Host.idtr]
320 sgdt [edx + CPUMCPU.Host.gdtr]
321 str [edx + CPUMCPU.Host.tr]
322
323%ifdef VBOX_WITH_CRASHDUMP_MAGIC
324 mov dword [edx + CPUMCPU.Guest.dr + 4*8], 2
325%endif
326
327%ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
328 ; Block Local APIC NMI vectors
329 DEBUG32_S_CHAR('f')
330 DEBUG32_S_CHAR(';')
331 cmp byte [edx + CPUMCPU.fX2Apic], 1
332 je htg_x2apic
333
334 ; Legacy xAPIC mode. No write completion required when writing to the
335 ; LVT registers as we have mapped the APIC pages as non-cacheable and
336 ; the MMIO is CPU-local.
337 mov ebx, [edx + CPUMCPU.pvApicBase]
338 or ebx, ebx
339 jz htg_apic_done
340 mov eax, [ebx + APIC_REG_LVT_LINT0]
341 mov ecx, eax
342 and ecx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
343 cmp ecx, APIC_REG_LVT_MODE_NMI
344 jne htg_nolint0
345 or edi, 0x01
346 or eax, APIC_REG_LVT_MASKED
347 mov [ebx + APIC_REG_LVT_LINT0], eax
348htg_nolint0:
349 mov eax, [ebx + APIC_REG_LVT_LINT1]
350 mov ecx, eax
351 and ecx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
352 cmp ecx, APIC_REG_LVT_MODE_NMI
353 jne htg_nolint1
354 or edi, 0x02
355 or eax, APIC_REG_LVT_MASKED
356 mov [ebx + APIC_REG_LVT_LINT1], eax
357htg_nolint1:
358 mov eax, [ebx + APIC_REG_LVT_PC]
359 mov ecx, eax
360 and ecx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
361 cmp ecx, APIC_REG_LVT_MODE_NMI
362 jne htg_nopc
363 or edi, 0x04
364 or eax, APIC_REG_LVT_MASKED
365 mov [ebx + APIC_REG_LVT_PC], eax
366htg_nopc:
367 mov eax, [ebx + APIC_REG_VERSION]
368 shr eax, 16
369 cmp al, 5
370 jb htg_notherm
371 je htg_nocmci
372 mov eax, [ebx + APIC_REG_LVT_CMCI]
373 mov ecx, eax
374 and ecx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
375 cmp ecx, APIC_REG_LVT_MODE_NMI
376 jne htg_nocmci
377 or edi, 0x10
378 or eax, APIC_REG_LVT_MASKED
379 mov [ebx + APIC_REG_LVT_CMCI], eax
380htg_nocmci:
381 mov eax, [ebx + APIC_REG_LVT_THMR]
382 mov ecx, eax
383 and ecx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
384 cmp ecx, APIC_REG_LVT_MODE_NMI
385 jne htg_notherm
386 or edi, 0x08
387 or eax, APIC_REG_LVT_MASKED
388 mov [ebx + APIC_REG_LVT_THMR], eax
389htg_notherm:
390 mov [edx + CPUMCPU.fApicDisVectors], edi
391 jmp htg_apic_done
392
393 ; x2APIC mode:
394htg_x2apic:
395 mov esi, edx ; Save edx.
396 xor edi, edi ; fApicDisVectors
397
398 mov ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_LINT0 >> 4)
399 rdmsr
400 mov ebx, eax
401 and ebx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
402 cmp ebx, APIC_REG_LVT_MODE_NMI
403 jne htg_x2_nolint0
404 or edi, 0x01
405 or eax, APIC_REG_LVT_MASKED
406 wrmsr
407htg_x2_nolint0:
408 mov ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_LINT1 >> 4)
409 rdmsr
410 mov ebx, eax
411 and ebx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
412 cmp ebx, APIC_REG_LVT_MODE_NMI
413 jne htg_x2_nolint1
414 or edi, 0x02
415 or eax, APIC_REG_LVT_MASKED
416 wrmsr
417htg_x2_nolint1:
418 mov ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_PC >> 4)
419 rdmsr
420 mov ebx, eax
421 and ebx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
422 cmp ebx, APIC_REG_LVT_MODE_NMI
423 jne htg_x2_nopc
424 or edi, 0x04
425 or eax, APIC_REG_LVT_MASKED
426 wrmsr
427htg_x2_nopc:
428 mov ecx, MSR_IA32_X2APIC_START + (APIC_REG_VERSION >> 4)
429 rdmsr
430 shr eax, 16
431 cmp al, 5
432 jb htg_x2_notherm
433 je htg_x2_nocmci
434 mov ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_CMCI >> 4)
435 rdmsr
436 mov ebx, eax
437 and ebx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
438 cmp ebx, APIC_REG_LVT_MODE_NMI
439 jne htg_x2_nocmci
440 or edi, 0x10
441 or eax, APIC_REG_LVT_MASKED
442 wrmsr
443htg_x2_nocmci:
444 mov ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_THMR >> 4)
445 rdmsr
446 mov ebx, eax
447 and ebx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
448 cmp ebx, APIC_REG_LVT_MODE_NMI
449 jne htg_x2_notherm
450 or edi, 0x08
451 or eax, APIC_REG_LVT_MASKED
452 wrmsr
453htg_x2_notherm:
454 mov edx, esi ; Restore edx.
455 mov [edx + CPUMCPU.fApicDisVectors], edi
456
457htg_apic_done:
458%endif
459
460 ; control registers.
461 mov eax, cr0
462 mov [edx + CPUMCPU.Host.cr0], eax
463 ; Skip cr2; assume the host OS doesn't stuff things in cr2. (safe)
464 mov eax, cr3
465 mov [edx + CPUMCPU.Host.cr3], eax
466 mov esi, cr4 ; esi = cr4, we'll modify it further down.
467 mov [edx + CPUMCPU.Host.cr4], esi
468
469 DEBUG32_S_CHAR('c')
470 DEBUG32_S_CHAR(';')
471
472 ; save the host EFER msr
473 mov ebx, edx
474 mov ecx, MSR_K6_EFER
475 rdmsr
476 mov [ebx + CPUMCPU.Host.efer], eax
477 mov [ebx + CPUMCPU.Host.efer + 4], edx
478 mov edx, ebx
479 DEBUG32_S_CHAR('e')
480 DEBUG32_S_CHAR(';')
481
482%ifdef VBOX_WITH_CRASHDUMP_MAGIC
483 mov dword [edx + CPUMCPU.Guest.dr + 4*8], 3
484%endif
485
486 ; Load the new GDT so we can do a far jump after going into 64-bit mode
487 ;DEBUG_CMOS_STACK32 16h
488 lgdt [edx + CPUMCPU.Hyper.gdtr]
489
490 DEBUG32_S_CHAR('g')
491 DEBUG32_S_CHAR('!')
492%ifdef VBOX_WITH_CRASHDUMP_MAGIC
493 mov dword [edx + CPUMCPU.Guest.dr + 4*8], 4
494%endif
495
496 ;;
497 ;; Clean up CR4. X86_CR4_PGE, X86_CR4_PCE, X86_CR4_PCIDE (not really
498 ;; relevant for 32-bit, but whatever) and X86_CR4_VMXE must be cleared.
499 ;;
500 and esi, X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE | X86_CR4_PSE | X86_CR4_PAE \
501 | X86_CR4_MCE | X86_CR4_OSFXSR | X86_CR4_OSXMMEEXCPT | X86_CR4_SMXE | X86_CR4_OSXSAVE
502 mov cr4, esi
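 ; (Clearing CR4.PGE above also flushes global TLB entries, which presumably is
 ; welcome before the intermediate CR3 is loaded below.)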
503
504 ;;
505 ;; Load Intermediate memory context.
506 ;;
507 FIXUP SWITCHER_FIX_INTER_CR3_HC, 1
508 mov eax, 0ffffffffh
509 mov cr3, eax
510 DEBUG32_CHAR('?')
511%ifdef VBOX_WITH_64ON32_CMOS_DEBUG
512 DEBUG_CMOS_TRASH_AL 17h
513%endif
514
515 ;;
516 ;; Jump to identity mapped location
517 ;;
518 FIXUP FIX_HC_2_ID_NEAR_REL, 1, NAME(IDEnterTarget) - NAME(Start)
519 jmp near NAME(IDEnterTarget)
520
521
522 ; We're now on identity mapped pages!
523ALIGNCODE(16)
524GLOBALNAME IDEnterTarget
525 DEBUG32_CHAR('1')
526 DEBUG_CMOS_TRASH_AL 19h
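 ; From here on this follows the standard long-mode activation recipe: paging off,
 ; CR4.PAE on, load a PML4-based CR3, set EFER.LME, paging back on. At that point
 ; the CPU is in compatibility mode and the far jump below to a 64-bit code segment
 ; completes the switch to 64-bit mode.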
527
528 ; 1. Disable paging.
529 mov ebx, cr0
530 and ebx, ~X86_CR0_PG
531 mov cr0, ebx
532 DEBUG32_CHAR('2')
533 DEBUG_CMOS_TRASH_AL 1ah
534
535%ifdef VBOX_WITH_CRASHDUMP_MAGIC
536 mov eax, cr2
537 mov dword [eax], 3
538%endif
539
540 ; 2. Enable PAE.
541 mov ecx, cr4
542 or ecx, X86_CR4_PAE
543 mov cr4, ecx
544 DEBUG_CMOS_TRASH_AL 1bh
545
546 ; 3. Load long mode intermediate CR3.
547 FIXUP FIX_INTER_AMD64_CR3, 1
548 mov ecx, 0ffffffffh
549 mov cr3, ecx
550 DEBUG32_CHAR('3')
551 DEBUG_CMOS_TRASH_AL 1ch
552
553%ifdef VBOX_WITH_CRASHDUMP_MAGIC
554 mov eax, cr2
555 mov dword [eax], 4
556%endif
557
558 ; 4. Enable long mode.
559 mov esi, edx
560 mov ecx, MSR_K6_EFER
561 rdmsr
562 FIXUP FIX_EFER_OR_MASK, 1
563 or eax, 0ffffffffh
564 and eax, ~(MSR_K6_EFER_FFXSR) ; turn off fast fxsave/fxrstor (skipping xmm regs)
565 wrmsr
566 mov edx, esi
567 DEBUG32_CHAR('4')
568 DEBUG_CMOS_TRASH_AL 1dh
569
570%ifdef VBOX_WITH_CRASHDUMP_MAGIC
571 mov eax, cr2
572 mov dword [eax], 5
573%endif
574
575 ; 5. Enable paging.
576 or ebx, X86_CR0_PG
577 ; Disable ring 0 write protection too
578 and ebx, ~X86_CR0_WRITE_PROTECT
579 mov cr0, ebx
580 DEBUG32_CHAR('5')
581
582 ; Jump from compatibility mode to 64-bit mode.
583 FIXUP FIX_ID_FAR32_TO_64BIT_MODE, 1, NAME(IDEnter64Mode) - NAME(Start)
584 jmp 0ffffh:0fffffffeh
585
586 ;
587 ; We're in 64-bit mode (ds, ss, es, fs, gs are all bogus).
588BITS 64
589ALIGNCODE(16)
590NAME(IDEnter64Mode):
591 DEBUG64_CHAR('6')
592 DEBUG_CMOS_TRASH_AL 1eh
593 jmp [NAME(pICEnterTarget) wrt rip]
594
595; 64-bit jump target
596NAME(pICEnterTarget):
597FIXUP FIX_HC_64BIT_NOCHECK, 0, NAME(ICEnterTarget) - NAME(Start)
598dq 0ffffffffffffffffh
599
600; 64-bit pCpum address.
601NAME(pCpumIC):
602FIXUP FIX_GC_64_BIT_CPUM_OFF, 0, 0
603dq 0ffffffffffffffffh
604
605%ifdef VBOX_WITH_CRASHDUMP_MAGIC
606NAME(pMarker):
607db 'Switch_marker'
608%endif
609
610 ;
611 ; When we arrive here we're in 64-bit mode in the intermediate context
612 ;
613ALIGNCODE(16)
614GLOBALNAME ICEnterTarget
615 ;DEBUG_CMOS_TRASH_AL 1fh
616 ; Load CPUM pointer into rdx
617 mov rdx, [NAME(pCpumIC) wrt rip]
618 CPUMCPU_FROM_CPUM_WITH_OFFSET edx, ebp
619
620 mov rax, cs
621 mov ds, rax
622 mov es, rax
623
624 ; Invalidate fs & gs
625 mov rax, 0
626 mov fs, rax
627 mov gs, rax
628
629%ifdef VBOX_WITH_CRASHDUMP_MAGIC
630 mov dword [rdx + CPUMCPU.Guest.dr + 4*8], 5
631%endif
632
633 ; Setup stack.
634 DEBUG64_CHAR('7')
635 mov rsp, 0
636 mov eax, [rdx + CPUMCPU.Hyper.ss.Sel]
637 mov ss, ax
638 mov esp, [rdx + CPUMCPU.Hyper.esp]
639
640%ifdef VBOX_WITH_CRASHDUMP_MAGIC
641 mov dword [rdx + CPUMCPU.Guest.dr + 4*8], 6
642%endif
643
644%ifdef VBOX_WITH_64ON32_IDT
645 ; Set up emergency trap handlers.
646 lidt [rdx + CPUMCPU.Hyper.idtr]
647%endif
648
649 DEBUG64_S_CHAR('8')
650
651 ; Check if we need to restore the guest FPU state
652 mov esi, [rdx + CPUMCPU.fUseFlags] ; esi == use flags.
653 test esi, CPUM_SYNC_FPU_STATE
654 jz near htg_fpu_no
655
656%ifdef VBOX_WITH_CRASHDUMP_MAGIC
657 mov dword [rdx + CPUMCPU.Guest.dr + 4*8], 7
658%endif
659
660 mov rax, cr0
661 mov rcx, rax ; save old CR0
662 and rax, ~(X86_CR0_TS | X86_CR0_EM)
663 mov cr0, rax
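 ; (CR0.TS and CR0.EM must be clear here, otherwise the xrstor/fxrstor below would
 ; fault with #NM/#UD; the original CR0 value saved in rcx is put back once the
 ; guest FPU state has been loaded.)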
664
665 mov eax, [rdx + CPUMCPU.Guest.fXStateMask]
666 mov ebx, [rdx + CPUMCPU.Guest.pXStateRC]
667 or eax, eax
668 jz htg_fpu_fxrstor
669 mov r9, rdx
670 mov edx, [rdx + CPUMCPU.Guest.fXStateMask + 4]
671 o64 xrstor [rbx] ; restore the extended guest state (EDX:EAX = fXStateMask)
672 mov rdx, r9
673 jmp htg_fpu_done
674htg_fpu_fxrstor:
675 o64 fxrstor [rbx] ; (use explicit REX prefix, see @bugref{6398})
676htg_fpu_done:
677 mov cr0, rcx ; and restore old CR0 again
678
679 and esi, ~CPUM_SYNC_FPU_STATE
680 or esi, CPUM_USED_FPU_GUEST
681 mov [rdx + CPUMCPU.fUseFlags], esi
682
683htg_fpu_no:
684 ; Check if we need to restore the guest debug state
685 test esi, CPUM_SYNC_DEBUG_REGS_GUEST | CPUM_SYNC_DEBUG_REGS_HYPER
686 jz htg_debug_done
687
688%ifdef VBOX_WITH_CRASHDUMP_MAGIC
689 mov dword [rdx + CPUMCPU.Guest.dr + 4*8], 8
690%endif
691 test esi, CPUM_SYNC_DEBUG_REGS_HYPER
692 jnz htg_debug_hyper
693
694 ; Guest values in DRx, letting the guest access them directly.
695 mov rax, qword [rdx + CPUMCPU.Guest.dr + 0*8]
696 mov dr0, rax
697 mov rax, qword [rdx + CPUMCPU.Guest.dr + 1*8]
698 mov dr1, rax
699 mov rax, qword [rdx + CPUMCPU.Guest.dr + 2*8]
700 mov dr2, rax
701 mov rax, qword [rdx + CPUMCPU.Guest.dr + 3*8]
702 mov dr3, rax
703 mov rax, qword [rdx + CPUMCPU.Guest.dr + 6*8]
704 mov dr6, rax ; not required for AMD-V
705
706 and esi, ~CPUM_SYNC_DEBUG_REGS_GUEST
707 or esi, CPUM_USED_DEBUG_REGS_GUEST
708 mov [rdx + CPUMCPU.fUseFlags], esi
709 jmp htg_debug_done
710
711htg_debug_hyper:
712 ; Combined values in DRx, intercepting all accesses.
713 mov rax, qword [rdx + CPUMCPU.Hyper.dr + 0*8]
714 mov dr0, rax
715 mov rax, qword [rdx + CPUMCPU.Hyper.dr + 1*8]
716 mov dr1, rax
717 mov rax, qword [rdx + CPUMCPU.Hyper.dr + 2*8]
718 mov dr2, rax
719 mov rax, qword [rdx + CPUMCPU.Hyper.dr + 3*8]
720 mov dr3, rax
721 mov rax, qword [rdx + CPUMCPU.Hyper.dr + 6*8]
722 mov dr6, rax ; not required for AMD-V
723
724 and esi, ~CPUM_SYNC_DEBUG_REGS_HYPER
725 or esi, CPUM_USED_DEBUG_REGS_HYPER
726 mov [rdx + CPUMCPU.fUseFlags], esi
727
728htg_debug_done:
729
730%ifdef VBOX_WITH_CRASHDUMP_MAGIC
731 mov dword [rdx + CPUMCPU.Guest.dr + 4*8], 9
732%endif
733
734 ;
735 ; "Call" the specified helper function.
736 ;
737
738 ; parameter for all helper functions (pCtx) (in addition to rdx = pCPUM ofc)
739 DEBUG64_CHAR('9')
740 lea rsi, [rdx + CPUMCPU.Guest]
741 lea rax, [htg_return wrt rip]
742 push rax ; return address
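 ; The 64-bit worker is selected by the HM64ON32OP value the caller stored in
 ; Hyper.eip (see the HM64ON32OP_* defines at the top of this file); the return
 ; address pushed above makes every worker 'ret' straight back to htg_return.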
743
744 ; load the hypervisor function address
745 mov r9, [rdx + CPUMCPU.Hyper.eip]
746 cmp r9d, HM64ON32OP_VMXRCStartVM64
747 jz NAME(VMXRCStartVM64)
748 cmp r9d, HM64ON32OP_SVMRCVMRun64
749 jz NAME(SVMRCVMRun64)
750 cmp r9d, HM64ON32OP_HMRCSaveGuestFPU64
751 jz NAME(HMRCSaveGuestFPU64)
752 cmp r9d, HM64ON32OP_HMRCSaveGuestDebug64
753 jz NAME(HMRCSaveGuestDebug64)
754 cmp r9d, HM64ON32OP_HMRCTestSwitcher64
755 jz NAME(HMRCTestSwitcher64)
756 mov eax, VERR_HM_INVALID_HM64ON32OP
757htg_return:
758 DEBUG64_CHAR('r')
759
760 ; Load CPUM pointer into rdx
761 mov rdx, [NAME(pCpumIC) wrt rip]
762 CPUMCPU_FROM_CPUM_WITH_OFFSET edx, ebp
763
764%ifdef VBOX_WITH_CRASHDUMP_MAGIC
765 mov dword [rdx + CPUMCPU.Guest.dr + 4*8], 10
766%endif
767
768 ; Save the return code
769 mov dword [rdx + CPUMCPU.u32RetCode], eax
770
771 ; now let's switch back
772 jmp NAME(vmmRCToHostAsm) ; rax = returncode.
773
774ENDPROC vmmR0ToRawModeAsm
775
776
777
778
779;
780;
781; HM code (used to be HMRCA.asm at one point).
782; HM code (used to be HMRCA.asm at one point).
783; HM code (used to be HMRCA.asm at one point).
784;
785;
786
787;; @def MYPUSHSEGS
788; Macro saving all segment registers on the stack.
789; @param 1 full width register name
790%macro MYPUSHSEGS 1
791 mov %1, es
792 push %1
793 mov %1, ds
794 push %1
795%endmacro
796
797;; @def MYPOPSEGS
798; Macro restoring all segment registers on the stack
799; @param 1 full width register name
800%macro MYPOPSEGS 1
801 pop %1
802 mov ds, %1
803 pop %1
804 mov es, %1
805%endmacro
806
807
808;/**
809; * Prepares for and executes VMLAUNCH/VMRESUME (64-bit guest mode)
810; *
811; * @returns VBox status code
812; * @param HCPhysCpuPage VMXON physical address [rbp+10h]
813; * @param HCPhysVmcs VMCS physical address [rbp+18h]
814; * @param pCache VMCS batch cache [rbp+20h]
815; * @param pVM The cross context VM structure. [rbp+28h]
816; * @param pVCpu The cross context virtual CPU structure. [rbp+30h]
817; * @param pCtx Guest context (rsi)
818; */
819BEGINPROC VMXRCStartVM64
820 push rbp
821 mov rbp, rsp
822 DEBUG_CMOS_STACK64 20h
823
824 ; Make sure VT-x instructions are allowed.
825 mov rax, cr4
826 or rax, X86_CR4_VMXE
827 mov cr4, rax
828
829 ; Enter VMX Root Mode.
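 ; VMX instructions report failure through the flags: CF=1 means VMfailInvalid
 ; (no current VMCS), ZF=1 means VMfailValid (error number in the VM-instruction
 ; error field). That is what the jnc/jnz pairs below and the jc/jz after
 ; vmlaunch are checking.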
830 vmxon [rbp + 8 + 8]
831 jnc .vmxon_success
832 mov rax, VERR_VMX_INVALID_VMXON_PTR
833 jmp .vmstart64_vmxon_failed
834
835.vmxon_success:
836 jnz .vmxon_success2
837 mov rax, VERR_VMX_VMXON_FAILED
838 jmp .vmstart64_vmxon_failed
839
840.vmxon_success2:
841 ; Activate the VMCS pointer
842 vmptrld [rbp + 16 + 8]
843 jnc .vmptrld_success
844 mov rax, VERR_VMX_INVALID_VMCS_PTR
845 jmp .vmstart64_vmxoff_end
846
847.vmptrld_success:
848 jnz .vmptrld_success2
849 mov rax, VERR_VMX_VMPTRLD_FAILED
850 jmp .vmstart64_vmxoff_end
851
852.vmptrld_success2:
853
854 ; Save the VMCS pointer on the stack
855 push qword [rbp + 16 + 8];
856
857 ; Save segment registers.
858 MYPUSHSEGS rax
859
860%ifdef VMX_USE_CACHED_VMCS_ACCESSES
861 ; Flush the VMCS write cache first (before any other vmreads/vmwrites!).
862 mov rbx, [rbp + 24 + 8] ; pCache
863
864 %ifdef VBOX_WITH_CRASHDUMP_MAGIC
865 mov qword [rbx + VMXVMCSBATCHCACHE.uPos], 2
866 %endif
867
868 %ifdef DEBUG
869 mov rax, [rbp + 8 + 8] ; HCPhysCpuPage
870 mov [rbx + VMXVMCSBATCHCACHE.TestIn.HCPhysCpuPage], rax
871 mov rax, [rbp + 16 + 8] ; HCPhysVmcs
872 mov [rbx + VMXVMCSBATCHCACHE.TestIn.HCPhysVmcs], rax
873 mov [rbx + VMXVMCSBATCHCACHE.TestIn.pCache], rbx
874 mov [rbx + VMXVMCSBATCHCACHE.TestIn.pCtx], rsi
875 %endif
876
877 mov ecx, [rbx + VMXVMCSBATCHCACHE.Write.cValidEntries]
878 cmp ecx, 0
879 je .no_cached_writes
880 mov rdx, rcx
881 mov rcx, 0
882 jmp .cached_write
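 ; Each batch cache entry is a (VMCS field encoding, 64-bit value) pair: aField[]
 ; holds the 32-bit field encodings and aFieldVal[] the values, so the loop below
 ; simply replays the queued vmwrites into the now-current VMCS.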
883
884ALIGN(16)
885.cached_write:
886 mov eax, [rbx + VMXVMCSBATCHCACHE.Write.aField + rcx*4]
887 vmwrite rax, qword [rbx + VMXVMCSBATCHCACHE.Write.aFieldVal + rcx*8]
888 inc rcx
889 cmp rcx, rdx
890 jl .cached_write
891
892 mov dword [rbx + VMXVMCSBATCHCACHE.Write.cValidEntries], 0
893.no_cached_writes:
894
895 %ifdef VBOX_WITH_CRASHDUMP_MAGIC
896 mov qword [rbx + VMXVMCSBATCHCACHE.uPos], 3
897 %endif
898 ; Save the pCache pointer.
899 push rbx
900%endif
901
902 ; Save the host state that's relevant in the temporary 64-bit mode.
903 mov rdx, cr0
904 mov eax, VMX_VMCS_HOST_CR0
905 vmwrite rax, rdx
906
907 mov rdx, cr3
908 mov eax, VMX_VMCS_HOST_CR3
909 vmwrite rax, rdx
910
911 mov rdx, cr4
912 mov eax, VMX_VMCS_HOST_CR4
913 vmwrite rax, rdx
914
915 mov rdx, cs
916 mov eax, VMX_VMCS_HOST_FIELD_CS
917 vmwrite rax, rdx
918
919 mov rdx, ss
920 mov eax, VMX_VMCS_HOST_FIELD_SS
921 vmwrite rax, rdx
922
923%if 0 ; Another experiment regarding triple faults... Seems not to be necessary.
924 sub rsp, 16
925 str [rsp]
926 movsx rdx, word [rsp]
927 mov eax, VMX_VMCS_HOST_FIELD_TR
928 vmwrite rax, rdx
929 add rsp, 16
930%endif
931
932 sub rsp, 16
933 sgdt [rsp + 6] ; (The 64-bit base should be aligned, not the word.)
934 mov eax, VMX_VMCS_HOST_GDTR_BASE
935 vmwrite rax, [rsp + 6 + 2]
936 add rsp, 16
937
938%ifdef VBOX_WITH_64ON32_IDT
939 sub rsp, 16
940 sidt [rsp + 6]
941 mov eax, VMX_VMCS_HOST_IDTR_BASE
942 vmwrite rax, [rsp + 6 + 2] ; [rsi + CPUMCPU.Hyper.idtr + 2] - why doesn't this work?
943 add rsp, 16
944 ;call NAME(vmm64On32PrintIdtr)
945%endif
946
947%ifdef VBOX_WITH_CRASHDUMP_MAGIC
948 mov qword [rbx + VMXVMCSBATCHCACHE.uPos], 4
949%endif
950
951 ; Hopefully we can ignore TR (we restore it anyway on the way back to 32-bit mode).
952
953 ; First we have to save some final CPU context registers.
954 lea rdx, [.vmlaunch64_done wrt rip]
955 mov rax, VMX_VMCS_HOST_RIP ; Return address (too difficult to continue after VMLAUNCH?).
956 vmwrite rax, rdx
957 ; Note: assumes success!
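 ; On every VM-exit the CPU loads RIP from the VMX_VMCS_HOST_RIP field written
 ; above, so .vmlaunch64_done is reached both after a guest run and - via the
 ; fall-through jmp after vmlaunch - when VMLAUNCH itself fails.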
958
959 ; Manual save and restore:
960 ; - General purpose registers except RIP, RSP
961 ; - XCR0
962 ;
963 ; Trashed:
964 ; - CR2 (we don't care)
965 ; - LDTR (reset to 0)
966 ; - DRx (presumably not changed at all)
967 ; - DR7 (reset to 0x400)
968 ; - EFLAGS (reset to RT_BIT(1); not relevant)
969
970%ifdef VBOX_WITH_CRASHDUMP_MAGIC
971 mov qword [rbx + VMXVMCSBATCHCACHE.uPos], 5
972%endif
973
974 ;
975 ; Save the host XCR0 and load the guest one if necessary.
976 ; Note! Trashes rdx and rcx.
977 ;
978 mov rax, [rbp + 30h] ; pVCpu
979 test byte [rax + VMCPU.hm + HMCPU.fLoadSaveGuestXcr0], 1
980 jz .xcr0_before_skip
981
982 xor ecx, ecx
983 xgetbv ; Save the host one on the stack.
984 push rdx
985 push rax
986
987 mov eax, [xSI + CPUMCTX.aXcr] ; Load the guest one.
988 mov edx, [xSI + CPUMCTX.aXcr + 4]
989 xor ecx, ecx ; paranoia
990 xsetbv
991
992 push 0 ; Indicate that we must restore XCR0 (popped into ecx, thus 0).
993 jmp .xcr0_before_done
994
995.xcr0_before_skip:
996 push 3fh ; indicate that we need not.
997.xcr0_before_done:
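 ; Stack layout at this point: [rsp] holds the XCR0-restore marker (0 = host XCR0
 ; must be restored, 3fh = nothing to do); when the marker is 0 it is followed by
 ; the saved host XCR0 (RAX half first, then the RDX half).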
998
999 ; Save the pCtx pointer
1000 push rsi
1001
1002 ; Load CR2 if necessary (may be expensive as writing CR2 is a synchronizing instruction).
1003 mov rbx, qword [rsi + CPUMCTX.cr2]
1004 mov rdx, cr2
1005 cmp rdx, rbx
1006 je .skipcr2write64
1007 mov cr2, rbx
1008
1009.skipcr2write64:
1010 mov eax, VMX_VMCS_HOST_RSP
1011 vmwrite rax, rsp
1012 ; Note: assumes success!
1013 ; Don't mess with ESP anymore!!!
1014
1015 ; Save Guest's general purpose registers.
1016 mov rax, qword [rsi + CPUMCTX.eax]
1017 mov rbx, qword [rsi + CPUMCTX.ebx]
1018 mov rcx, qword [rsi + CPUMCTX.ecx]
1019 mov rdx, qword [rsi + CPUMCTX.edx]
1020 mov rbp, qword [rsi + CPUMCTX.ebp]
1021 mov r8, qword [rsi + CPUMCTX.r8]
1022 mov r9, qword [rsi + CPUMCTX.r9]
1023 mov r10, qword [rsi + CPUMCTX.r10]
1024 mov r11, qword [rsi + CPUMCTX.r11]
1025 mov r12, qword [rsi + CPUMCTX.r12]
1026 mov r13, qword [rsi + CPUMCTX.r13]
1027 mov r14, qword [rsi + CPUMCTX.r14]
1028 mov r15, qword [rsi + CPUMCTX.r15]
1029
1030 ; Save rdi & rsi.
1031 mov rdi, qword [rsi + CPUMCTX.edi]
1032 mov rsi, qword [rsi + CPUMCTX.esi]
1033
1034 vmlaunch
1035 jmp .vmlaunch64_done; ; Here if vmlaunch detected a failure.
1036
1037ALIGNCODE(16)
1038.vmlaunch64_done:
1039%if 0 ;fixme later - def VBOX_WITH_64ON32_IDT
1040 push rdx
1041 mov rdx, [rsp + 8] ; pCtx
1042 lidt [rdx + CPUMCPU.Hyper.idtr]
1043 pop rdx
1044%endif
1045 jc near .vmstart64_invalid_vmcs_ptr
1046 jz near .vmstart64_start_failed
1047
1048 push rdi
1049 mov rdi, [rsp + 8] ; pCtx
1050
1051 mov qword [rdi + CPUMCTX.eax], rax
1052 mov qword [rdi + CPUMCTX.ebx], rbx
1053 mov qword [rdi + CPUMCTX.ecx], rcx
1054 mov qword [rdi + CPUMCTX.edx], rdx
1055 mov qword [rdi + CPUMCTX.esi], rsi
1056 mov qword [rdi + CPUMCTX.ebp], rbp
1057 mov qword [rdi + CPUMCTX.r8], r8
1058 mov qword [rdi + CPUMCTX.r9], r9
1059 mov qword [rdi + CPUMCTX.r10], r10
1060 mov qword [rdi + CPUMCTX.r11], r11
1061 mov qword [rdi + CPUMCTX.r12], r12
1062 mov qword [rdi + CPUMCTX.r13], r13
1063 mov qword [rdi + CPUMCTX.r14], r14
1064 mov qword [rdi + CPUMCTX.r15], r15
1065 mov rax, cr2
1066 mov qword [rdi + CPUMCTX.cr2], rax
1067
1068 pop rax ; The guest edi we pushed above
1069 mov qword [rdi + CPUMCTX.edi], rax
1070
1071 pop rsi ; pCtx (needed in rsi by the macros below)
1072
1073 ; Restore the host xcr0 if necessary.
1074 pop rcx
1075 test ecx, ecx
1076 jnz .xcr0_after_skip
1077 pop rax
1078 pop rdx
1079 xsetbv ; ecx is already zero.
1080.xcr0_after_skip:
1081
1082%ifdef VMX_USE_CACHED_VMCS_ACCESSES
1083 pop rdi ; Saved pCache
1084
1085 %ifdef VBOX_WITH_CRASHDUMP_MAGIC
1086 mov dword [rdi + VMXVMCSBATCHCACHE.uPos], 7
1087 %endif
1088 %ifdef DEBUG
1089 mov [rdi + VMXVMCSBATCHCACHE.TestOut.pCache], rdi
1090 mov [rdi + VMXVMCSBATCHCACHE.TestOut.pCtx], rsi
1091 mov rax, cr8
1092 mov [rdi + VMXVMCSBATCHCACHE.TestOut.cr8], rax
1093 %endif
1094
1095 mov ecx, [rdi + VMXVMCSBATCHCACHE.Read.cValidEntries]
1096 cmp ecx, 0 ; Can't happen
1097 je .no_cached_reads
1098 jmp .cached_read
1099
1100ALIGN(16)
1101.cached_read:
1102 dec rcx
1103 mov eax, [rdi + VMXVMCSBATCHCACHE.Read.aField + rcx*4]
1104 vmread qword [rdi + VMXVMCSBATCHCACHE.Read.aFieldVal + rcx*8], rax
1105 cmp rcx, 0
1106 jnz .cached_read
1107.no_cached_reads:
1108 %ifdef VBOX_WITH_CRASHDUMP_MAGIC
1109 mov dword [rdi + VMXVMCSBATCHCACHE.uPos], 8
1110 %endif
1111%endif
1112
1113 ; Restore segment registers.
1114 MYPOPSEGS rax
1115
1116 mov eax, VINF_SUCCESS
1117
1118%ifdef VBOX_WITH_CRASHDUMP_MAGIC
1119 mov dword [rdi + VMXVMCSBATCHCACHE.uPos], 9
1120%endif
1121.vmstart64_end:
1122
1123%ifdef VMX_USE_CACHED_VMCS_ACCESSES
1124 %ifdef DEBUG
1125 mov rdx, [rsp] ; HCPhysVmcs
1126 mov [rdi + VMXVMCSBATCHCACHE.TestOut.HCPhysVmcs], rdx
1127 %endif
1128%endif
1129
1130 ; Write back the data and disable the VMCS.
1131 vmclear qword [rsp] ; Pushed pVMCS
1132 add rsp, 8
1133
1134.vmstart64_vmxoff_end:
1135 ; Disable VMX root mode.
1136 vmxoff
1137.vmstart64_vmxon_failed:
1138%ifdef VMX_USE_CACHED_VMCS_ACCESSES
1139 %ifdef DEBUG
1140 cmp eax, VINF_SUCCESS
1141 jne .skip_flags_save
1142
1143 pushf
1144 pop rdx
1145 mov [rdi + VMXVMCSBATCHCACHE.TestOut.eflags], rdx
1146 %ifdef VBOX_WITH_CRASHDUMP_MAGIC
1147 mov dword [rdi + VMXVMCSBATCHCACHE.uPos], 12
1148 %endif
1149.skip_flags_save:
1150 %endif
1151%endif
1152 pop rbp
1153 ret
1154
1155
1156.vmstart64_invalid_vmcs_ptr:
1157 pop rsi ; pCtx (needed in rsi by the macros below)
1158
1159 ; Restore the host xcr0 if necessary.
1160 pop rcx
1161 test ecx, ecx
1162 jnz .xcr0_after_skip2
1163 pop rax
1164 pop rdx
1165 xsetbv ; ecx is already zero.
1166.xcr0_after_skip2:
1167
1168%ifdef VMX_USE_CACHED_VMCS_ACCESSES
1169 pop rdi ; pCache
1170 %ifdef VBOX_WITH_CRASHDUMP_MAGIC
1171 mov dword [rdi + VMXVMCSBATCHCACHE.uPos], 10
1172 %endif
1173
1174 %ifdef DEBUG
1175 mov [rdi + VMXVMCSBATCHCACHE.TestOut.pCache], rdi
1176 mov [rdi + VMXVMCSBATCHCACHE.TestOut.pCtx], rsi
1177 %endif
1178%endif
1179
1180 ; Restore segment registers.
1181 MYPOPSEGS rax
1182
1183 ; Restore all general purpose host registers.
1184 mov eax, VERR_VMX_INVALID_VMCS_PTR_TO_START_VM
1185 jmp .vmstart64_end
1186
1187.vmstart64_start_failed:
1188 pop rsi ; pCtx (needed in rsi by the macros below)
1189
1190 ; Restore the host xcr0 if necessary.
1191 pop rcx
1192 test ecx, ecx
1193 jnz .xcr0_after_skip3
1194 pop rax
1195 pop rdx
1196 xsetbv ; ecx is already zero.
1197.xcr0_after_skip3:
1198
1199%ifdef VMX_USE_CACHED_VMCS_ACCESSES
1200 pop rdi ; pCache
1201
1202 %ifdef DEBUG
1203 mov [rdi + VMXVMCSBATCHCACHE.TestOut.pCache], rdi
1204 mov [rdi + VMXVMCSBATCHCACHE.TestOut.pCtx], rsi
1205 %endif
1206 %ifdef VBOX_WITH_CRASHDUMP_MAGIC
1207 mov dword [rdi + VMXVMCSBATCHCACHE.uPos], 11
1208 %endif
1209%endif
1210
1211 ; Restore segment registers.
1212 MYPOPSEGS rax
1213
1214 ; Restore all general purpose host registers.
1215 mov eax, VERR_VMX_UNABLE_TO_START_VM
1216 jmp .vmstart64_end
1217ENDPROC VMXRCStartVM64
1218
1219
1220;;
1221; Prepares for and executes VMRUN (64-bit guests)
1222;
1223; @returns VBox status code
1224; @param HCPhysVMCBHost Physical address of the host VMCB [rbp+10h]
1225; @param HCPhysVMCBGuest Physical address of the guest VMCB [rbp+18h]
1226; @param pVM The cross context VM structure. [rbp+20h]
1227; @param pVCpu The cross context virtual CPU structure. [rbp+28h]
1228; @param pCtx Guest context [rsi]
1229;
1230BEGINPROC SVMRCVMRun64
1231 push rbp
1232 mov rbp, rsp
1233 pushf
1234 DEBUG_CMOS_STACK64 30h
1235
1236 ; Manual save and restore:
1237 ; - General purpose registers except RIP, RSP, RAX
1238 ;
1239 ; Trashed:
1240 ; - CR2 (we don't care)
1241 ; - LDTR (reset to 0)
1242 ; - DRx (presumably not changed at all)
1243 ; - DR7 (reset to 0x400)
1244
1245 ;
1246 ; Save the host XCR0 and load the guest one if necessary.
1247 ;
1248 mov rax, [rbp + 28h] ; pVCpu
1249 test byte [rax + VMCPU.hm + HMCPU.fLoadSaveGuestXcr0], 1
1250 jz .xcr0_before_skip
1251
1252 xor ecx, ecx
1253 xgetbv ; Save the host one on the stack.
1254 push rdx
1255 push rax
1256
1257 mov eax, [xSI + CPUMCTX.aXcr] ; Load the guest one.
1258 mov edx, [xSI + CPUMCTX.aXcr + 4]
1259 xor ecx, ecx ; paranoia
1260 xsetbv
1261
1262 push 0 ; Indicate that we must restore XCR0 (popped into ecx, thus 0).
1263 jmp .xcr0_before_done
1264
1265.xcr0_before_skip:
1266 push 3fh ; indicate that we need not.
1267.xcr0_before_done:
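 ; (Same XCR0 save/restore marker convention as in VMXRCStartVM64 above.)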
1268
1269 ; Save the Guest CPU context pointer.
1270 push rsi ; Push for saving the state at the end
1271
1272 ; Save host fs, gs, sysenter msr etc
1273 mov rax, [rbp + 8 + 8] ; pVMCBHostPhys (64-bit physical address)
1274 push rax ; Save for the vmload after vmrun
1275 vmsave
1276
1277 ; Setup eax for VMLOAD
1278 mov rax, [rbp + 8 + 8 + RTHCPHYS_CB] ; pVMCBPhys (64-bit physical address)
1279
1280 ; Restore Guest's general purpose registers.
1281 ; rax is loaded from the VMCB by VMRUN.
1282 mov rbx, qword [rsi + CPUMCTX.ebx]
1283 mov rcx, qword [rsi + CPUMCTX.ecx]
1284 mov rdx, qword [rsi + CPUMCTX.edx]
1285 mov rdi, qword [rsi + CPUMCTX.edi]
1286 mov rbp, qword [rsi + CPUMCTX.ebp]
1287 mov r8, qword [rsi + CPUMCTX.r8]
1288 mov r9, qword [rsi + CPUMCTX.r9]
1289 mov r10, qword [rsi + CPUMCTX.r10]
1290 mov r11, qword [rsi + CPUMCTX.r11]
1291 mov r12, qword [rsi + CPUMCTX.r12]
1292 mov r13, qword [rsi + CPUMCTX.r13]
1293 mov r14, qword [rsi + CPUMCTX.r14]
1294 mov r15, qword [rsi + CPUMCTX.r15]
1295 mov rsi, qword [rsi + CPUMCTX.esi]
1296
1297 ; Clear the global interrupt flag & execute sti to make sure external interrupts cause a world switch.
1298 clgi
1299 sti
1300
1301 ; Load guest fs, gs, sysenter msr etc
1302 vmload
1303 ; Run the VM
1304 vmrun
1305
1306 ; rax is in the VMCB already; we can use it here.
1307
1308 ; Save guest fs, gs, sysenter msr etc.
1309 vmsave
1310
1311 ; Load host fs, gs, sysenter msr etc.
1312 pop rax ; Pushed above
1313 vmload
1314
1315 ; Set the global interrupt flag again, but execute cli to make sure IF=0.
1316 cli
1317 stgi
1318
1319 pop rax ; pCtx
1320
1321 mov qword [rax + CPUMCTX.ebx], rbx
1322 mov qword [rax + CPUMCTX.ecx], rcx
1323 mov qword [rax + CPUMCTX.edx], rdx
1324 mov qword [rax + CPUMCTX.esi], rsi
1325 mov qword [rax + CPUMCTX.edi], rdi
1326 mov qword [rax + CPUMCTX.ebp], rbp
1327 mov qword [rax + CPUMCTX.r8], r8
1328 mov qword [rax + CPUMCTX.r9], r9
1329 mov qword [rax + CPUMCTX.r10], r10
1330 mov qword [rax + CPUMCTX.r11], r11
1331 mov qword [rax + CPUMCTX.r12], r12
1332 mov qword [rax + CPUMCTX.r13], r13
1333 mov qword [rax + CPUMCTX.r14], r14
1334 mov qword [rax + CPUMCTX.r15], r15
1335
1336 ;
1337 ; Restore the host xcr0 if necessary.
1338 ;
1339 pop rcx
1340 test ecx, ecx
1341 jnz .xcr0_after_skip
1342 pop rax
1343 pop rdx
1344 xsetbv ; ecx is already zero.
1345.xcr0_after_skip:
1346
1347 mov eax, VINF_SUCCESS
1348
1349 popf
1350 pop rbp
1351 ret
1352ENDPROC SVMRCVMRun64
1353
1354;/**
1355; * Saves the guest FPU context
1356; *
1357; * @returns VBox status code
1358; * @param pCtx Guest context [rsi]
1359; * @param pCPUM Pointer to CPUMCPU [rdx]
1360; */
1361BEGINPROC HMRCSaveGuestFPU64
1362 DEBUG_CMOS_STACK64 40h
1363 mov rax, cr0
1364 mov rcx, rax ; save old CR0
1365 and rax, ~(X86_CR0_TS | X86_CR0_EM)
1366 mov cr0, rax
1367
1368 mov eax, [rsi + CPUMCTX.fXStateMask]
1369 mov ebx, [rsi + CPUMCTX.pXStateRC]
1370 test eax, eax
1371 jz .use_fxsave
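 ; fXStateMask == 0 means only legacy FXSAVE state is managed for this guest;
 ; otherwise XSAVE is used with EDX:EAX = fXStateMask as the component bitmap.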
1372 mov edx, [rsi + CPUMCTX.fXStateMask + 4]
1373 o64 xsave [rbx]
1374 jmp .done
1375
1376.use_fxsave:
1377 o64 fxsave [rbx] ; (use explicit REX prefix, see @bugref{6398})
1378
1379.done:
1380 mov cr0, rcx ; and restore old CR0 again
1381
1382 and dword [rdx + CPUMCPU.fUseFlags], ~CPUM_USED_FPU_GUEST
1383
1384 mov eax, VINF_SUCCESS
1385 ret
1386ENDPROC HMRCSaveGuestFPU64
1387
1388;/**
1389; * Saves the guest debug context (DR0-3, DR6)
1390; *
1391; * @returns VBox status code
1392; * @param pCtx Guest context [rsi]
1393; */
1394BEGINPROC HMRCSaveGuestDebug64
1395 DEBUG_CMOS_STACK64 41h
1396 mov rax, dr0
1397 mov qword [rsi + CPUMCTX.dr + 0*8], rax
1398 mov rax, dr1
1399 mov qword [rsi + CPUMCTX.dr + 1*8], rax
1400 mov rax, dr2
1401 mov qword [rsi + CPUMCTX.dr + 2*8], rax
1402 mov rax, dr3
1403 mov qword [rsi + CPUMCTX.dr + 3*8], rax
1404 mov rax, dr6
1405 mov qword [rsi + CPUMCTX.dr + 6*8], rax
1406 mov eax, VINF_SUCCESS
1407 ret
1408ENDPROC HMRCSaveGuestDebug64
1409
1410;/**
1411; * Dummy callback handler
1412; *
1413; * @returns VBox status code
1414; * @param param1 Parameter 1 [rsp+8]
1415; * @param param2 Parameter 2 [rsp+12]
1416; * @param param3 Parameter 3 [rsp+16]
1417; * @param param4 Parameter 4 [rsp+20]
1418; * @param param5 Parameter 5 [rsp+24]
1419; * @param pCtx Guest context [rsi]
1420; */
1421BEGINPROC HMRCTestSwitcher64
1422 DEBUG_CMOS_STACK64 42h
1423 mov eax, [rsp+8]
1424 ret
1425ENDPROC HMRCTestSwitcher64
1426
1427
1428%ifdef VBOX_WITH_64ON32_IDT
1429;
1430; Trap handling.
1431;
1432
1433;; Here follows an array of trap handler entry points, 8 byte in size.
1434BEGINPROC vmm64On32TrapHandlers
1435%macro vmm64On32TrapEntry 1
1436GLOBALNAME vmm64On32Trap %+ i
1437 db 06ah, i ; push imm8 - note that this is a sign-extended value.
1438 jmp NAME(%1)
1439 ALIGNCODE(8)
1440%assign i i+1
1441%endmacro
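 ; Each entry is padded to 8 bytes by the ALIGNCODE(8) above. Vectors >= 80h get
 ; pushed as sign-extended (negative) values, which is harmless because the
 ; handler only ever reads the low byte of the trap number (see vmm64On32TrapErrCode).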
1442%assign i 0 ; start counter.
1443 vmm64On32TrapEntry vmm64On32Trap ; 0
1444 vmm64On32TrapEntry vmm64On32Trap ; 1
1445 vmm64On32TrapEntry vmm64On32Trap ; 2
1446 vmm64On32TrapEntry vmm64On32Trap ; 3
1447 vmm64On32TrapEntry vmm64On32Trap ; 4
1448 vmm64On32TrapEntry vmm64On32Trap ; 5
1449 vmm64On32TrapEntry vmm64On32Trap ; 6
1450 vmm64On32TrapEntry vmm64On32Trap ; 7
1451 vmm64On32TrapEntry vmm64On32TrapErrCode ; 8
1452 vmm64On32TrapEntry vmm64On32Trap ; 9
1453 vmm64On32TrapEntry vmm64On32TrapErrCode ; a
1454 vmm64On32TrapEntry vmm64On32TrapErrCode ; b
1455 vmm64On32TrapEntry vmm64On32TrapErrCode ; c
1456 vmm64On32TrapEntry vmm64On32TrapErrCode ; d
1457 vmm64On32TrapEntry vmm64On32TrapErrCode ; e
1458 vmm64On32TrapEntry vmm64On32Trap ; f (reserved)
1459 vmm64On32TrapEntry vmm64On32Trap ; 10
1460 vmm64On32TrapEntry vmm64On32TrapErrCode ; 11
1461 vmm64On32TrapEntry vmm64On32Trap ; 12
1462 vmm64On32TrapEntry vmm64On32Trap ; 13
1463%rep (0x100 - 0x14)
1464 vmm64On32TrapEntry vmm64On32Trap
1465%endrep
1466ENDPROC vmm64On32TrapHandlers
1467
1468;; Fake an error code and jump to the real thing.
1469BEGINPROC vmm64On32Trap
1470 push qword [rsp]
1471 jmp NAME(vmm64On32TrapErrCode)
1472ENDPROC vmm64On32Trap
1473
1474
1475;;
1476; Trap frame:
1477; [rbp + 38h] = ss
1478; [rbp + 30h] = rsp
1479; [rbp + 28h] = eflags
1480; [rbp + 20h] = cs
1481; [rbp + 18h] = rip
1482; [rbp + 10h] = error code (or trap number)
1483; [rbp + 08h] = trap number
1484; [rbp + 00h] = rbp
1485; [rbp - 08h] = rax
1486; [rbp - 10h] = rbx
1487; [rbp - 18h] = ds
1488;
1489BEGINPROC vmm64On32TrapErrCode
1490 push rbp
1491 mov rbp, rsp
1492 push rax
1493 push rbx
1494 mov ax, ds
1495 push rax
1496 sub rsp, 20h
1497
1498 mov ax, cs
1499 mov ds, ax
1500
1501%if 1
1502 COM64_S_NEWLINE
1503 COM64_S_CHAR '!'
1504 COM64_S_CHAR 't'
1505 COM64_S_CHAR 'r'
1506 COM64_S_CHAR 'a'
1507 COM64_S_CHAR 'p'
1508 movzx eax, byte [rbp + 08h]
1509 COM64_S_DWORD_REG eax
1510 COM64_S_CHAR '!'
1511%endif
1512
1513%if 0 ;; @todo Figure the offset of the CPUMCPU relative to CPUM
1514 sidt [rsp]
1515 movsx eax, word [rsp]
1516 shr eax, 12 ; div by 16 * 256 (0x1000).
1517%else
1518 ; hardcoded VCPU(0) for now...
1519 mov rbx, [NAME(pCpumIC) wrt rip]
1520 mov eax, [rbx + CPUM.offCPUMCPU0]
1521%endif
1522 push rax ; Save the offset for rbp later.
1523
1524 add rbx, rax ; rbx = CPUMCPU
1525
1526 ;
1527 ; Deal with recursive traps due to vmxoff (lazy bird).
1528 ;
1529 lea rax, [.vmxoff_trap_location wrt rip]
1530 cmp rax, [rbp + 18h]
1531 je .not_vmx_root
1532
1533 ;
1534 ; Save the context.
1535 ;
1536 mov rax, [rbp - 8]
1537 mov [rbx + CPUMCPU.Hyper.eax], rax
1538 mov [rbx + CPUMCPU.Hyper.ecx], rcx
1539 mov [rbx + CPUMCPU.Hyper.edx], rdx
1540 mov rax, [rbp - 10h]
1541 mov [rbx + CPUMCPU.Hyper.ebx], rax
1542 mov rax, [rbp]
1543 mov [rbx + CPUMCPU.Hyper.ebp], rax
1544 mov rax, [rbp + 30h]
1545 mov [rbx + CPUMCPU.Hyper.esp], rax
1546 mov [rbx + CPUMCPU.Hyper.edi], rdi
1547 mov [rbx + CPUMCPU.Hyper.esi], rsi
1548 mov [rbx + CPUMCPU.Hyper.r8], r8
1549 mov [rbx + CPUMCPU.Hyper.r9], r9
1550 mov [rbx + CPUMCPU.Hyper.r10], r10
1551 mov [rbx + CPUMCPU.Hyper.r11], r11
1552 mov [rbx + CPUMCPU.Hyper.r12], r12
1553 mov [rbx + CPUMCPU.Hyper.r13], r13
1554 mov [rbx + CPUMCPU.Hyper.r14], r14
1555 mov [rbx + CPUMCPU.Hyper.r15], r15
1556
1557 mov rax, [rbp + 18h]
1558 mov [rbx + CPUMCPU.Hyper.eip], rax
1559 movzx ax, [rbp + 20h]
1560 mov [rbx + CPUMCPU.Hyper.cs.Sel], ax
1561 mov ax, [rbp + 38h]
1562 mov [rbx + CPUMCPU.Hyper.ss.Sel], ax
1563 mov ax, [rbp - 18h]
1564 mov [rbx + CPUMCPU.Hyper.ds.Sel], ax
1565
1566 mov rax, [rbp + 28h]
1567 mov [rbx + CPUMCPU.Hyper.eflags], rax
1568
1569 mov rax, cr2
1570 mov [rbx + CPUMCPU.Hyper.cr2], rax
1571
1572 mov rax, [rbp + 10h]
1573 mov [rbx + CPUMCPU.Hyper.r14], rax ; r14 = error code
1574 movzx eax, byte [rbp + 08h]
1575 mov [rbx + CPUMCPU.Hyper.r15], rax ; r15 = trap number
1576
1577 ;
1578 ; Finally, leave VMX root operation before trying to return to the host.
1579 ;
1580 mov rax, cr4
1581 test rax, X86_CR4_VMXE
1582 jz .not_vmx_root
1583.vmxoff_trap_location:
1584 vmxoff
1585.not_vmx_root:
1586
1587 ;
1588 ; Go back to the host.
1589 ;
1590 pop rbp
1591 mov dword [rbx + CPUMCPU.u32RetCode], VERR_TRPM_DONT_PANIC
1592 jmp NAME(vmmRCToHostAsm)
1593ENDPROC vmm64On32TrapErrCode
1594
1595;; We allocate the IDT here to avoid having to allocate memory separately somewhere.
1596ALIGNCODE(16)
1597GLOBALNAME vmm64On32Idt
1598%assign i 0
1599%rep 256
1600 dq NAME(vmm64On32Trap %+ i) - NAME(Start) ; Relative trap handler offsets.
1601 dq 0
1602%assign i (i + 1)
1603%endrep
1604
1605
1606 %if 0
1607;; For debugging purposes.
1608BEGINPROC vmm64On32PrintIdtr
1609 push rax
1610 push rsi ; paranoia
1611 push rdi ; ditto
1612 sub rsp, 16
1613
1614 COM64_S_CHAR ';'
1615 COM64_S_CHAR 'i'
1616 COM64_S_CHAR 'd'
1617 COM64_S_CHAR 't'
1618 COM64_S_CHAR 'r'
1619 COM64_S_CHAR '='
1620 sidt [rsp + 6]
1621 mov eax, [rsp + 8 + 4]
1622 COM64_S_DWORD_REG eax
1623 mov eax, [rsp + 8]
1624 COM64_S_DWORD_REG eax
1625 COM64_S_CHAR ':'
1626 movzx eax, word [rsp + 6]
1627 COM64_S_DWORD_REG eax
1628 COM64_S_CHAR '!'
1629
1630 add rsp, 16
1631 pop rdi
1632 pop rsi
1633 pop rax
1634 ret
1635ENDPROC vmm64On32PrintIdtr
1636 %endif
1637
1638 %if 1
1639;; For debugging purposes.
1640BEGINPROC vmm64On32DumpCmos
1641 push rax
1642 push rdx
1643 push rcx
1644 push rsi ; paranoia
1645 push rdi ; ditto
1646 sub rsp, 16
1647
1648%if 0
1649 mov al, 3
1650 out 72h, al
1651 mov al, 68h
1652 out 73h, al
1653%endif
1654
1655 COM64_S_NEWLINE
1656 COM64_S_CHAR 'c'
1657 COM64_S_CHAR 'm'
1658 COM64_S_CHAR 'o'
1659 COM64_S_CHAR 's'
1660 COM64_S_CHAR '0'
1661 COM64_S_CHAR ':'
1662
1663 xor ecx, ecx
1664.loop1:
1665 mov al, cl
1666 out 70h, al
1667 in al, 71h
1668 COM64_S_BYTE_REG eax
1669 COM64_S_CHAR ' '
1670 inc ecx
1671 cmp ecx, 128
1672 jb .loop1
1673
1674 COM64_S_NEWLINE
1675 COM64_S_CHAR 'c'
1676 COM64_S_CHAR 'm'
1677 COM64_S_CHAR 'o'
1678 COM64_S_CHAR 's'
1679 COM64_S_CHAR '1'
1680 COM64_S_CHAR ':'
1681 xor ecx, ecx
1682.loop2:
1683 mov al, cl
1684 out 72h, al
1685 in al, 73h
1686 COM64_S_BYTE_REG eax
1687 COM64_S_CHAR ' '
1688 inc ecx
1689 cmp ecx, 128
1690 jb .loop2
1691
1692%if 0
1693 COM64_S_NEWLINE
1694 COM64_S_CHAR 'c'
1695 COM64_S_CHAR 'm'
1696 COM64_S_CHAR 'o'
1697 COM64_S_CHAR 's'
1698 COM64_S_CHAR '2'
1699 COM64_S_CHAR ':'
1700 xor ecx, ecx
1701.loop3:
1702 mov al, cl
1703 out 74h, al
1704 in al, 75h
1705 COM64_S_BYTE_REG eax
1706 COM64_S_CHAR ' '
1707 inc ecx
1708 cmp ecx, 128
1709 jb .loop3
1710
1711 COM64_S_NEWLINE
1712 COM64_S_CHAR 'c'
1713 COM64_S_CHAR 'm'
1714 COM64_S_CHAR 'o'
1715 COM64_S_CHAR 's'
1716 COM64_S_CHAR '3'
1717 COM64_S_CHAR ':'
1718 xor ecx, ecx
1719.loop4:
1720 mov al, cl
1721 out 72h, al
1722 in al, 73h
1723 COM64_S_BYTE_REG eax
1724 COM64_S_CHAR ' '
1725 inc ecx
1726 cmp ecx, 128
1727 jb .loop4
1728
1729 COM64_S_NEWLINE
1730%endif
1731
1732 add rsp, 16
1733 pop rdi
1734 pop rsi
1735 pop rcx
1736 pop rdx
1737 pop rax
1738 ret
1739ENDPROC vmm64On32DumpCmos
1740 %endif
1741
1742%endif ; VBOX_WITH_64ON32_IDT
1743
1744
1745
1746;
1747;
1748; Back to switcher code.
1749; Back to switcher code.
1750; Back to switcher code.
1751;
1752;
1753
1754
1755
1756;;
1757; Trampoline for doing a call when starting the hypervisor execution.
1758;
1759; Push any arguments to the routine.
1760; Push the argument frame size (cArg * 4).
1761; Push the call target (_cdecl convention).
1762; Push the address of this routine.
1763;
1764;
1765BITS 64
1766ALIGNCODE(16)
1767BEGINPROC vmmRCCallTrampoline
1768%ifdef DEBUG_STUFF
1769 COM64_S_CHAR 'c'
1770 COM64_S_CHAR 't'
1771 COM64_S_CHAR '!'
1772%endif
1773 int3
1774ENDPROC vmmRCCallTrampoline
1775
1776
1777;;
1778; The C interface.
1779;
1780BITS 64
1781ALIGNCODE(16)
1782BEGINPROC vmmRCToHost
1783%ifdef DEBUG_STUFF
1784 push rsi
1785 COM_NEWLINE
1786 COM_CHAR 'b'
1787 COM_CHAR 'a'
1788 COM_CHAR 'c'
1789 COM_CHAR 'k'
1790 COM_CHAR '!'
1791 COM_NEWLINE
1792 pop rsi
1793%endif
1794 int3
1795ENDPROC vmmRCToHost
1796
1797;;
1798; vmmRCToHostAsm
1799;
1800; This is an alternative entry point which we'll be using
1801; when we have saved the guest state already or we haven't
1802; been messing with the guest at all.
1803;
1804; @param rbp The offset of the CPUMCPU structure relative to CPUM.
1806;
1807BITS 64
1808ALIGNCODE(16)
1809BEGINPROC vmmRCToHostAsm
1810NAME(vmmRCToHostAsmNoReturn):
1811 ;; We're still in the intermediate memory context!
1812
1813 ;;
1814 ;; Switch to compatibility mode, placing ourselves in identity mapped code.
1815 ;;
1816 jmp far [NAME(fpIDEnterTarget) wrt rip]
1817
1818; 16:32 Pointer to IDEnterTarget.
1819NAME(fpIDEnterTarget):
1820 FIXUP FIX_ID_32BIT, 0, NAME(IDExitTarget) - NAME(Start)
1821dd 0
1822 FIXUP FIX_HYPER_CS, 0
1823dd 0
1824
1825 ; We're now on identity mapped pages!
1826ALIGNCODE(16)
1827GLOBALNAME IDExitTarget
1828BITS 32
1829 DEBUG32_CHAR('1')
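 ; This is the reverse of the entry sequence: paging off, switch to the 32-bit
 ; intermediate page tables, clear EFER.LME (and PAE unless the host needs it),
 ; then paging back on - leaving the CPU in plain 32-bit protected mode.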
1830
1831 ; 1. Deactivate long mode by turning off paging.
1832 mov ebx, cr0
1833 and ebx, ~X86_CR0_PG
1834 mov cr0, ebx
1835 DEBUG32_CHAR('2')
1836
1837 ; 2. Load intermediate page table.
1838 FIXUP SWITCHER_FIX_INTER_CR3_HC, 1
1839 mov edx, 0ffffffffh
1840 mov cr3, edx
1841 DEBUG32_CHAR('3')
1842
1843 ; 3. Disable long mode.
1844 mov ecx, MSR_K6_EFER
1845 rdmsr
1846 DEBUG32_CHAR('5')
1847 and eax, ~(MSR_K6_EFER_LME)
1848 wrmsr
1849 DEBUG32_CHAR('6')
1850
1851%ifndef NEED_PAE_ON_HOST
1852 ; 3b. Disable PAE.
1853 mov eax, cr4
1854 and eax, ~X86_CR4_PAE
1855 mov cr4, eax
1856 DEBUG32_CHAR('7')
1857%endif
1858
1859 ; 4. Enable paging.
1860 or ebx, X86_CR0_PG
1861 mov cr0, ebx
1862 jmp short just_a_jump
1863just_a_jump:
1864 DEBUG32_CHAR('8')
1865
1866 ;;
1867 ;; 5. Jump to guest code mapping of the code and load the Hypervisor CS.
1868 ;;
1869 FIXUP FIX_ID_2_HC_NEAR_REL, 1, NAME(ICExitTarget) - NAME(Start)
1870 jmp near NAME(ICExitTarget)
1871
1872 ;;
1873 ;; When we arrive at this label we're at the host mapping of the
1874 ;; switcher code, but with intermediate page tables.
1875 ;;
1876BITS 32
1877ALIGNCODE(16)
1878GLOBALNAME ICExitTarget
1879 DEBUG32_CHAR('9')
1880 ;DEBUG_CMOS_TRASH_AL 70h
1881
1882 ; load the hypervisor data selector into ds & es
1883 FIXUP FIX_HYPER_DS, 1
1884 mov eax, 0ffffh
1885 mov ds, eax
1886 mov es, eax
1887 DEBUG32_CHAR('a')
1888
1889 FIXUP FIX_GC_CPUM_OFF, 1, 0
1890 mov edx, 0ffffffffh
1891 CPUMCPU_FROM_CPUM_WITH_OFFSET edx, ebp
1892
1893 DEBUG32_CHAR('b')
1894 mov esi, [edx + CPUMCPU.Host.cr3]
1895 mov cr3, esi
1896 DEBUG32_CHAR('c')
1897
1898 ;; now we're in host memory context, let's restore regs
1899 FIXUP FIX_HC_CPUM_OFF, 1, 0
1900 mov edx, 0ffffffffh
1901 CPUMCPU_FROM_CPUM_WITH_OFFSET edx, ebp
1902 DEBUG32_CHAR('e')
1903
1904 ; restore the host EFER
1905 mov ebx, edx
1906 mov ecx, MSR_K6_EFER
1907 mov eax, [ebx + CPUMCPU.Host.efer]
1908 mov edx, [ebx + CPUMCPU.Host.efer + 4]
1909 DEBUG32_CHAR('f')
1910 wrmsr
1911 mov edx, ebx
1912 DEBUG32_CHAR('g')
1913
1914 ; activate host gdt and idt
1915 lgdt [edx + CPUMCPU.Host.gdtr]
1916 DEBUG32_CHAR('0')
1917 lidt [edx + CPUMCPU.Host.idtr]
1918 DEBUG32_CHAR('1')
1919
1920 ; Restore TSS selector; must mark it as not busy before using ltr (!)
1921 ; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p)
1922 movzx eax, word [edx + CPUMCPU.Host.tr] ; eax <- TR
1923 and al, 0F8h ; mask away TI and RPL bits, get descriptor offset.
1924 add eax, [edx + CPUMCPU.Host.gdtr + 2] ; eax <- GDTR.address + descriptor offset.
1925 and dword [eax + 4], ~0200h ; clear the busy flag (bit 9 of the 2nd dword, i.e. bit 1 of the type field)
1926 ltr word [edx + CPUMCPU.Host.tr]
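 ; (ltr raises #GP if the referenced TSS descriptor is already marked busy, hence
 ; the busy-bit clearing above.)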
1927
1928 ; activate ldt
1929 DEBUG32_CHAR('2')
1930 lldt [edx + CPUMCPU.Host.ldtr]
1931
1932 ; Restore segment registers
1933 mov eax, [edx + CPUMCPU.Host.ds]
1934 mov ds, eax
1935 mov eax, [edx + CPUMCPU.Host.es]
1936 mov es, eax
1937 mov eax, [edx + CPUMCPU.Host.fs]
1938 mov fs, eax
1939 mov eax, [edx + CPUMCPU.Host.gs]
1940 mov gs, eax
1941 ; restore stack
1942 lss esp, [edx + CPUMCPU.Host.esp]
1943
1944 ; Control registers.
1945 mov ecx, [edx + CPUMCPU.Host.cr4]
1946 mov cr4, ecx
1947 mov ecx, [edx + CPUMCPU.Host.cr0]
1948 mov cr0, ecx
1949 ;mov ecx, [edx + CPUMCPU.Host.cr2] ; assumes this is a waste of time.
1950 ;mov cr2, ecx
1951
1952 ; restore general registers.
1953 mov edi, [edx + CPUMCPU.Host.edi]
1954 mov esi, [edx + CPUMCPU.Host.esi]
1955 mov ebx, [edx + CPUMCPU.Host.ebx]
1956 mov ebp, [edx + CPUMCPU.Host.ebp]
1957
1958 ; store the return code in eax
1959 DEBUG_CMOS_TRASH_AL 79h
1960 mov eax, [edx + CPUMCPU.u32RetCode]
1961 retf
1962ENDPROC vmmRCToHostAsm
1963
1964
1965GLOBALNAME End
1966;
1967; The description string (in the text section).
1968;
1969NAME(Description):
1970 db SWITCHER_DESCRIPTION
1971 db 0
1972
1973extern NAME(Relocate)
1974
1975;
1976; End the fixup records.
1977;
1978BEGINDATA
1979 db FIX_THE_END ; final entry.
1980GLOBALNAME FixupsEnd
1981
1982;;
1983; The switcher definition structure.
1984ALIGNDATA(16)
1985GLOBALNAME Def
1986 istruc VMMSWITCHERDEF
1987 at VMMSWITCHERDEF.pvCode, RTCCPTR_DEF NAME(Start)
1988 at VMMSWITCHERDEF.pvFixups, RTCCPTR_DEF NAME(Fixups)
1989 at VMMSWITCHERDEF.pszDesc, RTCCPTR_DEF NAME(Description)
1990 at VMMSWITCHERDEF.pfnRelocate, RTCCPTR_DEF NAME(Relocate)
1991 at VMMSWITCHERDEF.enmType, dd SWITCHER_TYPE
1992 at VMMSWITCHERDEF.cbCode, dd NAME(End) - NAME(Start)
1993 at VMMSWITCHERDEF.offR0ToRawMode, dd NAME(vmmR0ToRawMode) - NAME(Start)
1994 at VMMSWITCHERDEF.offRCToHost, dd NAME(vmmRCToHost) - NAME(Start)
1995 at VMMSWITCHERDEF.offRCCallTrampoline, dd NAME(vmmRCCallTrampoline) - NAME(Start)
1996 at VMMSWITCHERDEF.offRCToHostAsm, dd NAME(vmmRCToHostAsm) - NAME(Start)
1997 at VMMSWITCHERDEF.offRCToHostAsmNoReturn, dd NAME(vmmRCToHostAsmNoReturn) - NAME(Start)
1998 ; disasm help
1999 at VMMSWITCHERDEF.offHCCode0, dd 0
2000 at VMMSWITCHERDEF.cbHCCode0, dd NAME(IDEnterTarget) - NAME(Start)
2001 at VMMSWITCHERDEF.offHCCode1, dd NAME(ICExitTarget) - NAME(Start)
2002 at VMMSWITCHERDEF.cbHCCode1, dd NAME(End) - NAME(ICExitTarget)
2003 at VMMSWITCHERDEF.offIDCode0, dd NAME(IDEnterTarget) - NAME(Start)
2004 at VMMSWITCHERDEF.cbIDCode0, dd NAME(ICEnterTarget) - NAME(IDEnterTarget)
2005 at VMMSWITCHERDEF.offIDCode1, dd NAME(IDExitTarget) - NAME(Start)
2006 at VMMSWITCHERDEF.cbIDCode1, dd NAME(ICExitTarget) - NAME(Start)
2007%ifdef VBOX_WITH_64ON32_IDT ; Hack! Use offGCCode to find the IDT.
2008 at VMMSWITCHERDEF.offGCCode, dd NAME(vmm64On32Idt) - NAME(Start)
2009%else
2010 at VMMSWITCHERDEF.offGCCode, dd 0
2011%endif
2012 at VMMSWITCHERDEF.cbGCCode, dd 0
2013
2014 iend
2015