1; $Id: AMD64andLegacy.mac 56287 2015-06-09 11:15:22Z vboxsync $
2;; @file
3; VMM - World Switchers, template for AMD64 to PAE and 32-bit.
4;
5
6;
7; Copyright (C) 2006-2015 Oracle Corporation
8;
9; This file is part of VirtualBox Open Source Edition (OSE), as
10; available from http://www.virtualbox.org. This file is free software;
11; you can redistribute it and/or modify it under the terms of the GNU
12; General Public License (GPL) as published by the Free Software
13; Foundation, in version 2 as it comes in the "COPYING" file of the
14; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16;
17
18;%define DEBUG_STUFF 1
19;%define STRICT_IF 1
20
21;*******************************************************************************
22;* Header Files *
23;*******************************************************************************
24%include "VBox/asmdefs.mac"
25%include "VBox/apic.mac"
26%include "iprt/x86.mac"
27%include "VBox/vmm/cpum.mac"
28%include "VBox/vmm/stam.mac"
29%include "VBox/vmm/vm.mac"
30%include "VBox/err.mac"
31%include "CPUMInternal.mac"
32%include "VMMSwitcher.mac"
33
34
35;
36; Start the fixup records
37; We collect the fixups in the .data section as we go along
38; It is therefore VITAL that no-one is using the .data section
39; for anything else between 'Start' and 'End'.
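; Each FIXUP invocation in the code below appends a fixup record here; the
; Relocate callback (see VMMSWITCHERDEF.pfnRelocate at the end of the file)
; uses these records to patch the code when the switcher is (re)located.
; The list is terminated by the FIX_THE_END byte emitted at FixupsEnd.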
40;
41BEGINDATA
42GLOBALNAME Fixups
43
44
45
46BEGINCODE
47GLOBALNAME Start
48
49%ifndef VBOX_WITH_HYBRID_32BIT_KERNEL
50BITS 64
51
52;;
53; The C interface.
54;
55; @param pVM gcc: rdi msc:rcx The VM handle.
56;
57BEGINPROC vmmR0ToRawMode
58%ifdef DEBUG_STUFF
59 COM64_S_NEWLINE
60 COM64_S_CHAR '^'
61%endif
62 ;
63 ; The ordinary version of the code.
64 ;
65
66 %ifdef STRICT_IF
67 pushf
68 pop rax
69 test eax, X86_EFL_IF
70 jz .if_clear_in
71 mov eax, 0c0ffee00h
72 ret
73.if_clear_in:
74 %endif
75
76 ;
77 ; make r9 = pVM and rdx = pCpum.
78 ; rax, rcx and r8 are scratch hereafter.
79 %ifdef RT_OS_WINDOWS
80 mov r9, rcx
81 %else
82 mov r9, rdi
83 %endif
84 lea rdx, [r9 + VM.cpum]
85
86 %ifdef VBOX_WITH_STATISTICS
87 ;
88 ; Switcher stats.
89 ;
90 lea r8, [r9 + VM.StatSwitcherToGC]
91 STAM64_PROFILE_ADV_START r8
92 %endif
93
94 ;
95 ; Call worker (far return).
96 ;
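 ; (The CS selector is pushed as a qword and the near call pushes a 64-bit
 ; return address, together forming the frame that the REX.W-prefixed 'retf'
 ; at the end of vmmRCToHostAsm pops to land back here after the switch.)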
97 mov eax, cs
98 push rax
99 call NAME(vmmR0ToRawModeAsm)
100
101 %ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
102 ; Unblock Local APIC NMI vectors
103 ; Do this here to ensure the host CS is already restored
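 ; fApicDisVectors bit layout (set by vmmR0ToRawModeAsm below): bit 0 = LINT0,
 ; bit 1 = LINT1, bit 2 = perf counter, bit 3 = thermal, bit 4 = CMCI,
 ; bits 5 and up = AMD extended LVT entries.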
104 mov r8d, [rdx + CPUM.offCPUMCPU0]
105 mov ecx, [rdx + r8 + CPUMCPU.fApicDisVectors]
106 test ecx, ecx
107 jz gth64_apic_done
108 cmp byte [rdx + r8 + CPUMCPU.fX2Apic], 1
109 je gth64_x2apic
110
111 ; Legacy xAPIC mode:
112 mov r8, [rdx + r8 + CPUMCPU.pvApicBase]
113 shr ecx, 1
114 jnc gth64_nolint0
115 and dword [r8 + APIC_REG_LVT_LINT0], ~APIC_REG_LVT_MASKED
116gth64_nolint0:
117 shr ecx, 1
118 jnc gth64_nolint1
119 and dword [r8 + APIC_REG_LVT_LINT1], ~APIC_REG_LVT_MASKED
120gth64_nolint1:
121 shr ecx, 1
122 jnc gth64_nopc
123 and dword [r8 + APIC_REG_LVT_PC], ~APIC_REG_LVT_MASKED
124gth64_nopc:
125 shr ecx, 1
126 jnc gth64_notherm
127 and dword [r8 + APIC_REG_LVT_THMR], ~APIC_REG_LVT_MASKED
128gth64_notherm:
129 shr ecx, 1
130 jnc gth64_nocmci
131 and dword [r8 + APIC_REG_LVT_CMCI], ~APIC_REG_LVT_MASKED
132gth64_nocmci:
133 shr ecx, 1
134 jnc gth64_noeilvt0
135 and dword [r8 + APIC_REG_EILVT0], ~APIC_REG_LVT_MASKED
136gth64_noeilvt0:
137 shr ecx, 1
138 jnc gth64_noeilvt1
139 and dword [r8 + APIC_REG_EILVT1], ~APIC_REG_LVT_MASKED
140gth64_noeilvt1:
141 shr ecx, 1
142 jnc gth64_noeilvt2
143 and dword [r8 + APIC_REG_EILVT2], ~APIC_REG_LVT_MASKED
144gth64_noeilvt2:
145 shr ecx, 1
146 jnc gth64_noeilvt3
147 and dword [r8 + APIC_REG_EILVT3], ~APIC_REG_LVT_MASKED
148gth64_noeilvt3:
149
150 jmp gth64_apic_done
151
152 ; x2APIC mode:
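 ; (In x2APIC mode the LVT registers are MSRs; each 16-byte xAPIC MMIO
 ; register maps to MSR MSR_IA32_X2APIC_START + (MMIO offset >> 4).)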
153gth64_x2apic:
154 mov r8, rax ; save rax
155 mov r10, rcx
156 shr r10d, 1
157 jnc gth64_x2_nolint0
158 mov ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_LINT0 >> 4)
159 rdmsr
160 and eax, ~APIC_REG_LVT_MASKED
161 wrmsr
162gth64_x2_nolint0:
163 shr r10d, 1
164 jnc gth64_x2_nolint1
165 mov ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_LINT1 >> 4)
166 rdmsr
167 and eax, ~APIC_REG_LVT_MASKED
168 wrmsr
169gth64_x2_nolint1:
170 shr r10d, 1
171 jnc gth64_x2_nopc
172 mov ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_PC >> 4)
173 rdmsr
174 and eax, ~APIC_REG_LVT_MASKED
175 wrmsr
176gth64_x2_nopc:
177 shr r10d, 1
178 jnc gth64_x2_notherm
179 mov ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_THMR >> 4)
180 rdmsr
181 and eax, ~APIC_REG_LVT_MASKED
182 wrmsr
183gth64_x2_notherm:
184 shr r10d, 1
185 jnc gth64_x2_nocmci
186 mov ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_CMCI >> 4)
187 rdmsr
188 and eax, ~APIC_REG_LVT_MASKED
189 wrmsr
190gth64_x2_nocmci:
191 mov rax, r8 ; restore rax
192
193gth64_apic_done:
194 %endif
195
196 %ifdef VBOX_WITH_STATISTICS
197 ;
198 ; Switcher stats.
199 ;
200 lea r8, [r9 + VM.StatSwitcherToGC]
201 STAM64_PROFILE_ADV_STOP r8
202 %endif
203
204 ret
205ENDPROC vmmR0ToRawMode
206
207
208%else ; VBOX_WITH_HYBRID_32BIT_KERNEL
209
210
211BITS 32
212
213;;
214; The C interface.
215;
216BEGINPROC vmmR0ToRawMode
217 %ifdef DEBUG_STUFF
218 COM32_S_NEWLINE
219 COM32_S_CHAR '^'
220 %endif
221
222 %ifdef VBOX_WITH_STATISTICS
223 ;
224 ; Switcher stats.
225 ;
226 FIXUP FIX_HC_VM_OFF, 1, VM.StatSwitcherToGC
227 mov edx, 0ffffffffh
228 STAM_PROFILE_ADV_START edx
229 %endif
230
231 ; Thunk to/from 64 bit when invoking the worker routine.
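 ; The first four dwords pushed below form a 64-bit far-return frame (CS and
 ; RIP each as a zero-extended qword) which the REX.W 'retf' in vmmRCToHostAsm
 ; pops to bring us back to .vmmR0ToRawModeReturn; the last two dwords
 ; (64-bit CS selector plus target address) are consumed by the 32-bit 'retf'
 ; below to enter vmmR0ToRawModeAsm in long mode.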
232 ;
233 FIXUP FIX_HC_VM_OFF, 1, VM.cpum
234 mov edx, 0ffffffffh
235
236 push 0
237 push cs
238 push 0
239 FIXUP FIX_HC_32BIT, 1, .vmmR0ToRawModeReturn - NAME(Start)
240 push 0ffffffffh
241
242 FIXUP FIX_HC_64BIT_CS, 1
243 push 0ffffh
244 FIXUP FIX_HC_32BIT, 1, NAME(vmmR0ToRawModeAsm) - NAME(Start)
245 push 0ffffffffh
246 retf
247.vmmR0ToRawModeReturn:
248
249 ;
250 ; This selector reloading is probably not necessary, but we do it anyway to be quite sure
251 ; the CPU has the right idea about the selectors.
252 ;
253 mov edx, ds
254 mov ds, edx
255 mov ecx, es
256 mov es, ecx
257 mov edx, ss
258 mov ss, edx
259
260 %ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
261 Missing implementation!
262 %endif
263
264
265 %ifdef VBOX_WITH_STATISTICS
266 ;
267 ; Switcher stats.
268 ;
269 FIXUP FIX_HC_VM_OFF, 1, VM.StatSwitcherToHC
270 mov edx, 0ffffffffh
271 STAM_PROFILE_ADV_STOP edx
272 %endif
273
274 ret
275ENDPROC vmmR0ToRawMode
276
277BITS 64
278%endif ;!VBOX_WITH_HYBRID_32BIT_KERNEL
279
280
281
282; *****************************************************************************
283; vmmR0ToRawModeAsm
284;
285; Phase one of the switch from host to guest context (host MMU context)
286;
287; INPUT:
288; - edx virtual address of CPUM structure (valid in host context)
289;
290; USES/DESTROYS:
291; - eax, ecx, edx, r8
292;
293; ASSUMPTION:
294; - current CS and DS selectors are wide open
295;
296; *****************************************************************************
297ALIGNCODE(16)
298BEGINPROC vmmR0ToRawModeAsm
299 ;; Store the offset from CPUM to CPUMCPU in r8
300 mov r8d, [rdx + CPUM.offCPUMCPU0]
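 ; (rdx keeps pointing at CPUM; CPUMCPU fields are addressed as
 ; [rdx + r8 + ...] below, r8 holding the offset of VCPU 0's CPUMCPU.)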
301
302 ;;
303 ;; Save CPU host context
304 ;; Skip eax, edx and ecx as these are not preserved over calls.
305 ;;
306 ; general registers.
307 ; mov [rdx + r8 + CPUMCPU.Host.rax], rax - scratch
308 mov [rdx + r8 + CPUMCPU.Host.rbx], rbx
309 ; mov [rdx + r8 + CPUMCPU.Host.rcx], rcx - scratch
310 ; mov [rdx + r8 + CPUMCPU.Host.rdx], rdx - scratch
311 mov [rdx + r8 + CPUMCPU.Host.rdi], rdi
312 mov [rdx + r8 + CPUMCPU.Host.rsi], rsi
313 mov [rdx + r8 + CPUMCPU.Host.rsp], rsp
314 mov [rdx + r8 + CPUMCPU.Host.rbp], rbp
315 ; mov [rdx + r8 + CPUMCPU.Host.r8 ], r8 - scratch
316 ; mov [rdx + r8 + CPUMCPU.Host.r9 ], r9 - scratch
317 mov [rdx + r8 + CPUMCPU.Host.r10], r10
318 mov [rdx + r8 + CPUMCPU.Host.r11], r11
319 mov [rdx + r8 + CPUMCPU.Host.r12], r12
320 mov [rdx + r8 + CPUMCPU.Host.r13], r13
321 mov [rdx + r8 + CPUMCPU.Host.r14], r14
322 mov [rdx + r8 + CPUMCPU.Host.r15], r15
323 ; selectors.
324 mov [rdx + r8 + CPUMCPU.Host.ds], ds
325 mov [rdx + r8 + CPUMCPU.Host.es], es
326 mov [rdx + r8 + CPUMCPU.Host.fs], fs
327 mov [rdx + r8 + CPUMCPU.Host.gs], gs
328 mov [rdx + r8 + CPUMCPU.Host.ss], ss
329 ; MSRs
330 mov rbx, rdx
331 mov ecx, MSR_K8_FS_BASE
332 rdmsr
333 mov [rbx + r8 + CPUMCPU.Host.FSbase], eax
334 mov [rbx + r8 + CPUMCPU.Host.FSbase + 4], edx
335 mov ecx, MSR_K8_GS_BASE
336 rdmsr
337 mov [rbx + r8 + CPUMCPU.Host.GSbase], eax
338 mov [rbx + r8 + CPUMCPU.Host.GSbase + 4], edx
339 mov ecx, MSR_K6_EFER
340 rdmsr
341 mov [rbx + r8 + CPUMCPU.Host.efer], eax
342 mov [rbx + r8 + CPUMCPU.Host.efer + 4], edx
343 mov rdx, rbx
344 ; special registers.
345 sldt [rdx + r8 + CPUMCPU.Host.ldtr]
346 sidt [rdx + r8 + CPUMCPU.Host.idtr]
347 sgdt [rdx + r8 + CPUMCPU.Host.gdtr]
348 str [rdx + r8 + CPUMCPU.Host.tr] ; yasm BUG, generates sldt. YASMCHECK!
349 ; flags
350 pushf
351 pop qword [rdx + r8 + CPUMCPU.Host.rflags]
352
353%ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
354 ; Block Local APIC NMI vectors
355 cmp byte [rdx + r8 + CPUMCPU.fX2Apic], 1
356 je htg_x2apic
357
358 ; Legacy xAPIC mode. No write completion required when writing to the
359 ; LVT registers as we have mapped the APIC page non-cacheable and the
360 ; MMIO is CPU-local.
361 mov rbx, [rdx + r8 + CPUMCPU.pvApicBase]
362 or rbx, rbx
363 jz htg_apic_done
364 xor edi, edi ; fApicDisVectors
365 mov eax, [rbx + APIC_REG_LVT_LINT0]
366 mov ecx, eax
367 and ecx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
368 cmp ecx, APIC_REG_LVT_MODE_NMI
369 jne htg_nolint0
370 or edi, 0x01
371 or eax, APIC_REG_LVT_MASKED
372 mov [rbx + APIC_REG_LVT_LINT0], eax
373htg_nolint0:
374 mov eax, [rbx + APIC_REG_LVT_LINT1]
375 mov ecx, eax
376 and ecx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
377 cmp ecx, APIC_REG_LVT_MODE_NMI
378 jne htg_nolint1
379 or edi, 0x02
380 or eax, APIC_REG_LVT_MASKED
381 mov [rbx + APIC_REG_LVT_LINT1], eax
382htg_nolint1:
383 mov eax, [rbx + APIC_REG_LVT_PC]
384 mov ecx, eax
385 and ecx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
386 cmp ecx, APIC_REG_LVT_MODE_NMI
387 jne htg_nopc
388 or edi, 0x04
389 or eax, APIC_REG_LVT_MASKED
390 mov [rbx + APIC_REG_LVT_PC], eax
391htg_nopc:
392 mov eax, [rbx + APIC_REG_VERSION]
393 shr eax, 16
394 push rax
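 ; (AL now holds the Max LVT Entry field of the version register: below 5
 ; there is no thermal LVT, exactly 5 means thermal but no CMCI LVT. AH bit 7
 ; is version register bit 31, the AMD extended APIC space flag tested below.)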
395 cmp al, 5
396 jb htg_notherm
397 je htg_nocmci
398 mov eax, [rbx + APIC_REG_LVT_CMCI]
399 mov ecx, eax
400 and ecx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
401 cmp ecx, APIC_REG_LVT_MODE_NMI
402 jne htg_nocmci
403 or edi, 0x10
404 or eax, APIC_REG_LVT_MASKED
405 mov [rbx + APIC_REG_LVT_CMCI], eax
406htg_nocmci:
407 mov eax, [rbx + APIC_REG_LVT_THMR]
408 mov ecx, eax
409 and ecx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
410 cmp ecx, APIC_REG_LVT_MODE_NMI
411 jne htg_notherm
412 or edi, 0x08
413 or eax, APIC_REG_LVT_MASKED
414 mov [rbx + APIC_REG_LVT_THMR], eax
415htg_notherm:
416 pop rax
417 test ah, ah
418 jns htg_noeilvt
419
420 ; AMD Extended LVT registers
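 ; (The extended APIC feature register at offset 0x400 holds the extended LVT
 ; count in bits 23:16; the EILVT registers are 0x10 bytes apart, hence the
 ; 'add rbx, 0x10' in the loop below.)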
421 mov esi, [rbx + 0x400]
422 shr esi, 16
423 and esi, 0xff
424 jz htg_noeilvt
425 mov ebp, 0x20
426htg_tsteilvtx:
427 mov eax, [rbx + APIC_REG_EILVT0]
428 mov ecx, eax
429 and ecx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
430 cmp ecx, APIC_REG_LVT_MODE_NMI
431 jne htg_noeilvtx
432 or edi, ebp
433 or eax, APIC_REG_LVT_MASKED
434 mov [rbx + APIC_REG_EILVT0], eax
435htg_noeilvtx:
436 add rbx, 0x10 ; clobbers rbx!
437 shl ebp, 1
438 dec esi
439 jnz htg_tsteilvtx
440
441htg_noeilvt:
442 mov [rdx + r8 + CPUMCPU.fApicDisVectors], edi
443 jmp htg_apic_done
444
445 ; x2APIC mode:
446htg_x2apic:
447 mov r15, rdx ; save rdx
448 xor edi, edi ; fApicDisVectors
449
450 mov ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_LINT0 >> 4)
451 rdmsr
452 mov ebx, eax
453 and ebx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
454 cmp ebx, APIC_REG_LVT_MODE_NMI
455 jne htg_x2_nolint0
456 or edi, 0x01
457 or eax, APIC_REG_LVT_MASKED
458 wrmsr
459htg_x2_nolint0:
460 mov ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_LINT1 >> 4)
461 rdmsr
462 mov ebx, eax
463 and ebx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
464 cmp ebx, APIC_REG_LVT_MODE_NMI
465 jne htg_x2_nolint1
466 or edi, 0x02
467 or eax, APIC_REG_LVT_MASKED
468 wrmsr
469htg_x2_nolint1:
470 mov ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_PC >> 4)
471 rdmsr
472 mov ebx, eax
473 and ebx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
474 cmp ebx, APIC_REG_LVT_MODE_NMI
475 jne htg_x2_nopc
476 or edi, 0x04
477 or eax, APIC_REG_LVT_MASKED
478 wrmsr
479htg_x2_nopc:
480 mov ecx, MSR_IA32_X2APIC_START + (APIC_REG_VERSION >> 4)
481 rdmsr
482 shr eax, 16
483 cmp al, 5
484 jb htg_x2_notherm
485 je htg_x2_nocmci
486 mov ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_CMCI >> 4)
487 rdmsr
488 mov ebx, eax
489 and ebx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
490 cmp ebx, APIC_REG_LVT_MODE_NMI
491 jne htg_x2_nocmci
492 or edi, 0x10
493 or eax, APIC_REG_LVT_MASKED
494 wrmsr
495htg_x2_nocmci:
496 mov ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_THMR >> 4)
497 rdmsr
498 mov ebx, eax
499 and ebx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
500 cmp ebx, APIC_REG_LVT_MODE_NMI
501 jne htg_x2_notherm
502 or edi, 0x08
503 or eax, APIC_REG_LVT_MASKED
504 wrmsr
505htg_x2_notherm:
506 mov rdx, r15
507 mov [rdx + r8 + CPUMCPU.fApicDisVectors], edi
508htg_apic_done:
509
510%endif ; VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
511
512 FIXUP FIX_NO_SYSENTER_JMP, 0, htg_no_sysenter - NAME(Start) ; this will insert a jmp htg_no_sysenter if host doesn't use sysenter.
513 ; save MSR_IA32_SYSENTER_CS register.
514 mov rbx, rdx ; save edx
515 mov ecx, MSR_IA32_SYSENTER_CS
516 rdmsr ; edx:eax <- MSR[ecx]
517 mov [rbx + r8 + CPUMCPU.Host.SysEnter.cs], eax
518 mov [rbx + r8 + CPUMCPU.Host.SysEnter.cs + 4], edx
519 xor eax, eax ; load 0:0 to cause #GP upon sysenter
520 xor edx, edx
521 wrmsr
522 mov rdx, rbx ; restore edx
523 jmp short htg_no_sysenter
524
525ALIGNCODE(16)
526htg_no_sysenter:
527
528 ;; handle use flags.
529 mov esi, [rdx + r8 + CPUMCPU.fUseFlags] ; esi == use flags.
530 and esi, ~CPUM_USED_FPU ; Clear CPUM_USED_* flags. ;;@todo FPU check can be optimized to use cr0 flags!
531 mov [rdx + r8 + CPUMCPU.fUseFlags], esi
532
533 ; debug registers.
534 test esi, CPUM_USE_DEBUG_REGS_HYPER | CPUM_USE_DEBUG_REGS_HOST
535 jnz htg_debug_regs_save
536htg_debug_regs_no:
537 DEBUG_CHAR('a') ; trashes esi
538
539 ; control registers.
540 mov rax, cr0
541 mov [rdx + r8 + CPUMCPU.Host.cr0], rax
542 ;mov rax, cr2 ; assume the host OS doesn't stuff things in cr2. (safe)
543 ;mov [rdx + r8 + CPUMCPU.Host.cr2], rax
544 mov rax, cr3
545 mov [rdx + r8 + CPUMCPU.Host.cr3], rax
546 mov rax, cr4
547 mov [rdx + r8 + CPUMCPU.Host.cr4], rax
548
549 ;;
550 ;; Start switching to VMM context.
551 ;;
552
553 ;
554 ; Change CR0 and CR4 so we can correctly emulate FPU/MMX/SSE[23] exceptions
555 ; Also disable WP. (eax==cr4 now)
556 ; Note! X86_CR4_PSE and X86_CR4_PAE are important if the host thinks so :-)
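 ; Note! TS and MP are set while only the guest's EM bit is kept, so the
 ; first FPU use traps (#NM), which presumably lets the FPU state be
 ; switched lazily (cf. the CPUM_USED_FPU handling on the way back).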
557 ;
558 and rax, X86_CR4_MCE | X86_CR4_PSE | X86_CR4_PAE
559 mov ecx, [rdx + r8 + CPUMCPU.Guest.cr4]
560 DEBUG_CHAR('b') ; trashes esi
561 ;; @todo Switcher cleanup: Determine base CR4 during CPUMR0Init / VMMR3SelectSwitcher putting it
562 ; in CPUMCPU.Hyper.cr4 (which isn't currently being used). That should
563 ; simplify this operation a bit (and improve locality of the data).
564
565 ;
566 ; CR4.AndMask and CR4.OrMask are set in CPUMR3Init based on the presence of
567 ; FXSAVE and XSAVE support on the host CPU
568 ;
569 and ecx, [rdx + CPUM.CR4.AndMask]
570 or eax, ecx
571 or eax, [rdx + CPUM.CR4.OrMask]
572 mov cr4, rax
573 DEBUG_CHAR('c') ; trashes esi
574
575 mov eax, [rdx + r8 + CPUMCPU.Guest.cr0]
576 and eax, X86_CR0_EM
577 or eax, X86_CR0_PE | X86_CR0_PG | X86_CR0_TS | X86_CR0_ET | X86_CR0_NE | X86_CR0_MP
578 mov cr0, rax
579 DEBUG_CHAR('0') ; trashes esi
580
581
582 ; Load new gdt so we can do far jump to guest code after cr3 reload.
583 lgdt [rdx + r8 + CPUMCPU.Hyper.gdtr]
584 DEBUG_CHAR('1') ; trashes esi
585
586 ; Store the hypervisor cr3 for later loading
587 mov ebp, [rdx + r8 + CPUMCPU.Hyper.cr3]
588
589 ;;
590 ;; Load Intermediate memory context.
591 ;;
592 FIXUP FIX_INTER_AMD64_CR3, 1
593 mov eax, 0ffffffffh
594 mov cr3, rax
595 DEBUG_CHAR('2') ; trashes esi
596
597 ;;
598 ;; 1. Switch to compatibility mode, placing ourselves in identity mapped code.
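;; (The intermediate context presumably maps this code both at its host
;; address and identity-mapped at its physical address, so paging can be
;; reconfigured below without invalidating the instruction pointer.)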
599 ;;
600 jmp far [NAME(fpIDEnterTarget) wrt rip]
601
602; 16:32 Pointer to IDEnterTarget.
603NAME(fpIDEnterTarget):
604 FIXUP FIX_ID_32BIT, 0, NAME(IDEnterTarget) - NAME(Start)
605dd 0
606 FIXUP FIX_HYPER_CS, 0
607dd 0
608
609
610;;
611; Detour for saving the host DR7 and DR6.
612; esi and rdx must be preserved.
613htg_debug_regs_save:
614DEBUG_S_CHAR('s');
615 mov rax, dr7 ; not sure, but if I read the docs right this will trap if GD is set. FIXME!!!
616 mov [rdx + r8 + CPUMCPU.Host.dr7], rax
617 mov ecx, X86_DR7_INIT_VAL
618 cmp eax, ecx
619 je .htg_debug_regs_dr7_disabled
620 mov dr7, rcx
621.htg_debug_regs_dr7_disabled:
622 mov rax, dr6 ; just in case we save the state register too.
623 mov [rdx + r8 + CPUMCPU.Host.dr6], rax
624 ; save host DR0-3?
625 test esi, CPUM_USE_DEBUG_REGS_HYPER
626 jz htg_debug_regs_no
627DEBUG_S_CHAR('S');
628 mov rax, dr0
629 mov [rdx + r8 + CPUMCPU.Host.dr0], rax
630 mov rbx, dr1
631 mov [rdx + r8 + CPUMCPU.Host.dr1], rbx
632 mov rcx, dr2
633 mov [rdx + r8 + CPUMCPU.Host.dr2], rcx
634 mov rax, dr3
635 mov [rdx + r8 + CPUMCPU.Host.dr3], rax
636 or dword [rdx + r8 + CPUMCPU.fUseFlags], CPUM_USED_DEBUG_REGS_HOST
637 jmp htg_debug_regs_no
638
639
640 ; We're now on identity mapped pages in 32-bit compatibility mode.
641BITS 32
642ALIGNCODE(16)
643GLOBALNAME IDEnterTarget
644 DEBUG_CHAR('3')
645
646 ; 2. Deactivate long mode by turning off paging.
647 mov ebx, cr0
648 and ebx, ~X86_CR0_PG
649 mov cr0, ebx
650 DEBUG_CHAR('4')
651
652 ; 3. Load intermediate page table.
653 FIXUP SWITCHER_FIX_INTER_CR3_GC, 1
654 mov edx, 0ffffffffh
655 mov cr3, edx
656
657 ; 4. Disable long mode.
658 ; We also use the chance to disable syscall/sysret and fast fxsave/fxrstor.
659 mov ecx, MSR_K6_EFER
660 rdmsr
661 DEBUG_CHAR('5')
662 and eax, ~(MSR_K6_EFER_LME | MSR_K6_EFER_SCE | MSR_K6_EFER_FFXSR)
663 wrmsr
664 DEBUG_CHAR('6')
665
666%ifndef SWITCHER_TO_PAE
667 ; 4b. Disable PAE.
668 mov eax, cr4
669 and eax, ~X86_CR4_PAE
670 mov cr4, eax
671%else
672%endif
673
674 ; 5. Enable paging.
675 or ebx, X86_CR0_PG
676 mov cr0, ebx
677 jmp short just_a_jump
678just_a_jump:
679 DEBUG_CHAR('7')
680
681 ;;
682 ;; 6. Jump to guest code mapping of the code and load the Hypervisor CS.
683 ;;
684 FIXUP FIX_ID_2_GC_NEAR_REL, 1, NAME(JmpGCTarget) - NAME(Start)
685 jmp near NAME(JmpGCTarget)
686
687
688 ;;
689 ;; When we arrive at this label we're at the
690 ;; guest code mapping of the switching code.
691 ;;
692ALIGNCODE(16)
693GLOBALNAME JmpGCTarget
694 DEBUG_CHAR('-')
695 ; load final cr3 and do far jump to load cs.
696 mov cr3, ebp ; ebp set above
697 DEBUG_CHAR('0')
698
699 ;;
700 ;; We're in VMM MMU context and VMM CS is loaded.
701 ;; Setup the rest of the VMM state.
702 ;;
703 ; Load selectors
704 DEBUG_CHAR('1')
705 FIXUP FIX_HYPER_DS, 1
706 mov eax, 0ffffh
707 mov ds, eax
708 mov es, eax
709 xor eax, eax
710 mov gs, eax
711 mov fs, eax
712 ; Load pCpum into EDX
713 FIXUP FIX_GC_CPUMCPU_OFF, 1, 0
714 mov edx, 0ffffffffh
715 ; Activate guest IDT
716 DEBUG_CHAR('2')
717 lidt [edx + CPUMCPU.Hyper.idtr]
718
719 ; Setup the stack.
720 DEBUG_CHAR('3')
721 mov ax, [edx + CPUMCPU.Hyper.ss.Sel]
722 mov ss, ax
723 mov esp, [edx + CPUMCPU.Hyper.esp]
724
725 ; Restore TSS selector; must mark it as not busy before using ltr (!)
726 DEBUG_S_CHAR('4')
727 FIXUP FIX_GC_TSS_GDTE_DW2, 2
728 and dword [0ffffffffh], ~0200h ; clear busy flag (2nd type2 bit)
729 DEBUG_S_CHAR('5')
730 ltr word [edx + CPUMCPU.Hyper.tr.Sel]
731 DEBUG_S_CHAR('6')
732
733 ; Activate the ldt (now we can safely crash).
734 lldt [edx + CPUMCPU.Hyper.ldtr.Sel]
735 DEBUG_S_CHAR('7')
736
737 ;; Use flags.
738 mov esi, [edx + CPUMCPU.fUseFlags]
739
740 ; debug registers
741 test esi, CPUM_USE_DEBUG_REGS_HYPER
742 jnz htg_debug_regs_guest
743htg_debug_regs_guest_done:
744 DEBUG_S_CHAR('9')
745
746 ; General registers (sans edx).
747 mov eax, [edx + CPUMCPU.Hyper.eax]
748 mov ebx, [edx + CPUMCPU.Hyper.ebx]
749 mov ecx, [edx + CPUMCPU.Hyper.ecx]
750 mov ebp, [edx + CPUMCPU.Hyper.ebp]
751 mov esi, [edx + CPUMCPU.Hyper.esi]
752 mov edi, [edx + CPUMCPU.Hyper.edi]
753 DEBUG_S_CHAR('!')
754
755 ;;
756 ;; Return to the VMM code which either called the switcher or
757 ;; the code set up to run by HC.
758 ;;
759 push dword [edx + CPUMCPU.Hyper.eflags]
760 push cs
761 push dword [edx + CPUMCPU.Hyper.eip]
762 mov edx, [edx + CPUMCPU.Hyper.edx] ; !! edx is no longer pointing to CPUMCPU here !!
763
764%ifdef DEBUG_STUFF
765 COM32_S_PRINT ';eip='
766 push eax
767 mov eax, [esp + 8]
768 COM32_S_DWORD_REG eax
769 pop eax
770 COM32_S_CHAR ';'
771%endif
772%ifdef VBOX_WITH_STATISTICS
773 push eax
774 FIXUP FIX_GC_VM_OFF, 1, VM.StatSwitcherToGC
775 mov eax, 0ffffffffh
776 STAM32_PROFILE_ADV_STOP eax
777 pop eax
778%endif
779
780 iret ; Use iret to make debugging and TF/RF work.
781
782;;
783; Detour for saving host DR0-3 and loading hypervisor debug registers.
784; esi and edx must be preserved.
785htg_debug_regs_guest:
786 DEBUG_S_CHAR('D')
787 DEBUG_S_CHAR('R')
788 DEBUG_S_CHAR('x')
789 ; load hyper DR0-7
790 mov ebx, [edx + CPUMCPU.Hyper.dr]
791 mov dr0, ebx
792 mov ecx, [edx + CPUMCPU.Hyper.dr + 8*1]
793 mov dr1, ecx
794 mov eax, [edx + CPUMCPU.Hyper.dr + 8*2]
795 mov dr2, eax
796 mov ebx, [edx + CPUMCPU.Hyper.dr + 8*3]
797 mov dr3, ebx
798 mov ecx, X86_DR6_INIT_VAL
799 mov dr6, ecx
800 mov eax, [edx + CPUMCPU.Hyper.dr + 8*7]
801 mov dr7, eax
802 or dword [edx + CPUMCPU.fUseFlags], CPUM_USED_DEBUG_REGS_HYPER
803 jmp htg_debug_regs_guest_done
804
805ENDPROC vmmR0ToRawModeAsm
806
807
808;;
809; Trampoline for doing a call when starting the hypervisor execution.
810;
811; Push any arguments to the routine.
812; Push the argument frame size (cArg * 4).
813; Push the call target (_cdecl convention).
814; Push the address of this routine.
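; On entry the code below pops the call target into eax and the argument
; frame size into edi; the arguments are left on the stack for the callee
; and removed afterwards with 'add esp, edi'.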
815;
816;
817ALIGNCODE(16)
818BEGINPROC vmmRCCallTrampoline
819%ifdef DEBUG_STUFF
820 COM32_S_CHAR 'c'
821 COM32_S_CHAR 't'
822 COM32_S_CHAR '!'
823%endif
824
825 ; call routine
826 pop eax ; call address
827 pop edi ; argument count.
828%ifdef DEBUG_STUFF
829 COM32_S_PRINT ';eax='
830 COM32_S_DWORD_REG eax
831 COM32_S_CHAR ';'
832%endif
833 call eax ; do call
834 add esp, edi ; cleanup stack
835
836 ; return to the host context (eax = C return code).
837%ifdef DEBUG_STUFF
838 COM32_S_CHAR '`'
839%endif
840.to_host_again:
841 call NAME(vmmRCToHostAsm)
842 mov eax, VERR_VMM_SWITCHER_IPE_1
843 jmp .to_host_again
844ENDPROC vmmRCCallTrampoline
845
846
847
848;;
849; The C interface.
850;
851ALIGNCODE(16)
852BEGINPROC vmmRCToHost
853%ifdef DEBUG_STUFF
854 push esi
855 COM_NEWLINE
856 DEBUG_CHAR('b')
857 DEBUG_CHAR('a')
858 DEBUG_CHAR('c')
859 DEBUG_CHAR('k')
860 DEBUG_CHAR('!')
861 COM_NEWLINE
862 pop esi
863%endif
864 mov eax, [esp + 4]
865 jmp NAME(vmmRCToHostAsm)
866ENDPROC vmmRCToHost
867
868
869;;
870; vmmRCToHostAsmNoReturn
871;
872; This is an entry point used by TRPM when dealing with raw-mode traps,
873; i.e. traps in the hypervisor code. This will not return and saves no
874; state, because the caller has already saved the state.
875;
876; @param eax Return code.
877;
878ALIGNCODE(16)
879BEGINPROC vmmRCToHostAsmNoReturn
880 DEBUG_S_CHAR('%')
881
882%ifdef VBOX_WITH_STATISTICS
883 FIXUP FIX_GC_VM_OFF, 1, VM.StatTotalInGC
884 mov edx, 0ffffffffh
885 STAM32_PROFILE_ADV_STOP edx
886
887 FIXUP FIX_GC_VM_OFF, 1, VM.StatTotalGCToQemu
888 mov edx, 0ffffffffh
889 STAM32_PROFILE_ADV_START edx
890
891 FIXUP FIX_GC_VM_OFF, 1, VM.StatSwitcherToHC
892 mov edx, 0ffffffffh
893 STAM32_PROFILE_ADV_START edx
894%endif
895
896 FIXUP FIX_GC_CPUMCPU_OFF, 1, 0
897 mov edx, 0ffffffffh
898
899 jmp vmmRCToHostAsm_SaveNoGeneralRegs
900ENDPROC vmmRCToHostAsmNoReturn
901
902
903;;
904; vmmRCToHostAsm
905;
906; This is an entry point used by TRPM to return to host context when an
907; interrupt occurred or a guest trap needs handling in host context. It
908; is also used by the C interface above.
909;
910; The hypervisor context is saved and it will return to the caller in
911; host context if it so desires.
912;
913; @param eax Return code.
914; @uses eax, edx, ecx (or it may use them in the future)
915;
916ALIGNCODE(16)
917BEGINPROC vmmRCToHostAsm
918 DEBUG_S_CHAR('%')
919 push edx
920
921%ifdef VBOX_WITH_STATISTICS
922 FIXUP FIX_GC_VM_OFF, 1, VM.StatTotalInGC
923 mov edx, 0ffffffffh
924 STAM32_PROFILE_ADV_STOP edx
925
926 FIXUP FIX_GC_VM_OFF, 1, VM.StatTotalGCToQemu
927 mov edx, 0ffffffffh
928 STAM32_PROFILE_ADV_START edx
929
930 FIXUP FIX_GC_VM_OFF, 1, VM.StatSwitcherToHC
931 mov edx, 0ffffffffh
932 STAM32_PROFILE_ADV_START edx
933%endif
934
935 ;
936 ; Load the CPUM pointer.
937 ;
938 FIXUP FIX_GC_CPUMCPU_OFF, 1, 0
939 mov edx, 0ffffffffh
940
941 ; Save register context.
942 pop dword [edx + CPUMCPU.Hyper.edx]
943 pop dword [edx + CPUMCPU.Hyper.eip] ; call return from stack
944 mov dword [edx + CPUMCPU.Hyper.esp], esp
945 mov dword [edx + CPUMCPU.Hyper.eax], eax
946 mov dword [edx + CPUMCPU.Hyper.ebx], ebx
947 mov dword [edx + CPUMCPU.Hyper.ecx], ecx
948 mov dword [edx + CPUMCPU.Hyper.esi], esi
949 mov dword [edx + CPUMCPU.Hyper.edi], edi
950 mov dword [edx + CPUMCPU.Hyper.ebp], ebp
951
952 ; special registers which may change.
953vmmRCToHostAsm_SaveNoGeneralRegs:
954%ifdef STRICT_IF
955 pushf
956 pop ecx
957 test ecx, X86_EFL_IF
958 jz .if_clear_out
959 mov eax, 0c0ffee01h
960 cli
961.if_clear_out:
962%endif
963 mov edi, eax ; save return code in EDI (careful with COM_DWORD_REG from here on!)
964
965 ; str [edx + CPUMCPU.Hyper.tr] - double fault only, and it won't be right then either.
966 sldt [edx + CPUMCPU.Hyper.ldtr.Sel]
967
968 ; No need to save CRx here. They are set dynamically according to Guest/Host requirements.
969 ; FPU context is saved/restored later, in the host-restore branch (the CPUM_USED_FPU handling after HCExitTarget).
970
971 ; Disable debug registers if active so they cannot trigger while switching.
972 test dword [edx + CPUMCPU.fUseFlags], CPUM_USED_DEBUG_REGS_HYPER
973 jz .gth_disabled_dr7
974 mov eax, X86_DR7_INIT_VAL
975 mov dr7, eax
976.gth_disabled_dr7:
977
978 ;;
979 ;; Load Intermediate memory context.
980 ;;
981 FIXUP SWITCHER_FIX_INTER_CR3_GC, 1
982 mov eax, 0ffffffffh
983 mov cr3, eax
984 DEBUG_CHAR('?')
985
986 ;; We're now in intermediate memory context!
987
988 ;;
989 ;; 0. Jump to identity mapped location
990 ;;
991 FIXUP FIX_GC_2_ID_NEAR_REL, 1, NAME(IDExitTarget) - NAME(Start)
992 jmp near NAME(IDExitTarget)
993
994 ; We're now on identity mapped pages!
995ALIGNCODE(16)
996GLOBALNAME IDExitTarget
997 DEBUG_CHAR('1')
998
999 ; 1. Disable paging.
1000 mov ebx, cr0
1001 and ebx, ~X86_CR0_PG
1002 mov cr0, ebx
1003 DEBUG_CHAR('2')
1004
1005 ; 2. Enable PAE.
1006%ifdef SWITCHER_TO_PAE
1007 ; - already enabled
1008%else
1009 mov ecx, cr4
1010 or ecx, X86_CR4_PAE
1011 mov cr4, ecx
1012%endif
1013
1014 ; 3. Load long mode intermediate CR3.
1015 FIXUP FIX_INTER_AMD64_CR3, 1
1016 mov ecx, 0ffffffffh
1017 mov cr3, ecx
1018 DEBUG_CHAR('3')
1019
1020 ; 4. Enable long mode.
1021 mov ebp, edx
1022 mov ecx, MSR_K6_EFER
1023 rdmsr
1024 or eax, MSR_K6_EFER_LME
1025 wrmsr
1026 mov edx, ebp
1027 DEBUG_CHAR('4')
1028
1029 ; 5. Enable paging.
1030 or ebx, X86_CR0_PG
1031 mov cr0, ebx
1032 DEBUG_CHAR('5')
1033
1034 ; Jump from compatibility mode to 64-bit mode.
1035 FIXUP FIX_ID_FAR32_TO_64BIT_MODE, 1, NAME(IDExit64Mode) - NAME(Start)
1036 jmp 0ffffh:0fffffffeh
1037
1038 ;
1039 ; We're in 64-bit mode (ds, ss, es, fs, gs are all bogus).
1040 ; Move on to the HC mapping.
1041 ;
1042BITS 64
1043ALIGNCODE(16)
1044NAME(IDExit64Mode):
1045 DEBUG_CHAR('6')
1046 jmp [NAME(pHCExitTarget) wrt rip]
1047
1048; 64-bit jump target
1049NAME(pHCExitTarget):
1050FIXUP FIX_HC_64BIT, 0, NAME(HCExitTarget) - NAME(Start)
1051dq 0ffffffffffffffffh
1052
1053; 64-bit pCpum address.
1054NAME(pCpumHC):
1055FIXUP FIX_HC_64BIT_CPUM, 0
1056dq 0ffffffffffffffffh
1057
1058 ;
1059 ; When we arrive here we're at the host context
1060 ; mapping of the switcher code.
1061 ;
1062ALIGNCODE(16)
1063GLOBALNAME HCExitTarget
1064 DEBUG_CHAR('9')
1065
1066 ; Clear high dword of the CPUMCPU pointer
1067 and rdx, 0ffffffffh
1068
1069 ; load final cr3
1070 mov rsi, [rdx + CPUMCPU.Host.cr3]
1071 mov cr3, rsi
1072 DEBUG_CHAR('@')
1073
1074 ;;
1075 ;; Restore Host context.
1076 ;;
1077 ; Load CPUM pointer into edx
1078 mov rdx, [NAME(pCpumHC) wrt rip]
1079 ; Load the CPUMCPU offset.
1080 mov r8d, [rdx + CPUM.offCPUMCPU0]
1081
1082 ; activate host gdt and idt
1083 lgdt [rdx + r8 + CPUMCPU.Host.gdtr]
1084 DEBUG_CHAR('0')
1085 lidt [rdx + r8 + CPUMCPU.Host.idtr]
1086 DEBUG_CHAR('1')
1087 ; Restore TSS selector; must mark it as not busy before using ltr (!)
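 ; (The memory image stored by sgdt is a 2-byte limit followed by the base
 ; address, hence the '+ 2' when fetching the GDT base below.)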
1088%if 1 ; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p)
1089 movzx eax, word [rdx + r8 + CPUMCPU.Host.tr] ; eax <- TR
1090 and al, 0F8h ; mask away TI and RPL bits, get descriptor offset.
1091 add rax, [rdx + r8 + CPUMCPU.Host.gdtr + 2] ; eax <- GDTR.address + descriptor offset.
1092 and dword [rax + 4], ~0200h ; clear busy flag (2nd type2 bit)
1093 ltr word [rdx + r8 + CPUMCPU.Host.tr]
1094%else
1095 movzx eax, word [rdx + r8 + CPUMCPU.Host.tr] ; eax <- TR
1096 and al, 0F8h ; mask away TI and RPL bits, get descriptor offset.
1097 add rax, [rdx + r8 + CPUMCPU.Host.gdtr + 2] ; eax <- GDTR.address + descriptor offset.
1098 mov ecx, [rax + 4] ; ecx <- 2nd descriptor dword
1099 mov ebx, ecx ; save original value
1100 and ecx, ~0200h ; clear busy flag (2nd type2 bit)
1101 mov [rax + 4], ecx ; not using xchg here is paranoia..
1102 ltr word [rdx + r8 + CPUMCPU.Host.tr]
1103 xchg [rax + 4], ebx ; using xchg is paranoia too...
1104%endif
1105 ; activate ldt
1106 DEBUG_CHAR('2')
1107 lldt [rdx + r8 + CPUMCPU.Host.ldtr]
1108 ; Restore segment registers
1109 mov eax, [rdx + r8 + CPUMCPU.Host.ds]
1110 mov ds, eax
1111 mov eax, [rdx + r8 + CPUMCPU.Host.es]
1112 mov es, eax
1113 mov eax, [rdx + r8 + CPUMCPU.Host.fs]
1114 mov fs, eax
1115 mov eax, [rdx + r8 + CPUMCPU.Host.gs]
1116 mov gs, eax
1117 ; restore stack
1118 mov eax, [rdx + r8 + CPUMCPU.Host.ss]
1119 mov ss, eax
1120 mov rsp, [rdx + r8 + CPUMCPU.Host.rsp]
1121
1122 FIXUP FIX_NO_SYSENTER_JMP, 0, gth_sysenter_no - NAME(Start) ; this will insert a jmp gth_sysenter_no if host doesn't use sysenter.
1123 ; restore MSR_IA32_SYSENTER_CS register.
1124 mov rbx, rdx ; save edx
1125 mov ecx, MSR_IA32_SYSENTER_CS
1126 mov eax, [rbx + r8 + CPUMCPU.Host.SysEnter.cs]
1127 mov edx, [rbx + r8 + CPUMCPU.Host.SysEnter.cs + 4]
1128 wrmsr ; MSR[ecx] <- edx:eax
1129 mov rdx, rbx ; restore edx
1130 jmp short gth_sysenter_no
1131
1132ALIGNCODE(16)
1133gth_sysenter_no:
1134
1135 ;; @todo AMD syscall
1136
1137 ; Restore FPU if guest has used it.
1138 ; Using fxrstor should ensure that we're not causing unwanted exceptions on the host.
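 ; If the saved extended-state mask (fXStateMask) is non-zero, XSAVE/XRSTOR
 ; is used with the mask in edx:eax; otherwise plain FXSAVE/FXRSTOR is used.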
1139 mov esi, [rdx + r8 + CPUMCPU.fUseFlags] ; esi == use flags.
1140 test esi, CPUM_USED_FPU
1141 jz gth_fpu_no
1142 mov rcx, cr0
1143 and rcx, ~(X86_CR0_TS | X86_CR0_EM)
1144 mov cr0, rcx
1145
1146 mov r10, rdx ; Save rdx.
1147
1148 mov eax, [r10 + r8 + CPUMCPU.Guest.fXStateMask]
1149 mov r9, [r10 + r8 + CPUMCPU.Guest.pXStateR0]
1150 or eax, eax
1151 jz gth_fpu_guest_fxsave
1152 mov edx, [r10 + r8 + CPUMCPU.Guest.fXStateMask + 4]
1153 xsave [r9]
1154 jmp gth_fpu_host
1155gth_fpu_guest_fxsave:
1156 fxsave [r9]
1157
1158gth_fpu_host:
1159 mov eax, [r10 + r8 + CPUMCPU.Host.fXStateMask]
1160 mov r9, [r10 + r8 + CPUMCPU.Host.pXStateR0]
1161 or eax, eax
1162 jz gth_fpu_host_fxrstor
1163 mov edx, [r10 + r8 + CPUMCPU.Host.fXStateMask + 4]
1164 xrstor [r9] ; We saved 32-bit state, so only restore 32-bit.
1165 jmp gth_fpu_done
1166gth_fpu_host_fxrstor:
1167 fxrstor [r9] ; We saved 32-bit state, so only restore 32-bit.
1168
1169gth_fpu_done:
1170 mov rdx, r10 ; Restore rdx.
1171 jmp gth_fpu_no
1172
1173ALIGNCODE(16)
1174gth_fpu_no:
1175
1176 ; Control registers.
1177 ; Would've liked to have these higher up in case of crashes, but
1178 ; the fpu stuff must be done before we restore cr0.
1179 mov rcx, [rdx + r8 + CPUMCPU.Host.cr4]
1180 test rcx, X86_CR4_PCIDE
1181 jz gth_no_pcide
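 ; (Setting CR4.PCIDE while CR3 still holds a non-zero PCID would #GP, so
 ; load CR3 with the PCID bits cleared first, then CR4, then the real CR3.)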
1182 mov rax, [rdx + r8 + CPUMCPU.Host.cr3]
1183 and rax, ~0xfff ; clear the PCID in cr3
1184 mov cr3, rax
1185 mov cr4, rcx
1186 mov rax, [rdx + r8 + CPUMCPU.Host.cr3]
1187 mov cr3, rax ; reload it with the right PCID.
1188 jmp gth_restored_cr4
1189gth_no_pcide:
1190 mov cr4, rcx
1191gth_restored_cr4:
1192 mov rcx, [rdx + r8 + CPUMCPU.Host.cr0]
1193 mov cr0, rcx
1194 ;mov rcx, [rdx + r8 + CPUMCPU.Host.cr2] ; assumes this is waste of time.
1195 ;mov cr2, rcx
1196
1197 ; Restore MSRs
1198 mov rbx, rdx
1199 mov ecx, MSR_K8_FS_BASE
1200 mov eax, [rbx + r8 + CPUMCPU.Host.FSbase]
1201 mov edx, [rbx + r8 + CPUMCPU.Host.FSbase + 4]
1202 wrmsr
1203 mov ecx, MSR_K8_GS_BASE
1204 mov eax, [rbx + r8 + CPUMCPU.Host.GSbase]
1205 mov edx, [rbx + r8 + CPUMCPU.Host.GSbase + 4]
1206 wrmsr
1207 mov ecx, MSR_K6_EFER
1208 mov eax, [rbx + r8 + CPUMCPU.Host.efer]
1209 mov edx, [rbx + r8 + CPUMCPU.Host.efer + 4]
1210 wrmsr
1211 mov rdx, rbx
1212
1213 ; Restore debug registers (if modified). (ESI must still be fUseFlags! Must be done late, at least after CR4!)
1214 test esi, CPUM_USE_DEBUG_REGS_HOST | CPUM_USED_DEBUG_REGS_HOST | CPUM_USE_DEBUG_REGS_HYPER
1215 jnz gth_debug_regs_restore
1216gth_debug_regs_done:
1217 and dword [rdx + r8 + CPUMCPU.fUseFlags], ~(CPUM_USED_DEBUG_REGS_HOST | CPUM_USED_DEBUG_REGS_HYPER)
1218
1219 ; Restore general registers.
1220 mov eax, edi ; restore return code. eax = return code !!
1221 ; mov rax, [rdx + r8 + CPUMCPU.Host.rax] - scratch + return code
1222 mov rbx, [rdx + r8 + CPUMCPU.Host.rbx]
1223 ; mov rcx, [rdx + r8 + CPUMCPU.Host.rcx] - scratch
1224 ; mov rdx, [rdx + r8 + CPUMCPU.Host.rdx] - scratch
1225 mov rdi, [rdx + r8 + CPUMCPU.Host.rdi]
1226 mov rsi, [rdx + r8 + CPUMCPU.Host.rsi]
1227 mov rsp, [rdx + r8 + CPUMCPU.Host.rsp]
1228 mov rbp, [rdx + r8 + CPUMCPU.Host.rbp]
1229 ; mov r8, [rdx + r8 + CPUMCPU.Host.r8 ] - scratch
1230 ; mov r9, [rdx + r8 + CPUMCPU.Host.r9 ] - scratch
1231 mov r10, [rdx + r8 + CPUMCPU.Host.r10]
1232 mov r11, [rdx + r8 + CPUMCPU.Host.r11]
1233 mov r12, [rdx + r8 + CPUMCPU.Host.r12]
1234 mov r13, [rdx + r8 + CPUMCPU.Host.r13]
1235 mov r14, [rdx + r8 + CPUMCPU.Host.r14]
1236 mov r15, [rdx + r8 + CPUMCPU.Host.r15]
1237
1238 ; finally restore flags. (probably not required)
1239 push qword [rdx + r8 + CPUMCPU.Host.rflags]
1240 popf
1241
1242
1243%ifdef DEBUG_STUFF
1244 COM64_S_CHAR '4'
1245%endif
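 ; 048h is a REX.W prefix: the far return below pops a 64-bit RIP and CS,
 ; matching the qword CS + return address frame set up before
 ; vmmR0ToRawModeAsm was called (see vmmR0ToRawMode / the 32-bit thunk).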
1246 db 048h
1247 retf
1248
1249;;
1250; Detour for restoring the host debug registers.
1251; edx and edi must be preserved.
1252gth_debug_regs_restore:
1253 DEBUG_S_CHAR('d')
1254 mov rax, dr7 ; Some DR7 paranoia first...
1255 mov ecx, X86_DR7_INIT_VAL
1256 cmp rax, rcx
1257 je .gth_debug_skip_dr7_disabling
1258 mov dr7, rcx
1259.gth_debug_skip_dr7_disabling:
1260 test esi, CPUM_USED_DEBUG_REGS_HOST
1261 jz .gth_debug_regs_dr7
1262
1263 DEBUG_S_CHAR('r')
1264 mov rax, [rdx + r8 + CPUMCPU.Host.dr0]
1265 mov dr0, rax
1266 mov rbx, [rdx + r8 + CPUMCPU.Host.dr1]
1267 mov dr1, rbx
1268 mov rcx, [rdx + r8 + CPUMCPU.Host.dr2]
1269 mov dr2, rcx
1270 mov rax, [rdx + r8 + CPUMCPU.Host.dr3]
1271 mov dr3, rax
1272.gth_debug_regs_dr7:
1273 mov rbx, [rdx + r8 + CPUMCPU.Host.dr6]
1274 mov dr6, rbx
1275 mov rcx, [rdx + r8 + CPUMCPU.Host.dr7]
1276 mov dr7, rcx
1277
1278 ; We clear the USED flags in the main code path.
1279 jmp gth_debug_regs_done
1280
1281ENDPROC vmmRCToHostAsm
1282
1283
1284GLOBALNAME End
1285;
1286; The description string (in the text section).
1287;
1288NAME(Description):
1289 db SWITCHER_DESCRIPTION
1290 db 0
1291
1292extern NAME(Relocate)
1293
1294;
1295; End the fixup records.
1296;
1297BEGINDATA
1298 db FIX_THE_END ; final entry.
1299GLOBALNAME FixupsEnd
1300
1301;;
1302; The switcher definition structure.
1303ALIGNDATA(16)
1304GLOBALNAME Def
1305 istruc VMMSWITCHERDEF
1306 at VMMSWITCHERDEF.pvCode, RTCCPTR_DEF NAME(Start)
1307 at VMMSWITCHERDEF.pvFixups, RTCCPTR_DEF NAME(Fixups)
1308 at VMMSWITCHERDEF.pszDesc, RTCCPTR_DEF NAME(Description)
1309 at VMMSWITCHERDEF.pfnRelocate, RTCCPTR_DEF NAME(Relocate)
1310 at VMMSWITCHERDEF.enmType, dd SWITCHER_TYPE
1311 at VMMSWITCHERDEF.cbCode, dd NAME(End) - NAME(Start)
1312 at VMMSWITCHERDEF.offR0ToRawMode, dd NAME(vmmR0ToRawMode) - NAME(Start)
1313 at VMMSWITCHERDEF.offRCToHost, dd NAME(vmmRCToHost) - NAME(Start)
1314 at VMMSWITCHERDEF.offRCCallTrampoline, dd NAME(vmmRCCallTrampoline) - NAME(Start)
1315 at VMMSWITCHERDEF.offRCToHostAsm, dd NAME(vmmRCToHostAsm) - NAME(Start)
1316 at VMMSWITCHERDEF.offRCToHostAsmNoReturn, dd NAME(vmmRCToHostAsmNoReturn) - NAME(Start)
1317 ; disasm help
1318 at VMMSWITCHERDEF.offHCCode0, dd 0
1319 at VMMSWITCHERDEF.cbHCCode0, dd NAME(IDEnterTarget) - NAME(Start)
1320 at VMMSWITCHERDEF.offHCCode1, dd NAME(HCExitTarget) - NAME(Start)
1321 at VMMSWITCHERDEF.cbHCCode1, dd NAME(End) - NAME(HCExitTarget)
1322 at VMMSWITCHERDEF.offIDCode0, dd NAME(IDEnterTarget) - NAME(Start)
1323 at VMMSWITCHERDEF.cbIDCode0, dd NAME(JmpGCTarget) - NAME(IDEnterTarget)
1324 at VMMSWITCHERDEF.offIDCode1, dd NAME(IDExitTarget) - NAME(Start)
1325 at VMMSWITCHERDEF.cbIDCode1, dd NAME(HCExitTarget) - NAME(IDExitTarget)
1326 at VMMSWITCHERDEF.offGCCode, dd NAME(JmpGCTarget) - NAME(Start)
1327 at VMMSWITCHERDEF.cbGCCode, dd NAME(IDExitTarget) - NAME(JmpGCTarget)
1328
1329 iend
1330