VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMSwitcher/AMD64andLegacy.mac@ 53835

Last change on this file was r53835, checked in by vboxsync, 10 years ago:

VMMSwitcher: also mask NMI in APIC_REG_LVT_CMCI

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 37.3 KB
1; $Id: AMD64andLegacy.mac 53835 2015-01-15 20:39:49Z vboxsync $
2;; @file
3; VMM - World Switchers, template for AMD64 to PAE and 32-bit.
4;
5
6;
7; Copyright (C) 2006-2014 Oracle Corporation
8;
9; This file is part of VirtualBox Open Source Edition (OSE), as
10; available from http://www.virtualbox.org. This file is free software;
11; you can redistribute it and/or modify it under the terms of the GNU
12; General Public License (GPL) as published by the Free Software
13; Foundation, in version 2 as it comes in the "COPYING" file of the
14; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16;
17
18;%define DEBUG_STUFF 1
19;%define STRICT_IF 1
20
21;*******************************************************************************
22;* Header Files *
23;*******************************************************************************
24%include "VBox/asmdefs.mac"
25%include "VBox/apic.mac"
26%include "iprt/x86.mac"
27%include "VBox/vmm/cpum.mac"
28%include "VBox/vmm/stam.mac"
29%include "VBox/vmm/vm.mac"
30%include "VBox/err.mac"
31%include "CPUMInternal.mac"
32%include "VMMSwitcher.mac"
33
34
35;
36; Start the fixup records
37; We collect the fixups in the .data section as we go along
38; It is therefore VITAL that no-one is using the .data section
39; for anything else between 'Start' and 'End'.
40;
41BEGINDATA
42GLOBALNAME Fixups
43
44
45
46BEGINCODE
47GLOBALNAME Start
48
49%ifndef VBOX_WITH_HYBRID_32BIT_KERNEL
50BITS 64
51
52;;
53; The C interface.
54;
55; @param pVM gcc: rdi msc:rcx The VM handle.
56;
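; A hedged sketch of the assumed C-side declaration (not copied from the
; headers):
;     DECLASM(int) vmmR0ToRawMode(PVM pVM);
; With such a prototype pVM arrives in rcx under the Microsoft x64 calling
; convention and in rdi under the SysV convention, which is what the
; RT_OS_WINDOWS branch below handles.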
57BEGINPROC vmmR0ToRawMode
58%ifdef DEBUG_STUFF
59 COM64_S_NEWLINE
60 COM64_S_CHAR '^'
61%endif
62 ;
63 ; The ordinary version of the code.
64 ;
65
66 %ifdef STRICT_IF
67 pushf
68 pop rax
69 test eax, X86_EFL_IF
70 jz .if_clear_in
71 mov eax, 0c0ffee00h
72 ret
73.if_clear_in:
74 %endif
75
76 ;
77 ; make r9 = pVM and rdx = pCpum.
78 ; rax, rcx and r8 are scratch hereafter.
79 %ifdef RT_OS_WINDOWS
80 mov r9, rcx
81 %else
82 mov r9, rdi
83 %endif
84 lea rdx, [r9 + VM.cpum]
85
86 %ifdef VBOX_WITH_STATISTICS
87 ;
88 ; Switcher stats.
89 ;
90 lea r8, [r9 + VM.StatSwitcherToGC]
91 STAM64_PROFILE_ADV_START r8
92 %endif
93
94 ;
95 ; Call worker (far return).
96 ;
97 mov eax, cs
98 push rax
99 call NAME(vmmR0ToRawModeAsm)
100
101 %ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
102 ; Unblock Local APIC NMI vectors
103 ; Do this here to ensure the host CS is already restored
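 ; fApicDisVectors bit layout, as established by vmmR0ToRawModeAsm further
 ; down: bit 0 = LINT0, bit 1 = LINT1, bit 2 = performance counter,
 ; bit 3 = thermal, bit 4 = CMCI. Each 'shr ecx, 1' below moves the next
 ; bit into CF and the corresponding LVT is unmasked only if it was masked
 ; on the way in.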
104 mov r8d, [rdx + CPUM.offCPUMCPU0]
105 mov ecx, [rdx + r8 + CPUMCPU.fApicDisVectors]
106 test ecx, ecx
107 jz gth64_apic_done
108 cmp byte [rdx + r8 + CPUMCPU.fX2Apic], 1
109 je gth64_x2apic
110
111 ; Legacy APIC mode:
112 mov r8, [rdx + r8 + CPUMCPU.pvApicBase]
113 shr ecx, 1
114 jnc gth64_nolint0
115 and dword [r8 + APIC_REG_LVT_LINT0], ~APIC_REG_LVT_MASKED
116gth64_nolint0:
117 shr ecx, 1
118 jnc gth64_nolint1
119 and dword [r8 + APIC_REG_LVT_LINT1], ~APIC_REG_LVT_MASKED
120gth64_nolint1:
121 shr ecx, 1
122 jnc gth64_nopc
123 and dword [r8 + APIC_REG_LVT_PC], ~APIC_REG_LVT_MASKED
124gth64_nopc:
125 shr ecx, 1
126 jnc gth64_notherm
127 and dword [r8 + APIC_REG_LVT_THMR], ~APIC_REG_LVT_MASKED
128gth64_notherm:
129 shr ecx, 1
130 jnc gth64_nocmci
131 and dword [r8 + APIC_REG_LVT_CMCI], ~APIC_REG_LVT_MASKED
132gth64_nocmci:
133 jmp gth64_apic_done
134
135 ; X2 APIC mode:
136gth64_x2apic:
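 ; The x2APIC registers live in MSR space: the MSR index is
 ; MSR_IA32_X2APIC_START (0x800) plus the legacy MMIO offset divided by 16,
 ; hence the '>> 4' applied to the APIC_REG_LVT_* offsets below.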
137 mov r8, rax ; save rax
138 mov r10, rcx
139 shr r10d, 1
140 jnc gth64_x2_nolint0
141 mov ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_LINT0 >> 4)
142 rdmsr
143 and eax, ~APIC_REG_LVT_MASKED
144 wrmsr
145gth64_x2_nolint0:
146 shr r10d, 1
147 jnc gth64_x2_nolint1
148 mov ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_LINT1 >> 4)
149 rdmsr
150 and eax, ~APIC_REG_LVT_MASKED
151 wrmsr
152gth64_x2_nolint1:
153 shr r10d, 1
154 jnc gth64_x2_nopc
155 mov ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_PC >> 4)
156 rdmsr
157 and eax, ~APIC_REG_LVT_MASKED
158 wrmsr
159gth64_x2_nopc:
160 shr r10d, 1
161 jnc gth64_x2_notherm
162 mov ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_THMR >> 4)
163 rdmsr
164 and eax, ~APIC_REG_LVT_MASKED
165 wrmsr
166gth64_x2_notherm:
167 shr r10d, 1
168 jnc gth64_x2_nocmci
169 mov ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_CMCI >> 4)
170 rdmsr
171 and eax, ~APIC_REG_LVT_MASKED
172 wrmsr
173gth64_x2_nocmci:
174 mov rax, r8 ; restore rax
175
176gth64_apic_done:
177 %endif
178
179 %ifdef VBOX_WITH_STATISTICS
180 ;
181 ; Switcher stats.
182 ;
183 lea r8, [r9 + VM.StatSwitcherToGC]
184 STAM64_PROFILE_ADV_STOP r8
185 %endif
186
187 ret
188ENDPROC vmmR0ToRawMode
189
190
191%else ; VBOX_WITH_HYBRID_32BIT_KERNEL
192
193
194BITS 32
195
196;;
197; The C interface.
198;
199BEGINPROC vmmR0ToRawMode
200 %ifdef DEBUG_STUFF
201 COM32_S_NEWLINE
202 COM32_S_CHAR '^'
203 %endif
204
205 %ifdef VBOX_WITH_STATISTICS
206 ;
207 ; Switcher stats.
208 ;
209 FIXUP FIX_HC_VM_OFF, 1, VM.StatSwitcherToGC
210 mov edx, 0ffffffffh
211 STAM_PROFILE_ADV_START edx
212 %endif
213
214 ; Thunk to/from 64 bit when invoking the worker routine.
215 ;
216 FIXUP FIX_HC_VM_OFF, 1, VM.cpum
217 mov edx, 0ffffffffh
218
219 push 0
220 push cs
221 push 0
222 FIXUP FIX_HC_32BIT, 1, .vmmR0ToRawModeReturn - NAME(Start)
223 push 0ffffffffh
224
225 FIXUP FIX_HC_64BIT_CS, 1
226 push 0ffffh
227 FIXUP FIX_HC_32BIT, 1, NAME(vmmR0ToRawModeAsm) - NAME(Start)
228 push 0ffffffffh
229 retf
230.vmmR0ToRawModeReturn:
231
232 ;
233 ; This selector reloading is probably not necessary, but we do it anyway to be quite sure
234 ; the CPU has the right idea about the selectors.
235 ;
236 mov edx, ds
237 mov ds, edx
238 mov ecx, es
239 mov es, ecx
240 mov edx, ss
241 mov ss, edx
242
243 %ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
244 Missing implementation!
245 %endif
246
247
248 %ifdef VBOX_WITH_STATISTICS
249 ;
250 ; Switcher stats.
251 ;
252 FIXUP FIX_HC_VM_OFF, 1, VM.StatSwitcherToHC
253 mov edx, 0ffffffffh
254 STAM_PROFILE_ADV_STOP edx
255 %endif
256
257 ret
258ENDPROC vmmR0ToRawMode
259
260BITS 64
261%endif ;!VBOX_WITH_HYBRID_32BIT_KERNEL
262
263
264
265; *****************************************************************************
266; vmmR0ToRawModeAsm
267;
268; Phase one of the switch from host to guest context (host MMU context)
269;
270; INPUT:
271; - edx virtual address of CPUM structure (valid in host context)
272;
273; USES/DESTROYS:
274; - eax, ecx, edx, r8
275;
276; ASSUMPTION:
277; - current CS and DS selectors are wide open
278;
279; *****************************************************************************
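;
; Rough flow of the routine below (host MMU context -> hypervisor context):
;   1. Save the host general/segment registers, base MSRs, descriptor tables,
;      flags and (optionally) debug registers into CPUMCPU.Host.
;   2. Mask any Local APIC LVT entries routed as NMI (legacy MMIO or x2APIC MSRs).
;   3. Park MSR_IA32_SYSENTER_CS so a stray sysenter faults.
;   4. Load the switcher CR0/CR4, the hypervisor GDT and the intermediate CR3.
;   5. Drop to 32-bit compatibility mode on the identity mapping, leave long
;      mode, load the hypervisor CR3 and jump to the guest-context mapping.
;   6. Load the hypervisor selectors, IDT, TSS, LDT and registers, then iret
;      into the hypervisor.
;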
280ALIGNCODE(16)
281BEGINPROC vmmR0ToRawModeAsm
282 ;; Store the offset from CPUM to CPUMCPU in r8
283 mov r8d, [rdx + CPUM.offCPUMCPU0]
284
285 ;;
286 ;; Save CPU host context
287 ;; Skip eax, edx and ecx as these are not preserved over calls.
288 ;;
289 ; general registers.
290 ; mov [rdx + r8 + CPUMCPU.Host.rax], rax - scratch
291 mov [rdx + r8 + CPUMCPU.Host.rbx], rbx
292 ; mov [rdx + r8 + CPUMCPU.Host.rcx], rcx - scratch
293 ; mov [rdx + r8 + CPUMCPU.Host.rdx], rdx - scratch
294 mov [rdx + r8 + CPUMCPU.Host.rdi], rdi
295 mov [rdx + r8 + CPUMCPU.Host.rsi], rsi
296 mov [rdx + r8 + CPUMCPU.Host.rsp], rsp
297 mov [rdx + r8 + CPUMCPU.Host.rbp], rbp
298 ; mov [rdx + r8 + CPUMCPU.Host.r8 ], r8 - scratch
299 ; mov [rdx + r8 + CPUMCPU.Host.r9 ], r9 - scratch
300 mov [rdx + r8 + CPUMCPU.Host.r10], r10
301 mov [rdx + r8 + CPUMCPU.Host.r11], r11
302 mov [rdx + r8 + CPUMCPU.Host.r12], r12
303 mov [rdx + r8 + CPUMCPU.Host.r13], r13
304 mov [rdx + r8 + CPUMCPU.Host.r14], r14
305 mov [rdx + r8 + CPUMCPU.Host.r15], r15
306 ; selectors.
307 mov [rdx + r8 + CPUMCPU.Host.ds], ds
308 mov [rdx + r8 + CPUMCPU.Host.es], es
309 mov [rdx + r8 + CPUMCPU.Host.fs], fs
310 mov [rdx + r8 + CPUMCPU.Host.gs], gs
311 mov [rdx + r8 + CPUMCPU.Host.ss], ss
312 ; MSRs
313 mov rbx, rdx
314 mov ecx, MSR_K8_FS_BASE
315 rdmsr
316 mov [rbx + r8 + CPUMCPU.Host.FSbase], eax
317 mov [rbx + r8 + CPUMCPU.Host.FSbase + 4], edx
318 mov ecx, MSR_K8_GS_BASE
319 rdmsr
320 mov [rbx + r8 + CPUMCPU.Host.GSbase], eax
321 mov [rbx + r8 + CPUMCPU.Host.GSbase + 4], edx
322 mov ecx, MSR_K6_EFER
323 rdmsr
324 mov [rbx + r8 + CPUMCPU.Host.efer], eax
325 mov [rbx + r8 + CPUMCPU.Host.efer + 4], edx
326 mov rdx, rbx
327 ; special registers.
328 sldt [rdx + r8 + CPUMCPU.Host.ldtr]
329 sidt [rdx + r8 + CPUMCPU.Host.idtr]
330 sgdt [rdx + r8 + CPUMCPU.Host.gdtr]
331 str [rdx + r8 + CPUMCPU.Host.tr] ; yasm BUG, generates sldt. YASMCHECK!
332 ; flags
333 pushf
334 pop qword [rdx + r8 + CPUMCPU.Host.rflags]
335
336%ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
337 ; Block Local APIC NMI vectors
338 cmp byte [rdx + r8 + CPUMCPU.fX2Apic], 1
339 je htg_x2apic
340 mov rbx, [rdx + r8 + CPUMCPU.pvApicBase]
341 or rbx, rbx
342 jz htg_apic_done
343 xor edi, edi ; fApicDisVectors
344 mov eax, [rbx + APIC_REG_LVT_LINT0]
345 mov ecx, eax
346 and ecx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
347 cmp ecx, APIC_REG_LVT_MODE_NMI
348 jne htg_nolint0
349 or edi, 0x01
350 or eax, APIC_REG_LVT_MASKED
351 mov [rbx + APIC_REG_LVT_LINT0], eax
352 mov eax, [rbx + APIC_REG_LVT_LINT0] ; write completion
353htg_nolint0:
354 mov eax, [rbx + APIC_REG_LVT_LINT1]
355 mov ecx, eax
356 and ecx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
357 cmp ecx, APIC_REG_LVT_MODE_NMI
358 jne htg_nolint1
359 or edi, 0x02
360 or eax, APIC_REG_LVT_MASKED
361 mov [rbx + APIC_REG_LVT_LINT1], eax
362 mov eax, [rbx + APIC_REG_LVT_LINT1] ; write completion
363htg_nolint1:
364 mov eax, [rbx + APIC_REG_LVT_PC]
365 mov ecx, eax
366 and ecx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
367 cmp ecx, APIC_REG_LVT_MODE_NMI
368 jne htg_nopc
369 or edi, 0x04
370 or eax, APIC_REG_LVT_MASKED
371 mov [rbx + APIC_REG_LVT_PC], eax
372 mov eax, [rbx + APIC_REG_LVT_PC] ; write completion
373htg_nopc:
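 ; Bits 16:23 of the APIC version register hold the highest LVT entry index:
 ; an index of 5 means the thermal LVT is present but CMCI is not, 6 or more
 ; means the CMCI LVT exists as well - hence the jb/je checks below.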
374 mov eax, [rbx + APIC_REG_VERSION]
375 shr eax, 16
376 cmp al, 5
377 jb htg_notherm
378 je htg_nocmci
379 mov eax, [rbx + APIC_REG_LVT_CMCI]
380 mov ecx, eax
381 and ecx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
382 cmp ecx, APIC_REG_LVT_MODE_NMI
383 jne htg_nocmci
384 or edi, 0x10
385 or eax, APIC_REG_LVT_MASKED
386 mov [rbx + APIC_REG_LVT_CMCI], eax
387 mov eax, [rbx + APIC_REG_LVT_CMCI] ; write completion
388htg_nocmci:
389 mov eax, [rbx + APIC_REG_LVT_THMR]
390 mov ecx, eax
391 and ecx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
392 cmp ecx, APIC_REG_LVT_MODE_NMI
393 jne htg_notherm
394 or edi, 0x08
395 or eax, APIC_REG_LVT_MASKED
396 mov [rbx + APIC_REG_LVT_THMR], eax
397 mov eax, [rbx + APIC_REG_LVT_THMR] ; write completion
398htg_notherm:
399 mov [rdx + r8 + CPUMCPU.fApicDisVectors], edi
400 jmp htg_apic_done
401
402 ; X2APIC?
403htg_x2apic:
404 mov r15, rdx ; save rdx
405 xor edi, edi ; fApicDisVectors
406
407 mov ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_LINT0 >> 4)
408 rdmsr
409 mov ebx, eax
410 and ebx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
411 cmp ebx, APIC_REG_LVT_MODE_NMI
412 jne htg_x2_nolint0
413 or edi, 0x01
414 or eax, APIC_REG_LVT_MASKED
415 wrmsr
416htg_x2_nolint0:
417 mov ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_LINT1 >> 4)
418 rdmsr
419 mov ebx, eax
420 and ebx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
421 cmp ebx, APIC_REG_LVT_MODE_NMI
422 jne htg_x2_nolint1
423 or edi, 0x02
424 or eax, APIC_REG_LVT_MASKED
425 wrmsr
426htg_x2_nolint1:
427 mov ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_PC >> 4)
428 rdmsr
429 mov ebx, eax
430 and ebx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
431 cmp ebx, APIC_REG_LVT_MODE_NMI
432 jne htg_x2_nopc
433 or edi, 0x04
434 or eax, APIC_REG_LVT_MASKED
435 wrmsr
436htg_x2_nopc:
437 mov ecx, MSR_IA32_X2APIC_START + (APIC_REG_VERSION >> 4)
438 rdmsr
439 shr eax, 16
440 cmp al, 5
441 jb htg_x2_notherm
442 je htg_x2_nocmci
443 mov ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_CMCI >> 4)
444 rdmsr
445 mov ebx, eax
446 and ebx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
447 cmp ebx, APIC_REG_LVT_MODE_NMI
448 jne htg_x2_nocmci
449 or edi, 0x10
450 or eax, APIC_REG_LVT_MASKED
451 wrmsr
452htg_x2_nocmci:
453 mov ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_THMR >> 4)
454 rdmsr
455 mov ebx, eax
456 and ebx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
457 cmp ebx, APIC_REG_LVT_MODE_NMI
458 jne htg_x2_notherm
459 or edi, 0x08
460 or eax, APIC_REG_LVT_MASKED
461 wrmsr
462htg_x2_notherm:
463 mov rdx, r15
464 mov [rdx + r8 + CPUMCPU.fApicDisVectors], edi
465htg_apic_done:
466
467%endif ; VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
468
469 FIXUP FIX_NO_SYSENTER_JMP, 0, htg_no_sysenter - NAME(Start) ; this will insert a jmp htg_no_sysenter if host doesn't use sysenter.
470 ; save MSR_IA32_SYSENTER_CS register.
471 mov rbx, rdx ; save edx
472 mov ecx, MSR_IA32_SYSENTER_CS
473 rdmsr ; edx:eax <- MSR[ecx]
474 mov [rbx + r8 + CPUMCPU.Host.SysEnter.cs], eax
475 mov [rbx + r8 + CPUMCPU.Host.SysEnter.cs + 4], edx
476 xor eax, eax ; load 0:0 to cause #GP upon sysenter
477 xor edx, edx
478 wrmsr
479 mov rdx, rbx ; restore edx
480 jmp short htg_no_sysenter
481
482ALIGNCODE(16)
483htg_no_sysenter:
484
485 ;; handle use flags.
486 mov esi, [rdx + r8 + CPUMCPU.fUseFlags] ; esi == use flags.
487 and esi, ~CPUM_USED_FPU ; Clear CPUM_USED_* flags. ;;@todo FPU check can be optimized to use cr0 flags!
488 mov [rdx + r8 + CPUMCPU.fUseFlags], esi
489
490 ; debug registers.
491 test esi, CPUM_USE_DEBUG_REGS_HYPER | CPUM_USE_DEBUG_REGS_HOST
492 jnz htg_debug_regs_save
493htg_debug_regs_no:
494 DEBUG_CHAR('a') ; trashes esi
495
496 ; control registers.
497 mov rax, cr0
498 mov [rdx + r8 + CPUMCPU.Host.cr0], rax
499 ;mov rax, cr2 ; assume the host OS doesn't stuff things in cr2. (safe)
500 ;mov [rdx + r8 + CPUMCPU.Host.cr2], rax
501 mov rax, cr3
502 mov [rdx + r8 + CPUMCPU.Host.cr3], rax
503 mov rax, cr4
504 mov [rdx + r8 + CPUMCPU.Host.cr4], rax
505
506 ;;
507 ;; Start switching to VMM context.
508 ;;
509
510 ;
511 ; Change CR0 and CR4 so we can correctly emulate FPU/MMX/SSE[23] exceptions
512 ; Also disable WP. (eax==cr4 now)
513 ; Note! X86_CR4_PSE and X86_CR4_PAE are important if the host thinks so :-)
514 ;
515 and rax, X86_CR4_MCE | X86_CR4_PSE | X86_CR4_PAE
516 mov ecx, [rdx + r8 + CPUMCPU.Guest.cr4]
517 DEBUG_CHAR('b') ; trashes esi
518 ;; @todo Switcher cleanup: Determine base CR4 during CPUMR0Init / VMMR3SelectSwitcher putting it
519 ; in CPUMCPU.Hyper.cr4 (which isn't currently being used). That should
520 ; simplify this operation a bit (and improve locality of the data).
521
522 ;
523 ; CR4.AndMask and CR4.OrMask are set in CPUMR3Init based on the presence of
524 ; FXSAVE support on the host CPU
525 ;
526 and ecx, [rdx + CPUM.CR4.AndMask]
527 or eax, ecx
528 or eax, [rdx + CPUM.CR4.OrMask]
529 mov cr4, rax
530 DEBUG_CHAR('c') ; trashes esi
531
532 mov eax, [rdx + r8 + CPUMCPU.Guest.cr0]
533 and eax, X86_CR0_EM
534 or eax, X86_CR0_PE | X86_CR0_PG | X86_CR0_TS | X86_CR0_ET | X86_CR0_NE | X86_CR0_MP
535 mov cr0, rax
536 DEBUG_CHAR('0') ; trashes esi
537
538
539 ; Load new gdt so we can do far jump to guest code after cr3 reload.
540 lgdt [rdx + r8 + CPUMCPU.Hyper.gdtr]
541 DEBUG_CHAR('1') ; trashes esi
542
543 ; Store the hypervisor cr3 for later loading
544 mov ebp, [rdx + r8 + CPUMCPU.Hyper.cr3]
545
546 ;;
547 ;; Load Intermediate memory context.
548 ;;
549 FIXUP FIX_INTER_AMD64_CR3, 1
550 mov eax, 0ffffffffh
551 mov cr3, rax
552 DEBUG_CHAR('2') ; trashes esi
553
554 ;;
555 ;; 1. Switch to compatibility mode, placing ourselves in identity mapped code.
556 ;;
557 jmp far [NAME(fpIDEnterTarget) wrt rip]
558
559; 16:32 Pointer to IDEnterTarget.
560NAME(fpIDEnterTarget):
561 FIXUP FIX_ID_32BIT, 0, NAME(IDEnterTarget) - NAME(Start)
562dd 0
563 FIXUP FIX_HYPER_CS, 0
564dd 0
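; The two dwords above form the far-pointer operand of the 'jmp far' just
; before them: the first dword is patched by the FIX_ID_32BIT fixup with the
; identity-mapped address of IDEnterTarget, the second by FIX_HYPER_CS with
; the hypervisor code selector, so the jump lands in 32-bit code on the
; identity mapping.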
565
566
567;;
568; Detour for saving the host DR7 and DR6.
569; esi and rdx must be preserved.
570htg_debug_regs_save:
571DEBUG_S_CHAR('s');
572 mov rax, dr7 ; not sure, but if I read the docs right this will trap if GD is set. FIXME!!!
573 mov [rdx + r8 + CPUMCPU.Host.dr7], rax
574 mov ecx, X86_DR7_INIT_VAL
575 cmp eax, ecx
576 je .htg_debug_regs_dr7_disabled
577 mov dr7, rcx
578.htg_debug_regs_dr7_disabled:
579 mov rax, dr6 ; just in case we save the state register too.
580 mov [rdx + r8 + CPUMCPU.Host.dr6], rax
581 ; save host DR0-3?
582 test esi, CPUM_USE_DEBUG_REGS_HYPER
583 jz htg_debug_regs_no
584DEBUG_S_CHAR('S');
585 mov rax, dr0
586 mov [rdx + r8 + CPUMCPU.Host.dr0], rax
587 mov rbx, dr1
588 mov [rdx + r8 + CPUMCPU.Host.dr1], rbx
589 mov rcx, dr2
590 mov [rdx + r8 + CPUMCPU.Host.dr2], rcx
591 mov rax, dr3
592 mov [rdx + r8 + CPUMCPU.Host.dr3], rax
593 or dword [rdx + r8 + CPUMCPU.fUseFlags], CPUM_USED_DEBUG_REGS_HOST
594 jmp htg_debug_regs_no
595
596
597 ; We're now on identity mapped pages in 32-bit compatibility mode.
598BITS 32
599ALIGNCODE(16)
600GLOBALNAME IDEnterTarget
601 DEBUG_CHAR('3')
602
603 ; 2. Deactivate long mode by turning off paging.
604 mov ebx, cr0
605 and ebx, ~X86_CR0_PG
606 mov cr0, ebx
607 DEBUG_CHAR('4')
608
609 ; 3. Load intermediate page table.
610 FIXUP SWITCHER_FIX_INTER_CR3_GC, 1
611 mov edx, 0ffffffffh
612 mov cr3, edx
613
614 ; 4. Disable long mode.
615 ; We also use the chance to disable syscall/sysret and fast fxsave/fxrstor.
616 mov ecx, MSR_K6_EFER
617 rdmsr
618 DEBUG_CHAR('5')
619 and eax, ~(MSR_K6_EFER_LME | MSR_K6_EFER_SCE | MSR_K6_EFER_FFXSR)
620 wrmsr
621 DEBUG_CHAR('6')
622
623%ifndef SWITCHER_TO_PAE
624 ; 4b. Disable PAE.
625 mov eax, cr4
626 and eax, ~X86_CR4_PAE
627 mov cr4, eax
628%else
629%endif
630
631 ; 5. Enable paging.
632 or ebx, X86_CR0_PG
633 mov cr0, ebx
634 jmp short just_a_jump
635just_a_jump:
636 DEBUG_CHAR('7')
637
638 ;;
639 ;; 6. Jump to guest code mapping of the code and load the Hypervisor CS.
640 ;;
641 FIXUP FIX_ID_2_GC_NEAR_REL, 1, NAME(JmpGCTarget) - NAME(Start)
642 jmp near NAME(JmpGCTarget)
643
644
645 ;;
646 ;; When we arrive at this label we're at the
647 ;; guest code mapping of the switching code.
648 ;;
649ALIGNCODE(16)
650GLOBALNAME JmpGCTarget
651 DEBUG_CHAR('-')
652 ; load final cr3 and do far jump to load cs.
653 mov cr3, ebp ; ebp set above
654 DEBUG_CHAR('0')
655
656 ;;
657 ;; We're in VMM MMU context and VMM CS is loaded.
658 ;; Setup the rest of the VMM state.
659 ;;
660 ; Load selectors
661 DEBUG_CHAR('1')
662 FIXUP FIX_HYPER_DS, 1
663 mov eax, 0ffffh
664 mov ds, eax
665 mov es, eax
666 xor eax, eax
667 mov gs, eax
668 mov fs, eax
669 ; Load pCpum into EDX
670 FIXUP FIX_GC_CPUMCPU_OFF, 1, 0
671 mov edx, 0ffffffffh
672 ; Activate guest IDT
673 DEBUG_CHAR('2')
674 lidt [edx + CPUMCPU.Hyper.idtr]
675
676 ; Setup the stack.
677 DEBUG_CHAR('3')
678 mov ax, [edx + CPUMCPU.Hyper.ss.Sel]
679 mov ss, ax
680 mov esp, [edx + CPUMCPU.Hyper.esp]
681
682 ; Restore TSS selector; must mark it as not busy before using ltr (!)
683 DEBUG_S_CHAR('4')
684 FIXUP FIX_GC_TSS_GDTE_DW2, 2
685 and dword [0ffffffffh], ~0200h ; clear busy flag (2nd type2 bit)
686 DEBUG_S_CHAR('5')
687 ltr word [edx + CPUMCPU.Hyper.tr.Sel]
688 DEBUG_S_CHAR('6')
689
690 ; Activate the ldt (now we can safely crash).
691 lldt [edx + CPUMCPU.Hyper.ldtr.Sel]
692 DEBUG_S_CHAR('7')
693
694 ;; Use flags.
695 mov esi, [edx + CPUMCPU.fUseFlags]
696
697 ; debug registers
698 test esi, CPUM_USE_DEBUG_REGS_HYPER
699 jnz htg_debug_regs_guest
700htg_debug_regs_guest_done:
701 DEBUG_S_CHAR('9')
702
703 ; General registers (sans edx).
704 mov eax, [edx + CPUMCPU.Hyper.eax]
705 mov ebx, [edx + CPUMCPU.Hyper.ebx]
706 mov ecx, [edx + CPUMCPU.Hyper.ecx]
707 mov ebp, [edx + CPUMCPU.Hyper.ebp]
708 mov esi, [edx + CPUMCPU.Hyper.esi]
709 mov edi, [edx + CPUMCPU.Hyper.edi]
710 DEBUG_S_CHAR('!')
711
712 ;;
713 ;; Return to the VMM code which either called the switcher or
714 ;; the code set up to run by HC.
715 ;;
716 push dword [edx + CPUMCPU.Hyper.eflags]
717 push cs
718 push dword [edx + CPUMCPU.Hyper.eip]
719 mov edx, [edx + CPUMCPU.Hyper.edx] ; !! edx is no longer pointing to CPUMCPU here !!
720
721%ifdef DEBUG_STUFF
722 COM32_S_PRINT ';eip='
723 push eax
724 mov eax, [esp + 8]
725 COM32_S_DWORD_REG eax
726 pop eax
727 COM32_S_CHAR ';'
728%endif
729%ifdef VBOX_WITH_STATISTICS
730 push eax
731 FIXUP FIX_GC_VM_OFF, 1, VM.StatSwitcherToGC
732 mov eax, 0ffffffffh
733 STAM32_PROFILE_ADV_STOP eax
734 pop eax
735%endif
736
737 iret ; Use iret to make debugging and TF/RF work.
738
739;;
740; Detour for saving host DR0-3 and loading hypervisor debug registers.
741; esi and edx must be preserved.
742htg_debug_regs_guest:
743 DEBUG_S_CHAR('D')
744 DEBUG_S_CHAR('R')
745 DEBUG_S_CHAR('x')
746 ; load hyper DR0-7
747 mov ebx, [edx + CPUMCPU.Hyper.dr]
748 mov dr0, ebx
749 mov ecx, [edx + CPUMCPU.Hyper.dr + 8*1]
750 mov dr1, ecx
751 mov eax, [edx + CPUMCPU.Hyper.dr + 8*2]
752 mov dr2, eax
753 mov ebx, [edx + CPUMCPU.Hyper.dr + 8*3]
754 mov dr3, ebx
755 mov ecx, X86_DR6_INIT_VAL
756 mov dr6, ecx
757 mov eax, [edx + CPUMCPU.Hyper.dr + 8*7]
758 mov dr7, eax
759 or dword [edx + CPUMCPU.fUseFlags], CPUM_USED_DEBUG_REGS_HYPER
760 jmp htg_debug_regs_guest_done
761
762ENDPROC vmmR0ToRawModeAsm
763
764
765;;
766; Trampoline for doing a call when starting the hypervisor execution.
767;
768; Push any arguments to the routine.
769; Push the argument frame size (cArg * 4).
770; Push the call target (_cdecl convention).
771; Push the address of this routine.
772;
773;
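; Hedged illustration of the resulting hypervisor stack as seen at the
; 'pop eax' below (top of stack first), derived from the push order above:
;     call target (cdecl routine to invoke)   -> popped into eax
;     argument frame size, cArg * 4           -> popped into edi
;     arguments to the routine
; After the 'call eax' the argument frame is discarded with 'add esp, edi'.
;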
774ALIGNCODE(16)
775BEGINPROC vmmRCCallTrampoline
776%ifdef DEBUG_STUFF
777 COM32_S_CHAR 'c'
778 COM32_S_CHAR 't'
779 COM32_S_CHAR '!'
780%endif
781
782 ; call routine
783 pop eax ; call address
784 pop edi ; argument count.
785%ifdef DEBUG_STUFF
786 COM32_S_PRINT ';eax='
787 COM32_S_DWORD_REG eax
788 COM32_S_CHAR ';'
789%endif
790 call eax ; do call
791 add esp, edi ; cleanup stack
792
793 ; return to the host context (eax = C returncode).
794%ifdef DEBUG_STUFF
795 COM32_S_CHAR '`'
796%endif
797.to_host_again:
798 call NAME(vmmRCToHostAsm)
799 mov eax, VERR_VMM_SWITCHER_IPE_1
800 jmp .to_host_again
801ENDPROC vmmRCCallTrampoline
802
803
804
805;;
806; The C interface.
807;
808ALIGNCODE(16)
809BEGINPROC vmmRCToHost
810%ifdef DEBUG_STUFF
811 push esi
812 COM_NEWLINE
813 DEBUG_CHAR('b')
814 DEBUG_CHAR('a')
815 DEBUG_CHAR('c')
816 DEBUG_CHAR('k')
817 DEBUG_CHAR('!')
818 COM_NEWLINE
819 pop esi
820%endif
821 mov eax, [esp + 4]
822 jmp NAME(vmmRCToHostAsm)
823ENDPROC vmmRCToHost
824
825
826;;
827; vmmRCToHostAsmNoReturn
828;
829; This is an entry point used by TRPM when dealing with raw-mode traps,
830; i.e. traps in the hypervisor code. This will not return and saves no
831; state, because the caller has already saved the state.
832;
833; @param eax Return code.
834;
835ALIGNCODE(16)
836BEGINPROC vmmRCToHostAsmNoReturn
837 DEBUG_S_CHAR('%')
838
839%ifdef VBOX_WITH_STATISTICS
840 FIXUP FIX_GC_VM_OFF, 1, VM.StatTotalInGC
841 mov edx, 0ffffffffh
842 STAM32_PROFILE_ADV_STOP edx
843
844 FIXUP FIX_GC_VM_OFF, 1, VM.StatTotalGCToQemu
845 mov edx, 0ffffffffh
846 STAM32_PROFILE_ADV_START edx
847
848 FIXUP FIX_GC_VM_OFF, 1, VM.StatSwitcherToHC
849 mov edx, 0ffffffffh
850 STAM32_PROFILE_ADV_START edx
851%endif
852
853 FIXUP FIX_GC_CPUMCPU_OFF, 1, 0
854 mov edx, 0ffffffffh
855
856 jmp vmmRCToHostAsm_SaveNoGeneralRegs
857ENDPROC vmmRCToHostAsmNoReturn
858
859
860;;
861; vmmRCToHostAsm
862;
863; This is an entry point used by TRPM to return to host context when an
864; interrupt occurred or a guest trap needs handling in host context. It
865; is also used by the C interface above.
866;
867; The hypervisor context is saved and execution returns to the caller in
868; host context, if the host context so desires.
869;
870; @param eax Return code.
871; @uses eax, edx, ecx (or it may use them in the future)
872;
873ALIGNCODE(16)
874BEGINPROC vmmRCToHostAsm
875 DEBUG_S_CHAR('%')
876 push edx
877
878%ifdef VBOX_WITH_STATISTICS
879 FIXUP FIX_GC_VM_OFF, 1, VM.StatTotalInGC
880 mov edx, 0ffffffffh
881 STAM32_PROFILE_ADV_STOP edx
882
883 FIXUP FIX_GC_VM_OFF, 1, VM.StatTotalGCToQemu
884 mov edx, 0ffffffffh
885 STAM32_PROFILE_ADV_START edx
886
887 FIXUP FIX_GC_VM_OFF, 1, VM.StatSwitcherToHC
888 mov edx, 0ffffffffh
889 STAM32_PROFILE_ADV_START edx
890%endif
891
892 ;
893 ; Load the CPUM pointer.
894 ;
895 FIXUP FIX_GC_CPUMCPU_OFF, 1, 0
896 mov edx, 0ffffffffh
897
898 ; Save register context.
899 pop dword [edx + CPUMCPU.Hyper.edx]
900 pop dword [edx + CPUMCPU.Hyper.eip] ; call return from stack
901 mov dword [edx + CPUMCPU.Hyper.esp], esp
902 mov dword [edx + CPUMCPU.Hyper.eax], eax
903 mov dword [edx + CPUMCPU.Hyper.ebx], ebx
904 mov dword [edx + CPUMCPU.Hyper.ecx], ecx
905 mov dword [edx + CPUMCPU.Hyper.esi], esi
906 mov dword [edx + CPUMCPU.Hyper.edi], edi
907 mov dword [edx + CPUMCPU.Hyper.ebp], ebp
908
909 ; special registers which may change.
910vmmRCToHostAsm_SaveNoGeneralRegs:
911%ifdef STRICT_IF
912 pushf
913 pop ecx
914 test ecx, X86_EFL_IF
915 jz .if_clear_out
916 mov eax, 0c0ffee01h
917 cli
918.if_clear_out:
919%endif
920 mov edi, eax ; save return code in EDI (careful with COM_DWORD_REG from here on!)
921
922 ; str [edx + CPUMCPU.Hyper.tr] - double fault only, and it won't be right then either.
923 sldt [edx + CPUMCPU.Hyper.ldtr.Sel]
924
925 ; No need to save CRx here. They are set dynamically according to Guest/Host requirements.
926 ; FPU context is saved before restore of host saving (another) branch.
927
928 ; Disable debug registers if active so they cannot trigger while switching.
929 test dword [edx + CPUMCPU.fUseFlags], CPUM_USED_DEBUG_REGS_HYPER
930 jz .gth_disabled_dr7
931 mov eax, X86_DR7_INIT_VAL
932 mov dr7, eax
933.gth_disabled_dr7:
934
935 ;;
936 ;; Load Intermediate memory context.
937 ;;
938 FIXUP SWITCHER_FIX_INTER_CR3_GC, 1
939 mov eax, 0ffffffffh
940 mov cr3, eax
941 DEBUG_CHAR('?')
942
943 ;; We're now in intermediate memory context!
944
945 ;;
946 ;; 0. Jump to identity mapped location
947 ;;
948 FIXUP FIX_GC_2_ID_NEAR_REL, 1, NAME(IDExitTarget) - NAME(Start)
949 jmp near NAME(IDExitTarget)
950
951 ; We're now on identity mapped pages!
952ALIGNCODE(16)
953GLOBALNAME IDExitTarget
954 DEBUG_CHAR('1')
955
956 ; 1. Disable paging.
957 mov ebx, cr0
958 and ebx, ~X86_CR0_PG
959 mov cr0, ebx
960 DEBUG_CHAR('2')
961
962 ; 2. Enable PAE.
963%ifdef SWITCHER_TO_PAE
964 ; - already enabled
965%else
966 mov ecx, cr4
967 or ecx, X86_CR4_PAE
968 mov cr4, ecx
969%endif
970
971 ; 3. Load long mode intermediate CR3.
972 FIXUP FIX_INTER_AMD64_CR3, 1
973 mov ecx, 0ffffffffh
974 mov cr3, ecx
975 DEBUG_CHAR('3')
976
977 ; 4. Enable long mode.
978 mov ebp, edx
979 mov ecx, MSR_K6_EFER
980 rdmsr
981 or eax, MSR_K6_EFER_LME
982 wrmsr
983 mov edx, ebp
984 DEBUG_CHAR('4')
985
986 ; 5. Enable paging.
987 or ebx, X86_CR0_PG
988 mov cr0, ebx
989 DEBUG_CHAR('5')
990
991 ; Jump from compatibility mode to 64-bit mode.
992 FIXUP FIX_ID_FAR32_TO_64BIT_MODE, 1, NAME(IDExit64Mode) - NAME(Start)
993 jmp 0ffffh:0fffffffeh
994
995 ;
996 ; We're in 64-bit mode (ds, ss, es, fs, gs are all bogus).
997 ; Move on to the HC mapping.
998 ;
999BITS 64
1000ALIGNCODE(16)
1001NAME(IDExit64Mode):
1002 DEBUG_CHAR('6')
1003 jmp [NAME(pHCExitTarget) wrt rip]
1004
1005; 64-bit jump target
1006NAME(pHCExitTarget):
1007FIXUP FIX_HC_64BIT, 0, NAME(HCExitTarget) - NAME(Start)
1008dq 0ffffffffffffffffh
1009
1010; 64-bit pCpum address.
1011NAME(pCpumHC):
1012FIXUP FIX_HC_64BIT_CPUM, 0
1013dq 0ffffffffffffffffh
1014
1015 ;
1016 ; When we arrive here we're at the host context
1017 ; mapping of the switcher code.
1018 ;
1019ALIGNCODE(16)
1020GLOBALNAME HCExitTarget
1021 DEBUG_CHAR('9')
1022
1023 ; Clear high dword of the CPUMCPU pointer
1024 and rdx, 0ffffffffh
1025
1026 ; load final cr3
1027 mov rsi, [rdx + CPUMCPU.Host.cr3]
1028 mov cr3, rsi
1029 DEBUG_CHAR('@')
1030
1031 ;;
1032 ;; Restore Host context.
1033 ;;
1034 ; Load CPUM pointer into edx
1035 mov rdx, [NAME(pCpumHC) wrt rip]
1036 ; Load the CPUMCPU offset.
1037 mov r8d, [rdx + CPUM.offCPUMCPU0]
1038
1039 ; activate host gdt and idt
1040 lgdt [rdx + r8 + CPUMCPU.Host.gdtr]
1041 DEBUG_CHAR('0')
1042 lidt [rdx + r8 + CPUMCPU.Host.idtr]
1043 DEBUG_CHAR('1')
1044 ; Restore TSS selector; must mark it as not busy before using ltr (!)
1045%if 1 ; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p)
1046 movzx eax, word [rdx + r8 + CPUMCPU.Host.tr] ; eax <- TR
1047 and al, 0F8h ; mask away TI and RPL bits, get descriptor offset.
1048 add rax, [rdx + r8 + CPUMCPU.Host.gdtr + 2] ; eax <- GDTR.address + descriptor offset.
1049 and dword [rax + 4], ~0200h ; clear busy flag (2nd type2 bit)
1050 ltr word [rdx + r8 + CPUMCPU.Host.tr]
1051%else
1052 movzx eax, word [rdx + r8 + CPUMCPU.Host.tr] ; eax <- TR
1053 and al, 0F8h ; mask away TI and RPL bits, get descriptor offset.
1054 add rax, [rdx + r8 + CPUMCPU.Host.gdtr + 2] ; eax <- GDTR.address + descriptor offset.
1055 mov ecx, [rax + 4] ; ecx <- 2nd descriptor dword
1056 mov ebx, ecx ; save original value
1057 and ecx, ~0200h ; clear busy flag (2nd type2 bit)
1058 mov [rax + 4], ecx ; not using xchg here is paranoia..
1059 ltr word [rdx + r8 + CPUMCPU.Host.tr]
1060 xchg [rax + 4], ebx ; using xchg is paranoia too...
1061%endif
1062 ; activate ldt
1063 DEBUG_CHAR('2')
1064 lldt [rdx + r8 + CPUMCPU.Host.ldtr]
1065 ; Restore segment registers
1066 mov eax, [rdx + r8 + CPUMCPU.Host.ds]
1067 mov ds, eax
1068 mov eax, [rdx + r8 + CPUMCPU.Host.es]
1069 mov es, eax
1070 mov eax, [rdx + r8 + CPUMCPU.Host.fs]
1071 mov fs, eax
1072 mov eax, [rdx + r8 + CPUMCPU.Host.gs]
1073 mov gs, eax
1074 ; restore stack
1075 mov eax, [rdx + r8 + CPUMCPU.Host.ss]
1076 mov ss, eax
1077 mov rsp, [rdx + r8 + CPUMCPU.Host.rsp]
1078
1079 FIXUP FIX_NO_SYSENTER_JMP, 0, gth_sysenter_no - NAME(Start) ; this will insert a jmp gth_sysenter_no if host doesn't use sysenter.
1080 ; restore MSR_IA32_SYSENTER_CS register.
1081 mov rbx, rdx ; save edx
1082 mov ecx, MSR_IA32_SYSENTER_CS
1083 mov eax, [rbx + r8 + CPUMCPU.Host.SysEnter.cs]
1084 mov edx, [rbx + r8 + CPUMCPU.Host.SysEnter.cs + 4]
1085 wrmsr ; MSR[ecx] <- edx:eax
1086 mov rdx, rbx ; restore edx
1087 jmp short gth_sysenter_no
1088
1089ALIGNCODE(16)
1090gth_sysenter_no:
1091
1092 ;; @todo AMD syscall
1093
1094 ; Restore FPU if guest has used it.
1095 ; Using fxrstor should ensure that we're not causing unwanted exception on the host.
1096 mov esi, [rdx + r8 + CPUMCPU.fUseFlags] ; esi == use flags.
1097 test esi, CPUM_USED_FPU
1098 jz short gth_fpu_no
1099 mov rcx, cr0
1100 and rcx, ~(X86_CR0_TS | X86_CR0_EM)
1101 mov cr0, rcx
1102
1103 fxsave [rdx + r8 + CPUMCPU.Guest.fpu]
1104 o64 fxrstor [rdx + r8 + CPUMCPU.Host.fpu] ; Restore 64-bit host FPU state. See @bugref{7138}
1105 jmp short gth_fpu_no
1106
1107ALIGNCODE(16)
1108gth_fpu_no:
1109
1110 ; Control registers.
1111 ; Would've liked to have these higher up in case of crashes, but
1112 ; the fpu stuff must be done before we restore cr0.
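 ; Note: CR4.PCIDE may only be set while CR3[11:0] (the PCID) is zero, so
 ; when the host had PCIDE enabled we first load a PCID-free CR3, then set
 ; CR4, and only then reload the real CR3 with its PCID.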
1113 mov rcx, [rdx + r8 + CPUMCPU.Host.cr4]
1114 test rcx, X86_CR4_PCIDE
1115 jz gth_no_pcide
1116 mov rax, [rdx + r8 + CPUMCPU.Host.cr3]
1117 and rax, ~0xfff ; clear the PCID in cr3
1118 mov cr3, rax
1119 mov cr4, rcx
1120 mov rax, [rdx + r8 + CPUMCPU.Host.cr3]
1121 mov cr3, rax ; reload it with the right PCID.
1122 jmp gth_restored_cr4
1123gth_no_pcide:
1124 mov cr4, rcx
1125gth_restored_cr4:
1126 mov rcx, [rdx + r8 + CPUMCPU.Host.cr0]
1127 mov cr0, rcx
1128 ;mov rcx, [rdx + r8 + CPUMCPU.Host.cr2] ; assumed to be a waste of time.
1129 ;mov cr2, rcx
1130
1131 ; Restore MSRs
1132 mov rbx, rdx
1133 mov ecx, MSR_K8_FS_BASE
1134 mov eax, [rbx + r8 + CPUMCPU.Host.FSbase]
1135 mov edx, [rbx + r8 + CPUMCPU.Host.FSbase + 4]
1136 wrmsr
1137 mov ecx, MSR_K8_GS_BASE
1138 mov eax, [rbx + r8 + CPUMCPU.Host.GSbase]
1139 mov edx, [rbx + r8 + CPUMCPU.Host.GSbase + 4]
1140 wrmsr
1141 mov ecx, MSR_K6_EFER
1142 mov eax, [rbx + r8 + CPUMCPU.Host.efer]
1143 mov edx, [rbx + r8 + CPUMCPU.Host.efer + 4]
1144 wrmsr
1145 mov rdx, rbx
1146
1147 ; Restore debug registers (if modified). (ESI must still be fUseFlags! Must be done late, at least after CR4!)
1148 test esi, CPUM_USE_DEBUG_REGS_HOST | CPUM_USED_DEBUG_REGS_HOST | CPUM_USE_DEBUG_REGS_HYPER
1149 jnz gth_debug_regs_restore
1150gth_debug_regs_done:
1151 and dword [rdx + r8 + CPUMCPU.fUseFlags], ~(CPUM_USED_DEBUG_REGS_HOST | CPUM_USED_DEBUG_REGS_HYPER)
1152
1153 ; Restore general registers.
1154 mov eax, edi ; restore return code. eax = return code !!
1155 ; mov rax, [rdx + r8 + CPUMCPU.Host.rax] - scratch + return code
1156 mov rbx, [rdx + r8 + CPUMCPU.Host.rbx]
1157 ; mov rcx, [rdx + r8 + CPUMCPU.Host.rcx] - scratch
1158 ; mov rdx, [rdx + r8 + CPUMCPU.Host.rdx] - scratch
1159 mov rdi, [rdx + r8 + CPUMCPU.Host.rdi]
1160 mov rsi, [rdx + r8 + CPUMCPU.Host.rsi]
1161 mov rsp, [rdx + r8 + CPUMCPU.Host.rsp]
1162 mov rbp, [rdx + r8 + CPUMCPU.Host.rbp]
1163 ; mov r8, [rdx + r8 + CPUMCPU.Host.r8 ] - scratch
1164 ; mov r9, [rdx + r8 + CPUMCPU.Host.r9 ] - scratch
1165 mov r10, [rdx + r8 + CPUMCPU.Host.r10]
1166 mov r11, [rdx + r8 + CPUMCPU.Host.r11]
1167 mov r12, [rdx + r8 + CPUMCPU.Host.r12]
1168 mov r13, [rdx + r8 + CPUMCPU.Host.r13]
1169 mov r14, [rdx + r8 + CPUMCPU.Host.r14]
1170 mov r15, [rdx + r8 + CPUMCPU.Host.r15]
1171
1172 ; finally restore flags. (probably not required)
1173 push qword [rdx + r8 + CPUMCPU.Host.rflags]
1174 popf
1175
1176
1177%ifdef DEBUG_STUFF
1178 COM64_S_CHAR '4'
1179%endif
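 ; The 048h byte below is a REX.W prefix, turning retf into a 64-bit far
 ; return: it pops the 64-bit return RIP pushed by the 'call' in
 ; vmmR0ToRawMode and the host CS pushed just before it.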
1180 db 048h
1181 retf
1182
1183;;
1184; Detour for restoring the host debug registers.
1185; edx and edi must be preserved.
1186gth_debug_regs_restore:
1187 DEBUG_S_CHAR('d')
1188 mov rax, dr7 ; Some DR7 paranoia first...
1189 mov ecx, X86_DR7_INIT_VAL
1190 cmp rax, rcx
1191 je .gth_debug_skip_dr7_disabling
1192 mov dr7, rcx
1193.gth_debug_skip_dr7_disabling:
1194 test esi, CPUM_USED_DEBUG_REGS_HOST
1195 jz .gth_debug_regs_dr7
1196
1197 DEBUG_S_CHAR('r')
1198 mov rax, [rdx + r8 + CPUMCPU.Host.dr0]
1199 mov dr0, rax
1200 mov rbx, [rdx + r8 + CPUMCPU.Host.dr1]
1201 mov dr1, rbx
1202 mov rcx, [rdx + r8 + CPUMCPU.Host.dr2]
1203 mov dr2, rcx
1204 mov rax, [rdx + r8 + CPUMCPU.Host.dr3]
1205 mov dr3, rax
1206.gth_debug_regs_dr7:
1207 mov rbx, [rdx + r8 + CPUMCPU.Host.dr6]
1208 mov dr6, rbx
1209 mov rcx, [rdx + r8 + CPUMCPU.Host.dr7]
1210 mov dr7, rcx
1211
1212 ; We clear the USED flags in the main code path.
1213 jmp gth_debug_regs_done
1214
1215ENDPROC vmmRCToHostAsm
1216
1217
1218GLOBALNAME End
1219;
1220; The description string (in the text section).
1221;
1222NAME(Description):
1223 db SWITCHER_DESCRIPTION
1224 db 0
1225
1226extern NAME(Relocate)
1227
1228;
1229; End the fixup records.
1230;
1231BEGINDATA
1232 db FIX_THE_END ; final entry.
1233GLOBALNAME FixupsEnd
1234
1235;;
1236; The switcher definition structure.
1237ALIGNDATA(16)
1238GLOBALNAME Def
1239 istruc VMMSWITCHERDEF
1240 at VMMSWITCHERDEF.pvCode, RTCCPTR_DEF NAME(Start)
1241 at VMMSWITCHERDEF.pvFixups, RTCCPTR_DEF NAME(Fixups)
1242 at VMMSWITCHERDEF.pszDesc, RTCCPTR_DEF NAME(Description)
1243 at VMMSWITCHERDEF.pfnRelocate, RTCCPTR_DEF NAME(Relocate)
1244 at VMMSWITCHERDEF.enmType, dd SWITCHER_TYPE
1245 at VMMSWITCHERDEF.cbCode, dd NAME(End) - NAME(Start)
1246 at VMMSWITCHERDEF.offR0ToRawMode, dd NAME(vmmR0ToRawMode) - NAME(Start)
1247 at VMMSWITCHERDEF.offRCToHost, dd NAME(vmmRCToHost) - NAME(Start)
1248 at VMMSWITCHERDEF.offRCCallTrampoline, dd NAME(vmmRCCallTrampoline) - NAME(Start)
1249 at VMMSWITCHERDEF.offRCToHostAsm, dd NAME(vmmRCToHostAsm) - NAME(Start)
1250 at VMMSWITCHERDEF.offRCToHostAsmNoReturn, dd NAME(vmmRCToHostAsmNoReturn) - NAME(Start)
1251 ; disasm help
1252 at VMMSWITCHERDEF.offHCCode0, dd 0
1253 at VMMSWITCHERDEF.cbHCCode0, dd NAME(IDEnterTarget) - NAME(Start)
1254 at VMMSWITCHERDEF.offHCCode1, dd NAME(HCExitTarget) - NAME(Start)
1255 at VMMSWITCHERDEF.cbHCCode1, dd NAME(End) - NAME(HCExitTarget)
1256 at VMMSWITCHERDEF.offIDCode0, dd NAME(IDEnterTarget) - NAME(Start)
1257 at VMMSWITCHERDEF.cbIDCode0, dd NAME(JmpGCTarget) - NAME(IDEnterTarget)
1258 at VMMSWITCHERDEF.offIDCode1, dd NAME(IDExitTarget) - NAME(Start)
1259 at VMMSWITCHERDEF.cbIDCode1, dd NAME(HCExitTarget) - NAME(IDExitTarget)
1260 at VMMSWITCHERDEF.offGCCode, dd NAME(JmpGCTarget) - NAME(Start)
1261 at VMMSWITCHERDEF.cbGCCode, dd NAME(IDExitTarget) - NAME(JmpGCTarget)
1262
1263 iend
1264