VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMSwitcher/PAEand32Bit.mac@53835

Last change on this file since 53835 was 53835, checked in by vboxsync, 10 years ago

VMMSwitcher: also mask NMI in APIC_REG_LVT_CMCI

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 34.2 KB
; $Id: PAEand32Bit.mac 53835 2015-01-15 20:39:49Z vboxsync $
;; @file
; VMM - World Switchers, template for PAE and 32-Bit.
;

;
; Copyright (C) 2006-2013 Oracle Corporation
;
; This file is part of VirtualBox Open Source Edition (OSE), as
; available from http://www.virtualbox.org. This file is free software;
; you can redistribute it and/or modify it under the terms of the GNU
; General Public License (GPL) as published by the Free Software
; Foundation, in version 2 as it comes in the "COPYING" file of the
; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
;

;%define DEBUG_STUFF 1

;*******************************************************************************
;*   Header Files                                                              *
;*******************************************************************************
%include "VBox/asmdefs.mac"
%include "VBox/apic.mac"
%include "iprt/x86.mac"
%include "VBox/vmm/cpum.mac"
%include "VBox/vmm/stam.mac"
%include "VBox/vmm/vm.mac"
%include "VBox/err.mac"
%include "CPUMInternal.mac"
%include "VMMSwitcher.mac"

%undef NEED_ID
%ifdef NEED_PAE_ON_32BIT_HOST
 %define NEED_ID
%endif
%ifdef NEED_32BIT_ON_PAE_HOST
 %define NEED_ID
%endif
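; NEED_ID is defined when host and guest paging modes differ (PAE guest
; context on a 32-bit host or vice versa). Paging mode can only be changed
; while CR0.PG is clear, so the switcher must pass through identity mapped
; pages (IDEnterTarget / IDExitTarget below) where it can briefly run with
; paging disabled.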



;
; Start the fixup records
; We collect the fixups in the .data section as we go along
; It is therefore VITAL that no-one is using the .data section
; for anything else between 'Start' and 'End'.
;
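; Each FIXUP invocation below emits a record here. The Relocate routine
; (declared extern at the end of this file) walks these records and patches
; the placeholder constants, e.g. 0ffffffffh and 0deadfaceh, once the
; switcher code has been mapped at its final addresses.
;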
BEGINDATA
GLOBALNAME Fixups



BEGINCODE
GLOBALNAME Start

;;
; The C interface.
;
BEGINPROC vmmR0ToRawMode

%ifdef DEBUG_STUFF
    COM_S_NEWLINE
    COM_S_CHAR '^'
%endif

%ifdef VBOX_WITH_STATISTICS
    ;
    ; Switcher stats.
    ;
    FIXUP FIX_HC_VM_OFF, 1, VM.StatSwitcherToGC
    mov edx, 0ffffffffh
    STAM_PROFILE_ADV_START edx
%endif

    ;
    ; Call worker.
    ;
    FIXUP FIX_HC_CPUM_OFF, 1, 0
    mov edx, 0ffffffffh
    push cs                         ; allow for far return and restore cs correctly.
    call NAME(vmmR0ToRawModeAsm)

%ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
    ; Restore blocked Local APIC NMI vectors
    ; Do this here to ensure the host CS is already restored
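    ; fApicDisVectors was built on the way in (see vmmR0ToRawModeAsm):
    ;   bit 0 = LINT0, bit 1 = LINT1, bit 2 = performance counter,
    ;   bit 3 = thermal monitor, bit 4 = CMCI.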
    mov ecx, [edx + CPUMCPU.fApicDisVectors]
    test ecx, ecx
    jz gth_apic_done
    cmp byte [edx + CPUMCPU.fX2Apic], 1
    je gth_x2apic

    mov edx, [edx + CPUMCPU.pvApicBase]
    shr ecx, 1
    jnc gth_nolint0
    and dword [edx + APIC_REG_LVT_LINT0], ~APIC_REG_LVT_MASKED
gth_nolint0:
    shr ecx, 1
    jnc gth_nolint1
    and dword [edx + APIC_REG_LVT_LINT1], ~APIC_REG_LVT_MASKED
gth_nolint1:
    shr ecx, 1
    jnc gth_nopc
    and dword [edx + APIC_REG_LVT_PC], ~APIC_REG_LVT_MASKED
gth_nopc:
    shr ecx, 1
    jnc gth_notherm
    and dword [edx + APIC_REG_LVT_THMR], ~APIC_REG_LVT_MASKED
gth_notherm:
    shr ecx, 1
    jnc gth_nocmci
    and dword [edx + APIC_REG_LVT_CMCI], ~APIC_REG_LVT_MASKED
gth_nocmci:
    jmp gth_apic_done

gth_x2apic:
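    ; In x2APIC mode the LVT registers are MSRs rather than MMIO: the
    ; register at xAPIC offset 'off' lives at MSR 800h + (off >> 4), which
    ; is what MSR_IA32_X2APIC_START + (APIC_REG_LVT_* >> 4) computes.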
    push eax                        ; save eax
    push ebx                        ; save it for fApicDisVectors
    push edx                        ; save edx just in case.
    mov ebx, ecx                    ; ebx = fApicDisVectors, ecx free for MSR use
    shr ebx, 1
    jnc gth_x2_nolint0
    mov ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_LINT0 >> 4)
    rdmsr
    and eax, ~APIC_REG_LVT_MASKED
    wrmsr
gth_x2_nolint0:
    shr ebx, 1
    jnc gth_x2_nolint1
    mov ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_LINT1 >> 4)
    rdmsr
    and eax, ~APIC_REG_LVT_MASKED
    wrmsr
gth_x2_nolint1:
    shr ebx, 1
    jnc gth_x2_nopc
    mov ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_PC >> 4)
    rdmsr
    and eax, ~APIC_REG_LVT_MASKED
    wrmsr
gth_x2_nopc:
    shr ebx, 1
    jnc gth_x2_notherm
    mov ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_THMR >> 4)
    rdmsr
    and eax, ~APIC_REG_LVT_MASKED
    wrmsr
gth_x2_notherm:
    shr ebx, 1
    jnc gth_x2_nocmci
    mov ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_CMCI >> 4)
    rdmsr
    and eax, ~APIC_REG_LVT_MASKED
    wrmsr
gth_x2_nocmci:
    pop edx
    pop ebx
    pop eax

gth_apic_done:
%endif ; VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI

%ifdef VBOX_WITH_STATISTICS
    ;
    ; Switcher stats.
    ;
    FIXUP FIX_HC_VM_OFF, 1, VM.StatSwitcherToHC
    mov edx, 0ffffffffh
    STAM_PROFILE_ADV_STOP edx
%endif

    ret
ENDPROC vmmR0ToRawMode



; *****************************************************************************
; vmmR0ToRawModeAsm
;
; Phase one of the switch from host to guest context (host MMU context)
;
; INPUT:
;    - edx       virtual address of CPUM structure (valid in host context)
;
; USES/DESTROYS:
;    - eax, ecx, edx
;
; ASSUMPTION:
;    - current CS and DS selectors are wide open
;
; *****************************************************************************
ALIGNCODE(16)
BEGINPROC vmmR0ToRawModeAsm
    ;;
    ;; Save CPU host context
    ;; Skip eax, edx and ecx as these are not preserved over calls.
    ;;
    CPUMCPU_FROM_CPUM(edx)
    ; general registers.
    mov [edx + CPUMCPU.Host.ebx], ebx
    mov [edx + CPUMCPU.Host.edi], edi
    mov [edx + CPUMCPU.Host.esi], esi
    mov [edx + CPUMCPU.Host.esp], esp
    mov [edx + CPUMCPU.Host.ebp], ebp
    ; selectors.
    mov [edx + CPUMCPU.Host.ds], ds
    mov [edx + CPUMCPU.Host.es], es
    mov [edx + CPUMCPU.Host.fs], fs
    mov [edx + CPUMCPU.Host.gs], gs
    mov [edx + CPUMCPU.Host.ss], ss
    ; special registers.
    sldt [edx + CPUMCPU.Host.ldtr]
    sidt [edx + CPUMCPU.Host.idtr]
    sgdt [edx + CPUMCPU.Host.gdtr]
    str [edx + CPUMCPU.Host.tr]
    ; flags
    pushfd
    pop dword [edx + CPUMCPU.Host.eflags]

    ; Block Local APIC NMI vectors
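    ; Any LVT entry configured for NMI delivery is masked here and recorded
    ; in fApicDisVectors so that vmmR0ToRawMode can unmask it on the way
    ; back. An NMI delivered while we are running on the hypervisor IDT
    ; with only partially switched state could not be handled safely.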
%ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
    cmp byte [edx + CPUMCPU.fX2Apic], 1
    je htg_x2apic

    mov ebx, [edx + CPUMCPU.pvApicBase]
    or ebx, ebx
    jz htg_apic_done
    xor edi, edi                    ; fApicDisVectors

    mov eax, [ebx + APIC_REG_LVT_LINT0]
    mov ecx, eax
    and ecx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
    cmp ecx, APIC_REG_LVT_MODE_NMI
    jne htg_nolint0
    or edi, 0x01
    or eax, APIC_REG_LVT_MASKED
    mov [ebx + APIC_REG_LVT_LINT0], eax
    mov eax, [ebx + APIC_REG_LVT_LINT0] ; write completion
htg_nolint0:
    mov eax, [ebx + APIC_REG_LVT_LINT1]
    mov ecx, eax
    and ecx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
    cmp ecx, APIC_REG_LVT_MODE_NMI
    jne htg_nolint1
    or edi, 0x02
    or eax, APIC_REG_LVT_MASKED
    mov [ebx + APIC_REG_LVT_LINT1], eax
    mov eax, [ebx + APIC_REG_LVT_LINT1] ; write completion
htg_nolint1:
    mov eax, [ebx + APIC_REG_LVT_PC]
    mov ecx, eax
    and ecx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
    cmp ecx, APIC_REG_LVT_MODE_NMI
    jne htg_nopc
    or edi, 0x04
    or eax, APIC_REG_LVT_MASKED
    mov [ebx + APIC_REG_LVT_PC], eax
    mov eax, [ebx + APIC_REG_LVT_PC] ; write completion
htg_nopc:
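    ; Bits 16-23 of the APIC version register hold the index of the highest
    ; LVT entry. The thermal LVT is entry 5 and the CMCI LVT entry 6, so a
    ; value below 5 means neither exists and exactly 5 means thermal only.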
    mov eax, [ebx + APIC_REG_VERSION]
    shr eax, 16
    cmp al, 5
    jb htg_notherm
    je htg_nocmci
    mov eax, [ebx + APIC_REG_LVT_CMCI]
    mov ecx, eax
    and ecx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
    cmp ecx, APIC_REG_LVT_MODE_NMI
    jne htg_nocmci
    or edi, 0x10
    or eax, APIC_REG_LVT_MASKED
    mov [ebx + APIC_REG_LVT_CMCI], eax
    mov eax, [ebx + APIC_REG_LVT_CMCI] ; write completion
htg_nocmci:
    mov eax, [ebx + APIC_REG_LVT_THMR]
    mov ecx, eax
    and ecx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
    cmp ecx, APIC_REG_LVT_MODE_NMI
    jne htg_notherm
    or edi, 0x08
    or eax, APIC_REG_LVT_MASKED
    mov [ebx + APIC_REG_LVT_THMR], eax
    mov eax, [ebx + APIC_REG_LVT_THMR] ; write completion
htg_notherm:
    mov [edx + CPUMCPU.fApicDisVectors], edi
    jmp htg_apic_done

htg_x2apic:
    mov esi, edx                    ; Save edx.
    xor edi, edi                    ; fApicDisVectors

    mov ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_LINT0 >> 4)
    rdmsr
    mov ebx, eax
    and ebx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
    cmp ebx, APIC_REG_LVT_MODE_NMI
    jne htg_x2_nolint0
    or edi, 0x01
    or eax, APIC_REG_LVT_MASKED
    wrmsr
htg_x2_nolint0:
    mov ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_LINT1 >> 4)
    rdmsr
    mov ebx, eax
    and ebx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
    cmp ebx, APIC_REG_LVT_MODE_NMI
    jne htg_x2_nolint1
    or edi, 0x02
    or eax, APIC_REG_LVT_MASKED
    wrmsr
htg_x2_nolint1:
    mov ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_PC >> 4)
    rdmsr
    mov ebx, eax
    and ebx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
    cmp ebx, APIC_REG_LVT_MODE_NMI
    jne htg_x2_nopc
    or edi, 0x04
    or eax, APIC_REG_LVT_MASKED
    wrmsr
htg_x2_nopc:
    mov ecx, MSR_IA32_X2APIC_START + (APIC_REG_VERSION >> 4)
    rdmsr
    shr eax, 16
    cmp al, 5
    jb htg_x2_notherm
    je htg_x2_nocmci
    mov ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_CMCI >> 4)
    rdmsr
    mov ebx, eax
    and ebx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
    cmp ebx, APIC_REG_LVT_MODE_NMI
    jne htg_x2_nocmci
    or edi, 0x10
    or eax, APIC_REG_LVT_MASKED
    wrmsr
htg_x2_nocmci:
    mov ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_THMR >> 4)
    rdmsr
    mov ebx, eax
    and ebx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
    cmp ebx, APIC_REG_LVT_MODE_NMI
    jne htg_x2_notherm
    or edi, 0x08
    or eax, APIC_REG_LVT_MASKED
    wrmsr
htg_x2_notherm:
    mov edx, esi                    ; Restore edx.
    mov [edx + CPUMCPU.fApicDisVectors], edi

htg_apic_done:
%endif ; VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI

    FIXUP FIX_NO_SYSENTER_JMP, 0, htg_no_sysenter - NAME(Start) ; this will insert a jmp htg_no_sysenter if host doesn't use sysenter.
    ; save MSR_IA32_SYSENTER_CS register.
    mov ecx, MSR_IA32_SYSENTER_CS
    mov ebx, edx                    ; save edx
    rdmsr                           ; edx:eax <- MSR[ecx]
    mov [ebx + CPUMCPU.Host.SysEnter.cs], eax
    mov [ebx + CPUMCPU.Host.SysEnter.cs + 4], edx
    xor eax, eax                    ; load 0:0 to cause #GP upon sysenter
    xor edx, edx
    wrmsr
    xchg ebx, edx                   ; restore edx
    jmp short htg_no_sysenter

ALIGNCODE(16)
htg_no_sysenter:

    FIXUP FIX_NO_SYSCALL_JMP, 0, htg_no_syscall - NAME(Start) ; this will insert a jmp htg_no_syscall if host doesn't use syscall.
    ; clear MSR_K6_EFER_SCE.
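    ; With EFER.SCE clear the SYSCALL instruction raises #UD, so the host's
    ; syscall entry point cannot be reached while guest context is active.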
    mov ebx, edx                    ; save edx
    mov ecx, MSR_K6_EFER
    rdmsr                           ; edx:eax <- MSR[ecx]
    and eax, ~MSR_K6_EFER_SCE
    wrmsr
    mov edx, ebx                    ; restore edx
    jmp short htg_no_syscall

ALIGNCODE(16)
htg_no_syscall:

    ;; handle use flags.
    mov esi, [edx + CPUMCPU.fUseFlags] ; esi == use flags.
    and esi, ~CPUM_USED_FPU         ; Clear CPUM_USED_* flags. ;;@todo FPU check can be optimized to use cr0 flags!
    mov [edx + CPUMCPU.fUseFlags], esi

    ; debug registers.
    test esi, CPUM_USE_DEBUG_REGS_HYPER | CPUM_USE_DEBUG_REGS_HOST
    jnz htg_debug_regs_save_dr7and6
htg_debug_regs_no:

    ; control registers.
    mov eax, cr0
    mov [edx + CPUMCPU.Host.cr0], eax
    ;mov eax, cr2                   ; assume the host OS doesn't stuff things in cr2. (safe)
    ;mov [edx + CPUMCPU.Host.cr2], eax
    mov eax, cr3
    mov [edx + CPUMCPU.Host.cr3], eax
    mov eax, cr4
    mov [edx + CPUMCPU.Host.cr4], eax

    ;;
    ;; Start switching to VMM context.
    ;;

    ;
    ; Change CR0 and CR4 so we can correctly emulate FPU/MMX/SSE[23] exceptions
    ; Also disable WP. (eax==cr4 now)
    ; Note! X86_CR4_PSE and X86_CR4_PAE are important if the host thinks so :-)
    ; Note! X86_CR4_VMXE must not be touched in case the CPU is in vmx root mode
    ;
    and eax, X86_CR4_MCE | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_VMXE
    mov ecx, [edx + CPUMCPU.Guest.cr4]
    ;; @todo Switcher cleanup: Determine base CR4 during CPUMR0Init / VMMR3SelectSwitcher putting it
    ;  in CPUMCPU.Hyper.cr4 (which isn't currently being used). That should
    ;  simplify this operation a bit (and improve locality of the data).

    ;
    ; CR4.AndMask and CR4.OrMask are set in CPUMR3Init based on the presence of
    ; FXSAVE support on the host CPU
    ;
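    ; The effective value loaded below is thus:
    ;   cr4 = (host cr4 & (MCE|PSE|PAE|VMXE)) | (guest cr4 & CR4.AndMask) | CR4.OrMask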
    CPUM_FROM_CPUMCPU(edx)
    and ecx, [edx + CPUM.CR4.AndMask]
    or eax, ecx
    or eax, [edx + CPUM.CR4.OrMask]
    mov cr4, eax

    CPUMCPU_FROM_CPUM(edx)
    mov eax, [edx + CPUMCPU.Guest.cr0]
    and eax, X86_CR0_EM
    or eax, X86_CR0_PE | X86_CR0_PG | X86_CR0_TS | X86_CR0_ET | X86_CR0_NE | X86_CR0_MP
    mov cr0, eax

    ; Load new gdt so we can do far jump to guest code after cr3 reload.
    lgdt [edx + CPUMCPU.Hyper.gdtr]
    DEBUG_CHAR('1')                 ; trashes esi

    ; Store the hypervisor cr3 for later loading
    mov ebp, [edx + CPUMCPU.Hyper.cr3]

    ;;
    ;; Load Intermediate memory context.
    ;;
    FIXUP SWITCHER_FIX_INTER_CR3_HC, 1
    mov eax, 0ffffffffh
    mov cr3, eax
    DEBUG_CHAR('2')                 ; trashes esi

%ifdef NEED_ID
    ;;
    ;; Jump to identity mapped location
    ;;
    FIXUP FIX_HC_2_ID_NEAR_REL, 1, NAME(IDEnterTarget) - NAME(Start)
    jmp near NAME(IDEnterTarget)

    ; We're now on identity mapped pages!
ALIGNCODE(16)
GLOBALNAME IDEnterTarget
    DEBUG_CHAR('3')
    mov edx, cr4
%ifdef NEED_PAE_ON_32BIT_HOST
    or edx, X86_CR4_PAE
%else
    and edx, ~X86_CR4_PAE
%endif
    mov eax, cr0
    and eax, (~X86_CR0_PG) & 0xffffffff ; prevent yasm warning
    mov cr0, eax
    DEBUG_CHAR('4')
    mov cr4, edx
    FIXUP SWITCHER_FIX_INTER_CR3_GC, 1
    mov edx, 0ffffffffh
    mov cr3, edx
    or eax, X86_CR0_PG
    DEBUG_CHAR('5')
    mov cr0, eax
    DEBUG_CHAR('6')
%endif

    ;;
    ;; Jump to guest code mapping of the code and load the Hypervisor CS.
    ;;
    FIXUP FIX_GC_FAR32, 1, NAME(FarJmpGCTarget) - NAME(Start)
    jmp 0fff8h:0deadfaceh


    ;;
    ;; When we arrive at this label we're at the
    ;; guest code mapping of the switching code.
    ;;
ALIGNCODE(16)
GLOBALNAME FarJmpGCTarget
    DEBUG_CHAR('-')
    ; load final cr3 and do far jump to load cs.
    mov cr3, ebp                    ; ebp set above
    DEBUG_CHAR('0')

    ;;
    ;; We're in VMM MMU context and VMM CS is loaded.
    ;; Setup the rest of the VMM state.
    ;;
    FIXUP FIX_GC_CPUMCPU_OFF, 1, 0
    mov edx, 0ffffffffh
    ; Activate guest IDT
    DEBUG_CHAR('1')
    lidt [edx + CPUMCPU.Hyper.idtr]
    ; Load selectors
    DEBUG_CHAR('2')
    FIXUP FIX_HYPER_DS, 1
    mov eax, 0ffffh
    mov ds, eax
    mov es, eax
    xor eax, eax
    mov gs, eax
    mov fs, eax

    ; Setup stack.
    DEBUG_CHAR('3')
    mov eax, [edx + CPUMCPU.Hyper.ss.Sel]
    mov ss, ax
    mov esp, [edx + CPUMCPU.Hyper.esp]

    ; Restore TSS selector; must mark it as not busy before using ltr (!)
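    ; ltr requires the descriptor type to be 'available TSS'; the CPU marks
    ; it busy when TR is loaded. Bit 9 (0200h) of the descriptor's second
    ; dword is that busy bit, hence the masking below.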
    DEBUG_CHAR('4')
    FIXUP FIX_GC_TSS_GDTE_DW2, 2
    and dword [0ffffffffh], ~0200h  ; clear busy flag (2nd type2 bit)
    DEBUG_CHAR('5')
    ltr word [edx + CPUMCPU.Hyper.tr.Sel]
    DEBUG_CHAR('6')

    ; Activate the ldt (now we can safely crash).
    lldt [edx + CPUMCPU.Hyper.ldtr.Sel]
    DEBUG_CHAR('7')

    ;; use flags.
    mov esi, [edx + CPUMCPU.fUseFlags]

    ; debug registers
    test esi, CPUM_USE_DEBUG_REGS_HYPER
    jnz htg_debug_regs_guest
htg_debug_regs_guest_done:
    DEBUG_CHAR('9')

%ifdef VBOX_WITH_NMI
    ;
    ; Setup K7 NMI.
    ;
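    ; (This arms a performance-counter based NMI watchdog: counter 0 is
    ; programmed with event 076h - cycles-not-halted on K7/K8 - and an
    ; initial count close to overflow, while the local APIC LVT performance
    ; counter entry is switched to NMI delivery, so counter overflow raises
    ; a periodic NMI.)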
    mov esi, edx
    ; clear all PerfEvtSeln registers
    xor eax, eax
    xor edx, edx
    mov ecx, MSR_K7_PERFCTR0
    wrmsr
    mov ecx, MSR_K7_PERFCTR1
    wrmsr
    mov ecx, MSR_K7_PERFCTR2
    wrmsr
    mov ecx, MSR_K7_PERFCTR3
    wrmsr

    mov eax, RT_BIT(20) | RT_BIT(17) | RT_BIT(16) | 076h
    mov ecx, MSR_K7_EVNTSEL0
    wrmsr
    mov eax, 02329B000h
    mov edx, 0fffffffeh             ; -1.6GHz * 5
    mov ecx, MSR_K7_PERFCTR0
    wrmsr

    FIXUP FIX_GC_APIC_BASE_32BIT, 1
    mov eax, 0f0f0f0f0h
    add eax, 0340h                  ; APIC_LVTPC
    mov dword [eax], 0400h          ; APIC_DM_NMI

    xor edx, edx
    mov eax, RT_BIT(20) | RT_BIT(17) | RT_BIT(16) | 076h | RT_BIT(22) ;+EN
    mov ecx, MSR_K7_EVNTSEL0
    wrmsr

    mov edx, esi
%endif

    ; General registers (sans edx).
    mov eax, [edx + CPUMCPU.Hyper.eax]
    mov ebx, [edx + CPUMCPU.Hyper.ebx]
    mov ecx, [edx + CPUMCPU.Hyper.ecx]
    mov ebp, [edx + CPUMCPU.Hyper.ebp]
    mov esi, [edx + CPUMCPU.Hyper.esi]
    mov edi, [edx + CPUMCPU.Hyper.edi]
    DEBUG_S_CHAR('!')

    ;;
    ;; Return to the VMM code which either called the switcher or
    ;; the code set up to run by HC.
    ;;
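    ; The three pushes below build an iret frame (eflags, cs, eip), so the
    ; final iret loads eflags and jumps in a single instruction; this keeps
    ; TF/RF semantics intact for debugging.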
    push dword [edx + CPUMCPU.Hyper.eflags]
    push cs
    push dword [edx + CPUMCPU.Hyper.eip]
    mov edx, [edx + CPUMCPU.Hyper.edx] ; !! edx is no longer pointing to CPUMCPU here !!

%ifdef DEBUG_STUFF
    COM_S_PRINT ';eip='
    push eax
    mov eax, [esp + 8]
    COM_S_DWORD_REG eax
    pop eax
    COM_S_CHAR ';'
%endif
%ifdef VBOX_WITH_STATISTICS
    push edx
    FIXUP FIX_GC_VM_OFF, 1, VM.StatSwitcherToGC
    mov edx, 0ffffffffh
    STAM_PROFILE_ADV_STOP edx
    pop edx
%endif

    iret                            ; Use iret to make debugging and TF/RF work.

;;
; Detour for saving the host DR7 and DR6.
; esi and edx must be preserved.
htg_debug_regs_save_dr7and6:
DEBUG_S_CHAR('s');
    mov eax, dr7                    ; not sure, but if I read the docs right this will trap if GD is set. FIXME!!!
    mov [edx + CPUMCPU.Host.dr7], eax
    xor eax, eax                    ; clear everything. (bit 12? is read as 1...)
    mov dr7, eax
    mov eax, dr6                    ; just in case we save the state register too.
    mov [edx + CPUMCPU.Host.dr6], eax
    jmp htg_debug_regs_no

;;
; Detour for saving host DR0-3 and loading hypervisor debug registers.
; esi and edx must be preserved.
htg_debug_regs_guest:
    DEBUG_S_CHAR('D')
    DEBUG_S_CHAR('R')
    DEBUG_S_CHAR('x')
    ; save host DR0-3.
    mov eax, dr0
    mov [edx + CPUMCPU.Host.dr0], eax
    mov ebx, dr1
    mov [edx + CPUMCPU.Host.dr1], ebx
    mov ecx, dr2
    mov [edx + CPUMCPU.Host.dr2], ecx
    mov eax, dr3
    mov [edx + CPUMCPU.Host.dr3], eax
    or dword [edx + CPUMCPU.fUseFlags], CPUM_USED_DEBUG_REGS_HOST

    ; load hyper DR0-7
    mov ebx, [edx + CPUMCPU.Hyper.dr]
    mov dr0, ebx
    mov ecx, [edx + CPUMCPU.Hyper.dr + 8*1]
    mov dr1, ecx
    mov eax, [edx + CPUMCPU.Hyper.dr + 8*2]
    mov dr2, eax
    mov ebx, [edx + CPUMCPU.Hyper.dr + 8*3]
    mov dr3, ebx
    mov ecx, X86_DR6_INIT_VAL
    mov dr6, ecx
    mov eax, [edx + CPUMCPU.Hyper.dr + 8*7]
    mov dr7, eax
    or dword [edx + CPUMCPU.fUseFlags], CPUM_USED_DEBUG_REGS_HYPER
    jmp htg_debug_regs_guest_done

ENDPROC vmmR0ToRawModeAsm


;;
; Trampoline for doing a call when starting the hypervisor execution.
;
; Push any arguments to the routine.
; Push the argument frame size (cArg * 4).
; Push the call target (_cdecl convention).
; Push the address of this routine.
;
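; By the time the pops below run the stack is therefore:
;   [esp]       call target (popped into eax)
;   [esp+4]     argument frame size in bytes (popped into edi)
;   [esp+8]...  the arguments themselves (released with 'add esp, edi')
;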
ALIGNCODE(16)
BEGINPROC vmmRCCallTrampoline
%ifdef DEBUG_STUFF
    COM_S_CHAR 'c'
    COM_S_CHAR 't'
    COM_S_CHAR '!'
%endif

    ; call routine
    pop eax                         ; call address
    pop edi                         ; argument count.
%ifdef DEBUG_STUFF
    COM_S_PRINT ';eax='
    COM_S_DWORD_REG eax
    COM_S_CHAR ';'
%endif
    call eax                        ; do call
    add esp, edi                    ; cleanup stack

    ; return to the host context.
%ifdef DEBUG_STUFF
    COM_S_CHAR '`'
%endif
.to_host_again:
    call NAME(vmmRCToHostAsm)
    mov eax, VERR_VMM_SWITCHER_IPE_1
    jmp .to_host_again
ENDPROC vmmRCCallTrampoline



;;
; The C interface.
;
ALIGNCODE(16)
BEGINPROC vmmRCToHost
%ifdef DEBUG_STUFF
    push esi
    COM_NEWLINE
    DEBUG_CHAR('b')
    DEBUG_CHAR('a')
    DEBUG_CHAR('c')
    DEBUG_CHAR('k')
    DEBUG_CHAR('!')
    COM_NEWLINE
    pop esi
%endif
    mov eax, [esp + 4]
    jmp NAME(vmmRCToHostAsm)
ENDPROC vmmRCToHost


;;
; vmmRCToHostAsmNoReturn
;
; This is an entry point used by TRPM when dealing with raw-mode traps,
; i.e. traps in the hypervisor code. This will not return and saves no
; state, because the caller has already saved the state.
;
; @param eax Return code.
;
ALIGNCODE(16)
BEGINPROC vmmRCToHostAsmNoReturn
    DEBUG_S_CHAR('%')

%ifdef VBOX_WITH_STATISTICS
    FIXUP FIX_GC_VM_OFF, 1, VM.StatTotalInGC
    mov edx, 0ffffffffh
    STAM32_PROFILE_ADV_STOP edx

    FIXUP FIX_GC_VM_OFF, 1, VM.StatTotalGCToQemu
    mov edx, 0ffffffffh
    STAM32_PROFILE_ADV_START edx

    FIXUP FIX_GC_VM_OFF, 1, VM.StatSwitcherToHC
    mov edx, 0ffffffffh
    STAM32_PROFILE_ADV_START edx
%endif

    FIXUP FIX_GC_CPUMCPU_OFF, 1, 0
    mov edx, 0ffffffffh

    jmp vmmRCToHostAsm_SaveNoGeneralRegs
ENDPROC vmmRCToHostAsmNoReturn


;;
; vmmRCToHostAsm
;
; This is an entry point used by TRPM to return to host context when an
; interrupt occurred or a guest trap needs handling in host context. It
; is also used by the C interface above.
;
; The hypervisor context is saved, and control will return to the caller
; if the host context so desires.
;
; @param eax Return code.
; @uses eax, edx, ecx (or it may use them in the future)
;
ALIGNCODE(16)
BEGINPROC vmmRCToHostAsm
    DEBUG_S_CHAR('%')
    push edx

%ifdef VBOX_WITH_STATISTICS
    FIXUP FIX_GC_VM_OFF, 1, VM.StatTotalInGC
    mov edx, 0ffffffffh
    STAM_PROFILE_ADV_STOP edx

    FIXUP FIX_GC_VM_OFF, 1, VM.StatTotalGCToQemu
    mov edx, 0ffffffffh
    STAM_PROFILE_ADV_START edx

    FIXUP FIX_GC_VM_OFF, 1, VM.StatSwitcherToHC
    mov edx, 0ffffffffh
    STAM_PROFILE_ADV_START edx
%endif

    ;
    ; Load the CPUMCPU pointer.
    ;
    FIXUP FIX_GC_CPUMCPU_OFF, 1, 0
    mov edx, 0ffffffffh

    ; Save register context.
    pop dword [edx + CPUMCPU.Hyper.edx]
    pop dword [edx + CPUMCPU.Hyper.eip] ; call return from stack
    mov dword [edx + CPUMCPU.Hyper.esp], esp
    mov dword [edx + CPUMCPU.Hyper.eax], eax
    mov dword [edx + CPUMCPU.Hyper.ebx], ebx
    mov dword [edx + CPUMCPU.Hyper.ecx], ecx
    mov dword [edx + CPUMCPU.Hyper.esi], esi
    mov dword [edx + CPUMCPU.Hyper.edi], edi
    mov dword [edx + CPUMCPU.Hyper.ebp], ebp

    ; special registers which may change.
vmmRCToHostAsm_SaveNoGeneralRegs:
    mov edi, eax                    ; save return code in EDI (careful with COM_DWORD_REG from here on!)
    ; str [edx + CPUMCPU.Hyper.tr] - double fault only, and it won't be right then either.
    sldt [edx + CPUMCPU.Hyper.ldtr.Sel]

    ; No need to save CRx here. They are set dynamically according to Guest/Host requirements.
    ; The FPU context is saved before the host FPU state is restored, in a separate branch further down.

    ; Disable debug registers if active so they cannot trigger while switching.
    test dword [edx + CPUMCPU.fUseFlags], CPUM_USED_DEBUG_REGS_HYPER
    jz .gth_disabled_dr7
    mov eax, X86_DR7_INIT_VAL
    mov dr7, eax
.gth_disabled_dr7:

%ifdef VBOX_WITH_NMI
    ;
    ; Disarm K7 NMI.
    ;
    mov esi, edx

    xor edx, edx
    xor eax, eax
    mov ecx, MSR_K7_EVNTSEL0
    wrmsr

    mov edx, esi
%endif


    ;;
    ;; Load Intermediate memory context.
    ;;
    mov ecx, [edx + CPUMCPU.Host.cr3]
    FIXUP SWITCHER_FIX_INTER_CR3_GC, 1
    mov eax, 0ffffffffh
    mov cr3, eax
    DEBUG_CHAR('?')

    ;; We're now in intermediate memory context!
%ifdef NEED_ID
    ;;
    ;; Jump to identity mapped location
    ;;
    FIXUP FIX_GC_2_ID_NEAR_REL, 1, NAME(IDExitTarget) - NAME(Start)
    jmp near NAME(IDExitTarget)

    ; We're now on identity mapped pages!
ALIGNCODE(16)
GLOBALNAME IDExitTarget
    DEBUG_CHAR('1')
    mov edx, cr4
%ifdef NEED_PAE_ON_32BIT_HOST
    and edx, ~X86_CR4_PAE
%else
    or edx, X86_CR4_PAE
%endif
    mov eax, cr0
    and eax, (~X86_CR0_PG) & 0xffffffff ; prevent yasm warning
    mov cr0, eax
    DEBUG_CHAR('2')
    mov cr4, edx
    FIXUP SWITCHER_FIX_INTER_CR3_HC, 1
    mov edx, 0ffffffffh
    mov cr3, edx
    or eax, X86_CR0_PG
    DEBUG_CHAR('3')
    mov cr0, eax
    DEBUG_CHAR('4')

    ;;
    ;; Jump to HC mapping.
    ;;
    FIXUP FIX_ID_2_HC_NEAR_REL, 1, NAME(HCExitTarget) - NAME(Start)
    jmp near NAME(HCExitTarget)
%else
    ;;
    ;; Jump to HC mapping.
    ;;
    FIXUP FIX_GC_2_HC_NEAR_REL, 1, NAME(HCExitTarget) - NAME(Start)
    jmp near NAME(HCExitTarget)
%endif


    ;
    ; When we arrive here we're at the host context
    ; mapping of the switcher code.
    ;
ALIGNCODE(16)
GLOBALNAME HCExitTarget
    DEBUG_CHAR('9')
    ; load final cr3
    mov cr3, ecx
    DEBUG_CHAR('@')


    ;;
    ;; Restore Host context.
    ;;
    ; Load CPUM pointer into edx
    FIXUP FIX_HC_CPUM_OFF, 1, 0
    mov edx, 0ffffffffh
    CPUMCPU_FROM_CPUM(edx)
    ; activate host gdt and idt
    lgdt [edx + CPUMCPU.Host.gdtr]
    DEBUG_CHAR('0')
    lidt [edx + CPUMCPU.Host.idtr]
    DEBUG_CHAR('1')
    ; Restore TSS selector; must mark it as not busy before using ltr (!)
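    ; The TR selector with TI/RPL masked off (and al, 0F8h) is the byte
    ; offset of the TSS descriptor within the GDT, and Host.gdtr + 2 is
    ; where sgdt stored the 32-bit GDT base (after the 16-bit limit); eax
    ; thus ends up pointing at the descriptor whose busy bit (0200h in the
    ; second dword) must be cleared.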
%if 1 ; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p)
    movzx eax, word [edx + CPUMCPU.Host.tr] ; eax <- TR
    and al, 0F8h                    ; mask away TI and RPL bits, get descriptor offset.
    add eax, [edx + CPUMCPU.Host.gdtr + 2] ; eax <- GDTR.address + descriptor offset.
    and dword [eax + 4], ~0200h     ; clear busy flag (2nd type2 bit)
    ltr word [edx + CPUMCPU.Host.tr]
%else
    movzx eax, word [edx + CPUMCPU.Host.tr] ; eax <- TR
    and al, 0F8h                    ; mask away TI and RPL bits, get descriptor offset.
    add eax, [edx + CPUMCPU.Host.gdtr + 2] ; eax <- GDTR.address + descriptor offset.
    mov ecx, [eax + 4]              ; ecx <- 2nd descriptor dword
    mov ebx, ecx                    ; save original value
    and ecx, ~0200h                 ; clear busy flag (2nd type2 bit)
    mov [eax + 4], ecx              ; not using xchg here is paranoia..
    ltr word [edx + CPUMCPU.Host.tr]
    xchg [eax + 4], ebx             ; using xchg is paranoia too...
%endif
    ; activate ldt
    DEBUG_CHAR('2')
    lldt [edx + CPUMCPU.Host.ldtr]
    ; Restore segment registers
    mov eax, [edx + CPUMCPU.Host.ds]
    mov ds, eax
    mov eax, [edx + CPUMCPU.Host.es]
    mov es, eax
    mov eax, [edx + CPUMCPU.Host.fs]
    mov fs, eax
    mov eax, [edx + CPUMCPU.Host.gs]
    mov gs, eax
    ; restore stack
    lss esp, [edx + CPUMCPU.Host.esp]


    FIXUP FIX_NO_SYSENTER_JMP, 0, gth_sysenter_no - NAME(Start) ; this will insert a jmp gth_sysenter_no if host doesn't use sysenter.
    ; restore MSR_IA32_SYSENTER_CS register.
    mov ecx, MSR_IA32_SYSENTER_CS
    mov eax, [edx + CPUMCPU.Host.SysEnter.cs]
    mov ebx, [edx + CPUMCPU.Host.SysEnter.cs + 4]
    xchg edx, ebx                   ; save/load edx
    wrmsr                           ; MSR[ecx] <- edx:eax
    xchg edx, ebx                   ; restore edx
    jmp short gth_sysenter_no

ALIGNCODE(16)
gth_sysenter_no:

    FIXUP FIX_NO_SYSCALL_JMP, 0, gth_syscall_no - NAME(Start) ; this will insert a jmp gth_syscall_no if host doesn't use syscall.
    ; set MSR_K6_EFER_SCE.
    mov ebx, edx                    ; save edx
    mov ecx, MSR_K6_EFER
    rdmsr
    or eax, MSR_K6_EFER_SCE
    wrmsr
    mov edx, ebx                    ; restore edx
    jmp short gth_syscall_no

ALIGNCODE(16)
gth_syscall_no:

    ; Restore FPU if guest has used it.
    ; Using fxrstor should ensure that we're not causing unwanted exceptions on the host.
    mov esi, [edx + CPUMCPU.fUseFlags] ; esi == use flags.
    test esi, CPUM_USED_FPU
    jz near gth_fpu_no
    mov ecx, cr0
    and ecx, ~(X86_CR0_TS | X86_CR0_EM)
    mov cr0, ecx

    FIXUP FIX_NO_FXSAVE_JMP, 0, gth_no_fxsave - NAME(Start) ; this will insert a jmp gth_no_fxsave if fxsave isn't supported.
    fxsave [edx + CPUMCPU.Guest.fpu]
    fxrstor [edx + CPUMCPU.Host.fpu]
    jmp near gth_fpu_no

gth_no_fxsave:
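    ; Legacy path: unlike fxrstor, frstor would re-arm any exception that is
    ; pending in the saved status word and unmasked by the control word, and
    ; the next host FPU instruction would then raise it. The control word
    ; uses 1 = masked in its low six bits, so (~control & 3Fh) & status
    ; yields the pending unmasked exceptions, which are cleared below.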
    fnsave [edx + CPUMCPU.Guest.fpu]
    mov eax, [edx + CPUMCPU.Host.fpu]   ; control word
    not eax                             ; 1 means exception ignored (6 LS bits)
    and eax, byte 03Fh                  ; 6 LS bits only
    test eax, [edx + CPUMCPU.Host.fpu + 4] ; status word
    jz gth_no_exceptions_pending

    ; technically incorrect, but we certainly don't want any exceptions now!!
    and dword [edx + CPUMCPU.Host.fpu + 4], ~03Fh

gth_no_exceptions_pending:
    frstor [edx + CPUMCPU.Host.fpu]
    jmp short gth_fpu_no

ALIGNCODE(16)
gth_fpu_no:

    ; Control registers.
    ; Would've liked to have these higher up in case of crashes, but
    ; the fpu stuff must be done before we restore cr0.
    mov ecx, [edx + CPUMCPU.Host.cr4]
    mov cr4, ecx
    mov ecx, [edx + CPUMCPU.Host.cr0]
    mov cr0, ecx
    ;mov ecx, [edx + CPUMCPU.Host.cr2] ; assumes this is waste of time.
    ;mov cr2, ecx

    ; restore debug registers (if modified) (esi must still be fUseFlags!)
    ; (must be done after cr4 reload because of the debug extension.)
    test esi, CPUM_USE_DEBUG_REGS_HYPER | CPUM_USE_DEBUG_REGS_HOST | CPUM_USED_DEBUG_REGS_HOST
    jnz gth_debug_regs_restore
gth_debug_regs_done:

    ; restore general registers.
    mov eax, edi                    ; restore return code. eax = return code !!
    mov edi, [edx + CPUMCPU.Host.edi]
    mov esi, [edx + CPUMCPU.Host.esi]
    mov ebx, [edx + CPUMCPU.Host.ebx]
    mov ebp, [edx + CPUMCPU.Host.ebp]
    push dword [edx + CPUMCPU.Host.eflags]
    popfd

%ifdef DEBUG_STUFF
;    COM_S_CHAR '4'
%endif
    retf

;;
; Detour for restoring the host debug registers.
; edx and edi must be preserved.
gth_debug_regs_restore:
    DEBUG_S_CHAR('d')
    mov eax, dr7                    ; Some DR7 paranoia first...
    mov ecx, X86_DR7_INIT_VAL
    cmp eax, ecx
    je .gth_debug_skip_dr7_disabling
    mov dr7, ecx
.gth_debug_skip_dr7_disabling:
    test esi, CPUM_USED_DEBUG_REGS_HOST
    jz .gth_debug_regs_dr7

    DEBUG_S_CHAR('r')
    mov eax, [edx + CPUMCPU.Host.dr0]
    mov dr0, eax
    mov ebx, [edx + CPUMCPU.Host.dr1]
    mov dr1, ebx
    mov ecx, [edx + CPUMCPU.Host.dr2]
    mov dr2, ecx
    mov eax, [edx + CPUMCPU.Host.dr3]
    mov dr3, eax
.gth_debug_regs_dr7:
    mov ebx, [edx + CPUMCPU.Host.dr6]
    mov dr6, ebx
    mov ecx, [edx + CPUMCPU.Host.dr7]
    mov dr7, ecx

    and dword [edx + CPUMCPU.fUseFlags], ~(CPUM_USED_DEBUG_REGS_HOST | CPUM_USED_DEBUG_REGS_HYPER)
    jmp gth_debug_regs_done

ENDPROC vmmRCToHostAsm


GLOBALNAME End
;
; The description string (in the text section).
;
NAME(Description):
    db SWITCHER_DESCRIPTION
    db 0

extern NAME(Relocate)

;
; End the fixup records.
;
BEGINDATA
    db FIX_THE_END                  ; final entry.
GLOBALNAME FixupsEnd

;;
; The switcher definition structure.
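; All offsets are expressed relative to NAME(Start), so the structure stays
; valid wherever the assembled switcher blob ends up being mapped; they are
; resolved against the actual load addresses when the switcher is installed.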
ALIGNDATA(16)
GLOBALNAME Def
    istruc VMMSWITCHERDEF
        at VMMSWITCHERDEF.pvCode,                 RTCCPTR_DEF NAME(Start)
        at VMMSWITCHERDEF.pvFixups,               RTCCPTR_DEF NAME(Fixups)
        at VMMSWITCHERDEF.pszDesc,                RTCCPTR_DEF NAME(Description)
        at VMMSWITCHERDEF.pfnRelocate,            RTCCPTR_DEF NAME(Relocate)
        at VMMSWITCHERDEF.enmType,                dd SWITCHER_TYPE
        at VMMSWITCHERDEF.cbCode,                 dd NAME(End) - NAME(Start)
        at VMMSWITCHERDEF.offR0ToRawMode,         dd NAME(vmmR0ToRawMode) - NAME(Start)
        at VMMSWITCHERDEF.offRCToHost,            dd NAME(vmmRCToHost) - NAME(Start)
        at VMMSWITCHERDEF.offRCCallTrampoline,    dd NAME(vmmRCCallTrampoline) - NAME(Start)
        at VMMSWITCHERDEF.offRCToHostAsm,         dd NAME(vmmRCToHostAsm) - NAME(Start)
        at VMMSWITCHERDEF.offRCToHostAsmNoReturn, dd NAME(vmmRCToHostAsmNoReturn) - NAME(Start)
        ; disasm help
        at VMMSWITCHERDEF.offHCCode0,             dd 0
%ifdef NEED_ID
        at VMMSWITCHERDEF.cbHCCode0,              dd NAME(IDEnterTarget) - NAME(Start)
%else
        at VMMSWITCHERDEF.cbHCCode0,              dd NAME(FarJmpGCTarget) - NAME(Start)
%endif
        at VMMSWITCHERDEF.offHCCode1,             dd NAME(HCExitTarget) - NAME(Start)
        at VMMSWITCHERDEF.cbHCCode1,              dd NAME(End) - NAME(HCExitTarget)
%ifdef NEED_ID
        at VMMSWITCHERDEF.offIDCode0,             dd NAME(IDEnterTarget) - NAME(Start)
        at VMMSWITCHERDEF.cbIDCode0,              dd NAME(FarJmpGCTarget) - NAME(IDEnterTarget)
        at VMMSWITCHERDEF.offIDCode1,             dd NAME(IDExitTarget) - NAME(Start)
        at VMMSWITCHERDEF.cbIDCode1,              dd NAME(HCExitTarget) - NAME(IDExitTarget)
%else
        at VMMSWITCHERDEF.offIDCode0,             dd 0
        at VMMSWITCHERDEF.cbIDCode0,              dd 0
        at VMMSWITCHERDEF.offIDCode1,             dd 0
        at VMMSWITCHERDEF.cbIDCode1,              dd 0
%endif
        at VMMSWITCHERDEF.offGCCode,              dd NAME(FarJmpGCTarget) - NAME(Start)
%ifdef NEED_ID
        at VMMSWITCHERDEF.cbGCCode,               dd NAME(IDExitTarget) - NAME(FarJmpGCTarget)
%else
        at VMMSWITCHERDEF.cbGCCode,               dd NAME(HCExitTarget) - NAME(FarJmpGCTarget)
%endif

    iend