VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMSwitcher/PAEand32Bit.mac@ 47660

Last change on this file since 47660 was 47660, checked in by vboxsync, 11 years ago

VMM: Debug register handling redo. (only partly tested on AMD-V so far.)

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 30.3 KB
Line 
1; $Id: PAEand32Bit.mac 47660 2013-08-12 00:37:34Z vboxsync $
2;; @file
3; VMM - World Switchers, template for PAE and 32-Bit.
4;
5
6;
7; Copyright (C) 2006-2013 Oracle Corporation
8;
9; This file is part of VirtualBox Open Source Edition (OSE), as
10; available from http://www.virtualbox.org. This file is free software;
11; you can redistribute it and/or modify it under the terms of the GNU
12; General Public License (GPL) as published by the Free Software
13; Foundation, in version 2 as it comes in the "COPYING" file of the
14; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16;
17
18;%define DEBUG_STUFF 1
19
20;*******************************************************************************
21;* Header Files *
22;*******************************************************************************
23%include "VBox/asmdefs.mac"
24%include "VBox/apic.mac"
25%include "iprt/x86.mac"
26%include "VBox/vmm/cpum.mac"
27%include "VBox/vmm/stam.mac"
28%include "VBox/vmm/vm.mac"
29%include "VBox/err.mac"
30%include "CPUMInternal.mac"
31%include "VMMSwitcher.mac"
32
33%undef NEED_ID
34%ifdef NEED_PAE_ON_32BIT_HOST
35%define NEED_ID
36%endif
37%ifdef NEED_32BIT_ON_PAE_HOST
38%define NEED_ID
39%endif
40
41
42
;
; Start the fixup records
; We collect the fixups in the .data section as we go along
; It is therefore VITAL that no-one is using the .data section
; for anything else between 'Start' and 'End'.
;
BEGINDATA
GLOBALNAME Fixups                       ; Head of the fixup record list; terminated by FIX_THE_END at FixupsEnd.



BEGINCODE
GLOBALNAME Start                        ; First byte of switcher code; all FIXUP offsets below are relative to this.
56
;;
; The C interface.
;
; Entry point called from ring-0 C code.  The CPUM pointer is patched into
; the 0ffffffffh placeholder via the FIX_HC_CPUM_OFF fixup at load time.
; Returns (in eax) the status code that vmmRCToHostAsm was given.
;
BEGINPROC vmmR0ToRawMode

%ifdef DEBUG_STUFF
    COM_S_NEWLINE
    COM_S_CHAR '^'
%endif

%ifdef VBOX_WITH_STATISTICS
    ;
    ; Switcher stats.
    ;
    FIXUP FIX_HC_VM_OFF, 1, VM.StatSwitcherToGC
    mov     edx, 0ffffffffh             ; patched: edx = &pVM->StatSwitcherToGC
    STAM_PROFILE_ADV_START edx
%endif

    ;
    ; Call worker.
    ;
    FIXUP FIX_HC_CPUM_OFF, 1, 0
    mov     edx, 0ffffffffh             ; patched: edx = host address of CPUM
    push    cs                          ; allow for far return and restore cs correctly.
    call    NAME(vmmR0ToRawModeAsm)

%ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
    CPUM_FROM_CPUMCPU(edx)
    ; Restore blocked Local APIC NMI vectors
    ; Do this here to ensure the host CS is already restored
    ; fApicDisVectors bits (shifted out one at a time): 0=LINT0, 1=LINT1, 2=PC, 3=THMR.
    mov     ecx, [edx + CPUM.fApicDisVectors]
    mov     edx, [edx + CPUM.pvApicBase]
    shr     ecx, 1
    jnc     gth_nolint0
    and     dword [edx + APIC_REG_LVT_LINT0], ~APIC_REG_LVT_MASKED
gth_nolint0:
    shr     ecx, 1
    jnc     gth_nolint1
    and     dword [edx + APIC_REG_LVT_LINT1], ~APIC_REG_LVT_MASKED
gth_nolint1:
    shr     ecx, 1
    jnc     gth_nopc
    and     dword [edx + APIC_REG_LVT_PC], ~APIC_REG_LVT_MASKED
gth_nopc:
    shr     ecx, 1
    jnc     gth_notherm
    and     dword [edx + APIC_REG_LVT_THMR], ~APIC_REG_LVT_MASKED
gth_notherm:
%endif

%ifdef VBOX_WITH_STATISTICS
    ;
    ; Switcher stats.
    ;
    FIXUP FIX_HC_VM_OFF, 1, VM.StatSwitcherToHC
    mov     edx, 0ffffffffh             ; patched: edx = &pVM->StatSwitcherToHC
    STAM_PROFILE_ADV_STOP edx
%endif

    ret
ENDPROC vmmR0ToRawMode
119
120
121
; *****************************************************************************
; vmmR0ToRawModeAsm
;
; Phase one of the switch from host to guest context (host MMU context)
;
; INPUT:
; - edx virtual address of CPUM structure (valid in host context)
;
; USES/DESTROYS:
; - eax, ecx, edx
;
; ASSUMPTION:
; - current CS and DS selectors are wide open
;
; *****************************************************************************
ALIGNCODE(16)
BEGINPROC vmmR0ToRawModeAsm
    ;;
    ;; Save CPU host context
    ;; Skip eax, edx and ecx as these are not preserved over calls.
    ;;
    CPUMCPU_FROM_CPUM(edx)
    ; general registers.
    mov     [edx + CPUMCPU.Host.ebx], ebx
    mov     [edx + CPUMCPU.Host.edi], edi
    mov     [edx + CPUMCPU.Host.esi], esi
    mov     [edx + CPUMCPU.Host.esp], esp
    mov     [edx + CPUMCPU.Host.ebp], ebp
    ; selectors.
    mov     [edx + CPUMCPU.Host.ds], ds
    mov     [edx + CPUMCPU.Host.es], es
    mov     [edx + CPUMCPU.Host.fs], fs
    mov     [edx + CPUMCPU.Host.gs], gs
    mov     [edx + CPUMCPU.Host.ss], ss
    ; special registers.
    sldt    [edx + CPUMCPU.Host.ldtr]
    sidt    [edx + CPUMCPU.Host.idtr]
    sgdt    [edx + CPUMCPU.Host.gdtr]
    str     [edx + CPUMCPU.Host.tr]
    ; flags
    pushfd
    pop     dword [edx + CPUMCPU.Host.eflags]

    ; Block Local APIC NMI vectors
    xor     edi, edi                    ; edi = bitmask of LVT entries we masked (0=LINT0, 1=LINT1, 2=PC, 3=THMR).

%ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
    mov     esi, edx                    ; save CPUMCPU pointer across the APIC fiddling.
    CPUM_FROM_CPUMCPU(edx)
    mov     ebx, [edx + CPUM.pvApicBase]
    or      ebx, ebx
    jz      htg_noapic                  ; no mapped local APIC -> nothing to mask.
    mov     eax, [ebx + APIC_REG_LVT_LINT0]
    mov     ecx, eax
    and     ecx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
    cmp     ecx, APIC_REG_LVT_MODE_NMI  ; only mask entries delivering NMI and not already masked.
    jne     htg_nolint0
    or      edi, 0x01
    or      eax, APIC_REG_LVT_MASKED
    mov     [ebx + APIC_REG_LVT_LINT0], eax
    mov     eax, [ebx + APIC_REG_LVT_LINT0] ; write completion
htg_nolint0:
    mov     eax, [ebx + APIC_REG_LVT_LINT1]
    mov     ecx, eax
    and     ecx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
    cmp     ecx, APIC_REG_LVT_MODE_NMI
    jne     htg_nolint1
    or      edi, 0x02
    or      eax, APIC_REG_LVT_MASKED
    mov     [ebx + APIC_REG_LVT_LINT1], eax
    mov     eax, [ebx + APIC_REG_LVT_LINT1] ; write completion
htg_nolint1:
    mov     eax, [ebx + APIC_REG_LVT_PC]
    mov     ecx, eax
    and     ecx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
    cmp     ecx, APIC_REG_LVT_MODE_NMI
    jne     htg_nopc
    or      edi, 0x04
    or      eax, APIC_REG_LVT_MASKED
    mov     [ebx + APIC_REG_LVT_PC], eax
    mov     eax, [ebx + APIC_REG_LVT_PC] ; write completion
htg_nopc:
    mov     eax, [ebx + APIC_REG_VERSION]
    shr     eax, 16                     ; al = max LVT entry number.
    cmp     al, 5                       ; thermal LVT only exists when >= 5 entries.
    jb      htg_notherm
    mov     eax, [ebx + APIC_REG_LVT_THMR]
    mov     ecx, eax
    and     ecx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
    cmp     ecx, APIC_REG_LVT_MODE_NMI
    jne     htg_notherm
    or      edi, 0x08
    or      eax, APIC_REG_LVT_MASKED
    mov     [ebx + APIC_REG_LVT_THMR], eax
    mov     eax, [ebx + APIC_REG_LVT_THMR] ; write completion
htg_notherm:
    mov     [edx + CPUM.fApicDisVectors], edi
htg_noapic:
    mov     edx, esi                    ; edx = CPUMCPU pointer again.
%endif

    FIXUP FIX_NO_SYSENTER_JMP, 0, htg_no_sysenter - NAME(Start) ; this will insert a jmp htg_no_sysenter if host doesn't use sysenter.
    ; save MSR_IA32_SYSENTER_CS register.
    mov     ecx, MSR_IA32_SYSENTER_CS
    mov     ebx, edx                    ; save edx
    rdmsr                               ; edx:eax <- MSR[ecx]
    mov     [ebx + CPUMCPU.Host.SysEnter.cs], eax
    mov     [ebx + CPUMCPU.Host.SysEnter.cs + 4], edx
    xor     eax, eax                    ; load 0:0 to cause #GP upon sysenter
    xor     edx, edx
    wrmsr
    xchg    ebx, edx                    ; restore edx
    jmp short htg_no_sysenter

ALIGNCODE(16)
htg_no_sysenter:

    FIXUP FIX_NO_SYSCALL_JMP, 0, htg_no_syscall - NAME(Start) ; this will insert a jmp htg_no_syscall if host doesn't use syscall.
    ; clear MSR_K6_EFER_SCE.
    mov     ebx, edx                    ; save edx
    mov     ecx, MSR_K6_EFER
    rdmsr                               ; edx:eax <- MSR[ecx]
    and     eax, ~MSR_K6_EFER_SCE
    wrmsr
    mov     edx, ebx                    ; restore edx
    jmp short htg_no_syscall

ALIGNCODE(16)
htg_no_syscall:

    ;; handle use flags.
    mov     esi, [edx + CPUMCPU.fUseFlags] ; esi == use flags.
    and     esi, ~CPUM_USED_FPU         ; Clear CPUM_USED_* flags. ;;@todo FPU check can be optimized to use cr0 flags!
    mov     [edx + CPUMCPU.fUseFlags], esi

    ; debug registers.
    test    esi, CPUM_USE_DEBUG_REGS_HYPER | CPUM_USE_DEBUG_REGS_HOST
    jnz     htg_debug_regs_save_dr7and6
htg_debug_regs_no:

    ; control registers.
    mov     eax, cr0
    mov     [edx + CPUMCPU.Host.cr0], eax
    ;mov     eax, cr2                   ; assume host os don't suff things in cr2. (safe)
    ;mov     [edx + CPUMCPU.Host.cr2], eax
    mov     eax, cr3
    mov     [edx + CPUMCPU.Host.cr3], eax
    mov     eax, cr4
    mov     [edx + CPUMCPU.Host.cr4], eax

    ;;
    ;; Start switching to VMM context.
    ;;

    ;
    ; Change CR0 and CR4 so we can correctly emulate FPU/MMX/SSE[23] exceptions
    ; Also disable WP. (eax==cr4 now)
    ; Note! X86_CR4_PSE and X86_CR4_PAE are important if the host thinks so :-)
    ; Note! X86_CR4_VMXE must not be touched in case the CPU is in vmx root mode
    ;
    and     eax, X86_CR4_MCE | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_VMXE
    mov     ecx, [edx + CPUMCPU.Guest.cr4]
    ;; @todo Switcher cleanup: Determine base CR4 during CPUMR0Init / VMMR3SelectSwitcher putting it
    ;        in CPUMCPU.Hyper.cr4 (which isn't currently being used). That should
    ;        simplify this operation a bit (and improve locality of the data).

    ;
    ; CR4.AndMask and CR4.OrMask are set in CPUMR3Init based on the presence of
    ; FXSAVE support on the host CPU
    ;
    CPUM_FROM_CPUMCPU(edx)
    and     ecx, [edx + CPUM.CR4.AndMask]
    or      eax, ecx
    or      eax, [edx + CPUM.CR4.OrMask]
    mov     cr4, eax

    CPUMCPU_FROM_CPUM(edx)
    mov     eax, [edx + CPUMCPU.Guest.cr0]
    and     eax, X86_CR0_EM
    or      eax, X86_CR0_PE | X86_CR0_PG | X86_CR0_TS | X86_CR0_ET | X86_CR0_NE | X86_CR0_MP
    mov     cr0, eax

    ; Load new gdt so we can do far jump to guest code after cr3 reload.
    lgdt    [edx + CPUMCPU.Hyper.gdtr]
    DEBUG_CHAR('1')                     ; trashes esi

    ; Store the hypervisor cr3 for later loading
    mov     ebp, [edx + CPUMCPU.Hyper.cr3]

    ;;
    ;; Load Intermediate memory context.
    ;;
    FIXUP SWITCHER_FIX_INTER_CR3_HC, 1
    mov     eax, 0ffffffffh             ; patched: intermediate context CR3 (host flavor).
    mov     cr3, eax
    DEBUG_CHAR('2')                     ; trashes esi

%ifdef NEED_ID
    ;;
    ;; Jump to identity mapped location
    ;;
    FIXUP FIX_HC_2_ID_NEAR_REL, 1, NAME(IDEnterTarget) - NAME(Start)
    jmp near NAME(IDEnterTarget)

    ; We're now on identity mapped pages!
ALIGNCODE(16)
GLOBALNAME IDEnterTarget
    DEBUG_CHAR('3')
    mov     edx, cr4
%ifdef NEED_PAE_ON_32BIT_HOST
    or      edx, X86_CR4_PAE            ; switching 32-bit host -> PAE guest paging.
%else
    and     edx, ~X86_CR4_PAE           ; switching PAE host -> 32-bit guest paging.
%endif
    mov     eax, cr0
    and     eax, (~X86_CR0_PG) & 0xffffffff ; prevent yasm warning
    mov     cr0, eax                    ; paging off while we flip CR4.PAE.
    DEBUG_CHAR('4')
    mov     cr4, edx
    FIXUP SWITCHER_FIX_INTER_CR3_GC, 1
    mov     edx, 0ffffffffh             ; patched: intermediate context CR3 (guest flavor).
    mov     cr3, edx
    or      eax, X86_CR0_PG
    DEBUG_CHAR('5')
    mov     cr0, eax                    ; paging back on in the new mode.
    DEBUG_CHAR('6')
%endif

    ;;
    ;; Jump to guest code mapping of the code and load the Hypervisor CS.
    ;;
    FIXUP FIX_GC_FAR32, 1, NAME(FarJmpGCTarget) - NAME(Start)
    jmp     0fff8h:0deadfaceh           ; patched: selector and offset filled in by the fixup.


    ;;
    ;; When we arrive at this label we're at the
    ;; guest code mapping of the switching code.
    ;;
ALIGNCODE(16)
GLOBALNAME FarJmpGCTarget
    DEBUG_CHAR('-')
    ; load final cr3 and do far jump to load cs.
    mov     cr3, ebp                    ; ebp set above
    DEBUG_CHAR('0')

    ;;
    ;; We're in VMM MMU context and VMM CS is loaded.
    ;; Setup the rest of the VMM state.
    ;;
    FIXUP FIX_GC_CPUMCPU_OFF, 1, 0
    mov     edx, 0ffffffffh             ; patched: edx = guest-context address of CPUMCPU.
    ; Activate guest IDT
    DEBUG_CHAR('1')
    lidt    [edx + CPUMCPU.Hyper.idtr]
    ; Load selectors
    DEBUG_CHAR('2')
    FIXUP FIX_HYPER_DS, 1
    mov     eax, 0ffffh                 ; patched: hypervisor DS selector.
    mov     ds, eax
    mov     es, eax
    xor     eax, eax
    mov     gs, eax
    mov     fs, eax

    ; Setup stack.
    DEBUG_CHAR('3')
    mov     eax, [edx + CPUMCPU.Hyper.ss.Sel]
    mov     ss, ax
    mov     esp, [edx + CPUMCPU.Hyper.esp]

    ; Restore TSS selector; must mark it as not busy before using ltr (!)
    DEBUG_CHAR('4')
    FIXUP FIX_GC_TSS_GDTE_DW2, 2
    and     dword [0ffffffffh], ~0200h  ; clear busy flag (2nd type2 bit)
    DEBUG_CHAR('5')
    ltr     word [edx + CPUMCPU.Hyper.tr.Sel]
    DEBUG_CHAR('6')

    ; Activate the ldt (now we can safely crash).
    lldt    [edx + CPUMCPU.Hyper.ldtr.Sel]
    DEBUG_CHAR('7')

    ;; use flags.
    mov     esi, [edx + CPUMCPU.fUseFlags]

    ; debug registers
    test    esi, CPUM_USE_DEBUG_REGS_HYPER
    jnz     htg_debug_regs_guest
htg_debug_regs_guest_done:
    DEBUG_CHAR('9')

%ifdef VBOX_WITH_NMI
    ;
    ; Setup K7 NMI.
    ;
    mov     esi, edx                    ; save CPUMCPU pointer (edx is clobbered by wrmsr).
    ; clear all PerfEvtSeln registers
    xor     eax, eax
    xor     edx, edx
    mov     ecx, MSR_K7_PERFCTR0
    wrmsr
    mov     ecx, MSR_K7_PERFCTR1
    wrmsr
    mov     ecx, MSR_K7_PERFCTR2
    wrmsr
    mov     ecx, MSR_K7_PERFCTR3
    wrmsr

    mov     eax, RT_BIT(20) | RT_BIT(17) | RT_BIT(16) | 076h
    mov     ecx, MSR_K7_EVNTSEL0
    wrmsr
    mov     eax, 02329B000h
    mov     edx, 0fffffffeh             ; -1.6GHz * 5
    mov     ecx, MSR_K7_PERFCTR0
    wrmsr

    FIXUP FIX_GC_APIC_BASE_32BIT, 1
    mov     eax, 0f0f0f0f0h             ; patched: guest-context APIC base.
    add     eax, 0340h                  ; APIC_LVTPC
    mov     dword [eax], 0400h          ; APIC_DM_NMI

    xor     edx, edx
    mov     eax, RT_BIT(20) | RT_BIT(17) | RT_BIT(16) | 076h | RT_BIT(22) ;+EN
    mov     ecx, MSR_K7_EVNTSEL0
    wrmsr

    mov     edx, esi                    ; restore CPUMCPU pointer.
%endif

    ; General registers (sans edx).
    mov     eax, [edx + CPUMCPU.Hyper.eax]
    mov     ebx, [edx + CPUMCPU.Hyper.ebx]
    mov     ecx, [edx + CPUMCPU.Hyper.ecx]
    mov     ebp, [edx + CPUMCPU.Hyper.ebp]
    mov     esi, [edx + CPUMCPU.Hyper.esi]
    mov     edi, [edx + CPUMCPU.Hyper.edi]
    DEBUG_S_CHAR('!')

    ;;
    ;; Return to the VMM code which either called the switcher or
    ;; the code set up to run by HC.
    ;;
    push    dword [edx + CPUMCPU.Hyper.eflags]
    push    cs
    push    dword [edx + CPUMCPU.Hyper.eip]
    mov     edx, [edx + CPUMCPU.Hyper.edx] ; !! edx is no longer pointing to CPUMCPU here !!

%ifdef DEBUG_STUFF
    COM_S_PRINT ';eip='
    push    eax
    mov     eax, [esp + 8]
    COM_S_DWORD_REG eax
    pop     eax
    COM_S_CHAR ';'
%endif
%ifdef VBOX_WITH_STATISTICS
    push    edx
    FIXUP FIX_GC_VM_OFF, 1, VM.StatSwitcherToGC
    mov     edx, 0ffffffffh
    STAM_PROFILE_ADV_STOP edx
    pop     edx
%endif

    iret                                ; Use iret to make debugging and TF/RF work.

;;
; Detour for saving the host DR7 and DR6.
; esi and edx must be preserved.
htg_debug_regs_save_dr7and6:
    DEBUG_S_CHAR('s')
    mov     eax, dr7                    ; not sure, but if I read the docs right this will trap if GD is set. FIXME!!!
    mov     [edx + CPUMCPU.Host.dr7], eax
    xor     eax, eax                    ; clear everything. (bit 12? is read as 1...)
    mov     dr7, eax
    mov     eax, dr6                    ; just in case we save the state register too.
    mov     [edx + CPUMCPU.Host.dr6], eax
    jmp     htg_debug_regs_no

;;
; Detour for saving host DR0-3 and loading hypervisor debug registers.
; esi and edx must be preserved.
htg_debug_regs_guest:
    DEBUG_S_CHAR('D')
    DEBUG_S_CHAR('R')
    DEBUG_S_CHAR('x')
    ; save host DR0-3.
    mov     eax, dr0
    mov     [edx + CPUMCPU.Host.dr0], eax
    mov     ebx, dr1
    mov     [edx + CPUMCPU.Host.dr1], ebx
    mov     ecx, dr2
    mov     [edx + CPUMCPU.Host.dr2], ecx
    mov     eax, dr3
    mov     [edx + CPUMCPU.Host.dr3], eax
    ; Bugfix: edx holds the CPUMCPU pointer here (see detour contract above);
    ; edi holds the APIC fApicDisVectors mask and must NOT be dereferenced.
    or      dword [edx + CPUMCPU.fUseFlags], CPUM_USED_DEBUG_REGS_HOST

    ; load hyper DR0-7
    mov     ebx, [edx + CPUMCPU.Hyper.dr]
    mov     dr0, ebx
    mov     ecx, [edx + CPUMCPU.Hyper.dr + 8*1]
    mov     dr1, ecx
    mov     eax, [edx + CPUMCPU.Hyper.dr + 8*2]
    mov     dr2, eax
    mov     ebx, [edx + CPUMCPU.Hyper.dr + 8*3]
    mov     dr3, ebx
    mov     ecx, X86_DR6_INIT_VAL
    mov     dr6, ecx
    mov     eax, [edx + CPUMCPU.Hyper.dr + 8*7]
    mov     dr7, eax
    ; Bugfix: same as above - base the flag update on edx, not edi.
    or      dword [edx + CPUMCPU.fUseFlags], CPUM_USED_DEBUG_REGS_HYPER
    jmp     htg_debug_regs_guest_done

ENDPROC vmmR0ToRawModeAsm
536
537
;;
; Trampoline for doing a call when starting the hyper visor execution.
;
; Expected stack layout (pushed by the caller, top first):
; Push any arguments to the routine.
; Push the argument frame size (cArg * 4).
; Push the call target (_cdecl convention).
; Push the address of this routine.
;
; After the callee returns, the argument frame is popped and control goes
; back to the host context; reaching the loop below again is an internal error.
;
ALIGNCODE(16)
BEGINPROC vmmRCCallTrampoline
%ifdef DEBUG_STUFF
    COM_S_CHAR 'c'
    COM_S_CHAR 't'
    COM_S_CHAR '!'
%endif

    ; call routine
    pop     eax                         ; call address
    pop     edi                         ; argument count.
%ifdef DEBUG_STUFF
    COM_S_PRINT ';eax='
    COM_S_DWORD_REG eax
    COM_S_CHAR ';'
%endif
    call    eax                         ; do call
    add     esp, edi                    ; cleanup stack

    ; return to the host context.
%ifdef DEBUG_STUFF
    COM_S_CHAR '`'
%endif
.to_host_again:
    call    NAME(vmmRCToHostAsm)        ; should not return; if it does, report IPE and retry.
    mov     eax, VERR_VMM_SWITCHER_IPE_1
    jmp     .to_host_again
ENDPROC vmmRCCallTrampoline
575
576
577
;;
; The C interface.
;
; Raw-mode context C entry point for returning to the host; fetches the
; status code from the stack argument and tail-jumps to vmmRCToHostAsm.
;
ALIGNCODE(16)
BEGINPROC vmmRCToHost
%ifdef DEBUG_STUFF
    push    esi
    COM_NEWLINE
    DEBUG_CHAR('b')
    DEBUG_CHAR('a')
    DEBUG_CHAR('c')
    DEBUG_CHAR('k')
    DEBUG_CHAR('!')
    COM_NEWLINE
    pop     esi
%endif
    mov     eax, [esp + 4]              ; eax = rc argument (cdecl).
    jmp     NAME(vmmRCToHostAsm)
ENDPROC vmmRCToHost
597
598
;;
; vmmRCToHostAsmNoReturn
;
; This is an entry point used by TRPM when dealing with raw-mode traps,
; i.e. traps in the hypervisor code. This will not return and saves no
; state, because the caller has already saved the state.
;
; Falls through into vmmRCToHostAsm at the SaveNoGeneralRegs label,
; skipping the general register save.
;
; @param eax Return code.
;
ALIGNCODE(16)
BEGINPROC vmmRCToHostAsmNoReturn
    DEBUG_S_CHAR('%')

%ifdef VBOX_WITH_STATISTICS
    FIXUP FIX_GC_VM_OFF, 1, VM.StatTotalInGC
    mov     edx, 0ffffffffh
    STAM32_PROFILE_ADV_STOP edx

    FIXUP FIX_GC_VM_OFF, 1, VM.StatTotalGCToQemu
    mov     edx, 0ffffffffh
    STAM32_PROFILE_ADV_START edx

    FIXUP FIX_GC_VM_OFF, 1, VM.StatSwitcherToHC
    mov     edx, 0ffffffffh
    STAM32_PROFILE_ADV_START edx
%endif

    FIXUP FIX_GC_CPUMCPU_OFF, 1, 0
    mov     edx, 0ffffffffh             ; patched: edx = guest-context address of CPUMCPU.

    jmp     vmmRCToHostAsm_SaveNoGeneralRegs
ENDPROC vmmRCToHostAsmNoReturn
631
632
;;
; vmmRCToHostAsm
;
; This is an entry point used by TRPM to return to host context when an
; interrupt occurred or a guest trap needs handling in host context. It
; is also used by the C interface above.
;
; The hypervisor context is saved and it will return to the caller if
; host context so desires.
;
; @param eax Return code.
; @uses eax, edx, ecx (or it may use them in the future)
;
ALIGNCODE(16)
BEGINPROC vmmRCToHostAsm
    DEBUG_S_CHAR('%')
    push    edx

%ifdef VBOX_WITH_STATISTICS
    FIXUP FIX_GC_VM_OFF, 1, VM.StatTotalInGC
    mov     edx, 0ffffffffh
    STAM_PROFILE_ADV_STOP edx

    FIXUP FIX_GC_VM_OFF, 1, VM.StatTotalGCToQemu
    mov     edx, 0ffffffffh
    STAM_PROFILE_ADV_START edx

    FIXUP FIX_GC_VM_OFF, 1, VM.StatSwitcherToHC
    mov     edx, 0ffffffffh
    STAM_PROFILE_ADV_START edx
%endif

    ;
    ; Load the CPUMCPU pointer.
    ;
    FIXUP FIX_GC_CPUMCPU_OFF, 1, 0
    mov     edx, 0ffffffffh             ; patched: edx = guest-context address of CPUMCPU.

    ; Save register context.
    pop     dword [edx + CPUMCPU.Hyper.edx]
    pop     dword [edx + CPUMCPU.Hyper.eip] ; call return from stack
    mov     dword [edx + CPUMCPU.Hyper.esp], esp
    mov     dword [edx + CPUMCPU.Hyper.eax], eax
    mov     dword [edx + CPUMCPU.Hyper.ebx], ebx
    mov     dword [edx + CPUMCPU.Hyper.ecx], ecx
    mov     dword [edx + CPUMCPU.Hyper.esi], esi
    mov     dword [edx + CPUMCPU.Hyper.edi], edi
    mov     dword [edx + CPUMCPU.Hyper.ebp], ebp

    ; special registers which may change.
vmmRCToHostAsm_SaveNoGeneralRegs:
    ; str [edx + CPUMCPU.Hyper.tr] - double fault only, and it won't be right then either.
    sldt    [edx + CPUMCPU.Hyper.ldtr.Sel]

    ; No need to save CRx here. They are set dynamically according to Guest/Host requirements.
    ; FPU context is saved before restore of host saving (another) branch.

    ; Disable debug registers if active so they cannot trigger while switching.
    ; Bugfix: edx is the CPUMCPU pointer on both entry paths (set via the
    ; FIX_GC_CPUMCPU_OFF fixup); edi holds a saved guest register here and
    ; must not be used as the base.
    test    dword [edx + CPUMCPU.fUseFlags], CPUM_USED_DEBUG_REGS_HYPER
    jz      .gth_disabled_dr7
    mov     eax, X86_DR7_INIT_VAL
    mov     dr7, eax
.gth_disabled_dr7:

%ifdef VBOX_WITH_NMI
    ;
    ; Disarm K7 NMI.
    ;
    mov     esi, edx                    ; preserve CPUMCPU pointer and rc across wrmsr.
    mov     edi, eax

    xor     edx, edx
    xor     eax, eax
    mov     ecx, MSR_K7_EVNTSEL0
    wrmsr

    mov     eax, edi
    mov     edx, esi
%endif


    ;;
    ;; Load Intermediate memory context.
    ;;
    mov     edi, eax                    ; save return code in EDI (careful with COM_DWORD_REG from here on!)
    mov     ecx, [edx + CPUMCPU.Host.cr3]
    FIXUP SWITCHER_FIX_INTER_CR3_GC, 1
    mov     eax, 0ffffffffh             ; patched: intermediate context CR3 (guest flavor).
    mov     cr3, eax
    DEBUG_CHAR('?')

    ;; We're now in intermediate memory context!
%ifdef NEED_ID
    ;;
    ;; Jump to identity mapped location
    ;;
    FIXUP FIX_GC_2_ID_NEAR_REL, 1, NAME(IDExitTarget) - NAME(Start)
    jmp near NAME(IDExitTarget)

    ; We're now on identity mapped pages!
ALIGNCODE(16)
GLOBALNAME IDExitTarget
    DEBUG_CHAR('1')
    mov     edx, cr4
%ifdef NEED_PAE_ON_32BIT_HOST
    and     edx, ~X86_CR4_PAE           ; back to 32-bit host paging.
%else
    or      edx, X86_CR4_PAE            ; back to PAE host paging.
%endif
    mov     eax, cr0
    and     eax, (~X86_CR0_PG) & 0xffffffff ; prevent yasm warning
    mov     cr0, eax                    ; paging off while we flip CR4.PAE.
    DEBUG_CHAR('2')
    mov     cr4, edx
    FIXUP SWITCHER_FIX_INTER_CR3_HC, 1
    mov     edx, 0ffffffffh             ; patched: intermediate context CR3 (host flavor).
    mov     cr3, edx
    or      eax, X86_CR0_PG
    DEBUG_CHAR('3')
    mov     cr0, eax
    DEBUG_CHAR('4')

    ;;
    ;; Jump to HC mapping.
    ;;
    FIXUP FIX_ID_2_HC_NEAR_REL, 1, NAME(HCExitTarget) - NAME(Start)
    jmp near NAME(HCExitTarget)
%else
    ;;
    ;; Jump to HC mapping.
    ;;
    FIXUP FIX_GC_2_HC_NEAR_REL, 1, NAME(HCExitTarget) - NAME(Start)
    jmp near NAME(HCExitTarget)
%endif


    ;
    ; When we arrive here we're at the host context
    ; mapping of the switcher code.
    ;
ALIGNCODE(16)
GLOBALNAME HCExitTarget
    DEBUG_CHAR('9')
    ; load final cr3
    mov     cr3, ecx                    ; ecx = host CR3, loaded above.
    DEBUG_CHAR('@')


    ;;
    ;; Restore Host context.
    ;;
    ; Load CPUM pointer into edx
    FIXUP FIX_HC_CPUM_OFF, 1, 0
    mov     edx, 0ffffffffh             ; patched: edx = host address of CPUM.
    CPUMCPU_FROM_CPUM(edx)
    ; activate host gdt and idt
    lgdt    [edx + CPUMCPU.Host.gdtr]
    DEBUG_CHAR('0')
    lidt    [edx + CPUMCPU.Host.idtr]
    DEBUG_CHAR('1')
    ; Restore TSS selector; must mark it as not busy before using ltr (!)
%if 1 ; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p)
    movzx   eax, word [edx + CPUMCPU.Host.tr] ; eax <- TR
    and     al, 0F8h                    ; mask away TI and RPL bits, get descriptor offset.
    add     eax, [edx + CPUMCPU.Host.gdtr + 2] ; eax <- GDTR.address + descriptor offset.
    and     dword [eax + 4], ~0200h     ; clear busy flag (2nd type2 bit)
    ltr     word [edx + CPUMCPU.Host.tr]
%else
    movzx   eax, word [edx + CPUMCPU.Host.tr] ; eax <- TR
    and     al, 0F8h                    ; mask away TI and RPL bits, get descriptor offset.
    add     eax, [edx + CPUMCPU.Host.gdtr + 2] ; eax <- GDTR.address + descriptor offset.
    mov     ecx, [eax + 4]              ; ecx <- 2nd descriptor dword
    mov     ebx, ecx                    ; save original value
    and     ecx, ~0200h                 ; clear busy flag (2nd type2 bit)
    mov     [eax + 4], ecx              ; not using xchg here is paranoia..
    ltr     word [edx + CPUMCPU.Host.tr]
    xchg    [eax + 4], ebx              ; using xchg is paranoia too...
%endif
    ; activate ldt
    DEBUG_CHAR('2')
    lldt    [edx + CPUMCPU.Host.ldtr]
    ; Restore segment registers
    mov     eax, [edx + CPUMCPU.Host.ds]
    mov     ds, eax
    mov     eax, [edx + CPUMCPU.Host.es]
    mov     es, eax
    mov     eax, [edx + CPUMCPU.Host.fs]
    mov     fs, eax
    mov     eax, [edx + CPUMCPU.Host.gs]
    mov     gs, eax
    ; restore stack
    lss     esp, [edx + CPUMCPU.Host.esp]


    FIXUP FIX_NO_SYSENTER_JMP, 0, gth_sysenter_no - NAME(Start) ; this will insert a jmp gth_sysenter_no if host doesn't use sysenter.
    ; restore MSR_IA32_SYSENTER_CS register.
    mov     ecx, MSR_IA32_SYSENTER_CS
    mov     eax, [edx + CPUMCPU.Host.SysEnter.cs]
    mov     ebx, [edx + CPUMCPU.Host.SysEnter.cs + 4]
    xchg    edx, ebx                    ; save/load edx
    wrmsr                               ; MSR[ecx] <- edx:eax
    xchg    edx, ebx                    ; restore edx
    jmp short gth_sysenter_no

ALIGNCODE(16)
gth_sysenter_no:

    FIXUP FIX_NO_SYSCALL_JMP, 0, gth_syscall_no - NAME(Start) ; this will insert a jmp gth_syscall_no if host doesn't use syscall.
    ; set MSR_K6_EFER_SCE.
    mov     ebx, edx                    ; save edx
    mov     ecx, MSR_K6_EFER
    rdmsr
    or      eax, MSR_K6_EFER_SCE
    wrmsr
    mov     edx, ebx                    ; restore edx
    jmp short gth_syscall_no

ALIGNCODE(16)
gth_syscall_no:

    ; Restore FPU if guest has used it.
    ; Using fxrstor should ensure that we're not causing unwanted exception on the host.
    mov     esi, [edx + CPUMCPU.fUseFlags] ; esi == use flags.
    test    esi, CPUM_USED_FPU
    jz near gth_fpu_no
    mov     ecx, cr0
    and     ecx, ~(X86_CR0_TS | X86_CR0_EM)
    mov     cr0, ecx                    ; allow FPU access without faulting.

    FIXUP FIX_NO_FXSAVE_JMP, 0, gth_no_fxsave - NAME(Start) ; this will insert a jmp gth_no_fxsave if fxsave isn't supported.
    fxsave  [edx + CPUMCPU.Guest.fpu]
    fxrstor [edx + CPUMCPU.Host.fpu]
    jmp near gth_fpu_no

gth_no_fxsave:
    fnsave  [edx + CPUMCPU.Guest.fpu]
    mov     eax, [edx + CPUMCPU.Host.fpu] ; control word
    not     eax                         ; 1 means exception ignored (6 LS bits)
    and     eax, byte 03Fh              ; 6 LS bits only
    test    eax, [edx + CPUMCPU.Host.fpu + 4] ; status word
    jz      gth_no_exceptions_pending

    ; technically incorrect, but we certainly don't want any exceptions now!!
    and     dword [edx + CPUMCPU.Host.fpu + 4], ~03Fh

gth_no_exceptions_pending:
    frstor  [edx + CPUMCPU.Host.fpu]
    jmp short gth_fpu_no

ALIGNCODE(16)
gth_fpu_no:

    ; Control registers.
    ; Would've liked to have these higher up in case of crashes, but
    ; the fpu stuff must be done before we restore cr0.
    mov     ecx, [edx + CPUMCPU.Host.cr4]
    mov     cr4, ecx
    mov     ecx, [edx + CPUMCPU.Host.cr0]
    mov     cr0, ecx
    ;mov     ecx, [edx + CPUMCPU.Host.cr2] ; assumes this is waste of time.
    ;mov     cr2, ecx

    ; restore debug registers (if modified) (esi must still be fUseFlags!)
    ; (must be done after cr4 reload because of the debug extension.)
    test    esi, CPUM_USE_DEBUG_REGS_HYPER | CPUM_USE_DEBUG_REGS_HOST | CPUM_USED_DEBUG_REGS_HOST
    jnz     gth_debug_regs_restore
gth_debug_regs_done:

    ; restore general registers.
    mov     eax, edi                    ; restore return code. eax = return code !!
    mov     edi, [edx + CPUMCPU.Host.edi]
    mov     esi, [edx + CPUMCPU.Host.esi]
    mov     ebx, [edx + CPUMCPU.Host.ebx]
    mov     ebp, [edx + CPUMCPU.Host.ebp]
    push    dword [edx + CPUMCPU.Host.eflags]
    popfd

%ifdef DEBUG_STUFF
;    COM_S_CHAR '4'
%endif
    retf                                ; far return matches the 'push cs' in vmmR0ToRawMode.

;;
; Detour for restoring the host debug registers.
; edx and edi must be preserved.
gth_debug_regs_restore:
    DEBUG_S_CHAR('d')
    mov     eax, dr7                    ; Some DR7 paranoia first...
    mov     ecx, X86_DR7_INIT_VAL
    cmp     eax, ecx
    je      .gth_debug_skip_dr7_disabling
    mov     dr7, ecx
.gth_debug_skip_dr7_disabling:
    test    esi, CPUM_USED_DEBUG_REGS_HOST
    jz      .gth_debug_regs_dr7

    DEBUG_S_CHAR('r')
    mov     eax, [edx + CPUMCPU.Host.dr0]
    mov     dr0, eax
    mov     ebx, [edx + CPUMCPU.Host.dr1]
    mov     dr1, ebx
    mov     ecx, [edx + CPUMCPU.Host.dr2]
    mov     dr2, ecx
    mov     eax, [edx + CPUMCPU.Host.dr3]
    mov     dr3, eax
.gth_debug_regs_dr7:
    mov     ebx, [edx + CPUMCPU.Host.dr6]
    mov     dr6, ebx
    mov     ecx, [edx + CPUMCPU.Host.dr7]
    mov     dr7, ecx

    and     dword [edx + CPUMCPU.fUseFlags], ~(CPUM_USED_DEBUG_REGS_HOST | CPUM_USED_DEBUG_REGS_HYPER)
    jmp     gth_debug_regs_done

ENDPROC vmmRCToHostAsm
948
949
GLOBALNAME End                          ; One past the last switcher code byte; paired with 'Start' above.
;
; The description string (in the text section).
;
NAME(Description):
    db SWITCHER_DESCRIPTION
    db 0

extern NAME(Relocate)

;
; End the fixup records.
;
BEGINDATA
    db FIX_THE_END                      ; final entry.
GLOBALNAME FixupsEnd

;;
; The switcher definition structure.
; Consumed by VMMR3 to install/relocate this switcher; the cb*/off* fields
; describe which code ranges run in host, identity and guest mappings.
ALIGNDATA(16)
GLOBALNAME Def
    istruc VMMSWITCHERDEF
        at VMMSWITCHERDEF.pvCode,                       RTCCPTR_DEF NAME(Start)
        at VMMSWITCHERDEF.pvFixups,                     RTCCPTR_DEF NAME(Fixups)
        at VMMSWITCHERDEF.pszDesc,                      RTCCPTR_DEF NAME(Description)
        at VMMSWITCHERDEF.pfnRelocate,                  RTCCPTR_DEF NAME(Relocate)
        at VMMSWITCHERDEF.enmType,                      dd SWITCHER_TYPE
        at VMMSWITCHERDEF.cbCode,                       dd NAME(End)                        - NAME(Start)
        at VMMSWITCHERDEF.offR0ToRawMode,               dd NAME(vmmR0ToRawMode)             - NAME(Start)
        at VMMSWITCHERDEF.offRCToHost,                  dd NAME(vmmRCToHost)                - NAME(Start)
        at VMMSWITCHERDEF.offRCCallTrampoline,          dd NAME(vmmRCCallTrampoline)        - NAME(Start)
        at VMMSWITCHERDEF.offRCToHostAsm,               dd NAME(vmmRCToHostAsm)             - NAME(Start)
        at VMMSWITCHERDEF.offRCToHostAsmNoReturn,       dd NAME(vmmRCToHostAsmNoReturn)     - NAME(Start)
        ; disasm help
        at VMMSWITCHERDEF.offHCCode0,                   dd 0
%ifdef NEED_ID
        at VMMSWITCHERDEF.cbHCCode0,                    dd NAME(IDEnterTarget)              - NAME(Start)
%else
        at VMMSWITCHERDEF.cbHCCode0,                    dd NAME(FarJmpGCTarget)             - NAME(Start)
%endif
        at VMMSWITCHERDEF.offHCCode1,                   dd NAME(HCExitTarget)               - NAME(Start)
        at VMMSWITCHERDEF.cbHCCode1,                    dd NAME(End)                        - NAME(HCExitTarget)
%ifdef NEED_ID
        at VMMSWITCHERDEF.offIDCode0,                   dd NAME(IDEnterTarget)              - NAME(Start)
        at VMMSWITCHERDEF.cbIDCode0,                    dd NAME(FarJmpGCTarget)             - NAME(IDEnterTarget)
        at VMMSWITCHERDEF.offIDCode1,                   dd NAME(IDExitTarget)               - NAME(Start)
        at VMMSWITCHERDEF.cbIDCode1,                    dd NAME(HCExitTarget)               - NAME(IDExitTarget)
%else
        at VMMSWITCHERDEF.offIDCode0,                   dd 0
        at VMMSWITCHERDEF.cbIDCode0,                    dd 0
        at VMMSWITCHERDEF.offIDCode1,                   dd 0
        at VMMSWITCHERDEF.cbIDCode1,                    dd 0
%endif
        at VMMSWITCHERDEF.offGCCode,                    dd NAME(FarJmpGCTarget)             - NAME(Start)
%ifdef NEED_ID
        at VMMSWITCHERDEF.cbGCCode,                     dd NAME(IDExitTarget)               - NAME(FarJmpGCTarget)
%else
        at VMMSWITCHERDEF.cbGCCode,                     dd NAME(HCExitTarget)               - NAME(FarJmpGCTarget)
%endif

    iend
1011
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette