VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMSwitcher/PAEand32Bit.mac @ 54474

Last change on this file since 54474 was 54474, checked in by vboxsync, 10 years ago:

VMMSwitcher: fixed two typos which prevented proper handling of the local APIC in x2APIC mode for 64-bit guests on 32-bit hosts and raw mode on 32-bit hosts.
; $Id: PAEand32Bit.mac 54474 2015-02-25 09:20:30Z vboxsync $
;; @file
; VMM - World Switchers, template for PAE and 32-Bit.
;

;
; Copyright (C) 2006-2013 Oracle Corporation
;
; This file is part of VirtualBox Open Source Edition (OSE), as
; available from http://www.virtualbox.org. This file is free software;
; you can redistribute it and/or modify it under the terms of the GNU
; General Public License (GPL) as published by the Free Software
; Foundation, in version 2 as it comes in the "COPYING" file of the
; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
;

;%define DEBUG_STUFF 1

;*******************************************************************************
;*  Header Files                                                               *
;*******************************************************************************
%include "VBox/asmdefs.mac"
%include "VBox/apic.mac"
%include "iprt/x86.mac"
%include "VBox/vmm/cpum.mac"
%include "VBox/vmm/stam.mac"
%include "VBox/vmm/vm.mac"
%include "VBox/err.mac"
%include "CPUMInternal.mac"
%include "VMMSwitcher.mac"

%undef NEED_ID
%ifdef NEED_PAE_ON_32BIT_HOST
%define NEED_ID
%endif
%ifdef NEED_32BIT_ON_PAE_HOST
%define NEED_ID
%endif



;
; Start the fixup records
;   We collect the fixups in the .data section as we go along
;   It is therefore VITAL that no-one is using the .data section
;   for anything else between 'Start' and 'End'.
;
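; Each FIXUP invocation below emits a record into this .data blob and leaves
; a placeholder (e.g. 0ffffffffh or 0deadfaceh) in the code stream for the
; relocation code to patch. A minimal sketch of the idea, assuming a record
; of roughly { fixup type, offset of the dword to patch } (the real layout
; lives in VMMSwitcher.mac and the VMMSwitcher relocation code):
;
;   FIXUP FIX_HC_CPUM_OFF, 1, 0         ; record: patch the dword 1 byte into
;   mov edx, 0ffffffffh                 ; the next instruction with the
;                                       ; host-context CPUM address + 0.
;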
49BEGINDATA
50GLOBALNAME Fixups
51
52
53
54BEGINCODE
55GLOBALNAME Start
56
57;;
58; The C interface.
59;
60BEGINPROC vmmR0ToRawMode
61
62%ifdef DEBUG_STUFF
63 COM_S_NEWLINE
64 COM_S_CHAR '^'
65%endif
66
67%ifdef VBOX_WITH_STATISTICS
68 ;
69 ; Switcher stats.
70 ;
71 FIXUP FIX_HC_VM_OFF, 1, VM.StatSwitcherToGC
72 mov edx, 0ffffffffh
73 STAM_PROFILE_ADV_START edx
74%endif
75
76 ;
77 ; Call worker.
78 ;
79 FIXUP FIX_HC_CPUM_OFF, 1, 0
80 mov edx, 0ffffffffh
81 push cs ; allow for far return and restore cs correctly.
82 call NAME(vmmR0ToRawModeAsm)
83
84%ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
85 ; Restore blocked Local APIC NMI vectors
86 ; Do this here to ensure the host CS is already restored
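    ; fApicDisVectors bit layout, as established by the blocking code in
    ; vmmR0ToRawModeAsm below:
    ;   bit 0 (0x01) LINT0, bit 1 (0x02) LINT1, bit 2 (0x04) perf counter,
    ;   bit 3 (0x08) thermal monitor, bit 4 (0x10) CMCI.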
    mov ecx, [edx + CPUMCPU.fApicDisVectors]
    test ecx, ecx
    jz gth_apic_done
    cmp byte [edx + CPUMCPU.fX2Apic], 1
    je gth_x2apic

    ; Legacy xAPIC mode:
    mov edx, [edx + CPUMCPU.pvApicBase]
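    ; Each shr below moves the next fApicDisVectors bit into CF; a set bit
    ; means that LVT entry was masked on the way in and must be re-enabled.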
    shr ecx, 1
    jnc gth_nolint0
    and dword [edx + APIC_REG_LVT_LINT0], ~APIC_REG_LVT_MASKED
gth_nolint0:
    shr ecx, 1
    jnc gth_nolint1
    and dword [edx + APIC_REG_LVT_LINT1], ~APIC_REG_LVT_MASKED
gth_nolint1:
    shr ecx, 1
    jnc gth_nopc
    and dword [edx + APIC_REG_LVT_PC], ~APIC_REG_LVT_MASKED
gth_nopc:
    shr ecx, 1
    jnc gth_notherm
    and dword [edx + APIC_REG_LVT_THMR], ~APIC_REG_LVT_MASKED
gth_notherm:
    shr ecx, 1
    jnc gth_nocmci
    and dword [edx + APIC_REG_LVT_CMCI], ~APIC_REG_LVT_MASKED
gth_nocmci:
    jmp gth_apic_done

    ; x2APIC mode:
gth_x2apic:
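    ; In x2APIC mode the LVT registers are MSRs: MSR index = 800h + (xAPIC
    ; MMIO offset >> 4). Worked example: LVT LINT0 lives at MMIO offset 350h,
    ; so its MSR is MSR_IA32_X2APIC_START (800h) + 35h = 835h.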
    push eax                            ; save eax
    push ebx                            ; save it for fApicDisVectors
    push edx                            ; save edx just in case.
    mov ebx, ecx                        ; ebx = fApicDisVectors, ecx free for MSR use
    shr ebx, 1
    jnc gth_x2_nolint0
    mov ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_LINT0 >> 4)
    rdmsr
    and eax, ~APIC_REG_LVT_MASKED
    wrmsr
gth_x2_nolint0:
    shr ebx, 1
    jnc gth_x2_nolint1
    mov ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_LINT1 >> 4)
    rdmsr
    and eax, ~APIC_REG_LVT_MASKED
    wrmsr
gth_x2_nolint1:
    shr ebx, 1
    jnc gth_x2_nopc
    mov ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_PC >> 4)
    rdmsr
    and eax, ~APIC_REG_LVT_MASKED
    wrmsr
gth_x2_nopc:
    shr ebx, 1
    jnc gth_x2_notherm
    mov ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_THMR >> 4)
    rdmsr
    and eax, ~APIC_REG_LVT_MASKED
    wrmsr
gth_x2_notherm:
    shr ebx, 1
    jnc gth_x2_nocmci
    mov ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_CMCI >> 4)
    rdmsr
    and eax, ~APIC_REG_LVT_MASKED
    wrmsr
gth_x2_nocmci:
    pop edx
    pop ebx
    pop eax

gth_apic_done:
%endif ; VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI

%ifdef VBOX_WITH_STATISTICS
    ;
    ; Switcher stats.
    ;
    FIXUP FIX_HC_VM_OFF, 1, VM.StatSwitcherToHC
    mov edx, 0ffffffffh
    STAM_PROFILE_ADV_STOP edx
%endif

    ret
ENDPROC vmmR0ToRawMode



; *****************************************************************************
; vmmR0ToRawModeAsm
;
; Phase one of the switch from host to guest context (host MMU context)
;
; INPUT:
;       - edx       virtual address of CPUM structure (valid in host context)
;
; USES/DESTROYS:
;       - eax, ecx, edx
;
; ASSUMPTION:
;       - current CS and DS selectors are wide open
;
; *****************************************************************************
ALIGNCODE(16)
BEGINPROC vmmR0ToRawModeAsm
    ;;
    ;; Save CPU host context
    ;;      Skip eax, edx and ecx as these are not preserved over calls.
    ;;
    CPUMCPU_FROM_CPUM(edx)
    ; general registers.
    mov [edx + CPUMCPU.Host.ebx], ebx
    mov [edx + CPUMCPU.Host.edi], edi
    mov [edx + CPUMCPU.Host.esi], esi
    mov [edx + CPUMCPU.Host.esp], esp
    mov [edx + CPUMCPU.Host.ebp], ebp
    ; selectors.
    mov [edx + CPUMCPU.Host.ds], ds
    mov [edx + CPUMCPU.Host.es], es
    mov [edx + CPUMCPU.Host.fs], fs
    mov [edx + CPUMCPU.Host.gs], gs
    mov [edx + CPUMCPU.Host.ss], ss
    ; special registers.
    sldt [edx + CPUMCPU.Host.ldtr]
    sidt [edx + CPUMCPU.Host.idtr]
    sgdt [edx + CPUMCPU.Host.gdtr]
    str [edx + CPUMCPU.Host.tr]
    ; flags
    pushfd
    pop dword [edx + CPUMCPU.Host.eflags]

%ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
    ; Block Local APIC NMI vectors
    cmp byte [edx + CPUMCPU.fX2Apic], 1
    je htg_x2apic

    ; Legacy xAPIC mode:
    mov ebx, [edx + CPUMCPU.pvApicBase]
    or ebx, ebx
    jz htg_apic_done
    xor edi, edi                        ; fApicDisVectors

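    ; For each LVT entry below: read it, and if it is programmed for NMI
    ; delivery and not yet masked ((value & (MASKED|MODE_MASK)) ==
    ; APIC_REG_LVT_MODE_NMI), set the mask bit, read the register back to
    ; complete the write, and record the entry in edi/fApicDisVectors.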
    mov eax, [ebx + APIC_REG_LVT_LINT0]
    mov ecx, eax
    and ecx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
    cmp ecx, APIC_REG_LVT_MODE_NMI
    jne htg_nolint0
    or edi, 0x01
    or eax, APIC_REG_LVT_MASKED
    mov [ebx + APIC_REG_LVT_LINT0], eax
    mov eax, [ebx + APIC_REG_LVT_LINT0] ; write completion
htg_nolint0:
    mov eax, [ebx + APIC_REG_LVT_LINT1]
    mov ecx, eax
    and ecx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
    cmp ecx, APIC_REG_LVT_MODE_NMI
    jne htg_nolint1
    or edi, 0x02
    or eax, APIC_REG_LVT_MASKED
    mov [ebx + APIC_REG_LVT_LINT1], eax
    mov eax, [ebx + APIC_REG_LVT_LINT1] ; write completion
htg_nolint1:
    mov eax, [ebx + APIC_REG_LVT_PC]
    mov ecx, eax
    and ecx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
    cmp ecx, APIC_REG_LVT_MODE_NMI
    jne htg_nopc
    or edi, 0x04
    or eax, APIC_REG_LVT_MASKED
    mov [ebx + APIC_REG_LVT_PC], eax
    mov eax, [ebx + APIC_REG_LVT_PC]    ; write completion
htg_nopc:
    mov eax, [ebx + APIC_REG_VERSION]
    shr eax, 16
    cmp al, 5
    jb htg_notherm
    je htg_nocmci
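    ; al now holds the Max LVT Entry field of the version register (number of
    ; LVT entries minus one): below 5 there is neither a thermal nor a CMCI
    ; entry, exactly 5 means thermal but no CMCI, 6 or more means both.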
    mov eax, [ebx + APIC_REG_LVT_CMCI]
    mov ecx, eax
    and ecx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
    cmp ecx, APIC_REG_LVT_MODE_NMI
    jne htg_nocmci
    or edi, 0x10
    or eax, APIC_REG_LVT_MASKED
    mov [ebx + APIC_REG_LVT_CMCI], eax
    mov eax, [ebx + APIC_REG_LVT_CMCI]  ; write completion
htg_nocmci:
    mov eax, [ebx + APIC_REG_LVT_THMR]
    mov ecx, eax
    and ecx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
    cmp ecx, APIC_REG_LVT_MODE_NMI
    jne htg_notherm
    or edi, 0x08
    or eax, APIC_REG_LVT_MASKED
    mov [ebx + APIC_REG_LVT_THMR], eax
    mov eax, [ebx + APIC_REG_LVT_THMR]  ; write completion
htg_notherm:
    mov [edx + CPUMCPU.fApicDisVectors], edi
    jmp htg_apic_done

    ; x2APIC mode:
htg_x2apic:
    mov esi, edx                        ; Save edx.
    xor edi, edi                        ; fApicDisVectors

    mov ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_LINT0 >> 4)
    rdmsr
    mov ebx, eax
    and ebx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
    cmp ebx, APIC_REG_LVT_MODE_NMI
    jne htg_x2_nolint0
    or edi, 0x01
    or eax, APIC_REG_LVT_MASKED
    wrmsr
htg_x2_nolint0:
    mov ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_LINT1 >> 4)
    rdmsr
    mov ebx, eax
    and ebx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
    cmp ebx, APIC_REG_LVT_MODE_NMI
    jne htg_x2_nolint1
    or edi, 0x02
    or eax, APIC_REG_LVT_MASKED
    wrmsr
htg_x2_nolint1:
    mov ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_PC >> 4)
    rdmsr
    mov ebx, eax
    and ebx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
    cmp ebx, APIC_REG_LVT_MODE_NMI
    jne htg_x2_nopc
    or edi, 0x04
    or eax, APIC_REG_LVT_MASKED
    wrmsr
htg_x2_nopc:
    mov ecx, MSR_IA32_X2APIC_START + (APIC_REG_VERSION >> 4)
    rdmsr
    shr eax, 16
    cmp al, 5
    jb htg_x2_notherm
    je htg_x2_nocmci
    mov ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_CMCI >> 4)
    rdmsr
    mov ebx, eax
    and ebx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
    cmp ebx, APIC_REG_LVT_MODE_NMI
    jne htg_x2_nocmci
    or edi, 0x10
    or eax, APIC_REG_LVT_MASKED
    wrmsr
htg_x2_nocmci:
    mov ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_THMR >> 4)
    rdmsr
    mov ebx, eax
    and ebx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
    cmp ebx, APIC_REG_LVT_MODE_NMI
    jne htg_x2_notherm
    or edi, 0x08
    or eax, APIC_REG_LVT_MASKED
    wrmsr
htg_x2_notherm:
    mov edx, esi                        ; Restore edx.
    mov [edx + CPUMCPU.fApicDisVectors], edi

htg_apic_done:
%endif ; VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI

    FIXUP FIX_NO_SYSENTER_JMP, 0, htg_no_sysenter - NAME(Start) ; this will insert a jmp htg_no_sysenter if host doesn't use sysenter.
    ; save MSR_IA32_SYSENTER_CS register.
    mov ecx, MSR_IA32_SYSENTER_CS
    mov ebx, edx                        ; save edx
    rdmsr                               ; edx:eax <- MSR[ecx]
    mov [ebx + CPUMCPU.Host.SysEnter.cs], eax
    mov [ebx + CPUMCPU.Host.SysEnter.cs + 4], edx
    xor eax, eax                        ; load 0:0 to cause #GP upon sysenter
    xor edx, edx
    wrmsr
    xchg ebx, edx                       ; restore edx
    jmp short htg_no_sysenter

ALIGNCODE(16)
htg_no_sysenter:

    FIXUP FIX_NO_SYSCALL_JMP, 0, htg_no_syscall - NAME(Start) ; this will insert a jmp htg_no_syscall if host doesn't use syscall.
    ; clear MSR_K6_EFER_SCE.
    mov ebx, edx                        ; save edx
    mov ecx, MSR_K6_EFER
    rdmsr                               ; edx:eax <- MSR[ecx]
    and eax, ~MSR_K6_EFER_SCE
    wrmsr
    mov edx, ebx                        ; restore edx
    jmp short htg_no_syscall

ALIGNCODE(16)
htg_no_syscall:

    ;; handle use flags.
    mov esi, [edx + CPUMCPU.fUseFlags]  ; esi == use flags.
    and esi, ~CPUM_USED_FPU             ; Clear CPUM_USED_* flags. ;;@todo FPU check can be optimized to use cr0 flags!
    mov [edx + CPUMCPU.fUseFlags], esi

    ; debug registers.
    test esi, CPUM_USE_DEBUG_REGS_HYPER | CPUM_USE_DEBUG_REGS_HOST
    jnz htg_debug_regs_save_dr7and6
htg_debug_regs_no:

    ; control registers.
    mov eax, cr0
    mov [edx + CPUMCPU.Host.cr0], eax
    ;mov eax, cr2                       ; assume the host OS doesn't stuff things in cr2. (safe)
    ;mov [edx + CPUMCPU.Host.cr2], eax
    mov eax, cr3
    mov [edx + CPUMCPU.Host.cr3], eax
    mov eax, cr4
    mov [edx + CPUMCPU.Host.cr4], eax

    ;;
    ;; Start switching to VMM context.
    ;;

    ;
    ; Change CR0 and CR4 so we can correctly emulate FPU/MMX/SSE[23] exceptions
    ; Also disable WP. (eax==cr4 now)
    ; Note! X86_CR4_PSE and X86_CR4_PAE are important if the host thinks so :-)
    ; Note! X86_CR4_VMXE must not be touched in case the CPU is in vmx root mode
    ;
    and eax, X86_CR4_MCE | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_VMXE
    mov ecx, [edx + CPUMCPU.Guest.cr4]
    ;; @todo Switcher cleanup: Determine base CR4 during CPUMR0Init / VMMR3SelectSwitcher putting it
    ;        in CPUMCPU.Hyper.cr4 (which isn't currently being used). That should
    ;        simplify this operation a bit (and improve locality of the data).

    ;
    ; CR4.AndMask and CR4.OrMask are set in CPUMR3Init based on the presence of
    ; FXSAVE support on the host CPU
    ;
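    ; Restating the next few instructions as a formula (assuming the masks
    ; are as CPUMR3Init leaves them):
    ;   cr4 = (host_cr4 & (MCE|PSE|PAE|VMXE)) | (guest_cr4 & CR4.AndMask) | CR4.OrMask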
    CPUM_FROM_CPUMCPU(edx)
    and ecx, [edx + CPUM.CR4.AndMask]
    or eax, ecx
    or eax, [edx + CPUM.CR4.OrMask]
    mov cr4, eax

    CPUMCPU_FROM_CPUM(edx)
    mov eax, [edx + CPUMCPU.Guest.cr0]
    and eax, X86_CR0_EM
    or eax, X86_CR0_PE | X86_CR0_PG | X86_CR0_TS | X86_CR0_ET | X86_CR0_NE | X86_CR0_MP
    mov cr0, eax
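    ; I.e. cr0 = (guest_cr0 & EM) | PE|PG|TS|ET|NE|MP. Keeping TS and MP set
    ; means the first FPU/MMX/SSE instruction executed in raw mode raises #NM,
    ; which is what lets the lazy FPU state switching (see the CPUM_USED_FPU
    ; handling on the way back to the host) kick in.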

    ; Load new gdt so we can do far jump to guest code after cr3 reload.
    lgdt [edx + CPUMCPU.Hyper.gdtr]
    DEBUG_CHAR('1')                     ; trashes esi

    ; Store the hypervisor cr3 for later loading
    mov ebp, [edx + CPUMCPU.Hyper.cr3]

    ;;
    ;; Load Intermediate memory context.
    ;;
    FIXUP SWITCHER_FIX_INTER_CR3_HC, 1
    mov eax, 0ffffffffh
    mov cr3, eax
    DEBUG_CHAR('2')                     ; trashes esi

%ifdef NEED_ID
    ;;
    ;; Jump to identity mapped location
    ;;
    FIXUP FIX_HC_2_ID_NEAR_REL, 1, NAME(IDEnterTarget) - NAME(Start)
    jmp near NAME(IDEnterTarget)

    ; We're now on identity mapped pages!
ALIGNCODE(16)
GLOBALNAME IDEnterTarget
    DEBUG_CHAR('3')
    mov edx, cr4
%ifdef NEED_PAE_ON_32BIT_HOST
    or edx, X86_CR4_PAE
%else
    and edx, ~X86_CR4_PAE
%endif
    mov eax, cr0
    and eax, (~X86_CR0_PG) & 0xffffffff ; prevent yasm warning
    mov cr0, eax
    DEBUG_CHAR('4')
    mov cr4, edx
    FIXUP SWITCHER_FIX_INTER_CR3_GC, 1
    mov edx, 0ffffffffh
    mov cr3, edx
    or eax, X86_CR0_PG
    DEBUG_CHAR('5')
    mov cr0, eax
    DEBUG_CHAR('6')
%endif

    ;;
    ;; Jump to guest code mapping of the code and load the Hypervisor CS.
    ;;
    FIXUP FIX_GC_FAR32, 1, NAME(FarJmpGCTarget) - NAME(Start)
    jmp 0fff8h:0deadfaceh
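    ; Both the selector (0fff8h) and the offset (0deadfaceh) above are
    ; placeholders; the FIX_GC_FAR32 record makes the relocation code patch
    ; in the hypervisor CS and the guest-context address of FarJmpGCTarget.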


    ;;
    ;; When we arrive at this label we're at the
    ;; guest code mapping of the switching code.
    ;;
ALIGNCODE(16)
GLOBALNAME FarJmpGCTarget
    DEBUG_CHAR('-')
    ; load final cr3 and do far jump to load cs.
    mov cr3, ebp                        ; ebp set above
    DEBUG_CHAR('0')

    ;;
    ;; We're in VMM MMU context and VMM CS is loaded.
    ;; Setup the rest of the VMM state.
    ;;
    FIXUP FIX_GC_CPUMCPU_OFF, 1, 0
    mov edx, 0ffffffffh
    ; Activate guest IDT
    DEBUG_CHAR('1')
    lidt [edx + CPUMCPU.Hyper.idtr]
    ; Load selectors
    DEBUG_CHAR('2')
    FIXUP FIX_HYPER_DS, 1
    mov eax, 0ffffh
    mov ds, eax
    mov es, eax
    xor eax, eax
    mov gs, eax
    mov fs, eax

    ; Setup stack.
    DEBUG_CHAR('3')
    mov eax, [edx + CPUMCPU.Hyper.ss.Sel]
    mov ss, ax
    mov esp, [edx + CPUMCPU.Hyper.esp]

    ; Restore TSS selector; must mark it as not busy before using ltr (!)
    DEBUG_CHAR('4')
    FIXUP FIX_GC_TSS_GDTE_DW2, 2
    and dword [0ffffffffh], ~0200h      ; clear busy flag (2nd type2 bit)
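    ; In the GDT entry's second dword the type field sits at bits 8-11; for a
    ; 32-bit TSS the busy bit is bit 9 (0200h). ltr below would #GP on a
    ; descriptor already marked busy, hence the clearing above.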
    DEBUG_CHAR('5')
    ltr word [edx + CPUMCPU.Hyper.tr.Sel]
    DEBUG_CHAR('6')

    ; Activate the ldt (now we can safely crash).
    lldt [edx + CPUMCPU.Hyper.ldtr.Sel]
    DEBUG_CHAR('7')

    ;; use flags.
    mov esi, [edx + CPUMCPU.fUseFlags]

    ; debug registers
    test esi, CPUM_USE_DEBUG_REGS_HYPER
    jnz htg_debug_regs_guest
htg_debug_regs_guest_done:
    DEBUG_CHAR('9')

%ifdef VBOX_WITH_NMI
    ;
    ; Setup K7 NMI.
    ;
    mov esi, edx
    ; clear all PerfEvtSeln registers
    xor eax, eax
    xor edx, edx
    mov ecx, MSR_K7_PERFCTR0
    wrmsr
    mov ecx, MSR_K7_PERFCTR1
    wrmsr
    mov ecx, MSR_K7_PERFCTR2
    wrmsr
    mov ecx, MSR_K7_PERFCTR3
    wrmsr

    mov eax, RT_BIT(20) | RT_BIT(17) | RT_BIT(16) | 076h
    mov ecx, MSR_K7_EVNTSEL0
    wrmsr
    mov eax, 02329B000h
    mov edx, 0fffffffeh                 ; -1.6GHz * 5
    mov ecx, MSR_K7_PERFCTR0
    wrmsr

    FIXUP FIX_GC_APIC_BASE_32BIT, 1
    mov eax, 0f0f0f0f0h
    add eax, 0340h                      ; APIC_LVTPC
    mov dword [eax], 0400h              ; APIC_DM_NMI

    xor edx, edx
    mov eax, RT_BIT(20) | RT_BIT(17) | RT_BIT(16) | 076h | RT_BIT(22) ;+EN
    mov ecx, MSR_K7_EVNTSEL0
    wrmsr

    mov edx, esi
%endif

    ; General registers (sans edx).
    mov eax, [edx + CPUMCPU.Hyper.eax]
    mov ebx, [edx + CPUMCPU.Hyper.ebx]
    mov ecx, [edx + CPUMCPU.Hyper.ecx]
    mov ebp, [edx + CPUMCPU.Hyper.ebp]
    mov esi, [edx + CPUMCPU.Hyper.esi]
    mov edi, [edx + CPUMCPU.Hyper.edi]
    DEBUG_S_CHAR('!')

    ;;
    ;; Return to the VMM code which either called the switcher or
    ;; the code set up to run by HC.
    ;;
    push dword [edx + CPUMCPU.Hyper.eflags]
    push cs
    push dword [edx + CPUMCPU.Hyper.eip]
    mov edx, [edx + CPUMCPU.Hyper.edx]  ; !! edx is no longer pointing to CPUMCPU here !!

%ifdef DEBUG_STUFF
    COM_S_PRINT ';eip='
    push eax
    mov eax, [esp + 8]
    COM_S_DWORD_REG eax
    pop eax
    COM_S_CHAR ';'
%endif
%ifdef VBOX_WITH_STATISTICS
    push edx
    FIXUP FIX_GC_VM_OFF, 1, VM.StatSwitcherToGC
    mov edx, 0ffffffffh
    STAM_PROFILE_ADV_STOP edx
    pop edx
%endif

    iret                                ; Use iret to make debugging and TF/RF work.
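    ; The three pushes above (eflags, cs, eip) lay out exactly the frame an
    ; interrupt would have built, so the iret pops eip, cs and eflags in one
    ; go; e.g. a TF set in Hyper.eflags only takes effect after this iret.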

;;
; Detour for saving the host DR7 and DR6.
; esi and edx must be preserved.
htg_debug_regs_save_dr7and6:
DEBUG_S_CHAR('s');
    mov eax, dr7                        ; not sure, but if I read the docs right this will trap if GD is set. FIXME!!!
    mov [edx + CPUMCPU.Host.dr7], eax
    xor eax, eax                        ; clear everything. (bit 12? is read as 1...)
    mov dr7, eax
    mov eax, dr6                        ; just in case we save the state register too.
    mov [edx + CPUMCPU.Host.dr6], eax
    jmp htg_debug_regs_no

;;
; Detour for saving host DR0-3 and loading hypervisor debug registers.
; esi and edx must be preserved.
htg_debug_regs_guest:
    DEBUG_S_CHAR('D')
    DEBUG_S_CHAR('R')
    DEBUG_S_CHAR('x')
    ; save host DR0-3.
    mov eax, dr0
    mov [edx + CPUMCPU.Host.dr0], eax
    mov ebx, dr1
    mov [edx + CPUMCPU.Host.dr1], ebx
    mov ecx, dr2
    mov [edx + CPUMCPU.Host.dr2], ecx
    mov eax, dr3
    mov [edx + CPUMCPU.Host.dr3], eax
    or dword [edx + CPUMCPU.fUseFlags], CPUM_USED_DEBUG_REGS_HOST

    ; load hyper DR0-7
    mov ebx, [edx + CPUMCPU.Hyper.dr]
    mov dr0, ebx
    mov ecx, [edx + CPUMCPU.Hyper.dr + 8*1]
    mov dr1, ecx
    mov eax, [edx + CPUMCPU.Hyper.dr + 8*2]
    mov dr2, eax
    mov ebx, [edx + CPUMCPU.Hyper.dr + 8*3]
    mov dr3, ebx
    mov ecx, X86_DR6_INIT_VAL
    mov dr6, ecx
    mov eax, [edx + CPUMCPU.Hyper.dr + 8*7]
    mov dr7, eax
    or dword [edx + CPUMCPU.fUseFlags], CPUM_USED_DEBUG_REGS_HYPER
    jmp htg_debug_regs_guest_done

ENDPROC vmmR0ToRawModeAsm


;;
; Trampoline for doing a call when starting the hypervisor execution.
;
; Push any arguments to the routine.
; Push the argument frame size (cArg * 4).
; Push the call target (_cdecl convention).
; Push the address of this routine.
;
;
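; Stack as the code below consumes it (top first), per the recipe above:
;   [esp]       call target (cdecl)     -> popped into eax
;   [esp+4]     cbArgs = cArg * 4       -> popped into edi
;   [esp+8]...  the arguments           -> cleaned up via add esp, edi
;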
ALIGNCODE(16)
BEGINPROC vmmRCCallTrampoline
%ifdef DEBUG_STUFF
    COM_S_CHAR 'c'
    COM_S_CHAR 't'
    COM_S_CHAR '!'
%endif

    ; call routine
    pop eax                             ; call address
    pop edi                             ; argument frame size (cArg * 4).
%ifdef DEBUG_STUFF
    COM_S_PRINT ';eax='
    COM_S_DWORD_REG eax
    COM_S_CHAR ';'
%endif
    call eax                            ; do call
    add esp, edi                        ; cleanup stack

    ; return to the host context.
%ifdef DEBUG_STUFF
    COM_S_CHAR '`'
%endif
.to_host_again:
    call NAME(vmmRCToHostAsm)
    mov eax, VERR_VMM_SWITCHER_IPE_1
    jmp .to_host_again
ENDPROC vmmRCCallTrampoline



;;
; The C interface.
;
ALIGNCODE(16)
BEGINPROC vmmRCToHost
%ifdef DEBUG_STUFF
    push esi
    COM_NEWLINE
    DEBUG_CHAR('b')
    DEBUG_CHAR('a')
    DEBUG_CHAR('c')
    DEBUG_CHAR('k')
    DEBUG_CHAR('!')
    COM_NEWLINE
    pop esi
%endif
    mov eax, [esp + 4]
    jmp NAME(vmmRCToHostAsm)
ENDPROC vmmRCToHost


;;
; vmmRCToHostAsmNoReturn
;
; This is an entry point used by TRPM when dealing with raw-mode traps,
; i.e. traps in the hypervisor code. This will not return and saves no
; state, because the caller has already saved the state.
;
; @param eax Return code.
;
ALIGNCODE(16)
BEGINPROC vmmRCToHostAsmNoReturn
    DEBUG_S_CHAR('%')

%ifdef VBOX_WITH_STATISTICS
    FIXUP FIX_GC_VM_OFF, 1, VM.StatTotalInGC
    mov edx, 0ffffffffh
    STAM32_PROFILE_ADV_STOP edx

    FIXUP FIX_GC_VM_OFF, 1, VM.StatTotalGCToQemu
    mov edx, 0ffffffffh
    STAM32_PROFILE_ADV_START edx

    FIXUP FIX_GC_VM_OFF, 1, VM.StatSwitcherToHC
    mov edx, 0ffffffffh
    STAM32_PROFILE_ADV_START edx
%endif

    FIXUP FIX_GC_CPUMCPU_OFF, 1, 0
    mov edx, 0ffffffffh

    jmp vmmRCToHostAsm_SaveNoGeneralRegs
ENDPROC vmmRCToHostAsmNoReturn


;;
; vmmRCToHostAsm
;
; This is an entry point used by TRPM to return to host context when an
; interrupt occurred or a guest trap needs handling in host context. It
; is also used by the C interface above.
;
; The hypervisor context is saved, and control returns to the caller in
; host context if the caller so desires.
;
; @param eax Return code.
; @uses eax, edx, ecx (or it may use them in the future)
;
ALIGNCODE(16)
BEGINPROC vmmRCToHostAsm
    DEBUG_S_CHAR('%')
    push edx

%ifdef VBOX_WITH_STATISTICS
    FIXUP FIX_GC_VM_OFF, 1, VM.StatTotalInGC
    mov edx, 0ffffffffh
    STAM_PROFILE_ADV_STOP edx

    FIXUP FIX_GC_VM_OFF, 1, VM.StatTotalGCToQemu
    mov edx, 0ffffffffh
    STAM_PROFILE_ADV_START edx

    FIXUP FIX_GC_VM_OFF, 1, VM.StatSwitcherToHC
    mov edx, 0ffffffffh
    STAM_PROFILE_ADV_START edx
%endif

    ;
    ; Load the CPUMCPU pointer.
    ;
    FIXUP FIX_GC_CPUMCPU_OFF, 1, 0
    mov edx, 0ffffffffh

    ; Save register context.
    pop dword [edx + CPUMCPU.Hyper.edx]
    pop dword [edx + CPUMCPU.Hyper.eip] ; call return from stack
    mov dword [edx + CPUMCPU.Hyper.esp], esp
    mov dword [edx + CPUMCPU.Hyper.eax], eax
    mov dword [edx + CPUMCPU.Hyper.ebx], ebx
    mov dword [edx + CPUMCPU.Hyper.ecx], ecx
    mov dword [edx + CPUMCPU.Hyper.esi], esi
    mov dword [edx + CPUMCPU.Hyper.edi], edi
    mov dword [edx + CPUMCPU.Hyper.ebp], ebp

    ; special registers which may change.
vmmRCToHostAsm_SaveNoGeneralRegs:
    mov edi, eax                        ; save return code in EDI (careful with COM_DWORD_REG from here on!)
    ; str [edx + CPUMCPU.Hyper.tr] - double fault only, and it won't be right then either.
    sldt [edx + CPUMCPU.Hyper.ldtr.Sel]

    ; No need to save CRx here. They are set dynamically according to Guest/Host requirements.
    ; The FPU context is saved and restored in a separate branch further down,
    ; just before the host cr0 is reloaded.

    ; Disable debug registers if active so they cannot trigger while switching.
    test dword [edx + CPUMCPU.fUseFlags], CPUM_USED_DEBUG_REGS_HYPER
    jz .gth_disabled_dr7
    mov eax, X86_DR7_INIT_VAL
    mov dr7, eax
.gth_disabled_dr7:

%ifdef VBOX_WITH_NMI
    ;
    ; Disarm K7 NMI.
    ;
    mov esi, edx

    xor edx, edx
    xor eax, eax
    mov ecx, MSR_K7_EVNTSEL0
    wrmsr

    mov edx, esi
%endif


    ;;
    ;; Load Intermediate memory context.
    ;;
    mov ecx, [edx + CPUMCPU.Host.cr3]
    FIXUP SWITCHER_FIX_INTER_CR3_GC, 1
    mov eax, 0ffffffffh
    mov cr3, eax
    DEBUG_CHAR('?')

    ;; We're now in intermediate memory context!
%ifdef NEED_ID
    ;;
    ;; Jump to identity mapped location
    ;;
    FIXUP FIX_GC_2_ID_NEAR_REL, 1, NAME(IDExitTarget) - NAME(Start)
    jmp near NAME(IDExitTarget)

    ; We're now on identity mapped pages!
ALIGNCODE(16)
GLOBALNAME IDExitTarget
    DEBUG_CHAR('1')
    mov edx, cr4
%ifdef NEED_PAE_ON_32BIT_HOST
    and edx, ~X86_CR4_PAE
%else
    or edx, X86_CR4_PAE
%endif
    mov eax, cr0
    and eax, (~X86_CR0_PG) & 0xffffffff ; prevent yasm warning
    mov cr0, eax
    DEBUG_CHAR('2')
    mov cr4, edx
    FIXUP SWITCHER_FIX_INTER_CR3_HC, 1
    mov edx, 0ffffffffh
    mov cr3, edx
    or eax, X86_CR0_PG
    DEBUG_CHAR('3')
    mov cr0, eax
    DEBUG_CHAR('4')

    ;;
    ;; Jump to HC mapping.
    ;;
    FIXUP FIX_ID_2_HC_NEAR_REL, 1, NAME(HCExitTarget) - NAME(Start)
    jmp near NAME(HCExitTarget)
%else
    ;;
    ;; Jump to HC mapping.
    ;;
    FIXUP FIX_GC_2_HC_NEAR_REL, 1, NAME(HCExitTarget) - NAME(Start)
    jmp near NAME(HCExitTarget)
%endif


    ;
    ; When we arrive here we're at the host context
    ; mapping of the switcher code.
    ;
ALIGNCODE(16)
GLOBALNAME HCExitTarget
    DEBUG_CHAR('9')
    ; load final cr3
    mov cr3, ecx
    DEBUG_CHAR('@')


    ;;
    ;; Restore Host context.
    ;;
    ; Load CPUM pointer into edx
    FIXUP FIX_HC_CPUM_OFF, 1, 0
    mov edx, 0ffffffffh
    CPUMCPU_FROM_CPUM(edx)
    ; activate host gdt and idt
    lgdt [edx + CPUMCPU.Host.gdtr]
    DEBUG_CHAR('0')
    lidt [edx + CPUMCPU.Host.idtr]
    DEBUG_CHAR('1')
    ; Restore TSS selector; must mark it as not busy before using ltr (!)
%if 1 ; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p)
    movzx eax, word [edx + CPUMCPU.Host.tr]     ; eax <- TR
    and al, 0F8h                                ; mask away TI and RPL bits, get descriptor offset.
    add eax, [edx + CPUMCPU.Host.gdtr + 2]      ; eax <- GDTR.address + descriptor offset.
    and dword [eax + 4], ~0200h                 ; clear busy flag (2nd type2 bit)
    ltr word [edx + CPUMCPU.Host.tr]
%else
    movzx eax, word [edx + CPUMCPU.Host.tr]     ; eax <- TR
    and al, 0F8h                                ; mask away TI and RPL bits, get descriptor offset.
    add eax, [edx + CPUMCPU.Host.gdtr + 2]      ; eax <- GDTR.address + descriptor offset.
    mov ecx, [eax + 4]                          ; ecx <- 2nd descriptor dword
    mov ebx, ecx                                ; save original value
    and ecx, ~0200h                             ; clear busy flag (2nd type2 bit)
    mov [eax + 4], ecx                          ; not using xchg here is paranoia..
    ltr word [edx + CPUMCPU.Host.tr]
    xchg [eax + 4], ebx                         ; using xchg is paranoia too...
%endif
    ; activate ldt
    DEBUG_CHAR('2')
    lldt [edx + CPUMCPU.Host.ldtr]
    ; Restore segment registers
    mov eax, [edx + CPUMCPU.Host.ds]
    mov ds, eax
    mov eax, [edx + CPUMCPU.Host.es]
    mov es, eax
    mov eax, [edx + CPUMCPU.Host.fs]
    mov fs, eax
    mov eax, [edx + CPUMCPU.Host.gs]
    mov gs, eax
    ; restore stack
    lss esp, [edx + CPUMCPU.Host.esp]
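    ; lss loads ss and esp in a single instruction, so there is no window in
    ; which a fault or interrupt could run with the new ss but the old esp.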


    FIXUP FIX_NO_SYSENTER_JMP, 0, gth_sysenter_no - NAME(Start) ; this will insert a jmp gth_sysenter_no if host doesn't use sysenter.
    ; restore MSR_IA32_SYSENTER_CS register.
    mov ecx, MSR_IA32_SYSENTER_CS
    mov eax, [edx + CPUMCPU.Host.SysEnter.cs]
    mov ebx, [edx + CPUMCPU.Host.SysEnter.cs + 4]
    xchg edx, ebx                       ; save/load edx
    wrmsr                               ; MSR[ecx] <- edx:eax
    xchg edx, ebx                       ; restore edx
    jmp short gth_sysenter_no

ALIGNCODE(16)
gth_sysenter_no:

    FIXUP FIX_NO_SYSCALL_JMP, 0, gth_syscall_no - NAME(Start) ; this will insert a jmp gth_syscall_no if host doesn't use syscall.
    ; set MSR_K6_EFER_SCE.
    mov ebx, edx                        ; save edx
    mov ecx, MSR_K6_EFER
    rdmsr
    or eax, MSR_K6_EFER_SCE
    wrmsr
    mov edx, ebx                        ; restore edx
    jmp short gth_syscall_no

ALIGNCODE(16)
gth_syscall_no:

    ; Restore FPU if guest has used it.
    ; Using fxrstor should ensure that we're not causing unwanted exception on the host.
    mov esi, [edx + CPUMCPU.fUseFlags]  ; esi == use flags.
    test esi, CPUM_USED_FPU
    jz near gth_fpu_no
    mov ecx, cr0
    and ecx, ~(X86_CR0_TS | X86_CR0_EM)
    mov cr0, ecx

    FIXUP FIX_NO_FXSAVE_JMP, 0, gth_no_fxsave - NAME(Start) ; this will insert a jmp gth_no_fxsave if fxsave isn't supported.
    fxsave [edx + CPUMCPU.Guest.fpu]
    fxrstor [edx + CPUMCPU.Host.fpu]
    jmp near gth_fpu_no

gth_no_fxsave:
    fnsave [edx + CPUMCPU.Guest.fpu]
    mov eax, [edx + CPUMCPU.Host.fpu]   ; control word
    not eax                             ; 1 means exception ignored (6 LS bits)
    and eax, byte 03Fh                  ; 6 LS bits only
    test eax, [edx + CPUMCPU.Host.fpu + 4] ; status word
    jz gth_no_exceptions_pending
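    ; At this point eax has a bit set for each *unmasked* x87 exception
    ; (control-word mask bits 0-5 inverted), and testing it against the saved
    ; status word's exception flags tells us whether frstor would re-arm a
    ; pending unmasked exception on the next FP instruction.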

    ; technically incorrect, but we certainly don't want any exceptions now!!
    and dword [edx + CPUMCPU.Host.fpu + 4], ~03Fh

gth_no_exceptions_pending:
    frstor [edx + CPUMCPU.Host.fpu]
    jmp short gth_fpu_no

ALIGNCODE(16)
gth_fpu_no:

    ; Control registers.
    ; Would've liked to have these higher up in case of crashes, but
    ; the fpu stuff must be done before we restore cr0.
    mov ecx, [edx + CPUMCPU.Host.cr4]
    mov cr4, ecx
    mov ecx, [edx + CPUMCPU.Host.cr0]
    mov cr0, ecx
    ;mov ecx, [edx + CPUMCPU.Host.cr2]  ; assumed to be a waste of time.
    ;mov cr2, ecx

    ; restore debug registers (if modified) (esi must still be fUseFlags!)
    ; (must be done after cr4 reload because of the debug extension.)
    test esi, CPUM_USE_DEBUG_REGS_HYPER | CPUM_USE_DEBUG_REGS_HOST | CPUM_USED_DEBUG_REGS_HOST
    jnz gth_debug_regs_restore
gth_debug_regs_done:

    ; restore general registers.
    mov eax, edi                        ; restore return code. eax = return code !!
    mov edi, [edx + CPUMCPU.Host.edi]
    mov esi, [edx + CPUMCPU.Host.esi]
    mov ebx, [edx + CPUMCPU.Host.ebx]
    mov ebp, [edx + CPUMCPU.Host.ebp]
    push dword [edx + CPUMCPU.Host.eflags]
    popfd

%ifdef DEBUG_STUFF
;    COM_S_CHAR '4'
%endif
    retf

;;
; Detour for restoring the host debug registers.
; edx and edi must be preserved.
gth_debug_regs_restore:
    DEBUG_S_CHAR('d')
    mov eax, dr7                        ; Some DR7 paranoia first...
    mov ecx, X86_DR7_INIT_VAL
    cmp eax, ecx
    je .gth_debug_skip_dr7_disabling
    mov dr7, ecx
.gth_debug_skip_dr7_disabling:
    test esi, CPUM_USED_DEBUG_REGS_HOST
    jz .gth_debug_regs_dr7

    DEBUG_S_CHAR('r')
    mov eax, [edx + CPUMCPU.Host.dr0]
    mov dr0, eax
    mov ebx, [edx + CPUMCPU.Host.dr1]
    mov dr1, ebx
    mov ecx, [edx + CPUMCPU.Host.dr2]
    mov dr2, ecx
    mov eax, [edx + CPUMCPU.Host.dr3]
    mov dr3, eax
.gth_debug_regs_dr7:
    mov ebx, [edx + CPUMCPU.Host.dr6]
    mov dr6, ebx
    mov ecx, [edx + CPUMCPU.Host.dr7]
    mov dr7, ecx

    and dword [edx + CPUMCPU.fUseFlags], ~(CPUM_USED_DEBUG_REGS_HOST | CPUM_USED_DEBUG_REGS_HYPER)
    jmp gth_debug_regs_done

ENDPROC vmmRCToHostAsm


GLOBALNAME End
;
; The description string (in the text section).
;
NAME(Description):
    db SWITCHER_DESCRIPTION
    db 0

extern NAME(Relocate)

;
; End the fixup records.
;
BEGINDATA
    db FIX_THE_END                      ; final entry.
GLOBALNAME FixupsEnd

;;
; The switcher definition structure.
ALIGNDATA(16)
GLOBALNAME Def
    istruc VMMSWITCHERDEF
        at VMMSWITCHERDEF.pvCode,                   RTCCPTR_DEF NAME(Start)
        at VMMSWITCHERDEF.pvFixups,                 RTCCPTR_DEF NAME(Fixups)
        at VMMSWITCHERDEF.pszDesc,                  RTCCPTR_DEF NAME(Description)
        at VMMSWITCHERDEF.pfnRelocate,              RTCCPTR_DEF NAME(Relocate)
        at VMMSWITCHERDEF.enmType,                  dd SWITCHER_TYPE
        at VMMSWITCHERDEF.cbCode,                   dd NAME(End)                    - NAME(Start)
        at VMMSWITCHERDEF.offR0ToRawMode,           dd NAME(vmmR0ToRawMode)         - NAME(Start)
        at VMMSWITCHERDEF.offRCToHost,              dd NAME(vmmRCToHost)            - NAME(Start)
        at VMMSWITCHERDEF.offRCCallTrampoline,      dd NAME(vmmRCCallTrampoline)    - NAME(Start)
        at VMMSWITCHERDEF.offRCToHostAsm,           dd NAME(vmmRCToHostAsm)         - NAME(Start)
        at VMMSWITCHERDEF.offRCToHostAsmNoReturn,   dd NAME(vmmRCToHostAsmNoReturn) - NAME(Start)
        ; disasm help
        at VMMSWITCHERDEF.offHCCode0,               dd 0
%ifdef NEED_ID
        at VMMSWITCHERDEF.cbHCCode0,                dd NAME(IDEnterTarget)          - NAME(Start)
%else
        at VMMSWITCHERDEF.cbHCCode0,                dd NAME(FarJmpGCTarget)         - NAME(Start)
%endif
        at VMMSWITCHERDEF.offHCCode1,               dd NAME(HCExitTarget)           - NAME(Start)
        at VMMSWITCHERDEF.cbHCCode1,                dd NAME(End)                    - NAME(HCExitTarget)
%ifdef NEED_ID
        at VMMSWITCHERDEF.offIDCode0,               dd NAME(IDEnterTarget)          - NAME(Start)
        at VMMSWITCHERDEF.cbIDCode0,                dd NAME(FarJmpGCTarget)         - NAME(IDEnterTarget)
        at VMMSWITCHERDEF.offIDCode1,               dd NAME(IDExitTarget)           - NAME(Start)
        at VMMSWITCHERDEF.cbIDCode1,                dd NAME(HCExitTarget)           - NAME(IDExitTarget)
%else
        at VMMSWITCHERDEF.offIDCode0,               dd 0
        at VMMSWITCHERDEF.cbIDCode0,                dd 0
        at VMMSWITCHERDEF.offIDCode1,               dd 0
        at VMMSWITCHERDEF.cbIDCode1,                dd 0
%endif
        at VMMSWITCHERDEF.offGCCode,                dd NAME(FarJmpGCTarget)         - NAME(Start)
%ifdef NEED_ID
        at VMMSWITCHERDEF.cbGCCode,                 dd NAME(IDExitTarget)           - NAME(FarJmpGCTarget)
%else
        at VMMSWITCHERDEF.cbGCCode,                 dd NAME(HCExitTarget)           - NAME(FarJmpGCTarget)
%endif

    iend