VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMSwitcher/PAEand32Bit.mac@20961

Last change on this file since 20961 was 18927, checked in by vboxsync, 16 years ago

Big step to separate VMM data structures for guest SMP. (pgm, em)

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 30.7 KB
; $Id: PAEand32Bit.mac 18927 2009-04-16 11:41:38Z vboxsync $
;; @file
; VMM - World Switchers, template for PAE and 32-Bit.
;

;
; Copyright (C) 2006-2007 Sun Microsystems, Inc.
;
; This file is part of VirtualBox Open Source Edition (OSE), as
; available from http://www.virtualbox.org. This file is free software;
; you can redistribute it and/or modify it under the terms of the GNU
; General Public License (GPL) as published by the Free Software
; Foundation, in version 2 as it comes in the "COPYING" file of the
; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
;
; Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
; Clara, CA 95054 USA or visit http://www.sun.com if you need
; additional information or have any questions.
;

;%define DEBUG_STUFF 1

;*******************************************************************************
;*   Header Files                                                              *
;*******************************************************************************
%include "VBox/asmdefs.mac"
%include "VBox/x86.mac"
%include "VBox/cpum.mac"
%include "VBox/stam.mac"
%include "VBox/vm.mac"
%include "CPUMInternal.mac"
%include "VMMSwitcher/VMMSwitcher.mac"

%undef NEED_ID
%ifdef NEED_PAE_ON_32BIT_HOST
%define NEED_ID
%endif
%ifdef NEED_32BIT_ON_PAE_HOST
%define NEED_ID
%endif
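
;
; Note! NEED_ID is set precisely when the host and guest use different paging
;       modes (PAE on a 32-bit host, or 32-bit on a PAE host). In that case
;       the switcher has to detour through identity mapped pages (see
;       IDEnterTarget and IDExitTarget below) so that CR4.PAE can be toggled
;       while paging is disabled.
;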


;
; Start the fixup records
;    We collect the fixups in the .data section as we go along
;    It is therefore VITAL that no-one is using the .data section
;    for anything else between 'Start' and 'End'.
;
BEGINDATA
GLOBALNAME Fixups



BEGINCODE
GLOBALNAME Start

;;
; The C interface.
;
BEGINPROC vmmR0HostToGuest

%ifdef DEBUG_STUFF
    COM_S_NEWLINE
    COM_S_CHAR '^'
%endif

%ifdef VBOX_WITH_STATISTICS
    ;
    ; Switcher stats.
    ;
    FIXUP FIX_HC_VM_OFF, 1, VM.StatSwitcherToGC
    mov edx, 0ffffffffh
    STAM_PROFILE_ADV_START edx
%endif

    ;
    ; Call worker.
    ;
    FIXUP FIX_HC_CPUM_OFF, 1, 0
    mov edx, 0ffffffffh
    push cs                         ; allow for far return and restore cs correctly.
    call NAME(vmmR0HostToGuestAsm)

%ifdef VBOX_WITH_STATISTICS
    ;
    ; Switcher stats.
    ;
    FIXUP FIX_HC_VM_OFF, 1, VM.StatSwitcherToHC
    mov edx, 0ffffffffh
    STAM_PROFILE_ADV_STOP edx
%endif

    ret
ENDPROC vmmR0HostToGuest
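
; Note! Each FIXUP line emits a fixup record into the .data section between
;       the Fixups and FixupsEnd labels; the relocator (NAME(Relocate),
;       declared extern near the end of this file) uses those records to
;       patch the placeholder operand that follows, e.g. the 0ffffffffh in
;       'mov edx, 0ffffffffh' above, with the real address for the current
;       context.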


; *****************************************************************************
; vmmR0HostToGuestAsm
;
; Phase one of the switch from host to guest context (host MMU context)
;
; INPUT:
;       - edx       virtual address of CPUM structure (valid in host context)
;
; USES/DESTROYS:
;       - eax, ecx, edx
;
; ASSUMPTION:
;       - current CS and DS selectors are wide open
;
; *****************************************************************************
ALIGNCODE(16)
BEGINPROC vmmR0HostToGuestAsm
    ;;
    ;; Save CPU host context
    ;;      Skip eax, edx and ecx as these are not preserved over calls.
    ;;
    CPUMCPU_FROM_CPUM(edx)
    ; general registers.
    mov [edx + CPUMCPU.Host.ebx], ebx
    mov [edx + CPUMCPU.Host.edi], edi
    mov [edx + CPUMCPU.Host.esi], esi
    mov [edx + CPUMCPU.Host.esp], esp
    mov [edx + CPUMCPU.Host.ebp], ebp
    ; selectors.
    mov [edx + CPUMCPU.Host.ds], ds
    mov [edx + CPUMCPU.Host.es], es
    mov [edx + CPUMCPU.Host.fs], fs
    mov [edx + CPUMCPU.Host.gs], gs
    mov [edx + CPUMCPU.Host.ss], ss
    ; special registers.
    sldt [edx + CPUMCPU.Host.ldtr]
    sidt [edx + CPUMCPU.Host.idtr]
    sgdt [edx + CPUMCPU.Host.gdtr]
    str  [edx + CPUMCPU.Host.tr]
    ; flags
    pushfd
    pop dword [edx + CPUMCPU.Host.eflags]

    FIXUP FIX_NO_SYSENTER_JMP, 0, htg_no_sysenter - NAME(Start) ; this will insert a jmp htg_no_sysenter if host doesn't use sysenter.
    ; save MSR_IA32_SYSENTER_CS register.
    mov ecx, MSR_IA32_SYSENTER_CS
    mov ebx, edx                    ; save edx
    rdmsr                           ; edx:eax <- MSR[ecx]
    mov [ebx + CPUMCPU.Host.SysEnter.cs], eax
    mov [ebx + CPUMCPU.Host.SysEnter.cs + 4], edx
    xor eax, eax                    ; load 0:0 to cause #GP upon sysenter
    xor edx, edx
    wrmsr
    xchg ebx, edx                   ; restore edx
    jmp short htg_no_sysenter
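
; Note! With MSR_IA32_SYSENTER_CS zeroed, any sysenter executed while the
;       switcher/guest context is active causes #GP instead of entering the
;       host kernel; the saved value is written back on the way out (see the
;       FIX_NO_SYSENTER_JMP block before gth_sysenter_no below).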

ALIGNCODE(16)
htg_no_sysenter:

    ;; handle use flags.
    mov esi, [edx + CPUMCPU.fUseFlags] ; esi == use flags.
    and esi, ~CPUM_USED_FPU         ; Clear CPUM_USED_* flags. ;;@todo FPU check can be optimized to use cr0 flags!
    mov [edx + CPUMCPU.fUseFlags], esi

    ; debug registers.
    test esi, CPUM_USE_DEBUG_REGS | CPUM_USE_DEBUG_REGS_HOST
    jz htg_debug_regs_no
    jmp htg_debug_regs_save_dr7and6
htg_debug_regs_no:

    ; control registers.
    mov eax, cr0
    mov [edx + CPUMCPU.Host.cr0], eax
    ;mov eax, cr2                   ; assume the host OS doesn't stuff things in cr2. (safe)
    ;mov [edx + CPUMCPU.Host.cr2], eax
    mov eax, cr3
    mov [edx + CPUMCPU.Host.cr3], eax
    mov eax, cr4
    mov [edx + CPUMCPU.Host.cr4], eax

    ;;
    ;; Start switching to VMM context.
    ;;

    ;
    ; Change CR0 and CR4 so we can correctly emulate FPU/MMX/SSE[23] exceptions
    ; Also disable WP. (eax==cr4 now)
    ; Note! X86_CR4_PSE and X86_CR4_PAE are important if the host thinks so :-)
    ; Note! X86_CR4_VMXE must not be touched in case the CPU is in vmx root mode
    ;
    and eax, X86_CR4_MCE | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_VMXE
    mov ecx, [edx + CPUMCPU.Guest.cr4]
    ;; @todo Switcher cleanup: Determine base CR4 during CPUMR0Init / VMMR3SelectSwitcher putting it
    ;       in CPUMCPU.Hyper.cr4 (which isn't currently being used). That should
    ;       simplify this operation a bit (and improve locality of the data).

    ;
    ; CR4.AndMask and CR4.OrMask are set in CPUMR3Init based on the presence of
    ; FXSAVE support on the host CPU
    ;
    CPUM_FROM_CPUMCPU(edx)
    and ecx, [edx + CPUM.CR4.AndMask]
    or eax, ecx
    or eax, [edx + CPUM.CR4.OrMask]
    mov cr4, eax

    CPUMCPU_FROM_CPUM(edx)
    mov eax, [edx + CPUMCPU.Guest.cr0]
    and eax, X86_CR0_EM
    or eax, X86_CR0_PE | X86_CR0_PG | X86_CR0_TS | X86_CR0_ET | X86_CR0_NE | X86_CR0_MP
    mov cr0, eax
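
; Note! The CR0 value just loaded keeps only the guest's EM bit and forces
;       PE/PG/ET/NE plus TS and MP, so the first FPU/MMX/SSE instruction in
;       this context raises #NM and the FPU state can be switched lazily
;       (compare the CPUM_USED_FPU handling on the way back to the host).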

    ; Load new gdt so we can do far jump to guest code after cr3 reload.
    lgdt [edx + CPUMCPU.Hyper.gdtr]
    DEBUG_CHAR('1')                 ; trashes esi

    ; Store the hypervisor cr3 for later loading
    mov ebp, [edx + CPUMCPU.Hyper.cr3]

    ;;
    ;; Load Intermediate memory context.
    ;;
    FIXUP SWITCHER_FIX_INTER_CR3_HC, 1
    mov eax, 0ffffffffh
    mov cr3, eax
    DEBUG_CHAR('2')                 ; trashes esi

%ifdef NEED_ID
    ;;
    ;; Jump to identity mapped location
    ;;
    FIXUP FIX_HC_2_ID_NEAR_REL, 1, NAME(IDEnterTarget) - NAME(Start)
    jmp near NAME(IDEnterTarget)

    ; We're now on identity mapped pages!
ALIGNCODE(16)
GLOBALNAME IDEnterTarget
    DEBUG_CHAR('3')
    mov edx, cr4
%ifdef NEED_PAE_ON_32BIT_HOST
    or edx, X86_CR4_PAE
%else
    and edx, ~X86_CR4_PAE
%endif
    mov eax, cr0
    and eax, (~X86_CR0_PG) & 0xffffffff ; prevent yasm warning
    mov cr0, eax
    DEBUG_CHAR('4')
    mov cr4, edx
    FIXUP SWITCHER_FIX_INTER_CR3_GC, 1
    mov edx, 0ffffffffh
    mov cr3, edx
    or eax, X86_CR0_PG
    DEBUG_CHAR('5')
    mov cr0, eax
    DEBUG_CHAR('6')
%endif
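
; Note! The sequence above must execute on identity mapped pages: paging is
;       turned off (CR0.PG=0), CR4.PAE is flipped to match the other side's
;       paging mode, the intermediate CR3 for that mode is loaded, and paging
;       is switched back on. EIP has to translate to the same physical
;       address throughout, hence the identity mapping.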

    ;;
    ;; Jump to guest code mapping of the code and load the Hypervisor CS.
    ;;
    FIXUP FIX_GC_FAR32, 1, NAME(FarJmpGCTarget) - NAME(Start)
    jmp 0fff8h:0deadfaceh
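
; Note! The 0deadfaceh offset above is a placeholder which the FIX_GC_FAR32
;       record patches with the guest-context address of FarJmpGCTarget;
;       0fff8h is evidently the fixed hypervisor code selector, while the
;       data selector is patched via FIX_HYPER_DS further down.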


    ;;
    ;; When we arrive at this label we're at the
    ;; guest code mapping of the switching code.
    ;;
ALIGNCODE(16)
GLOBALNAME FarJmpGCTarget
    DEBUG_CHAR('-')
    ; load final cr3 and do far jump to load cs.
    mov cr3, ebp                    ; ebp set above
    DEBUG_CHAR('0')

    ;;
    ;; We're in VMM MMU context and VMM CS is loaded.
    ;; Setup the rest of the VMM state.
    ;;
    FIXUP FIX_GC_CPUMCPU_OFF, 1, 0
    mov edx, 0ffffffffh
    ; Activate guest IDT
    DEBUG_CHAR('1')
    lidt [edx + CPUMCPU.Hyper.idtr]
    ; Load selectors
    DEBUG_CHAR('2')
    FIXUP FIX_HYPER_DS, 1
    mov eax, 0ffffh
    mov ds, eax
    mov es, eax
    xor eax, eax
    mov gs, eax
    mov fs, eax

    ; Setup stack; use the lss_esp, ss pair for lss
    DEBUG_CHAR('3')
    mov eax, [edx + CPUMCPU.Hyper.esp]
    mov [edx + CPUMCPU.Hyper.lss_esp], eax
    lss esp, [edx + CPUMCPU.Hyper.lss_esp]

    ; Restore TSS selector; must mark it as not busy before using ltr (!)
    DEBUG_CHAR('4')
    FIXUP FIX_GC_TSS_GDTE_DW2, 2
    and dword [0ffffffffh], ~0200h  ; clear busy flag (2nd type2 bit)
    DEBUG_CHAR('5')
    ltr word [edx + CPUMCPU.Hyper.tr]
    DEBUG_CHAR('6')
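
; Note! ltr loads a TSS descriptor and marks it busy; loading a descriptor
;       that is already marked busy raises #GP. That is why the busy bit
;       (bit 9 of the descriptor's second dword, 0200h) is cleared in the
;       GDT entry first, through an address patched in by FIX_GC_TSS_GDTE_DW2.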

    ; Activate the ldt (now we can safely crash).
    lldt [edx + CPUMCPU.Hyper.ldtr]
    DEBUG_CHAR('7')

    ;; use flags.
    mov esi, [edx + CPUMCPU.fUseFlags]

    ; debug registers
    test esi, CPUM_USE_DEBUG_REGS
    jz htg_debug_regs_guest_no
    jmp htg_debug_regs_guest
htg_debug_regs_guest_no:
    DEBUG_CHAR('9')

%ifdef VBOX_WITH_NMI
    ;
    ; Setup K7 NMI.
    ;
    mov esi, edx
    ; clear all PerfEvtSeln registers
    xor eax, eax
    xor edx, edx
    mov ecx, MSR_K7_PERFCTR0
    wrmsr
    mov ecx, MSR_K7_PERFCTR1
    wrmsr
    mov ecx, MSR_K7_PERFCTR2
    wrmsr
    mov ecx, MSR_K7_PERFCTR3
    wrmsr

    mov eax, RT_BIT(20) | RT_BIT(17) | RT_BIT(16) | 076h
    mov ecx, MSR_K7_EVNTSEL0
    wrmsr
    mov eax, 02329B000h
    mov edx, 0fffffffeh             ; -1.6GHz * 5
    mov ecx, MSR_K7_PERFCTR0
    wrmsr

    FIXUP FIX_GC_APIC_BASE_32BIT, 1
    mov eax, 0f0f0f0f0h
    add eax, 0340h                  ; APIC_LVTPC
    mov dword [eax], 0400h          ; APIC_DM_NMI

    xor edx, edx
    mov eax, RT_BIT(20) | RT_BIT(17) | RT_BIT(16) | 076h | RT_BIT(22) ;+EN
    mov ecx, MSR_K7_EVNTSEL0
    wrmsr

    mov edx, esi
%endif
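
; Note! The block above arms a perfmon NMI watchdog: EVNTSEL0 counts event
;       076h (CPU clocks not halted on AMD K7-class CPUs), RT_BIT(20) enables
;       APIC interrupt delivery, the APIC LVTPC entry is set to NMI delivery
;       (0400h = APIC_DM_NMI), and PERFCTR0 is preloaded with edx:eax =
;       0fffffffe2329b000h = -8,000,000,000, so it overflows and fires an NMI
;       roughly every 5 seconds on a 1.6GHz part, matching the '-1.6GHz * 5'
;       comment.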

    ; General registers.
    mov ebx, [edx + CPUMCPU.Hyper.ebx]
    mov ebp, [edx + CPUMCPU.Hyper.ebp]
    mov esi, [edx + CPUMCPU.Hyper.esi]
    mov edi, [edx + CPUMCPU.Hyper.edi]
    push dword [edx + CPUMCPU.Hyper.eflags]
    popfd
    DEBUG_CHAR('!')

    ;;
    ;; Return to the VMM code which either called the switcher or
    ;; the code set up to run by HC.
    ;;
%ifdef DEBUG_STUFF
    COM_S_PRINT ';eip='
    mov eax, [edx + CPUMCPU.Hyper.eip]
    COM_S_DWORD_REG eax
    COM_S_CHAR ';'
%endif
    mov eax, [edx + CPUMCPU.Hyper.eip]
    ; callees expect CPUM ptr
    CPUM_FROM_CPUMCPU(edx)

%ifdef VBOX_WITH_STATISTICS
    FIXUP FIX_GC_VM_OFF, 1, VM.StatSwitcherToGC
    mov edx, 0ffffffffh
    STAM_PROFILE_ADV_STOP edx
    FIXUP FIX_GC_CPUM_OFF, 1, 0
    mov edx, 0ffffffffh
%endif
    jmp eax

;;
; Detour for saving the host DR7 and DR6.
; esi and edx must be preserved.
htg_debug_regs_save_dr7and6:
    DEBUG_S_CHAR('s');
    mov eax, dr7                    ; not sure, but if I read the docs right this will trap if GD is set. FIXME!!!
    mov [edx + CPUMCPU.Host.dr7], eax
    xor eax, eax                    ; clear everything. (bit 12? is read as 1...)
    mov dr7, eax
    mov eax, dr6                    ; just in case we save the state register too.
    mov [edx + CPUMCPU.Host.dr6], eax
    jmp htg_debug_regs_no

;;
; Detour for saving host DR0-3 and loading hypervisor debug registers.
; esi and edx must be preserved.
htg_debug_regs_guest:
    DEBUG_S_CHAR('D')
    DEBUG_S_CHAR('R')
    DEBUG_S_CHAR('x')
    ; save host DR0-3.
    mov eax, dr0
    mov [edx + CPUMCPU.Host.dr0], eax
    mov ebx, dr1
    mov [edx + CPUMCPU.Host.dr1], ebx
    mov ecx, dr2
    mov [edx + CPUMCPU.Host.dr2], ecx
    mov eax, dr3
    mov [edx + CPUMCPU.Host.dr3], eax

    ; load hyper DR0-7
    mov ebx, [edx + CPUMCPU.Hyper.dr]
    mov dr0, ebx
    mov ecx, [edx + CPUMCPU.Hyper.dr + 8*1]
    mov dr1, ecx
    mov eax, [edx + CPUMCPU.Hyper.dr + 8*2]
    mov dr2, eax
    mov ebx, [edx + CPUMCPU.Hyper.dr + 8*3]
    mov dr3, ebx
    ;mov eax, [edx + CPUMCPU.Hyper.dr + 8*6]
    mov ecx, 0ffff0ff0h
    mov dr6, ecx
    mov eax, [edx + CPUMCPU.Hyper.dr + 8*7]
    mov dr7, eax
    jmp htg_debug_regs_guest_no
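
; Note! The 8-byte stride into CPUMCPU.Hyper.dr indicates the context
;       structure stores the debug registers as 64-bit values; only the low
;       dword of each is loaded here. DR6 is simply set to 0ffff0ff0h, the
;       architectural reset value, rather than to a saved hypervisor value
;       (note the commented-out load above it).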

ENDPROC vmmR0HostToGuestAsm


;;
; Trampoline for doing a call when starting the hypervisor execution.
;
; Push any arguments to the routine.
; Push the argument frame size (cArg * 4).
; Push the call target (_cdecl convention).
; Push the address of this routine.
;
;
ALIGNCODE(16)
BEGINPROC vmmGCCallTrampoline
%ifdef DEBUG_STUFF
    COM_S_CHAR 'c'
    COM_S_CHAR 't'
    COM_S_CHAR '!'
%endif

    ; call routine
    pop eax                         ; call address
    mov esi, edx                    ; save edx
    pop edi                         ; argument count.
%ifdef DEBUG_STUFF
    COM_S_PRINT ';eax='
    COM_S_DWORD_REG eax
    COM_S_CHAR ';'
%endif
    call eax                        ; do call
    add esp, edi                    ; cleanup stack

    ; return to the host context.
    push byte 0                     ; eip
    mov edx, esi                    ; CPUM pointer

%ifdef DEBUG_STUFF
    COM_S_CHAR '`'
%endif
    jmp NAME(VMMGCGuestToHostAsm)   ; eax = returncode.
ENDPROC vmmGCCallTrampoline
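
; As an illustration of the calling convention described above (names and
; values are hypothetical, not from this file), a caller wanting the
; trampoline to invoke a _cdecl function HyperFunc(a, b) would build the
; hypervisor stack like this before the world switch:
;
;       push    b                           ; arguments, pushed right to left
;       push    a
;       push    2 * 4                       ; cArg * 4, the frame size to clean up
;       push    NAME(HyperFunc)             ; the call target
;       push    NAME(vmmGCCallTrampoline)   ; where execution will resume
;
; The trampoline then pops the target and the frame size, performs the call,
; cleans up the stack, and enters VMMGCGuestToHostAsm with the function's
; return code in eax and a zero pushed to become the next Hyper.eip.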



;;
; The C interface.
;
ALIGNCODE(16)
BEGINPROC vmmGCGuestToHost
%ifdef DEBUG_STUFF
    push esi
    COM_NEWLINE
    DEBUG_CHAR('b')
    DEBUG_CHAR('a')
    DEBUG_CHAR('c')
    DEBUG_CHAR('k')
    DEBUG_CHAR('!')
    COM_NEWLINE
    pop esi
%endif
    mov eax, [esp + 4]
    jmp NAME(VMMGCGuestToHostAsm)
ENDPROC vmmGCGuestToHost


;;
; VMMGCGuestToHostAsmGuestCtx
;
; Switches from Guest Context to Host Context.
; Of course it's only called from within the GC.
;
; @param eax      Return code.
; @param esp + 4  Pointer to CPUMCTXCORE.
;
; @remark ASSUMES interrupts disabled.
;
ALIGNCODE(16)
BEGINPROC VMMGCGuestToHostAsmGuestCtx
    DEBUG_CHAR('~')

%ifdef VBOX_WITH_STATISTICS
    FIXUP FIX_GC_VM_OFF, 1, VM.StatTotalInGC
    mov edx, 0ffffffffh
    STAM_PROFILE_ADV_STOP edx

    FIXUP FIX_GC_VM_OFF, 1, VM.StatTotalGCToQemu
    mov edx, 0ffffffffh
    STAM_PROFILE_ADV_START edx

    FIXUP FIX_GC_VM_OFF, 1, VM.StatSwitcherToHC
    mov edx, 0ffffffffh
    STAM_PROFILE_ADV_START edx
%endif

    ;
    ; Load the CPUMCPU pointer.
    ;
    FIXUP FIX_GC_CPUMCPU_OFF, 1, 0
    mov edx, 0ffffffffh

    ; Skip return address (assumes called!)
    lea esp, [esp + 4]

    ;
    ; Guest Context (assumes esp now points to CPUMCTXCORE structure).
    ;
    ; general purpose registers.
    push eax

    mov eax, [esp + 4 + CPUMCTXCORE.eax]
    mov [edx + CPUMCPU.Guest.eax], eax
    mov eax, [esp + 4 + CPUMCTXCORE.ecx]
    mov [edx + CPUMCPU.Guest.ecx], eax
    mov eax, [esp + 4 + CPUMCTXCORE.edx]
    mov [edx + CPUMCPU.Guest.edx], eax
    mov eax, [esp + 4 + CPUMCTXCORE.ebx]
    mov [edx + CPUMCPU.Guest.ebx], eax
    mov eax, [esp + 4 + CPUMCTXCORE.esp]
    mov [edx + CPUMCPU.Guest.esp], eax
    mov eax, [esp + 4 + CPUMCTXCORE.ebp]
    mov [edx + CPUMCPU.Guest.ebp], eax
    mov eax, [esp + 4 + CPUMCTXCORE.esi]
    mov [edx + CPUMCPU.Guest.esi], eax
    mov eax, [esp + 4 + CPUMCTXCORE.edi]
    mov [edx + CPUMCPU.Guest.edi], eax
    mov eax, dword [esp + 4 + CPUMCTXCORE.es]
    mov dword [edx + CPUMCPU.Guest.es], eax
    mov eax, dword [esp + 4 + CPUMCTXCORE.cs]
    mov dword [edx + CPUMCPU.Guest.cs], eax
    mov eax, dword [esp + 4 + CPUMCTXCORE.ss]
    mov dword [edx + CPUMCPU.Guest.ss], eax
    mov eax, dword [esp + 4 + CPUMCTXCORE.ds]
    mov dword [edx + CPUMCPU.Guest.ds], eax
    mov eax, dword [esp + 4 + CPUMCTXCORE.fs]
    mov dword [edx + CPUMCPU.Guest.fs], eax
    mov eax, dword [esp + 4 + CPUMCTXCORE.gs]
    mov dword [edx + CPUMCPU.Guest.gs], eax
    mov eax, [esp + 4 + CPUMCTXCORE.eflags]
    mov dword [edx + CPUMCPU.Guest.eflags], eax
    mov eax, [esp + 4 + CPUMCTXCORE.eip]
    mov dword [edx + CPUMCPU.Guest.eip], eax
    pop eax

    add esp, CPUMCTXCORE_size       ; skip CPUMCTXCORE structure

    jmp vmmGCGuestToHostAsm_EIPDone
ENDPROC VMMGCGuestToHostAsmGuestCtx


;;
; VMMGCGuestToHostAsmHyperCtx
;
; This is an alternative entry point which we'll be using
; when we have the hypervisor context and need to save
; that before going to the host.
;
; This is typically useful when abandoning the hypervisor
; because of a trap, when we want the trap state to be saved.
;
; @param eax  Return code.
; @param ecx  Points to CPUMCTXCORE.
; @uses eax, edx, ecx
ALIGNCODE(16)
BEGINPROC VMMGCGuestToHostAsmHyperCtx
    DEBUG_CHAR('#')

%ifdef VBOX_WITH_STATISTICS
    FIXUP FIX_GC_VM_OFF, 1, VM.StatTotalInGC
    mov edx, 0ffffffffh
    STAM_PROFILE_ADV_STOP edx

    FIXUP FIX_GC_VM_OFF, 1, VM.StatTotalGCToQemu
    mov edx, 0ffffffffh
    STAM_PROFILE_ADV_START edx

    FIXUP FIX_GC_VM_OFF, 1, VM.StatSwitcherToHC
    mov edx, 0ffffffffh
    STAM_PROFILE_ADV_START edx
%endif

    ;
    ; Load the CPUM pointer.
    ;
    FIXUP FIX_GC_CPUMCPU_OFF, 1, 0
    mov edx, 0ffffffffh

    push eax                        ; save return code.
    ; general purpose registers
    mov eax, [ecx + CPUMCTXCORE.edi]
    mov [edx + CPUMCPU.Hyper.edi], eax
    mov eax, [ecx + CPUMCTXCORE.esi]
    mov [edx + CPUMCPU.Hyper.esi], eax
    mov eax, [ecx + CPUMCTXCORE.ebp]
    mov [edx + CPUMCPU.Hyper.ebp], eax
    mov eax, [ecx + CPUMCTXCORE.eax]
    mov [edx + CPUMCPU.Hyper.eax], eax
    mov eax, [ecx + CPUMCTXCORE.ebx]
    mov [edx + CPUMCPU.Hyper.ebx], eax
    mov eax, [ecx + CPUMCTXCORE.edx]
    mov [edx + CPUMCPU.Hyper.edx], eax
    mov eax, [ecx + CPUMCTXCORE.ecx]
    mov [edx + CPUMCPU.Hyper.ecx], eax
    mov eax, [ecx + CPUMCTXCORE.esp]
    mov [edx + CPUMCPU.Hyper.esp], eax
    ; selectors
    mov eax, [ecx + CPUMCTXCORE.ss]
    mov [edx + CPUMCPU.Hyper.ss], eax
    mov eax, [ecx + CPUMCTXCORE.gs]
    mov [edx + CPUMCPU.Hyper.gs], eax
    mov eax, [ecx + CPUMCTXCORE.fs]
    mov [edx + CPUMCPU.Hyper.fs], eax
    mov eax, [ecx + CPUMCTXCORE.es]
    mov [edx + CPUMCPU.Hyper.es], eax
    mov eax, [ecx + CPUMCTXCORE.ds]
    mov [edx + CPUMCPU.Hyper.ds], eax
    mov eax, [ecx + CPUMCTXCORE.cs]
    mov [edx + CPUMCPU.Hyper.cs], eax
    ; flags
    mov eax, [ecx + CPUMCTXCORE.eflags]
    mov [edx + CPUMCPU.Hyper.eflags], eax
    ; eip
    mov eax, [ecx + CPUMCTXCORE.eip]
    mov [edx + CPUMCPU.Hyper.eip], eax
    ; jump to common worker code.
    pop eax                         ; restore return code.
    jmp vmmGCGuestToHostAsm_SkipHyperRegs

ENDPROC VMMGCGuestToHostAsmHyperCtx


;;
; VMMGCGuestToHostAsm
;
; This is an alternative entry point which we'll be using
; when we have already saved the guest state, or when we
; haven't been messing with the guest at all.
;
; @param eax  Return code.
; @uses eax, edx, ecx (or it may use them in the future)
;
ALIGNCODE(16)
BEGINPROC VMMGCGuestToHostAsm
    DEBUG_CHAR('%')

%ifdef VBOX_WITH_STATISTICS
    FIXUP FIX_GC_VM_OFF, 1, VM.StatTotalInGC
    mov edx, 0ffffffffh
    STAM_PROFILE_ADV_STOP edx

    FIXUP FIX_GC_VM_OFF, 1, VM.StatTotalGCToQemu
    mov edx, 0ffffffffh
    STAM_PROFILE_ADV_START edx

    FIXUP FIX_GC_VM_OFF, 1, VM.StatSwitcherToHC
    mov edx, 0ffffffffh
    STAM_PROFILE_ADV_START edx
%endif

    ;
    ; Load the CPUMCPU pointer.
    ;
    FIXUP FIX_GC_CPUMCPU_OFF, 1, 0
    mov edx, 0ffffffffh

    pop dword [edx + CPUMCPU.Hyper.eip] ; call return from stack
    jmp short vmmGCGuestToHostAsm_EIPDone
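
; Note! The three guest-to-host entry points converge here:
;       VMMGCGuestToHostAsmGuestCtx and this routine continue at
;       vmmGCGuestToHostAsm_EIPDone after taking care of eip, while
;       VMMGCGuestToHostAsmHyperCtx enters at
;       vmmGCGuestToHostAsm_SkipHyperRegs since it has already saved the
;       full hypervisor register state from the CPUMCTXCORE.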

ALIGNCODE(16)
vmmGCGuestToHostAsm_EIPDone:
    ; general registers which we care about.
    mov dword [edx + CPUMCPU.Hyper.ebx], ebx
    mov dword [edx + CPUMCPU.Hyper.esi], esi
    mov dword [edx + CPUMCPU.Hyper.edi], edi
    mov dword [edx + CPUMCPU.Hyper.ebp], ebp
    mov dword [edx + CPUMCPU.Hyper.esp], esp

    ; special registers which may change.
vmmGCGuestToHostAsm_SkipHyperRegs:
    ; str [edx + CPUMCPU.Hyper.tr] - double fault only, and it won't be right then either.
    sldt [edx + CPUMCPU.Hyper.ldtr]

    ; No need to save CRx here. They are set dynamically according to Guest/Host requirements.
    ; The FPU context is saved before the host state is restored, in a separate branch below.

%ifdef VBOX_WITH_NMI
    ;
    ; Disarm K7 NMI.
    ;
    mov esi, edx
    mov edi, eax

    xor edx, edx
    xor eax, eax
    mov ecx, MSR_K7_EVNTSEL0
    wrmsr

    mov eax, edi
    mov edx, esi
%endif


    ;;
    ;; Load Intermediate memory context.
    ;;
    mov edi, eax                    ; save return code in EDI (careful with COM_DWORD_REG from here on!)
    mov ecx, [edx + CPUMCPU.Host.cr3]
    FIXUP SWITCHER_FIX_INTER_CR3_GC, 1
    mov eax, 0ffffffffh
    mov cr3, eax
    DEBUG_CHAR('?')

    ;; We're now in intermediate memory context!
%ifdef NEED_ID
    ;;
    ;; Jump to identity mapped location
    ;;
    FIXUP FIX_GC_2_ID_NEAR_REL, 1, NAME(IDExitTarget) - NAME(Start)
    jmp near NAME(IDExitTarget)

    ; We're now on identity mapped pages!
ALIGNCODE(16)
GLOBALNAME IDExitTarget
    DEBUG_CHAR('1')
    mov edx, cr4
%ifdef NEED_PAE_ON_32BIT_HOST
    and edx, ~X86_CR4_PAE
%else
    or edx, X86_CR4_PAE
%endif
    mov eax, cr0
    and eax, (~X86_CR0_PG) & 0xffffffff ; prevent yasm warning
    mov cr0, eax
    DEBUG_CHAR('2')
    mov cr4, edx
    FIXUP SWITCHER_FIX_INTER_CR3_HC, 1
    mov edx, 0ffffffffh
    mov cr3, edx
    or eax, X86_CR0_PG
    DEBUG_CHAR('3')
    mov cr0, eax
    DEBUG_CHAR('4')

    ;;
    ;; Jump to HC mapping.
    ;;
    FIXUP FIX_ID_2_HC_NEAR_REL, 1, NAME(HCExitTarget) - NAME(Start)
    jmp near NAME(HCExitTarget)
%else
    ;;
    ;; Jump to HC mapping.
    ;;
    FIXUP FIX_GC_2_HC_NEAR_REL, 1, NAME(HCExitTarget) - NAME(Start)
    jmp near NAME(HCExitTarget)
%endif


    ;
    ; When we arrive here we're at the host context
    ; mapping of the switcher code.
    ;
ALIGNCODE(16)
GLOBALNAME HCExitTarget
    DEBUG_CHAR('9')
    ; load final cr3
    mov cr3, ecx
    DEBUG_CHAR('@')


    ;;
    ;; Restore Host context.
    ;;
    ; Load CPUM pointer into edx
    FIXUP FIX_HC_CPUM_OFF, 1, 0
    mov edx, 0ffffffffh
    CPUMCPU_FROM_CPUM(edx)
    ; activate host gdt and idt
    lgdt [edx + CPUMCPU.Host.gdtr]
    DEBUG_CHAR('0')
    lidt [edx + CPUMCPU.Host.idtr]
    DEBUG_CHAR('1')
    ; Restore TSS selector; must mark it as not busy before using ltr (!)
%if 1 ; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p)
    movzx eax, word [edx + CPUMCPU.Host.tr] ; eax <- TR
    and al, 0F8h                    ; mask away TI and RPL bits, get descriptor offset.
    add eax, [edx + CPUMCPU.Host.gdtr + 2]  ; eax <- GDTR.address + descriptor offset.
    and dword [eax + 4], ~0200h     ; clear busy flag (2nd type2 bit)
    ltr word [edx + CPUMCPU.Host.tr]
%else
    movzx eax, word [edx + CPUMCPU.Host.tr] ; eax <- TR
    and al, 0F8h                    ; mask away TI and RPL bits, get descriptor offset.
    add eax, [edx + CPUMCPU.Host.gdtr + 2]  ; eax <- GDTR.address + descriptor offset.
    mov ecx, [eax + 4]              ; ecx <- 2nd descriptor dword
    mov ebx, ecx                    ; save original value
    and ecx, ~0200h                 ; clear busy flag (2nd type2 bit)
    mov [eax + 4], ecx              ; not using xchg here is paranoia..
    ltr word [edx + CPUMCPU.Host.tr]
    xchg [eax + 4], ebx             ; using xchg is paranoia too...
%endif
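
; Note! The TR selector's low three bits (TI and RPL) are masked off to get
;       the byte offset of the descriptor, and the GDT linear base address
;       is read from the saved gdtr image at offset 2 (the 16-bit limit
;       comes first). Clearing the busy bit there lets ltr reload the host
;       TSS without raising #GP.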
    ; activate ldt
    DEBUG_CHAR('2')
    lldt [edx + CPUMCPU.Host.ldtr]
    ; Restore segment registers
    mov eax, [edx + CPUMCPU.Host.ds]
    mov ds, eax
    mov eax, [edx + CPUMCPU.Host.es]
    mov es, eax
    mov eax, [edx + CPUMCPU.Host.fs]
    mov fs, eax
    mov eax, [edx + CPUMCPU.Host.gs]
    mov gs, eax
    ; restore stack
    lss esp, [edx + CPUMCPU.Host.esp]


    FIXUP FIX_NO_SYSENTER_JMP, 0, gth_sysenter_no - NAME(Start) ; this will insert a jmp gth_sysenter_no if host doesn't use sysenter.
    ; restore MSR_IA32_SYSENTER_CS register.
    mov ecx, MSR_IA32_SYSENTER_CS
    mov eax, [edx + CPUMCPU.Host.SysEnter.cs]
    mov ebx, [edx + CPUMCPU.Host.SysEnter.cs + 4]
    xchg edx, ebx                   ; save/load edx
    wrmsr                           ; MSR[ecx] <- edx:eax
    xchg edx, ebx                   ; restore edx
    jmp short gth_sysenter_no

ALIGNCODE(16)
gth_sysenter_no:

    ;; @todo AMD syscall

    ; Restore FPU if guest has used it.
    ; Using fxrstor should ensure that we're not causing unwanted exception on the host.
    mov esi, [edx + CPUMCPU.fUseFlags] ; esi == use flags.
    test esi, CPUM_USED_FPU
    jz near gth_fpu_no
    mov ecx, cr0
    and ecx, ~(X86_CR0_TS | X86_CR0_EM)
    mov cr0, ecx

    FIXUP FIX_NO_FXSAVE_JMP, 0, gth_no_fxsave - NAME(Start) ; this will insert a jmp gth_no_fxsave if fxsave isn't supported.
    fxsave  [edx + CPUMCPU.Guest.fpu]
    fxrstor [edx + CPUMCPU.Host.fpu]
    jmp near gth_fpu_no

gth_no_fxsave:
    fnsave [edx + CPUMCPU.Guest.fpu]
    mov eax, [edx + CPUMCPU.Host.fpu] ; control word
    not eax                         ; 1 means exception ignored (6 LS bits)
    and eax, byte 03Fh              ; 6 LS bits only
    test eax, [edx + CPUMCPU.Host.fpu + 4] ; status word
    jz gth_no_exceptions_pending

    ; technically incorrect, but we certainly don't want any exceptions now!!
    and dword [edx + CPUMCPU.Host.fpu + 4], ~03Fh

gth_no_exceptions_pending:
    frstor [edx + CPUMCPU.Host.fpu]
    jmp short gth_fpu_no
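
; Note! In the fnsave fallback the saved host control word's mask bits are
;       inverted and ANDed with the saved status word: a nonzero result means
;       an unmasked FPU exception is pending in the host image. Those status
;       bits are cleared before frstor, since restoring them would re-arm a
;       deferred #MF on the next host FPU instruction.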

ALIGNCODE(16)
gth_fpu_no:

    ; Control registers.
    ; Would've liked to have these higher up in case of crashes, but
    ; the fpu stuff must be done before we restore cr0.
    mov ecx, [edx + CPUMCPU.Host.cr4]
    mov cr4, ecx
    mov ecx, [edx + CPUMCPU.Host.cr0]
    mov cr0, ecx
    ;mov ecx, [edx + CPUMCPU.Host.cr2] ; assumes this is a waste of time.
    ;mov cr2, ecx

    ; restore debug registers (if modified) (esi must still be fUseFlags!)
    ; (must be done after cr4 reload because of the debug extension.)
    test esi, CPUM_USE_DEBUG_REGS | CPUM_USE_DEBUG_REGS_HOST
    jz short gth_debug_regs_no
    jmp gth_debug_regs_restore
gth_debug_regs_no:

    ; restore general registers.
    mov eax, edi                    ; restore return code. eax = return code !!
    mov edi, [edx + CPUMCPU.Host.edi]
    mov esi, [edx + CPUMCPU.Host.esi]
    mov ebx, [edx + CPUMCPU.Host.ebx]
    mov ebp, [edx + CPUMCPU.Host.ebp]
    push dword [edx + CPUMCPU.Host.eflags]
    popfd

%ifdef DEBUG_STUFF
;    COM_S_CHAR '4'
%endif
    retf
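
; Note! retf here pops both eip and cs: it matches the 'push cs' done in
;       vmmR0HostToGuest before calling vmmR0HostToGuestAsm, so the switcher
;       returns to the ring-0 caller with the original code selector.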

;;
; Detour for restoring the host debug registers.
; edx and edi must be preserved.
gth_debug_regs_restore:
    DEBUG_S_CHAR('d')
    xor eax, eax
    mov dr7, eax                    ; paranoia or not?
    test esi, CPUM_USE_DEBUG_REGS
    jz short gth_debug_regs_dr7
    DEBUG_S_CHAR('r')
    mov eax, [edx + CPUMCPU.Host.dr0]
    mov dr0, eax
    mov ebx, [edx + CPUMCPU.Host.dr1]
    mov dr1, ebx
    mov ecx, [edx + CPUMCPU.Host.dr2]
    mov dr2, ecx
    mov eax, [edx + CPUMCPU.Host.dr3]
    mov dr3, eax
gth_debug_regs_dr7:
    mov ebx, [edx + CPUMCPU.Host.dr6]
    mov dr6, ebx
    mov ecx, [edx + CPUMCPU.Host.dr7]
    mov dr7, ecx
    jmp gth_debug_regs_no

ENDPROC VMMGCGuestToHostAsm


GLOBALNAME End
;
; The description string (in the text section).
;
NAME(Description):
    db SWITCHER_DESCRIPTION
    db 0

extern NAME(Relocate)

;
; End the fixup records.
;
BEGINDATA
    db FIX_THE_END                  ; final entry.
GLOBALNAME FixupsEnd

;;
; The switcher definition structure.
ALIGNDATA(16)
GLOBALNAME Def
    istruc VMMSWITCHERDEF
        at VMMSWITCHERDEF.pvCode,                       RTCCPTR_DEF NAME(Start)
        at VMMSWITCHERDEF.pvFixups,                     RTCCPTR_DEF NAME(Fixups)
        at VMMSWITCHERDEF.pszDesc,                      RTCCPTR_DEF NAME(Description)
        at VMMSWITCHERDEF.pfnRelocate,                  RTCCPTR_DEF NAME(Relocate)
        at VMMSWITCHERDEF.enmType,                      dd SWITCHER_TYPE
        at VMMSWITCHERDEF.cbCode,                       dd NAME(End)                         - NAME(Start)
        at VMMSWITCHERDEF.offR0HostToGuest,             dd NAME(vmmR0HostToGuest)            - NAME(Start)
        at VMMSWITCHERDEF.offGCGuestToHost,             dd NAME(vmmGCGuestToHost)            - NAME(Start)
        at VMMSWITCHERDEF.offGCCallTrampoline,          dd NAME(vmmGCCallTrampoline)         - NAME(Start)
        at VMMSWITCHERDEF.offGCGuestToHostAsm,          dd NAME(VMMGCGuestToHostAsm)         - NAME(Start)
        at VMMSWITCHERDEF.offGCGuestToHostAsmHyperCtx,  dd NAME(VMMGCGuestToHostAsmHyperCtx) - NAME(Start)
        at VMMSWITCHERDEF.offGCGuestToHostAsmGuestCtx,  dd NAME(VMMGCGuestToHostAsmGuestCtx) - NAME(Start)
        ; disasm help
        at VMMSWITCHERDEF.offHCCode0,                   dd 0
%ifdef NEED_ID
        at VMMSWITCHERDEF.cbHCCode0,                    dd NAME(IDEnterTarget)               - NAME(Start)
%else
        at VMMSWITCHERDEF.cbHCCode0,                    dd NAME(FarJmpGCTarget)              - NAME(Start)
%endif
        at VMMSWITCHERDEF.offHCCode1,                   dd NAME(HCExitTarget)                - NAME(Start)
        at VMMSWITCHERDEF.cbHCCode1,                    dd NAME(End)                         - NAME(HCExitTarget)
%ifdef NEED_ID
        at VMMSWITCHERDEF.offIDCode0,                   dd NAME(IDEnterTarget)               - NAME(Start)
        at VMMSWITCHERDEF.cbIDCode0,                    dd NAME(FarJmpGCTarget)              - NAME(IDEnterTarget)
        at VMMSWITCHERDEF.offIDCode1,                   dd NAME(IDExitTarget)                - NAME(Start)
        at VMMSWITCHERDEF.cbIDCode1,                    dd NAME(HCExitTarget)                - NAME(IDExitTarget)
%else
        at VMMSWITCHERDEF.offIDCode0,                   dd 0
        at VMMSWITCHERDEF.cbIDCode0,                    dd 0
        at VMMSWITCHERDEF.offIDCode1,                   dd 0
        at VMMSWITCHERDEF.cbIDCode1,                    dd 0
%endif
        at VMMSWITCHERDEF.offGCCode,                    dd NAME(FarJmpGCTarget)              - NAME(Start)
%ifdef NEED_ID
        at VMMSWITCHERDEF.cbGCCode,                     dd NAME(IDExitTarget)                - NAME(FarJmpGCTarget)
%else
        at VMMSWITCHERDEF.cbGCCode,                     dd NAME(HCExitTarget)                - NAME(FarJmpGCTarget)
%endif

    iend