VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMSwitcher/LegacyandAMD64.mac @ 34893

Last change on this file since 34893 was 33935, checked in by vboxsync, 14 years ago:

VMM: mask all Local APIC interrupt vectors which are set up to NMI mode during world switch (raw mode only)

; VMM - World Switchers, 32Bit to AMD64.
;

;
; Copyright (C) 2006-2007 Oracle Corporation
;
; This file is part of VirtualBox Open Source Edition (OSE), as
; available from http://www.virtualbox.org. This file is free software;
; you can redistribute it and/or modify it under the terms of the GNU
; General Public License (GPL) as published by the Free Software
; Foundation, in version 2 as it comes in the "COPYING" file of the
; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
;

;%define DEBUG_STUFF 1
;%define STRICT_IF 1

;*******************************************************************************
;*  Defined Constants And Macros                                               *
;*******************************************************************************


;*******************************************************************************
;*  Header Files                                                               *
;*******************************************************************************
%include "VBox/asmdefs.mac"
%include "VBox/apic.mac"
%include "VBox/x86.mac"
%include "VBox/cpum.mac"
%include "VBox/stam.mac"
%include "VBox/vm.mac"
%include "CPUMInternal.mac"
%include "VMMSwitcher/VMMSwitcher.mac"


;
; Start the fixup records.
; We collect the fixups in the .data section as we go along.
; It is therefore VITAL that no-one is using the .data section
; for anything else between 'Start' and 'End'.
;
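; Illustration (editor's sketch, based only on the FIXUP use sites later in
; this file, not an additional mechanism): a fixup record is emitted next to
; a placeholder constant in the code, e.g.
;       FIXUP FIX_HC_CPUM_OFF, 1, 0
;       mov     edx, 0ffffffffh
; and the relocator later patches the 0ffffffffh dword (at offset 1 into the
; instruction) with the real address.
;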
BEGINDATA
GLOBALNAME Fixups



BEGINCODE
GLOBALNAME Start

BITS 32

;;
; The C interface.
; @param    [esp + 04h]  Param 1 - VM handle
; @param    [esp + 08h]  Param 2 - VMCPU offset
;
BEGINPROC vmmR0HostToGuest
%ifdef DEBUG_STUFF
    COM32_S_NEWLINE
    COM32_S_CHAR '^'
%endif

%ifdef VBOX_WITH_STATISTICS
    ;
    ; Switcher stats.
    ;
    FIXUP FIX_HC_VM_OFF, 1, VM.StatSwitcherToGC
    mov     edx, 0ffffffffh
    STAM_PROFILE_ADV_START edx
%endif

    push    ebp
    mov     ebp, [esp + 12]             ; VMCPU offset

    ; turn off interrupts
    pushf
    cli

    ;
    ; Call worker.
    ;
    FIXUP FIX_HC_CPUM_OFF, 1, 0
    mov     edx, 0ffffffffh
    push    cs                          ; allow for far return and restore cs correctly.
    call    NAME(vmmR0HostToGuestAsm)

    ; restore original flags
    popf
    pop     ebp

%ifdef VBOX_WITH_STATISTICS
    ;
    ; Switcher stats.
    ;
    FIXUP FIX_HC_VM_OFF, 1, VM.StatSwitcherToHC
    mov     edx, 0ffffffffh
    STAM_PROFILE_ADV_STOP edx
%endif

    ret

ENDPROC vmmR0HostToGuest
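; Seen from C, the entry point above behaves like a cdecl function roughly
; along these lines (hypothetical prototype for illustration; the actual
; declaration lives in the VMM headers, not in this file):
;       DECLASM(int) vmmR0HostToGuest(PVM pVM, unsigned offVCpu);
; The return code ends up in eax via CPUMCPU.u32RetCode on the way back.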

; *****************************************************************************
; vmmR0HostToGuestAsm
;
; Phase one of the switch from host to guest context (host MMU context)
;
; INPUT:
;       - edx       virtual address of CPUM structure (valid in host context)
;       - ebp       offset of the CPUMCPU structure
;
; USES/DESTROYS:
;       - eax, ecx, edx, esi
;
; ASSUMPTION:
;       - current CS and DS selectors are wide open
;
; *****************************************************************************
ALIGNCODE(16)
BEGINPROC vmmR0HostToGuestAsm
    ;;
    ;; Save CPU host context
    ;; Skip eax, edx and ecx as these are not preserved over calls.
    ;;
    CPUMCPU_FROM_CPUM_WITH_OFFSET edx, ebp
%ifdef VBOX_WITH_CRASHDUMP_MAGIC
    ; phys address of scratch page
    mov     eax, dword [edx + CPUMCPU.Guest.dr + 4*8]
    mov     cr2, eax

    mov     dword [edx + CPUMCPU.Guest.dr + 4*8], 1
%endif

    ; general registers.
    mov     [edx + CPUMCPU.Host.ebx], ebx
    mov     [edx + CPUMCPU.Host.edi], edi
    mov     [edx + CPUMCPU.Host.esi], esi
    mov     [edx + CPUMCPU.Host.esp], esp
    mov     [edx + CPUMCPU.Host.ebp], ebp
    ; selectors.
    mov     [edx + CPUMCPU.Host.ds], ds
    mov     [edx + CPUMCPU.Host.es], es
    mov     [edx + CPUMCPU.Host.fs], fs
    mov     [edx + CPUMCPU.Host.gs], gs
    mov     [edx + CPUMCPU.Host.ss], ss
    ; special registers.
    sldt    [edx + CPUMCPU.Host.ldtr]
    sidt    [edx + CPUMCPU.Host.idtr]
    sgdt    [edx + CPUMCPU.Host.gdtr]
    str     [edx + CPUMCPU.Host.tr]

%ifdef VBOX_WITH_CRASHDUMP_MAGIC
    mov     dword [edx + CPUMCPU.Guest.dr + 4*8], 2
%endif

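; When VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI is set, mask every Local APIC LVT
; entry configured for NMI delivery (LINT0, LINT1, performance counter, and,
; if the APIC version reports one, the thermal sensor) so no NMI can arrive
; mid world switch. Which entries were masked is recorded as a bit mask in
; edi (bit 0: LINT0, bit 1: LINT1, bit 2: PC, bit 3: thermal) and stored in
; CPUM.fApicDisVectors for the return path. Each LVT write is read back to
; ensure it has completed.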
%ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
    CPUM_FROM_CPUMCPU_WITH_OFFSET edx, ebp
    mov     ebx, [edx + CPUM.pvApicBase]
    or      ebx, ebx
    jz      htg_noapic
    xor     edi, edi                    ; fApicDisVectors: nothing masked yet
    mov     eax, [ebx + APIC_REG_LVT_LINT0]
    mov     ecx, eax
    and     ecx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
    cmp     ecx, APIC_REG_LVT_MODE_NMI
    jne     htg_nolint0
    or      edi, 0x01
    or      eax, APIC_REG_LVT_MASKED
    mov     [ebx + APIC_REG_LVT_LINT0], eax
    mov     eax, [ebx + APIC_REG_LVT_LINT0] ; write completion
htg_nolint0:
    mov     eax, [ebx + APIC_REG_LVT_LINT1]
    mov     ecx, eax
    and     ecx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
    cmp     ecx, APIC_REG_LVT_MODE_NMI
    jne     htg_nolint1
    or      edi, 0x02
    or      eax, APIC_REG_LVT_MASKED
    mov     [ebx + APIC_REG_LVT_LINT1], eax
    mov     eax, [ebx + APIC_REG_LVT_LINT1] ; write completion
htg_nolint1:
    mov     eax, [ebx + APIC_REG_LVT_PC]
    mov     ecx, eax
    and     ecx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
    cmp     ecx, APIC_REG_LVT_MODE_NMI
    jne     htg_nopc
    or      edi, 0x04
    or      eax, APIC_REG_LVT_MASKED
    mov     [ebx + APIC_REG_LVT_PC], eax
    mov     eax, [ebx + APIC_REG_LVT_PC] ; write completion
htg_nopc:
    mov     eax, [ebx + APIC_REG_VERSION]
    shr     eax, 16
    cmp     al, 5                       ; thermal LVT only present if max LVT entry >= 5
    jb      htg_notherm
    mov     eax, [ebx + APIC_REG_LVT_THMR]
    mov     ecx, eax
    and     ecx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
    cmp     ecx, APIC_REG_LVT_MODE_NMI
    jne     htg_notherm
    or      edi, 0x08
    or      eax, APIC_REG_LVT_MASKED
    mov     [ebx + APIC_REG_LVT_THMR], eax
    mov     eax, [ebx + APIC_REG_LVT_THMR] ; write completion
htg_notherm:
    mov     [edx + CPUM.fApicDisVectors], edi
htg_noapic:
    CPUMCPU_FROM_CPUM_WITH_OFFSET edx, ebp
%endif

    ; control registers.
    mov     eax, cr0
    mov     [edx + CPUMCPU.Host.cr0], eax
    ; Skip cr2; assume the host OS doesn't stuff things in cr2. (safe)
    mov     eax, cr3
    mov     [edx + CPUMCPU.Host.cr3], eax
    mov     eax, cr4
    mov     [edx + CPUMCPU.Host.cr4], eax

    ; save the host EFER msr
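    ; (rdmsr returns the MSR value in edx:eax, so the CPUMCPU pointer in edx
    ; is parked in ebx across the read and restored afterwards.)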
    mov     ebx, edx
    mov     ecx, MSR_K6_EFER
    rdmsr
    mov     [ebx + CPUMCPU.Host.efer], eax
    mov     [ebx + CPUMCPU.Host.efer + 4], edx
    mov     edx, ebx

%ifdef VBOX_WITH_CRASHDUMP_MAGIC
    mov     dword [edx + CPUMCPU.Guest.dr + 4*8], 3
%endif

    ; Load the new GDT so we can do a far jump after going into 64-bit mode
    lgdt    [edx + CPUMCPU.Hyper.gdtr]

%ifdef VBOX_WITH_CRASHDUMP_MAGIC
    mov     dword [edx + CPUMCPU.Guest.dr + 4*8], 4
%endif

    ;;
    ;; Load Intermediate memory context.
    ;;
    FIXUP SWITCHER_FIX_INTER_CR3_HC, 1
    mov     eax, 0ffffffffh
    mov     cr3, eax
    DEBUG_CHAR('?')

    ;;
    ;; Jump to identity mapped location
    ;;
    FIXUP FIX_HC_2_ID_NEAR_REL, 1, NAME(IDEnterTarget) - NAME(Start)
    jmp near NAME(IDEnterTarget)


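; The numbered steps below follow the architecturally required sequence for
; entering long mode from 32-bit protected mode: paging off (CR0.PG=0), PAE
; on (CR4.PAE=1), a PML4 loaded into CR3, EFER.LME set, paging back on
; (which activates EFER.LMA), and finally a far jump through a 64-bit code
; selector to leave compatibility mode.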
    ; We're now on identity mapped pages!
ALIGNCODE(16)
GLOBALNAME IDEnterTarget
    DEBUG_CHAR('2')

    ; 1. Disable paging.
    mov     ebx, cr0
    and     ebx, ~X86_CR0_PG
    mov     cr0, ebx
    DEBUG_CHAR('2')

%ifdef VBOX_WITH_CRASHDUMP_MAGIC
    mov     eax, cr2
    mov     dword [eax], 3
%endif

    ; 2. Enable PAE.
    mov     ecx, cr4
    or      ecx, X86_CR4_PAE
    mov     cr4, ecx

    ; 3. Load long mode intermediate CR3.
    FIXUP FIX_INTER_AMD64_CR3, 1
    mov     ecx, 0ffffffffh
    mov     cr3, ecx
    DEBUG_CHAR('3')

%ifdef VBOX_WITH_CRASHDUMP_MAGIC
    mov     eax, cr2
    mov     dword [eax], 4
%endif

    ; 4. Enable long mode.
    mov     esi, edx
    mov     ecx, MSR_K6_EFER
    rdmsr
    FIXUP FIX_EFER_OR_MASK, 1
    or      eax, 0ffffffffh
    and     eax, ~(MSR_K6_EFER_FFXSR)   ; turn off fast fxsave/fxrstor (skipping xmm regs)
    wrmsr
    mov     edx, esi
    DEBUG_CHAR('4')

%ifdef VBOX_WITH_CRASHDUMP_MAGIC
    mov     eax, cr2
    mov     dword [eax], 5
%endif

    ; 5. Enable paging.
    or      ebx, X86_CR0_PG
    ; Disable ring 0 write protection too
    and     ebx, ~X86_CR0_WRITE_PROTECT
    mov     cr0, ebx
    DEBUG_CHAR('5')

    ; Jump from compatibility mode to 64-bit mode.
    FIXUP FIX_ID_FAR32_TO_64BIT_MODE, 1, NAME(IDEnter64Mode) - NAME(Start)
    jmp     0ffffh:0fffffffeh           ; selector:offset patched by the fixup above

    ;
    ; We're in 64-bit mode (ds, ss, es, fs, gs are all bogus).
BITS 64
ALIGNCODE(16)
NAME(IDEnter64Mode):
    DEBUG_CHAR('6')
    jmp     [NAME(pICEnterTarget) wrt rip]

; 64-bit jump target
NAME(pICEnterTarget):
FIXUP FIX_HC_64BIT_NOCHECK, 0, NAME(ICEnterTarget) - NAME(Start)
dq 0ffffffffffffffffh

; 64-bit pCpum address.
NAME(pCpumIC):
FIXUP FIX_GC_64_BIT_CPUM_OFF, 0, 0
dq 0ffffffffffffffffh

%ifdef VBOX_WITH_CRASHDUMP_MAGIC
NAME(pMarker):
db 'Switch_marker'
%endif

    ;
    ; When we arrive here we're in 64-bit mode in the intermediate context
    ;
ALIGNCODE(16)
GLOBALNAME ICEnterTarget
    ; Load CPUM pointer into rdx
    mov     rdx, [NAME(pCpumIC) wrt rip]
    CPUMCPU_FROM_CPUM_WITH_OFFSET edx, ebp

    mov     rax, cs
    mov     ds, rax
    mov     es, rax

    ; Invalidate fs & gs
    mov     rax, 0
    mov     fs, rax
    mov     gs, rax

%ifdef VBOX_WITH_CRASHDUMP_MAGIC
    mov     dword [rdx + CPUMCPU.Guest.dr + 4*8], 5
%endif

    ; Set up the stack; use the lss_esp, ss pair for lss
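    ; (lss loads SS:ESP in one operation from an m16:32 far pointer in
    ; memory; as the 'pair' above implies, Hyper.lss_esp and the adjacent
    ; Hyper.ss field together form that far pointer, which is why esp is
    ; first staged into Hyper.lss_esp.)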
    DEBUG_CHAR('7')
    mov     rsp, 0
    mov     eax, [rdx + CPUMCPU.Hyper.esp]
    mov     [rdx + CPUMCPU.Hyper.lss_esp], eax
    lss     esp, [rdx + CPUMCPU.Hyper.lss_esp]

%ifdef VBOX_WITH_CRASHDUMP_MAGIC
    mov     dword [rdx + CPUMCPU.Guest.dr + 4*8], 6
%endif


    ; load the hypervisor function address
    mov     r9, [rdx + CPUMCPU.Hyper.eip]

    ; Check if we need to restore the guest FPU state
    mov     esi, [rdx + CPUMCPU.fUseFlags] ; esi == use flags.
    test    esi, CPUM_SYNC_FPU_STATE
    jz near gth_fpu_no

%ifdef VBOX_WITH_CRASHDUMP_MAGIC
    mov     dword [rdx + CPUMCPU.Guest.dr + 4*8], 7
%endif

    mov     rax, cr0
    mov     rcx, rax                    ; save old CR0
    and     rax, ~(X86_CR0_TS | X86_CR0_EM) ; clear TS and EM so fxrstor won't fault
    mov     cr0, rax
    fxrstor [rdx + CPUMCPU.Guest.fpu]
    mov     cr0, rcx                    ; and restore old CR0 again

    and     dword [rdx + CPUMCPU.fUseFlags], ~CPUM_SYNC_FPU_STATE

gth_fpu_no:
    ; Check if we need to restore the guest debug state
    test    esi, CPUM_SYNC_DEBUG_STATE
    jz near gth_debug_no

%ifdef VBOX_WITH_CRASHDUMP_MAGIC
    mov     dword [rdx + CPUMCPU.Guest.dr + 4*8], 8
%endif

    mov     rax, qword [rdx + CPUMCPU.Guest.dr + 0*8]
    mov     dr0, rax
    mov     rax, qword [rdx + CPUMCPU.Guest.dr + 1*8]
    mov     dr1, rax
    mov     rax, qword [rdx + CPUMCPU.Guest.dr + 2*8]
    mov     dr2, rax
    mov     rax, qword [rdx + CPUMCPU.Guest.dr + 3*8]
    mov     dr3, rax
    mov     rax, qword [rdx + CPUMCPU.Guest.dr + 6*8]
    mov     dr6, rax                    ; not required for AMD-V

    and     dword [rdx + CPUMCPU.fUseFlags], ~CPUM_SYNC_DEBUG_STATE

gth_debug_no:

%ifdef VBOX_WITH_CRASHDUMP_MAGIC
    mov     dword [rdx + CPUMCPU.Guest.dr + 4*8], 9
%endif

    ; parameter for all helper functions (pCtx)
    lea     rsi, [rdx + CPUMCPU.Guest.fpu]
    call    r9

    ; Load CPUM pointer into rdx
    mov     rdx, [NAME(pCpumIC) wrt rip]
    CPUMCPU_FROM_CPUM_WITH_OFFSET edx, ebp

%ifdef VBOX_WITH_CRASHDUMP_MAGIC
    mov     dword [rdx + CPUMCPU.Guest.dr + 4*8], 10
%endif

    ; Save the return code
    mov     dword [rdx + CPUMCPU.u32RetCode], eax

    ; now let's switch back
    jmp     NAME(VMMGCGuestToHostAsm)   ; rax = returncode.

ENDPROC vmmR0HostToGuestAsm


;;
; Trampoline for doing a call when starting the hypervisor execution.
;
; Push any arguments to the routine.
; Push the argument frame size (cArg * 4).
; Push the call target (_cdecl convention).
; Push the address of this routine.
;
;
BITS 64
ALIGNCODE(16)
BEGINPROC vmmGCCallTrampoline
%ifdef DEBUG_STUFF
    COM64_S_CHAR 'c'
    COM64_S_CHAR 't'
    COM64_S_CHAR '!'
%endif
    int3
ENDPROC vmmGCCallTrampoline
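; For illustration only: this switcher deliberately traps (int3) rather than
; implementing the trampoline, but a body honouring the stack layout
; documented above might plausibly look like this editor's sketch (not
; VirtualBox code):
;       pop     rax                     ; call target (_cdecl)
;       pop     rcx                     ; argument frame size
;       call    rax                     ; invoke; arguments still on the stack
;       add     rsp, rcx                ; drop the argument frame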


;;
; The C interface.
;
BITS 64
ALIGNCODE(16)
BEGINPROC vmmGCGuestToHost
%ifdef DEBUG_STUFF
    push    rsi
    COM_NEWLINE
    DEBUG_CHAR('b')
    DEBUG_CHAR('a')
    DEBUG_CHAR('c')
    DEBUG_CHAR('k')
    DEBUG_CHAR('!')
    COM_NEWLINE
    pop     rsi
%endif
    int3
ENDPROC vmmGCGuestToHost

;;
; VMMGCGuestToHostAsm
;
; This is an alternative entry point which we'll be using
; when we have already saved the guest state or haven't
; been messing with the guest at all.
;
; @param    eax     Return code.
; @uses     eax, edx, ecx (or it may use them in the future)
;
BITS 64
ALIGNCODE(16)
BEGINPROC VMMGCGuestToHostAsm
    ;; We're still in the intermediate memory context!

    ;;
    ;; Switch to compatibility mode, placing ourselves in identity mapped code.
    ;;
    jmp far [NAME(fpIDEnterTarget) wrt rip]

; 16:32 Pointer to IDExitTarget.
NAME(fpIDEnterTarget):
    FIXUP FIX_ID_32BIT, 0, NAME(IDExitTarget) - NAME(Start)
dd 0
    FIXUP FIX_HYPER_CS, 0
dd 0

    ; We're now on identity mapped pages!
ALIGNCODE(16)
GLOBALNAME IDExitTarget
BITS 32
    DEBUG_CHAR('1')

    ; 1. Deactivate long mode by turning off paging.
    mov     ebx, cr0
    and     ebx, ~X86_CR0_PG
    mov     cr0, ebx
    DEBUG_CHAR('2')

    ; 2. Load intermediate page table.
    FIXUP SWITCHER_FIX_INTER_CR3_HC, 1
    mov     edx, 0ffffffffh
    mov     cr3, edx
    DEBUG_CHAR('3')

    ; 3. Disable long mode.
    mov     ecx, MSR_K6_EFER
    rdmsr
    DEBUG_CHAR('5')
    and     eax, ~(MSR_K6_EFER_LME)
    wrmsr
    DEBUG_CHAR('6')

%ifndef NEED_PAE_ON_HOST
    ; 3b. Disable PAE.
    mov     eax, cr4
    and     eax, ~X86_CR4_PAE
    mov     cr4, eax
    DEBUG_CHAR('7')
%endif

    ; 4. Enable paging.
    or      ebx, X86_CR0_PG
    mov     cr0, ebx
    jmp short just_a_jump               ; serializing jump after the paging change
just_a_jump:
    DEBUG_CHAR('8')

    ;;
    ;; 5. Jump to guest code mapping of the code and load the Hypervisor CS.
    ;;
    FIXUP FIX_ID_2_HC_NEAR_REL, 1, NAME(ICExitTarget) - NAME(Start)
    jmp near NAME(ICExitTarget)

    ;;
    ;; When we arrive at this label we're at the
    ;; intermediate mapping of the switching code.
    ;;
BITS 32
ALIGNCODE(16)
GLOBALNAME ICExitTarget
    DEBUG_CHAR('8')

    ; load the hypervisor data selector into ds & es
    FIXUP FIX_HYPER_DS, 1
    mov     eax, 0ffffh
    mov     ds, eax
    mov     es, eax

    FIXUP FIX_GC_CPUM_OFF, 1, 0
    mov     edx, 0ffffffffh
    CPUMCPU_FROM_CPUM_WITH_OFFSET edx, ebp
    mov     esi, [edx + CPUMCPU.Host.cr3]
    mov     cr3, esi

    ;; now we're in host memory context, let's restore regs
    FIXUP FIX_HC_CPUM_OFF, 1, 0
    mov     edx, 0ffffffffh
    CPUMCPU_FROM_CPUM_WITH_OFFSET edx, ebp

    ; restore the host EFER
    mov     ebx, edx
    mov     ecx, MSR_K6_EFER
    mov     eax, [ebx + CPUMCPU.Host.efer]
    mov     edx, [ebx + CPUMCPU.Host.efer + 4]
    wrmsr
    mov     edx, ebx

    ; activate host gdt and idt
    lgdt    [edx + CPUMCPU.Host.gdtr]
    DEBUG_CHAR('0')
    lidt    [edx + CPUMCPU.Host.idtr]
    DEBUG_CHAR('1')

    ; Restore TSS selector; must mark it as not busy before using ltr,
    ; since ltr on a busy TSS descriptor raises #GP.
    ; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p)
    movzx   eax, word [edx + CPUMCPU.Host.tr]   ; eax <- TR
    and     al, 0F8h                            ; mask away TI and RPL bits, get descriptor offset.
    add     eax, [edx + CPUMCPU.Host.gdtr + 2]  ; eax <- GDTR.address + descriptor offset.
    and     dword [eax + 4], ~0200h             ; clear busy flag (2nd type2 bit)
    ltr     word [edx + CPUMCPU.Host.tr]

    ; activate ldt
    DEBUG_CHAR('2')
    lldt    [edx + CPUMCPU.Host.ldtr]

    ; Restore segment registers
    mov     eax, [edx + CPUMCPU.Host.ds]
    mov     ds, eax
    mov     eax, [edx + CPUMCPU.Host.es]
    mov     es, eax
    mov     eax, [edx + CPUMCPU.Host.fs]
    mov     fs, eax
    mov     eax, [edx + CPUMCPU.Host.gs]
    mov     gs, eax
    ; restore stack
    lss     esp, [edx + CPUMCPU.Host.esp]

    ; Control registers.
    mov     ecx, [edx + CPUMCPU.Host.cr4]
    mov     cr4, ecx
    mov     ecx, [edx + CPUMCPU.Host.cr0]
    mov     cr0, ecx
    ;mov     ecx, [edx + CPUMCPU.Host.cr2]      ; assumes this is a waste of time.
    ;mov     cr2, ecx

%ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
    ; Restore the Local APIC NMI vectors blocked on entry; each shr moves the
    ; next fApicDisVectors bit into CF, in the order the entry path set them.
    CPUM_FROM_CPUMCPU_WITH_OFFSET edx, ebp
    mov     ebx, [edx + CPUM.pvApicBase]
    mov     ecx, [edx + CPUM.fApicDisVectors]
    CPUMCPU_FROM_CPUM_WITH_OFFSET edx, ebp
    shr     ecx, 1
    jnc     gth_nolint0
    and     dword [ebx + APIC_REG_LVT_LINT0], ~APIC_REG_LVT_MASKED
gth_nolint0:
    shr     ecx, 1
    jnc     gth_nolint1
    and     dword [ebx + APIC_REG_LVT_LINT1], ~APIC_REG_LVT_MASKED
gth_nolint1:
    shr     ecx, 1
    jnc     gth_nopc
    and     dword [ebx + APIC_REG_LVT_PC], ~APIC_REG_LVT_MASKED
gth_nopc:
    shr     ecx, 1
    jnc     gth_notherm
    and     dword [ebx + APIC_REG_LVT_THMR], ~APIC_REG_LVT_MASKED
gth_notherm:
%endif

    ; restore general registers.
    mov     edi, [edx + CPUMCPU.Host.edi]
    mov     esi, [edx + CPUMCPU.Host.esi]
    mov     ebx, [edx + CPUMCPU.Host.ebx]
    mov     ebp, [edx + CPUMCPU.Host.ebp]

    ; store the return code in eax
    mov     eax, [edx + CPUMCPU.u32RetCode]
    retf
ENDPROC VMMGCGuestToHostAsm

;;
; VMMGCGuestToHostAsmHyperCtx
;
; This is an alternative entry point which we'll be using
; when we have the hypervisor context and need to save
; that before going to the host.
;
; This is typically useful when abandoning the hypervisor
; because of a trap, and we want the trap state saved.
;
; @param    eax     Return code.
; @param    ecx     Points to CPUMCTXCORE.
; @uses     eax, edx, ecx
ALIGNCODE(16)
BEGINPROC VMMGCGuestToHostAsmHyperCtx
    int3

;;
; VMMGCGuestToHostAsmGuestCtx
;
; Switches from Guest Context to Host Context.
; Of course it's only called from within the GC.
;
; @param    eax     Return code.
; @param    esp + 4 Pointer to CPUMCTXCORE.
;
; @remark   ASSUMES interrupts disabled.
;
ALIGNCODE(16)
BEGINPROC VMMGCGuestToHostAsmGuestCtx
    int3

GLOBALNAME End
;
; The description string (in the text section).
;
NAME(Description):
    db SWITCHER_DESCRIPTION
    db 0

extern NAME(Relocate)

;
; End the fixup records.
;
BEGINDATA
    db FIX_THE_END                      ; final entry.
GLOBALNAME FixupsEnd

;;
; The switcher definition structure.
ALIGNDATA(16)
GLOBALNAME Def
    istruc VMMSWITCHERDEF
        at VMMSWITCHERDEF.pvCode,                       RTCCPTR_DEF NAME(Start)
        at VMMSWITCHERDEF.pvFixups,                     RTCCPTR_DEF NAME(Fixups)
        at VMMSWITCHERDEF.pszDesc,                      RTCCPTR_DEF NAME(Description)
        at VMMSWITCHERDEF.pfnRelocate,                  RTCCPTR_DEF NAME(Relocate)
        at VMMSWITCHERDEF.enmType,                      dd SWITCHER_TYPE
        at VMMSWITCHERDEF.cbCode,                       dd NAME(End)                         - NAME(Start)
        at VMMSWITCHERDEF.offR0HostToGuest,             dd NAME(vmmR0HostToGuest)            - NAME(Start)
        at VMMSWITCHERDEF.offGCGuestToHost,             dd NAME(vmmGCGuestToHost)            - NAME(Start)
        at VMMSWITCHERDEF.offGCCallTrampoline,          dd NAME(vmmGCCallTrampoline)         - NAME(Start)
        at VMMSWITCHERDEF.offGCGuestToHostAsm,          dd NAME(VMMGCGuestToHostAsm)         - NAME(Start)
        at VMMSWITCHERDEF.offGCGuestToHostAsmHyperCtx,  dd NAME(VMMGCGuestToHostAsmHyperCtx) - NAME(Start)
        at VMMSWITCHERDEF.offGCGuestToHostAsmGuestCtx,  dd NAME(VMMGCGuestToHostAsmGuestCtx) - NAME(Start)
        ; disasm help
        at VMMSWITCHERDEF.offHCCode0,                   dd 0
        at VMMSWITCHERDEF.cbHCCode0,                    dd NAME(IDEnterTarget)               - NAME(Start)
        at VMMSWITCHERDEF.offHCCode1,                   dd NAME(ICExitTarget)                - NAME(Start)
        at VMMSWITCHERDEF.cbHCCode1,                    dd NAME(End)                         - NAME(ICExitTarget)
        at VMMSWITCHERDEF.offIDCode0,                   dd NAME(IDEnterTarget)               - NAME(Start)
        at VMMSWITCHERDEF.cbIDCode0,                    dd NAME(ICEnterTarget)               - NAME(IDEnterTarget)
        at VMMSWITCHERDEF.offIDCode1,                   dd NAME(IDExitTarget)                - NAME(Start)
        at VMMSWITCHERDEF.cbIDCode1,                    dd NAME(ICExitTarget)                - NAME(IDExitTarget)
        at VMMSWITCHERDEF.offGCCode,                    dd 0
        at VMMSWITCHERDEF.cbGCCode,                     dd 0

    iend