VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMSwitcher/LegacyandAMD64.mac@ 53949

Last change on this file since 53949 was 53835, checked in by vboxsync, 10 years ago

VMMSwitcher: also mask NMI in APIC_REG_LVT_CMCI

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 50.4 KB
1; $Id: LegacyandAMD64.mac 53835 2015-01-15 20:39:49Z vboxsync $
2;; @file
3; VMM - World Switchers, 32-bit to AMD64 intermediate context.
4;
5; This is used for running 64-bit guests on 32-bit hosts, not
6; normal raw-mode. All the code involved is contained in this
7; file.
8;
9
10;
11; Copyright (C) 2006-2013 Oracle Corporation
12;
13; This file is part of VirtualBox Open Source Edition (OSE), as
14; available from http://www.virtualbox.org. This file is free software;
15; you can redistribute it and/or modify it under the terms of the GNU
16; General Public License (GPL) as published by the Free Software
17; Foundation, in version 2 as it comes in the "COPYING" file of the
18; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
19; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
20;
21
22
23;*******************************************************************************
24;* Defined Constants And Macros *
25;*******************************************************************************
26;; @note These values are from the HM64ON32OP enum in hm.h.
27%define HM64ON32OP_VMXRCStartVM64 1
28%define HM64ON32OP_SVMRCVMRun64 2
29%define HM64ON32OP_HMRCSaveGuestFPU64 3
30%define HM64ON32OP_HMRCSaveGuestDebug64 4
31%define HM64ON32OP_HMRCTestSwitcher64 5
32
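For orientation, the same operation codes expressed as a C enum (a sketch mirroring the %define list above; the authoritative definition is the HM64ON32OP enum in hm.h):

/* Selector values loaded into r9 and dispatched by the cmp r9d, ... chain
 * in vmmR0ToRawModeAsm further down. */
typedef enum HM64ON32OP
{
    HM64ON32OP_VMXRCStartVM64       = 1,
    HM64ON32OP_SVMRCVMRun64         = 2,
    HM64ON32OP_HMRCSaveGuestFPU64   = 3,
    HM64ON32OP_HMRCSaveGuestDebug64 = 4,
    HM64ON32OP_HMRCTestSwitcher64   = 5
} HM64ON32OP;
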
33;;
34; This macro is used for storing a debug code in a CMOS location.
35;
36; If we triple fault or something, the debug code can be retrieved and we
37; might have a clue as to where the problem occurred. The code is currently
38; using CMOS register 3 in the 2nd bank as this _seems_ to be unused on my
39; ASRock X79 Extreme4 mainboard.
40;
41; @param %1 The debug code (byte)
42; @note Trashes AL.
43;
44%macro DEBUG_CMOS_TRASH_AL 1
45%ifdef VBOX_WITH_64ON32_CMOS_DEBUG
46 mov al, 3
47 out 72h, al
48 mov al, %1
49 out 73h, al
50 in al, 73h
51%endif
52%endmacro
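
As a user-space illustration of the same port protocol (a sketch with assumptions: Linux on x86, root privileges, glibc's <sys/io.h>; the switcher itself does this from ring 0):

/* Write a debug byte to register 3 of the second CMOS bank, the way
 * DEBUG_CMOS_TRASH_AL does: select the register through port 0x72, write
 * the value through port 0x73, then read it back to settle the write. */
#include <stdio.h>
#include <sys/io.h>                     /* ioperm, outb, inb */

static void cmos_debug_write(unsigned char bCode)
{
    outb(0x03, 0x72);                   /* select CMOS bank 2, register 3 */
    outb(bCode, 0x73);                  /* store the debug code */
    (void)inb(0x73);                    /* read back, as the macro does */
}

int main(void)
{
    if (ioperm(0x70, 8, 1) != 0)        /* gain access to ports 0x70..0x77 */
    {
        perror("ioperm");
        return 1;
    }
    cmos_debug_write(0x42);             /* arbitrary marker value */
    return 0;
}
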
53
54;;
55; Version of DEBUG_CMOS_TRASH_AL that saves AL on the stack and therefore
56; doesn't trash any registers.
57;
58%macro DEBUG_CMOS_STACK64 1
59%ifdef VBOX_WITH_64ON32_CMOS_DEBUG
60 push rax
61 DEBUG_CMOS_TRASH_AL %1
62 pop rax
63%endif
64%endmacro
65
66;;
67; Version of DEBUG_CMOS_TRASH_AL that saves AL on the stack and therefore
68; doesn't trash any registers.
69;
70%macro DEBUG_CMOS_STACK32 1
71%ifdef VBOX_WITH_64ON32_CMOS_DEBUG
72 push eax
73 DEBUG_CMOS_TRASH_AL %1
74 pop eax
75%endif
76%endmacro
77
78
79;; Stubs for making OS/2 compile (though not work).
80%ifdef RT_OS_OS2 ;; @todo fix OMF support in yasm and kick nasm out completely.
81 %macro vmwrite 2,
82 int3
83 %endmacro
84 %define vmlaunch int3
85 %define vmresume int3
86 %define vmsave int3
87 %define vmload int3
88 %define vmrun int3
89 %define clgi int3
90 %define stgi int3
91 %macro invlpga 2,
92 int3
93 %endmacro
94%endif
95
96;; Debug options
97;%define DEBUG_STUFF 1
98;%define STRICT_IF 1
99
100
101;*******************************************************************************
102;* Header Files *
103;*******************************************************************************
104%include "VBox/asmdefs.mac"
105%include "iprt/x86.mac"
106%include "VBox/err.mac"
107%include "VBox/apic.mac"
108
109%include "VBox/vmm/cpum.mac"
110%include "VBox/vmm/stam.mac"
111%include "VBox/vmm/vm.mac"
112%include "VBox/vmm/hm_vmx.mac"
113%include "CPUMInternal.mac"
114%include "HMInternal.mac"
115%include "VMMSwitcher.mac"
116
117
118;
119; Start the fixup records
120; We collect the fixups in the .data section as we go along
121; It is therefore VITAL that no-one is using the .data section
122; for anything else between 'Start' and 'End'.
123;
124BEGINDATA
125GLOBALNAME Fixups
126
127
128
129BEGINCODE
130GLOBALNAME Start
131
132BITS 32
133
134;;
135; The C interface.
136; @param [esp + 04h] Param 1 - VM handle
137; @param [esp + 08h] Param 2 - Offset from VM::CPUM to the CPUMCPU
138; structure for the calling EMT.
139;
140BEGINPROC vmmR0ToRawMode
141%ifdef DEBUG_STUFF
142 COM32_S_NEWLINE
143 COM32_S_CHAR '^'
144%endif
145
146%ifdef VBOX_WITH_STATISTICS
147 ;
148 ; Switcher stats.
149 ;
150 FIXUP FIX_HC_VM_OFF, 1, VM.StatSwitcherToGC
151 mov edx, 0ffffffffh
152 STAM_PROFILE_ADV_START edx
153%endif
154
155 push ebp
156 mov ebp, [esp + 12] ; CPUMCPU offset
157
158 ; turn off interrupts
159 pushf
160 cli
161 ;DEBUG_CMOS_STACK32 10h
162
163 ;
164 ; Call worker.
165 ;
166 FIXUP FIX_HC_CPUM_OFF, 1, 0
167 mov edx, 0ffffffffh
168 push cs ; allow for far return and restore cs correctly.
169 call NAME(vmmR0ToRawModeAsm)
170
171%ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
172 ; Restore blocked Local APIC NMI vectors
173 ; Do this here to ensure the host CS is already restored
174 mov ecx, [edx + CPUMCPU.fApicDisVectors]
175 test ecx, ecx
176 jz gth_apic_done
177 cmp byte [edx + CPUMCPU.fX2Apic], 1
178 je gth_x2apic
179
180 mov edx, [edx + CPUMCPU.pvApicBase]
181 shr ecx, 1
182 jnc gth_nolint0
183 and dword [edx + APIC_REG_LVT_LINT0], ~APIC_REG_LVT_MASKED
184gth_nolint0:
185 shr ecx, 1
186 jnc gth_nolint1
187 and dword [edx + APIC_REG_LVT_LINT1], ~APIC_REG_LVT_MASKED
188gth_nolint1:
189 shr ecx, 1
190 jnc gth_nopc
191 and dword [edx + APIC_REG_LVT_PC], ~APIC_REG_LVT_MASKED
192gth_nopc:
193 shr ecx, 1
194 jnc gth_notherm
195 and dword [edx + APIC_REG_LVT_THMR], ~APIC_REG_LVT_MASKED
196gth_notherm:
197 shr ecx, 1
198 jnc gth_nocmci
199 and dword [edx + APIC_REG_LVT_CMCI], ~APIC_REG_LVT_MASKED
200gth_nocmci:
201 jmp gth_apic_done
202
203gth_x2apic:
204 ;DEBUG_CMOS_STACK32 7ch
205 push eax ; save eax
206 push ebx ; save it for fApicDisVectors
207 push edx ; save edx just in case.
208 mov ebx, ecx ; ebx = fApicDisVectors, ecx free for MSR use
209 shr ebx, 1
210 jnc gth_x2_nolint0
211 mov ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_LINT0 >> 4)
212 rdmsr
213 and eax, ~APIC_REG_LVT_MASKED
214 wrmsr
215gth_x2_nolint0:
216 shr ebx, 1
217 jnc gth_x2_nolint1
218 mov ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_LINT1 >> 4)
219 rdmsr
220 and eax, ~APIC_REG_LVT_MASKED
221 wrmsr
222gth_x2_nolint1:
223 shr ebx, 1
224 jnc gth_x2_nopc
225 mov ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_PC >> 4)
226 rdmsr
227 and eax, ~APIC_REG_LVT_MASKED
228 wrmsr
229gth_x2_nopc:
230 shr ebx, 1
231 jnc gth_x2_notherm
232 mov ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_THMR >> 4)
233 rdmsr
234 and eax, ~APIC_REG_LVT_MASKED
235 wrmsr
236gth_x2_notherm:
237 shr ebx, 1
238 jnc gth_x2_nocmci
239 mov ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_CMCI >> 4)
240 rdmsr
241 and eax, ~APIC_REG_LVT_MASKED
242 wrmsr
243gth_x2_nocmci:
244 pop edx
245 pop ebx
246 pop eax
247
248gth_apic_done:
249%endif
250
251 ; restore original flags
252 ;DEBUG_CMOS_STACK32 7eh
253 popf
254 pop ebp
255
256%ifdef VBOX_WITH_STATISTICS
257 ;
258 ; Switcher stats.
259 ;
260 FIXUP FIX_HC_VM_OFF, 1, VM.StatSwitcherToHC
261 mov edx, 0ffffffffh
262 STAM_PROFILE_ADV_STOP edx
263%endif
264
265 ;DEBUG_CMOS_STACK32 7fh
266 ret
267
268ENDPROC vmmR0ToRawMode
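
The LVT bookkeeping works the same way in both directions: on the way in (the htg_* labels in vmmR0ToRawModeAsm below) every local APIC LVT entry programmed for NMI delivery is masked and a bit is recorded in CPUMCPU.fApicDisVectors; on the way out (the gth_* labels above) exactly those entries are unmasked again. In x2APIC mode the same registers are reached through MSRs, which is what the MSR_IA32_X2APIC_START + (offset >> 4) expressions compute. A runnable C sketch of the bookkeeping, using an in-memory page in place of the real MMIO-mapped APIC (register offsets and mask/mode bits are architectural; the fApicDisVectors bit assignments match the code):

#include <stdint.h>
#include <stdio.h>

#define APIC_REG_LVT_CMCI       0x2f0
#define APIC_REG_LVT_THMR       0x330
#define APIC_REG_LVT_PC         0x340
#define APIC_REG_LVT_LINT0      0x350
#define APIC_REG_LVT_LINT1      0x360
#define APIC_REG_LVT_MASKED     (1u << 16)      /* bit 16: delivery masked */
#define APIC_REG_LVT_MODE_MASK  0x700u          /* bits 8..10: delivery mode */
#define APIC_REG_LVT_MODE_NMI   0x400u          /* delivery mode 100b = NMI */

static const struct { uint32_t fBit; uint32_t offReg; } g_aLvts[] =
{   /* same bit assignments as the 'or edi, ...' lines in the switcher */
    { 0x01, APIC_REG_LVT_LINT0 },
    { 0x02, APIC_REG_LVT_LINT1 },
    { 0x04, APIC_REG_LVT_PC    },
    { 0x08, APIC_REG_LVT_THMR  },
    { 0x10, APIC_REG_LVT_CMCI  },
};

static uint32_t *apicReg(uint32_t *pu32Apic, uint32_t off)
{
    return &pu32Apic[off / 4];
}

/* Entry side: mask every LVT currently set up for NMI delivery and return
 * the fApicDisVectors bit set.  (The real code additionally checks the APIC
 * version register to see whether the thermal and CMCI LVTs exist at all.) */
static uint32_t apicMaskNmiLvts(uint32_t *pu32Apic)
{
    uint32_t fDisabled = 0;
    for (unsigned i = 0; i < sizeof(g_aLvts) / sizeof(g_aLvts[0]); i++)
    {
        uint32_t uLvt = *apicReg(pu32Apic, g_aLvts[i].offReg);
        if ((uLvt & (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)) == APIC_REG_LVT_MODE_NMI)
        {
            *apicReg(pu32Apic, g_aLvts[i].offReg) = uLvt | APIC_REG_LVT_MASKED;
            fDisabled |= g_aLvts[i].fBit;
        }
    }
    return fDisabled;
}

/* Exit side (the gth_* code above): unmask exactly what we masked. */
static void apicRestoreNmiLvts(uint32_t *pu32Apic, uint32_t fDisabled)
{
    for (unsigned i = 0; i < sizeof(g_aLvts) / sizeof(g_aLvts[0]); i++)
        if (fDisabled & g_aLvts[i].fBit)
            *apicReg(pu32Apic, g_aLvts[i].offReg) &= ~APIC_REG_LVT_MASKED;
}

int main(void)
{
    static uint32_t au32FakeApic[0x400 / 4];
    *apicReg(au32FakeApic, APIC_REG_LVT_PC) = APIC_REG_LVT_MODE_NMI | 0xfe;
    uint32_t fDis = apicMaskNmiLvts(au32FakeApic);
    printf("fApicDisVectors = %#x\n", fDis);    /* prints 0x4 (performance counter LVT) */
    apicRestoreNmiLvts(au32FakeApic, fDis);
    return 0;
}
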
269
270; *****************************************************************************
271; vmmR0ToRawModeAsm
272;
273; Phase one of the switch from host to guest context (host MMU context)
274;
275; INPUT:
276; - edx virtual address of CPUM structure (valid in host context)
277; - ebp offset of the CPUMCPU structure relative to CPUM.
278;
279; USES/DESTROYS:
280; - eax, ecx, edx, esi
281;
282; ASSUMPTION:
283; - current CS and DS selectors are wide open
284;
285; *****************************************************************************
286ALIGNCODE(16)
287BEGINPROC vmmR0ToRawModeAsm
288 ;;
289 ;; Save CPU host context
290 ;; Skip eax, edx and ecx as these are not preserved over calls.
291 ;;
292 CPUMCPU_FROM_CPUM_WITH_OFFSET edx, ebp
293%ifdef VBOX_WITH_CRASHDUMP_MAGIC
294 ; phys address of scratch page
295 mov eax, dword [edx + CPUMCPU.Guest.dr + 4*8]
296 mov cr2, eax
297
298 mov dword [edx + CPUMCPU.Guest.dr + 4*8], 1
299%endif
300
301 ; general registers.
302 mov [edx + CPUMCPU.Host.ebx], ebx
303 mov [edx + CPUMCPU.Host.edi], edi
304 mov [edx + CPUMCPU.Host.esi], esi
305 mov [edx + CPUMCPU.Host.esp], esp
306 mov [edx + CPUMCPU.Host.ebp], ebp ; offCpumCpu!
307 ; selectors.
308 mov [edx + CPUMCPU.Host.ds], ds
309 mov [edx + CPUMCPU.Host.es], es
310 mov [edx + CPUMCPU.Host.fs], fs
311 mov [edx + CPUMCPU.Host.gs], gs
312 mov [edx + CPUMCPU.Host.ss], ss
313 ; special registers.
314 DEBUG32_S_CHAR('s')
315 DEBUG32_S_CHAR(';')
316 sldt [edx + CPUMCPU.Host.ldtr]
317 sidt [edx + CPUMCPU.Host.idtr]
318 sgdt [edx + CPUMCPU.Host.gdtr]
319 str [edx + CPUMCPU.Host.tr]
320
321%ifdef VBOX_WITH_CRASHDUMP_MAGIC
322 mov dword [edx + CPUMCPU.Guest.dr + 4*8], 2
323%endif
324
325%ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
326 DEBUG32_S_CHAR('f')
327 DEBUG32_S_CHAR(';')
328 cmp byte [edx + CPUMCPU.fX2Apic], 1 ; running in x2APIC mode?
329 je htg_x2apic
330
331 mov ebx, [edx + CPUMCPU.pvApicBase]
332 or ebx, ebx
333 jz htg_apic_done
334 mov eax, [ebx + APIC_REG_LVT_LINT0]
335 mov ecx, eax
336 and ecx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
337 cmp ecx, APIC_REG_LVT_MODE_NMI
338 jne htg_nolint0
339 or edi, 0x01
340 or eax, APIC_REG_LVT_MASKED
341 mov [ebx + APIC_REG_LVT_LINT0], eax
342 mov eax, [ebx + APIC_REG_LVT_LINT0] ; write completion
343htg_nolint0:
344 mov eax, [ebx + APIC_REG_LVT_LINT1]
345 mov ecx, eax
346 and ecx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
347 cmp ecx, APIC_REG_LVT_MODE_NMI
348 jne htg_nolint1
349 or edi, 0x02
350 or eax, APIC_REG_LVT_MASKED
351 mov [ebx + APIC_REG_LVT_LINT1], eax
352 mov eax, [ebx + APIC_REG_LVT_LINT1] ; write completion
353htg_nolint1:
354 mov eax, [ebx + APIC_REG_LVT_PC]
355 mov ecx, eax
356 and ecx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
357 cmp ecx, APIC_REG_LVT_MODE_NMI
358 jne htg_nopc
359 or edi, 0x04
360 or eax, APIC_REG_LVT_MASKED
361 mov [ebx + APIC_REG_LVT_PC], eax
362 mov eax, [ebx + APIC_REG_LVT_PC] ; write completion
363htg_nopc:
364 mov eax, [ebx + APIC_REG_VERSION]
365 shr eax, 16
366 cmp al, 5
367 jb htg_notherm
368 je htg_nocmci
369 mov eax, [ebx + APIC_REG_LVT_CMCI]
370 mov ecx, eax
371 and ecx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
372 cmp ecx, APIC_REG_LVT_MODE_NMI
373 jne htg_nocmci
374 or edi, 0x10
375 or eax, APIC_REG_LVT_MASKED
376 mov [ebx + APIC_REG_LVT_CMCI], eax
377 mov eax, [ebx + APIC_REG_LVT_CMCI] ; write completion
378htg_nocmci:
379 mov eax, [ebx + APIC_REG_LVT_THMR]
380 mov ecx, eax
381 and ecx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
382 cmp ecx, APIC_REG_LVT_MODE_NMI
383 jne htg_notherm
384 or edi, 0x08
385 or eax, APIC_REG_LVT_MASKED
386 mov [ebx + APIC_REG_LVT_THMR], eax
387 mov eax, [ebx + APIC_REG_LVT_THMR] ; write completion
388htg_notherm:
389 mov [edx + CPUMCPU.fApicDisVectors], edi
390 jmp htg_apic_done
391
392htg_x2apic:
393 mov esi, edx ; Save edx.
394 xor edi, edi ; fApicDisVectors
395
396 mov ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_LINT0 >> 4)
397 rdmsr
398 mov ebx, eax
399 and ebx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
400 cmp ebx, APIC_REG_LVT_MODE_NMI
401 jne htg_x2_nolint0
402 or edi, 0x01
403 or eax, APIC_REG_LVT_MASKED
404 wrmsr
405htg_x2_nolint0:
406 mov ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_LINT1 >> 4)
407 rdmsr
408 mov ebx, eax
409 and ebx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
410 cmp ebx, APIC_REG_LVT_MODE_NMI
411 jne htg_x2_nolint1
412 or edi, 0x02
413 or eax, APIC_REG_LVT_MASKED
414 wrmsr
415htg_x2_nolint1:
416 mov ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_PC >> 4)
417 rdmsr
418 mov ebx, eax
419 and ebx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
420 cmp ebx, APIC_REG_LVT_MODE_NMI
421 jne htg_x2_nopc
422 or edi, 0x04
423 or eax, APIC_REG_LVT_MASKED
424 wrmsr
425htg_x2_nopc:
426 mov ecx, MSR_IA32_X2APIC_START + (APIC_REG_VERSION >> 4)
427 rdmsr
428 shr eax, 16
429 cmp al, 5
430 jb htg_x2_notherm
431 je htg_x2_nocmci
432 mov ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_CMCI >> 4)
433 rdmsr
434 mov ebx, eax
435 and ebx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
436 cmp ebx, APIC_REG_LVT_MODE_NMI
437 jne htg_x2_nocmci
438 or edi, 0x10
439 or eax, APIC_REG_LVT_MASKED
440 wrmsr
441htg_x2_nocmci:
442 mov ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_THMR >> 4)
443 rdmsr
444 mov ebx, eax
445 and ebx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
446 cmp ebx, APIC_REG_LVT_MODE_NMI
447 jne htg_x2_notherm
448 or edi, 0x08
449 or eax, APIC_REG_LVT_MASKED
450 wrmsr
451htg_x2_notherm:
452 mov edx, esi ; Restore edx.
453 mov [edx + CPUMCPU.fApicDisVectors], edi
454
455htg_apic_done:
456%endif
457
458 ; control registers.
459 mov eax, cr0
460 mov [edx + CPUMCPU.Host.cr0], eax
461 ; Skip cr2; assume the host OS doesn't stuff things in cr2. (safe)
462 mov eax, cr3
463 mov [edx + CPUMCPU.Host.cr3], eax
464 mov esi, cr4 ; esi = cr4, we'll modify it further down.
465 mov [edx + CPUMCPU.Host.cr4], esi
466
467 DEBUG32_S_CHAR('c')
468 DEBUG32_S_CHAR(';')
469
470 ; save the host EFER msr
471 mov ebx, edx
472 mov ecx, MSR_K6_EFER
473 rdmsr
474 mov [ebx + CPUMCPU.Host.efer], eax
475 mov [ebx + CPUMCPU.Host.efer + 4], edx
476 mov edx, ebx
477 DEBUG32_S_CHAR('e')
478 DEBUG32_S_CHAR(';')
479
480%ifdef VBOX_WITH_CRASHDUMP_MAGIC
481 mov dword [edx + CPUMCPU.Guest.dr + 4*8], 3
482%endif
483
484 ; Load new gdt so we can do a far jump after going into 64-bit mode
485 ;DEBUG_CMOS_STACK32 16h
486 lgdt [edx + CPUMCPU.Hyper.gdtr]
487
488 DEBUG32_S_CHAR('g')
489 DEBUG32_S_CHAR('!')
490%ifdef VBOX_WITH_CRASHDUMP_MAGIC
491 mov dword [edx + CPUMCPU.Guest.dr + 4*8], 4
492%endif
493
494 ;;
495 ;; Clean up CR4. X86_CR4_PGE, X86_CR4_PCE, X86_CR4_PCIDE (not really
496 ;; relevant for 32-bit, but whatever) and X86_CR4_VMXE must be cleared.
497 ;;
498 and esi, X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE | X86_CR4_PSE | X86_CR4_PAE \
499 | X86_CR4_MCE | X86_CR4_OSFSXR | X86_CR4_OSXMMEEXCPT | X86_CR4_SMXE | X86_CR4_OSXSAVE
500 mov cr4, esi
501
502 ;;
503 ;; Load Intermediate memory context.
504 ;;
505 FIXUP SWITCHER_FIX_INTER_CR3_HC, 1
506 mov eax, 0ffffffffh
507 mov cr3, eax
508 DEBUG32_CHAR('?')
509%ifdef VBOX_WITH_64ON32_CMOS_DEBUG
510 DEBUG_CMOS_TRASH_AL 17h
511%endif
512
513 ;;
514 ;; Jump to identity mapped location
515 ;;
516 FIXUP FIX_HC_2_ID_NEAR_REL, 1, NAME(IDEnterTarget) - NAME(Start)
517 jmp near NAME(IDEnterTarget)
518
519
520 ; We're now on identity mapped pages!
521ALIGNCODE(16)
522GLOBALNAME IDEnterTarget
523 DEBUG32_CHAR('1')
524 DEBUG_CMOS_TRASH_AL 19h
525
526 ; 1. Disable paging.
527 mov ebx, cr0
528 and ebx, ~X86_CR0_PG
529 mov cr0, ebx
530 DEBUG32_CHAR('2')
531 DEBUG_CMOS_TRASH_AL 1ah
532
533%ifdef VBOX_WITH_CRASHDUMP_MAGIC
534 mov eax, cr2
535 mov dword [eax], 3
536%endif
537
538 ; 2. Enable PAE.
539 mov ecx, cr4
540 or ecx, X86_CR4_PAE
541 mov cr4, ecx
542 DEBUG_CMOS_TRASH_AL 1bh
543
544 ; 3. Load long mode intermediate CR3.
545 FIXUP FIX_INTER_AMD64_CR3, 1
546 mov ecx, 0ffffffffh
547 mov cr3, ecx
548 DEBUG32_CHAR('3')
549 DEBUG_CMOS_TRASH_AL 1ch
550
551%ifdef VBOX_WITH_CRASHDUMP_MAGIC
552 mov eax, cr2
553 mov dword [eax], 4
554%endif
555
556 ; 4. Enable long mode.
557 mov esi, edx
558 mov ecx, MSR_K6_EFER
559 rdmsr
560 FIXUP FIX_EFER_OR_MASK, 1
561 or eax, 0ffffffffh
562 and eax, ~(MSR_K6_EFER_FFXSR) ; turn off fast fxsave/fxrstor (skipping xmm regs)
563 wrmsr
564 mov edx, esi
565 DEBUG32_CHAR('4')
566 DEBUG_CMOS_TRASH_AL 1dh
567
568%ifdef VBOX_WITH_CRASHDUMP_MAGIC
569 mov eax, cr2
570 mov dword [eax], 5
571%endif
572
573 ; 5. Enable paging.
574 or ebx, X86_CR0_PG
575 ; Disable ring 0 write protection too
576 and ebx, ~X86_CR0_WRITE_PROTECT
577 mov cr0, ebx
578 DEBUG32_CHAR('5')
579
580 ; Jump from compatibility mode to 64-bit mode.
581 FIXUP FIX_ID_FAR32_TO_64BIT_MODE, 1, NAME(IDEnter64Mode) - NAME(Start)
582 jmp 0ffffh:0fffffffeh
583
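
The identity-mapped sequence above (steps 1 to 5) is, as far as the bits are concerned, plain arithmetic on CR0, CR4 and EFER. A compilable C sketch of just that arithmetic, with architectural bit positions and made-up example register values (the real privileged writes can only happen in the switcher itself, and the EFER OR-mask is supplied via the FIX_EFER_OR_MASK fixup rather than hard-coded):

#include <stdint.h>
#include <stdio.h>

#define X86_CR0_PG              (1u << 31)
#define X86_CR0_WRITE_PROTECT   (1u << 16)
#define X86_CR4_PAE             (1u << 5)
#define MSR_K6_EFER             0xc0000080u     /* the EFER MSR number */
#define MSR_K6_EFER_LME         (1u << 8)       /* long mode enable */
#define MSR_K6_EFER_FFXSR       (1u << 14)      /* fast fxsave/fxrstor */

int main(void)
{
    uint32_t cr0  = 0x80050033;                 /* example 32-bit host CR0 */
    uint32_t cr4  = 0x000006e0;                 /* example 32-bit host CR4 */
    uint64_t efer = 0x0000000000000801;         /* example 32-bit host EFER */

    cr0 &= ~X86_CR0_PG;                         /* 1. paging off while on identity-mapped pages */
    cr4 |= X86_CR4_PAE;                         /* 2. PAE on, a prerequisite for long mode */
    /* 3. load the long-mode intermediate CR3 (PML4 root) - omitted here */
    efer |= MSR_K6_EFER_LME;                    /* 4. long mode enable (part of the fixup OR-mask)... */
    efer &= ~(uint64_t)MSR_K6_EFER_FFXSR;       /*    ...and fast fxsave/fxrstor off */
    cr0 |= X86_CR0_PG;                          /* 5. paging back on -> long mode becomes active */
    cr0 &= ~X86_CR0_WRITE_PROTECT;              /*    plus ring-0 write protection off */

    printf("cr0=%#x cr4=%#x efer=%#llx (EFER is MSR %#x)\n",
           cr0, cr4, (unsigned long long)efer, MSR_K6_EFER);
    return 0;
}
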
584 ;
585 ; We're in 64-bit mode (ds, ss, es, fs, gs are all bogus).
586BITS 64
587ALIGNCODE(16)
588NAME(IDEnter64Mode):
589 DEBUG64_CHAR('6')
590 DEBUG_CMOS_TRASH_AL 1eh
591 jmp [NAME(pICEnterTarget) wrt rip]
592
593; 64-bit jump target
594NAME(pICEnterTarget):
595FIXUP FIX_HC_64BIT_NOCHECK, 0, NAME(ICEnterTarget) - NAME(Start)
596dq 0ffffffffffffffffh
597
598; 64-bit pCpum address.
599NAME(pCpumIC):
600FIXUP FIX_GC_64_BIT_CPUM_OFF, 0, 0
601dq 0ffffffffffffffffh
602
603%ifdef VBOX_WITH_CRASHDUMP_MAGIC
604NAME(pMarker):
605db 'Switch_marker'
606%endif
607
608 ;
609 ; When we arrive here we're in 64-bit mode in the intermediate context
610 ;
611ALIGNCODE(16)
612GLOBALNAME ICEnterTarget
613 ;DEBUG_CMOS_TRASH_AL 1fh
614 ; Load CPUM pointer into rdx
615 mov rdx, [NAME(pCpumIC) wrt rip]
616 CPUMCPU_FROM_CPUM_WITH_OFFSET edx, ebp
617
618 mov rax, cs
619 mov ds, rax
620 mov es, rax
621
622 ; Invalidate fs & gs
623 mov rax, 0
624 mov fs, rax
625 mov gs, rax
626
627%ifdef VBOX_WITH_CRASHDUMP_MAGIC
628 mov dword [rdx + CPUMCPU.Guest.dr + 4*8], 5
629%endif
630
631 ; Setup stack.
632 DEBUG64_CHAR('7')
633 mov rsp, 0
634 mov eax, [rdx + CPUMCPU.Hyper.ss.Sel]
635 mov ss, ax
636 mov esp, [rdx + CPUMCPU.Hyper.esp]
637
638%ifdef VBOX_WITH_CRASHDUMP_MAGIC
639 mov dword [rdx + CPUMCPU.Guest.dr + 4*8], 6
640%endif
641
642%ifdef VBOX_WITH_64ON32_IDT
643 ; Set up emergency trap handlers.
644 lidt [rdx + CPUMCPU.Hyper.idtr]
645%endif
646
647 ; load the hypervisor function address
648 mov r9, [rdx + CPUMCPU.Hyper.eip]
649 DEBUG64_S_CHAR('8')
650
651 ; Check if we need to restore the guest FPU state
652 mov esi, [rdx + CPUMCPU.fUseFlags] ; esi == use flags.
653 test esi, CPUM_SYNC_FPU_STATE
654 jz near htg_fpu_no
655
656%ifdef VBOX_WITH_CRASHDUMP_MAGIC
657 mov dword [rdx + CPUMCPU.Guest.dr + 4*8], 7
658%endif
659
660 mov rax, cr0
661 mov rcx, rax ; save old CR0
662 and rax, ~(X86_CR0_TS | X86_CR0_EM)
663 mov cr0, rax
664 ; Use explicit REX prefix. See @bugref{6398}.
665 o64 fxrstor [rdx + CPUMCPU.Guest.fpu]
666 mov cr0, rcx ; and restore old CR0 again
667
668 and dword [rdx + CPUMCPU.fUseFlags], ~CPUM_SYNC_FPU_STATE
669
670htg_fpu_no:
671 ; Check if we need to restore the guest debug state
672 test esi, CPUM_SYNC_DEBUG_REGS_GUEST | CPUM_SYNC_DEBUG_REGS_HYPER
673 jz htg_debug_done
674
675%ifdef VBOX_WITH_CRASHDUMP_MAGIC
676 mov dword [rdx + CPUMCPU.Guest.dr + 4*8], 8
677%endif
678 test esi, CPUM_SYNC_DEBUG_REGS_HYPER
679 jnz htg_debug_hyper
680
681 ; Guest values in DRx, letting the guest access them directly.
682 mov rax, qword [rdx + CPUMCPU.Guest.dr + 0*8]
683 mov dr0, rax
684 mov rax, qword [rdx + CPUMCPU.Guest.dr + 1*8]
685 mov dr1, rax
686 mov rax, qword [rdx + CPUMCPU.Guest.dr + 2*8]
687 mov dr2, rax
688 mov rax, qword [rdx + CPUMCPU.Guest.dr + 3*8]
689 mov dr3, rax
690 mov rax, qword [rdx + CPUMCPU.Guest.dr + 6*8]
691 mov dr6, rax ; not required for AMD-V
692
693 and dword [rdx + CPUMCPU.fUseFlags], ~CPUM_SYNC_DEBUG_REGS_GUEST
694 or dword [rdx + CPUMCPU.fUseFlags], CPUM_USED_DEBUG_REGS_GUEST
695 jmp htg_debug_done
696
697htg_debug_hyper:
698 ; Combined values in DRx, intercepting all accesses.
699 mov rax, qword [rdx + CPUMCPU.Hyper.dr + 0*8]
700 mov dr0, rax
701 mov rax, qword [rdx + CPUMCPU.Hyper.dr + 1*8]
702 mov dr1, rax
703 mov rax, qword [rdx + CPUMCPU.Hyper.dr + 2*8]
704 mov dr2, rax
705 mov rax, qword [rdx + CPUMCPU.Hyper.dr + 3*8]
706 mov dr3, rax
707 mov rax, qword [rdx + CPUMCPU.Hyper.dr + 6*8]
708 mov dr6, rax ; not required for AMD-V
709
710 and dword [rdx + CPUMCPU.fUseFlags], ~CPUM_SYNC_DEBUG_REGS_HYPER
711 or dword [rdx + CPUMCPU.fUseFlags], CPUM_USED_DEBUG_REGS_HYPER
712
713htg_debug_done:
714
715%ifdef VBOX_WITH_CRASHDUMP_MAGIC
716 mov dword [rdx + CPUMCPU.Guest.dr + 4*8], 9
717%endif
718
719 ;
720 ; "Call" the specified helper function.
721 ;
722
723 ; parameter for all helper functions (pCtx)
724 DEBUG64_CHAR('9')
725 lea rsi, [rdx + CPUMCPU.Guest.fpu]
726 lea rax, [htg_return wrt rip]
727 push rax ; return address
728
729 cmp r9d, HM64ON32OP_VMXRCStartVM64
730 jz NAME(VMXRCStartVM64)
731 cmp r9d, HM64ON32OP_SVMRCVMRun64
732 jz NAME(SVMRCVMRun64)
733 cmp r9d, HM64ON32OP_HMRCSaveGuestFPU64
734 jz NAME(HMRCSaveGuestFPU64)
735 cmp r9d, HM64ON32OP_HMRCSaveGuestDebug64
736 jz NAME(HMRCSaveGuestDebug64)
737 cmp r9d, HM64ON32OP_HMRCTestSwitcher64
738 jz NAME(HMRCTestSwitcher64)
739 mov eax, VERR_HM_INVALID_HM64ON32OP
740htg_return:
741 DEBUG64_CHAR('r')
742
743 ; Load CPUM pointer into rdx
744 mov rdx, [NAME(pCpumIC) wrt rip]
745 CPUMCPU_FROM_CPUM_WITH_OFFSET edx, ebp
746
747%ifdef VBOX_WITH_CRASHDUMP_MAGIC
748 mov dword [rdx + CPUMCPU.Guest.dr + 4*8], 10
749%endif
750
751 ; Save the return code
752 mov dword [rdx + CPUMCPU.u32RetCode], eax
753
754 ; now let's switch back
755 jmp NAME(vmmRCToHostAsm) ; rax = returncode.
756
757ENDPROC vmmR0ToRawModeAsm
758
759
760
761
762;
763;
764; HM code (used to be HMRCA.asm at one point).
765; HM code (used to be HMRCA.asm at one point).
766; HM code (used to be HMRCA.asm at one point).
767;
768;
769
770;; @def MYPUSHSEGS
771; Macro saving all segment registers on the stack.
772; @param 1 full width register name
773%macro MYPUSHSEGS 1
774 mov %1, es
775 push %1
776 mov %1, ds
777 push %1
778%endmacro
779
780;; @def MYPOPSEGS
781; Macro restoring all segment registers on the stack
782; @param 1 full width register name
783%macro MYPOPSEGS 1
784 pop %1
785 mov ds, %1
786 pop %1
787 mov es, %1
788%endmacro
789
790
791;/**
792; * Prepares for and executes VMLAUNCH/VMRESUME (64-bit guest mode)
793; *
794; * @returns VBox status code
795; * @param HCPhysCpuPage VMXON physical address [rsp+8]
796; * @param HCPhysVmcs VMCS physical address [rsp+16]
797; * @param pCache VMCS cache [rsp+24]
798; * @param pCtx Guest context (rsi)
799; */
800BEGINPROC VMXRCStartVM64
801 push rbp
802 mov rbp, rsp
803 DEBUG_CMOS_STACK64 20h
804
805 ; Make sure VT-x instructions are allowed.
806 mov rax, cr4
807 or rax, X86_CR4_VMXE
808 mov cr4, rax
809
810 ; Enter VMX Root Mode.
811 vmxon [rbp + 8 + 8]
812 jnc .vmxon_success
813 mov rax, VERR_VMX_INVALID_VMXON_PTR
814 jmp .vmstart64_vmxon_failed
815
816.vmxon_success:
817 jnz .vmxon_success2
818 mov rax, VERR_VMX_VMXON_FAILED
819 jmp .vmstart64_vmxon_failed
820
821.vmxon_success2:
822 ; Activate the VMCS pointer
823 vmptrld [rbp + 16 + 8]
824 jnc .vmptrld_success
825 mov rax, VERR_VMX_INVALID_VMCS_PTR
826 jmp .vmstart64_vmxoff_end
827
828.vmptrld_success:
829 jnz .vmptrld_success2
830 mov rax, VERR_VMX_VMPTRLD_FAILED
831 jmp .vmstart64_vmxoff_end
832
833.vmptrld_success2:
834
835 ; Save the VMCS pointer on the stack
836 push qword [rbp + 16 + 8];
837
838 ; Save segment registers.
839 MYPUSHSEGS rax
840
841%ifdef VMX_USE_CACHED_VMCS_ACCESSES
842 ; Flush the VMCS write cache first (before any other vmreads/vmwrites!).
843 mov rbx, [rbp + 24 + 8] ; pCache
844
845 %ifdef VBOX_WITH_CRASHDUMP_MAGIC
846 mov qword [rbx + VMCSCACHE.uPos], 2
847 %endif
848
849 %ifdef DEBUG
850 mov rax, [rbp + 8 + 8] ; HCPhysCpuPage
851 mov [rbx + VMCSCACHE.TestIn.HCPhysCpuPage], rax
852 mov rax, [rbp + 16 + 8] ; HCPhysVmcs
853 mov [rbx + VMCSCACHE.TestIn.HCPhysVmcs], rax
854 mov [rbx + VMCSCACHE.TestIn.pCache], rbx
855 mov [rbx + VMCSCACHE.TestIn.pCtx], rsi
856 %endif
857
858 mov ecx, [rbx + VMCSCACHE.Write.cValidEntries]
859 cmp ecx, 0
860 je .no_cached_writes
861 mov rdx, rcx
862 mov rcx, 0
863 jmp .cached_write
864
865ALIGN(16)
866.cached_write:
867 mov eax, [rbx + VMCSCACHE.Write.aField + rcx*4]
868 vmwrite rax, qword [rbx + VMCSCACHE.Write.aFieldVal + rcx*8]
869 inc rcx
870 cmp rcx, rdx
871 jl .cached_write
872
873 mov dword [rbx + VMCSCACHE.Write.cValidEntries], 0
874.no_cached_writes:
875
876 %ifdef VBOX_WITH_CRASHDUMP_MAGIC
877 mov qword [rbx + VMCSCACHE.uPos], 3
878 %endif
879 ; Save the pCache pointer.
880 push rbx
881%endif
882
883 ; Save the host state that's relevant in the temporary 64-bit mode.
884 mov rdx, cr0
885 mov eax, VMX_VMCS_HOST_CR0
886 vmwrite rax, rdx
887
888 mov rdx, cr3
889 mov eax, VMX_VMCS_HOST_CR3
890 vmwrite rax, rdx
891
892 mov rdx, cr4
893 mov eax, VMX_VMCS_HOST_CR4
894 vmwrite rax, rdx
895
896 mov rdx, cs
897 mov eax, VMX_VMCS_HOST_FIELD_CS
898 vmwrite rax, rdx
899
900 mov rdx, ss
901 mov eax, VMX_VMCS_HOST_FIELD_SS
902 vmwrite rax, rdx
903
904%if 0 ; Another experiment regarding triple faults... Seems not to be necessary.
905 sub rsp, 16
906 str [rsp]
907 movsx rdx, word [rsp]
908 mov eax, VMX_VMCS_HOST_FIELD_TR
909 vmwrite rax, rdx
910 add rsp, 16
911%endif
912
913 sub rsp, 16
914 sgdt [rsp + 6] ; (The 64-bit base should be aligned, not the word.)
915 mov eax, VMX_VMCS_HOST_GDTR_BASE
916 vmwrite rax, [rsp + 6 + 2]
917 add rsp, 16
918
919%ifdef VBOX_WITH_64ON32_IDT
920 sub rsp, 16
921 sidt [rsp + 6]
922 mov eax, VMX_VMCS_HOST_IDTR_BASE
923 vmwrite rax, [rsp + 6 + 2] ; [rsi + CPUMCPU.Hyper.idtr + 2] - why doesn't this work?
924 add rsp, 16
925 ;call NAME(vmm64On32PrintIdtr)
926%endif
927
928%ifdef VBOX_WITH_CRASHDUMP_MAGIC
929 mov qword [rbx + VMCSCACHE.uPos], 4
930%endif
931
932 ; Hopefully we can ignore TR (we restore it anyway on the way back to 32-bit mode).
933
934 ; First we have to save some final CPU context registers.
935 lea rdx, [.vmlaunch64_done wrt rip]
936 mov rax, VMX_VMCS_HOST_RIP ; Return address (too difficult to continue after VMLAUNCH?).
937 vmwrite rax, rdx
938 ; Note: assumes success!
939
940 ; Manual save and restore:
941 ; - General purpose registers except RIP, RSP
942 ;
943 ; Trashed:
944 ; - CR2 (we don't care)
945 ; - LDTR (reset to 0)
946 ; - DRx (presumably not changed at all)
947 ; - DR7 (reset to 0x400)
948 ; - EFLAGS (reset to RT_BIT(1); not relevant)
949
950%ifdef VBOX_WITH_CRASHDUMP_MAGIC
951 mov qword [rbx + VMCSCACHE.uPos], 5
952%endif
953
954 ; Save the pCtx pointer
955 push rsi
956
957 ; Load CR2 if necessary (may be expensive as writing CR2 is a synchronizing instruction).
958 mov rbx, qword [rsi + CPUMCTX.cr2]
959 mov rdx, cr2
960 cmp rdx, rbx
961 je .skipcr2write64
962 mov cr2, rbx
963
964.skipcr2write64:
965 mov eax, VMX_VMCS_HOST_RSP
966 vmwrite rax, rsp
967 ; Note: assumes success!
968 ; Don't mess with ESP anymore!!!
969
970 ; Save Guest's general purpose registers.
971 mov rax, qword [rsi + CPUMCTX.eax]
972 mov rbx, qword [rsi + CPUMCTX.ebx]
973 mov rcx, qword [rsi + CPUMCTX.ecx]
974 mov rdx, qword [rsi + CPUMCTX.edx]
975 mov rbp, qword [rsi + CPUMCTX.ebp]
976 mov r8, qword [rsi + CPUMCTX.r8]
977 mov r9, qword [rsi + CPUMCTX.r9]
978 mov r10, qword [rsi + CPUMCTX.r10]
979 mov r11, qword [rsi + CPUMCTX.r11]
980 mov r12, qword [rsi + CPUMCTX.r12]
981 mov r13, qword [rsi + CPUMCTX.r13]
982 mov r14, qword [rsi + CPUMCTX.r14]
983 mov r15, qword [rsi + CPUMCTX.r15]
984
985 ; Save rdi & rsi.
986 mov rdi, qword [rsi + CPUMCTX.edi]
987 mov rsi, qword [rsi + CPUMCTX.esi]
988
989 vmlaunch
990 jmp .vmlaunch64_done ; Here if vmlaunch detected a failure.
991
992ALIGNCODE(16)
993.vmlaunch64_done:
994%if 0 ;fixme later - def VBOX_WITH_64ON32_IDT
995 push rdx
996 mov rdx, [rsp + 8] ; pCtx
997 lidt [rdx + CPUMCPU.Hyper.idtr]
998 pop rdx
999%endif
1000 jc near .vmstart64_invalid_vmcs_ptr
1001 jz near .vmstart64_start_failed
1002
1003 push rdi
1004 mov rdi, [rsp + 8] ; pCtx
1005
1006 mov qword [rdi + CPUMCTX.eax], rax
1007 mov qword [rdi + CPUMCTX.ebx], rbx
1008 mov qword [rdi + CPUMCTX.ecx], rcx
1009 mov qword [rdi + CPUMCTX.edx], rdx
1010 mov qword [rdi + CPUMCTX.esi], rsi
1011 mov qword [rdi + CPUMCTX.ebp], rbp
1012 mov qword [rdi + CPUMCTX.r8], r8
1013 mov qword [rdi + CPUMCTX.r9], r9
1014 mov qword [rdi + CPUMCTX.r10], r10
1015 mov qword [rdi + CPUMCTX.r11], r11
1016 mov qword [rdi + CPUMCTX.r12], r12
1017 mov qword [rdi + CPUMCTX.r13], r13
1018 mov qword [rdi + CPUMCTX.r14], r14
1019 mov qword [rdi + CPUMCTX.r15], r15
1020 mov rax, cr2
1021 mov qword [rdi + CPUMCTX.cr2], rax
1022
1023 pop rax ; The guest edi we pushed above
1024 mov qword [rdi + CPUMCTX.edi], rax
1025
1026 pop rsi ; pCtx (needed in rsi by the macros below)
1027
1028%ifdef VMX_USE_CACHED_VMCS_ACCESSES
1029 pop rdi ; Saved pCache
1030
1031 %ifdef VBOX_WITH_CRASHDUMP_MAGIC
1032 mov dword [rdi + VMCSCACHE.uPos], 7
1033 %endif
1034 %ifdef DEBUG
1035 mov [rdi + VMCSCACHE.TestOut.pCache], rdi
1036 mov [rdi + VMCSCACHE.TestOut.pCtx], rsi
1037 mov rax, cr8
1038 mov [rdi + VMCSCACHE.TestOut.cr8], rax
1039 %endif
1040
1041 mov ecx, [rdi + VMCSCACHE.Read.cValidEntries]
1042 cmp ecx, 0 ; Can't happen
1043 je .no_cached_reads
1044 jmp .cached_read
1045
1046ALIGN(16)
1047.cached_read:
1048 dec rcx
1049 mov eax, [rdi + VMCSCACHE.Read.aField + rcx*4]
1050 vmread qword [rdi + VMCSCACHE.Read.aFieldVal + rcx*8], rax
1051 cmp rcx, 0
1052 jnz .cached_read
1053.no_cached_reads:
1054 %ifdef VBOX_WITH_CRASHDUMP_MAGIC
1055 mov dword [rdi + VMCSCACHE.uPos], 8
1056 %endif
1057%endif
1058
1059 ; Restore segment registers.
1060 MYPOPSEGS rax
1061
1062 mov eax, VINF_SUCCESS
1063
1064%ifdef VBOX_WITH_CRASHDUMP_MAGIC
1065 mov dword [rdi + VMCSCACHE.uPos], 9
1066%endif
1067.vmstart64_end:
1068
1069%ifdef VMX_USE_CACHED_VMCS_ACCESSES
1070 %ifdef DEBUG
1071 mov rdx, [rsp] ; HCPhysVmcs
1072 mov [rdi + VMCSCACHE.TestOut.HCPhysVmcs], rdx
1073 %endif
1074%endif
1075
1076 ; Write back the data and disable the VMCS.
1077 vmclear qword [rsp] ; Pushed pVMCS
1078 add rsp, 8
1079
1080.vmstart64_vmxoff_end:
1081 ; Disable VMX root mode.
1082 vmxoff
1083.vmstart64_vmxon_failed:
1084%ifdef VMX_USE_CACHED_VMCS_ACCESSES
1085 %ifdef DEBUG
1086 cmp eax, VINF_SUCCESS
1087 jne .skip_flags_save
1088
1089 pushf
1090 pop rdx
1091 mov [rdi + VMCSCACHE.TestOut.eflags], rdx
1092 %ifdef VBOX_WITH_CRASHDUMP_MAGIC
1093 mov dword [rdi + VMCSCACHE.uPos], 12
1094 %endif
1095.skip_flags_save:
1096 %endif
1097%endif
1098 pop rbp
1099 ret
1100
1101
1102.vmstart64_invalid_vmcs_ptr:
1103 pop rsi ; pCtx (needed in rsi by the macros below)
1104
1105%ifdef VMX_USE_CACHED_VMCS_ACCESSES
1106 pop rdi ; pCache
1107 %ifdef VBOX_WITH_CRASHDUMP_MAGIC
1108 mov dword [rdi + VMCSCACHE.uPos], 10
1109 %endif
1110
1111 %ifdef DEBUG
1112 mov [rdi + VMCSCACHE.TestOut.pCache], rdi
1113 mov [rdi + VMCSCACHE.TestOut.pCtx], rsi
1114 %endif
1115%endif
1116
1117 ; Restore segment registers.
1118 MYPOPSEGS rax
1119
1120 ; Restore all general purpose host registers.
1121 mov eax, VERR_VMX_INVALID_VMCS_PTR_TO_START_VM
1122 jmp .vmstart64_end
1123
1124.vmstart64_start_failed:
1125 pop rsi ; pCtx (needed in rsi by the macros below)
1126
1127%ifdef VMX_USE_CACHED_VMCS_ACCESSES
1128 pop rdi ; pCache
1129
1130 %ifdef DEBUG
1131 mov [rdi + VMCSCACHE.TestOut.pCache], rdi
1132 mov [rdi + VMCSCACHE.TestOut.pCtx], rsi
1133 %endif
1134 %ifdef VBOX_WITH_CRASHDUMP_MAGIC
1135 mov dword [rdi + VMCSCACHE.uPos], 11
1136 %endif
1137%endif
1138
1139 ; Restore segment registers.
1140 MYPOPSEGS rax
1141
1142 ; Restore all general purpose host registers.
1143 mov eax, VERR_VMX_UNABLE_TO_START_VM
1144 jmp .vmstart64_end
1145ENDPROC VMXRCStartVM64
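
The cached-write flush at the top of VMXRCStartVM64 (under VMX_USE_CACHED_VMCS_ACCESSES) is a walk over a count-plus-arrays structure. A sketch of it in C, with the field layout inferred from the offsets used above and a logging stub standing in for the VMWRITE instruction, which has no portable C equivalent:

#include <stdint.h>
#include <stdio.h>

#define VMCSCACHE_MAX_ENTRIES 128       /* assumption; the real bound lives in the VMCSCACHE definition */

/* Shape of the write cache as the .cached_write loop uses it. */
typedef struct VMCSCACHEWRITE
{
    uint32_t cValidEntries;
    uint32_t aField[VMCSCACHE_MAX_ENTRIES];     /* VMCS field encodings */
    uint64_t aFieldVal[VMCSCACHE_MAX_ENTRIES];  /* values to write */
} VMCSCACHEWRITE;

/* Stand-in for VMWRITE (which requires ring 0 and VMX root operation). */
static void vmx_vmwrite(uint64_t idField, uint64_t u64Value)
{
    printf("vmwrite %#llx <- %#llx\n",
           (unsigned long long)idField, (unsigned long long)u64Value);
}

/* Equivalent of the .cached_write loop: push every pending write into the
 * current VMCS, then mark the cache empty. */
static void vmcsCacheFlushWrites(VMCSCACHEWRITE *pCache)
{
    for (uint32_t i = 0; i < pCache->cValidEntries; i++)
        vmx_vmwrite(pCache->aField[i], pCache->aFieldVal[i]);
    pCache->cValidEntries = 0;
}

int main(void)
{
    VMCSCACHEWRITE Cache = { 0 };
    Cache.aField[0]     = 0x6802;       /* VMCS field encoding for guest CR3 */
    Cache.aFieldVal[0]  = 0x1000;
    Cache.cValidEntries = 1;
    vmcsCacheFlushWrites(&Cache);
    return 0;
}
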
1146
1147
1148;/**
1149; * Prepares for and executes VMRUN (64-bit guests)
1150; *
1151; * @returns VBox status code
1152; * @param HCPhysVMCBHost Physical address of host VMCB (rsp+8)
1153; * @param HCPhysVMCB Physical address of guest VMCB (rsp+16)
1154; * @param pCtx Guest context (rsi)
1155; */
1156BEGINPROC SVMRCVMRun64
1157 push rbp
1158 mov rbp, rsp
1159 pushf
1160 DEBUG_CMOS_STACK64 30h
1161
1162 ; Manual save and restore:
1163 ; - General purpose registers except RIP, RSP, RAX
1164 ;
1165 ; Trashed:
1166 ; - CR2 (we don't care)
1167 ; - LDTR (reset to 0)
1168 ; - DRx (presumably not changed at all)
1169 ; - DR7 (reset to 0x400)
1170
1171 ; Save the Guest CPU context pointer.
1172 push rsi ; Push for saving the state at the end
1173
1174 ; Save host fs, gs, sysenter msr etc
1175 mov rax, [rbp + 8 + 8] ; pVMCBHostPhys (64 bits physical address)
1176 push rax ; Save for the vmload after vmrun
1177 vmsave
1178
1179 ; Setup eax for VMLOAD
1180 mov rax, [rbp + 8 + 8 + RTHCPHYS_CB] ; pVMCBPhys (64 bits physical address)
1181
1182 ; Restore Guest's general purpose registers.
1183 ; rax is loaded from the VMCB by VMRUN.
1184 mov rbx, qword [rsi + CPUMCTX.ebx]
1185 mov rcx, qword [rsi + CPUMCTX.ecx]
1186 mov rdx, qword [rsi + CPUMCTX.edx]
1187 mov rdi, qword [rsi + CPUMCTX.edi]
1188 mov rbp, qword [rsi + CPUMCTX.ebp]
1189 mov r8, qword [rsi + CPUMCTX.r8]
1190 mov r9, qword [rsi + CPUMCTX.r9]
1191 mov r10, qword [rsi + CPUMCTX.r10]
1192 mov r11, qword [rsi + CPUMCTX.r11]
1193 mov r12, qword [rsi + CPUMCTX.r12]
1194 mov r13, qword [rsi + CPUMCTX.r13]
1195 mov r14, qword [rsi + CPUMCTX.r14]
1196 mov r15, qword [rsi + CPUMCTX.r15]
1197 mov rsi, qword [rsi + CPUMCTX.esi]
1198
1199 ; Clear the global interrupt flag & execute sti to make sure external interrupts cause a world switch.
1200 clgi
1201 sti
1202
1203 ; Load guest fs, gs, sysenter msr etc
1204 vmload
1205 ; Run the VM
1206 vmrun
1207
1208 ; rax is in the VMCB already; we can use it here.
1209
1210 ; Save guest fs, gs, sysenter msr etc.
1211 vmsave
1212
1213 ; Load host fs, gs, sysenter msr etc.
1214 pop rax ; Pushed above
1215 vmload
1216
1217 ; Set the global interrupt flag again, but execute cli to make sure IF=0.
1218 cli
1219 stgi
1220
1221 pop rax ; pCtx
1222
1223 mov qword [rax + CPUMCTX.ebx], rbx
1224 mov qword [rax + CPUMCTX.ecx], rcx
1225 mov qword [rax + CPUMCTX.edx], rdx
1226 mov qword [rax + CPUMCTX.esi], rsi
1227 mov qword [rax + CPUMCTX.edi], rdi
1228 mov qword [rax + CPUMCTX.ebp], rbp
1229 mov qword [rax + CPUMCTX.r8], r8
1230 mov qword [rax + CPUMCTX.r9], r9
1231 mov qword [rax + CPUMCTX.r10], r10
1232 mov qword [rax + CPUMCTX.r11], r11
1233 mov qword [rax + CPUMCTX.r12], r12
1234 mov qword [rax + CPUMCTX.r13], r13
1235 mov qword [rax + CPUMCTX.r14], r14
1236 mov qword [rax + CPUMCTX.r15], r15
1237
1238 mov eax, VINF_SUCCESS
1239
1240 popf
1241 pop rbp
1242 ret
1243ENDPROC SVMRCVMRun64
1244
1245;/**
1246; * Saves the guest FPU context
1247; *
1248; * @returns VBox status code
1249; * @param pCtx Guest context [rsi]
1250; */
1251BEGINPROC HMRCSaveGuestFPU64
1252 DEBUG_CMOS_STACK64 40h
1253 mov rax, cr0
1254 mov rcx, rax ; save old CR0
1255 and rax, ~(X86_CR0_TS | X86_CR0_EM)
1256 mov cr0, rax
1257
1258 ; Use explicit REX prefix. See @bugref{6398}.
1259 o64 fxsave [rsi + CPUMCTX.fpu]
1260
1261 mov cr0, rcx ; and restore old CR0 again
1262
1263 mov eax, VINF_SUCCESS
1264 ret
1265ENDPROC HMRCSaveGuestFPU64
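
HMRCSaveGuestFPU64 clears CR0.TS and CR0.EM so fxsave won't fault, and uses the REX.W form ('o64 fxsave') so the full 64-bit state image is produced (see @bugref{6398}). The 512-byte FXSAVE image itself can be examined from user space; a small sketch (assumes GCC or Clang on x86-64, compiled with -mfxsr, and skips the CR0 handling, which needs ring 0):

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <immintrin.h>                  /* _fxsave64 */

int main(void)
{
    /* The FXSAVE/FXRSTOR image is 512 bytes and must be 16-byte aligned. */
    static uint8_t abFxState[512] __attribute__((aligned(16)));

    _fxsave64(abFxState);               /* the same operation as 'o64 fxsave' above */

    /* Two well-known fields of the image: x87 control word at offset 0,
     * MXCSR at offset 24. */
    uint16_t fcw;
    uint32_t mxcsr;
    memcpy(&fcw, &abFxState[0], sizeof(fcw));
    memcpy(&mxcsr, &abFxState[24], sizeof(mxcsr));
    printf("FCW=%#06x MXCSR=%#010x\n", fcw, mxcsr);
    return 0;
}
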
1266
1267;/**
1268; * Saves the guest debug context (DR0-3, DR6)
1269; *
1270; * @returns VBox status code
1271; * @param pCtx Guest context [rsi]
1272; */
1273BEGINPROC HMRCSaveGuestDebug64
1274 DEBUG_CMOS_STACK64 41h
1275 mov rax, dr0
1276 mov qword [rsi + CPUMCTX.dr + 0*8], rax
1277 mov rax, dr1
1278 mov qword [rsi + CPUMCTX.dr + 1*8], rax
1279 mov rax, dr2
1280 mov qword [rsi + CPUMCTX.dr + 2*8], rax
1281 mov rax, dr3
1282 mov qword [rsi + CPUMCTX.dr + 3*8], rax
1283 mov rax, dr6
1284 mov qword [rsi + CPUMCTX.dr + 6*8], rax
1285 mov eax, VINF_SUCCESS
1286 ret
1287ENDPROC HMRCSaveGuestDebug64
1288
1289;/**
1290; * Dummy callback handler
1291; *
1292; * @returns VBox status code
1293; * @param param1 Parameter 1 [rsp+8]
1294; * @param param2 Parameter 2 [rsp+12]
1295; * @param param3 Parameter 3 [rsp+16]
1296; * @param param4 Parameter 4 [rsp+20]
1297; * @param param5 Parameter 5 [rsp+24]
1298; * @param pCtx Guest context [rsi]
1299; */
1300BEGINPROC HMRCTestSwitcher64
1301 DEBUG_CMOS_STACK64 42h
1302 mov eax, [rsp+8]
1303 ret
1304ENDPROC HMRCTestSwitcher64
1305
1306
1307%ifdef VBOX_WITH_64ON32_IDT
1308;
1309; Trap handling.
1310;
1311
1312;; Here follows an array of trap handler entry points, each 8 bytes in size.
1313BEGINPROC vmm64On32TrapHandlers
1314%macro vmm64On32TrapEntry 1
1315GLOBALNAME vmm64On32Trap %+ i
1316 db 06ah, i ; push imm8 - note that this is a sign-extended value.
1317 jmp NAME(%1)
1318 ALIGNCODE(8)
1319%assign i i+1
1320%endmacro
1321%assign i 0 ; start counter.
1322 vmm64On32TrapEntry vmm64On32Trap ; 0
1323 vmm64On32TrapEntry vmm64On32Trap ; 1
1324 vmm64On32TrapEntry vmm64On32Trap ; 2
1325 vmm64On32TrapEntry vmm64On32Trap ; 3
1326 vmm64On32TrapEntry vmm64On32Trap ; 4
1327 vmm64On32TrapEntry vmm64On32Trap ; 5
1328 vmm64On32TrapEntry vmm64On32Trap ; 6
1329 vmm64On32TrapEntry vmm64On32Trap ; 7
1330 vmm64On32TrapEntry vmm64On32TrapErrCode ; 8
1331 vmm64On32TrapEntry vmm64On32Trap ; 9
1332 vmm64On32TrapEntry vmm64On32TrapErrCode ; a
1333 vmm64On32TrapEntry vmm64On32TrapErrCode ; b
1334 vmm64On32TrapEntry vmm64On32TrapErrCode ; c
1335 vmm64On32TrapEntry vmm64On32TrapErrCode ; d
1336 vmm64On32TrapEntry vmm64On32TrapErrCode ; e
1337 vmm64On32TrapEntry vmm64On32Trap ; f (reserved)
1338 vmm64On32TrapEntry vmm64On32Trap ; 10
1339 vmm64On32TrapEntry vmm64On32TrapErrCode ; 11
1340 vmm64On32TrapEntry vmm64On32Trap ; 12
1341 vmm64On32TrapEntry vmm64On32Trap ; 13
1342%rep (0x100 - 0x14)
1343 vmm64On32TrapEntry vmm64On32Trap
1344%endrep
1345ENDPROC vmm64On32TrapHandlers
1346
1347;; Fake an error code and jump to the real thing.
1348BEGINPROC vmm64On32Trap
1349 push qword [rsp]
1350 jmp NAME(vmm64On32TrapErrCode)
1351ENDPROC vmm64On32Trap
1352
1353
1354;;
1355; Trap frame:
1356; [rbp + 38h] = ss
1357; [rbp + 30h] = rsp
1358; [rbp + 28h] = eflags
1359; [rbp + 20h] = cs
1360; [rbp + 18h] = rip
1361; [rbp + 10h] = error code (or trap number)
1362; [rbp + 08h] = trap number
1363; [rbp + 00h] = rbp
1364; [rbp - 08h] = rax
1365; [rbp - 10h] = rbx
1366; [rbp - 18h] = ds
1367;
1368BEGINPROC vmm64On32TrapErrCode
1369 push rbp
1370 mov rbp, rsp
1371 push rax
1372 push rbx
1373 mov ax, ds
1374 push rax
1375 sub rsp, 20h
1376
1377 mov ax, cs
1378 mov ds, ax
1379
1380%if 1
1381 COM64_S_NEWLINE
1382 COM64_S_CHAR '!'
1383 COM64_S_CHAR 't'
1384 COM64_S_CHAR 'r'
1385 COM64_S_CHAR 'a'
1386 COM64_S_CHAR 'p'
1387 movzx eax, byte [rbp + 08h]
1388 COM64_S_DWORD_REG eax
1389 COM64_S_CHAR '!'
1390%endif
1391
1392%if 0 ;; @todo Figure the offset of the CPUMCPU relative to CPUM
1393 sidt [rsp]
1394 movsx eax, word [rsp]
1395 shr eax, 12 ; div by 16 * 256 (0x1000).
1396%else
1397 ; hardcoded VCPU(0) for now...
1398 mov rbx, [NAME(pCpumIC) wrt rip]
1399 mov eax, [rbx + CPUM.offCPUMCPU0]
1400%endif
1401 push rax ; Save the offset for rbp later.
1402
1403 add rbx, rax ; rbx = CPUMCPU
1404
1405 ;
1406 ; Deal with recursive traps due to vmxoff (lazy bird).
1407 ;
1408 lea rax, [.vmxoff_trap_location wrt rip]
1409 cmp rax, [rbp + 18h]
1410 je .not_vmx_root
1411
1412 ;
1413 ; Save the context.
1414 ;
1415 mov rax, [rbp - 8]
1416 mov [rbx + CPUMCPU.Hyper.eax], rax
1417 mov [rbx + CPUMCPU.Hyper.ecx], rcx
1418 mov [rbx + CPUMCPU.Hyper.edx], rdx
1419 mov rax, [rbp - 10h]
1420 mov [rbx + CPUMCPU.Hyper.ebx], rax
1421 mov rax, [rbp]
1422 mov [rbx + CPUMCPU.Hyper.ebp], rax
1423 mov rax, [rbp + 30h]
1424 mov [rbx + CPUMCPU.Hyper.esp], rax
1425 mov [rbx + CPUMCPU.Hyper.edi], rdi
1426 mov [rbx + CPUMCPU.Hyper.esi], rsi
1427 mov [rbx + CPUMCPU.Hyper.r8], r8
1428 mov [rbx + CPUMCPU.Hyper.r9], r9
1429 mov [rbx + CPUMCPU.Hyper.r10], r10
1430 mov [rbx + CPUMCPU.Hyper.r11], r11
1431 mov [rbx + CPUMCPU.Hyper.r12], r12
1432 mov [rbx + CPUMCPU.Hyper.r13], r13
1433 mov [rbx + CPUMCPU.Hyper.r14], r14
1434 mov [rbx + CPUMCPU.Hyper.r15], r15
1435
1436 mov rax, [rbp + 18h]
1437 mov [rbx + CPUMCPU.Hyper.eip], rax
1438 movzx ax, [rbp + 20h]
1439 mov [rbx + CPUMCPU.Hyper.cs.Sel], ax
1440 mov ax, [rbp + 38h]
1441 mov [rbx + CPUMCPU.Hyper.ss.Sel], ax
1442 mov ax, [rbp - 18h]
1443 mov [rbx + CPUMCPU.Hyper.ds.Sel], ax
1444
1445 mov rax, [rbp + 28h]
1446 mov [rbx + CPUMCPU.Hyper.eflags], rax
1447
1448 mov rax, cr2
1449 mov [rbx + CPUMCPU.Hyper.cr2], rax
1450
1451 mov rax, [rbp + 10h]
1452 mov [rbx + CPUMCPU.Hyper.r14], rax ; r14 = error code
1453 movzx eax, byte [rbp + 08h]
1454 mov [rbx + CPUMCPU.Hyper.r15], rax ; r15 = trap number
1455
1456 ;
1457 ; Finally, leave VMX root operation before trying to return to the host.
1458 ;
1459 mov rax, cr4
1460 test rax, X86_CR4_VMXE
1461 jz .not_vmx_root
1462.vmxoff_trap_location:
1463 vmxoff
1464.not_vmx_root:
1465
1466 ;
1467 ; Go back to the host.
1468 ;
1469 pop rbp
1470 mov dword [rbx + CPUMCPU.u32RetCode], VERR_TRPM_DONT_PANIC
1471 jmp NAME(vmmRCToHostAsm)
1472ENDPROC vmm64On32TrapErrCode
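
The rbp-relative frame documented in the comment before vmm64On32TrapErrCode maps onto the following C layout (a sketch for orientation; the offsets are exactly those listed in the comment, lowest address first, with rbp pointing at the saved rbp slot):

#include <stdint.h>

typedef struct VMM64ON32TRAPFRAME
{
    uint64_t uSavedDs;      /* [rbp - 18h], only the low 16 bits are the selector */
    uint64_t uSavedRbx;     /* [rbp - 10h] */
    uint64_t uSavedRax;     /* [rbp - 08h] */
    uint64_t uSavedRbp;     /* [rbp + 00h] */
    uint64_t uTrapNo;       /* [rbp + 08h], pushed by the vmm64On32Trap stubs */
    uint64_t uErrCode;      /* [rbp + 10h], CPU error code, or the trap number again for traps without one */
    uint64_t uRip;          /* [rbp + 18h], start of the CPU-pushed part */
    uint64_t uCs;           /* [rbp + 20h] */
    uint64_t uRFlags;       /* [rbp + 28h] */
    uint64_t uRsp;          /* [rbp + 30h] */
    uint64_t uSs;           /* [rbp + 38h] */
} VMM64ON32TRAPFRAME;
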
1473
1474;; We allocate the IDT here to avoid having to allocate memory separately somewhere.
1475ALIGNCODE(16)
1476GLOBALNAME vmm64On32Idt
1477%assign i 0
1478%rep 256
1479 dq NAME(vmm64On32Trap %+ i) - NAME(Start) ; Relative trap handler offsets.
1480 dq 0
1481%assign i (i + 1)
1482%endrep
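
Each qword pair emitted above holds a Start-relative handler offset plus a zero qword, presumably massaged into real gates by the switcher's relocation code. For reference, the architectural layout of a 16-byte long-mode IDT gate (a sketch of the architectural format, not a VirtualBox structure):

#include <stdint.h>

typedef struct X86IDTGATE64
{
    uint16_t offLow;        /* handler offset bits 0..15 */
    uint16_t selCS;         /* code segment selector */
    uint8_t  bIst;          /* bits 0..2: interrupt stack table index */
    uint8_t  bTypeAttr;     /* type (0xe = interrupt gate), DPL and present bit */
    uint16_t offMid;        /* handler offset bits 16..31 */
    uint32_t offHigh;       /* handler offset bits 32..63 */
    uint32_t uReserved;
} X86IDTGATE64;

_Static_assert(sizeof(X86IDTGATE64) == 16, "a long-mode IDT gate is 16 bytes");
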
1483
1484
1485 %if 0
1486;; For debugging purposes.
1487BEGINPROC vmm64On32PrintIdtr
1488 push rax
1489 push rsi ; paranoia
1490 push rdi ; ditto
1491 sub rsp, 16
1492
1493 COM64_S_CHAR ';'
1494 COM64_S_CHAR 'i'
1495 COM64_S_CHAR 'd'
1496 COM64_S_CHAR 't'
1497 COM64_S_CHAR 'r'
1498 COM64_S_CHAR '='
1499 sidt [rsp + 6]
1500 mov eax, [rsp + 8 + 4]
1501 COM64_S_DWORD_REG eax
1502 mov eax, [rsp + 8]
1503 COM64_S_DWORD_REG eax
1504 COM64_S_CHAR ':'
1505 movzx eax, word [rsp + 6]
1506 COM64_S_DWORD_REG eax
1507 COM64_S_CHAR '!'
1508
1509 add rsp, 16
1510 pop rdi
1511 pop rsi
1512 pop rax
1513 ret
1514ENDPROC vmm64On32PrintIdtr
1515 %endif
1516
1517 %if 1
1518;; For debugging purposes.
1519BEGINPROC vmm64On32DumpCmos
1520 push rax
1521 push rdx
1522 push rcx
1523 push rsi ; paranoia
1524 push rdi ; ditto
1525 sub rsp, 16
1526
1527%if 0
1528 mov al, 3
1529 out 72h, al
1530 mov al, 68h
1531 out 73h, al
1532%endif
1533
1534 COM64_S_NEWLINE
1535 COM64_S_CHAR 'c'
1536 COM64_S_CHAR 'm'
1537 COM64_S_CHAR 'o'
1538 COM64_S_CHAR 's'
1539 COM64_S_CHAR '0'
1540 COM64_S_CHAR ':'
1541
1542 xor ecx, ecx
1543.loop1:
1544 mov al, cl
1545 out 70h, al
1546 in al, 71h
1547 COM64_S_BYTE_REG eax
1548 COM64_S_CHAR ' '
1549 inc ecx
1550 cmp ecx, 128
1551 jb .loop1
1552
1553 COM64_S_NEWLINE
1554 COM64_S_CHAR 'c'
1555 COM64_S_CHAR 'm'
1556 COM64_S_CHAR 'o'
1557 COM64_S_CHAR 's'
1558 COM64_S_CHAR '1'
1559 COM64_S_CHAR ':'
1560 xor ecx, ecx
1561.loop2:
1562 mov al, cl
1563 out 72h, al
1564 in al, 73h
1565 COM64_S_BYTE_REG eax
1566 COM64_S_CHAR ' '
1567 inc ecx
1568 cmp ecx, 128
1569 jb .loop2
1570
1571%if 0
1572 COM64_S_NEWLINE
1573 COM64_S_CHAR 'c'
1574 COM64_S_CHAR 'm'
1575 COM64_S_CHAR 'o'
1576 COM64_S_CHAR 's'
1577 COM64_S_CHAR '2'
1578 COM64_S_CHAR ':'
1579 xor ecx, ecx
1580.loop3:
1581 mov al, cl
1582 out 74h, al
1583 in al, 75h
1584 COM64_S_BYTE_REG eax
1585 COM64_S_CHAR ' '
1586 inc ecx
1587 cmp ecx, 128
1588 jb .loop3
1589
1590 COM64_S_NEWLINE
1591 COM64_S_CHAR 'c'
1592 COM64_S_CHAR 'm'
1593 COM64_S_CHAR 'o'
1594 COM64_S_CHAR 's'
1595 COM64_S_CHAR '3'
1596 COM64_S_CHAR ':'
1597 xor ecx, ecx
1598.loop4:
1599 mov al, cl
1600 out 72h, al
1601 in al, 73h
1602 COM64_S_BYTE_REG eax
1603 COM64_S_CHAR ' '
1604 inc ecx
1605 cmp ecx, 128
1606 jb .loop4
1607
1608 COM64_S_NEWLINE
1609%endif
1610
1611 add rsp, 16
1612 pop rdi
1613 pop rsi
1614 pop rcx
1615 pop rdx
1616 pop rax
1617 ret
1618ENDPROC vmm64On32DumpCmos
1619 %endif
1620
1621%endif ; VBOX_WITH_64ON32_IDT
1622
1623
1624
1625;
1626;
1627; Back to switcher code.
1628; Back to switcher code.
1629; Back to switcher code.
1630;
1631;
1632
1633
1634
1635;;
1636; Trampoline for doing a call when starting the hypervisor execution.
1637;
1638; Push any arguments to the routine.
1639; Push the argument frame size (cArg * 4).
1640; Push the call target (_cdecl convention).
1641; Push the address of this routine.
1642;
1643;
1644BITS 64
1645ALIGNCODE(16)
1646BEGINPROC vmmRCCallTrampoline
1647%ifdef DEBUG_STUFF
1648 COM64_S_CHAR 'c'
1649 COM64_S_CHAR 't'
1650 COM64_S_CHAR '!'
1651%endif
1652 int3
1653ENDPROC vmmRCCallTrampoline
1654
1655
1656;;
1657; The C interface.
1658;
1659BITS 64
1660ALIGNCODE(16)
1661BEGINPROC vmmRCToHost
1662%ifdef DEBUG_STUFF
1663 push rsi
1664 COM_NEWLINE
1665 COM_CHAR 'b'
1666 COM_CHAR 'a'
1667 COM_CHAR 'c'
1668 COM_CHAR 'k'
1669 COM_CHAR '!'
1670 COM_NEWLINE
1671 pop rsi
1672%endif
1673 int3
1674ENDPROC vmmRCToHost
1675
1676;;
1677; vmmRCToHostAsm
1678;
1679; This is an alternative entry point which we'll be using
1680; when we have saved the guest state already or we haven't
1681; been messing with the guest at all.
1682;
1683; @param rbp The virtual cpu number.
1684; @param
1685;
1686BITS 64
1687ALIGNCODE(16)
1688BEGINPROC vmmRCToHostAsm
1689NAME(vmmRCToHostAsmNoReturn):
1690 ;; We're still in the intermediate memory context!
1691
1692 ;;
1693 ;; Switch to compatibility mode, placing ourselves in identity mapped code.
1694 ;;
1695 jmp far [NAME(fpIDEnterTarget) wrt rip]
1696
1697; 16:32 Pointer to IDEnterTarget.
1698NAME(fpIDEnterTarget):
1699 FIXUP FIX_ID_32BIT, 0, NAME(IDExitTarget) - NAME(Start)
1700dd 0
1701 FIXUP FIX_HYPER_CS, 0
1702dd 0
1703
1704 ; We're now on identity mapped pages!
1705ALIGNCODE(16)
1706GLOBALNAME IDExitTarget
1707BITS 32
1708 DEBUG32_CHAR('1')
1709
1710 ; 1. Deactivate long mode by turning off paging.
1711 mov ebx, cr0
1712 and ebx, ~X86_CR0_PG
1713 mov cr0, ebx
1714 DEBUG32_CHAR('2')
1715
1716 ; 2. Load intermediate page table.
1717 FIXUP SWITCHER_FIX_INTER_CR3_HC, 1
1718 mov edx, 0ffffffffh
1719 mov cr3, edx
1720 DEBUG32_CHAR('3')
1721
1722 ; 3. Disable long mode.
1723 mov ecx, MSR_K6_EFER
1724 rdmsr
1725 DEBUG32_CHAR('5')
1726 and eax, ~(MSR_K6_EFER_LME)
1727 wrmsr
1728 DEBUG32_CHAR('6')
1729
1730%ifndef NEED_PAE_ON_HOST
1731 ; 3b. Disable PAE.
1732 mov eax, cr4
1733 and eax, ~X86_CR4_PAE
1734 mov cr4, eax
1735 DEBUG32_CHAR('7')
1736%endif
1737
1738 ; 4. Enable paging.
1739 or ebx, X86_CR0_PG
1740 mov cr0, ebx
1741 jmp short just_a_jump
1742just_a_jump:
1743 DEBUG32_CHAR('8')
1744
1745 ;;
1746 ;; 5. Jump to guest code mapping of the code and load the Hypervisor CS.
1747 ;;
1748 FIXUP FIX_ID_2_HC_NEAR_REL, 1, NAME(ICExitTarget) - NAME(Start)
1749 jmp near NAME(ICExitTarget)
1750
1751 ;;
1752 ;; When we arrive at this label we're at the host mapping of the
1753 ;; switcher code, but with intermediate page tables.
1754 ;;
1755BITS 32
1756ALIGNCODE(16)
1757GLOBALNAME ICExitTarget
1758 DEBUG32_CHAR('9')
1759 ;DEBUG_CMOS_TRASH_AL 70h
1760
1761 ; load the hypervisor data selector into ds & es
1762 FIXUP FIX_HYPER_DS, 1
1763 mov eax, 0ffffh
1764 mov ds, eax
1765 mov es, eax
1766 DEBUG32_CHAR('a')
1767
1768 FIXUP FIX_GC_CPUM_OFF, 1, 0
1769 mov edx, 0ffffffffh
1770 CPUMCPU_FROM_CPUM_WITH_OFFSET edx, ebp
1771
1772 DEBUG32_CHAR('b')
1773 mov esi, [edx + CPUMCPU.Host.cr3]
1774 mov cr3, esi
1775 DEBUG32_CHAR('c')
1776
1777 ;; now we're in host memory context, let's restore regs
1778 FIXUP FIX_HC_CPUM_OFF, 1, 0
1779 mov edx, 0ffffffffh
1780 CPUMCPU_FROM_CPUM_WITH_OFFSET edx, ebp
1781 DEBUG32_CHAR('e')
1782
1783 ; restore the host EFER
1784 mov ebx, edx
1785 mov ecx, MSR_K6_EFER
1786 mov eax, [ebx + CPUMCPU.Host.efer]
1787 mov edx, [ebx + CPUMCPU.Host.efer + 4]
1788 DEBUG32_CHAR('f')
1789 wrmsr
1790 mov edx, ebx
1791 DEBUG32_CHAR('g')
1792
1793 ; activate host gdt and idt
1794 lgdt [edx + CPUMCPU.Host.gdtr]
1795 DEBUG32_CHAR('0')
1796 lidt [edx + CPUMCPU.Host.idtr]
1797 DEBUG32_CHAR('1')
1798
1799 ; Restore TSS selector; must mark it as not busy before using ltr (!)
1800 ; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p)
1801 movzx eax, word [edx + CPUMCPU.Host.tr] ; eax <- TR
1802 and al, 0F8h ; mask away TI and RPL bits, get descriptor offset.
1803 add eax, [edx + CPUMCPU.Host.gdtr + 2] ; eax <- GDTR.address + descriptor offset.
1804 and dword [eax + 4], ~0200h ; clear busy flag (2nd type2 bit)
1805 ltr word [edx + CPUMCPU.Host.tr]
1806
1807 ; activate ldt
1808 DEBUG32_CHAR('2')
1809 lldt [edx + CPUMCPU.Host.ldtr]
1810
1811 ; Restore segment registers
1812 mov eax, [edx + CPUMCPU.Host.ds]
1813 mov ds, eax
1814 mov eax, [edx + CPUMCPU.Host.es]
1815 mov es, eax
1816 mov eax, [edx + CPUMCPU.Host.fs]
1817 mov fs, eax
1818 mov eax, [edx + CPUMCPU.Host.gs]
1819 mov gs, eax
1820 ; restore stack
1821 lss esp, [edx + CPUMCPU.Host.esp]
1822
1823 ; Control registers.
1824 mov ecx, [edx + CPUMCPU.Host.cr4]
1825 mov cr4, ecx
1826 mov ecx, [edx + CPUMCPU.Host.cr0]
1827 mov cr0, ecx
1828 ;mov ecx, [edx + CPUMCPU.Host.cr2] ; assumes this is a waste of time.
1829 ;mov cr2, ecx
1830
1831 ; restore general registers.
1832 mov edi, [edx + CPUMCPU.Host.edi]
1833 mov esi, [edx + CPUMCPU.Host.esi]
1834 mov ebx, [edx + CPUMCPU.Host.ebx]
1835 mov ebp, [edx + CPUMCPU.Host.ebp]
1836
1837 ; store the return code in eax
1838 DEBUG_CMOS_TRASH_AL 79h
1839 mov eax, [edx + CPUMCPU.u32RetCode]
1840 retf
1841ENDPROC vmmRCToHostAsm
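
The TSS restore in ICExitTarget has to clear the descriptor's busy bit before ltr will accept the selector again; that is what 'and dword [eax + 4], ~0200h' does. The same edit as C bit math over a scratch descriptor table (a sketch; the real code edits the live GDT located through CPUMCPU.Host.gdtr):

#include <stdint.h>
#include <stdio.h>

/* Clear the busy flag of a TSS descriptor: the type field sits in bits
 * 8..11 of the descriptor's second dword, and bit 9 distinguishes a busy
 * TSS (type 1011b) from an available one (type 1001b). */
static void tssDescMakeAvailable(uint32_t *pau32Gdt, uint16_t selTr)
{
    uint32_t *pu32Desc = &pau32Gdt[(selTr & ~0x7u) / 4];    /* mask off TI/RPL, index the GDT */
    pu32Desc[1] &= ~0x200u;                                 /* clear the busy flag */
}

int main(void)
{
    uint32_t au32Gdt[16] = { 0 };           /* toy GDT with room for 8 descriptors */
    au32Gdt[2 * 3 + 1] = 0x00008b00;        /* selector 0x18: present, busy 32-bit TSS */
    tssDescMakeAvailable(au32Gdt, 0x18);
    printf("second dword is now %#010x\n", au32Gdt[2 * 3 + 1]);     /* 0x00008900 */
    return 0;
}
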
1842
1843
1844GLOBALNAME End
1845;
1846; The description string (in the text section).
1847;
1848NAME(Description):
1849 db SWITCHER_DESCRIPTION
1850 db 0
1851
1852extern NAME(Relocate)
1853
1854;
1855; End the fixup records.
1856;
1857BEGINDATA
1858 db FIX_THE_END ; final entry.
1859GLOBALNAME FixupsEnd
1860
1861;;
1862; The switcher definition structure.
1863ALIGNDATA(16)
1864GLOBALNAME Def
1865 istruc VMMSWITCHERDEF
1866 at VMMSWITCHERDEF.pvCode, RTCCPTR_DEF NAME(Start)
1867 at VMMSWITCHERDEF.pvFixups, RTCCPTR_DEF NAME(Fixups)
1868 at VMMSWITCHERDEF.pszDesc, RTCCPTR_DEF NAME(Description)
1869 at VMMSWITCHERDEF.pfnRelocate, RTCCPTR_DEF NAME(Relocate)
1870 at VMMSWITCHERDEF.enmType, dd SWITCHER_TYPE
1871 at VMMSWITCHERDEF.cbCode, dd NAME(End) - NAME(Start)
1872 at VMMSWITCHERDEF.offR0ToRawMode, dd NAME(vmmR0ToRawMode) - NAME(Start)
1873 at VMMSWITCHERDEF.offRCToHost, dd NAME(vmmRCToHost) - NAME(Start)
1874 at VMMSWITCHERDEF.offRCCallTrampoline, dd NAME(vmmRCCallTrampoline) - NAME(Start)
1875 at VMMSWITCHERDEF.offRCToHostAsm, dd NAME(vmmRCToHostAsm) - NAME(Start)
1876 at VMMSWITCHERDEF.offRCToHostAsmNoReturn, dd NAME(vmmRCToHostAsmNoReturn) - NAME(Start)
1877 ; disasm help
1878 at VMMSWITCHERDEF.offHCCode0, dd 0
1879 at VMMSWITCHERDEF.cbHCCode0, dd NAME(IDEnterTarget) - NAME(Start)
1880 at VMMSWITCHERDEF.offHCCode1, dd NAME(ICExitTarget) - NAME(Start)
1881 at VMMSWITCHERDEF.cbHCCode1, dd NAME(End) - NAME(ICExitTarget)
1882 at VMMSWITCHERDEF.offIDCode0, dd NAME(IDEnterTarget) - NAME(Start)
1883 at VMMSWITCHERDEF.cbIDCode0, dd NAME(ICEnterTarget) - NAME(IDEnterTarget)
1884 at VMMSWITCHERDEF.offIDCode1, dd NAME(IDExitTarget) - NAME(Start)
1885 at VMMSWITCHERDEF.cbIDCode1, dd NAME(ICExitTarget) - NAME(Start)
1886%ifdef VBOX_WITH_64ON32_IDT ; Hack! Use offGCCode to find the IDT.
1887 at VMMSWITCHERDEF.offGCCode, dd NAME(vmm64On32Idt) - NAME(Start)
1888%else
1889 at VMMSWITCHERDEF.offGCCode, dd 0
1890%endif
1891 at VMMSWITCHERDEF.cbGCCode, dd 0
1892
1893 iend
1894