VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMSwitcher/LegacyandAMD64.mac@ 52224

Last change on this file since 52224 was 49528, checked in by vboxsync, 11 years ago

VMM/VMMSwitcher: Use explicit REX.W prefix for fxsave/fxrstor for 64-bit guests.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 49.4 KB
1; $Id: LegacyandAMD64.mac 49528 2013-11-18 12:46:50Z vboxsync $
2;; @file
3; VMM - World Switchers, 32-bit to AMD64 intermediate context.
4;
5; This is used for running 64-bit guests on 32-bit hosts, not
6; normal raw-mode. All the code involved is contained in this
7; file.
8;
9
10;
11; Copyright (C) 2006-2013 Oracle Corporation
12;
13; This file is part of VirtualBox Open Source Edition (OSE), as
14; available from http://www.virtualbox.org. This file is free software;
15; you can redistribute it and/or modify it under the terms of the GNU
16; General Public License (GPL) as published by the Free Software
17; Foundation, in version 2 as it comes in the "COPYING" file of the
18; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
19; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
20;
21
22
23;*******************************************************************************
24;* Defined Constants And Macros *
25;*******************************************************************************
26;; @note These values are from the HM64ON32OP enum in hm.h.
27%define HM64ON32OP_VMXRCStartVM64 1
28%define HM64ON32OP_SVMRCVMRun64 2
29%define HM64ON32OP_HMRCSaveGuestFPU64 3
30%define HM64ON32OP_HMRCSaveGuestDebug64 4
31%define HM64ON32OP_HMRCTestSwitcher64 5
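;; @note The caller is expected to put one of these values in CPUMCPU.Hyper.eip;
;;       vmmR0ToRawModeAsm loads that field into r9 and dispatches on it below
;;       (see the cmp r9d, HM64ON32OP_* sequence).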
32
33;;
34; This macro is used for storing a debug code in a CMOS location.
35;
36; If we triple fault or something, the debug code can be retrieved and we
37; might have a clue as to where the problem occurred. The code is currently
38; using CMOS register 3 in the 2nd bank as this _seems_ to be unused on my
39; Extreme4 X79 ASRock mainboard.
40;
41; @param %1 The debug code (byte)
42; @note Trashes AL.
43;
44%macro DEBUG_CMOS_TRASH_AL 1
45%ifdef VBOX_WITH_64ON32_CMOS_DEBUG
46 mov al, 3
47 out 72h, al
48 mov al, %1
49 out 73h, al
50 in al, 73h
51%endif
52%endmacro
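;;
; Illustrative counterpart (not assembled): reading the stored debug code back
; out of CMOS bank 2, register 3, through the same index/data port pair
; (72h/73h) used by the macro above. Ring-0 or equivalent I/O access is assumed.
;
%if 0
 mov al, 3 ; select register 3 in the 2nd CMOS bank
 out 72h, al
 in al, 73h ; al = last debug code written by the switcher
%endif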
53
54;;
55; Version of DEBUG_CMOS_TRASH_AL that saves AL on the stack and therefore
56; doesn't trash any registers.
57;
58%macro DEBUG_CMOS_STACK64 1
59%ifdef VBOX_WITH_64ON32_CMOS_DEBUG
60 push rax
61 DEBUG_CMOS_TRASH_AL %1
62 pop rax
63%endif
64%endmacro
65
66;;
67; Version of DEBUG_CMOS_TRASH_AL that saves AL on the stack and therefore
68; doesn't trash any registers.
69;
70%macro DEBUG_CMOS_STACK32 1
71%ifdef VBOX_WITH_64ON32_CMOS_DEBUG
72 push eax
73 DEBUG_CMOS_TRASH_AL %1
74 pop eax
75%endif
76%endmacro
77
78
79;; Stubs for making OS/2 compile (though not work).
80%ifdef RT_OS_OS2 ;; @todo fix OMF support in yasm and kick nasm out completely.
81 %macro vmwrite 2,
82 int3
83 %endmacro
84 %define vmlaunch int3
85 %define vmresume int3
86 %define vmsave int3
87 %define vmload int3
88 %define vmrun int3
89 %define clgi int3
90 %define stgi int3
91 %macro invlpga 2,
92 int3
93 %endmacro
94%endif
95
96;; Debug options
97;%define DEBUG_STUFF 1
98;%define STRICT_IF 1
99
100
101;*******************************************************************************
102;* Header Files *
103;*******************************************************************************
104%include "VBox/asmdefs.mac"
105%include "iprt/x86.mac"
106%include "VBox/err.mac"
107%include "VBox/apic.mac"
108
109%include "VBox/vmm/cpum.mac"
110%include "VBox/vmm/stam.mac"
111%include "VBox/vmm/vm.mac"
112%include "VBox/vmm/hm_vmx.mac"
113%include "CPUMInternal.mac"
114%include "HMInternal.mac"
115%include "VMMSwitcher.mac"
116
117
118;
119; Start the fixup records
120; We collect the fixups in the .data section as we go along
121; It is therefore VITAL that no-one is using the .data section
122; for anything else between 'Start' and 'End'.
123;
124BEGINDATA
125GLOBALNAME Fixups
126
127
128
129BEGINCODE
130GLOBALNAME Start
131
132BITS 32
133
134;;
135; The C interface.
136; @param [esp + 04h] Param 1 - VM handle
137; @param [esp + 08h] Param 2 - Offset from VM::CPUM to the CPUMCPU
138; structure for the calling EMT.
139;
140BEGINPROC vmmR0ToRawMode
141%ifdef DEBUG_STUFF
142 COM32_S_NEWLINE
143 COM32_S_CHAR '^'
144%endif
145
146%ifdef VBOX_WITH_STATISTICS
147 ;
148 ; Switcher stats.
149 ;
150 FIXUP FIX_HC_VM_OFF, 1, VM.StatSwitcherToGC
151 mov edx, 0ffffffffh
152 STAM_PROFILE_ADV_START edx
153%endif
154
155 push ebp
156 mov ebp, [esp + 12] ; CPUMCPU offset
157
158 ; turn off interrupts
159 pushf
160 cli
161 ;DEBUG_CMOS_STACK32 10h
162
163 ;
164 ; Call worker.
165 ;
166 FIXUP FIX_HC_CPUM_OFF, 1, 0
167 mov edx, 0ffffffffh
168 push cs ; allow for far return and restore cs correctly.
169 call NAME(vmmR0ToRawModeAsm)
170
171%ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
172 ; Restore blocked Local APIC NMI vectors
173 ; Do this here to ensure the host CS is already restored
174 mov ecx, [edx + CPUMCPU.fApicDisVectors]
175 test ecx, ecx
176 jz gth_apic_done
177 cmp byte [edx + CPUMCPU.fX2Apic], 1
178 je gth_x2apic
179
180 mov edx, [edx + CPUMCPU.pvApicBase]
181 shr ecx, 1
182 jnc gth_nolint0
183 and dword [edx + APIC_REG_LVT_LINT0], ~APIC_REG_LVT_MASKED
184gth_nolint0:
185 shr ecx, 1
186 jnc gth_nolint1
187 and dword [edx + APIC_REG_LVT_LINT1], ~APIC_REG_LVT_MASKED
188gth_nolint1:
189 shr ecx, 1
190 jnc gth_nopc
191 and dword [edx + APIC_REG_LVT_PC], ~APIC_REG_LVT_MASKED
192gth_nopc:
193 shr ecx, 1
194 jnc gth_notherm
195 and dword [edx + APIC_REG_LVT_THMR], ~APIC_REG_LVT_MASKED
196gth_notherm:
197 jmp gth_apic_done
198
199gth_x2apic:
200 ;DEBUG_CMOS_STACK32 7ch
201 push eax ; save eax
202 push ebx ; save it for fApicDisVectors
203 push edx ; save edx just in case.
204 mov ebx, ecx ; ebx = fApicDisVectors, ecx free for MSR use
205 shr ebx, 1
206 jnc gth_x2_nolint0
207 mov ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_LINT0 >> 4)
208 rdmsr
209 and eax, ~APIC_REG_LVT_MASKED
210 wrmsr
211gth_x2_nolint0:
212 shr ebx, 1
213 jnc gth_x2_nolint1
214 mov ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_LINT1 >> 4)
215 rdmsr
216 and eax, ~APIC_REG_LVT_MASKED
217 wrmsr
218gth_x2_nolint1:
219 shr ebx, 1
220 jnc gth_x2_nopc
221 mov ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_PC >> 4)
222 rdmsr
223 and eax, ~APIC_REG_LVT_MASKED
224 wrmsr
225gth_x2_nopc:
226 shr ebx, 1
227 jnc gth_x2_notherm
228 mov ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_THMR >> 4)
229 rdmsr
230 and eax, ~APIC_REG_LVT_MASKED
231 wrmsr
232gth_x2_notherm:
233 pop edx
234 pop ebx
235 pop eax
236
237gth_apic_done:
238%endif
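;;
; Worked example of the x2APIC MSR indexing used above (not assembled): the MSR
; for an xAPIC register is MSR_IA32_X2APIC_START (800h) plus the register's MMIO
; offset divided by 16. With the usual LVT LINT0 offset of 350h this gives:
;
%if 0
 mov ecx, 800h + (350h >> 4) ; = 835h, i.e. IA32_X2APIC_LVT_LINT0
 rdmsr
 and eax, ~APIC_REG_LVT_MASKED ; unmask the LVT entry again
 wrmsr
%endif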
239
240 ; restore original flags
241 ;DEBUG_CMOS_STACK32 7eh
242 popf
243 pop ebp
244
245%ifdef VBOX_WITH_STATISTICS
246 ;
247 ; Switcher stats.
248 ;
249 FIXUP FIX_HC_VM_OFF, 1, VM.StatSwitcherToHC
250 mov edx, 0ffffffffh
251 STAM_PROFILE_ADV_STOP edx
252%endif
253
254 ;DEBUG_CMOS_STACK32 7fh
255 ret
256
257ENDPROC vmmR0ToRawMode
258
259; *****************************************************************************
260; vmmR0ToRawModeAsm
261;
262; Phase one of the switch from host to guest context (host MMU context)
263;
264; INPUT:
265; - edx virtual address of CPUM structure (valid in host context)
266; - ebp offset of the CPUMCPU structure relative to CPUM.
267;
268; USES/DESTROYS:
269; - eax, ecx, edx, esi
270;
271; ASSUMPTION:
272; - current CS and DS selectors are wide open
273;
274; *****************************************************************************
275ALIGNCODE(16)
276BEGINPROC vmmR0ToRawModeAsm
277 ;;
278 ;; Save CPU host context
279 ;; Skip eax, edx and ecx as these are not preserved over calls.
280 ;;
281 CPUMCPU_FROM_CPUM_WITH_OFFSET edx, ebp
282%ifdef VBOX_WITH_CRASHDUMP_MAGIC
283 ; phys address of scratch page
284 mov eax, dword [edx + CPUMCPU.Guest.dr + 4*8]
285 mov cr2, eax
286
287 mov dword [edx + CPUMCPU.Guest.dr + 4*8], 1
288%endif
289
290 ; general registers.
291 mov [edx + CPUMCPU.Host.ebx], ebx
292 mov [edx + CPUMCPU.Host.edi], edi
293 mov [edx + CPUMCPU.Host.esi], esi
294 mov [edx + CPUMCPU.Host.esp], esp
295 mov [edx + CPUMCPU.Host.ebp], ebp ; offCpumCpu!
296 ; selectors.
297 mov [edx + CPUMCPU.Host.ds], ds
298 mov [edx + CPUMCPU.Host.es], es
299 mov [edx + CPUMCPU.Host.fs], fs
300 mov [edx + CPUMCPU.Host.gs], gs
301 mov [edx + CPUMCPU.Host.ss], ss
302 ; special registers.
303 DEBUG32_S_CHAR('s')
304 DEBUG32_S_CHAR(';')
305 sldt [edx + CPUMCPU.Host.ldtr]
306 sidt [edx + CPUMCPU.Host.idtr]
307 sgdt [edx + CPUMCPU.Host.gdtr]
308 str [edx + CPUMCPU.Host.tr]
309
310%ifdef VBOX_WITH_CRASHDUMP_MAGIC
311 mov dword [edx + CPUMCPU.Guest.dr + 4*8], 2
312%endif
313
314%ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
315 DEBUG32_S_CHAR('f')
316 DEBUG32_S_CHAR(';')
317 cmp byte [edx + CPUMCPU.pvApicBase], 1
318 je htg_x2apic
319
320 mov ebx, [edx + CPUMCPU.pvApicBase]
321 or ebx, ebx
322 jz htg_apic_done
323 mov eax, [ebx + APIC_REG_LVT_LINT0]
324 mov ecx, eax
325 and ecx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
326 cmp ecx, APIC_REG_LVT_MODE_NMI
327 jne htg_nolint0
328 or edi, 0x01
329 or eax, APIC_REG_LVT_MASKED
330 mov [ebx + APIC_REG_LVT_LINT0], eax
331 mov eax, [ebx + APIC_REG_LVT_LINT0] ; write completion
332htg_nolint0:
333 mov eax, [ebx + APIC_REG_LVT_LINT1]
334 mov ecx, eax
335 and ecx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
336 cmp ecx, APIC_REG_LVT_MODE_NMI
337 jne htg_nolint1
338 or edi, 0x02
339 or eax, APIC_REG_LVT_MASKED
340 mov [ebx + APIC_REG_LVT_LINT1], eax
341 mov eax, [ebx + APIC_REG_LVT_LINT1] ; write completion
342htg_nolint1:
343 mov eax, [ebx + APIC_REG_LVT_PC]
344 mov ecx, eax
345 and ecx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
346 cmp ecx, APIC_REG_LVT_MODE_NMI
347 jne htg_nopc
348 or edi, 0x04
349 or eax, APIC_REG_LVT_MASKED
350 mov [ebx + APIC_REG_LVT_PC], eax
351 mov eax, [ebx + APIC_REG_LVT_PC] ; write completion
352htg_nopc:
353 mov eax, [ebx + APIC_REG_VERSION]
354 shr eax, 16
355 cmp al, 5
356 jb htg_notherm
357 mov eax, [ebx + APIC_REG_LVT_THMR]
358 mov ecx, eax
359 and ecx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
360 cmp ecx, APIC_REG_LVT_MODE_NMI
361 jne htg_notherm
362 or edi, 0x08
363 or eax, APIC_REG_LVT_MASKED
364 mov [ebx + APIC_REG_LVT_THMR], eax
365 mov eax, [ebx + APIC_REG_LVT_THMR] ; write completion
366htg_notherm:
367 mov [edx + CPUMCPU.fApicDisVectors], edi
368 jmp htg_apic_done
369
370htg_x2apic:
371 mov esi, edx ; Save edx.
372 xor edi, edi ; fApicDisVectors
373
374 mov ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_LINT0 >> 4)
375 rdmsr
376 mov ebx, eax
377 and ebx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
378 cmp ebx, APIC_REG_LVT_MODE_NMI
379 jne htg_x2_nolint0
380 or edi, 0x01
381 or eax, APIC_REG_LVT_MASKED
382 wrmsr
383htg_x2_nolint0:
384 mov ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_LINT1 >> 4)
385 rdmsr
386 mov ebx, eax
387 and ebx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
388 cmp ebx, APIC_REG_LVT_MODE_NMI
389 jne htg_x2_nolint1
390 or edi, 0x02
391 or eax, APIC_REG_LVT_MASKED
392 wrmsr
393htg_x2_nolint1:
394 mov ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_PC >> 4)
395 rdmsr
396 mov ebx, eax
397 and ebx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
398 cmp ebx, APIC_REG_LVT_MODE_NMI
399 jne htg_x2_nopc
400 or edi, 0x04
401 or eax, APIC_REG_LVT_MASKED
402 wrmsr
403htg_x2_nopc:
404 mov ecx, MSR_IA32_X2APIC_START + (APIC_REG_VERSION >> 4)
405 rdmsr
406 shr eax, 16
407 cmp al, 5
408 jb htg_x2_notherm
409 mov ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_THMR >> 4)
410 rdmsr
411 mov ebx, eax
412 and ebx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
413 cmp ebx, APIC_REG_LVT_MODE_NMI
414 jne htg_x2_notherm
415 or edi, 0x08
416 or eax, APIC_REG_LVT_MASKED
417 wrmsr
418htg_x2_notherm:
419 mov edx, esi ; Restore edx.
420 mov [edx + CPUMCPU.fApicDisVectors], edi
421
422htg_apic_done:
423%endif
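 ;
 ; For reference, the fApicDisVectors bit layout established above and consumed
 ; again by the restore code in vmmR0ToRawMode:
 ;   bit 0 = LVT LINT0 masked, bit 1 = LVT LINT1 masked,
 ;   bit 2 = LVT performance counter masked, bit 3 = LVT thermal sensor masked.
 ;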
424
425 ; control registers.
426 mov eax, cr0
427 mov [edx + CPUMCPU.Host.cr0], eax
 428 ; Skip cr2; assume the host OS doesn't stuff things in cr2. (safe)
429 mov eax, cr3
430 mov [edx + CPUMCPU.Host.cr3], eax
431 mov esi, cr4 ; esi = cr4, we'll modify it further down.
432 mov [edx + CPUMCPU.Host.cr4], esi
433
434 DEBUG32_S_CHAR('c')
435 DEBUG32_S_CHAR(';')
436
437 ; save the host EFER msr
438 mov ebx, edx
439 mov ecx, MSR_K6_EFER
440 rdmsr
441 mov [ebx + CPUMCPU.Host.efer], eax
442 mov [ebx + CPUMCPU.Host.efer + 4], edx
443 mov edx, ebx
444 DEBUG32_S_CHAR('e')
445 DEBUG32_S_CHAR(';')
446
447%ifdef VBOX_WITH_CRASHDUMP_MAGIC
448 mov dword [edx + CPUMCPU.Guest.dr + 4*8], 3
449%endif
450
 451 ; Load new gdt so we can do a far jump after going into 64-bit mode
452 ;DEBUG_CMOS_STACK32 16h
453 lgdt [edx + CPUMCPU.Hyper.gdtr]
454
455 DEBUG32_S_CHAR('g')
456 DEBUG32_S_CHAR('!')
457%ifdef VBOX_WITH_CRASHDUMP_MAGIC
458 mov dword [edx + CPUMCPU.Guest.dr + 4*8], 4
459%endif
460
461 ;;
462 ;; Clean up CR4. X86_CR4_PGE, X86_CR4_PCE, X86_CR4_PCIDE (not really
463 ;; relevant for 32-bit, but whatever) and X86_CR4_VMXE must be cleared.
464 ;;
465 and esi, X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE | X86_CR4_PSE | X86_CR4_PAE \
466 | X86_CR4_MCE | X86_CR4_OSFSXR | X86_CR4_OSXMMEEXCPT | X86_CR4_SMXE | X86_CR4_OSXSAVE
467 mov cr4, esi
468
469 ;;
470 ;; Load Intermediate memory context.
471 ;;
472 FIXUP SWITCHER_FIX_INTER_CR3_HC, 1
473 mov eax, 0ffffffffh
474 mov cr3, eax
475 DEBUG32_CHAR('?')
476%ifdef VBOX_WITH_64ON32_CMOS_DEBUG
477 DEBUG_CMOS_TRASH_AL 17h
478%endif
479
480 ;;
481 ;; Jump to identity mapped location
482 ;;
483 FIXUP FIX_HC_2_ID_NEAR_REL, 1, NAME(IDEnterTarget) - NAME(Start)
484 jmp near NAME(IDEnterTarget)
485
486
487 ; We're now on identity mapped pages!
488ALIGNCODE(16)
489GLOBALNAME IDEnterTarget
490 DEBUG32_CHAR('1')
491 DEBUG_CMOS_TRASH_AL 19h
492
493 ; 1. Disable paging.
494 mov ebx, cr0
495 and ebx, ~X86_CR0_PG
496 mov cr0, ebx
497 DEBUG32_CHAR('2')
498 DEBUG_CMOS_TRASH_AL 1ah
499
500%ifdef VBOX_WITH_CRASHDUMP_MAGIC
501 mov eax, cr2
502 mov dword [eax], 3
503%endif
504
505 ; 2. Enable PAE.
506 mov ecx, cr4
507 or ecx, X86_CR4_PAE
508 mov cr4, ecx
509 DEBUG_CMOS_TRASH_AL 1bh
510
511 ; 3. Load long mode intermediate CR3.
512 FIXUP FIX_INTER_AMD64_CR3, 1
513 mov ecx, 0ffffffffh
514 mov cr3, ecx
515 DEBUG32_CHAR('3')
516 DEBUG_CMOS_TRASH_AL 1ch
517
518%ifdef VBOX_WITH_CRASHDUMP_MAGIC
519 mov eax, cr2
520 mov dword [eax], 4
521%endif
522
523 ; 4. Enable long mode.
524 mov esi, edx
525 mov ecx, MSR_K6_EFER
526 rdmsr
527 FIXUP FIX_EFER_OR_MASK, 1
528 or eax, 0ffffffffh
529 and eax, ~(MSR_K6_EFER_FFXSR) ; turn off fast fxsave/fxrstor (skipping xmm regs)
530 wrmsr
531 mov edx, esi
532 DEBUG32_CHAR('4')
533 DEBUG_CMOS_TRASH_AL 1dh
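 ; For reference: MSR_K6_EFER_LME is bit 8 (100h) and MSR_K6_EFER_FFXSR is
 ; bit 14 (4000h), so the FIX_EFER_OR_MASK value is assumed to contain at
 ; least the LME bit, while FFXSR is stripped again above.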
534
535%ifdef VBOX_WITH_CRASHDUMP_MAGIC
536 mov eax, cr2
537 mov dword [eax], 5
538%endif
539
540 ; 5. Enable paging.
541 or ebx, X86_CR0_PG
542 ; Disable ring 0 write protection too
543 and ebx, ~X86_CR0_WRITE_PROTECT
544 mov cr0, ebx
545 DEBUG32_CHAR('5')
546
547 ; Jump from compatibility mode to 64-bit mode.
548 FIXUP FIX_ID_FAR32_TO_64BIT_MODE, 1, NAME(IDEnter64Mode) - NAME(Start)
549 jmp 0ffffh:0fffffffeh
550
551 ;
552 ; We're in 64-bit mode (ds, ss, es, fs, gs are all bogus).
553BITS 64
554ALIGNCODE(16)
555NAME(IDEnter64Mode):
556 DEBUG64_CHAR('6')
557 DEBUG_CMOS_TRASH_AL 1eh
558 jmp [NAME(pICEnterTarget) wrt rip]
559
560; 64-bit jump target
561NAME(pICEnterTarget):
562FIXUP FIX_HC_64BIT_NOCHECK, 0, NAME(ICEnterTarget) - NAME(Start)
563dq 0ffffffffffffffffh
564
565; 64-bit pCpum address.
566NAME(pCpumIC):
567FIXUP FIX_GC_64_BIT_CPUM_OFF, 0, 0
568dq 0ffffffffffffffffh
569
570%ifdef VBOX_WITH_CRASHDUMP_MAGIC
571NAME(pMarker):
572db 'Switch_marker'
573%endif
574
575 ;
 576 ; When we arrive here we're in 64-bit mode in the intermediate context
577 ;
578ALIGNCODE(16)
579GLOBALNAME ICEnterTarget
580 ;DEBUG_CMOS_TRASH_AL 1fh
581 ; Load CPUM pointer into rdx
582 mov rdx, [NAME(pCpumIC) wrt rip]
583 CPUMCPU_FROM_CPUM_WITH_OFFSET edx, ebp
584
585 mov rax, cs
586 mov ds, rax
587 mov es, rax
588
589 ; Invalidate fs & gs
590 mov rax, 0
591 mov fs, rax
592 mov gs, rax
593
594%ifdef VBOX_WITH_CRASHDUMP_MAGIC
595 mov dword [rdx + CPUMCPU.Guest.dr + 4*8], 5
596%endif
597
598 ; Setup stack.
599 DEBUG64_CHAR('7')
600 mov rsp, 0
601 mov eax, [rdx + CPUMCPU.Hyper.ss.Sel]
602 mov ss, ax
603 mov esp, [rdx + CPUMCPU.Hyper.esp]
604
605%ifdef VBOX_WITH_CRASHDUMP_MAGIC
606 mov dword [rdx + CPUMCPU.Guest.dr + 4*8], 6
607%endif
608
609%ifdef VBOX_WITH_64ON32_IDT
610 ; Set up emergency trap handlers.
611 lidt [rdx + CPUMCPU.Hyper.idtr]
612%endif
613
614 ; load the hypervisor function address
615 mov r9, [rdx + CPUMCPU.Hyper.eip]
616 DEBUG64_S_CHAR('8')
617
618 ; Check if we need to restore the guest FPU state
619 mov esi, [rdx + CPUMCPU.fUseFlags] ; esi == use flags.
620 test esi, CPUM_SYNC_FPU_STATE
621 jz near htg_fpu_no
622
623%ifdef VBOX_WITH_CRASHDUMP_MAGIC
624 mov dword [rdx + CPUMCPU.Guest.dr + 4*8], 7
625%endif
626
627 mov rax, cr0
628 mov rcx, rax ; save old CR0
629 and rax, ~(X86_CR0_TS | X86_CR0_EM)
630 mov cr0, rax
 631 ; Use an explicit REX.W prefix so fxrstor uses the 64-bit image format (full 64-bit FPU IP/DP). See @bugref{6398}.
632 o64 fxrstor [rdx + CPUMCPU.Guest.fpu]
633 mov cr0, rcx ; and restore old CR0 again
634
635 and dword [rdx + CPUMCPU.fUseFlags], ~CPUM_SYNC_FPU_STATE
636
637htg_fpu_no:
638 ; Check if we need to restore the guest debug state
639 test esi, CPUM_SYNC_DEBUG_REGS_GUEST | CPUM_SYNC_DEBUG_REGS_HYPER
640 jz htg_debug_done
641
642%ifdef VBOX_WITH_CRASHDUMP_MAGIC
643 mov dword [rdx + CPUMCPU.Guest.dr + 4*8], 8
644%endif
645 test esi, CPUM_SYNC_DEBUG_REGS_HYPER
646 jnz htg_debug_hyper
647
648 ; Guest values in DRx, letting the guest access them directly.
649 mov rax, qword [rdx + CPUMCPU.Guest.dr + 0*8]
650 mov dr0, rax
651 mov rax, qword [rdx + CPUMCPU.Guest.dr + 1*8]
652 mov dr1, rax
653 mov rax, qword [rdx + CPUMCPU.Guest.dr + 2*8]
654 mov dr2, rax
655 mov rax, qword [rdx + CPUMCPU.Guest.dr + 3*8]
656 mov dr3, rax
657 mov rax, qword [rdx + CPUMCPU.Guest.dr + 6*8]
658 mov dr6, rax ; not required for AMD-V
659
660 and dword [rdx + CPUMCPU.fUseFlags], ~CPUM_SYNC_DEBUG_REGS_GUEST
661 or dword [rdx + CPUMCPU.fUseFlags], CPUM_USED_DEBUG_REGS_GUEST
662 jmp htg_debug_done
663
664htg_debug_hyper:
665 ; Combined values in DRx, intercepting all accesses.
666 mov rax, qword [rdx + CPUMCPU.Hyper.dr + 0*8]
667 mov dr0, rax
668 mov rax, qword [rdx + CPUMCPU.Hyper.dr + 1*8]
669 mov dr1, rax
670 mov rax, qword [rdx + CPUMCPU.Hyper.dr + 2*8]
671 mov dr2, rax
672 mov rax, qword [rdx + CPUMCPU.Hyper.dr + 3*8]
673 mov dr3, rax
674 mov rax, qword [rdx + CPUMCPU.Hyper.dr + 6*8]
675 mov dr6, rax ; not required for AMD-V
676
677 and dword [rdx + CPUMCPU.fUseFlags], ~CPUM_SYNC_DEBUG_REGS_HYPER
678 or dword [rdx + CPUMCPU.fUseFlags], CPUM_USED_DEBUG_REGS_HYPER
679
680htg_debug_done:
681
682%ifdef VBOX_WITH_CRASHDUMP_MAGIC
683 mov dword [rdx + CPUMCPU.Guest.dr + 4*8], 9
684%endif
685
686 ;
687 ; "Call" the specified helper function.
688 ;
689
690 ; parameter for all helper functions (pCtx)
691 DEBUG64_CHAR('9')
692 lea rsi, [rdx + CPUMCPU.Guest.fpu]
693 lea rax, [htg_return wrt rip]
694 push rax ; return address
695
696 cmp r9d, HM64ON32OP_VMXRCStartVM64
697 jz NAME(VMXRCStartVM64)
698 cmp r9d, HM64ON32OP_SVMRCVMRun64
699 jz NAME(SVMRCVMRun64)
700 cmp r9d, HM64ON32OP_HMRCSaveGuestFPU64
701 jz NAME(HMRCSaveGuestFPU64)
702 cmp r9d, HM64ON32OP_HMRCSaveGuestDebug64
703 jz NAME(HMRCSaveGuestDebug64)
704 cmp r9d, HM64ON32OP_HMRCTestSwitcher64
705 jz NAME(HMRCTestSwitcher64)
706 mov eax, VERR_HM_INVALID_HM64ON32OP
707htg_return:
708 DEBUG64_CHAR('r')
709
710 ; Load CPUM pointer into rdx
711 mov rdx, [NAME(pCpumIC) wrt rip]
712 CPUMCPU_FROM_CPUM_WITH_OFFSET edx, ebp
713
714%ifdef VBOX_WITH_CRASHDUMP_MAGIC
715 mov dword [rdx + CPUMCPU.Guest.dr + 4*8], 10
716%endif
717
718 ; Save the return code
719 mov dword [rdx + CPUMCPU.u32RetCode], eax
720
721 ; now let's switch back
722 jmp NAME(vmmRCToHostAsm) ; rax = returncode.
723
724ENDPROC vmmR0ToRawModeAsm
725
726
727
728
729;
730;
731; HM code (used to be HMRCA.asm at one point).
732; HM code (used to be HMRCA.asm at one point).
733; HM code (used to be HMRCA.asm at one point).
734;
735;
736
737;; @def MYPUSHSEGS
738; Macro saving all segment registers on the stack.
739; @param 1 full width register name
740%macro MYPUSHSEGS 1
741 mov %1, es
742 push %1
743 mov %1, ds
744 push %1
745%endmacro
746
747;; @def MYPOPSEGS
748; Macro restoring all segment registers on the stack
749; @param 1 full width register name
750%macro MYPOPSEGS 1
751 pop %1
752 mov ds, %1
753 pop %1
754 mov es, %1
755%endmacro
756
757
758;/**
759; * Prepares for and executes VMLAUNCH/VMRESUME (64-bit guest mode)
760; *
761; * @returns VBox status code
762; * @param HCPhysCpuPage VMXON physical address [rsp+8]
763; * @param HCPhysVmcs VMCS physical address [rsp+16]
764; * @param pCache VMCS cache [rsp+24]
765; * @param pCtx Guest context (rsi)
766; */
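;
; Stack layout after the 'push rbp / mov rbp, rsp' prologue below, which is why
; the parameters are addressed as [rbp + xx + 8] throughout:
;   [rbp + 24 + 8]   pCache
;   [rbp + 16 + 8]   HCPhysVmcs
;   [rbp +  8 + 8]   HCPhysCpuPage
;   [rbp +  8]       return address
;   [rbp]            saved rbp
;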
767BEGINPROC VMXRCStartVM64
768 push rbp
769 mov rbp, rsp
770 DEBUG_CMOS_STACK64 20h
771
772 ; Make sure VT-x instructions are allowed.
773 mov rax, cr4
774 or rax, X86_CR4_VMXE
775 mov cr4, rax
776
777 ; Enter VMX Root Mode.
778 vmxon [rbp + 8 + 8]
779 jnc .vmxon_success
780 mov rax, VERR_VMX_INVALID_VMXON_PTR
781 jmp .vmstart64_vmxon_failed
782
783.vmxon_success:
784 jnz .vmxon_success2
785 mov rax, VERR_VMX_VMXON_FAILED
786 jmp .vmstart64_vmxon_failed
787
788.vmxon_success2:
789 ; Activate the VMCS pointer
790 vmptrld [rbp + 16 + 8]
791 jnc .vmptrld_success
792 mov rax, VERR_VMX_INVALID_VMCS_PTR
793 jmp .vmstart64_vmxoff_end
794
795.vmptrld_success:
796 jnz .vmptrld_success2
797 mov rax, VERR_VMX_VMPTRLD_FAILED
798 jmp .vmstart64_vmxoff_end
799
800.vmptrld_success2:
801
802 ; Save the VMCS pointer on the stack
803 push qword [rbp + 16 + 8];
804
805 ; Save segment registers.
806 MYPUSHSEGS rax
807
808%ifdef VMX_USE_CACHED_VMCS_ACCESSES
809 ; Flush the VMCS write cache first (before any other vmreads/vmwrites!).
810 mov rbx, [rbp + 24 + 8] ; pCache
811
812 %ifdef VBOX_WITH_CRASHDUMP_MAGIC
813 mov qword [rbx + VMCSCACHE.uPos], 2
814 %endif
815
816 %ifdef DEBUG
817 mov rax, [rbp + 8 + 8] ; HCPhysCpuPage
818 mov [rbx + VMCSCACHE.TestIn.HCPhysCpuPage], rax
819 mov rax, [rbp + 16 + 8] ; HCPhysVmcs
820 mov [rbx + VMCSCACHE.TestIn.HCPhysVmcs], rax
821 mov [rbx + VMCSCACHE.TestIn.pCache], rbx
822 mov [rbx + VMCSCACHE.TestIn.pCtx], rsi
823 %endif
824
825 mov ecx, [rbx + VMCSCACHE.Write.cValidEntries]
826 cmp ecx, 0
827 je .no_cached_writes
828 mov rdx, rcx
829 mov rcx, 0
830 jmp .cached_write
831
832ALIGN(16)
833.cached_write:
834 mov eax, [rbx + VMCSCACHE.Write.aField + rcx*4]
835 vmwrite rax, qword [rbx + VMCSCACHE.Write.aFieldVal + rcx*8]
836 inc rcx
837 cmp rcx, rdx
838 jl .cached_write
839
840 mov dword [rbx + VMCSCACHE.Write.cValidEntries], 0
841.no_cached_writes:
842
843 %ifdef VBOX_WITH_CRASHDUMP_MAGIC
844 mov qword [rbx + VMCSCACHE.uPos], 3
845 %endif
846 ; Save the pCache pointer.
847 push rbx
848%endif
849
850 ; Save the host state that's relevant in the temporary 64-bit mode.
851 mov rdx, cr0
852 mov eax, VMX_VMCS_HOST_CR0
853 vmwrite rax, rdx
854
855 mov rdx, cr3
856 mov eax, VMX_VMCS_HOST_CR3
857 vmwrite rax, rdx
858
859 mov rdx, cr4
860 mov eax, VMX_VMCS_HOST_CR4
861 vmwrite rax, rdx
862
863 mov rdx, cs
864 mov eax, VMX_VMCS_HOST_FIELD_CS
865 vmwrite rax, rdx
866
867 mov rdx, ss
868 mov eax, VMX_VMCS_HOST_FIELD_SS
869 vmwrite rax, rdx
870
871%if 0 ; Another experiment regarding triple faults... Seems not to be necessary.
872 sub rsp, 16
873 str [rsp]
874 movsx rdx, word [rsp]
875 mov eax, VMX_VMCS_HOST_FIELD_TR
876 vmwrite rax, rdx
877 add rsp, 16
878%endif
879
880 sub rsp, 16
881 sgdt [rsp + 6] ; (The 64-bit base should be aligned, not the word.)
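 ; (In 64-bit mode sgdt stores a 2-byte limit followed by an 8-byte linear
 ; base; storing at rsp+6 puts the base at rsp+8, naturally aligned, and the
 ; vmwrite below reads it from rsp + 6 + 2.)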
882 mov eax, VMX_VMCS_HOST_GDTR_BASE
883 vmwrite rax, [rsp + 6 + 2]
884 add rsp, 16
885
886%ifdef VBOX_WITH_64ON32_IDT
887 sub rsp, 16
888 sidt [rsp + 6]
889 mov eax, VMX_VMCS_HOST_IDTR_BASE
890 vmwrite rax, [rsp + 6 + 2] ; [rsi + CPUMCPU.Hyper.idtr + 2] - why doesn't this work?
891 add rsp, 16
892 ;call NAME(vmm64On32PrintIdtr)
893%endif
894
895%ifdef VBOX_WITH_CRASHDUMP_MAGIC
896 mov qword [rbx + VMCSCACHE.uPos], 4
897%endif
898
899 ; Hopefully we can ignore TR (we restore it anyway on the way back to 32-bit mode).
900
901 ; First we have to save some final CPU context registers.
902 lea rdx, [.vmlaunch64_done wrt rip]
903 mov rax, VMX_VMCS_HOST_RIP ; Return address (too difficult to continue after VMLAUNCH?).
904 vmwrite rax, rdx
905 ; Note: assumes success!
906
907 ; Manual save and restore:
908 ; - General purpose registers except RIP, RSP
909 ;
910 ; Trashed:
911 ; - CR2 (we don't care)
912 ; - LDTR (reset to 0)
913 ; - DRx (presumably not changed at all)
914 ; - DR7 (reset to 0x400)
915 ; - EFLAGS (reset to RT_BIT(1); not relevant)
916
917%ifdef VBOX_WITH_CRASHDUMP_MAGIC
918 mov qword [rbx + VMCSCACHE.uPos], 5
919%endif
920
921 ; Save the pCtx pointer
922 push rsi
923
924 ; Load CR2 if necessary (may be expensive as writing CR2 is a synchronizing instruction).
925 mov rbx, qword [rsi + CPUMCTX.cr2]
926 mov rdx, cr2
927 cmp rdx, rbx
928 je .skipcr2write64
929 mov cr2, rbx
930
931.skipcr2write64:
932 mov eax, VMX_VMCS_HOST_RSP
933 vmwrite rax, rsp
934 ; Note: assumes success!
935 ; Don't mess with ESP anymore!!!
936
937 ; Save Guest's general purpose registers.
938 mov rax, qword [rsi + CPUMCTX.eax]
939 mov rbx, qword [rsi + CPUMCTX.ebx]
940 mov rcx, qword [rsi + CPUMCTX.ecx]
941 mov rdx, qword [rsi + CPUMCTX.edx]
942 mov rbp, qword [rsi + CPUMCTX.ebp]
943 mov r8, qword [rsi + CPUMCTX.r8]
944 mov r9, qword [rsi + CPUMCTX.r9]
945 mov r10, qword [rsi + CPUMCTX.r10]
946 mov r11, qword [rsi + CPUMCTX.r11]
947 mov r12, qword [rsi + CPUMCTX.r12]
948 mov r13, qword [rsi + CPUMCTX.r13]
949 mov r14, qword [rsi + CPUMCTX.r14]
950 mov r15, qword [rsi + CPUMCTX.r15]
951
952 ; Save rdi & rsi.
953 mov rdi, qword [rsi + CPUMCTX.edi]
954 mov rsi, qword [rsi + CPUMCTX.esi]
955
956 vmlaunch
957 jmp .vmlaunch64_done; ; Here if vmlaunch detected a failure.
958
959ALIGNCODE(16)
960.vmlaunch64_done:
961%if 0 ;fixme later - def VBOX_WITH_64ON32_IDT
962 push rdx
963 mov rdx, [rsp + 8] ; pCtx
964 lidt [rdx + CPUMCPU.Hyper.idtr]
965 pop rdx
966%endif
967 jc near .vmstart64_invalid_vmcs_ptr
968 jz near .vmstart64_start_failed
969
970 push rdi
971 mov rdi, [rsp + 8] ; pCtx
972
973 mov qword [rdi + CPUMCTX.eax], rax
974 mov qword [rdi + CPUMCTX.ebx], rbx
975 mov qword [rdi + CPUMCTX.ecx], rcx
976 mov qword [rdi + CPUMCTX.edx], rdx
977 mov qword [rdi + CPUMCTX.esi], rsi
978 mov qword [rdi + CPUMCTX.ebp], rbp
979 mov qword [rdi + CPUMCTX.r8], r8
980 mov qword [rdi + CPUMCTX.r9], r9
981 mov qword [rdi + CPUMCTX.r10], r10
982 mov qword [rdi + CPUMCTX.r11], r11
983 mov qword [rdi + CPUMCTX.r12], r12
984 mov qword [rdi + CPUMCTX.r13], r13
985 mov qword [rdi + CPUMCTX.r14], r14
986 mov qword [rdi + CPUMCTX.r15], r15
987 mov rax, cr2
988 mov qword [rdi + CPUMCTX.cr2], rax
989
990 pop rax ; The guest edi we pushed above
991 mov qword [rdi + CPUMCTX.edi], rax
992
993 pop rsi ; pCtx (needed in rsi by the macros below)
994
995%ifdef VMX_USE_CACHED_VMCS_ACCESSES
996 pop rdi ; Saved pCache
997
998 %ifdef VBOX_WITH_CRASHDUMP_MAGIC
999 mov dword [rdi + VMCSCACHE.uPos], 7
1000 %endif
1001 %ifdef DEBUG
1002 mov [rdi + VMCSCACHE.TestOut.pCache], rdi
1003 mov [rdi + VMCSCACHE.TestOut.pCtx], rsi
1004 mov rax, cr8
1005 mov [rdi + VMCSCACHE.TestOut.cr8], rax
1006 %endif
1007
1008 mov ecx, [rdi + VMCSCACHE.Read.cValidEntries]
1009 cmp ecx, 0 ; Can't happen
1010 je .no_cached_reads
1011 jmp .cached_read
1012
1013ALIGN(16)
1014.cached_read:
1015 dec rcx
1016 mov eax, [rdi + VMCSCACHE.Read.aField + rcx*4]
1017 vmread qword [rdi + VMCSCACHE.Read.aFieldVal + rcx*8], rax
1018 cmp rcx, 0
1019 jnz .cached_read
1020.no_cached_reads:
1021 %ifdef VBOX_WITH_CRASHDUMP_MAGIC
1022 mov dword [rdi + VMCSCACHE.uPos], 8
1023 %endif
1024%endif
1025
1026 ; Restore segment registers.
1027 MYPOPSEGS rax
1028
1029 mov eax, VINF_SUCCESS
1030
1031%ifdef VBOX_WITH_CRASHDUMP_MAGIC
1032 mov dword [rdi + VMCSCACHE.uPos], 9
1033%endif
1034.vmstart64_end:
1035
1036%ifdef VMX_USE_CACHED_VMCS_ACCESSES
1037 %ifdef DEBUG
1038 mov rdx, [rsp] ; HCPhysVmcs
1039 mov [rdi + VMCSCACHE.TestOut.HCPhysVmcs], rdx
1040 %endif
1041%endif
1042
1043 ; Write back the data and disable the VMCS.
1044 vmclear qword [rsp] ; Pushed pVMCS
1045 add rsp, 8
1046
1047.vmstart64_vmxoff_end:
1048 ; Disable VMX root mode.
1049 vmxoff
1050.vmstart64_vmxon_failed:
1051%ifdef VMX_USE_CACHED_VMCS_ACCESSES
1052 %ifdef DEBUG
1053 cmp eax, VINF_SUCCESS
1054 jne .skip_flags_save
1055
1056 pushf
1057 pop rdx
1058 mov [rdi + VMCSCACHE.TestOut.eflags], rdx
1059 %ifdef VBOX_WITH_CRASHDUMP_MAGIC
1060 mov dword [rdi + VMCSCACHE.uPos], 12
1061 %endif
1062.skip_flags_save:
1063 %endif
1064%endif
1065 pop rbp
1066 ret
1067
1068
1069.vmstart64_invalid_vmcs_ptr:
1070 pop rsi ; pCtx (needed in rsi by the macros below)
1071
1072%ifdef VMX_USE_CACHED_VMCS_ACCESSES
1073 pop rdi ; pCache
1074 %ifdef VBOX_WITH_CRASHDUMP_MAGIC
1075 mov dword [rdi + VMCSCACHE.uPos], 10
1076 %endif
1077
1078 %ifdef DEBUG
1079 mov [rdi + VMCSCACHE.TestOut.pCache], rdi
1080 mov [rdi + VMCSCACHE.TestOut.pCtx], rsi
1081 %endif
1082%endif
1083
1084 ; Restore segment registers.
1085 MYPOPSEGS rax
1086
1087 ; Restore all general purpose host registers.
1088 mov eax, VERR_VMX_INVALID_VMCS_PTR_TO_START_VM
1089 jmp .vmstart64_end
1090
1091.vmstart64_start_failed:
1092 pop rsi ; pCtx (needed in rsi by the macros below)
1093
1094%ifdef VMX_USE_CACHED_VMCS_ACCESSES
1095 pop rdi ; pCache
1096
1097 %ifdef DEBUG
1098 mov [rdi + VMCSCACHE.TestOut.pCache], rdi
1099 mov [rdi + VMCSCACHE.TestOut.pCtx], rsi
1100 %endif
1101 %ifdef VBOX_WITH_CRASHDUMP_MAGIC
1102 mov dword [rdi + VMCSCACHE.uPos], 11
1103 %endif
1104%endif
1105
1106 ; Restore segment registers.
1107 MYPOPSEGS rax
1108
1109 ; Restore all general purpose host registers.
1110 mov eax, VERR_VMX_UNABLE_TO_START_VM
1111 jmp .vmstart64_end
1112ENDPROC VMXRCStartVM64
1113
1114
1115;/**
1116; * Prepares for and executes VMRUN (64-bit guests)
1117; *
1118; * @returns VBox status code
1119; * @param pVMCBHostPhys Physical address of the host VMCB (rsp+8)
1120; * @param pVMCBPhys Physical address of the guest VMCB (rsp+16)
1121; * @param pCtx Guest context (rsi)
1122; */
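;
; Stack layout after the 'push rbp / mov rbp, rsp' prologue below (RTHCPHYS_CB = 8):
;   [rbp + 8 + 8 + RTHCPHYS_CB]   physical address of the guest VMCB
;   [rbp + 8 + 8]                 physical address of the host VMCB
;   [rbp + 8]                     return address
;   [rbp]                         saved rbp
;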
1123BEGINPROC SVMRCVMRun64
1124 push rbp
1125 mov rbp, rsp
1126 pushf
1127 DEBUG_CMOS_STACK64 30h
1128
1129 ; Manual save and restore:
1130 ; - General purpose registers except RIP, RSP, RAX
1131 ;
1132 ; Trashed:
1133 ; - CR2 (we don't care)
1134 ; - LDTR (reset to 0)
1135 ; - DRx (presumably not changed at all)
1136 ; - DR7 (reset to 0x400)
1137
1138 ; Save the Guest CPU context pointer.
1139 push rsi ; Push for saving the state at the end
1140
1141 ; Save host fs, gs, sysenter msr etc
1142 mov rax, [rbp + 8 + 8] ; pVMCBHostPhys (64 bits physical address)
1143 push rax ; Save for the vmload after vmrun
1144 vmsave
1145
1146 ; Setup eax for VMLOAD
1147 mov rax, [rbp + 8 + 8 + RTHCPHYS_CB] ; pVMCBPhys (64 bits physical address)
1148
1149 ; Restore Guest's general purpose registers.
1150 ; rax is loaded from the VMCB by VMRUN.
1151 mov rbx, qword [rsi + CPUMCTX.ebx]
1152 mov rcx, qword [rsi + CPUMCTX.ecx]
1153 mov rdx, qword [rsi + CPUMCTX.edx]
1154 mov rdi, qword [rsi + CPUMCTX.edi]
1155 mov rbp, qword [rsi + CPUMCTX.ebp]
1156 mov r8, qword [rsi + CPUMCTX.r8]
1157 mov r9, qword [rsi + CPUMCTX.r9]
1158 mov r10, qword [rsi + CPUMCTX.r10]
1159 mov r11, qword [rsi + CPUMCTX.r11]
1160 mov r12, qword [rsi + CPUMCTX.r12]
1161 mov r13, qword [rsi + CPUMCTX.r13]
1162 mov r14, qword [rsi + CPUMCTX.r14]
1163 mov r15, qword [rsi + CPUMCTX.r15]
1164 mov rsi, qword [rsi + CPUMCTX.esi]
1165
1166 ; Clear the global interrupt flag & execute sti to make sure external interrupts cause a world switch.
1167 clgi
1168 sti
1169
1170 ; Load guest fs, gs, sysenter msr etc
1171 vmload
1172 ; Run the VM
1173 vmrun
1174
1175 ; rax is in the VMCB already; we can use it here.
1176
1177 ; Save guest fs, gs, sysenter msr etc.
1178 vmsave
1179
1180 ; Load host fs, gs, sysenter msr etc.
1181 pop rax ; Pushed above
1182 vmload
1183
1184 ; Set the global interrupt flag again, but execute cli to make sure IF=0.
1185 cli
1186 stgi
1187
1188 pop rax ; pCtx
1189
1190 mov qword [rax + CPUMCTX.ebx], rbx
1191 mov qword [rax + CPUMCTX.ecx], rcx
1192 mov qword [rax + CPUMCTX.edx], rdx
1193 mov qword [rax + CPUMCTX.esi], rsi
1194 mov qword [rax + CPUMCTX.edi], rdi
1195 mov qword [rax + CPUMCTX.ebp], rbp
1196 mov qword [rax + CPUMCTX.r8], r8
1197 mov qword [rax + CPUMCTX.r9], r9
1198 mov qword [rax + CPUMCTX.r10], r10
1199 mov qword [rax + CPUMCTX.r11], r11
1200 mov qword [rax + CPUMCTX.r12], r12
1201 mov qword [rax + CPUMCTX.r13], r13
1202 mov qword [rax + CPUMCTX.r14], r14
1203 mov qword [rax + CPUMCTX.r15], r15
1204
1205 mov eax, VINF_SUCCESS
1206
1207 popf
1208 pop rbp
1209 ret
1210ENDPROC SVMRCVMRun64
1211
1212;/**
1213; * Saves the guest FPU context
1214; *
1215; * @returns VBox status code
1216; * @param pCtx Guest context [rsi]
1217; */
1218BEGINPROC HMRCSaveGuestFPU64
1219 DEBUG_CMOS_STACK64 40h
1220 mov rax, cr0
1221 mov rcx, rax ; save old CR0
1222 and rax, ~(X86_CR0_TS | X86_CR0_EM)
1223 mov cr0, rax
1224
 1225 ; Use an explicit REX.W prefix so fxsave stores the 64-bit image format (full 64-bit FPU IP/DP). See @bugref{6398}.
1226 o64 fxsave [rsi + CPUMCTX.fpu]
1227
1228 mov cr0, rcx ; and restore old CR0 again
1229
1230 mov eax, VINF_SUCCESS
1231 ret
1232ENDPROC HMRCSaveGuestFPU64
1233
1234;/**
1235; * Saves the guest debug context (DR0-3, DR6)
1236; *
1237; * @returns VBox status code
1238; * @param pCtx Guest context [rsi]
1239; */
1240BEGINPROC HMRCSaveGuestDebug64
1241 DEBUG_CMOS_STACK64 41h
1242 mov rax, dr0
1243 mov qword [rsi + CPUMCTX.dr + 0*8], rax
1244 mov rax, dr1
1245 mov qword [rsi + CPUMCTX.dr + 1*8], rax
1246 mov rax, dr2
1247 mov qword [rsi + CPUMCTX.dr + 2*8], rax
1248 mov rax, dr3
1249 mov qword [rsi + CPUMCTX.dr + 3*8], rax
1250 mov rax, dr6
1251 mov qword [rsi + CPUMCTX.dr + 6*8], rax
1252 mov eax, VINF_SUCCESS
1253 ret
1254ENDPROC HMRCSaveGuestDebug64
1255
1256;/**
1257; * Dummy callback handler
1258; *
1259; * @returns VBox status code
1260; * @param param1 Parameter 1 [rsp+8]
1261; * @param param2 Parameter 2 [rsp+12]
1262; * @param param3 Parameter 3 [rsp+16]
1263; * @param param4 Parameter 4 [rsp+20]
1264; * @param param5 Parameter 5 [rsp+24]
1265; * @param pCtx Guest context [rsi]
1266; */
1267BEGINPROC HMRCTestSwitcher64
1268 DEBUG_CMOS_STACK64 42h
1269 mov eax, [rsp+8]
1270 ret
1271ENDPROC HMRCTestSwitcher64
1272
1273
1274%ifdef VBOX_WITH_64ON32_IDT
1275;
1276; Trap handling.
1277;
1278
1279;; Here follows an array of trap handler entry points, each 8 bytes in size.
1280BEGINPROC vmm64On32TrapHandlers
1281%macro vmm64On32TrapEntry 1
1282GLOBALNAME vmm64On32Trap %+ i
1283 db 06ah, i ; push imm8 - note that this is a sign-extended value.
1284 jmp NAME(%1)
1285 ALIGNCODE(8)
1286%assign i i+1
1287%endmacro
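; Each generated entry is a 2-byte 'push imm8' (6Ah nn) followed by a near jmp,
; padded out to an 8-byte boundary by the ALIGNCODE(8) above, so every entry
; occupies exactly 8 bytes.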
1288%assign i 0 ; start counter.
1289 vmm64On32TrapEntry vmm64On32Trap ; 0
1290 vmm64On32TrapEntry vmm64On32Trap ; 1
1291 vmm64On32TrapEntry vmm64On32Trap ; 2
1292 vmm64On32TrapEntry vmm64On32Trap ; 3
1293 vmm64On32TrapEntry vmm64On32Trap ; 4
1294 vmm64On32TrapEntry vmm64On32Trap ; 5
1295 vmm64On32TrapEntry vmm64On32Trap ; 6
1296 vmm64On32TrapEntry vmm64On32Trap ; 7
1297 vmm64On32TrapEntry vmm64On32TrapErrCode ; 8
1298 vmm64On32TrapEntry vmm64On32Trap ; 9
1299 vmm64On32TrapEntry vmm64On32TrapErrCode ; a
1300 vmm64On32TrapEntry vmm64On32TrapErrCode ; b
1301 vmm64On32TrapEntry vmm64On32TrapErrCode ; c
1302 vmm64On32TrapEntry vmm64On32TrapErrCode ; d
1303 vmm64On32TrapEntry vmm64On32TrapErrCode ; e
1304 vmm64On32TrapEntry vmm64On32Trap ; f (reserved)
1305 vmm64On32TrapEntry vmm64On32Trap ; 10
1306 vmm64On32TrapEntry vmm64On32TrapErrCode ; 11
1307 vmm64On32TrapEntry vmm64On32Trap ; 12
1308 vmm64On32TrapEntry vmm64On32Trap ; 13
1309%rep (0x100 - 0x14)
1310 vmm64On32TrapEntry vmm64On32Trap
1311%endrep
1312ENDPROC vmm64On32TrapHandlers
1313
1314;; Fake an error code and jump to the real thing.
1315BEGINPROC vmm64On32Trap
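 ; The entry stub already pushed the trap number; duplicating it below makes the
 ; stack match the error-code layout expected by vmm64On32TrapErrCode.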
1316 push qword [rsp]
1317 jmp NAME(vmm64On32TrapErrCode)
1318ENDPROC vmm64On32Trap
1319
1320
1321;;
1322; Trap frame:
1323; [rbp + 38h] = ss
1324; [rbp + 30h] = rsp
1325; [rbp + 28h] = eflags
1326; [rbp + 20h] = cs
1327; [rbp + 18h] = rip
1328; [rbp + 10h] = error code (or trap number)
1329; [rbp + 08h] = trap number
1330; [rbp + 00h] = rbp
1331; [rbp - 08h] = rax
1332; [rbp - 10h] = rbx
1333; [rbp - 18h] = ds
1334;
1335BEGINPROC vmm64On32TrapErrCode
1336 push rbp
1337 mov rbp, rsp
1338 push rax
1339 push rbx
1340 mov ax, ds
1341 push rax
1342 sub rsp, 20h
1343
1344 mov ax, cs
1345 mov ds, ax
1346
1347%if 1
1348 COM64_S_NEWLINE
1349 COM64_S_CHAR '!'
1350 COM64_S_CHAR 't'
1351 COM64_S_CHAR 'r'
1352 COM64_S_CHAR 'a'
1353 COM64_S_CHAR 'p'
1354 movzx eax, byte [rbp + 08h]
1355 COM64_S_DWORD_REG eax
1356 COM64_S_CHAR '!'
1357%endif
1358
1359%if 0 ;; @todo Figure the offset of the CPUMCPU relative to CPUM
1360 sidt [rsp]
1361 movsx eax, word [rsp]
1362 shr eax, 12 ; div by 16 * 256 (0x1000).
1363%else
1364 ; hardcoded VCPU(0) for now...
1365 mov rbx, [NAME(pCpumIC) wrt rip]
1366 mov eax, [rbx + CPUM.offCPUMCPU0]
1367%endif
1368 push rax ; Save the offset for rbp later.
1369
1370 add rbx, rax ; rbx = CPUMCPU
1371
1372 ;
1373 ; Deal with recursive traps due to vmxoff (lazy bird).
1374 ;
1375 lea rax, [.vmxoff_trap_location wrt rip]
1376 cmp rax, [rbp + 18h]
1377 je .not_vmx_root
1378
1379 ;
1380 ; Save the context.
1381 ;
1382 mov rax, [rbp - 8]
1383 mov [rbx + CPUMCPU.Hyper.eax], rax
1384 mov [rbx + CPUMCPU.Hyper.ecx], rcx
1385 mov [rbx + CPUMCPU.Hyper.edx], rdx
1386 mov rax, [rbp - 10h]
1387 mov [rbx + CPUMCPU.Hyper.ebx], rax
1388 mov rax, [rbp]
1389 mov [rbx + CPUMCPU.Hyper.ebp], rax
1390 mov rax, [rbp + 30h]
1391 mov [rbx + CPUMCPU.Hyper.esp], rax
1392 mov [rbx + CPUMCPU.Hyper.edi], rdi
1393 mov [rbx + CPUMCPU.Hyper.esi], rsi
1394 mov [rbx + CPUMCPU.Hyper.r8], r8
1395 mov [rbx + CPUMCPU.Hyper.r9], r9
1396 mov [rbx + CPUMCPU.Hyper.r10], r10
1397 mov [rbx + CPUMCPU.Hyper.r11], r11
1398 mov [rbx + CPUMCPU.Hyper.r12], r12
1399 mov [rbx + CPUMCPU.Hyper.r13], r13
1400 mov [rbx + CPUMCPU.Hyper.r14], r14
1401 mov [rbx + CPUMCPU.Hyper.r15], r15
1402
1403 mov rax, [rbp + 18h]
1404 mov [rbx + CPUMCPU.Hyper.eip], rax
1405 movzx ax, [rbp + 20h]
1406 mov [rbx + CPUMCPU.Hyper.cs.Sel], ax
1407 mov ax, [rbp + 38h]
1408 mov [rbx + CPUMCPU.Hyper.ss.Sel], ax
1409 mov ax, [rbp - 18h]
1410 mov [rbx + CPUMCPU.Hyper.ds.Sel], ax
1411
1412 mov rax, [rbp + 28h]
1413 mov [rbx + CPUMCPU.Hyper.eflags], rax
1414
1415 mov rax, cr2
1416 mov [rbx + CPUMCPU.Hyper.cr2], rax
1417
1418 mov rax, [rbp + 10h]
1419 mov [rbx + CPUMCPU.Hyper.r14], rax ; r14 = error code
1420 movzx eax, byte [rbp + 08h]
1421 mov [rbx + CPUMCPU.Hyper.r15], rax ; r15 = trap number
1422
1423 ;
1424 ; Finally, leave VMX root operation before trying to return to the host.
1425 ;
1426 mov rax, cr4
1427 test rax, X86_CR4_VMXE
1428 jz .not_vmx_root
1429.vmxoff_trap_location:
1430 vmxoff
1431.not_vmx_root:
1432
1433 ;
1434 ; Go back to the host.
1435 ;
1436 pop rbp
1437 mov dword [rbx + CPUMCPU.u32RetCode], VERR_TRPM_DONT_PANIC
1438 jmp NAME(vmmRCToHostAsm)
1439ENDPROC vmm64On32TrapErrCode
1440
1441;; We allocate the IDT here to avoid having to allocate memory separately somewhere.
1442ALIGNCODE(16)
1443GLOBALNAME vmm64On32Idt
1444%assign i 0
1445%rep 256
1446 dq NAME(vmm64On32Trap %+ i) - NAME(Start) ; Relative trap handler offsets.
1447 dq 0
1448%assign i (i + 1)
1449%endrep
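; Each placeholder entry is 16 bytes (offset qword + zero qword), matching the
; size of a 64-bit IDT gate descriptor; the relocation code presumably rewrites
; these relative handler offsets into proper gates once the final addresses are
; known.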
1450
1451
1452 %if 0
1453;; For debugging purposes.
1454BEGINPROC vmm64On32PrintIdtr
1455 push rax
1456 push rsi ; paranoia
1457 push rdi ; ditto
1458 sub rsp, 16
1459
1460 COM64_S_CHAR ';'
1461 COM64_S_CHAR 'i'
1462 COM64_S_CHAR 'd'
1463 COM64_S_CHAR 't'
1464 COM64_S_CHAR 'r'
1465 COM64_S_CHAR '='
1466 sidt [rsp + 6]
1467 mov eax, [rsp + 8 + 4]
1468 COM64_S_DWORD_REG eax
1469 mov eax, [rsp + 8]
1470 COM64_S_DWORD_REG eax
1471 COM64_S_CHAR ':'
1472 movzx eax, word [rsp + 6]
1473 COM64_S_DWORD_REG eax
1474 COM64_S_CHAR '!'
1475
1476 add rsp, 16
1477 pop rdi
1478 pop rsi
1479 pop rax
1480 ret
1481ENDPROC vmm64On32PrintIdtr
1482 %endif
1483
1484 %if 1
1485;; For debugging purposes.
1486BEGINPROC vmm64On32DumpCmos
1487 push rax
1488 push rdx
1489 push rcx
1490 push rsi ; paranoia
1491 push rdi ; ditto
1492 sub rsp, 16
1493
1494%if 0
1495 mov al, 3
1496 out 72h, al
1497 mov al, 68h
1498 out 73h, al
1499%endif
1500
1501 COM64_S_NEWLINE
1502 COM64_S_CHAR 'c'
1503 COM64_S_CHAR 'm'
1504 COM64_S_CHAR 'o'
1505 COM64_S_CHAR 's'
1506 COM64_S_CHAR '0'
1507 COM64_S_CHAR ':'
1508
1509 xor ecx, ecx
1510.loop1:
1511 mov al, cl
1512 out 70h, al
1513 in al, 71h
1514 COM64_S_BYTE_REG eax
1515 COM64_S_CHAR ' '
1516 inc ecx
1517 cmp ecx, 128
1518 jb .loop1
1519
1520 COM64_S_NEWLINE
1521 COM64_S_CHAR 'c'
1522 COM64_S_CHAR 'm'
1523 COM64_S_CHAR 'o'
1524 COM64_S_CHAR 's'
1525 COM64_S_CHAR '1'
1526 COM64_S_CHAR ':'
1527 xor ecx, ecx
1528.loop2:
1529 mov al, cl
1530 out 72h, al
1531 in al, 73h
1532 COM64_S_BYTE_REG eax
1533 COM64_S_CHAR ' '
1534 inc ecx
1535 cmp ecx, 128
1536 jb .loop2
1537
1538%if 0
1539 COM64_S_NEWLINE
1540 COM64_S_CHAR 'c'
1541 COM64_S_CHAR 'm'
1542 COM64_S_CHAR 'o'
1543 COM64_S_CHAR 's'
1544 COM64_S_CHAR '2'
1545 COM64_S_CHAR ':'
1546 xor ecx, ecx
1547.loop3:
1548 mov al, cl
1549 out 74h, al
1550 in al, 75h
1551 COM64_S_BYTE_REG eax
1552 COM64_S_CHAR ' '
1553 inc ecx
1554 cmp ecx, 128
1555 jb .loop3
1556
1557 COM64_S_NEWLINE
1558 COM64_S_CHAR 'c'
1559 COM64_S_CHAR 'm'
1560 COM64_S_CHAR 'o'
1561 COM64_S_CHAR 's'
1562 COM64_S_CHAR '3'
1563 COM64_S_CHAR ':'
1564 xor ecx, ecx
1565.loop4:
1566 mov al, cl
1567 out 72h, al
1568 in al, 73h
1569 COM64_S_BYTE_REG eax
1570 COM64_S_CHAR ' '
1571 inc ecx
1572 cmp ecx, 128
1573 jb .loop4
1574
1575 COM64_S_NEWLINE
1576%endif
1577
1578 add rsp, 16
1579 pop rdi
1580 pop rsi
1581 pop rcx
1582 pop rdx
1583 pop rax
1584 ret
1585ENDPROC vmm64On32DumpCmos
1586 %endif
1587
1588%endif ; VBOX_WITH_64ON32_IDT
1589
1590
1591
1592;
1593;
1594; Back to switcher code.
1595; Back to switcher code.
1596; Back to switcher code.
1597;
1598;
1599
1600
1601
1602;;
1603; Trampoline for doing a call when starting the hypervisor execution.
1604;
1605; Push any arguments to the routine.
1606; Push the argument frame size (cArg * 4).
1607; Push the call target (_cdecl convention).
1608; Push the address of this routine.
1609;
1610;
1611BITS 64
1612ALIGNCODE(16)
1613BEGINPROC vmmRCCallTrampoline
1614%ifdef DEBUG_STUFF
1615 COM64_S_CHAR 'c'
1616 COM64_S_CHAR 't'
1617 COM64_S_CHAR '!'
1618%endif
1619 int3
1620ENDPROC vmmRCCallTrampoline
1621
1622
1623;;
1624; The C interface.
1625;
1626BITS 64
1627ALIGNCODE(16)
1628BEGINPROC vmmRCToHost
1629%ifdef DEBUG_STUFF
1630 push rsi
1631 COM_NEWLINE
1632 COM_CHAR 'b'
1633 COM_CHAR 'a'
1634 COM_CHAR 'c'
1635 COM_CHAR 'k'
1636 COM_CHAR '!'
1637 COM_NEWLINE
1638 pop rsi
1639%endif
1640 int3
1641ENDPROC vmmRCToHost
1642
1643;;
1644; vmmRCToHostAsm
1645;
1646; This is an alternative entry point which we'll be using
1647; when we have saved the guest state already or we haven't
1648; been messing with the guest at all.
1649;
1650; @param rbp The virtual cpu number.
1651; @param
1652;
1653BITS 64
1654ALIGNCODE(16)
1655BEGINPROC vmmRCToHostAsm
1656NAME(vmmRCToHostAsmNoReturn):
1657 ;; We're still in the intermediate memory context!
1658
1659 ;;
1660 ;; Switch to compatibility mode, placing ourselves in identity mapped code.
1661 ;;
1662 jmp far [NAME(fpIDEnterTarget) wrt rip]
1663
1664; 16:32 pointer to IDExitTarget (the FIX_ID_32BIT fixup below supplies its offset).
1665NAME(fpIDEnterTarget):
1666 FIXUP FIX_ID_32BIT, 0, NAME(IDExitTarget) - NAME(Start)
1667dd 0
1668 FIXUP FIX_HYPER_CS, 0
1669dd 0
1670
1671 ; We're now on identity mapped pages!
1672ALIGNCODE(16)
1673GLOBALNAME IDExitTarget
1674BITS 32
1675 DEBUG32_CHAR('1')
1676
1677 ; 1. Deactivate long mode by turning off paging.
1678 mov ebx, cr0
1679 and ebx, ~X86_CR0_PG
1680 mov cr0, ebx
1681 DEBUG32_CHAR('2')
1682
1683 ; 2. Load intermediate page table.
1684 FIXUP SWITCHER_FIX_INTER_CR3_HC, 1
1685 mov edx, 0ffffffffh
1686 mov cr3, edx
1687 DEBUG32_CHAR('3')
1688
1689 ; 3. Disable long mode.
1690 mov ecx, MSR_K6_EFER
1691 rdmsr
1692 DEBUG32_CHAR('5')
1693 and eax, ~(MSR_K6_EFER_LME)
1694 wrmsr
1695 DEBUG32_CHAR('6')
1696
1697%ifndef NEED_PAE_ON_HOST
1698 ; 3b. Disable PAE.
1699 mov eax, cr4
1700 and eax, ~X86_CR4_PAE
1701 mov cr4, eax
1702 DEBUG32_CHAR('7')
1703%endif
1704
1705 ; 4. Enable paging.
1706 or ebx, X86_CR0_PG
1707 mov cr0, ebx
1708 jmp short just_a_jump
1709just_a_jump:
1710 DEBUG32_CHAR('8')
1711
1712 ;;
1713 ;; 5. Jump to guest code mapping of the code and load the Hypervisor CS.
1714 ;;
1715 FIXUP FIX_ID_2_HC_NEAR_REL, 1, NAME(ICExitTarget) - NAME(Start)
1716 jmp near NAME(ICExitTarget)
1717
1718 ;;
1719 ;; When we arrive at this label we're at the host mapping of the
1720 ;; switcher code, but with intermediate page tables.
1721 ;;
1722BITS 32
1723ALIGNCODE(16)
1724GLOBALNAME ICExitTarget
1725 DEBUG32_CHAR('9')
1726 ;DEBUG_CMOS_TRASH_AL 70h
1727
1728 ; load the hypervisor data selector into ds & es
1729 FIXUP FIX_HYPER_DS, 1
1730 mov eax, 0ffffh
1731 mov ds, eax
1732 mov es, eax
1733 DEBUG32_CHAR('a')
1734
1735 FIXUP FIX_GC_CPUM_OFF, 1, 0
1736 mov edx, 0ffffffffh
1737 CPUMCPU_FROM_CPUM_WITH_OFFSET edx, ebp
1738
1739 DEBUG32_CHAR('b')
1740 mov esi, [edx + CPUMCPU.Host.cr3]
1741 mov cr3, esi
1742 DEBUG32_CHAR('c')
1743
1744 ;; now we're in host memory context, let's restore regs
1745 FIXUP FIX_HC_CPUM_OFF, 1, 0
1746 mov edx, 0ffffffffh
1747 CPUMCPU_FROM_CPUM_WITH_OFFSET edx, ebp
1748 DEBUG32_CHAR('e')
1749
1750 ; restore the host EFER
1751 mov ebx, edx
1752 mov ecx, MSR_K6_EFER
1753 mov eax, [ebx + CPUMCPU.Host.efer]
1754 mov edx, [ebx + CPUMCPU.Host.efer + 4]
1755 DEBUG32_CHAR('f')
1756 wrmsr
1757 mov edx, ebx
1758 DEBUG32_CHAR('g')
1759
1760 ; activate host gdt and idt
1761 lgdt [edx + CPUMCPU.Host.gdtr]
1762 DEBUG32_CHAR('0')
1763 lidt [edx + CPUMCPU.Host.idtr]
1764 DEBUG32_CHAR('1')
1765
1766 ; Restore TSS selector; must mark it as not busy before using ltr (!)
1767 ; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p)
1768 movzx eax, word [edx + CPUMCPU.Host.tr] ; eax <- TR
1769 and al, 0F8h ; mask away TI and RPL bits, get descriptor offset.
1770 add eax, [edx + CPUMCPU.Host.gdtr + 2] ; eax <- GDTR.address + descriptor offset.
 1771 and dword [eax + 4], ~0200h ; clear the busy flag (type bit 1, i.e. bit 9 of the 2nd dword)
1772 ltr word [edx + CPUMCPU.Host.tr]
1773
1774 ; activate ldt
1775 DEBUG32_CHAR('2')
1776 lldt [edx + CPUMCPU.Host.ldtr]
1777
1778 ; Restore segment registers
1779 mov eax, [edx + CPUMCPU.Host.ds]
1780 mov ds, eax
1781 mov eax, [edx + CPUMCPU.Host.es]
1782 mov es, eax
1783 mov eax, [edx + CPUMCPU.Host.fs]
1784 mov fs, eax
1785 mov eax, [edx + CPUMCPU.Host.gs]
1786 mov gs, eax
1787 ; restore stack
1788 lss esp, [edx + CPUMCPU.Host.esp]
1789
1790 ; Control registers.
1791 mov ecx, [edx + CPUMCPU.Host.cr4]
1792 mov cr4, ecx
1793 mov ecx, [edx + CPUMCPU.Host.cr0]
1794 mov cr0, ecx
1795 ;mov ecx, [edx + CPUMCPU.Host.cr2] ; assumes this is waste of time.
1796 ;mov cr2, ecx
1797
1798 ; restore general registers.
1799 mov edi, [edx + CPUMCPU.Host.edi]
1800 mov esi, [edx + CPUMCPU.Host.esi]
1801 mov ebx, [edx + CPUMCPU.Host.ebx]
1802 mov ebp, [edx + CPUMCPU.Host.ebp]
1803
1804 ; store the return code in eax
1805 DEBUG_CMOS_TRASH_AL 79h
1806 mov eax, [edx + CPUMCPU.u32RetCode]
1807 retf
1808ENDPROC vmmRCToHostAsm
1809
1810
1811GLOBALNAME End
1812;
1813; The description string (in the text section).
1814;
1815NAME(Description):
1816 db SWITCHER_DESCRIPTION
1817 db 0
1818
1819extern NAME(Relocate)
1820
1821;
1822; End the fixup records.
1823;
1824BEGINDATA
1825 db FIX_THE_END ; final entry.
1826GLOBALNAME FixupsEnd
1827
1828;;
1829; The switcher definition structure.
1830ALIGNDATA(16)
1831GLOBALNAME Def
1832 istruc VMMSWITCHERDEF
1833 at VMMSWITCHERDEF.pvCode, RTCCPTR_DEF NAME(Start)
1834 at VMMSWITCHERDEF.pvFixups, RTCCPTR_DEF NAME(Fixups)
1835 at VMMSWITCHERDEF.pszDesc, RTCCPTR_DEF NAME(Description)
1836 at VMMSWITCHERDEF.pfnRelocate, RTCCPTR_DEF NAME(Relocate)
1837 at VMMSWITCHERDEF.enmType, dd SWITCHER_TYPE
1838 at VMMSWITCHERDEF.cbCode, dd NAME(End) - NAME(Start)
1839 at VMMSWITCHERDEF.offR0ToRawMode, dd NAME(vmmR0ToRawMode) - NAME(Start)
1840 at VMMSWITCHERDEF.offRCToHost, dd NAME(vmmRCToHost) - NAME(Start)
1841 at VMMSWITCHERDEF.offRCCallTrampoline, dd NAME(vmmRCCallTrampoline) - NAME(Start)
1842 at VMMSWITCHERDEF.offRCToHostAsm, dd NAME(vmmRCToHostAsm) - NAME(Start)
1843 at VMMSWITCHERDEF.offRCToHostAsmNoReturn, dd NAME(vmmRCToHostAsmNoReturn) - NAME(Start)
1844 ; disasm help
1845 at VMMSWITCHERDEF.offHCCode0, dd 0
1846 at VMMSWITCHERDEF.cbHCCode0, dd NAME(IDEnterTarget) - NAME(Start)
1847 at VMMSWITCHERDEF.offHCCode1, dd NAME(ICExitTarget) - NAME(Start)
1848 at VMMSWITCHERDEF.cbHCCode1, dd NAME(End) - NAME(ICExitTarget)
1849 at VMMSWITCHERDEF.offIDCode0, dd NAME(IDEnterTarget) - NAME(Start)
1850 at VMMSWITCHERDEF.cbIDCode0, dd NAME(ICEnterTarget) - NAME(IDEnterTarget)
1851 at VMMSWITCHERDEF.offIDCode1, dd NAME(IDExitTarget) - NAME(Start)
1852 at VMMSWITCHERDEF.cbIDCode1, dd NAME(ICExitTarget) - NAME(Start)
1853%ifdef VBOX_WITH_64ON32_IDT ; Hack! Use offGCCode to find the IDT.
1854 at VMMSWITCHERDEF.offGCCode, dd NAME(vmm64On32Idt) - NAME(Start)
1855%else
1856 at VMMSWITCHERDEF.offGCCode, dd 0
1857%endif
1858 at VMMSWITCHERDEF.cbGCCode, dd 0
1859
1860 iend
1861