VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMSwitcher/LegacyandAMD64.mac @ 30948

Last change on this file since 30948 was 30180, checked in by vboxsync, 14 years ago

Make 32->64 switcher SMP aware.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 17.7 KB
; VMM - World Switchers, 32Bit to AMD64.
;

;
; Copyright (C) 2006-2007 Oracle Corporation
;
; This file is part of VirtualBox Open Source Edition (OSE), as
; available from http://www.virtualbox.org. This file is free software;
; you can redistribute it and/or modify it under the terms of the GNU
; General Public License (GPL) as published by the Free Software
; Foundation, in version 2 as it comes in the "COPYING" file of the
; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
;

;%define DEBUG_STUFF 1
;%define STRICT_IF 1

;*******************************************************************************
;*  Defined Constants And Macros                                               *
;*******************************************************************************


;*******************************************************************************
;*  Header Files                                                               *
;*******************************************************************************
%include "VBox/asmdefs.mac"
%include "VBox/x86.mac"
%include "VBox/cpum.mac"
%include "VBox/stam.mac"
%include "VBox/vm.mac"
%include "CPUMInternal.mac"
%include "VMMSwitcher/VMMSwitcher.mac"


;
; Start the fixup records.
; We collect the fixups in the .data section as we go along.
; It is therefore VITAL that no one is using the .data section
; for anything else between 'Start' and 'End'.
;
BEGINDATA
GLOBALNAME Fixups
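; Note: each FIXUP below assembles a placeholder (e.g. 0ffffffffh) into the
; code and records its location here in .data; the records are presumably
; consumed at load time via the extern'ed Relocate callback (see the end of
; this file) to patch in addresses valid for the actual context.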



BEGINCODE
GLOBALNAME Start

BITS 32

;;
; The C interface.
; @param    [esp + 04h]  Param 1 - VM handle
; @param    [esp + 08h]  Param 2 - VMCPU offset
;
BEGINPROC vmmR0HostToGuest
%ifdef DEBUG_STUFF
    COM32_S_NEWLINE
    COM32_S_CHAR '^'
%endif

%ifdef VBOX_WITH_STATISTICS
    ;
    ; Switcher stats.
    ;
    FIXUP FIX_HC_VM_OFF, 1, VM.StatSwitcherToGC
    mov     edx, 0ffffffffh
    STAM_PROFILE_ADV_START edx
%endif

    push    ebp
    mov     ebp, [esp + 12]             ; VMCPU offset
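    ; Note: on entry the VMCPU offset sits at [esp + 08h]; the push ebp
    ; above shifts it to [esp + 12].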

    ; turn off interrupts
    pushf
    cli

    ;
    ; Call worker.
    ;
    FIXUP FIX_HC_CPUM_OFF, 1, 0
    mov     edx, 0ffffffffh
    push    cs                          ; allow for far return and restore cs correctly.
    call    NAME(vmmR0HostToGuestAsm)
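    ; Note: pushing cs before the near call fakes a far-call frame, so the
    ; switcher can come back through the retf at the end of
    ; VMMGCGuestToHostAsm and restore cs and eip in one instruction.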

    ; restore original flags
    popf
    pop     ebp

%ifdef VBOX_WITH_STATISTICS
    ;
    ; Switcher stats.
    ;
    FIXUP FIX_HC_VM_OFF, 1, VM.StatSwitcherToHC
    mov     edx, 0ffffffffh
    STAM_PROFILE_ADV_STOP edx
%endif

    ret

ENDPROC vmmR0HostToGuest

; *****************************************************************************
; vmmR0HostToGuestAsm
;
; Phase one of the switch from host to guest context (host MMU context).
;
; INPUT:
;       - edx       virtual address of CPUM structure (valid in host context)
;       - ebp       offset of the CPUMCPU structure
;
; USES/DESTROYS:
;       - eax, ecx, edx, esi
;
; ASSUMPTION:
;       - current CS and DS selectors are wide open
;
; *****************************************************************************
ALIGNCODE(16)
BEGINPROC vmmR0HostToGuestAsm
    ;;
    ;; Save CPU host context.
    ;; Skip eax, edx and ecx as these are not preserved over calls.
    ;;
    CPUMCPU_FROM_CPUM_WITH_OFFSET edx, ebp
%ifdef VBOX_WITH_CRASHDUMP_MAGIC
    ; phys address of scratch page
    mov     eax, dword [edx + CPUMCPU.Guest.dr + 4*8]
    mov     cr2, eax

    mov     dword [edx + CPUMCPU.Guest.dr + 4*8], 1
%endif

    ; general registers.
    mov     [edx + CPUMCPU.Host.ebx], ebx
    mov     [edx + CPUMCPU.Host.edi], edi
    mov     [edx + CPUMCPU.Host.esi], esi
    mov     [edx + CPUMCPU.Host.esp], esp
    mov     [edx + CPUMCPU.Host.ebp], ebp
    ; selectors.
    mov     [edx + CPUMCPU.Host.ds], ds
    mov     [edx + CPUMCPU.Host.es], es
    mov     [edx + CPUMCPU.Host.fs], fs
    mov     [edx + CPUMCPU.Host.gs], gs
    mov     [edx + CPUMCPU.Host.ss], ss
    ; special registers.
    sldt    [edx + CPUMCPU.Host.ldtr]
    sidt    [edx + CPUMCPU.Host.idtr]
    sgdt    [edx + CPUMCPU.Host.gdtr]
    str     [edx + CPUMCPU.Host.tr]

%ifdef VBOX_WITH_CRASHDUMP_MAGIC
    mov     dword [edx + CPUMCPU.Guest.dr + 4*8], 2
%endif

    ; control registers.
    mov     eax, cr0
    mov     [edx + CPUMCPU.Host.cr0], eax
    ; Skip cr2; assume the host OS doesn't stuff anything in cr2. (safe)
    mov     eax, cr3
    mov     [edx + CPUMCPU.Host.cr3], eax
    mov     eax, cr4
    mov     [edx + CPUMCPU.Host.cr4], eax

    ; save the host EFER MSR
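    ; Note: rdmsr returns the MSR value in edx:eax and thus clobbers our
    ; CPUMCPU pointer in edx, hence the save/restore through ebx below.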
    mov     ebx, edx
    mov     ecx, MSR_K6_EFER
    rdmsr
    mov     [ebx + CPUMCPU.Host.efer], eax
    mov     [ebx + CPUMCPU.Host.efer + 4], edx
    mov     edx, ebx

%ifdef VBOX_WITH_CRASHDUMP_MAGIC
    mov     dword [edx + CPUMCPU.Guest.dr + 4*8], 3
%endif

    ; Load the new GDT so we can do a far jump after entering 64-bit mode.
    lgdt    [edx + CPUMCPU.Hyper.gdtr]

%ifdef VBOX_WITH_CRASHDUMP_MAGIC
    mov     dword [edx + CPUMCPU.Guest.dr + 4*8], 4
%endif

    ;;
    ;; Load Intermediate memory context.
    ;;
    FIXUP SWITCHER_FIX_INTER_CR3_HC, 1
    mov     eax, 0ffffffffh
    mov     cr3, eax
    DEBUG_CHAR('?')

    ;;
    ;; Jump to identity mapped location.
    ;;
    FIXUP FIX_HC_2_ID_NEAR_REL, 1, NAME(IDEnterTarget) - NAME(Start)
    jmp near NAME(IDEnterTarget)
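    ; Note: identity mapping (virtual == physical) matters here because the
    ; code below toggles CR0.PG while executing; instruction fetch must keep
    ; working across the paging switch.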


    ; We're now on identity mapped pages!
ALIGNCODE(16)
GLOBALNAME IDEnterTarget
    DEBUG_CHAR('2')

    ; 1. Disable paging.
    mov     ebx, cr0
    and     ebx, ~X86_CR0_PG
    mov     cr0, ebx
    DEBUG_CHAR('2')

%ifdef VBOX_WITH_CRASHDUMP_MAGIC
    mov     eax, cr2
    mov     dword [eax], 3
%endif

    ; 2. Enable PAE.
    mov     ecx, cr4
    or      ecx, X86_CR4_PAE
    mov     cr4, ecx
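    ; Note: PAE is an architectural prerequisite for long mode; enabling
    ; paging with EFER.LME set but CR4.PAE clear would raise #GP.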

    ; 3. Load long mode intermediate CR3.
    FIXUP FIX_INTER_AMD64_CR3, 1
    mov     ecx, 0ffffffffh
    mov     cr3, ecx
    DEBUG_CHAR('3')

%ifdef VBOX_WITH_CRASHDUMP_MAGIC
    mov     eax, cr2
    mov     dword [eax], 4
%endif

    ; 4. Enable long mode.
    mov     esi, edx
    mov     ecx, MSR_K6_EFER
    rdmsr
    FIXUP FIX_EFER_OR_MASK, 1
    or      eax, 0ffffffffh
    and     eax, ~(MSR_K6_EFER_FFXSR)   ; turn off fast fxsave/fxrstor (skipping xmm regs)
    wrmsr
    mov     edx, esi
    DEBUG_CHAR('4')
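    ; Note: the OR mask patched in by FIX_EFER_OR_MASK is assumed to include
    ; at least MSR_K6_EFER_LME; long mode only becomes active (EFER.LMA) once
    ; paging is re-enabled in step 5.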

%ifdef VBOX_WITH_CRASHDUMP_MAGIC
    mov     eax, cr2
    mov     dword [eax], 5
%endif

    ; 5. Enable paging.
    or      ebx, X86_CR0_PG
    ; Disable ring 0 write protection too
    and     ebx, ~X86_CR0_WRITE_PROTECT
    mov     cr0, ebx
    DEBUG_CHAR('5')

    ; Jump from compatibility mode to 64-bit mode.
    FIXUP FIX_ID_FAR32_TO_64BIT_MODE, 1, NAME(IDEnter64Mode) - NAME(Start)
    jmp     0ffffh:0fffffffeh
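    ; Note: the bogus selector:offset above is patched by the fixup with a
    ; 64-bit code selector and the address of IDEnter64Mode; loading that CS
    ; is what takes us from compatibility mode into true 64-bit mode.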

    ;
    ; We're in 64-bit mode (ds, ss, es, fs, gs are all bogus).
    ;
BITS 64
ALIGNCODE(16)
NAME(IDEnter64Mode):
    DEBUG_CHAR('6')
    jmp     [NAME(pICEnterTarget) wrt rip]

; 64-bit jump target
NAME(pICEnterTarget):
FIXUP FIX_HC_64BIT_NOCHECK, 0, NAME(ICEnterTarget) - NAME(Start)
dq 0ffffffffffffffffh

; 64-bit pCpum address.
NAME(pCpumIC):
FIXUP FIX_GC_64_BIT_CPUM_OFF, 0, 0
dq 0ffffffffffffffffh

%ifdef VBOX_WITH_CRASHDUMP_MAGIC
NAME(pMarker):
db 'Switch_marker'
%endif

    ;
    ; When we arrive here we're in 64-bit mode in the intermediate context.
    ;
ALIGNCODE(16)
GLOBALNAME ICEnterTarget
    ; Load CPUM pointer into rdx
    mov     rdx, [NAME(pCpumIC) wrt rip]
    CPUMCPU_FROM_CPUM_WITH_OFFSET edx, ebp

    mov     rax, cs
    mov     ds, rax
    mov     es, rax

    ; Invalidate fs & gs
    mov     rax, 0
    mov     fs, rax
    mov     gs, rax

%ifdef VBOX_WITH_CRASHDUMP_MAGIC
    mov     dword [rdx + CPUMCPU.Guest.dr + 4*8], 5
%endif

    ; Set up the stack; use the lss_esp, ss pair for lss.
    DEBUG_CHAR('7')
    mov     rsp, 0
    mov     eax, [rdx + CPUMCPU.Hyper.esp]
    mov     [rdx + CPUMCPU.Hyper.lss_esp], eax
    lss     esp, [rdx + CPUMCPU.Hyper.lss_esp]
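    ; Note: lss loads ss:esp in one instruction, avoiding a window with a new
    ; stack segment but a stale stack pointer; it presumably relies on the
    ; lss_esp and ss fields lying back to back in CPUMCPU.Hyper to form the
    ; required m16:32 operand.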

%ifdef VBOX_WITH_CRASHDUMP_MAGIC
    mov     dword [rdx + CPUMCPU.Guest.dr + 4*8], 6
%endif


    ; load the hypervisor function address
    mov     r9, [rdx + CPUMCPU.Hyper.eip]

    ; Check if we need to restore the guest FPU state
    mov     esi, [rdx + CPUMCPU.fUseFlags] ; esi == use flags.
    test    esi, CPUM_SYNC_FPU_STATE
    jz      near gth_fpu_no

%ifdef VBOX_WITH_CRASHDUMP_MAGIC
    mov     dword [rdx + CPUMCPU.Guest.dr + 4*8], 7
%endif

    mov     rax, cr0
    mov     rcx, rax                    ; save old CR0
    and     rax, ~(X86_CR0_TS | X86_CR0_EM)
    mov     cr0, rax
    fxrstor [rdx + CPUMCPU.Guest.fpu]
    mov     cr0, rcx                    ; and restore old CR0 again
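    ; Note: fxrstor raises #NM if CR0.TS is set and #UD if CR0.EM is set,
    ; which is why both bits are cleared around the restore and the original
    ; CR0 is put back afterwards.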

    and     dword [rdx + CPUMCPU.fUseFlags], ~CPUM_SYNC_FPU_STATE

gth_fpu_no:
    ; Check if we need to restore the guest debug state
    test    esi, CPUM_SYNC_DEBUG_STATE
    jz      near gth_debug_no

%ifdef VBOX_WITH_CRASHDUMP_MAGIC
    mov     dword [rdx + CPUMCPU.Guest.dr + 4*8], 8
%endif

    mov     rax, qword [rdx + CPUMCPU.Guest.dr + 0*8]
    mov     dr0, rax
    mov     rax, qword [rdx + CPUMCPU.Guest.dr + 1*8]
    mov     dr1, rax
    mov     rax, qword [rdx + CPUMCPU.Guest.dr + 2*8]
    mov     dr2, rax
    mov     rax, qword [rdx + CPUMCPU.Guest.dr + 3*8]
    mov     dr3, rax
    mov     rax, qword [rdx + CPUMCPU.Guest.dr + 6*8]
    mov     dr6, rax                    ; not required for AMD-V

    and     dword [rdx + CPUMCPU.fUseFlags], ~CPUM_SYNC_DEBUG_STATE

gth_debug_no:

%ifdef VBOX_WITH_CRASHDUMP_MAGIC
    mov     dword [rdx + CPUMCPU.Guest.dr + 4*8], 9
%endif

    ; parameter for all helper functions (pCtx)
    lea     rsi, [rdx + CPUMCPU.Guest.fpu]
    call    r9

    ; Load CPUM pointer into rdx
    mov     rdx, [NAME(pCpumIC) wrt rip]
    CPUMCPU_FROM_CPUM_WITH_OFFSET edx, ebp

%ifdef VBOX_WITH_CRASHDUMP_MAGIC
    mov     dword [rdx + CPUMCPU.Guest.dr + 4*8], 10
%endif

    ; Save the return code
    mov     dword [rdx + CPUMCPU.u32RetCode], eax

    ; now let's switch back
    jmp     NAME(VMMGCGuestToHostAsm)   ; rax = returncode.

ENDPROC vmmR0HostToGuestAsm


;;
; Trampoline for doing a call when starting the hypervisor execution.
;
; Push any arguments to the routine.
; Push the argument frame size (cArg * 4).
; Push the call target (_cdecl convention).
; Push the address of this routine.
;
;
BITS 64
ALIGNCODE(16)
BEGINPROC vmmGCCallTrampoline
%ifdef DEBUG_STUFF
    COM64_S_CHAR 'c'
    COM64_S_CHAR 't'
    COM64_S_CHAR '!'
%endif
    int3
ENDPROC vmmGCCallTrampoline


;;
; The C interface.
;
BITS 64
ALIGNCODE(16)
BEGINPROC vmmGCGuestToHost
%ifdef DEBUG_STUFF
    push    rsi
    COM_NEWLINE
    DEBUG_CHAR('b')
    DEBUG_CHAR('a')
    DEBUG_CHAR('c')
    DEBUG_CHAR('k')
    DEBUG_CHAR('!')
    COM_NEWLINE
    pop     rsi
%endif
    int3
ENDPROC vmmGCGuestToHost

;;
; VMMGCGuestToHostAsm
;
; This is an alternative entry point which we'll be using
; when we have already saved the guest state or haven't
; been messing with the guest at all.
;
; @param    eax     Return code.
; @uses     eax, edx, ecx (or it may use them in the future)
;
BITS 64
ALIGNCODE(16)
BEGINPROC VMMGCGuestToHostAsm
    ;; We're still in the intermediate memory context!

    ;;
    ;; Switch to compatibility mode, placing ourselves in identity mapped code.
    ;;
    jmp far [NAME(fpIDEnterTarget) wrt rip]

; 16:32 pointer to IDExitTarget.
NAME(fpIDEnterTarget):
    FIXUP FIX_ID_32BIT, 0, NAME(IDExitTarget) - NAME(Start)
dd 0
    FIXUP FIX_HYPER_CS, 0
dd 0

    ; We're now on identity mapped pages!
ALIGNCODE(16)
GLOBALNAME IDExitTarget
BITS 32
    DEBUG_CHAR('1')

    ; 1. Deactivate long mode by turning off paging.
    mov     ebx, cr0
    and     ebx, ~X86_CR0_PG
    mov     cr0, ebx
    DEBUG_CHAR('2')

    ; 2. Load intermediate page table.
    FIXUP SWITCHER_FIX_INTER_CR3_HC, 1
    mov     edx, 0ffffffffh
    mov     cr3, edx
    DEBUG_CHAR('3')

    ; 3. Disable long mode.
    mov     ecx, MSR_K6_EFER
    rdmsr
    DEBUG_CHAR('5')
    and     eax, ~(MSR_K6_EFER_LME)
    wrmsr
    DEBUG_CHAR('6')

%ifndef NEED_PAE_ON_HOST
    ; 3b. Disable PAE.
    mov     eax, cr4
    and     eax, ~X86_CR4_PAE
    mov     cr4, eax
    DEBUG_CHAR('7')
%endif

    ; 4. Enable paging.
    or      ebx, X86_CR0_PG
    mov     cr0, ebx
    jmp short just_a_jump
just_a_jump:
    DEBUG_CHAR('8')

    ;;
    ;; 5. Jump to guest code mapping of the code and load the Hypervisor CS.
    ;;
    FIXUP FIX_ID_2_HC_NEAR_REL, 1, NAME(ICExitTarget) - NAME(Start)
    jmp near NAME(ICExitTarget)

    ;;
    ;; When we arrive at this label we're at the
    ;; intermediate mapping of the switching code.
    ;;
BITS 32
ALIGNCODE(16)
GLOBALNAME ICExitTarget
    DEBUG_CHAR('8')

    ; load the hypervisor data selector into ds & es
    FIXUP FIX_HYPER_DS, 1
    mov     eax, 0ffffh
    mov     ds, eax
    mov     es, eax

    FIXUP FIX_GC_CPUM_OFF, 1, 0
    mov     edx, 0ffffffffh
    CPUMCPU_FROM_CPUM_WITH_OFFSET edx, ebp
    mov     esi, [edx + CPUMCPU.Host.cr3]
    mov     cr3, esi

    ;; now we're in host memory context, let's restore regs
    FIXUP FIX_HC_CPUM_OFF, 1, 0
    mov     edx, 0ffffffffh
    CPUMCPU_FROM_CPUM_WITH_OFFSET edx, ebp

    ; restore the host EFER
    mov     ebx, edx
    mov     ecx, MSR_K6_EFER
    mov     eax, [ebx + CPUMCPU.Host.efer]
    mov     edx, [ebx + CPUMCPU.Host.efer + 4]
    wrmsr
    mov     edx, ebx

    ; activate host gdt and idt
    lgdt    [edx + CPUMCPU.Host.gdtr]
    DEBUG_CHAR('0')
    lidt    [edx + CPUMCPU.Host.idtr]
    DEBUG_CHAR('1')

    ; Restore TSS selector; must mark it as not busy before using ltr (!)
    ; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p)
    movzx   eax, word [edx + CPUMCPU.Host.tr]   ; eax <- TR
    and     al, 0F8h                            ; mask away TI and RPL bits, get descriptor offset.
    add     eax, [edx + CPUMCPU.Host.gdtr + 2]  ; eax <- GDTR.address + descriptor offset.
    and     dword [eax + 4], ~0200h             ; clear the busy flag (type bit 1, i.e. bit 9 of the high dword)
    ltr     word [edx + CPUMCPU.Host.tr]
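    ; Note: ltr raises #GP if the referenced TSS descriptor is already marked
    ; busy, so the code above locates the descriptor (TR with TI/RPL masked
    ; off, plus the GDT base taken 2 bytes into the saved pseudo-descriptor)
    ; and clears the busy bit first.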

    ; activate ldt
    DEBUG_CHAR('2')
    lldt    [edx + CPUMCPU.Host.ldtr]

    ; Restore segment registers
    mov     eax, [edx + CPUMCPU.Host.ds]
    mov     ds, eax
    mov     eax, [edx + CPUMCPU.Host.es]
    mov     es, eax
    mov     eax, [edx + CPUMCPU.Host.fs]
    mov     fs, eax
    mov     eax, [edx + CPUMCPU.Host.gs]
    mov     gs, eax
    ; restore stack
    lss     esp, [edx + CPUMCPU.Host.esp]

    ; Control registers.
    mov     ecx, [edx + CPUMCPU.Host.cr4]
    mov     cr4, ecx
    mov     ecx, [edx + CPUMCPU.Host.cr0]
    mov     cr0, ecx
    ;mov     ecx, [edx + CPUMCPU.Host.cr2]      ; assumed to be a waste of time.
    ;mov     cr2, ecx

    ; restore general registers.
    mov     edi, [edx + CPUMCPU.Host.edi]
    mov     esi, [edx + CPUMCPU.Host.esi]
    mov     ebx, [edx + CPUMCPU.Host.ebx]
    mov     ebp, [edx + CPUMCPU.Host.ebp]

    ; store the return code in eax
    mov     eax, [edx + CPUMCPU.u32RetCode]
    retf
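    ; Note: this retf pairs with the 'push cs' done in vmmR0HostToGuest before
    ; calling vmmR0HostToGuestAsm, restoring the original 32-bit caller's cs
    ; and eip together.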
ENDPROC VMMGCGuestToHostAsm

;;
; VMMGCGuestToHostAsmHyperCtx
;
; This is an alternative entry point which we'll be using
; when we have the hypervisor context and need to save
; that before going to the host.
;
; This is typically useful when abandoning the hypervisor
; because of a trap and want the trap state to be saved.
;
; @param    eax     Return code.
; @param    ecx     Points to CPUMCTXCORE.
; @uses     eax, edx, ecx
ALIGNCODE(16)
BEGINPROC VMMGCGuestToHostAsmHyperCtx
    int3
ENDPROC VMMGCGuestToHostAsmHyperCtx

;;
; VMMGCGuestToHostAsmGuestCtx
;
; Switches from Guest Context to Host Context.
; Of course it's only called from within the GC.
;
; @param    eax         Return code.
; @param    esp + 4     Pointer to CPUMCTXCORE.
;
; @remark   ASSUMES interrupts disabled.
;
ALIGNCODE(16)
BEGINPROC VMMGCGuestToHostAsmGuestCtx
    int3
ENDPROC VMMGCGuestToHostAsmGuestCtx

GLOBALNAME End
;
; The description string (in the text section).
;
NAME(Description):
    db SWITCHER_DESCRIPTION
    db 0

extern NAME(Relocate)

;
; End the fixup records.
;
BEGINDATA
    db FIX_THE_END                      ; final entry.
GLOBALNAME FixupsEnd

;;
; The switcher definition structure.
ALIGNDATA(16)
GLOBALNAME Def
    istruc VMMSWITCHERDEF
        at VMMSWITCHERDEF.pvCode,                       RTCCPTR_DEF NAME(Start)
        at VMMSWITCHERDEF.pvFixups,                     RTCCPTR_DEF NAME(Fixups)
        at VMMSWITCHERDEF.pszDesc,                      RTCCPTR_DEF NAME(Description)
        at VMMSWITCHERDEF.pfnRelocate,                  RTCCPTR_DEF NAME(Relocate)
        at VMMSWITCHERDEF.enmType,                      dd SWITCHER_TYPE
        at VMMSWITCHERDEF.cbCode,                       dd NAME(End)                         - NAME(Start)
        at VMMSWITCHERDEF.offR0HostToGuest,             dd NAME(vmmR0HostToGuest)            - NAME(Start)
        at VMMSWITCHERDEF.offGCGuestToHost,             dd NAME(vmmGCGuestToHost)            - NAME(Start)
        at VMMSWITCHERDEF.offGCCallTrampoline,          dd NAME(vmmGCCallTrampoline)         - NAME(Start)
        at VMMSWITCHERDEF.offGCGuestToHostAsm,          dd NAME(VMMGCGuestToHostAsm)         - NAME(Start)
        at VMMSWITCHERDEF.offGCGuestToHostAsmHyperCtx,  dd NAME(VMMGCGuestToHostAsmHyperCtx) - NAME(Start)
        at VMMSWITCHERDEF.offGCGuestToHostAsmGuestCtx,  dd NAME(VMMGCGuestToHostAsmGuestCtx) - NAME(Start)
        ; disasm help
        at VMMSWITCHERDEF.offHCCode0,                   dd 0
        at VMMSWITCHERDEF.cbHCCode0,                    dd NAME(IDEnterTarget)               - NAME(Start)
        at VMMSWITCHERDEF.offHCCode1,                   dd NAME(ICExitTarget)                - NAME(Start)
        at VMMSWITCHERDEF.cbHCCode1,                    dd NAME(End)                         - NAME(ICExitTarget)
        at VMMSWITCHERDEF.offIDCode0,                   dd NAME(IDEnterTarget)               - NAME(Start)
        at VMMSWITCHERDEF.cbIDCode0,                    dd NAME(ICEnterTarget)               - NAME(IDEnterTarget)
        at VMMSWITCHERDEF.offIDCode1,                   dd NAME(IDExitTarget)                - NAME(Start)
        at VMMSWITCHERDEF.cbIDCode1,                    dd NAME(ICExitTarget)                - NAME(IDExitTarget)
        at VMMSWITCHERDEF.offGCCode,                    dd 0
        at VMMSWITCHERDEF.cbGCCode,                     dd 0
    iend