VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMSwitcher/LegacyandAMD64.mac@ 25401

Last change on this file since 25401 was 18927, checked in by vboxsync, 16 years ago

Big step to separate VMM data structures for guest SMP. (pgm, em)

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 17.6 KB
Line 
1; VMM - World Switchers, 32Bit to AMD64.
2;
3
4;
5; Copyright (C) 2006-2007 Sun Microsystems, Inc.
6;
7; This file is part of VirtualBox Open Source Edition (OSE), as
8; available from http://www.virtualbox.org. This file is free software;
9; you can redistribute it and/or modify it under the terms of the GNU
10; General Public License (GPL) as published by the Free Software
11; Foundation, in version 2 as it comes in the "COPYING" file of the
12; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
13; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
14;
15; Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
16; Clara, CA 95054 USA or visit http://www.sun.com if you need
17; additional information or have any questions.
18;
19
20;%define DEBUG_STUFF 1
21;%define STRICT_IF 1
22
23;*******************************************************************************
24;* Defined Constants And Macros *
25;*******************************************************************************
26
27
28;*******************************************************************************
29;* Header Files *
30;*******************************************************************************
31%include "VBox/asmdefs.mac"
32%include "VBox/x86.mac"
33%include "VBox/cpum.mac"
34%include "VBox/stam.mac"
35%include "VBox/vm.mac"
36%include "CPUMInternal.mac"
37%include "VMMSwitcher/VMMSwitcher.mac"
38
39
40;
41; Start the fixup records
42; We collect the fixups in the .data section as we go along
43; It is therefore VITAL that no-one is using the .data section
44; for anything else between 'Start' and 'End'.
45;
46BEGINDATA
47GLOBALNAME Fixups
48
49
50
51BEGINCODE
52GLOBALNAME Start
53
54BITS 32
55
56;;
57; The C interface.
58;
59BEGINPROC vmmR0HostToGuest
; C-callable entry point for the host-to-guest world switch.
; Wraps NAME(vmmR0HostToGuestAsm): disables interrupts for the duration of
; the switch and, when VBOX_WITH_STATISTICS is defined, brackets it with the
; switcher profiling counters.
; NOTE: every 'mov edx, 0ffffffffh' below is a placeholder immediate that is
; patched at load time by the preceding FIXUP record (see the fixup notes
; near the top of the file).
 60 %ifdef DEBUG_STUFF
 61 COM32_S_NEWLINE
 62 COM32_S_CHAR '^'
 63 %endif
 64
 65 %ifdef VBOX_WITH_STATISTICS
 66 ;
 67 ; Switcher stats: edx is patched to point at VM.StatSwitcherToGC.
 68 ;
 69 FIXUP FIX_HC_VM_OFF, 1, VM.StatSwitcherToGC
 70 mov edx, 0ffffffffh
 71 STAM_PROFILE_ADV_START edx
 72 %endif
 73
 74 ; turn off interrupts (popf below restores the caller's IF)
 75 pushf
 76 cli
 77
 78 ;
 79 ; Call worker with edx = host address of the CPUM structure.
 80 ;
 81 FIXUP FIX_HC_CPUM_OFF, 1, 0
 82 mov edx, 0ffffffffh
 83 push cs ; allow for far return and restore cs correctly.
 84 call NAME(vmmR0HostToGuestAsm)
 85
 86 ; restore original flags (re-enables interrupts if they were enabled)
 87 popf
 88
 89%ifdef VBOX_WITH_STATISTICS
 90 ;
 91 ; Switcher stats: edx is patched to point at VM.StatSwitcherToHC.
 92 ;
 93 FIXUP FIX_HC_VM_OFF, 1, VM.StatSwitcherToHC
 94 mov edx, 0ffffffffh
 95 STAM_PROFILE_ADV_STOP edx
 96%endif
 97
 98 ret
 99
 100ENDPROC vmmR0HostToGuest
101
102; *****************************************************************************
103; vmmR0HostToGuestAsm
104;
105; Phase one of the switch from host to guest context (host MMU context)
106;
107; INPUT:
108; - edx virtual address of CPUM structure (valid in host context)
109;
110; USES/DESTROYS:
111; - eax, ecx, edx, esi
112;
113; ASSUMPTION:
114; - current CS and DS selectors are wide open
115;
116; *****************************************************************************
117ALIGNCODE(16)
118BEGINPROC vmmR0HostToGuestAsm
; On entry: edx = virtual address of the CPUM structure (host context),
; interrupts disabled by the caller. The code below saves the host state,
; walks 32-bit paged -> identity-mapped PAE -> long mode, then calls the
; 64-bit hypervisor worker whose address is stored in CPUMCPU.Hyper.eip.
 119 ;;
 120 ;; Save CPU host context
 121 ;; Skip eax, edx and ecx as these are not preserved over calls.
 122 ;;
 123 CPUMCPU_FROM_CPUM(edx)
124%ifdef VBOX_WITH_CRASHDUMP_MAGIC
 125 ; phys address of scratch page kept in cr2 for crash-dump progress marks
 126 mov eax, dword [edx + CPUMCPU.Guest.dr + 4*8]
 127 mov cr2, eax
 128
 129 mov dword [edx + CPUMCPU.Guest.dr + 4*8], 1
130%endif
 131
 132 ; general registers.
 133 mov [edx + CPUMCPU.Host.ebx], ebx
 134 mov [edx + CPUMCPU.Host.edi], edi
 135 mov [edx + CPUMCPU.Host.esi], esi
 136 mov [edx + CPUMCPU.Host.esp], esp
 137 mov [edx + CPUMCPU.Host.ebp], ebp
 138 ; selectors.
 139 mov [edx + CPUMCPU.Host.ds], ds
 140 mov [edx + CPUMCPU.Host.es], es
 141 mov [edx + CPUMCPU.Host.fs], fs
 142 mov [edx + CPUMCPU.Host.gs], gs
 143 mov [edx + CPUMCPU.Host.ss], ss
 144 ; special registers.
 145 sldt [edx + CPUMCPU.Host.ldtr]
 146 sidt [edx + CPUMCPU.Host.idtr]
 147 sgdt [edx + CPUMCPU.Host.gdtr]
 148 str [edx + CPUMCPU.Host.tr]
 149
150%ifdef VBOX_WITH_CRASHDUMP_MAGIC
 151 mov dword [edx + CPUMCPU.Guest.dr + 4*8], 2
152%endif
 153
 154 ; control registers.
 155 mov eax, cr0
 156 mov [edx + CPUMCPU.Host.cr0], eax
 157 ;Skip cr2; assume the host OS doesn't keep anything it needs in cr2. (safe)
 158 mov eax, cr3
 159 mov [edx + CPUMCPU.Host.cr3], eax
 160 mov eax, cr4
 161 mov [edx + CPUMCPU.Host.cr4], eax
 162
 163 ; save the host EFER msr (rdmsr clobbers edx, so park the CPUMCPU ptr in ebx)
 164 mov ebx, edx
 165 mov ecx, MSR_K6_EFER
 166 rdmsr
 167 mov [ebx + CPUMCPU.Host.efer], eax
 168 mov [ebx + CPUMCPU.Host.efer + 4], edx
 169 mov edx, ebx
 170
171%ifdef VBOX_WITH_CRASHDUMP_MAGIC
 172 mov dword [edx + CPUMCPU.Guest.dr + 4*8], 3
173%endif
 174
 175 ; Load new gdt so we can do a far jump after going into 64 bits mode
 176 lgdt [edx + CPUMCPU.Hyper.gdtr]
 177
178%ifdef VBOX_WITH_CRASHDUMP_MAGIC
 179 mov dword [edx + CPUMCPU.Guest.dr + 4*8], 4
180%endif
 181
 182 ;;
 183 ;; Load Intermediate memory context (CR3 placeholder patched at load time).
 184 ;;
 185 FIXUP SWITCHER_FIX_INTER_CR3_HC, 1
 186 mov eax, 0ffffffffh
 187 mov cr3, eax
 188 DEBUG_CHAR('?')
 189
 190 ;;
 191 ;; Jump to identity mapped location
 192 ;;
 193 FIXUP FIX_HC_2_ID_NEAR_REL, 1, NAME(IDEnterTarget) - NAME(Start)
 194 jmp near NAME(IDEnterTarget)
 195
 196
 197 ; We're now on identity mapped pages!
198ALIGNCODE(16)
199GLOBALNAME IDEnterTarget
 200 DEBUG_CHAR('2')
 201
 202 ; 1. Disable paging. (ebx keeps the PG-less CR0 value for step 5.)
 203 mov ebx, cr0
 204 and ebx, ~X86_CR0_PG
 205 mov cr0, ebx
 206 DEBUG_CHAR('2')
 207
208%ifdef VBOX_WITH_CRASHDUMP_MAGIC
 209 mov eax, cr2
 210 mov dword [eax], 3
211%endif
 212
 213 ; 2. Enable PAE (required before enabling long mode).
 214 mov ecx, cr4
 215 or ecx, X86_CR4_PAE
 216 mov cr4, ecx
 217
 218 ; 3. Load long mode intermediate CR3 (placeholder patched at load time).
 219 FIXUP FIX_INTER_AMD64_CR3, 1
 220 mov ecx, 0ffffffffh
 221 mov cr3, ecx
 222 DEBUG_CHAR('3')
 223
224%ifdef VBOX_WITH_CRASHDUMP_MAGIC
 225 mov eax, cr2
 226 mov dword [eax], 4
227%endif
 228
 229 ; 4. Enable long mode (EFER.LME; rdmsr/wrmsr clobber edx, park ptr in ebp).
 230 mov ebp, edx
 231 mov ecx, MSR_K6_EFER
 232 rdmsr
 233 or eax, MSR_K6_EFER_LME | MSR_K6_EFER_SCE | MSR_K6_EFER_NXE
 234 wrmsr
 235 mov edx, ebp
 236 DEBUG_CHAR('4')
 237
238%ifdef VBOX_WITH_CRASHDUMP_MAGIC
 239 mov eax, cr2
 240 mov dword [eax], 5
241%endif
 242
 243 ; 5. Enable paging (activates long mode since EFER.LME is set).
 244 or ebx, X86_CR0_PG
 245 ; Disable ring 0 write protection too
 246 and ebx, ~X86_CR0_WRITE_PROTECT
 247 mov cr0, ebx
 248 DEBUG_CHAR('5')
 249
 250 ; Jump from compatibility mode to 64-bit mode.
 251 FIXUP FIX_ID_FAR32_TO_64BIT_MODE, 1, NAME(IDEnter64Mode) - NAME(Start)
 252 jmp 0ffffh:0fffffffeh ; placeholder selector:offset, patched by the FIXUP above
 253
 254 ;
 255 ; We're in 64-bit mode (ds, ss, es, fs, gs are all bogus).
256BITS 64
257ALIGNCODE(16)
258NAME(IDEnter64Mode):
 259 DEBUG_CHAR('6')
 260 jmp [NAME(pICEnterTarget) wrt rip]
 261
262; 64-bit jump target (absolute address filled in at load time)
263NAME(pICEnterTarget):
264FIXUP FIX_HC_64BIT_NOCHECK, 0, NAME(ICEnterTarget) - NAME(Start)
265dq 0ffffffffffffffffh
 266
267; 64-bit pCpum address (filled in at load time).
268NAME(pCpumIC):
269FIXUP FIX_GC_64_BIT_CPUM_OFF, 0, 0
270dq 0ffffffffffffffffh
 271
272%ifdef VBOX_WITH_CRASHDUMP_MAGIC
273NAME(pMarker):
274db 'Switch_marker'
275%endif
 276
 277 ;
 278 ; When we arrive here we're in 64 bits mode in the intermediate context
 279 ;
280ALIGNCODE(16)
281GLOBALNAME ICEnterTarget
 282 ; Load CPUM pointer into rdx
 283 mov rdx, [NAME(pCpumIC) wrt rip]
 284 CPUMCPU_FROM_CPUM(edx)
 285
 286 mov rax, cs
 287 mov ds, rax
 288 mov es, rax
 289
 290 ; Invalidate fs & gs
 291 mov rax, 0
 292 mov fs, rax
 293 mov gs, rax
 294
295%ifdef VBOX_WITH_CRASHDUMP_MAGIC
 296 mov dword [rdx + CPUMCPU.Guest.dr + 4*8], 5
297%endif
 298
 299 ; Setup stack; use the lss_esp, ss pair for lss
 300 DEBUG_CHAR('7')
 301 mov rsp, 0 ; clear the full 64-bit rsp before lss loads ss:esp
 302 mov eax, [rdx + CPUMCPU.Hyper.esp]
 303 mov [rdx + CPUMCPU.Hyper.lss_esp], eax
 304 lss esp, [rdx + CPUMCPU.Hyper.lss_esp]
 305
306%ifdef VBOX_WITH_CRASHDUMP_MAGIC
 307 mov dword [rdx + CPUMCPU.Guest.dr + 4*8], 6
308%endif
 309
 310
 311 ; load the hypervisor function address
 312 mov r9, [rdx + CPUMCPU.Hyper.eip]
 313
 314 ; Check if we need to restore the guest FPU state
 315 mov esi, [rdx + CPUMCPU.fUseFlags] ; esi == use flags.
 316 test esi, CPUM_SYNC_FPU_STATE
 317 jz near gth_fpu_no
 318
319%ifdef VBOX_WITH_CRASHDUMP_MAGIC
 320 mov dword [rdx + CPUMCPU.Guest.dr + 4*8], 7
321%endif
 322
 323 mov rax, cr0
 324 mov rcx, rax ; save old CR0
 325 and rax, ~(X86_CR0_TS | X86_CR0_EM) ; clear TS/EM so fxrstor won't #NM
 326 mov cr0, rax
 327 fxrstor [rdx + CPUMCPU.Guest.fpu]
 328 mov cr0, rcx ; and restore old CR0 again
 329
 330 and dword [rdx + CPUMCPU.fUseFlags], ~CPUM_SYNC_FPU_STATE
 331
332gth_fpu_no:
 333 ; Check if we need to restore the guest debug state
 334 test esi, CPUM_SYNC_DEBUG_STATE
 335 jz near gth_debug_no
 336
337%ifdef VBOX_WITH_CRASHDUMP_MAGIC
 338 mov dword [rdx + CPUMCPU.Guest.dr + 4*8], 8
339%endif
 340
 341 mov rax, qword [rdx + CPUMCPU.Guest.dr + 0*8]
 342 mov dr0, rax
 343 mov rax, qword [rdx + CPUMCPU.Guest.dr + 1*8]
 344 mov dr1, rax
 345 mov rax, qword [rdx + CPUMCPU.Guest.dr + 2*8]
 346 mov dr2, rax
 347 mov rax, qword [rdx + CPUMCPU.Guest.dr + 3*8]
 348 mov dr3, rax
 349 mov rax, qword [rdx + CPUMCPU.Guest.dr + 6*8]
 350 mov dr6, rax ; not required for AMD-V
 351
 352 and dword [rdx + CPUMCPU.fUseFlags], ~CPUM_SYNC_DEBUG_STATE
 353
354gth_debug_no:
 355
356%ifdef VBOX_WITH_CRASHDUMP_MAGIC
 357 mov dword [rdx + CPUMCPU.Guest.dr + 4*8], 9
358%endif
 359
 360 ; parameter for all helper functions (pCtx)
 361 lea rsi, [rdx + CPUMCPU.Guest.fpu]
 362 call r9 ; invoke the 64-bit hypervisor worker (address from Hyper.eip)
 363
 364 ; Load CPUM pointer into rdx (the worker may have clobbered it)
 365 mov rdx, [NAME(pCpumIC) wrt rip]
 366 CPUMCPU_FROM_CPUM(edx)
 367
368%ifdef VBOX_WITH_CRASHDUMP_MAGIC
 369 mov dword [rdx + CPUMCPU.Guest.dr + 4*8], 10
370%endif
 371
 372 ; Save the return code
 373 mov dword [rdx + CPUMCPU.u32RetCode], eax
 374
 375 ; now let's switch back
 376 jmp NAME(VMMGCGuestToHostAsm) ; rax = returncode.
 377
378ENDPROC vmmR0HostToGuestAsm
379
380
381;;
382; Trampoline for doing a call when starting the hypervisor execution.
383;
384; Push any arguments to the routine.
385; Push the argument frame size (cArg * 4).
386; Push the call target (_cdecl convention).
387; Push the address of this routine.
388;
389; Not implemented for this switcher: the body traps with int3 if reached.
390BITS 64
391ALIGNCODE(16)
392BEGINPROC vmmGCCallTrampoline
393%ifdef DEBUG_STUFF
 394 COM32_S_CHAR 'c'
 395 COM32_S_CHAR 't'
 396 COM32_S_CHAR '!'
397%endif
 398 int3
399ENDPROC vmmGCCallTrampoline
400
401
402;;
403; The C interface for the guest-to-host switch.
404; Not implemented for this switcher: the body traps with int3 if reached.
405BITS 64
406ALIGNCODE(16)
407BEGINPROC vmmGCGuestToHost
408%ifdef DEBUG_STUFF
 409 push esi
 410 COM_NEWLINE
 411 DEBUG_CHAR('b')
 412 DEBUG_CHAR('a')
 413 DEBUG_CHAR('c')
 414 DEBUG_CHAR('k')
 415 DEBUG_CHAR('!')
 416 COM_NEWLINE
 417 pop esi
418%endif
 419 int3
420ENDPROC vmmGCGuestToHost
421
422;;
423; VMMGCGuestToHostAsm
424;
425; This is an alternative entry point which we'll be using
426; when we have saved the guest state already or we haven't
427; been messing with the guest at all.
428;
429; @param eax Return code.
430; @uses eax, edx, ecx (or it may use them in the future)
431;
432BITS 64
433ALIGNCODE(16)
434BEGINPROC VMMGCGuestToHostAsm
 435 ;; We're still in the intermediate memory context!
 436
 437 ;;
 438 ;; Switch to compatibility mode, placing ourselves in identity mapped code.
 439 ;;
 440 jmp far [NAME(fpIDEnterTarget) wrt rip]
 441
442; 16:32 Pointer to IDEnterTarget (both dwords filled in at load time).
443NAME(fpIDEnterTarget):
 444 FIXUP FIX_ID_32BIT, 0, NAME(IDExitTarget) - NAME(Start)
445dd 0
 446 FIXUP FIX_HYPER_CS, 0
447dd 0
 448
 449 ; We're now on identity mapped pages!
450ALIGNCODE(16)
451GLOBALNAME IDExitTarget
452BITS 32
 453 DEBUG_CHAR('1')
 454
 455 ; 1. Deactivate long mode by turning off paging.
 456 mov ebx, cr0
 457 and ebx, ~X86_CR0_PG
 458 mov cr0, ebx
 459 DEBUG_CHAR('2')
 460
 461 ; 2. Load intermediate page table (CR3 placeholder patched at load time).
 462 FIXUP SWITCHER_FIX_INTER_CR3_HC, 1
 463 mov edx, 0ffffffffh
 464 mov cr3, edx
 465 DEBUG_CHAR('3')
 466
 467 ; 3. Disable long mode (clear EFER.LME).
 468 mov ecx, MSR_K6_EFER
 469 rdmsr
 470 DEBUG_CHAR('5')
 471 and eax, ~(MSR_K6_EFER_LME)
 472 wrmsr
 473 DEBUG_CHAR('6')
 474
475%ifndef NEED_PAE_ON_HOST
 476 ; 3b. Disable PAE.
 477 mov eax, cr4
 478 and eax, ~X86_CR4_PAE
 479 mov cr4, eax
 480 DEBUG_CHAR('7')
481%endif
 482
 483 ; 4. Enable paging.
 484 or ebx, X86_CR0_PG
 485 mov cr0, ebx
 486 jmp short just_a_jump ; flush the prefetch queue after re-enabling paging
487just_a_jump:
 488 DEBUG_CHAR('8')
 489
 490 ;;
 491 ;; 5. Jump to guest code mapping of the code and load the Hypervisor CS.
 492 ;;
 493 FIXUP FIX_ID_2_HC_NEAR_REL, 1, NAME(ICExitTarget) - NAME(Start)
 494 jmp near NAME(ICExitTarget)
 495
 496 ;;
 497 ;; When we arrive at this label we're at the
 498 ;; intermediate mapping of the switching code.
 499 ;;
500BITS 32
501ALIGNCODE(16)
502GLOBALNAME ICExitTarget
 503 DEBUG_CHAR('8')
 504
 505 ; load the hypervisor data selector into ds & es
 506 FIXUP FIX_HYPER_DS, 1
 507 mov eax, 0ffffh
 508 mov ds, eax
 509 mov es, eax
 510
 511 FIXUP FIX_GC_CPUMCPU_OFF, 1, 0
 512 mov edx, 0ffffffffh
 513 mov esi, [edx + CPUMCPU.Host.cr3]
 514 mov cr3, esi
 515
 516 ;; now we're in host memory context, let's restore regs
 517 FIXUP FIX_HC_CPUM_OFF, 1, 0
 518 mov edx, 0ffffffffh
 519 CPUMCPU_FROM_CPUM(edx)
 520
 521 ; restore the host EFER (wrmsr clobbers edx, park the CPUMCPU ptr in ebx)
 522 mov ebx, edx
 523 mov ecx, MSR_K6_EFER
 524 mov eax, [ebx + CPUMCPU.Host.efer]
 525 mov edx, [ebx + CPUMCPU.Host.efer + 4]
 526 wrmsr
 527 mov edx, ebx
 528
 529 ; activate host gdt and idt
 530 lgdt [edx + CPUMCPU.Host.gdtr]
 531 DEBUG_CHAR('0')
 532 lidt [edx + CPUMCPU.Host.idtr]
 533 DEBUG_CHAR('1')
 534
 535 ; Restore TSS selector; must mark it as not busy before using ltr (!)
 536 ; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p)
 537 movzx eax, word [edx + CPUMCPU.Host.tr] ; eax <- TR
 538 and al, 0F8h ; mask away TI and RPL bits, get descriptor offset.
 539 add eax, [edx + CPUMCPU.Host.gdtr + 2] ; eax <- GDTR.address + descriptor offset.
 540 and dword [eax + 4], ~0200h ; clear busy flag (2nd type2 bit)
 541 ltr word [edx + CPUMCPU.Host.tr]
 542
 543 ; activate ldt
 544 DEBUG_CHAR('2')
 545 lldt [edx + CPUMCPU.Host.ldtr]
 546
 547 ; Restore segment registers
 548 mov eax, [edx + CPUMCPU.Host.ds]
 549 mov ds, eax
 550 mov eax, [edx + CPUMCPU.Host.es]
 551 mov es, eax
 552 mov eax, [edx + CPUMCPU.Host.fs]
 553 mov fs, eax
 554 mov eax, [edx + CPUMCPU.Host.gs]
 555 mov gs, eax
 556 ; restore stack
 557 lss esp, [edx + CPUMCPU.Host.esp]
 558
 559 ; Control registers.
 560 mov ecx, [edx + CPUMCPU.Host.cr4]
 561 mov cr4, ecx
 562 mov ecx, [edx + CPUMCPU.Host.cr0]
 563 mov cr0, ecx
 564 ;mov ecx, [edx + CPUMCPU.Host.cr2] ; assumes this is waste of time.
 565 ;mov cr2, ecx
 566
 567 ; restore general registers.
 568 mov edi, [edx + CPUMCPU.Host.edi]
 569 mov esi, [edx + CPUMCPU.Host.esi]
 570 mov ebx, [edx + CPUMCPU.Host.ebx]
 571 mov ebp, [edx + CPUMCPU.Host.ebp]
 572
 573 ; store the return code in eax
 574 mov eax, [edx + CPUMCPU.u32RetCode]
 575 retf ; far return: pairs with the 'push cs / call' in vmmR0HostToGuest
576ENDPROC VMMGCGuestToHostAsm
577
578;;
579; VMMGCGuestToHostAsmHyperCtx
580;
581; This is an alternative entry point which we'll be using
582; when we have the hypervisor context and need to save
583; that before going to the host.
584;
585; This is typically useful when abandoning the hypervisor
586; because of a trap and we want the trap state to be saved.
587;
588; @param eax Return code.
589; @param ecx Points to CPUMCTXCORE.
590; @uses eax,edx,ecx
591ALIGNCODE(16)
592BEGINPROC VMMGCGuestToHostAsmHyperCtx
 593 int3 ; not implemented for this switcher — trap if ever reached.
; NOTE(review): no ENDPROC for this proc appears in this listing — verify
; against the original file that it is not an extraction artifact.
594
595;;
596; VMMGCGuestToHostAsmGuestCtx
597;
598; Switches from Guest Context to Host Context.
599; Of course it's only called from within the GC.
600;
601; @param eax Return code.
602; @param esp + 4 Pointer to CPUMCTXCORE.
603;
604; @remark ASSUMES interrupts disabled.
605; Not implemented for this switcher: the body traps with int3 if reached.
606ALIGNCODE(16)
607BEGINPROC VMMGCGuestToHostAsmGuestCtx
 608 int3
609
610GLOBALNAME End
611;
612; The description string (in the text section).
613;
614NAME(Description):
 615 db SWITCHER_DESCRIPTION
 616 db 0 ; NUL terminator
 617
618extern NAME(Relocate)
 619
620;
621; End the fixup records.
622;
623BEGINDATA
 624 db FIX_THE_END ; final entry.
625GLOBALNAME FixupsEnd
 626
627;;
628; The switcher definition structure consumed by the VMM switcher loader:
629; entry-point offsets plus code-range info used as disassembly help.
629ALIGNDATA(16)
630GLOBALNAME Def
 631 istruc VMMSWITCHERDEF
 632 at VMMSWITCHERDEF.pvCode, RTCCPTR_DEF NAME(Start)
 633 at VMMSWITCHERDEF.pvFixups, RTCCPTR_DEF NAME(Fixups)
 634 at VMMSWITCHERDEF.pszDesc, RTCCPTR_DEF NAME(Description)
 635 at VMMSWITCHERDEF.pfnRelocate, RTCCPTR_DEF NAME(Relocate)
 636 at VMMSWITCHERDEF.enmType, dd SWITCHER_TYPE
 637 at VMMSWITCHERDEF.cbCode, dd NAME(End) - NAME(Start)
 638 at VMMSWITCHERDEF.offR0HostToGuest, dd NAME(vmmR0HostToGuest) - NAME(Start)
 639 at VMMSWITCHERDEF.offGCGuestToHost, dd NAME(vmmGCGuestToHost) - NAME(Start)
 640 at VMMSWITCHERDEF.offGCCallTrampoline, dd NAME(vmmGCCallTrampoline) - NAME(Start)
 641 at VMMSWITCHERDEF.offGCGuestToHostAsm, dd NAME(VMMGCGuestToHostAsm) - NAME(Start)
 642 at VMMSWITCHERDEF.offGCGuestToHostAsmHyperCtx, dd NAME(VMMGCGuestToHostAsmHyperCtx)- NAME(Start)
 643 at VMMSWITCHERDEF.offGCGuestToHostAsmGuestCtx, dd NAME(VMMGCGuestToHostAsmGuestCtx)- NAME(Start)
 644 ; disasm help
 645 at VMMSWITCHERDEF.offHCCode0, dd 0
 646 at VMMSWITCHERDEF.cbHCCode0, dd NAME(IDEnterTarget) - NAME(Start)
 647 at VMMSWITCHERDEF.offHCCode1, dd NAME(ICExitTarget) - NAME(Start)
 648 at VMMSWITCHERDEF.cbHCCode1, dd NAME(End) - NAME(ICExitTarget)
 649 at VMMSWITCHERDEF.offIDCode0, dd NAME(IDEnterTarget) - NAME(Start)
 650 at VMMSWITCHERDEF.cbIDCode0, dd NAME(ICEnterTarget) - NAME(IDEnterTarget)
 651 at VMMSWITCHERDEF.offIDCode1, dd NAME(IDExitTarget) - NAME(Start)
 ; NOTE(review): cbIDCode1 below is computed from NAME(Start), while
 ; cbIDCode0 above is a size relative to its own offIDCode0 label —
 ; looks like it should be NAME(ICExitTarget) - NAME(IDExitTarget);
 ; verify against the upstream repository before changing.
 652 at VMMSWITCHERDEF.cbIDCode1, dd NAME(ICExitTarget) - NAME(Start)
 653 at VMMSWITCHERDEF.offGCCode, dd 0
 654 at VMMSWITCHERDEF.cbGCCode, dd 0
 655
 656 iend
657
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette