VirtualBox: vbox/trunk/src/VBox/VMM/VMMSwitcher/LegacyandAMD64.mac @ 30146

Last change on this file since 30146 was 30146, checked in by vboxsync, 14 years ago:
"Activate fixup for efer mask"
; VMM - World Switchers, 32Bit to AMD64.
;

;
; Copyright (C) 2006-2007 Oracle Corporation
;
; This file is part of VirtualBox Open Source Edition (OSE), as
; available from http://www.virtualbox.org. This file is free software;
; you can redistribute it and/or modify it under the terms of the GNU
; General Public License (GPL) as published by the Free Software
; Foundation, in version 2 as it comes in the "COPYING" file of the
; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
;

;%define DEBUG_STUFF 1
;%define STRICT_IF 1

;*******************************************************************************
;*  Defined Constants And Macros                                               *
;*******************************************************************************


;*******************************************************************************
;*  Header Files                                                               *
;*******************************************************************************
%include "VBox/asmdefs.mac"
%include "VBox/x86.mac"
%include "VBox/cpum.mac"
%include "VBox/stam.mac"
%include "VBox/vm.mac"
%include "CPUMInternal.mac"
%include "VMMSwitcher/VMMSwitcher.mac"


;
; Start the fixup records
; We collect the fixups in the .data section as we go along
; It is therefore VITAL that no-one is using the .data section
; for anything else between 'Start' and 'End'.
;
BEGINDATA
GLOBALNAME Fixups
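
; Note: each FIXUP below appends a record to this table; at load time the
; relocator (see 'extern NAME(Relocate)' near the end of this file) walks
; the records and patches the placeholder constants in the code (the
; 0ffffffffh immediates and bogus selectors) with the real values.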



BEGINCODE
GLOBALNAME Start

BITS 32

;;
; The C interface.
;
BEGINPROC vmmR0HostToGuest
%ifdef DEBUG_STUFF
    COM32_S_NEWLINE
    COM32_S_CHAR '^'
%endif

%ifdef VBOX_WITH_STATISTICS
    ;
    ; Switcher stats.
    ;
    FIXUP FIX_HC_VM_OFF, 1, VM.StatSwitcherToGC
    mov     edx, 0ffffffffh
    STAM_PROFILE_ADV_START edx
%endif

    ; turn off interrupts
    pushf
    cli

    ;
    ; Call worker.
    ;
    FIXUP FIX_HC_CPUM_OFF, 1, 0
    mov     edx, 0ffffffffh
    push    cs                          ; allow for far return and restore cs correctly.
    call    NAME(vmmR0HostToGuestAsm)

    ; restore original flags
    popf

%ifdef VBOX_WITH_STATISTICS
    ;
    ; Switcher stats.
    ;
    FIXUP FIX_HC_VM_OFF, 1, VM.StatSwitcherToHC
    mov     edx, 0ffffffffh
    STAM_PROFILE_ADV_STOP edx
%endif

    ret

ENDPROC vmmR0HostToGuest

; *****************************************************************************
; vmmR0HostToGuestAsm
;
; Phase one of the switch from host to guest context (host MMU context)
;
; INPUT:
;       - edx       virtual address of CPUM structure (valid in host context)
;
; USES/DESTROYS:
;       - eax, ecx, edx, esi
;
; ASSUMPTION:
;       - current CS and DS selectors are wide open
;
; *****************************************************************************
ALIGNCODE(16)
BEGINPROC vmmR0HostToGuestAsm
    ;;
    ;; Save CPU host context
    ;; Skip eax, edx and ecx as these are not preserved over calls.
    ;;
    CPUMCPU_FROM_CPUM(edx)
%ifdef VBOX_WITH_CRASHDUMP_MAGIC
    ; phys address of scratch page
    mov     eax, dword [edx + CPUMCPU.Guest.dr + 4*8]
    mov     cr2, eax

    mov     dword [edx + CPUMCPU.Guest.dr + 4*8], 1
%endif

    ; general registers.
    mov     [edx + CPUMCPU.Host.ebx], ebx
    mov     [edx + CPUMCPU.Host.edi], edi
    mov     [edx + CPUMCPU.Host.esi], esi
    mov     [edx + CPUMCPU.Host.esp], esp
    mov     [edx + CPUMCPU.Host.ebp], ebp
    ; selectors.
    mov     [edx + CPUMCPU.Host.ds], ds
    mov     [edx + CPUMCPU.Host.es], es
    mov     [edx + CPUMCPU.Host.fs], fs
    mov     [edx + CPUMCPU.Host.gs], gs
    mov     [edx + CPUMCPU.Host.ss], ss
    ; special registers.
    sldt    [edx + CPUMCPU.Host.ldtr]
    sidt    [edx + CPUMCPU.Host.idtr]
    sgdt    [edx + CPUMCPU.Host.gdtr]
    str     [edx + CPUMCPU.Host.tr]

%ifdef VBOX_WITH_CRASHDUMP_MAGIC
    mov     dword [edx + CPUMCPU.Guest.dr + 4*8], 2
%endif

    ; control registers.
    mov     eax, cr0
    mov     [edx + CPUMCPU.Host.cr0], eax
    ; Skip cr2; assume the host OS doesn't stuff things in cr2. (safe)
    mov     eax, cr3
    mov     [edx + CPUMCPU.Host.cr3], eax
    mov     eax, cr4
    mov     [edx + CPUMCPU.Host.cr4], eax

    ; save the host EFER msr
    mov     ebx, edx
    mov     ecx, MSR_K6_EFER
    rdmsr
    mov     [ebx + CPUMCPU.Host.efer], eax
    mov     [ebx + CPUMCPU.Host.efer + 4], edx
    mov     edx, ebx
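    ; (rdmsr takes the MSR index in ecx and returns the value in edx:eax,
    ; clobbering edx; the CPUMCPU pointer is therefore parked in ebx
    ; across the read above and moved back afterwards.)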

%ifdef VBOX_WITH_CRASHDUMP_MAGIC
    mov     dword [edx + CPUMCPU.Guest.dr + 4*8], 3
%endif

    ; Load the new GDT so we can do a far jump after switching to 64-bit mode
    lgdt    [edx + CPUMCPU.Hyper.gdtr]

%ifdef VBOX_WITH_CRASHDUMP_MAGIC
    mov     dword [edx + CPUMCPU.Guest.dr + 4*8], 4
%endif

    ;;
    ;; Load Intermediate memory context.
    ;;
    FIXUP SWITCHER_FIX_INTER_CR3_HC, 1
    mov     eax, 0ffffffffh
    mov     cr3, eax
    DEBUG_CHAR('?')

    ;;
    ;; Jump to identity mapped location
    ;;
    FIXUP FIX_HC_2_ID_NEAR_REL, 1, NAME(IDEnterTarget) - NAME(Start)
    jmp near NAME(IDEnterTarget)


    ; We're now on identity mapped pages!
ALIGNCODE(16)
GLOBALNAME IDEnterTarget
    DEBUG_CHAR('2')
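
    ; Steps 1-5 below form the canonical x86 long mode entry sequence:
    ; paging off (CR0.PG=0), PAE on (CR4.PAE=1), a PML4 base loaded into
    ; CR3, EFER.LME set, then paging re-enabled, which makes the CPU set
    ; EFER.LMA and leaves us in compatibility mode until the far jump
    ; below loads a 64-bit (L=1) code selector.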

    ; 1. Disable paging.
    mov     ebx, cr0
    and     ebx, ~X86_CR0_PG
    mov     cr0, ebx
    DEBUG_CHAR('2')

%ifdef VBOX_WITH_CRASHDUMP_MAGIC
    mov     eax, cr2
    mov     dword [eax], 3
%endif

    ; 2. Enable PAE.
    mov     ecx, cr4
    or      ecx, X86_CR4_PAE
    mov     cr4, ecx

    ; 3. Load long mode intermediate CR3.
    FIXUP FIX_INTER_AMD64_CR3, 1
    mov     ecx, 0ffffffffh
    mov     cr3, ecx
    DEBUG_CHAR('3')

%ifdef VBOX_WITH_CRASHDUMP_MAGIC
    mov     eax, cr2
    mov     dword [eax], 4
%endif

    ; 4. Enable long mode.
    mov     ebp, edx
    mov     ecx, MSR_K6_EFER
    rdmsr
    FIXUP FIX_EFER_OR_MASK, 1
    or      eax, 0ffffffffh
    and     eax, ~(MSR_K6_EFER_FFXSR)   ; turn off fast fxsave/fxrstor (skipping xmm regs)
    wrmsr
    mov     edx, ebp
    DEBUG_CHAR('4')
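
    ; (FIX_EFER_OR_MASK patches the 0ffffffffh immediate above at load
    ; time with the EFER bits to OR in, at minimum MSR_K6_EFER_LME to
    ; enable long mode; cf. the changeset message "Activate fixup for
    ; efer mask".)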

%ifdef VBOX_WITH_CRASHDUMP_MAGIC
    mov     eax, cr2
    mov     dword [eax], 5
%endif

    ; 5. Enable paging.
    or      ebx, X86_CR0_PG
    ; Disable ring 0 write protection too
    and     ebx, ~X86_CR0_WRITE_PROTECT
    mov     cr0, ebx
    DEBUG_CHAR('5')

    ; Jump from compatibility mode to 64-bit mode.
    FIXUP FIX_ID_FAR32_TO_64BIT_MODE, 1, NAME(IDEnter64Mode) - NAME(Start)
    jmp     0ffffh:0fffffffeh
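
    ; (The selector:offset placeholder of the far jump is patched by the
    ; fixup above with a 64-bit code selector and the offset of
    ; IDEnter64Mode; loading the L=1 selector is what actually takes the
    ; CPU from compatibility mode into 64-bit mode.)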

    ;
    ; We're in 64-bit mode (ds, ss, es, fs, gs are all bogus).
BITS 64
ALIGNCODE(16)
NAME(IDEnter64Mode):
    DEBUG_CHAR('6')
    jmp     [NAME(pICEnterTarget) wrt rip]

; 64-bit jump target
NAME(pICEnterTarget):
FIXUP FIX_HC_64BIT_NOCHECK, 0, NAME(ICEnterTarget) - NAME(Start)
dq 0ffffffffffffffffh

; 64-bit pCpum address.
NAME(pCpumIC):
FIXUP FIX_GC_64_BIT_CPUM_OFF, 0, 0
dq 0ffffffffffffffffh

%ifdef VBOX_WITH_CRASHDUMP_MAGIC
NAME(pMarker):
db 'Switch_marker'
%endif

    ;
    ; When we arrive here we're in 64-bit mode in the intermediate context
    ;
ALIGNCODE(16)
GLOBALNAME ICEnterTarget
    ; Load CPUM pointer into rdx
    mov     rdx, [NAME(pCpumIC) wrt rip]
    CPUMCPU_FROM_CPUM(edx)

    mov     rax, cs
    mov     ds, rax
    mov     es, rax

    ; Invalidate fs & gs
    mov     rax, 0
    mov     fs, rax
    mov     gs, rax

%ifdef VBOX_WITH_CRASHDUMP_MAGIC
    mov     dword [rdx + CPUMCPU.Guest.dr + 4*8], 5
%endif

    ; Setup stack; use the lss_esp, ss pair for lss
    DEBUG_CHAR('7')
    mov     rsp, 0
    mov     eax, [rdx + CPUMCPU.Hyper.esp]
    mov     [rdx + CPUMCPU.Hyper.lss_esp], eax
    lss     esp, [rdx + CPUMCPU.Hyper.lss_esp]
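    ; (lss loads SS:ESP from a single m16:32 operand; the code relies on
    ; Hyper.lss_esp being immediately followed by the ss field so the two
    ; form that operand. Loading ss and esp with separate instructions
    ; would briefly leave a mismatched stack.)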

%ifdef VBOX_WITH_CRASHDUMP_MAGIC
    mov     dword [rdx + CPUMCPU.Guest.dr + 4*8], 6
%endif


    ; load the hypervisor function address
    mov     r9, [rdx + CPUMCPU.Hyper.eip]

    ; Check if we need to restore the guest FPU state
    mov     esi, [rdx + CPUMCPU.fUseFlags]  ; esi == use flags.
    test    esi, CPUM_SYNC_FPU_STATE
    jz near gth_fpu_no

%ifdef VBOX_WITH_CRASHDUMP_MAGIC
    mov     dword [rdx + CPUMCPU.Guest.dr + 4*8], 7
%endif

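    ; Clear CR0.TS and CR0.EM around fxrstor: fxrstor raises #NM when TS
    ; is set and #UD when EM is set, so both must be clear while the
    ; guest FPU/SSE state is restored; the original CR0 is put back
    ; immediately afterwards.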
    mov     rax, cr0
    mov     rcx, rax                    ; save old CR0
    and     rax, ~(X86_CR0_TS | X86_CR0_EM)
    mov     cr0, rax
    fxrstor [rdx + CPUMCPU.Guest.fpu]
    mov     cr0, rcx                    ; and restore old CR0 again

    and     dword [rdx + CPUMCPU.fUseFlags], ~CPUM_SYNC_FPU_STATE

gth_fpu_no:
    ; Check if we need to restore the guest debug state
    test    esi, CPUM_SYNC_DEBUG_STATE
    jz near gth_debug_no

%ifdef VBOX_WITH_CRASHDUMP_MAGIC
    mov     dword [rdx + CPUMCPU.Guest.dr + 4*8], 8
%endif

    mov     rax, qword [rdx + CPUMCPU.Guest.dr + 0*8]
    mov     dr0, rax
    mov     rax, qword [rdx + CPUMCPU.Guest.dr + 1*8]
    mov     dr1, rax
    mov     rax, qword [rdx + CPUMCPU.Guest.dr + 2*8]
    mov     dr2, rax
    mov     rax, qword [rdx + CPUMCPU.Guest.dr + 3*8]
    mov     dr3, rax
    mov     rax, qword [rdx + CPUMCPU.Guest.dr + 6*8]
    mov     dr6, rax                    ; not required for AMD-V

    and     dword [rdx + CPUMCPU.fUseFlags], ~CPUM_SYNC_DEBUG_STATE

gth_debug_no:

%ifdef VBOX_WITH_CRASHDUMP_MAGIC
    mov     dword [rdx + CPUMCPU.Guest.dr + 4*8], 9
%endif

    ; parameter for all helper functions (pCtx)
    lea     rsi, [rdx + CPUMCPU.Guest.fpu]
    call    r9

    ; Load CPUM pointer into rdx
    mov     rdx, [NAME(pCpumIC) wrt rip]
    CPUMCPU_FROM_CPUM(edx)

%ifdef VBOX_WITH_CRASHDUMP_MAGIC
    mov     dword [rdx + CPUMCPU.Guest.dr + 4*8], 10
%endif

    ; Save the return code
    mov     dword [rdx + CPUMCPU.u32RetCode], eax

    ; now let's switch back
    jmp     NAME(VMMGCGuestToHostAsm)   ; rax = returncode.

ENDPROC vmmR0HostToGuestAsm


;;
; Trampoline for doing a call when starting the hypervisor execution.
;
; Push any arguments to the routine.
; Push the argument frame size (cArg * 4).
; Push the call target (_cdecl convention).
; Push the address of this routine.
;
;
BITS 64
ALIGNCODE(16)
BEGINPROC vmmGCCallTrampoline
%ifdef DEBUG_STUFF
    COM64_S_CHAR 'c'
    COM64_S_CHAR 't'
    COM64_S_CHAR '!'
%endif
    int3
ENDPROC vmmGCCallTrampoline


;;
; The C interface.
;
BITS 64
ALIGNCODE(16)
BEGINPROC vmmGCGuestToHost
%ifdef DEBUG_STUFF
    push    rsi
    COM_NEWLINE
    DEBUG_CHAR('b')
    DEBUG_CHAR('a')
    DEBUG_CHAR('c')
    DEBUG_CHAR('k')
    DEBUG_CHAR('!')
    COM_NEWLINE
    pop     rsi
%endif
    int3
ENDPROC vmmGCGuestToHost

;;
; VMMGCGuestToHostAsm
;
; This is an alternative entry point which we'll be using
; when we have already saved the guest state or haven't
; been messing with the guest at all.
;
; @param eax     Return code.
; @uses  eax, edx, ecx (or it may use them in the future)
;
BITS 64
ALIGNCODE(16)
BEGINPROC VMMGCGuestToHostAsm
    ;; We're still in the intermediate memory context!

    ;;
    ;; Switch to compatibility mode, placing ourselves in identity mapped code.
    ;;
    jmp far [NAME(fpIDEnterTarget) wrt rip]

; 16:32 Pointer to IDExitTarget.
NAME(fpIDEnterTarget):
    FIXUP FIX_ID_32BIT, 0, NAME(IDExitTarget) - NAME(Start)
dd 0
    FIXUP FIX_HYPER_CS, 0
dd 0

    ; We're now on identity mapped pages!
ALIGNCODE(16)
GLOBALNAME IDExitTarget
BITS 32
    DEBUG_CHAR('1')

    ; 1. Deactivate long mode by turning off paging.
    mov     ebx, cr0
    and     ebx, ~X86_CR0_PG
    mov     cr0, ebx
    DEBUG_CHAR('2')

    ; 2. Load intermediate page table.
    FIXUP SWITCHER_FIX_INTER_CR3_HC, 1
    mov     edx, 0ffffffffh
    mov     cr3, edx
    DEBUG_CHAR('3')

    ; 3. Disable long mode.
    mov     ecx, MSR_K6_EFER
    rdmsr
    DEBUG_CHAR('5')
    and     eax, ~(MSR_K6_EFER_LME)
    wrmsr
    DEBUG_CHAR('6')

%ifndef NEED_PAE_ON_HOST
    ; 3b. Disable PAE.
    mov     eax, cr4
    and     eax, ~X86_CR4_PAE
    mov     cr4, eax
    DEBUG_CHAR('7')
%endif

    ; 4. Enable paging.
    or      ebx, X86_CR0_PG
    mov     cr0, ebx
    jmp short just_a_jump
just_a_jump:
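    ; (the short jump above flushes the instruction prefetch queue after
    ; paging has been re-enabled, the traditional step after changing the
    ; paging mode)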
    DEBUG_CHAR('8')

    ;;
    ;; 5. Jump to guest code mapping of the code and load the Hypervisor CS.
    ;;
    FIXUP FIX_ID_2_HC_NEAR_REL, 1, NAME(ICExitTarget) - NAME(Start)
    jmp near NAME(ICExitTarget)

    ;;
    ;; When we arrive at this label we're at the
    ;; intermediate mapping of the switching code.
    ;;
BITS 32
ALIGNCODE(16)
GLOBALNAME ICExitTarget
    DEBUG_CHAR('8')

    ; load the hypervisor data selector into ds & es
    FIXUP FIX_HYPER_DS, 1
    mov     eax, 0ffffh
    mov     ds, eax
    mov     es, eax

    FIXUP FIX_GC_CPUMCPU_OFF, 1, 0
    mov     edx, 0ffffffffh
    mov     esi, [edx + CPUMCPU.Host.cr3]
    mov     cr3, esi

    ;; now we're in host memory context, let's restore regs
    FIXUP FIX_HC_CPUM_OFF, 1, 0
    mov     edx, 0ffffffffh
    CPUMCPU_FROM_CPUM(edx)

    ; restore the host EFER
    mov     ebx, edx
    mov     ecx, MSR_K6_EFER
    mov     eax, [ebx + CPUMCPU.Host.efer]
    mov     edx, [ebx + CPUMCPU.Host.efer + 4]
    wrmsr
    mov     edx, ebx

    ; activate host gdt and idt
    lgdt    [edx + CPUMCPU.Host.gdtr]
    DEBUG_CHAR('0')
    lidt    [edx + CPUMCPU.Host.idtr]
    DEBUG_CHAR('1')

    ; Restore TSS selector; must mark it as not busy before using ltr (!)
    ; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p)
    movzx   eax, word [edx + CPUMCPU.Host.tr]       ; eax <- TR
    and     al, 0F8h                                ; mask away TI and RPL bits, get descriptor offset.
    add     eax, [edx + CPUMCPU.Host.gdtr + 2]      ; eax <- GDTR.address + descriptor offset.
    and     dword [eax + 4], ~0200h                 ; clear busy flag (2nd type2 bit)
    ltr     word [edx + CPUMCPU.Host.tr]
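    ; (ltr raises #GP if the TSS descriptor is still marked busy, and the
    ; host TSS was left busy when it was last loaded; the busy bit is bit
    ; 1 of the descriptor type field, i.e. mask 0200h of the dword at
    ; offset 4, hence the clearing above.)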

    ; activate ldt
    DEBUG_CHAR('2')
    lldt    [edx + CPUMCPU.Host.ldtr]

    ; Restore segment registers
    mov     eax, [edx + CPUMCPU.Host.ds]
    mov     ds, eax
    mov     eax, [edx + CPUMCPU.Host.es]
    mov     es, eax
    mov     eax, [edx + CPUMCPU.Host.fs]
    mov     fs, eax
    mov     eax, [edx + CPUMCPU.Host.gs]
    mov     gs, eax
    ; restore stack
    lss     esp, [edx + CPUMCPU.Host.esp]

    ; Control registers.
    mov     ecx, [edx + CPUMCPU.Host.cr4]
    mov     cr4, ecx
    mov     ecx, [edx + CPUMCPU.Host.cr0]
    mov     cr0, ecx
    ;mov     ecx, [edx + CPUMCPU.Host.cr2]          ; assumed to be a waste of time.
    ;mov     cr2, ecx

    ; restore general registers.
    mov     edi, [edx + CPUMCPU.Host.edi]
    mov     esi, [edx + CPUMCPU.Host.esi]
    mov     ebx, [edx + CPUMCPU.Host.ebx]
    mov     ebp, [edx + CPUMCPU.Host.ebp]

    ; store the return code in eax
    mov     eax, [edx + CPUMCPU.u32RetCode]
    retf
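    ; (retf pops both EIP and CS, matching the 'push cs' + near call in
    ; vmmR0HostToGuest at the top of this file, so this returns far to
    ; the host-context caller and restores its code selector.)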
ENDPROC VMMGCGuestToHostAsm

;;
; VMMGCGuestToHostAsmHyperCtx
;
; This is an alternative entry point which we'll be using
; when we have the hypervisor context and need to save
; that before going to the host.
;
; This is typically useful when abandoning the hypervisor
; because of a trap, when we want the trap state to be saved.
;
; @param eax     Return code.
; @param ecx     Points to CPUMCTXCORE.
; @uses  eax, edx, ecx
ALIGNCODE(16)
BEGINPROC VMMGCGuestToHostAsmHyperCtx
    int3

;;
; VMMGCGuestToHostAsmGuestCtx
;
; Switches from Guest Context to Host Context.
; Of course it's only called from within the GC.
;
; @param eax     Return code.
; @param esp + 4 Pointer to CPUMCTXCORE.
;
; @remark ASSUMES interrupts disabled.
;
ALIGNCODE(16)
BEGINPROC VMMGCGuestToHostAsmGuestCtx
    int3

GLOBALNAME End
;
; The description string (in the text section).
;
NAME(Description):
    db SWITCHER_DESCRIPTION
    db 0

extern NAME(Relocate)

;
; End the fixup records.
;
BEGINDATA
    db FIX_THE_END                      ; final entry.
GLOBALNAME FixupsEnd

;;
; The switcher definition structure.
ALIGNDATA(16)
GLOBALNAME Def
    istruc VMMSWITCHERDEF
        at VMMSWITCHERDEF.pvCode,                       RTCCPTR_DEF NAME(Start)
        at VMMSWITCHERDEF.pvFixups,                     RTCCPTR_DEF NAME(Fixups)
        at VMMSWITCHERDEF.pszDesc,                      RTCCPTR_DEF NAME(Description)
        at VMMSWITCHERDEF.pfnRelocate,                  RTCCPTR_DEF NAME(Relocate)
        at VMMSWITCHERDEF.enmType,                      dd SWITCHER_TYPE
        at VMMSWITCHERDEF.cbCode,                       dd NAME(End) - NAME(Start)
        at VMMSWITCHERDEF.offR0HostToGuest,             dd NAME(vmmR0HostToGuest) - NAME(Start)
        at VMMSWITCHERDEF.offGCGuestToHost,             dd NAME(vmmGCGuestToHost) - NAME(Start)
        at VMMSWITCHERDEF.offGCCallTrampoline,          dd NAME(vmmGCCallTrampoline) - NAME(Start)
        at VMMSWITCHERDEF.offGCGuestToHostAsm,          dd NAME(VMMGCGuestToHostAsm) - NAME(Start)
        at VMMSWITCHERDEF.offGCGuestToHostAsmHyperCtx,  dd NAME(VMMGCGuestToHostAsmHyperCtx) - NAME(Start)
        at VMMSWITCHERDEF.offGCGuestToHostAsmGuestCtx,  dd NAME(VMMGCGuestToHostAsmGuestCtx) - NAME(Start)
        ; disasm help
        at VMMSWITCHERDEF.offHCCode0,                   dd 0
        at VMMSWITCHERDEF.cbHCCode0,                    dd NAME(IDEnterTarget) - NAME(Start)
        at VMMSWITCHERDEF.offHCCode1,                   dd NAME(ICExitTarget) - NAME(Start)
        at VMMSWITCHERDEF.cbHCCode1,                    dd NAME(End) - NAME(ICExitTarget)
        at VMMSWITCHERDEF.offIDCode0,                   dd NAME(IDEnterTarget) - NAME(Start)
        at VMMSWITCHERDEF.cbIDCode0,                    dd NAME(ICEnterTarget) - NAME(IDEnterTarget)
        at VMMSWITCHERDEF.offIDCode1,                   dd NAME(IDExitTarget) - NAME(Start)
        at VMMSWITCHERDEF.cbIDCode1,                    dd NAME(ICExitTarget) - NAME(Start)
        at VMMSWITCHERDEF.offGCCode,                    dd 0
        at VMMSWITCHERDEF.cbGCCode,                     dd 0

    iend