VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/HWACCMR0A.asm@4953

Last change on this file since 4953 was 4071, checked in by vboxsync, 17 years ago

Biggest check-in ever. New source code headers for all (C) innotek files.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 17.9 KB
; $Id: HWACCMR0A.asm 4071 2007-08-07 17:07:59Z vboxsync $
;; @file
; VMXM - R0 vmx helpers
;

;
; Copyright (C) 2006-2007 innotek GmbH
;
; This file is part of VirtualBox Open Source Edition (OSE), as
; available from http://www.virtualbox.org. This file is free software;
; you can redistribute it and/or modify it under the terms of the GNU
; General Public License as published by the Free Software Foundation,
; in version 2 as it comes in the "COPYING" file of the VirtualBox OSE
; distribution. VirtualBox OSE is distributed in the hope that it will
; be useful, but WITHOUT ANY WARRANTY of any kind.

;*******************************************************************************
;* Header Files                                                                *
;*******************************************************************************
%include "VBox/asmdefs.mac"
%include "VBox/err.mac"
%include "VBox/hwacc_vmx.mac"
%include "VBox/cpum.mac"
%include "VBox/x86.mac"

%ifdef RT_OS_OS2 ;; @todo build cvs nasm like on OS X.
 %macro vmwrite 2
    int3
 %endmacro
 %define vmlaunch int3
 %define vmresume int3
%endif


;; @def MYPUSHAD
; Macro generating an equivalent to pushad

;; @def MYPOPAD
; Macro generating an equivalent to popad

;; @def MYPUSHSEGS
; Macro saving all segment registers on the stack.
; @param 1  full width register name
; @param 2  16-bit register name for \a 1.

;; @def MYPOPSEGS
; Macro restoring all segment registers from the stack.
; @param 1  full width register name
; @param 2  16-bit register name for \a 1.
%ifdef RT_ARCH_AMD64
 %ifdef ASM_CALL64_GCC
  %macro MYPUSHAD 0
    push    r15
    push    r14
    push    r13
    push    r12
    push    rbx
  %endmacro
  %macro MYPOPAD 0
    pop     rbx
    pop     r12
    pop     r13
    pop     r14
    pop     r15
  %endmacro

 %else ; ASM_CALL64_MSC
  %macro MYPUSHAD 0
    push    r15
    push    r14
    push    r13
    push    r12
    push    rbx
    push    rsi
    push    rdi
  %endmacro
  %macro MYPOPAD 0
    pop     rdi
    pop     rsi
    pop     rbx
    pop     r12
    pop     r13
    pop     r14
    pop     r15
  %endmacro
 %endif

 %macro MYPUSHSEGS 2
    mov     %2, es
    push    %1
    mov     %2, ds
    push    %1
    push    fs
    ; Special case for GS: OSes typically use swapgs to reset the hidden base
    ; register for GS on entry into the kernel. The same happens on exit.
    push    rcx
    mov     ecx, MSR_K8_GS_BASE
    rdmsr
    pop     rcx
    push    rdx
    push    rax
    push    gs
 %endmacro

 %macro MYPOPSEGS 2
    ; Note: do not step through this code with a debugger!
    pop     gs
    pop     rax
    pop     rdx
    push    rcx
    mov     ecx, MSR_K8_GS_BASE
    wrmsr
    pop     rcx
    ; Now it's safe to step again

    pop     fs
    pop     %1
    mov     ds, %2
    pop     %1
    mov     es, %2
 %endmacro

%else ; RT_ARCH_X86
 %macro MYPUSHAD 0
    pushad
 %endmacro
 %macro MYPOPAD 0
    popad
 %endmacro

 %macro MYPUSHSEGS 2
    push    ds
    push    es
    push    fs
    push    gs
 %endmacro
 %macro MYPOPSEGS 2
    pop     gs
    pop     fs
    pop     es
    pop     ds
 %endmacro
%endif


BEGINCODE

;/**
; * Prepares for and executes VMLAUNCH
; *
; * @note identical to VMXResumeVM, except for the vmlaunch/vmresume opcode
; *
; * @returns VBox status code
; * @param   pCtx        Guest context
; */
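; Presumed C prototype, by analogy with the DECLASM comments on the other
; helpers in this file:
;DECLASM(int) VMXStartVM(PCPUMCTX pCtx);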
BEGINPROC VMXStartVM
    push    xBP
    mov     xBP, xSP

    ;/* First we have to save some final CPU context registers. */
%ifdef RT_ARCH_AMD64
    mov     rax, qword .vmlaunch_done
    push    rax
%else
    push    .vmlaunch_done
%endif
    mov     eax, VMX_VMCS_HOST_RIP  ;/* return address (too difficult to continue after VMLAUNCH?) */
    vmwrite xAX, [xSP]
    ;/* @todo assumes success... */
    add     xSP, xS

    ;/* Manual save and restore:
    ; * - General purpose registers except RIP, RSP
    ; *
    ; * Trashed:
    ; * - CR2 (we don't care)
    ; * - LDTR (reset to 0)
    ; * - DRx (presumably not changed at all)
    ; * - DR7 (reset to 0x400)
    ; * - EFLAGS (reset to BIT(1); not relevant)
    ; *
    ; */

    ;/* Save all general purpose host registers. */
    MYPUSHAD

    ;/* Save segment registers */
    MYPUSHSEGS xAX, ax

    ;/* Save the Guest CPU context pointer. */
%ifdef RT_ARCH_AMD64
 %ifdef ASM_CALL64_GCC
    mov     rsi, rdi        ; pCtx
 %else
    mov     rsi, rcx        ; pCtx
 %endif
%else
    mov     esi, [ebp + 8]  ; pCtx
%endif
    push    xSI

    ; Save LDTR
    xor     eax, eax
    sldt    ax
    push    xAX

    ; VMX only saves the base of the GDTR & IDTR and resets the limit to 0xffff; we must restore the limit correctly!
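    ; (sgdt/sidt store a 10-byte base+limit image in 64-bit mode and a 6-byte
    ; image in 32-bit mode, hence the two xS-sized stack slots reserved each.)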
    sub     xSP, xS*2
    sgdt    [xSP]

    sub     xSP, xS*2
    sidt    [xSP]

    ; Restore CR2
    mov     xBX, [xSI + CPUMCTX.cr2]
    mov     cr2, xBX

    mov     eax, VMX_VMCS_HOST_RSP
    vmwrite xAX, xSP
    ;/* @todo assumes success... */
    ;/* Don't mess with ESP anymore!! */

    ;/* Restore Guest's general purpose registers. */
    mov     eax, [xSI + CPUMCTX.eax]
    mov     ebx, [xSI + CPUMCTX.ebx]
    mov     ecx, [xSI + CPUMCTX.ecx]
    mov     edx, [xSI + CPUMCTX.edx]
    mov     edi, [xSI + CPUMCTX.edi]
    mov     ebp, [xSI + CPUMCTX.ebp]
    mov     esi, [xSI + CPUMCTX.esi]

    vmlaunch
    jmp     .vmlaunch_done  ;/* here if vmlaunch detected a failure. */

ALIGNCODE(16)
.vmlaunch_done:
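    ; VMX failure convention: CF=1 means VMfailInvalid (invalid working-VMCS
    ; pointer), ZF=1 means VMfailValid (error code in the VM-instruction error
    ; field); both are clear when we get here via a genuine VM exit.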
    jc      near .vmxstart_invalid_vmxon_ptr
    jz      near .vmxstart_start_failed

    ; Restore base and limit of the IDTR & GDTR
    lidt    [xSP]
    add     xSP, xS*2
    lgdt    [xSP]
    add     xSP, xS*2

    push    xDI
    mov     xDI, [xSP + xS * 2]     ; pCtx

    mov     [ss:xDI + CPUMCTX.eax], eax
    mov     [ss:xDI + CPUMCTX.ebx], ebx
    mov     [ss:xDI + CPUMCTX.ecx], ecx
    mov     [ss:xDI + CPUMCTX.edx], edx
    mov     [ss:xDI + CPUMCTX.esi], esi
    mov     [ss:xDI + CPUMCTX.ebp], ebp
%ifdef RT_ARCH_AMD64
    pop     xAX     ; the guest edi we pushed above
    mov     dword [ss:xDI + CPUMCTX.edi], eax
%else
    pop     dword [ss:xDI + CPUMCTX.edi]    ; the guest edi we pushed above
%endif

    pop     xAX     ; saved LDTR
    lldt    ax

    add     xSP, xS ; pCtx

    ; Restore segment registers
    MYPOPSEGS xAX, ax

    ; Restore general purpose registers
    MYPOPAD

    mov     eax, VINF_SUCCESS

.vmstart_end:
    pop     xBP
    ret


.vmxstart_invalid_vmxon_ptr:
    ; Restore base and limit of the IDTR & GDTR
    lidt    [xSP]
    add     xSP, xS*2
    lgdt    [xSP]
    add     xSP, xS*2

    pop     xAX     ; saved LDTR
    lldt    ax

    add     xSP, xS ; pCtx

    ; Restore segment registers
    MYPOPSEGS xAX, ax

    ; Restore all general purpose host registers.
    MYPOPAD
    mov     eax, VERR_VMX_INVALID_VMXON_PTR
    jmp     .vmstart_end

.vmxstart_start_failed:
    ; Restore base and limit of the IDTR & GDTR
    lidt    [xSP]
    add     xSP, xS*2
    lgdt    [xSP]
    add     xSP, xS*2

    pop     xAX     ; saved LDTR
    lldt    ax

    add     xSP, xS ; pCtx

    ; Restore segment registers
    MYPOPSEGS xAX, ax

    ; Restore all general purpose host registers.
    MYPOPAD
    mov     eax, VERR_VMX_UNABLE_TO_START_VM
    jmp     .vmstart_end

ENDPROC VMXStartVM


;/**
; * Prepares for and executes VMRESUME
; *
; * @note identical to VMXStartVM, except for the vmlaunch/vmresume opcode
; *
; * @returns VBox status code
; * @param   pCtx        Guest context
; */
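; Presumed C prototype, by analogy with the DECLASM comments on the other
; helpers in this file:
;DECLASM(int) VMXResumeVM(PCPUMCTX pCtx);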
BEGINPROC VMXResumeVM
    push    xBP
    mov     xBP, xSP

    ;/* First we have to save some final CPU context registers. */
%ifdef RT_ARCH_AMD64
    mov     rax, qword .vmresume_done
    push    rax
%else
    push    .vmresume_done
%endif
    mov     eax, VMX_VMCS_HOST_RIP  ;/* return address (too difficult to continue after VMRESUME?) */
    vmwrite xAX, [xSP]
    ;/* @todo assumes success... */
    add     xSP, xS

    ;/* Manual save and restore:
    ; * - General purpose registers except RIP, RSP
    ; *
    ; * Trashed:
    ; * - CR2 (we don't care)
    ; * - LDTR (reset to 0)
    ; * - DRx (presumably not changed at all)
    ; * - DR7 (reset to 0x400)
    ; * - EFLAGS (reset to BIT(1); not relevant)
    ; *
    ; */

    ;/* Save all general purpose host registers. */
    MYPUSHAD

    ;/* Save segment registers */
    MYPUSHSEGS xAX, ax

    ;/* Save the Guest CPU context pointer. */
%ifdef RT_ARCH_AMD64
 %ifdef ASM_CALL64_GCC
    mov     rsi, rdi        ; pCtx
 %else
    mov     rsi, rcx        ; pCtx
 %endif
%else
    mov     esi, [ebp + 8]  ; pCtx
%endif
    push    xSI

    ; Save LDTR
    xor     eax, eax
    sldt    ax
    push    xAX

    ; VMX only saves the base of the GDTR & IDTR and resets the limit to 0xffff; we must restore the limit correctly!
    sub     xSP, xS*2
    sgdt    [xSP]

    sub     xSP, xS*2
    sidt    [xSP]

    ; Restore CR2
    mov     xBX, [xSI + CPUMCTX.cr2]
    mov     cr2, xBX

    mov     eax, VMX_VMCS_HOST_RSP
    vmwrite xAX, xSP
    ;/* @todo assumes success... */
    ;/* Don't mess with ESP anymore!! */

    ;/* Restore Guest's general purpose registers. */
    mov     eax, [xSI + CPUMCTX.eax]
    mov     ebx, [xSI + CPUMCTX.ebx]
    mov     ecx, [xSI + CPUMCTX.ecx]
    mov     edx, [xSI + CPUMCTX.edx]
    mov     edi, [xSI + CPUMCTX.edi]
    mov     ebp, [xSI + CPUMCTX.ebp]
    mov     esi, [xSI + CPUMCTX.esi]

    vmresume
    jmp     .vmresume_done  ;/* here if vmresume detected a failure. */

ALIGNCODE(16)
.vmresume_done:
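    ; Same flag convention as in VMXStartVM: CF=1 -> VMfailInvalid, ZF=1 -> VMfailValid.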
    jc      near .vmxresume_invalid_vmxon_ptr
    jz      near .vmxresume_start_failed

    ; Restore base and limit of the IDTR & GDTR
    lidt    [xSP]
    add     xSP, xS*2
    lgdt    [xSP]
    add     xSP, xS*2

    push    xDI
    mov     xDI, [xSP + xS * 2]     ; pCtx

    mov     [ss:xDI + CPUMCTX.eax], eax
    mov     [ss:xDI + CPUMCTX.ebx], ebx
    mov     [ss:xDI + CPUMCTX.ecx], ecx
    mov     [ss:xDI + CPUMCTX.edx], edx
    mov     [ss:xDI + CPUMCTX.esi], esi
    mov     [ss:xDI + CPUMCTX.ebp], ebp
%ifdef RT_ARCH_AMD64
    pop     xAX     ; the guest edi we pushed above
    mov     dword [ss:xDI + CPUMCTX.edi], eax
%else
    pop     dword [ss:xDI + CPUMCTX.edi]    ; the guest edi we pushed above
%endif

    pop     xAX     ; saved LDTR
    lldt    ax

    add     xSP, xS ; pCtx

    ; Restore segment registers
    MYPOPSEGS xAX, ax

    ; Restore general purpose registers
    MYPOPAD

    mov     eax, VINF_SUCCESS

.vmresume_end:
    pop     xBP
    ret

.vmxresume_invalid_vmxon_ptr:
    ; Restore base and limit of the IDTR & GDTR
    lidt    [xSP]
    add     xSP, xS*2
    lgdt    [xSP]
    add     xSP, xS*2

    pop     xAX     ; saved LDTR
    lldt    ax

    add     xSP, xS ; pCtx

    ; Restore segment registers
    MYPOPSEGS xAX, ax

    ; Restore all general purpose host registers.
    MYPOPAD
    mov     eax, VERR_VMX_INVALID_VMXON_PTR
    jmp     .vmresume_end

.vmxresume_start_failed:
    ; Restore base and limit of the IDTR & GDTR
    lidt    [xSP]
    add     xSP, xS*2
    lgdt    [xSP]
    add     xSP, xS*2

    pop     xAX     ; saved LDTR
    lldt    ax

    add     xSP, xS ; pCtx

    ; Restore segment registers
    MYPOPSEGS xAX, ax

    ; Restore all general purpose host registers.
    MYPOPAD
    mov     eax, VERR_VMX_UNABLE_TO_RESUME_VM
    jmp     .vmresume_end

ENDPROC VMXResumeVM


%ifdef RT_ARCH_AMD64
;/**
; * Executes VMWRITE
; *
; * @returns VBox status code
; * @param   idxField    x86: [ebp + 08h]  msc: rcx  gcc: edi   VMCS index
; * @param   pData       x86: [ebp + 0ch]  msc: rdx  gcc: rsi   VM field value
; */
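; Presumed C prototype, mirroring the VMXReadVMCS64 comment below; note that
; the second parameter is passed by value despite its pData name:
;DECLASM(int) VMXWriteVMCS64(uint32_t idxField, uint64_t pData);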
BEGINPROC VMXWriteVMCS64
%ifdef ASM_CALL64_GCC
    mov     eax, 0ffffffffh
    and     rdi, rax
    xor     rax, rax
    vmwrite rdi, rsi
%else
    mov     eax, 0ffffffffh
    and     rcx, rax
    xor     rax, rax
    vmwrite rcx, rdx
%endif
    jnc     .valid_vmcs
    mov     eax, VERR_VMX_INVALID_VMCS_PTR
    ret
.valid_vmcs:
    jnz     .the_end
    mov     eax, VERR_VMX_INVALID_VMCS_FIELD
.the_end:
    ret
ENDPROC VMXWriteVMCS64

;/**
; * Executes VMREAD
; *
; * @returns VBox status code
; * @param   idxField    VMCS index
; * @param   pData       Ptr to store VM field value
; */
;DECLASM(int) VMXReadVMCS64(uint32_t idxField, uint64_t *pData);
BEGINPROC VMXReadVMCS64
%ifdef ASM_CALL64_GCC
    mov     eax, 0ffffffffh
    and     rdi, rax
    xor     rax, rax
    vmread  [rsi], rdi
%else
    mov     eax, 0ffffffffh
    and     rcx, rax
    xor     rax, rax
    vmread  [rdx], rcx
%endif
    jnc     .valid_vmcs
    mov     eax, VERR_VMX_INVALID_VMCS_PTR
    ret
.valid_vmcs:
    jnz     .the_end
    mov     eax, VERR_VMX_INVALID_VMCS_FIELD
.the_end:
    ret
ENDPROC VMXReadVMCS64


;/**
; * Executes VMXON
; *
; * @returns VBox status code
; * @param   HCPhysVMXOn     Physical address of VMXON structure
; */
;DECLASM(int) VMXEnable(RTHCPHYS HCPhysVMXOn);
BEGINPROC VMXEnable
%ifdef RT_ARCH_AMD64
    xor     rax, rax
 %ifdef ASM_CALL64_GCC
    push    rdi
 %else
    push    rcx
 %endif
    vmxon   [rsp]
%else
    xor     eax, eax
    vmxon   [esp + 4]
%endif
    jnc     .good
    mov     eax, VERR_VMX_INVALID_VMXON_PTR
    jmp     .the_end

.good:
    jnz     .the_end
    mov     eax, VERR_VMX_GENERIC

.the_end:
%ifdef RT_ARCH_AMD64
    add     rsp, 8
%endif
    ret
ENDPROC VMXEnable


;/**
; * Executes VMXOFF
; */
;DECLASM(void) VMXDisable(void);
BEGINPROC VMXDisable
    vmxoff
    ret
ENDPROC VMXDisable


;/**
; * Executes VMCLEAR
; *
; * @returns VBox status code
; * @param   HCPhysVMCS      Physical address of VM control structure
; */
;DECLASM(int) VMXClearVMCS(RTHCPHYS HCPhysVMCS);
BEGINPROC VMXClearVMCS
%ifdef RT_ARCH_AMD64
    xor     rax, rax
 %ifdef ASM_CALL64_GCC
    push    rdi
 %else
    push    rcx
 %endif
    vmclear [rsp]
%else
    xor     eax, eax
    vmclear [esp + 4]
%endif
    jnc     .the_end
    mov     eax, VERR_VMX_INVALID_VMCS_PTR
.the_end:
%ifdef RT_ARCH_AMD64
    add     rsp, 8
%endif
    ret
ENDPROC VMXClearVMCS


;/**
; * Executes VMPTRLD
; *
; * @returns VBox status code
; * @param   HCPhysVMCS      Physical address of VMCS structure
; */
;DECLASM(int) VMXActivateVMCS(RTHCPHYS HCPhysVMCS);
BEGINPROC VMXActivateVMCS
%ifdef RT_ARCH_AMD64
    xor     rax, rax
 %ifdef ASM_CALL64_GCC
    push    rdi
 %else
    push    rcx
 %endif
    vmptrld [rsp]
%else
    xor     eax, eax
    vmptrld [esp + 4]
%endif
    jnc     .the_end
    mov     eax, VERR_VMX_INVALID_VMCS_PTR
.the_end:
%ifdef RT_ARCH_AMD64
    add     rsp, 8
%endif
    ret
ENDPROC VMXActivateVMCS

%endif ; RT_ARCH_AMD64


;/**
; * Prepares for and executes VMRUN
; *
; * @returns VBox status code
; * @param   pVMCBHostPhys   Physical address of host VMCB
; * @param   pVMCBPhys       Physical address of guest VMCB
; * @param   pCtx            Guest context
; */
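; Presumed C prototype, by analogy with the DECLASM comments on the other
; helpers in this file:
;DECLASM(int) SVMVMRun(RTHCPHYS pVMCBHostPhys, RTHCPHYS pVMCBPhys, PCPUMCTX pCtx);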
BEGINPROC SVMVMRun
%ifdef RT_ARCH_AMD64 ; fake a cdecl stack frame - I'm lazy, so sue me.
 %ifdef ASM_CALL64_GCC
    push    rdx
    push    rsi
    push    rdi
 %else
    push    r8
    push    rdx
    push    rcx
 %endif
    push    0
%endif
    push    xBP
    mov     xBP, xSP

    ;/* Manual save and restore:
    ; * - General purpose registers except RIP, RSP, RAX
    ; *
    ; * Trashed:
    ; * - CR2 (we don't care)
    ; * - LDTR (reset to 0)
    ; * - DRx (presumably not changed at all)
    ; * - DR7 (reset to 0x400)
    ; */

    ;/* Save all general purpose host registers. */
    MYPUSHAD

    ;/* Save the Guest CPU context pointer. */
    mov     xSI, [xBP + xS*2 + RTHCPHYS_CB*2]   ; pCtx
    push    xSI     ; push for saving the state at the end

    ; Restore CR2
    mov     xBX, [xSI + CPUMCTX.cr2]
    mov     cr2, xBX

    ; save host fs, gs, sysenter msr etc
    mov     xAX, [xBP + xS*2]   ; pVMCBHostPhys (64 bits physical address; x86: take low dword only)
    push    xAX     ; save for the vmload after vmrun
    DB      0x0F, 0x01, 0xDB    ; VMSAVE

    ; setup eax for VMLOAD
    mov     xAX, [xBP + xS*2 + RTHCPHYS_CB]     ; pVMCBPhys (64 bits physical address; take low dword only)

    ;/* Restore Guest's general purpose registers. */
    ;/* EAX is loaded from the VMCB by VMRUN */
    mov     ebx, [xSI + CPUMCTX.ebx]
    mov     ecx, [xSI + CPUMCTX.ecx]
    mov     edx, [xSI + CPUMCTX.edx]
    mov     edi, [xSI + CPUMCTX.edi]
    mov     ebp, [xSI + CPUMCTX.ebp]
    mov     esi, [xSI + CPUMCTX.esi]

    ; Clear the global interrupt flag & execute sti to make sure external interrupts cause a world switch
    DB      0x0f, 0x01, 0xDD    ; CLGI
    sti

    ; load guest fs, gs, sysenter msr etc
    DB      0x0f, 0x01, 0xDA    ; VMLOAD
    ; run the VM
    DB      0x0F, 0x01, 0xD8    ; VMRUN

    ;/* EAX is in the VMCB already; we can use it here. */

    ; save guest fs, gs, sysenter msr etc
    DB      0x0F, 0x01, 0xDB    ; VMSAVE

    ; load host fs, gs, sysenter msr etc
    pop     xAX     ; pushed above
    DB      0x0F, 0x01, 0xDA    ; VMLOAD

    ; Set the global interrupt flag again, but execute cli to make sure IF=0.
    cli
    DB      0x0f, 0x01, 0xDC    ; STGI

    pop     xAX     ; pCtx

    mov     [ss:xAX + CPUMCTX.ebx], ebx
    mov     [ss:xAX + CPUMCTX.ecx], ecx
    mov     [ss:xAX + CPUMCTX.edx], edx
    mov     [ss:xAX + CPUMCTX.esi], esi
    mov     [ss:xAX + CPUMCTX.edi], edi
    mov     [ss:xAX + CPUMCTX.ebp], ebp

    ; Restore general purpose registers
    MYPOPAD

    mov     eax, VINF_SUCCESS

    pop     xBP
%ifdef RT_ARCH_AMD64
    add     xSP, 4*xS
%endif
    ret
ENDPROC SVMVMRun

%ifdef RT_ARCH_AMD64
%ifdef RT_OS_WINDOWS

;;
; Executes INVLPGA
;
; @param   pPageGC  msc:ecx  gcc:edi  x86:[esp+04]  Virtual page to invalidate
; @param   uASID    msc:edx  gcc:esi  x86:[esp+08]  Tagged TLB id
;
;DECLASM(void) SVMInvlpgA(RTGCPTR pPageGC, uint32_t uASID);
BEGINPROC SVMInvlpgA
%ifdef RT_ARCH_AMD64
 %ifdef ASM_CALL64_GCC
    mov     eax, edi    ;; @todo 64-bit guest.
    mov     ecx, esi
 %else
    mov     eax, ecx    ;; @todo 64-bit guest.
    mov     ecx, edx
 %endif
    invlpga rax, ecx
%else
    mov     eax, [esp + 4]
    mov     ecx, [esp + 8]
    invlpga eax, ecx
%endif
    ret
ENDPROC SVMInvlpgA

%endif
%endif