source: vbox/trunk/include/VBox/vmm/iem-x86-amd64.h@98980

Last change on this file since 98980 was 98980, checked in by vboxsync, 19 months ago

VMM: More ARMv8 x86/amd64 separation work, get past IEM, bugref:10385

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 15.7 KB
/** @file
 * IEM - Interpreted Execution Manager.
 */

/*
 * Copyright (C) 2011-2023 Oracle and/or its affiliates.
 *
 * This file is part of VirtualBox base platform packages, as
 * available from https://www.virtualbox.org.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, in version 3 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <https://www.gnu.org/licenses>.
 *
 * The contents of this file may alternatively be used under the terms
 * of the Common Development and Distribution License Version 1.0
 * (CDDL), a copy of it is provided in the "COPYING.CDDL" file included
 * in the VirtualBox distribution, in which case the provisions of the
 * CDDL are applicable instead of those of the GPL.
 *
 * You may elect to license modified versions of this file under the
 * terms and conditions of either the GPL or the CDDL or both.
 *
 * SPDX-License-Identifier: GPL-3.0-only OR CDDL-1.0
 */

#ifndef VBOX_INCLUDED_vmm_iem_x86_amd64_h
#define VBOX_INCLUDED_vmm_iem_x86_amd64_h
#ifndef RT_WITHOUT_PRAGMA_ONCE
# pragma once
#endif

#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
# include <VBox/vmm/hm_vmx.h>
#endif


RT_C_DECLS_BEGIN

/** @addtogroup grp_iem
 * @{ */


/** @name IEMTARGETCPU_XXX - IEM target CPU specification.
 *
 * This is a gross simplification of CPUMMICROARCH for dealing with really old
 * CPUs which didn't have much in the way of hinting at supported instructions
 * and features. This slowly changed with the introduction of CPUID on the
 * Intel Pentium.
 *
 * @{
 */
/** The dynamic target CPU mode is for getting through the BIOS and then using
 * the debugger or modified instruction behaviour (e.g. HLT) to switch to a
 * different target CPU. */
#define IEMTARGETCPU_DYNAMIC    UINT32_C(0)
/** Intel 8086/8088. */
#define IEMTARGETCPU_8086       UINT32_C(1)
/** NEC V20/V30.
 * @remarks must be between 8086 and 80186. */
#define IEMTARGETCPU_V20        UINT32_C(2)
/** Intel 80186/80188. */
#define IEMTARGETCPU_186        UINT32_C(3)
/** Intel 80286. */
#define IEMTARGETCPU_286        UINT32_C(4)
/** Intel 80386. */
#define IEMTARGETCPU_386        UINT32_C(5)
/** Intel 80486. */
#define IEMTARGETCPU_486        UINT32_C(6)
/** Intel Pentium. */
#define IEMTARGETCPU_PENTIUM    UINT32_C(7)
/** Intel Pentium Pro. */
#define IEMTARGETCPU_PPRO       UINT32_C(8)
/** A reasonably current CPU, probably newer than the Pentium Pro when it comes
 * to the feature set and behaviour. Generally the CPUID info and CPU vendor
 * dictate the behaviour here. */
#define IEMTARGETCPU_CURRENT    UINT32_C(9)
/** @} */
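
/* A minimal sketch (an assumption, not taken from the VirtualBox sources) of
 * how a target CPU value might drive a compatibility decision; the helper
 * below is hypothetical and purely illustrative.
 *
 * @code
 * static bool myTargetCpuHas386Extensions(uint32_t enmTargetCpu)
 * {
 *     // 32-bit operand/address sizes only exist on the 80386 and later; the
 *     // IEMTARGETCPU_XXX values are ordered, so a simple comparison suffices.
 *     // (IEMTARGETCPU_DYNAMIC would have to be resolved to a concrete value first.)
 *     return enmTargetCpu >= IEMTARGETCPU_386;
 * }
 * @endcode
 */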


/** The CPUMCTX_EXTRN_XXX mask required to be cleared when interpreting anything.
 * IEM will ASSUME the caller of IEM APIs has ensured these are already present. */
#define IEM_CPUMCTX_EXTRN_MUST_MASK                 (  CPUMCTX_EXTRN_GPRS_MASK \
                                                     | CPUMCTX_EXTRN_RIP \
                                                     | CPUMCTX_EXTRN_RFLAGS \
                                                     | CPUMCTX_EXTRN_SS \
                                                     | CPUMCTX_EXTRN_CS \
                                                     | CPUMCTX_EXTRN_CR0 \
                                                     | CPUMCTX_EXTRN_CR3 \
                                                     | CPUMCTX_EXTRN_CR4 \
                                                     | CPUMCTX_EXTRN_APIC_TPR \
                                                     | CPUMCTX_EXTRN_EFER \
                                                     | CPUMCTX_EXTRN_DR7 )
/** The CPUMCTX_EXTRN_XXX mask needed when injecting an exception/interrupt.
 * IEM will import missing bits; callers are encouraged to make these registers
 * available prior to injection calls if fetching state anyway. */
#define IEM_CPUMCTX_EXTRN_XCPT_MASK                 (  IEM_CPUMCTX_EXTRN_MUST_MASK \
                                                     | CPUMCTX_EXTRN_CR2 \
                                                     | CPUMCTX_EXTRN_SREG_MASK \
                                                     | CPUMCTX_EXTRN_TABLE_MASK )
/** The CPUMCTX_EXTRN_XXX mask required to be cleared when calling any
 * IEMExecDecoded API not using memory. IEM will ASSUME the caller of IEM
 * APIs has ensured these are already present.
 * @note ASSUMES execution engine has checked for instruction breakpoints
 *       during decoding. */
#define IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK  (  CPUMCTX_EXTRN_RIP \
                                                     | CPUMCTX_EXTRN_RFLAGS \
                                                     | CPUMCTX_EXTRN_SS   /* for CPL */ \
                                                     | CPUMCTX_EXTRN_CS   /* for mode */ \
                                                     | CPUMCTX_EXTRN_CR0  /* for mode */ \
                                                     | CPUMCTX_EXTRN_EFER /* for mode */ )
/** The CPUMCTX_EXTRN_XXX mask required to be cleared when calling any
 * IEMExecDecoded API using memory. IEM will ASSUME the caller of IEM
 * APIs has ensured these are already present.
 * @note ASSUMES execution engine has checked for instruction breakpoints
 *       during decoding. */
#define IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK     (  IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK \
                                                     | CPUMCTX_EXTRN_CR3  /* for page tables */ \
                                                     | CPUMCTX_EXTRN_CR4  /* for paging mode */ \
                                                     | CPUMCTX_EXTRN_DR7  /* for memory breakpoints */ )

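/* A minimal sketch (an assumption, not taken from the VirtualBox sources) of
 * how an execution engine might honour IEM_CPUMCTX_EXTRN_MUST_MASK before
 * handing a VCPU to IEM.  The myImportGuestState() helper is hypothetical and
 * stands in for whatever the engine uses to pull state back from the hardware;
 * fExtrn is assumed to be the guest-context field tracking still-external state.
 *
 * @code
 *     // Import whatever is still owned by the hardware before interpreting.
 *     if (pVCpu->cpum.GstCtx.fExtrn & IEM_CPUMCTX_EXTRN_MUST_MASK)
 *         myImportGuestState(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
 *     // All state required by IEM is now present, so interpretation may run.
 *     VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
 * @endcode
 */
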
#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
/** The CPUMCTX_EXTRN_XXX mask needed when calling IEMExecDecodedVmlaunchVmresume().
 * IEM will ASSUME the caller has ensured these are already present. */
# define IEM_CPUMCTX_EXTRN_VMX_VMENTRY_MASK         (  IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK \
                                                     | CPUMCTX_EXTRN_CR2 \
                                                     | CPUMCTX_EXTRN_HWVIRT )

/** The CPUMCTX_EXTRN_XXX mask that the IEM VM-exit code will import on demand
 * when needed, primarily because there are several IEM VM-exit interface
 * functions, some of which may not cause a VM-exit at all.
 *
 * This is currently unused, but it is kept here in case we can get away with a
 * bit more fine-grained state handling.
 *
 * @note Update HM_CHANGED_VMX_VMEXIT_MASK if something here changes. */
# define IEM_CPUMCTX_EXTRN_VMX_VMEXIT_MASK          (  CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4 \
                                                     | CPUMCTX_EXTRN_DR7 | CPUMCTX_EXTRN_DR6 \
                                                     | CPUMCTX_EXTRN_EFER \
                                                     | CPUMCTX_EXTRN_SYSENTER_MSRS \
                                                     | CPUMCTX_EXTRN_OTHER_MSRS /* for PAT MSR */ \
                                                     | CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_RFLAGS \
                                                     | CPUMCTX_EXTRN_SREG_MASK \
                                                     | CPUMCTX_EXTRN_TR \
                                                     | CPUMCTX_EXTRN_LDTR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_IDTR \
                                                     | CPUMCTX_EXTRN_HWVIRT )
#endif

#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
/** The CPUMCTX_EXTRN_XXX mask needed when calling IEMExecSvmVmexit().
 * IEM will ASSUME the caller has ensured these are already present. */
# define IEM_CPUMCTX_EXTRN_SVM_VMEXIT_MASK          (  CPUMCTX_EXTRN_RSP \
                                                     | CPUMCTX_EXTRN_RAX \
                                                     | CPUMCTX_EXTRN_RIP \
                                                     | CPUMCTX_EXTRN_RFLAGS \
                                                     | CPUMCTX_EXTRN_CS \
                                                     | CPUMCTX_EXTRN_SS \
                                                     | CPUMCTX_EXTRN_DS \
                                                     | CPUMCTX_EXTRN_ES \
                                                     | CPUMCTX_EXTRN_GDTR \
                                                     | CPUMCTX_EXTRN_IDTR \
                                                     | CPUMCTX_EXTRN_CR_MASK \
                                                     | CPUMCTX_EXTRN_EFER \
                                                     | CPUMCTX_EXTRN_DR6 \
                                                     | CPUMCTX_EXTRN_DR7 \
                                                     | CPUMCTX_EXTRN_OTHER_MSRS \
                                                     | CPUMCTX_EXTRN_HWVIRT \
                                                     | CPUMCTX_EXTRN_APIC_TPR \
                                                     | CPUMCTX_EXTRN_HM_SVM_HWVIRT_VIRQ )

/** The CPUMCTX_EXTRN_XXX mask needed when calling IEMExecDecodedVmrun().
 * IEM will ASSUME the caller has ensured these are already present. */
# define IEM_CPUMCTX_EXTRN_SVM_VMRUN_MASK           IEM_CPUMCTX_EXTRN_SVM_VMEXIT_MASK
#endif

/** @name Given Instruction Interpreters
 * @{ */
VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
                                                bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg, bool fIoChecked);
VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
                                               bool fRepPrefix, uint8_t cbInstr, bool fIoChecked);
VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedOut(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg);
VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedIn(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg);
VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg);
VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg);
VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovDRxWrite(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iDrReg, uint8_t iGReg);
VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovDRxRead(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iDrReg);
VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPUCC pVCpu, uint8_t cbInstr);
VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t uValue, RTGCPTR GCPtrEffDst);
VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPUCC pVCpu, uint8_t cbInstr);
VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWbinvd(PVMCPUCC pVCpu, uint8_t cbInstr);
VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvd(PVMCPUCC pVCpu, uint8_t cbInstr);
VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpg(PVMCPUCC pVCpu, uint8_t cbInstr, RTGCPTR GCPtrPage);
VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvpcid(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPTR GCPtrDesc,
                                                 uint64_t uType);
VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedCpuid(PVMCPUCC pVCpu, uint8_t cbInstr);
VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdpmc(PVMCPUCC pVCpu, uint8_t cbInstr);
VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtsc(PVMCPUCC pVCpu, uint8_t cbInstr);
VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtscp(PVMCPUCC pVCpu, uint8_t cbInstr);
VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdmsr(PVMCPUCC pVCpu, uint8_t cbInstr);
VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWrmsr(PVMCPUCC pVCpu, uint8_t cbInstr);
VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMonitor(PVMCPUCC pVCpu, uint8_t cbInstr);
VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMwait(PVMCPUCC pVCpu, uint8_t cbInstr);
VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedHlt(PVMCPUCC pVCpu, uint8_t cbInstr);

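/* A minimal sketch (an assumption, not taken from the VirtualBox sources) of
 * how an execution engine's CPUID exit handler might hand the already decoded
 * instruction to IEM.  The handler name and the cbInstr parameter source are
 * hypothetical placeholders for the engine's own exit plumbing.
 *
 * @code
 * static VBOXSTRICTRC myCpuidExitHandler(PVMCPUCC pVCpu, uint8_t cbInstr)
 * {
 *     // IEM updates the guest GPRs and advances RIP by cbInstr on success.
 *     return IEMExecDecodedCpuid(pVCpu, cbInstr);
 * }
 * @endcode
 */
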
#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClgi(PVMCPUCC pVCpu, uint8_t cbInstr);
VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedStgi(PVMCPUCC pVCpu, uint8_t cbInstr);
VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmload(PVMCPUCC pVCpu, uint8_t cbInstr);
VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmsave(PVMCPUCC pVCpu, uint8_t cbInstr);
VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpga(PVMCPUCC pVCpu, uint8_t cbInstr);
VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmrun(PVMCPUCC pVCpu, uint8_t cbInstr);
VMM_INT_DECL(VBOXSTRICTRC) IEMExecSvmVmexit(PVMCPUCC pVCpu, uint64_t uExitCode, uint64_t uExitInfo1, uint64_t uExitInfo2);
#endif

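/* A minimal sketch (an assumption, not taken from the VirtualBox sources) of
 * how an execution engine might use IEMExecSvmVmexit() when a physical
 * interrupt arrives while the nested guest intercepts INTR.  The
 * myNestedGuestInterceptsIntr() helper is hypothetical, and SVM_EXIT_INTR is
 * assumed to be the usual exit-code constant from VBox/vmm/hm_svm.h.
 *
 * @code
 *     // Assumes IEM_CPUMCTX_EXTRN_SVM_VMEXIT_MASK has already been imported;
 *     // the INTR exit carries no extra exit information (uExitInfo1/2 are 0).
 *     if (myNestedGuestInterceptsIntr(pVCpu))
 *         return IEMExecSvmVmexit(pVCpu, SVM_EXIT_INTR, 0, 0);
 * @endcode
 */
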
#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
VMM_INT_DECL(void) IEMReadVmxVmcsField(PCVMXVVMCS pVmcs, uint64_t u64VmcsField, uint64_t *pu64Dst);
VMM_INT_DECL(void) IEMWriteVmxVmcsField(PVMXVVMCS pVmcs, uint64_t u64VmcsField, uint64_t u64Val);
VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVirtApicAccessMsr(PVMCPUCC pVCpu, uint32_t idMsr, uint64_t *pu64Val, bool fWrite);
VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitApicWrite(PVMCPUCC pVCpu);
VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitPreemptTimer(PVMCPUCC pVCpu);
VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitExtInt(PVMCPUCC pVCpu, uint8_t uVector, bool fIntPending);
VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitXcpt(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo, PCVMXVEXITEVENTINFO pExitEventInfo);
VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitXcptNmi(PVMCPUCC pVCpu);
VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitTripleFault(PVMCPUCC pVCpu);
VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitStartupIpi(PVMCPUCC pVCpu, uint8_t uVector);
VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitInstrWithInfo(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo);
VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitInstr(PVMCPUCC pVCpu, uint32_t uExitReason, uint8_t cbInstr);
VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitTrapLike(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo);
VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitTaskSwitch(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo, PCVMXVEXITEVENTINFO pExitEventInfo);
VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitApicAccess(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo, PCVMXVEXITEVENTINFO pExitEventInfo);
VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexit(PVMCPUCC pVCpu, uint32_t uExitReason, uint64_t uExitQual);
VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmread(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo);
VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmwrite(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo);
VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmptrld(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo);
VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmptrst(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo);
VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmclear(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo);
VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmlaunchVmresume(PVMCPUCC pVCpu, uint8_t cbInstr, VMXINSTRID uInstrId);
VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmxon(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo);
VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmxoff(PVMCPUCC pVCpu, uint8_t cbInstr);
VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvvpid(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo);
# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvept(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo);
VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitEptViolation(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo, PCVMXVEXITEVENTINFO pExitEventInfo);
VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitEptMisconfig(PVMCPUCC pVCpu, RTGCPHYS GCPhysAddr, PCVMXVEXITEVENTINFO pExitEventInfo);
# endif
#endif
/** @} */

/** @defgroup grp_iem_r0 The IEM Host Context Ring-0 API.
 * @{
 */
VMMR0_INT_DECL(int) IEMR0InitVM(PGVM pGVM);
/** @} */

/** @} */

RT_C_DECLS_END

#endif /* !VBOX_INCLUDED_vmm_iem_x86_amd64_h */
