VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllCImplVmxInstr.cpp@106432

Last change on this file since 106432 was 106061, checked in by vboxsync, 2 months ago

Copyright year updates by scm.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 412.9 KB
1/* $Id: IEMAllCImplVmxInstr.cpp 106061 2024-09-16 14:03:52Z vboxsync $ */
2/** @file
3 * IEM - VT-x instruction implementation.
4 */
5
6/*
7 * Copyright (C) 2011-2024 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/*********************************************************************************************************************************
30* Header Files *
31*********************************************************************************************************************************/
32#define LOG_GROUP LOG_GROUP_IEM_VMX
33#define VMCPU_INCL_CPUM_GST_CTX
34#include <VBox/vmm/iem.h>
35#include <VBox/vmm/apic.h>
36#include <VBox/vmm/cpum.h>
37#include <VBox/vmm/dbgf.h>
38#include <VBox/vmm/em.h>
39#include <VBox/vmm/gim.h>
40#include <VBox/vmm/hm.h>
41#include <VBox/vmm/pgm.h>
42#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
43# include <VBox/vmm/hmvmxinline.h>
44#endif
45#include <VBox/vmm/tm.h>
46#include "IEMInternal.h"
47#include <VBox/vmm/vmcc.h>
48#include <VBox/log.h>
49#include <VBox/err.h>
50#include <VBox/param.h>
51#include <VBox/disopcode-x86-amd64.h>
52#include <iprt/asm-math.h>
53#include <iprt/assert.h>
54#include <iprt/string.h>
55#include <iprt/x86.h>
56
57#include "IEMInline.h"
58
59
60/*********************************************************************************************************************************
61* Defined Constants And Macros *
62*********************************************************************************************************************************/
63#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
64/**
65 * Gets the ModR/M, SIB and displacement byte(s) from decoded opcodes given their
66 * relative offsets.
67 */
68# ifdef IEM_WITH_CODE_TLB /** @todo IEM TLB */
69# define IEM_MODRM_GET_U8(a_pVCpu, a_bModRm, a_offModRm) do { a_bModRm = 0; RT_NOREF(a_offModRm); } while (0)
70# define IEM_SIB_GET_U8(a_pVCpu, a_bSib, a_offSib) do { a_bSib = 0; RT_NOREF(a_offSib); } while (0)
71# define IEM_DISP_GET_U16(a_pVCpu, a_u16Disp, a_offDisp) do { a_u16Disp = 0; RT_NOREF(a_offDisp); } while (0)
72# define IEM_DISP_GET_S8_SX_U16(a_pVCpu, a_u16Disp, a_offDisp) do { a_u16Disp = 0; RT_NOREF(a_offDisp); } while (0)
73# define IEM_DISP_GET_U32(a_pVCpu, a_u32Disp, a_offDisp) do { a_u32Disp = 0; RT_NOREF(a_offDisp); } while (0)
74# define IEM_DISP_GET_S8_SX_U32(a_pVCpu, a_u32Disp, a_offDisp) do { a_u32Disp = 0; RT_NOREF(a_offDisp); } while (0)
75# define IEM_DISP_GET_S32_SX_U64(a_pVCpu, a_u64Disp, a_offDisp) do { a_u64Disp = 0; RT_NOREF(a_offDisp); } while (0)
76# define IEM_DISP_GET_S8_SX_U64(a_pVCpu, a_u64Disp, a_offDisp) do { a_u64Disp = 0; RT_NOREF(a_offDisp); } while (0)
77# if 0
78# error "Implement me: Getting ModR/M, SIB, displacement needs to work even when instruction crosses a page boundary."
79# endif
80# else /* !IEM_WITH_CODE_TLB */
81# define IEM_MODRM_GET_U8(a_pVCpu, a_bModRm, a_offModRm) \
82 do \
83 { \
84 Assert((a_offModRm) < (a_pVCpu)->iem.s.cbOpcode); \
85 (a_bModRm) = (a_pVCpu)->iem.s.abOpcode[(a_offModRm)]; \
86 } while (0)
87
88# define IEM_SIB_GET_U8(a_pVCpu, a_bSib, a_offSib) IEM_MODRM_GET_U8(a_pVCpu, a_bSib, a_offSib)
89
90# define IEM_DISP_GET_U16(a_pVCpu, a_u16Disp, a_offDisp) \
91 do \
92 { \
93 Assert((a_offDisp) + 1 < (a_pVCpu)->iem.s.cbOpcode); \
94 uint8_t const bTmpLo = (a_pVCpu)->iem.s.abOpcode[(a_offDisp)]; \
95 uint8_t const bTmpHi = (a_pVCpu)->iem.s.abOpcode[(a_offDisp) + 1]; \
96 (a_u16Disp) = RT_MAKE_U16(bTmpLo, bTmpHi); \
97 } while (0)
98
99# define IEM_DISP_GET_S8_SX_U16(a_pVCpu, a_u16Disp, a_offDisp) \
100 do \
101 { \
102 Assert((a_offDisp) < (a_pVCpu)->iem.s.cbOpcode); \
103 (a_u16Disp) = (int8_t)((a_pVCpu)->iem.s.abOpcode[(a_offDisp)]); \
104 } while (0)
105
106# define IEM_DISP_GET_U32(a_pVCpu, a_u32Disp, a_offDisp) \
107 do \
108 { \
109 Assert((a_offDisp) + 3 < (a_pVCpu)->iem.s.cbOpcode); \
110 uint8_t const bTmp0 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp)]; \
111 uint8_t const bTmp1 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp) + 1]; \
112 uint8_t const bTmp2 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp) + 2]; \
113 uint8_t const bTmp3 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp) + 3]; \
114 (a_u32Disp) = RT_MAKE_U32_FROM_U8(bTmp0, bTmp1, bTmp2, bTmp3); \
115 } while (0)
116
117# define IEM_DISP_GET_S8_SX_U32(a_pVCpu, a_u32Disp, a_offDisp) \
118 do \
119 { \
120 Assert((a_offDisp) + 1 < (a_pVCpu)->iem.s.cbOpcode); \
121 (a_u32Disp) = (int8_t)((a_pVCpu)->iem.s.abOpcode[(a_offDisp)]); \
122 } while (0)
123
124# define IEM_DISP_GET_S8_SX_U64(a_pVCpu, a_u64Disp, a_offDisp) \
125 do \
126 { \
127 Assert((a_offDisp) + 1 < (a_pVCpu)->iem.s.cbOpcode); \
128 (a_u64Disp) = (int8_t)((a_pVCpu)->iem.s.abOpcode[(a_offDisp)]); \
129 } while (0)
130
131# define IEM_DISP_GET_S32_SX_U64(a_pVCpu, a_u64Disp, a_offDisp) \
132 do \
133 { \
134 Assert((a_offDisp) + 3 < (a_pVCpu)->iem.s.cbOpcode); \
135 uint8_t const bTmp0 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp)]; \
136 uint8_t const bTmp1 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp) + 1]; \
137 uint8_t const bTmp2 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp) + 2]; \
138 uint8_t const bTmp3 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp) + 3]; \
139 (a_u64Disp) = (int32_t)RT_MAKE_U32_FROM_U8(bTmp0, bTmp1, bTmp2, bTmp3); \
140 } while (0)
141# endif /* !IEM_WITH_CODE_TLB */
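/*
 * Illustrative sketch (compiled out): how the accessors above are typically
 * combined when picking apart a decoded memory operand.  The offsets and the
 * local variable names here are hypothetical.
 */
# if 0
 uint8_t bModRm;
 IEM_MODRM_GET_U8(pVCpu, bModRm, offModRm);
 uint16_t u16Disp;
 IEM_DISP_GET_U16(pVCpu, u16Disp, offModRm + 1); /* disp16 follows the ModR/M byte here. */
# endif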
142
143/** Checks that the CPU is in VMX operation, as required by the VMX instruction; raises \#UD otherwise.
144 * @note Any changes here, check if IEMOP_HLP_IN_VMX_OPERATION needs updating. */
145# define IEM_VMX_IN_VMX_OPERATION(a_pVCpu, a_szInstr, a_InsDiagPrefix) \
146 do \
147 { \
148 if (IEM_VMX_IS_ROOT_MODE(a_pVCpu)) \
149 { /* likely */ } \
150 else \
151 { \
152 Log((a_szInstr ": Not in VMX operation (root mode) -> #UD\n")); \
153 (a_pVCpu)->cpum.GstCtx.hwvirt.vmx.enmDiag = a_InsDiagPrefix##_VmxRoot; \
154 return iemRaiseUndefinedOpcode(a_pVCpu); \
155 } \
156 } while (0)
157
158/** Marks a VM-entry failure with a diagnostic reason, logs and returns. */
159# define IEM_VMX_VMENTRY_FAILED_RET(a_pVCpu, a_pszInstr, a_pszFailure, a_VmxDiag) \
160 do \
161 { \
162 LogRel(("%s: VM-entry failed! enmDiag=%u (%s) -> %s\n", (a_pszInstr), (a_VmxDiag), \
163 HMGetVmxDiagDesc(a_VmxDiag), (a_pszFailure))); \
164 (a_pVCpu)->cpum.GstCtx.hwvirt.vmx.enmDiag = (a_VmxDiag); \
165 return VERR_VMX_VMENTRY_FAILED; \
166 } while (0)
167
168/** Marks a VM-entry failure with a return code and diagnostic reason, logs and
169 * returns. */
170# define IEM_VMX_VMENTRY_FAILED_RET_2(a_pVCpu, a_pszInstr, a_pszFailure, a_VmxDiag, a_rc) \
171 do \
172 { \
173 LogRel(("%s: VM-entry failed! rc=%Rrc enmDiag=%u (%s) -> %s\n", (a_pszInstr), (a_rc), (a_VmxDiag), \
174 HMGetVmxDiagDesc(a_VmxDiag), (a_pszFailure))); \
175 (a_pVCpu)->cpum.GstCtx.hwvirt.vmx.enmDiag = (a_VmxDiag); \
176 return VERR_VMX_VMENTRY_FAILED; \
177 } while (0)
178
179/** Marks a VM-exit failure with a diagnostic reason and logs. */
180# define IEM_VMX_VMEXIT_FAILED(a_pVCpu, a_uExitReason, a_pszFailure, a_VmxDiag) \
181 do \
182 { \
183 LogRel(("VM-exit failed! uExitReason=%u enmDiag=%u (%s) -> %s\n", (a_uExitReason), (a_VmxDiag), \
184 HMGetVmxDiagDesc(a_VmxDiag), (a_pszFailure))); \
185 (a_pVCpu)->cpum.GstCtx.hwvirt.vmx.enmDiag = (a_VmxDiag); \
186 } while (0)
187
188/** Marks a VM-exit failure with a diagnostic reason, logs and returns. */
189# define IEM_VMX_VMEXIT_FAILED_RET(a_pVCpu, a_uExitReason, a_pszFailure, a_VmxDiag) \
190 do \
191 { \
192 IEM_VMX_VMEXIT_FAILED(a_pVCpu, a_uExitReason, a_pszFailure, a_VmxDiag); \
193 return VERR_VMX_VMEXIT_FAILED; \
194 } while (0)
195
196
197/*********************************************************************************************************************************
198* Global Variables *
199*********************************************************************************************************************************/
200/** @todo NSTVMX: The following VM-exit intercepts are pending:
201 * VMX_EXIT_IO_SMI
202 * VMX_EXIT_SMI
203 * VMX_EXIT_GETSEC
204 * VMX_EXIT_RSM
205 * VMX_EXIT_MONITOR (APIC access VM-exit caused by MONITOR pending)
206 * VMX_EXIT_ERR_MACHINE_CHECK (we never need to raise this?)
207 * VMX_EXIT_VMFUNC
208 * VMX_EXIT_ENCLS
209 * VMX_EXIT_PML_FULL
210 * VMX_EXIT_XSAVES
211 * VMX_EXIT_XRSTORS
212 */
213/**
214 * Map of VMCS field encodings to their virtual-VMCS structure offsets.
215 *
216 * The first array dimension is the VMCS field Width (shifted left by 2) OR'ed with the Type, and the
217 * second dimension is the Index, see VMXVMCSFIELD.
218 */
219uint16_t const g_aoffVmcsMap[16][VMX_V_VMCS_MAX_INDEX + 1] =
220{
221 /* VMX_VMCSFIELD_WIDTH_16BIT | VMX_VMCSFIELD_TYPE_CONTROL: */
222 {
223 /* 0 */ RT_UOFFSETOF(VMXVVMCS, u16Vpid),
224 /* 1 */ RT_UOFFSETOF(VMXVVMCS, u16PostIntNotifyVector),
225 /* 2 */ RT_UOFFSETOF(VMXVVMCS, u16EptpIndex),
226 /* 3 */ RT_UOFFSETOF(VMXVVMCS, u16HlatPrefixSize),
227 /* 4-11 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
228 /* 12-19 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
229 /* 20-27 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
230 /* 28-34 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX
231 },
232 /* VMX_VMCSFIELD_WIDTH_16BIT | VMX_VMCSFIELD_TYPE_VMEXIT_INFO: */
233 {
234 /* 0-7 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
235 /* 8-15 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
236 /* 16-23 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
237 /* 24-31 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
238 /* 32-34 */ UINT16_MAX, UINT16_MAX, UINT16_MAX
239 },
240 /* VMX_VMCSFIELD_WIDTH_16BIT | VMX_VMCSFIELD_TYPE_GUEST_STATE: */
241 {
242 /* 0 */ RT_UOFFSETOF(VMXVVMCS, GuestEs),
243 /* 1 */ RT_UOFFSETOF(VMXVVMCS, GuestCs),
244 /* 2 */ RT_UOFFSETOF(VMXVVMCS, GuestSs),
245 /* 3 */ RT_UOFFSETOF(VMXVVMCS, GuestDs),
246 /* 4 */ RT_UOFFSETOF(VMXVVMCS, GuestFs),
247 /* 5 */ RT_UOFFSETOF(VMXVVMCS, GuestGs),
248 /* 6 */ RT_UOFFSETOF(VMXVVMCS, GuestLdtr),
249 /* 7 */ RT_UOFFSETOF(VMXVVMCS, GuestTr),
250 /* 8 */ RT_UOFFSETOF(VMXVVMCS, u16GuestIntStatus),
251 /* 9 */ RT_UOFFSETOF(VMXVVMCS, u16PmlIndex),
252 /* 10-17 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
253 /* 18-25 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
254 /* 26-33 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
255 /* 34 */ UINT16_MAX
256 },
257 /* VMX_VMCSFIELD_WIDTH_16BIT | VMX_VMCSFIELD_TYPE_HOST_STATE: */
258 {
259 /* 0 */ RT_UOFFSETOF(VMXVVMCS, HostEs),
260 /* 1 */ RT_UOFFSETOF(VMXVVMCS, HostCs),
261 /* 2 */ RT_UOFFSETOF(VMXVVMCS, HostSs),
262 /* 3 */ RT_UOFFSETOF(VMXVVMCS, HostDs),
263 /* 4 */ RT_UOFFSETOF(VMXVVMCS, HostFs),
264 /* 5 */ RT_UOFFSETOF(VMXVVMCS, HostGs),
265 /* 6 */ RT_UOFFSETOF(VMXVVMCS, HostTr),
266 /* 7-14 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
267 /* 15-22 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
268 /* 23-30 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
269 /* 31-34 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX
270 },
271 /* VMX_VMCSFIELD_WIDTH_64BIT | VMX_VMCSFIELD_TYPE_CONTROL: */
272 {
273 /* 0 */ RT_UOFFSETOF(VMXVVMCS, u64AddrIoBitmapA),
274 /* 1 */ RT_UOFFSETOF(VMXVVMCS, u64AddrIoBitmapB),
275 /* 2 */ RT_UOFFSETOF(VMXVVMCS, u64AddrMsrBitmap),
276 /* 3 */ RT_UOFFSETOF(VMXVVMCS, u64AddrExitMsrStore),
277 /* 4 */ RT_UOFFSETOF(VMXVVMCS, u64AddrExitMsrLoad),
278 /* 5 */ RT_UOFFSETOF(VMXVVMCS, u64AddrEntryMsrLoad),
279 /* 6 */ RT_UOFFSETOF(VMXVVMCS, u64ExecVmcsPtr),
280 /* 7 */ RT_UOFFSETOF(VMXVVMCS, u64AddrPml),
281 /* 8 */ RT_UOFFSETOF(VMXVVMCS, u64TscOffset),
282 /* 9 */ RT_UOFFSETOF(VMXVVMCS, u64AddrVirtApic),
283 /* 10 */ RT_UOFFSETOF(VMXVVMCS, u64AddrApicAccess),
284 /* 11 */ RT_UOFFSETOF(VMXVVMCS, u64AddrPostedIntDesc),
285 /* 12 */ RT_UOFFSETOF(VMXVVMCS, u64VmFuncCtls),
286 /* 13 */ RT_UOFFSETOF(VMXVVMCS, u64EptPtr),
287 /* 14 */ RT_UOFFSETOF(VMXVVMCS, u64EoiExitBitmap0),
288 /* 15 */ RT_UOFFSETOF(VMXVVMCS, u64EoiExitBitmap1),
289 /* 16 */ RT_UOFFSETOF(VMXVVMCS, u64EoiExitBitmap2),
290 /* 17 */ RT_UOFFSETOF(VMXVVMCS, u64EoiExitBitmap3),
291 /* 18 */ RT_UOFFSETOF(VMXVVMCS, u64AddrEptpList),
292 /* 19 */ RT_UOFFSETOF(VMXVVMCS, u64AddrVmreadBitmap),
293 /* 20 */ RT_UOFFSETOF(VMXVVMCS, u64AddrVmwriteBitmap),
294 /* 21 */ RT_UOFFSETOF(VMXVVMCS, u64AddrXcptVeInfo),
295 /* 22 */ RT_UOFFSETOF(VMXVVMCS, u64XssExitBitmap),
296 /* 23 */ RT_UOFFSETOF(VMXVVMCS, u64EnclsExitBitmap),
297 /* 24 */ RT_UOFFSETOF(VMXVVMCS, u64SppTablePtr),
298 /* 25 */ RT_UOFFSETOF(VMXVVMCS, u64TscMultiplier),
299 /* 26 */ RT_UOFFSETOF(VMXVVMCS, u64ProcCtls3),
300 /* 27 */ RT_UOFFSETOF(VMXVVMCS, u64EnclvExitBitmap),
301 /* 28 */ UINT16_MAX,
302 /* 29 */ UINT16_MAX,
303 /* 30 */ UINT16_MAX,
304 /* 31 */ RT_UOFFSETOF(VMXVVMCS, u64PconfigExitBitmap),
305 /* 32 */ RT_UOFFSETOF(VMXVVMCS, u64HlatPtr),
306 /* 33 */ UINT16_MAX,
307 /* 34 */ RT_UOFFSETOF(VMXVVMCS, u64ExitCtls2)
308 },
309 /* VMX_VMCSFIELD_WIDTH_64BIT | VMX_VMCSFIELD_TYPE_VMEXIT_INFO: */
310 {
311 /* 0 */ RT_UOFFSETOF(VMXVVMCS, u64RoGuestPhysAddr),
312 /* 1-8 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
313 /* 9-16 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
314 /* 17-24 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
315 /* 25-32 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
316 /* 33-34*/ UINT16_MAX, UINT16_MAX
317 },
318 /* VMX_VMCSFIELD_WIDTH_64BIT | VMX_VMCSFIELD_TYPE_GUEST_STATE: */
319 {
320 /* 0 */ RT_UOFFSETOF(VMXVVMCS, u64VmcsLinkPtr),
321 /* 1 */ RT_UOFFSETOF(VMXVVMCS, u64GuestDebugCtlMsr),
322 /* 2 */ RT_UOFFSETOF(VMXVVMCS, u64GuestPatMsr),
323 /* 3 */ RT_UOFFSETOF(VMXVVMCS, u64GuestEferMsr),
324 /* 4 */ RT_UOFFSETOF(VMXVVMCS, u64GuestPerfGlobalCtlMsr),
325 /* 5 */ RT_UOFFSETOF(VMXVVMCS, u64GuestPdpte0),
326 /* 6 */ RT_UOFFSETOF(VMXVVMCS, u64GuestPdpte1),
327 /* 7 */ RT_UOFFSETOF(VMXVVMCS, u64GuestPdpte2),
328 /* 8 */ RT_UOFFSETOF(VMXVVMCS, u64GuestPdpte3),
329 /* 9 */ RT_UOFFSETOF(VMXVVMCS, u64GuestBndcfgsMsr),
330 /* 10 */ RT_UOFFSETOF(VMXVVMCS, u64GuestRtitCtlMsr),
331 /* 11 */ UINT16_MAX,
332 /* 12 */ RT_UOFFSETOF(VMXVVMCS, u64GuestPkrsMsr),
333 /* 13-20 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
334 /* 21-28 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
335 /* 29-34 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX
336 },
337 /* VMX_VMCSFIELD_WIDTH_64BIT | VMX_VMCSFIELD_TYPE_HOST_STATE: */
338 {
339 /* 0 */ RT_UOFFSETOF(VMXVVMCS, u64HostPatMsr),
340 /* 1 */ RT_UOFFSETOF(VMXVVMCS, u64HostEferMsr),
341 /* 2 */ RT_UOFFSETOF(VMXVVMCS, u64HostPerfGlobalCtlMsr),
342 /* 3 */ RT_UOFFSETOF(VMXVVMCS, u64HostPkrsMsr),
343 /* 4-11 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
344 /* 12-19 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
345 /* 20-27 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
346 /* 28-34 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX
347 },
348 /* VMX_VMCSFIELD_WIDTH_32BIT | VMX_VMCSFIELD_TYPE_CONTROL: */
349 {
350 /* 0 */ RT_UOFFSETOF(VMXVVMCS, u32PinCtls),
351 /* 1 */ RT_UOFFSETOF(VMXVVMCS, u32ProcCtls),
352 /* 2 */ RT_UOFFSETOF(VMXVVMCS, u32XcptBitmap),
353 /* 3 */ RT_UOFFSETOF(VMXVVMCS, u32XcptPFMask),
354 /* 4 */ RT_UOFFSETOF(VMXVVMCS, u32XcptPFMatch),
355 /* 5 */ RT_UOFFSETOF(VMXVVMCS, u32Cr3TargetCount),
356 /* 6 */ RT_UOFFSETOF(VMXVVMCS, u32ExitCtls),
357 /* 7 */ RT_UOFFSETOF(VMXVVMCS, u32ExitMsrStoreCount),
358 /* 8 */ RT_UOFFSETOF(VMXVVMCS, u32ExitMsrLoadCount),
359 /* 9 */ RT_UOFFSETOF(VMXVVMCS, u32EntryCtls),
360 /* 10 */ RT_UOFFSETOF(VMXVVMCS, u32EntryMsrLoadCount),
361 /* 11 */ RT_UOFFSETOF(VMXVVMCS, u32EntryIntInfo),
362 /* 12 */ RT_UOFFSETOF(VMXVVMCS, u32EntryXcptErrCode),
363 /* 13 */ RT_UOFFSETOF(VMXVVMCS, u32EntryInstrLen),
364 /* 14 */ RT_UOFFSETOF(VMXVVMCS, u32TprThreshold),
365 /* 15 */ RT_UOFFSETOF(VMXVVMCS, u32ProcCtls2),
366 /* 16 */ RT_UOFFSETOF(VMXVVMCS, u32PleGap),
367 /* 17 */ RT_UOFFSETOF(VMXVVMCS, u32PleWindow),
368 /* 18-25 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
369 /* 26-33 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
370 /* 34 */ UINT16_MAX
371 },
372 /* VMX_VMCSFIELD_WIDTH_32BIT | VMX_VMCSFIELD_TYPE_VMEXIT_INFO: */
373 {
374 /* 0 */ RT_UOFFSETOF(VMXVVMCS, u32RoVmInstrError),
375 /* 1 */ RT_UOFFSETOF(VMXVVMCS, u32RoExitReason),
376 /* 2 */ RT_UOFFSETOF(VMXVVMCS, u32RoExitIntInfo),
377 /* 3 */ RT_UOFFSETOF(VMXVVMCS, u32RoExitIntErrCode),
378 /* 4 */ RT_UOFFSETOF(VMXVVMCS, u32RoIdtVectoringInfo),
379 /* 5 */ RT_UOFFSETOF(VMXVVMCS, u32RoIdtVectoringErrCode),
380 /* 6 */ RT_UOFFSETOF(VMXVVMCS, u32RoExitInstrLen),
381 /* 7 */ RT_UOFFSETOF(VMXVVMCS, u32RoExitInstrInfo),
382 /* 8-15 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
383 /* 16-23 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
384 /* 24-31 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
385 /* 32-34 */ UINT16_MAX, UINT16_MAX, UINT16_MAX
386 },
387 /* VMX_VMCSFIELD_WIDTH_32BIT | VMX_VMCSFIELD_TYPE_GUEST_STATE: */
388 {
389 /* 0 */ RT_UOFFSETOF(VMXVVMCS, u32GuestEsLimit),
390 /* 1 */ RT_UOFFSETOF(VMXVVMCS, u32GuestCsLimit),
391 /* 2 */ RT_UOFFSETOF(VMXVVMCS, u32GuestSsLimit),
392 /* 3 */ RT_UOFFSETOF(VMXVVMCS, u32GuestDsLimit),
393 /* 4 */ RT_UOFFSETOF(VMXVVMCS, u32GuestFsLimit),
394 /* 5 */ RT_UOFFSETOF(VMXVVMCS, u32GuestGsLimit),
395 /* 6 */ RT_UOFFSETOF(VMXVVMCS, u32GuestLdtrLimit),
396 /* 7 */ RT_UOFFSETOF(VMXVVMCS, u32GuestTrLimit),
397 /* 8 */ RT_UOFFSETOF(VMXVVMCS, u32GuestGdtrLimit),
398 /* 9 */ RT_UOFFSETOF(VMXVVMCS, u32GuestIdtrLimit),
399 /* 10 */ RT_UOFFSETOF(VMXVVMCS, u32GuestEsAttr),
400 /* 11 */ RT_UOFFSETOF(VMXVVMCS, u32GuestCsAttr),
401 /* 12 */ RT_UOFFSETOF(VMXVVMCS, u32GuestSsAttr),
402 /* 13 */ RT_UOFFSETOF(VMXVVMCS, u32GuestDsAttr),
403 /* 14 */ RT_UOFFSETOF(VMXVVMCS, u32GuestFsAttr),
404 /* 15 */ RT_UOFFSETOF(VMXVVMCS, u32GuestGsAttr),
405 /* 16 */ RT_UOFFSETOF(VMXVVMCS, u32GuestLdtrAttr),
406 /* 17 */ RT_UOFFSETOF(VMXVVMCS, u32GuestTrAttr),
407 /* 18 */ RT_UOFFSETOF(VMXVVMCS, u32GuestIntrState),
408 /* 19 */ RT_UOFFSETOF(VMXVVMCS, u32GuestActivityState),
409 /* 20 */ RT_UOFFSETOF(VMXVVMCS, u32GuestSmBase),
410 /* 21 */ RT_UOFFSETOF(VMXVVMCS, u32GuestSysenterCS),
411 /* 22 */ UINT16_MAX,
412 /* 23 */ RT_UOFFSETOF(VMXVVMCS, u32PreemptTimer),
413 /* 24-31 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
414 /* 32-34 */ UINT16_MAX, UINT16_MAX, UINT16_MAX
415 },
416 /* VMX_VMCSFIELD_WIDTH_32BIT | VMX_VMCSFIELD_TYPE_HOST_STATE: */
417 {
418 /* 0 */ RT_UOFFSETOF(VMXVVMCS, u32HostSysenterCs),
419 /* 1-8 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
420 /* 9-16 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
421 /* 17-24 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
422 /* 25-32 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
423 /* 33-34 */ UINT16_MAX, UINT16_MAX
424 },
425 /* VMX_VMCSFIELD_WIDTH_NATURAL | VMX_VMCSFIELD_TYPE_CONTROL: */
426 {
427 /* 0 */ RT_UOFFSETOF(VMXVVMCS, u64Cr0Mask),
428 /* 1 */ RT_UOFFSETOF(VMXVVMCS, u64Cr4Mask),
429 /* 2 */ RT_UOFFSETOF(VMXVVMCS, u64Cr0ReadShadow),
430 /* 3 */ RT_UOFFSETOF(VMXVVMCS, u64Cr4ReadShadow),
431 /* 4 */ RT_UOFFSETOF(VMXVVMCS, u64Cr3Target0),
432 /* 5 */ RT_UOFFSETOF(VMXVVMCS, u64Cr3Target1),
433 /* 6 */ RT_UOFFSETOF(VMXVVMCS, u64Cr3Target2),
434 /* 7 */ RT_UOFFSETOF(VMXVVMCS, u64Cr3Target3),
435 /* 8-15 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
436 /* 16-23 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
437 /* 24-31 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
438 /* 32-34 */ UINT16_MAX, UINT16_MAX, UINT16_MAX
439 },
440 /* VMX_VMCSFIELD_WIDTH_NATURAL | VMX_VMCSFIELD_TYPE_VMEXIT_INFO: */
441 {
442 /* 0 */ RT_UOFFSETOF(VMXVVMCS, u64RoExitQual),
443 /* 1 */ RT_UOFFSETOF(VMXVVMCS, u64RoIoRcx),
444 /* 2 */ RT_UOFFSETOF(VMXVVMCS, u64RoIoRsi),
445 /* 3 */ RT_UOFFSETOF(VMXVVMCS, u64RoIoRdi),
446 /* 4 */ RT_UOFFSETOF(VMXVVMCS, u64RoIoRip),
447 /* 5 */ RT_UOFFSETOF(VMXVVMCS, u64RoGuestLinearAddr),
448 /* 6-13 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
449 /* 14-21 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
450 /* 22-29 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
451 /* 30-34 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX
452 },
453 /* VMX_VMCSFIELD_WIDTH_NATURAL | VMX_VMCSFIELD_TYPE_GUEST_STATE: */
454 {
455 /* 0 */ RT_UOFFSETOF(VMXVVMCS, u64GuestCr0),
456 /* 1 */ RT_UOFFSETOF(VMXVVMCS, u64GuestCr3),
457 /* 2 */ RT_UOFFSETOF(VMXVVMCS, u64GuestCr4),
458 /* 3 */ RT_UOFFSETOF(VMXVVMCS, u64GuestEsBase),
459 /* 4 */ RT_UOFFSETOF(VMXVVMCS, u64GuestCsBase),
460 /* 5 */ RT_UOFFSETOF(VMXVVMCS, u64GuestSsBase),
461 /* 6 */ RT_UOFFSETOF(VMXVVMCS, u64GuestDsBase),
462 /* 7 */ RT_UOFFSETOF(VMXVVMCS, u64GuestFsBase),
463 /* 8 */ RT_UOFFSETOF(VMXVVMCS, u64GuestGsBase),
464 /* 9 */ RT_UOFFSETOF(VMXVVMCS, u64GuestLdtrBase),
465 /* 10 */ RT_UOFFSETOF(VMXVVMCS, u64GuestTrBase),
466 /* 11 */ RT_UOFFSETOF(VMXVVMCS, u64GuestGdtrBase),
467 /* 12 */ RT_UOFFSETOF(VMXVVMCS, u64GuestIdtrBase),
468 /* 13 */ RT_UOFFSETOF(VMXVVMCS, u64GuestDr7),
469 /* 14 */ RT_UOFFSETOF(VMXVVMCS, u64GuestRsp),
470 /* 15 */ RT_UOFFSETOF(VMXVVMCS, u64GuestRip),
471 /* 16 */ RT_UOFFSETOF(VMXVVMCS, u64GuestRFlags),
472 /* 17 */ RT_UOFFSETOF(VMXVVMCS, u64GuestPendingDbgXcpts),
473 /* 18 */ RT_UOFFSETOF(VMXVVMCS, u64GuestSysenterEsp),
474 /* 19 */ RT_UOFFSETOF(VMXVVMCS, u64GuestSysenterEip),
475 /* 20 */ RT_UOFFSETOF(VMXVVMCS, u64GuestSCetMsr),
476 /* 21 */ RT_UOFFSETOF(VMXVVMCS, u64GuestSsp),
477 /* 22 */ RT_UOFFSETOF(VMXVVMCS, u64GuestIntrSspTableAddrMsr),
478 /* 23-30 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
479 /* 31-34 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX
480 },
481 /* VMX_VMCSFIELD_WIDTH_NATURAL | VMX_VMCSFIELD_TYPE_HOST_STATE: */
482 {
483 /* 0 */ RT_UOFFSETOF(VMXVVMCS, u64HostCr0),
484 /* 1 */ RT_UOFFSETOF(VMXVVMCS, u64HostCr3),
485 /* 2 */ RT_UOFFSETOF(VMXVVMCS, u64HostCr4),
486 /* 3 */ RT_UOFFSETOF(VMXVVMCS, u64HostFsBase),
487 /* 4 */ RT_UOFFSETOF(VMXVVMCS, u64HostGsBase),
488 /* 5 */ RT_UOFFSETOF(VMXVVMCS, u64HostTrBase),
489 /* 6 */ RT_UOFFSETOF(VMXVVMCS, u64HostGdtrBase),
490 /* 7 */ RT_UOFFSETOF(VMXVVMCS, u64HostIdtrBase),
491 /* 8 */ RT_UOFFSETOF(VMXVVMCS, u64HostSysenterEsp),
492 /* 9 */ RT_UOFFSETOF(VMXVVMCS, u64HostSysenterEip),
493 /* 10 */ RT_UOFFSETOF(VMXVVMCS, u64HostRsp),
494 /* 11 */ RT_UOFFSETOF(VMXVVMCS, u64HostRip),
495 /* 12 */ RT_UOFFSETOF(VMXVVMCS, u64HostSCetMsr),
496 /* 13 */ RT_UOFFSETOF(VMXVVMCS, u64HostSsp),
497 /* 14 */ RT_UOFFSETOF(VMXVVMCS, u64HostIntrSspTableAddrMsr),
498 /* 15-22 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
499 /* 23-30 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
500 /* 31-34 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX
501 }
502};
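/*
 * Illustrative sketch (compiled out): how a VMCS field encoding is turned into a
 * byte offset using the table above.  The helper name is hypothetical and the
 * VMX_BF_VMCSFIELD_WIDTH/TYPE bitfield names are assumed to follow the same
 * naming convention as VMX_BF_VMCSFIELD_INDEX used by the accessors below.
 */
# if 0
DECLINLINE(uint16_t) iemVmxGetVmcsFieldOffsetSketch(uint32_t uFieldEnc)
{
    uint8_t const uWidth     = RT_BF_GET(uFieldEnc, VMX_BF_VMCSFIELD_WIDTH);
    uint8_t const uType      = RT_BF_GET(uFieldEnc, VMX_BF_VMCSFIELD_TYPE);
    uint8_t const uIndex     = RT_BF_GET(uFieldEnc, VMX_BF_VMCSFIELD_INDEX);
    uint8_t const uWidthType = (uWidth << 2) | uType;   /* First array dimension. */
    Assert(uIndex <= VMX_V_VMCS_MAX_INDEX);              /* Second array dimension. */
    return g_aoffVmcsMap[uWidthType][uIndex];            /* UINT16_MAX marks an unmapped field. */
}
# endif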
503
504
505/**
506 * Gets a host selector from the VMCS.
507 *
508 * @param pVmcs Pointer to the virtual VMCS.
509 * @param iSegReg The index of the segment register (X86_SREG_XXX).
510 */
511DECLINLINE(RTSEL) iemVmxVmcsGetHostSelReg(PCVMXVVMCS pVmcs, uint8_t iSegReg)
512{
513 Assert(iSegReg < X86_SREG_COUNT);
514 RTSEL HostSel;
515 uint8_t const uWidth = VMX_VMCSFIELD_WIDTH_16BIT;
516 uint8_t const uType = VMX_VMCSFIELD_TYPE_HOST_STATE;
517 uint8_t const uWidthType = (uWidth << 2) | uType;
518 uint8_t const uIndex = iSegReg + RT_BF_GET(VMX_VMCS16_HOST_ES_SEL, VMX_BF_VMCSFIELD_INDEX);
519 Assert(uIndex <= VMX_V_VMCS_MAX_INDEX);
520 uint16_t const offField = g_aoffVmcsMap[uWidthType][uIndex];
521 uint8_t const *pbVmcs = (uint8_t *)pVmcs;
522 uint8_t const *pbField = pbVmcs + offField;
523 HostSel = *(uint16_t *)pbField;
524 return HostSel;
525}
526
527
528/**
529 * Sets a guest segment register in the VMCS.
530 *
531 * @param pVmcs Pointer to the virtual VMCS.
532 * @param iSegReg The index of the segment register (X86_SREG_XXX).
533 * @param pSelReg Pointer to the segment register.
534 */
535static void iemVmxVmcsSetGuestSegReg(PCVMXVVMCS pVmcs, uint8_t iSegReg, PCCPUMSELREG pSelReg) RT_NOEXCEPT
536{
537 Assert(pSelReg);
538 Assert(iSegReg < X86_SREG_COUNT);
539
540 /* Selector. */
541 {
542 uint8_t const uWidth = VMX_VMCSFIELD_WIDTH_16BIT;
543 uint8_t const uType = VMX_VMCSFIELD_TYPE_GUEST_STATE;
544 uint8_t const uWidthType = (uWidth << 2) | uType;
545 uint8_t const uIndex = iSegReg + RT_BF_GET(VMX_VMCS16_GUEST_ES_SEL, VMX_BF_VMCSFIELD_INDEX);
546 Assert(uIndex <= VMX_V_VMCS_MAX_INDEX);
547 uint16_t const offField = g_aoffVmcsMap[uWidthType][uIndex];
548 uint8_t *pbVmcs = (uint8_t *)pVmcs;
549 uint8_t *pbField = pbVmcs + offField;
550 *(uint16_t *)pbField = pSelReg->Sel;
551 }
552
553 /* Limit. */
554 {
555 uint8_t const uWidth = VMX_VMCSFIELD_WIDTH_32BIT;
556 uint8_t const uType = VMX_VMCSFIELD_TYPE_GUEST_STATE;
557 uint8_t const uWidthType = (uWidth << 2) | uType;
558 uint8_t const uIndex = iSegReg + RT_BF_GET(VMX_VMCS32_GUEST_ES_LIMIT, VMX_BF_VMCSFIELD_INDEX);
559 Assert(uIndex <= VMX_V_VMCS_MAX_INDEX);
560 uint16_t const offField = g_aoffVmcsMap[uWidthType][uIndex];
561 uint8_t *pbVmcs = (uint8_t *)pVmcs;
562 uint8_t *pbField = pbVmcs + offField;
563 *(uint32_t *)pbField = pSelReg->u32Limit;
564 }
565
566 /* Base. */
567 {
568 uint8_t const uWidth = VMX_VMCSFIELD_WIDTH_NATURAL;
569 uint8_t const uType = VMX_VMCSFIELD_TYPE_GUEST_STATE;
570 uint8_t const uWidthType = (uWidth << 2) | uType;
571 uint8_t const uIndex = iSegReg + RT_BF_GET(VMX_VMCS_GUEST_ES_BASE, VMX_BF_VMCSFIELD_INDEX);
572 Assert(uIndex <= VMX_V_VMCS_MAX_INDEX);
573 uint16_t const offField = g_aoffVmcsMap[uWidthType][uIndex];
574 uint8_t *pbVmcs = (uint8_t *)pVmcs;
575 uint8_t *pbField = pbVmcs + offField;
576 *(uint64_t *)pbField = pSelReg->u64Base;
577 }
578
579 /* Attributes. */
580 {
581 uint32_t const fValidAttrMask = X86DESCATTR_TYPE | X86DESCATTR_DT | X86DESCATTR_DPL | X86DESCATTR_P
582 | X86DESCATTR_AVL | X86DESCATTR_L | X86DESCATTR_D | X86DESCATTR_G
583 | X86DESCATTR_UNUSABLE;
584 uint8_t const uWidth = VMX_VMCSFIELD_WIDTH_32BIT;
585 uint8_t const uType = VMX_VMCSFIELD_TYPE_GUEST_STATE;
586 uint8_t const uWidthType = (uWidth << 2) | uType;
587 uint8_t const uIndex = iSegReg + RT_BF_GET(VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS, VMX_BF_VMCSFIELD_INDEX);
588 Assert(uIndex <= VMX_V_VMCS_MAX_INDEX);
589 uint16_t const offField = g_aoffVmcsMap[uWidthType][uIndex];
590 uint8_t *pbVmcs = (uint8_t *)pVmcs;
591 uint8_t *pbField = pbVmcs + offField;
592 *(uint32_t *)pbField = pSelReg->Attr.u & fValidAttrMask;
593 }
594}
595
596
597/**
598 * Gets a guest segment register from the VMCS.
599 *
600 * @returns VBox status code.
601 * @param pVmcs Pointer to the virtual VMCS.
602 * @param iSegReg The index of the segment register (X86_SREG_XXX).
603 * @param pSelReg Where to store the segment register (only updated when
604 * VINF_SUCCESS is returned).
605 *
606 * @remarks Warning! This does not validate the contents of the retrieved segment
607 * register.
608 */
609static int iemVmxVmcsGetGuestSegReg(PCVMXVVMCS pVmcs, uint8_t iSegReg, PCPUMSELREG pSelReg) RT_NOEXCEPT
610{
611 Assert(pSelReg);
612 Assert(iSegReg < X86_SREG_COUNT);
613
614 /* Selector. */
615 uint16_t u16Sel;
616 {
617 uint8_t const uWidth = VMX_VMCSFIELD_WIDTH_16BIT;
618 uint8_t const uType = VMX_VMCSFIELD_TYPE_GUEST_STATE;
619 uint8_t const uWidthType = (uWidth << 2) | uType;
620 uint8_t const uIndex = iSegReg + RT_BF_GET(VMX_VMCS16_GUEST_ES_SEL, VMX_BF_VMCSFIELD_INDEX);
621 AssertReturn(uIndex <= VMX_V_VMCS_MAX_INDEX, VERR_IEM_IPE_3);
622 uint16_t const offField = g_aoffVmcsMap[uWidthType][uIndex];
623 uint8_t const *pbVmcs = (uint8_t *)pVmcs;
624 uint8_t const *pbField = pbVmcs + offField;
625 u16Sel = *(uint16_t *)pbField;
626 }
627
628 /* Limit. */
629 uint32_t u32Limit;
630 {
631 uint8_t const uWidth = VMX_VMCSFIELD_WIDTH_32BIT;
632 uint8_t const uType = VMX_VMCSFIELD_TYPE_GUEST_STATE;
633 uint8_t const uWidthType = (uWidth << 2) | uType;
634 uint8_t const uIndex = iSegReg + RT_BF_GET(VMX_VMCS32_GUEST_ES_LIMIT, VMX_BF_VMCSFIELD_INDEX);
635 AssertReturn(uIndex <= VMX_V_VMCS_MAX_INDEX, VERR_IEM_IPE_3);
636 uint16_t const offField = g_aoffVmcsMap[uWidthType][uIndex];
637 uint8_t const *pbVmcs = (uint8_t *)pVmcs;
638 uint8_t const *pbField = pbVmcs + offField;
639 u32Limit = *(uint32_t *)pbField;
640 }
641
642 /* Base. */
643 uint64_t u64Base;
644 {
645 uint8_t const uWidth = VMX_VMCSFIELD_WIDTH_NATURAL;
646 uint8_t const uType = VMX_VMCSFIELD_TYPE_GUEST_STATE;
647 uint8_t const uWidthType = (uWidth << 2) | uType;
648 uint8_t const uIndex = iSegReg + RT_BF_GET(VMX_VMCS_GUEST_ES_BASE, VMX_BF_VMCSFIELD_INDEX);
649 AssertReturn(uIndex <= VMX_V_VMCS_MAX_INDEX, VERR_IEM_IPE_3);
650 uint16_t const offField = g_aoffVmcsMap[uWidthType][uIndex];
651 uint8_t const *pbVmcs = (uint8_t *)pVmcs;
652 uint8_t const *pbField = pbVmcs + offField;
653 u64Base = *(uint64_t *)pbField;
654 /** @todo NSTVMX: Should we zero out high bits here for 32-bit virtual CPUs? */
655 }
656
657 /* Attributes. */
658 uint32_t u32Attr;
659 {
660 uint8_t const uWidth = VMX_VMCSFIELD_WIDTH_32BIT;
661 uint8_t const uType = VMX_VMCSFIELD_TYPE_GUEST_STATE;
662 uint8_t const uWidthType = (uWidth << 2) | uType;
663 uint8_t const uIndex = iSegReg + RT_BF_GET(VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS, VMX_BF_VMCSFIELD_INDEX);
664 AssertReturn(uIndex <= VMX_V_VMCS_MAX_INDEX, VERR_IEM_IPE_3);
665 uint16_t const offField = g_aoffVmcsMap[uWidthType][uIndex];
666 uint8_t const *pbVmcs = (uint8_t *)pVmcs;
667 uint8_t const *pbField = pbVmcs + offField;
668 u32Attr = *(uint32_t *)pbField;
669 }
670
671 pSelReg->Sel = u16Sel;
672 pSelReg->ValidSel = u16Sel;
673 pSelReg->fFlags = CPUMSELREG_FLAGS_VALID;
674 pSelReg->u32Limit = u32Limit;
675 pSelReg->u64Base = u64Base;
676 pSelReg->Attr.u = u32Attr;
677 return VINF_SUCCESS;
678}
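/*
 * Illustrative sketch (compiled out): fetching guest CS from the current virtual
 * VMCS.  The local variable name is hypothetical and, as noted above, the
 * returned register contents are not validated here.
 */
# if 0
 CPUMSELREG SelCs;
 int const rc = iemVmxVmcsGetGuestSegReg(&pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs, X86_SREG_CS, &SelCs);
 AssertRC(rc); /* Only fails with VERR_IEM_IPE_3 on an out-of-range index. */
# endif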
679
680
681/**
682 * Converts an IEM exception event type to a VMX event type.
683 *
684 * @returns The VMX event type.
685 * @param uVector The interrupt / exception vector.
686 * @param fFlags The IEM event flags (see IEM_XCPT_FLAGS_XXX).
687 */
688DECLINLINE(uint8_t) iemVmxGetEventType(uint32_t uVector, uint32_t fFlags)
689{
690 /* Paranoia (callers may use these interchangeably). */
691 AssertCompile(VMX_EXIT_INT_INFO_TYPE_NMI == VMX_IDT_VECTORING_INFO_TYPE_NMI);
692 AssertCompile(VMX_EXIT_INT_INFO_TYPE_HW_XCPT == VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT);
693 AssertCompile(VMX_EXIT_INT_INFO_TYPE_EXT_INT == VMX_IDT_VECTORING_INFO_TYPE_EXT_INT);
694 AssertCompile(VMX_EXIT_INT_INFO_TYPE_SW_XCPT == VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT);
695 AssertCompile(VMX_EXIT_INT_INFO_TYPE_SW_INT == VMX_IDT_VECTORING_INFO_TYPE_SW_INT);
696 AssertCompile(VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT == VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT);
697 AssertCompile(VMX_EXIT_INT_INFO_TYPE_NMI == VMX_ENTRY_INT_INFO_TYPE_NMI);
698 AssertCompile(VMX_EXIT_INT_INFO_TYPE_HW_XCPT == VMX_ENTRY_INT_INFO_TYPE_HW_XCPT);
699 AssertCompile(VMX_EXIT_INT_INFO_TYPE_EXT_INT == VMX_ENTRY_INT_INFO_TYPE_EXT_INT);
700 AssertCompile(VMX_EXIT_INT_INFO_TYPE_SW_XCPT == VMX_ENTRY_INT_INFO_TYPE_SW_XCPT);
701 AssertCompile(VMX_EXIT_INT_INFO_TYPE_SW_INT == VMX_ENTRY_INT_INFO_TYPE_SW_INT);
702 AssertCompile(VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT == VMX_ENTRY_INT_INFO_TYPE_PRIV_SW_XCPT);
703
704 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
705 {
706 if (uVector == X86_XCPT_NMI)
707 return VMX_EXIT_INT_INFO_TYPE_NMI;
708 return VMX_EXIT_INT_INFO_TYPE_HW_XCPT;
709 }
710
711 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
712 {
713 if (fFlags & (IEM_XCPT_FLAGS_BP_INSTR | IEM_XCPT_FLAGS_OF_INSTR))
714 return VMX_EXIT_INT_INFO_TYPE_SW_XCPT;
715 if (fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)
716 return VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT;
717 return VMX_EXIT_INT_INFO_TYPE_SW_INT;
718 }
719
720 Assert(fFlags & IEM_XCPT_FLAGS_T_EXT_INT);
721 return VMX_EXIT_INT_INFO_TYPE_EXT_INT;
722}
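/*
 * Illustrative sketch (compiled out): INT3 maps to the software-exception type
 * while ICEBP (INT1) maps to the privileged software-exception type, matching
 * the flag checks above.
 */
# if 0
 Assert(iemVmxGetEventType(X86_XCPT_BP, IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR)
        == VMX_EXIT_INT_INFO_TYPE_SW_XCPT);
 Assert(iemVmxGetEventType(X86_XCPT_DB, IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)
        == VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT);
# endif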
723
724
725/**
726 * Determines whether the guest is using PAE paging given the VMCS.
727 *
728 * @returns @c true if PAE paging mode is used, @c false otherwise.
729 * @param pVmcs Pointer to the virtual VMCS.
730 *
731 * @warning Only use this prior to switching the guest-CPU state with the
732 * nested-guest CPU state!
733 */
734DECL_FORCE_INLINE(bool) iemVmxVmcsIsGuestPaePagingEnabled(PCVMXVVMCS pVmcs)
735{
736 return ( !(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST)
737 && (pVmcs->u64GuestCr4.u & X86_CR4_PAE)
738 && (pVmcs->u64GuestCr0.u & X86_CR0_PG));
739}
740
741
742/**
743 * Sets the Exit qualification VMCS field.
744 *
745 * @param pVCpu The cross context virtual CPU structure.
746 * @param u64ExitQual The Exit qualification.
747 */
748DECL_FORCE_INLINE(void) iemVmxVmcsSetExitQual(PVMCPUCC pVCpu, uint64_t u64ExitQual)
749{
750 pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u64RoExitQual.u = u64ExitQual;
751}
752
753
754/**
755 * Sets the VM-exit interruption information field.
756 *
757 * @param pVCpu The cross context virtual CPU structure.
758 * @param uExitIntInfo The VM-exit interruption information.
759 */
760DECL_FORCE_INLINE(void) iemVmxVmcsSetExitIntInfo(PVMCPUCC pVCpu, uint32_t uExitIntInfo)
761{
762 pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32RoExitIntInfo = uExitIntInfo;
763}
764
765
766/**
767 * Sets the VM-exit interruption error code.
768 *
769 * @param pVCpu The cross context virtual CPU structure.
770 * @param uErrCode The error code.
771 */
772DECL_FORCE_INLINE(void) iemVmxVmcsSetExitIntErrCode(PVMCPUCC pVCpu, uint32_t uErrCode)
773{
774 pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32RoExitIntErrCode = uErrCode;
775}
776
777
778/**
779 * Sets the IDT-vectoring information field.
780 *
781 * @param pVCpu The cross context virtual CPU structure.
782 * @param uIdtVectorInfo The IDT-vectoring information.
783 */
784DECL_FORCE_INLINE(void) iemVmxVmcsSetIdtVectoringInfo(PVMCPUCC pVCpu, uint32_t uIdtVectorInfo)
785{
786 pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32RoIdtVectoringInfo = uIdtVectorInfo;
787}
788
789
790/**
791 * Sets the IDT-vectoring error code field.
792 *
793 * @param pVCpu The cross context virtual CPU structure.
794 * @param uErrCode The error code.
795 */
796DECL_FORCE_INLINE(void) iemVmxVmcsSetIdtVectoringErrCode(PVMCPUCC pVCpu, uint32_t uErrCode)
797{
798 pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32RoIdtVectoringErrCode = uErrCode;
799}
800
801
802/**
803 * Sets the VM-exit guest-linear address VMCS field.
804 *
805 * @param pVCpu The cross context virtual CPU structure.
806 * @param uGuestLinearAddr The VM-exit guest-linear address.
807 */
808DECL_FORCE_INLINE(void) iemVmxVmcsSetExitGuestLinearAddr(PVMCPUCC pVCpu, uint64_t uGuestLinearAddr)
809{
810 /* Bits 63:32 of guest-linear address MBZ if the guest isn't in long mode prior to the VM-exit. */
811 Assert(CPUMIsGuestInLongModeEx(IEM_GET_CTX(pVCpu)) || !(uGuestLinearAddr & UINT64_C(0xffffffff00000000)));
812 pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u64RoGuestLinearAddr.u = uGuestLinearAddr;
813}
814
815
816/**
817 * Sets the VM-exit guest-physical address VMCS field.
818 *
819 * @param pVCpu The cross context virtual CPU structure.
820 * @param uGuestPhysAddr The VM-exit guest-physical address.
821 */
822DECL_FORCE_INLINE(void) iemVmxVmcsSetExitGuestPhysAddr(PVMCPUCC pVCpu, uint64_t uGuestPhysAddr)
823{
824 pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u64RoGuestPhysAddr.u = uGuestPhysAddr;
825}
826
827
828/**
829 * Sets the VM-exit instruction length VMCS field.
830 *
831 * @param pVCpu The cross context virtual CPU structure.
832 * @param cbInstr The VM-exit instruction length in bytes.
833 *
834 * @remarks Callers may clear this field to 0. Hence, this function does not check
835 * the validity of the instruction length.
836 */
837DECL_FORCE_INLINE(void) iemVmxVmcsSetExitInstrLen(PVMCPUCC pVCpu, uint32_t cbInstr)
838{
839 pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32RoExitInstrLen = cbInstr;
840}
841
842
843/**
844 * Sets the VM-exit instruction information VMCS field.
845 *
846 * @param pVCpu The cross context virtual CPU structure.
847 * @param uExitInstrInfo The VM-exit instruction information.
848 */
849DECL_FORCE_INLINE(void) iemVmxVmcsSetExitInstrInfo(PVMCPUCC pVCpu, uint32_t uExitInstrInfo)
850{
851 pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32RoExitInstrInfo = uExitInstrInfo;
852}
853
854
855/**
856 * Sets the guest pending-debug exceptions field.
857 *
858 * @param pVCpu The cross context virtual CPU structure.
859 * @param uGuestPendingDbgXcpts The guest pending-debug exceptions.
860 */
861DECL_FORCE_INLINE(void) iemVmxVmcsSetGuestPendingDbgXcpts(PVMCPUCC pVCpu, uint64_t uGuestPendingDbgXcpts)
862{
863 Assert(!(uGuestPendingDbgXcpts & VMX_VMCS_GUEST_PENDING_DEBUG_VALID_MASK));
864 pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u64GuestPendingDbgXcpts.u = uGuestPendingDbgXcpts;
865}
866
867
868/**
869 * Implements VMSucceed for VMX instruction success.
870 *
871 * @param pVCpu The cross context virtual CPU structure.
872 */
873DECL_FORCE_INLINE(void) iemVmxVmSucceed(PVMCPUCC pVCpu)
874{
875 return CPUMSetGuestVmxVmSucceed(&pVCpu->cpum.GstCtx);
876}
877
878
879/**
880 * Implements VMFailInvalid for VMX instruction failure.
881 *
882 * @param pVCpu The cross context virtual CPU structure.
883 */
884DECL_FORCE_INLINE(void) iemVmxVmFailInvalid(PVMCPUCC pVCpu)
885{
886 return CPUMSetGuestVmxVmFailInvalid(&pVCpu->cpum.GstCtx);
887}
888
889
890/**
891 * Implements VMFail for VMX instruction failure.
892 *
893 * @param pVCpu The cross context virtual CPU structure.
894 * @param enmInsErr The VM instruction error.
895 */
896DECL_FORCE_INLINE(void) iemVmxVmFail(PVMCPUCC pVCpu, VMXINSTRERR enmInsErr)
897{
898 return CPUMSetGuestVmxVmFail(&pVCpu->cpum.GstCtx, enmInsErr);
899}
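/*
 * Illustrative sketch (compiled out): the usual RFLAGS reporting pattern for a
 * VMX instruction.  The instruction-error value shown is illustrative; each
 * instruction picks the VMXINSTRERR value mandated by the Intel spec.
 */
# if 0
 if (RT_SUCCESS(rc))
     iemVmxVmSucceed(pVCpu);                                      /* All status flags cleared. */
 else if (!IEM_VMX_HAS_CURRENT_VMCS(pVCpu))
     iemVmxVmFailInvalid(pVCpu);                                  /* CF=1, no current VMCS. */
 else
     iemVmxVmFail(pVCpu, VMXINSTRERR_VMREAD_INVALID_COMPONENT);   /* ZF=1 + VM-instruction error field. */
# endif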
900
901
902/**
903 * Checks if the given auto-load/store MSR area count is valid for the
904 * implementation.
905 *
906 * @returns @c true if it's within the valid limit, @c false otherwise.
907 * @param pVCpu The cross context virtual CPU structure.
908 * @param uMsrCount The MSR area count to check.
909 */
910DECL_FORCE_INLINE(bool) iemVmxIsAutoMsrCountValid(PCVMCPU pVCpu, uint32_t uMsrCount)
911{
912 uint64_t const u64VmxMiscMsr = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Misc;
913 uint32_t const cMaxSupportedMsrs = VMX_MISC_MAX_MSRS(u64VmxMiscMsr);
914 Assert(cMaxSupportedMsrs <= VMX_V_AUTOMSR_AREA_SIZE / sizeof(VMXAUTOMSR));
915 if (uMsrCount <= cMaxSupportedMsrs)
916 return true;
917 return false;
918}
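/*
 * Illustrative sketch (compiled out): typical use during VM-entry checks.  The
 * diagnostic value is a placeholder; the real checks use a dedicated
 * kVmxVDiag_Vmentry_* value per failing field.
 */
# if 0
 if (iemVmxIsAutoMsrCountValid(pVCpu, pVmcs->u32EntryMsrLoadCount))
 { /* likely */ }
 else
     IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, "msr-load-count", kVmxVDiag_Ipe_1 /* placeholder */);
# endif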
919
920
921/**
922 * Flushes the current VMCS contents back to guest memory.
923 *
924 * @returns VBox status code.
925 * @param pVCpu The cross context virtual CPU structure.
926 */
927DECL_FORCE_INLINE(int) iemVmxWriteCurrentVmcsToGstMem(PVMCPUCC pVCpu)
928{
929 Assert(IEM_VMX_HAS_CURRENT_VMCS(pVCpu));
930 int rc = PGMPhysSimpleWriteGCPhys(pVCpu->CTX_SUFF(pVM), IEM_VMX_GET_CURRENT_VMCS(pVCpu),
931 &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs, sizeof(pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs));
932 return rc;
933}
934
935
936/**
937 * Populates the current VMCS contents from guest memory.
938 *
939 * @returns VBox status code.
940 * @param pVCpu The cross context virtual CPU structure.
941 */
942DECL_FORCE_INLINE(int) iemVmxReadCurrentVmcsFromGstMem(PVMCPUCC pVCpu)
943{
944 Assert(IEM_VMX_HAS_CURRENT_VMCS(pVCpu));
945 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs,
946 IEM_VMX_GET_CURRENT_VMCS(pVCpu), sizeof(pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs));
947 return rc;
948}
949
950
951/**
952 * Gets the instruction diagnostic for segment base checks during VM-entry of a
953 * nested-guest.
954 *
955 * @param iSegReg The segment index (X86_SREG_XXX).
956 */
957static VMXVDIAG iemVmxGetDiagVmentrySegBase(unsigned iSegReg) RT_NOEXCEPT
958{
959 switch (iSegReg)
960 {
961 case X86_SREG_CS: return kVmxVDiag_Vmentry_GuestSegBaseCs;
962 case X86_SREG_DS: return kVmxVDiag_Vmentry_GuestSegBaseDs;
963 case X86_SREG_ES: return kVmxVDiag_Vmentry_GuestSegBaseEs;
964 case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegBaseFs;
965 case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegBaseGs;
966 case X86_SREG_SS: return kVmxVDiag_Vmentry_GuestSegBaseSs;
967 IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_1);
968 }
969}
970
971
972/**
973 * Gets the instruction diagnostic for segment base checks during VM-entry of a
974 * nested-guest that is in Virtual-8086 mode.
975 *
976 * @param iSegReg The segment index (X86_SREG_XXX).
977 */
978static VMXVDIAG iemVmxGetDiagVmentrySegBaseV86(unsigned iSegReg) RT_NOEXCEPT
979{
980 switch (iSegReg)
981 {
982 case X86_SREG_CS: return kVmxVDiag_Vmentry_GuestSegBaseV86Cs;
983 case X86_SREG_DS: return kVmxVDiag_Vmentry_GuestSegBaseV86Ds;
984 case X86_SREG_ES: return kVmxVDiag_Vmentry_GuestSegBaseV86Es;
985 case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegBaseV86Fs;
986 case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegBaseV86Gs;
987 case X86_SREG_SS: return kVmxVDiag_Vmentry_GuestSegBaseV86Ss;
988 IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_2);
989 }
990}
991
992
993/**
994 * Gets the instruction diagnostic for segment limit checks during VM-entry of a
995 * nested-guest that is in Virtual-8086 mode.
996 *
997 * @param iSegReg The segment index (X86_SREG_XXX).
998 */
999static VMXVDIAG iemVmxGetDiagVmentrySegLimitV86(unsigned iSegReg) RT_NOEXCEPT
1000{
1001 switch (iSegReg)
1002 {
1003 case X86_SREG_CS: return kVmxVDiag_Vmentry_GuestSegLimitV86Cs;
1004 case X86_SREG_DS: return kVmxVDiag_Vmentry_GuestSegLimitV86Ds;
1005 case X86_SREG_ES: return kVmxVDiag_Vmentry_GuestSegLimitV86Es;
1006 case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegLimitV86Fs;
1007 case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegLimitV86Gs;
1008 case X86_SREG_SS: return kVmxVDiag_Vmentry_GuestSegLimitV86Ss;
1009 IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_3);
1010 }
1011}
1012
1013
1014/**
1015 * Gets the instruction diagnostic for segment attribute checks during VM-entry of a
1016 * nested-guest that is in Virtual-8086 mode.
1017 *
1018 * @param iSegReg The segment index (X86_SREG_XXX).
1019 */
1020static VMXVDIAG iemVmxGetDiagVmentrySegAttrV86(unsigned iSegReg) RT_NOEXCEPT
1021{
1022 switch (iSegReg)
1023 {
1024 case X86_SREG_CS: return kVmxVDiag_Vmentry_GuestSegAttrV86Cs;
1025 case X86_SREG_DS: return kVmxVDiag_Vmentry_GuestSegAttrV86Ds;
1026 case X86_SREG_ES: return kVmxVDiag_Vmentry_GuestSegAttrV86Es;
1027 case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegAttrV86Fs;
1028 case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegAttrV86Gs;
1029 case X86_SREG_SS: return kVmxVDiag_Vmentry_GuestSegAttrV86Ss;
1030 IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_4);
1031 }
1032}
1033
1034
1035/**
1036 * Gets the instruction diagnostic for segment attributes reserved bits failure
1037 * during VM-entry of a nested-guest.
1038 *
1039 * @param iSegReg The segment index (X86_SREG_XXX).
1040 */
1041static VMXVDIAG iemVmxGetDiagVmentrySegAttrRsvd(unsigned iSegReg) RT_NOEXCEPT
1042{
1043 switch (iSegReg)
1044 {
1045 case X86_SREG_CS: return kVmxVDiag_Vmentry_GuestSegAttrRsvdCs;
1046 case X86_SREG_DS: return kVmxVDiag_Vmentry_GuestSegAttrRsvdDs;
1047 case X86_SREG_ES: return kVmxVDiag_Vmentry_GuestSegAttrRsvdEs;
1048 case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegAttrRsvdFs;
1049 case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegAttrRsvdGs;
1050 case X86_SREG_SS: return kVmxVDiag_Vmentry_GuestSegAttrRsvdSs;
1051 IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_5);
1052 }
1053}
1054
1055
1056/**
1057 * Gets the instruction diagnostic for segment attributes descriptor-type
1058 * (code/segment or system) failure during VM-entry of a nested-guest.
1059 *
1060 * @param iSegReg The segment index (X86_SREG_XXX).
1061 */
1062static VMXVDIAG iemVmxGetDiagVmentrySegAttrDescType(unsigned iSegReg) RT_NOEXCEPT
1063{
1064 switch (iSegReg)
1065 {
1066 case X86_SREG_CS: return kVmxVDiag_Vmentry_GuestSegAttrDescTypeCs;
1067 case X86_SREG_DS: return kVmxVDiag_Vmentry_GuestSegAttrDescTypeDs;
1068 case X86_SREG_ES: return kVmxVDiag_Vmentry_GuestSegAttrDescTypeEs;
1069 case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegAttrDescTypeFs;
1070 case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegAttrDescTypeGs;
1071 case X86_SREG_SS: return kVmxVDiag_Vmentry_GuestSegAttrDescTypeSs;
1072 IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_6);
1073 }
1074}
1075
1076
1077/**
1078 * Gets the instruction diagnostic for segment attribute present-bit (P) failure
1079 * during VM-entry of a nested-guest.
1080 *
1081 * @param iSegReg The segment index (X86_SREG_XXX).
1082 */
1083static VMXVDIAG iemVmxGetDiagVmentrySegAttrPresent(unsigned iSegReg) RT_NOEXCEPT
1084{
1085 switch (iSegReg)
1086 {
1087 case X86_SREG_CS: return kVmxVDiag_Vmentry_GuestSegAttrPresentCs;
1088 case X86_SREG_DS: return kVmxVDiag_Vmentry_GuestSegAttrPresentDs;
1089 case X86_SREG_ES: return kVmxVDiag_Vmentry_GuestSegAttrPresentEs;
1090 case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegAttrPresentFs;
1091 case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegAttrPresentGs;
1092 case X86_SREG_SS: return kVmxVDiag_Vmentry_GuestSegAttrPresentSs;
1093 IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_7);
1094 }
1095}
1096
1097
1098/**
1099 * Gets the instruction diagnostic for segment attribute granularity failure during
1100 * VM-entry of a nested-guest.
1101 *
1102 * @param iSegReg The segment index (X86_SREG_XXX).
1103 */
1104static VMXVDIAG iemVmxGetDiagVmentrySegAttrGran(unsigned iSegReg) RT_NOEXCEPT
1105{
1106 switch (iSegReg)
1107 {
1108 case X86_SREG_CS: return kVmxVDiag_Vmentry_GuestSegAttrGranCs;
1109 case X86_SREG_DS: return kVmxVDiag_Vmentry_GuestSegAttrGranDs;
1110 case X86_SREG_ES: return kVmxVDiag_Vmentry_GuestSegAttrGranEs;
1111 case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegAttrGranFs;
1112 case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegAttrGranGs;
1113 case X86_SREG_SS: return kVmxVDiag_Vmentry_GuestSegAttrGranSs;
1114 IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_8);
1115 }
1116}
1117
1118/**
1119 * Gets the instruction diagnostic for segment attribute DPL/RPL failure during
1120 * VM-entry of a nested-guest.
1121 *
1122 * @param iSegReg The segment index (X86_SREG_XXX).
1123 */
1124static VMXVDIAG iemVmxGetDiagVmentrySegAttrDplRpl(unsigned iSegReg) RT_NOEXCEPT
1125{
1126 switch (iSegReg)
1127 {
1128 case X86_SREG_CS: return kVmxVDiag_Vmentry_GuestSegAttrDplRplCs;
1129 case X86_SREG_DS: return kVmxVDiag_Vmentry_GuestSegAttrDplRplDs;
1130 case X86_SREG_ES: return kVmxVDiag_Vmentry_GuestSegAttrDplRplEs;
1131 case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegAttrDplRplFs;
1132 case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegAttrDplRplGs;
1133 case X86_SREG_SS: return kVmxVDiag_Vmentry_GuestSegAttrDplRplSs;
1134 IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_9);
1135 }
1136}
1137
1138
1139/**
1140 * Gets the instruction diagnostic for segment attribute type accessed failure
1141 * during VM-entry of a nested-guest.
1142 *
1143 * @param iSegReg The segment index (X86_SREG_XXX).
1144 */
1145static VMXVDIAG iemVmxGetDiagVmentrySegAttrTypeAcc(unsigned iSegReg) RT_NOEXCEPT
1146{
1147 switch (iSegReg)
1148 {
1149 case X86_SREG_CS: return kVmxVDiag_Vmentry_GuestSegAttrTypeAccCs;
1150 case X86_SREG_DS: return kVmxVDiag_Vmentry_GuestSegAttrTypeAccDs;
1151 case X86_SREG_ES: return kVmxVDiag_Vmentry_GuestSegAttrTypeAccEs;
1152 case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegAttrTypeAccFs;
1153 case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegAttrTypeAccGs;
1154 case X86_SREG_SS: return kVmxVDiag_Vmentry_GuestSegAttrTypeAccSs;
1155 IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_10);
1156 }
1157}
1158
1159
1160/**
1161 * Saves the guest control registers, debug registers and some MSRs as part of
1162 * VM-exit.
1163 *
1164 * @param pVCpu The cross context virtual CPU structure.
1165 */
1166static void iemVmxVmexitSaveGuestControlRegsMsrs(PVMCPUCC pVCpu) RT_NOEXCEPT
1167{
1168 /*
1169 * Saves the guest control registers, debug registers and some MSRs.
1170 * See Intel spec. 27.3.1 "Saving Control Registers, Debug Registers and MSRs".
1171 */
1172 PVMXVVMCS pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
1173
1174 /* Save control registers. */
1175 pVmcs->u64GuestCr0.u = pVCpu->cpum.GstCtx.cr0;
1176 pVmcs->u64GuestCr3.u = pVCpu->cpum.GstCtx.cr3;
1177 pVmcs->u64GuestCr4.u = pVCpu->cpum.GstCtx.cr4;
1178
1179 /* Save SYSENTER CS, ESP, EIP. */
1180 pVmcs->u32GuestSysenterCS = pVCpu->cpum.GstCtx.SysEnter.cs;
1181 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
1182 {
1183 pVmcs->u64GuestSysenterEsp.u = pVCpu->cpum.GstCtx.SysEnter.esp;
1184 pVmcs->u64GuestSysenterEip.u = pVCpu->cpum.GstCtx.SysEnter.eip;
1185 }
1186 else
1187 {
1188 pVmcs->u64GuestSysenterEsp.s.Lo = pVCpu->cpum.GstCtx.SysEnter.esp;
1189 pVmcs->u64GuestSysenterEip.s.Lo = pVCpu->cpum.GstCtx.SysEnter.eip;
1190 }
1191
1192 /* Save debug registers (DR7 and IA32_DEBUGCTL MSR). */
1193 if (pVmcs->u32ExitCtls & VMX_EXIT_CTLS_SAVE_DEBUG)
1194 {
1195 pVmcs->u64GuestDr7.u = pVCpu->cpum.GstCtx.dr[7];
1196 /** @todo NSTVMX: Support IA32_DEBUGCTL MSR */
1197 }
1198
1199 /* Save PAT MSR. */
1200 if (pVmcs->u32ExitCtls & VMX_EXIT_CTLS_SAVE_PAT_MSR)
1201 pVmcs->u64GuestPatMsr.u = pVCpu->cpum.GstCtx.msrPAT;
1202
1203 /* Save EFER MSR. */
1204 if (pVmcs->u32ExitCtls & VMX_EXIT_CTLS_SAVE_EFER_MSR)
1205 pVmcs->u64GuestEferMsr.u = pVCpu->cpum.GstCtx.msrEFER;
1206
1207 /* We don't support clearing IA32_BNDCFGS MSR yet. */
1208 Assert(!(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_CLEAR_BNDCFGS_MSR));
1209
1210 /* Nothing to do for SMBASE register - We don't support SMM yet. */
1211}
1212
1213
1214/**
1215 * Saves the guest force-flags in preparation for entering the nested-guest.
1216 *
1217 * @param pVCpu The cross context virtual CPU structure.
1218 */
1219static void iemVmxVmentrySaveNmiBlockingFF(PVMCPUCC pVCpu) RT_NOEXCEPT
1220{
1221 /* We shouldn't be called multiple times during VM-entry. */
1222 Assert(pVCpu->cpum.GstCtx.hwvirt.fSavedInhibit == 0);
1223
1224 /* MTF should not be set outside VMX non-root mode. */
1225 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF));
1226
1227 /*
1228 * Preserve the required force-flags.
1229 *
1230 * We cache and clear force-flags that would affect the execution of the
1231 * nested-guest. Cached flags are then restored while returning to the guest
1232 * if necessary.
1233 *
1234 * - VMCPU_FF_INHIBIT_INTERRUPTS need not be cached as it only affects
1235 * interrupts until the completion of the current VMLAUNCH/VMRESUME
1236 * instruction. Interrupt inhibition for any nested-guest instruction
1237 * is supplied by the guest-interruptibility state VMCS field and will
1238 * be set up as part of loading the guest state. Technically
1239 * blocking-by-STI is possible with VMLAUNCH/VMRESUME but we currently
1240 * disallow it since we can't distinguish it from blocking-by-MovSS
1241 * and no nested-hypervisor we care about uses STI immediately
1242 * followed by VMLAUNCH/VMRESUME.
1243 *
1244 * - VMCPU_FF_BLOCK_NMIS needs to be cached as VM-exits caused before
1245 * successful VM-entry (due to invalid guest-state) need to continue
1246 * blocking NMIs if it was in effect before VM-entry.
1247 *
1248 * - MTF need not be preserved as it's used only in VMX non-root mode and
1249 * is supplied through the VM-execution controls.
1250 *
1251 * The remaining FFs (e.g. timers, APIC updates) can stay in place so that
1252 * we will be able to generate interrupts that may cause VM-exits for
1253 * the nested-guest.
1254 */
1255 pVCpu->cpum.GstCtx.hwvirt.fSavedInhibit = pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_INHIBIT_NMI;
1256}
1257
1258
1259/**
1260 * Restores the guest force-flags in preparation for exiting the nested-guest.
1261 *
1262 * @param pVCpu The cross context virtual CPU structure.
1263 */
1264static void iemVmxVmexitRestoreNmiBlockingFF(PVMCPUCC pVCpu) RT_NOEXCEPT
1265{
1266 /** @todo r=bird: why aren't we clearing the nested guest flags first here?
1267 * If there is some other code doing that already, it would be great
1268 * to point to it here... */
1269 pVCpu->cpum.GstCtx.eflags.uBoth |= pVCpu->cpum.GstCtx.hwvirt.fSavedInhibit;
1270 pVCpu->cpum.GstCtx.hwvirt.fSavedInhibit = 0;
1271}
1272
1273
1274/**
1275 * Performs the VMX transition to/from VMX non-root mode.
1276 *
1277 * @param pVCpu The cross context virtual CPU structure.
1278 * @param cbInstr The length of the current instruction.
1279 */
1280static int iemVmxTransition(PVMCPUCC pVCpu, uint8_t cbInstr) RT_NOEXCEPT
1281{
1282 /*
1283 * Inform PGM about paging mode changes.
1284 * We include X86_CR0_PE because PGM doesn't handle paged-real mode yet,
1285 * see comment in iemMemPageTranslateAndCheckAccess().
1286 */
1287 int rc = PGMChangeMode(pVCpu, pVCpu->cpum.GstCtx.cr0 | X86_CR0_PE, pVCpu->cpum.GstCtx.cr4, pVCpu->cpum.GstCtx.msrEFER,
1288 true /* fForce */);
1289 if (RT_SUCCESS(rc))
1290 { /* likely */ }
1291 else
1292 return rc;
1293
1294 /* Invalidate IEM TLBs now that we've forced a PGM mode change. */
1295 IEMTlbInvalidateAllGlobal(pVCpu);
1296
1297 /* Inform CPUM (recompiler), can later be removed. */
1298 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
1299
1300 /* Re-initialize IEM cache/state after the drastic mode switch. */
1301 iemReInitExec(pVCpu, cbInstr);
1302 return rc;
1303}
1304
1305
1306/**
1307 * Calculates the current VMX-preemption timer value.
1308 *
1309 * @returns The current VMX-preemption timer value.
1310 * @param pVCpu The cross context virtual CPU structure.
1311 */
1312static uint32_t iemVmxCalcPreemptTimer(PVMCPUCC pVCpu) RT_NOEXCEPT
1313{
1314 /*
1315 * Assume the following:
1316 * PreemptTimerShift = 5
1317 * VmcsPreemptTimer = 2 (i.e. need to decrement by 1 every VmcsPreemptTimer * RT_BIT(PreemptTimerShift) TSC ticks; the example below pretends this works out to 2 * 10000 = 20000 ticks)
1318 * EntryTick = 50000 (TSC at time of VM-entry)
1319 *
1320 * CurTick Delta PreemptTimerVal
1321 * ----------------------------------
1322 * 60000 10000 2
1323 * 80000 30000 1
1324 * 90000 40000 0 -> VM-exit.
1325 *
1326 * If Delta >= VmcsPreemptTimer * RT_BIT(PreemptTimerShift), a VMX-preemption timer VM-exit is caused.
1327 * The saved VMX-preemption timer value is calculated as follows:
1328 * PreemptTimerVal = VmcsPreemptTimer - (Delta / (VmcsPreemptTimer * RT_BIT(PreemptTimerShift)))
1329 * E.g.:
1330 * Delta = 10000
1331 * Tmp = 10000 / (2 * 10000) = 0 (integer division)
1332 * NewPt = 2 - 0 = 2
1333 * Delta = 30000
1334 * Tmp = 30000 / (2 * 10000) = 1 (integer division)
1335 * NewPt = 2 - 1 = 1
1336 * Delta = 40000
1337 * Tmp = 40000 / 20000 = 2
1338 * NewPt = 2 - 2 = 0
1339 */
1340 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_HWVIRT);
1341 uint32_t const uVmcsPreemptVal = pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32PreemptTimer;
1342 if (uVmcsPreemptVal > 0)
1343 {
1344 uint64_t const uCurTick = TMCpuTickGetNoCheck(pVCpu);
1345 uint64_t const uEntryTick = pVCpu->cpum.GstCtx.hwvirt.vmx.uEntryTick;
1346 uint64_t const uDelta = uCurTick - uEntryTick;
1347 uint32_t const uPreemptTimer = uVmcsPreemptVal
1348 - ASMDivU64ByU32RetU32(uDelta, uVmcsPreemptVal * RT_BIT(VMX_V_PREEMPT_TIMER_SHIFT));
1349 return uPreemptTimer;
1350 }
1351 return 0;
1352}
1353
1354
1355/**
1356 * Saves guest segment registers, GDTR, IDTR, LDTR, TR as part of VM-exit.
1357 *
1358 * @param pVCpu The cross context virtual CPU structure.
1359 */
1360static void iemVmxVmexitSaveGuestSegRegs(PVMCPUCC pVCpu) RT_NOEXCEPT
1361{
1362 /*
1363 * Save guest segment registers, GDTR, IDTR, LDTR, TR.
1364 * See Intel spec 27.3.2 "Saving Segment Registers and Descriptor-Table Registers".
1365 */
1366 /* CS, SS, ES, DS, FS, GS. */
1367 PVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
1368 for (unsigned iSegReg = 0; iSegReg < X86_SREG_COUNT; iSegReg++)
1369 {
1370 PCCPUMSELREG pSelReg = &pVCpu->cpum.GstCtx.aSRegs[iSegReg];
1371 if (!pSelReg->Attr.n.u1Unusable)
1372 iemVmxVmcsSetGuestSegReg(pVmcs, iSegReg, pSelReg);
1373 else
1374 {
1375 /*
1376 * For unusable segments the attributes are undefined except for CS and SS.
1377 * For the rest we don't bother preserving anything but the unusable bit.
1378 */
1379 switch (iSegReg)
1380 {
1381 case X86_SREG_CS:
1382 pVmcs->GuestCs = pSelReg->Sel;
1383 pVmcs->u64GuestCsBase.u = pSelReg->u64Base;
1384 pVmcs->u32GuestCsLimit = pSelReg->u32Limit;
1385 pVmcs->u32GuestCsAttr = pSelReg->Attr.u & ( X86DESCATTR_L | X86DESCATTR_D | X86DESCATTR_G
1386 | X86DESCATTR_UNUSABLE);
1387 break;
1388
1389 case X86_SREG_SS:
1390 pVmcs->GuestSs = pSelReg->Sel;
1391 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
1392 pVmcs->u64GuestSsBase.u &= UINT32_C(0xffffffff);
1393 pVmcs->u32GuestSsAttr = pSelReg->Attr.u & (X86DESCATTR_DPL | X86DESCATTR_UNUSABLE);
1394 break;
1395
1396 case X86_SREG_DS:
1397 pVmcs->GuestDs = pSelReg->Sel;
1398 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
1399 pVmcs->u64GuestDsBase.u &= UINT32_C(0xffffffff);
1400 pVmcs->u32GuestDsAttr = X86DESCATTR_UNUSABLE;
1401 break;
1402
1403 case X86_SREG_ES:
1404 pVmcs->GuestEs = pSelReg->Sel;
1405 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
1406 pVmcs->u64GuestEsBase.u &= UINT32_C(0xffffffff);
1407 pVmcs->u32GuestEsAttr = X86DESCATTR_UNUSABLE;
1408 break;
1409
1410 case X86_SREG_FS:
1411 pVmcs->GuestFs = pSelReg->Sel;
1412 pVmcs->u64GuestFsBase.u = pSelReg->u64Base;
1413 pVmcs->u32GuestFsAttr = X86DESCATTR_UNUSABLE;
1414 break;
1415
1416 case X86_SREG_GS:
1417 pVmcs->GuestGs = pSelReg->Sel;
1418 pVmcs->u64GuestGsBase.u = pSelReg->u64Base;
1419 pVmcs->u32GuestGsAttr = X86DESCATTR_UNUSABLE;
1420 break;
1421 }
1422 }
1423 }
1424
1425 /* Segment attribute bits 31:17 and 11:8 MBZ. */
1426 uint32_t const fValidAttrMask = X86DESCATTR_TYPE | X86DESCATTR_DT | X86DESCATTR_DPL | X86DESCATTR_P
1427 | X86DESCATTR_AVL | X86DESCATTR_L | X86DESCATTR_D | X86DESCATTR_G
1428 | X86DESCATTR_UNUSABLE;
1429 /* LDTR. */
1430 {
1431 PCCPUMSELREG pSelReg = &pVCpu->cpum.GstCtx.ldtr;
1432 pVmcs->GuestLdtr = pSelReg->Sel;
1433 pVmcs->u64GuestLdtrBase.u = pSelReg->u64Base;
1434 Assert(X86_IS_CANONICAL(pSelReg->u64Base));
1435 pVmcs->u32GuestLdtrLimit = pSelReg->u32Limit;
1436 pVmcs->u32GuestLdtrAttr = pSelReg->Attr.u & fValidAttrMask;
1437 }
1438
1439 /* TR. */
1440 {
1441 PCCPUMSELREG pSelReg = &pVCpu->cpum.GstCtx.tr;
1442 pVmcs->GuestTr = pSelReg->Sel;
1443 pVmcs->u64GuestTrBase.u = pSelReg->u64Base;
1444 pVmcs->u32GuestTrLimit = pSelReg->u32Limit;
1445 pVmcs->u32GuestTrAttr = pSelReg->Attr.u & fValidAttrMask;
1446 }
1447
1448 /* GDTR. */
1449 pVmcs->u64GuestGdtrBase.u = pVCpu->cpum.GstCtx.gdtr.pGdt;
1450 pVmcs->u32GuestGdtrLimit = pVCpu->cpum.GstCtx.gdtr.cbGdt;
1451
1452 /* IDTR. */
1453 pVmcs->u64GuestIdtrBase.u = pVCpu->cpum.GstCtx.idtr.pIdt;
1454 pVmcs->u32GuestIdtrLimit = pVCpu->cpum.GstCtx.idtr.cbIdt;
1455}
1456
1457
1458/**
1459 * Saves guest non-register state as part of VM-exit.
1460 *
1461 * @param pVCpu The cross context virtual CPU structure.
1462 * @param uExitReason The VM-exit reason.
1463 */
1464static void iemVmxVmexitSaveGuestNonRegState(PVMCPUCC pVCpu, uint32_t uExitReason) RT_NOEXCEPT
1465{
1466 /*
1467 * Save guest non-register state.
1468 * See Intel spec. 27.3.4 "Saving Non-Register State".
1469 */
1470 PVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
1471
1472 /*
1473 * Activity state.
1474 * Most VM-exits will occur in the active state. However, if the first instruction
1475 * following the VM-entry is a HLT instruction, and the MTF VM-execution control is set,
1476 * the VM-exit will be from the HLT activity state.
1477 *
1478 * See Intel spec. 25.5.2 "Monitor Trap Flag".
1479 */
1480 /** @todo NSTVMX: Does triple-fault VM-exit reflect a shutdown activity state or
1481 * not? */
1482 EMSTATE const enmActivityState = EMGetState(pVCpu);
1483 switch (enmActivityState)
1484 {
1485 case EMSTATE_HALTED: pVmcs->u32GuestActivityState = VMX_VMCS_GUEST_ACTIVITY_HLT; break;
1486 default: pVmcs->u32GuestActivityState = VMX_VMCS_GUEST_ACTIVITY_ACTIVE; break;
1487 }
1488
1489 /*
1490 * Interruptibility-state.
1491 */
1492 /* NMI. */
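    /* With the "virtual NMIs" pin control, NMI blocking is tracked in our fVirtNmiBlocking flag;
       otherwise it is derived from the CPU's NMI-inhibit state. */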
1493 pVmcs->u32GuestIntrState = 0;
1494 if (pVmcs->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
1495 {
1496 if (pVCpu->cpum.GstCtx.hwvirt.vmx.fVirtNmiBlocking)
1497 pVmcs->u32GuestIntrState |= VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI;
1498 }
1499 else
1500 {
1501 if (CPUMAreInterruptsInhibitedByNmi(&pVCpu->cpum.GstCtx))
1502 pVmcs->u32GuestIntrState |= VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI;
1503 }
1504
1505 /* Blocking-by-STI or blocking-by-MovSS. */
1506 uint32_t fInhibitShw;
1507 if (!CPUMIsInInterruptShadowWithUpdateEx(&pVCpu->cpum.GstCtx, &fInhibitShw))
1508    { /* probable */ }
1509 else
1510 {
1511 if (pVCpu->cpum.GstCtx.rip == pVCpu->cpum.GstCtx.uRipInhibitInt)
1512 {
1513 /*
1514 * We must ensure only one of these bits is set.
1515 * Our emulation can have both set (perhaps because AMD doesn't distinguish
1516 * between the two?). Hence the if/else below, with blocking-by-MovSS taking priority
1517 * since it blocks more. Nested Ubuntu 22.04.2 running inside a Hyper-V enabled
1518 * Windows Server 2008 R2 guest runs into this issue.
1519 *
1520 * See Intel spec. 26.3.1.5 "Checks on Guest Non-Register State".
1521 */
1522 if (fInhibitShw & CPUMCTX_INHIBIT_SHADOW_SS)
1523 pVmcs->u32GuestIntrState |= VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS;
1524 else
1525 {
1526 Assert(fInhibitShw & CPUMCTX_INHIBIT_SHADOW_STI);
1527 pVmcs->u32GuestIntrState |= VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
1528 }
1529 }
1530 }
1531 /* Nothing to do for SMI/enclave. We don't support enclaves or SMM yet. */
1532
1533 /*
1534 * Pending debug exceptions.
1535 *
1536 * For VM-exits where it is not applicable, we can safely zero out the field.
1537 * For VM-exits where it is applicable, it's expected to be updated by the caller already.
1538 */
1539 if ( uExitReason != VMX_EXIT_INIT_SIGNAL
1540 && uExitReason != VMX_EXIT_SMI
1541 && uExitReason != VMX_EXIT_ERR_MACHINE_CHECK
1542 && !VMXIsVmexitTrapLike(uExitReason))
1543 {
1544 /** @todo NSTVMX: also must exclude VM-exits caused by debug exceptions when
1545 * block-by-MovSS is in effect. */
1546 pVmcs->u64GuestPendingDbgXcpts.u = 0;
1547 }
1548
1549 /*
1550 * Save the VMX-preemption timer value back into the VMCS if the feature is enabled.
1551 *
1552 * For VMX-preemption timer VM-exits, we should have already written 0 back into the
1553 * VMCS if the feature is supported; thus there is nothing further to do here.
1554 */
1555 if ( uExitReason != VMX_EXIT_PREEMPT_TIMER
1556 && (pVmcs->u32ExitCtls & VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER))
1557 pVmcs->u32PreemptTimer = iemVmxCalcPreemptTimer(pVCpu);
1558
1559 /*
1560 * Save the guest PAE PDPTEs.
1561 */
1562 if ( !CPUMIsGuestInPAEModeEx(&pVCpu->cpum.GstCtx)
1563 || !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_EPT))
1564 {
1565 /*
1566 * Without EPT or when the nested-guest is not using PAE paging, the values saved
1567 * in the VMCS during VM-exit are undefined. We zero them here for consistency.
1568 */
1569 pVmcs->u64GuestPdpte0.u = 0;
1570 pVmcs->u64GuestPdpte1.u = 0;
1571 pVmcs->u64GuestPdpte2.u = 0;
1572 pVmcs->u64GuestPdpte3.u = 0;
1573 }
1574 else
1575 {
1576 /*
1577 * With EPT and when the nested-guest is using PAE paging, we update the PDPTEs from
1578 * the nested-guest CPU context. Both IEM (Mov CRx) and hardware-assisted execution
1579 * of the nested-guest is expected to have updated them.
1580 */
1581 pVmcs->u64GuestPdpte0.u = pVCpu->cpum.GstCtx.aPaePdpes[0].u;
1582 pVmcs->u64GuestPdpte1.u = pVCpu->cpum.GstCtx.aPaePdpes[1].u;
1583 pVmcs->u64GuestPdpte2.u = pVCpu->cpum.GstCtx.aPaePdpes[2].u;
1584 pVmcs->u64GuestPdpte3.u = pVCpu->cpum.GstCtx.aPaePdpes[3].u;
1585 }
1586
1587 /* Clear PGM's copy of the EPT pointer for added safety. */
1588 if (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_EPT)
1589 PGMSetGuestEptPtr(pVCpu, 0 /* uEptPtr */);
1590}
1591
1592
1593/**
1594 * Saves the guest-state as part of VM-exit.
1595 *
1597 * @param pVCpu The cross context virtual CPU structure.
1598 * @param uExitReason The VM-exit reason.
1599 */
1600static void iemVmxVmexitSaveGuestState(PVMCPUCC pVCpu, uint32_t uExitReason) RT_NOEXCEPT
1601{
1602 iemVmxVmexitSaveGuestControlRegsMsrs(pVCpu);
1603 iemVmxVmexitSaveGuestSegRegs(pVCpu);
1604
1605 pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u64GuestRip.u = pVCpu->cpum.GstCtx.rip;
1606 pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u64GuestRsp.u = pVCpu->cpum.GstCtx.rsp;
1607 pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u64GuestRFlags.u = pVCpu->cpum.GstCtx.rflags.u; /** @todo NSTVMX: Check RFLAGS.RF handling. */
1608
1609 iemVmxVmexitSaveGuestNonRegState(pVCpu, uExitReason);
1610}
1611
1612
1613/**
1614 * Saves the guest MSRs into the VM-exit MSR-store area as part of VM-exit.
1615 *
1616 * @returns VBox status code.
1617 * @param pVCpu The cross context virtual CPU structure.
1618 * @param uExitReason The VM-exit reason (for diagnostic purposes).
1619 */
1620static int iemVmxVmexitSaveGuestAutoMsrs(PVMCPUCC pVCpu, uint32_t uExitReason) RT_NOEXCEPT
1621{
1622 /*
1623 * Save guest MSRs.
1624 * See Intel spec. 27.4 "Saving MSRs".
1625 */
1626 PVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
1627 const char * const pszFailure = "VMX-abort";
1628
1629 /*
1630 * The VM-exit MSR-store area address need not be a valid guest-physical address if the
1631 * VM-exit MSR-store count is 0. If this is the case, bail early without reading it.
1632 * See Intel spec. 24.7.2 "VM-Exit Controls for MSRs".
1633 */
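    /* Clamp the count to the size of our internal MSR buffer so the read below cannot overflow it. */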
1634 uint32_t const cMsrs = RT_MIN(pVmcs->u32ExitMsrStoreCount, RT_ELEMENTS(pVCpu->cpum.GstCtx.hwvirt.vmx.aExitMsrStoreArea));
1635 if (!cMsrs)
1636 return VINF_SUCCESS;
1637
1638 /*
1639 * Verify the MSR auto-store count. Physical CPUs can behave unpredictably if the count
1640 * is exceeded including possibly raising #MC exceptions during VMX transition. Our
1641 * implementation causes a VMX-abort followed by a triple-fault.
1642 */
1643 bool const fIsMsrCountValid = iemVmxIsAutoMsrCountValid(pVCpu, cMsrs);
1644 if (fIsMsrCountValid)
1645 { /* likely */ }
1646 else
1647 IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, kVmxVDiag_Vmexit_MsrStoreCount);
1648
1649 /*
1650 * Optimization if the nested hypervisor is using the same guest-physical page for both
1651 * the VM-entry MSR-load area as well as the VM-exit MSR store area.
1652 */
1653 PVMXAUTOMSR pMsrArea;
1654 RTGCPHYS const GCPhysVmEntryMsrLoadArea = pVmcs->u64AddrEntryMsrLoad.u;
1655 RTGCPHYS const GCPhysVmExitMsrStoreArea = pVmcs->u64AddrExitMsrStore.u;
1656 if (GCPhysVmEntryMsrLoadArea == GCPhysVmExitMsrStoreArea)
1657 pMsrArea = pVCpu->cpum.GstCtx.hwvirt.vmx.aEntryMsrLoadArea;
1658 else
1659 {
1660 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->cpum.GstCtx.hwvirt.vmx.aExitMsrStoreArea[0],
1661 GCPhysVmExitMsrStoreArea, cMsrs * sizeof(VMXAUTOMSR));
1662 if (RT_SUCCESS(rc))
1663 pMsrArea = pVCpu->cpum.GstCtx.hwvirt.vmx.aExitMsrStoreArea;
1664 else
1665 {
1666 AssertMsgFailed(("VM-exit: Failed to read MSR auto-store area at %#RGp, rc=%Rrc\n", GCPhysVmExitMsrStoreArea, rc));
1667 IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, kVmxVDiag_Vmexit_MsrStorePtrReadPhys);
1668 }
1669 }
1670
1671 /*
1672 * Update VM-exit MSR store area.
1673 */
1674 PVMXAUTOMSR pMsr = pMsrArea;
1675 for (uint32_t idxMsr = 0; idxMsr < cMsrs; idxMsr++, pMsr++)
1676 {
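        /* MSRs with reserved bits set, IA32_SMBASE, or MSRs in the x2APIC range may not be stored
           via the MSR-store area; such entries take the else-branch below and cause a VMX-abort. */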
1677 if ( !pMsr->u32Reserved
1678 && pMsr->u32Msr != MSR_IA32_SMBASE
1679 && pMsr->u32Msr >> 8 != MSR_IA32_X2APIC_START >> 8)
1680 {
1681 VBOXSTRICTRC rcStrict = CPUMQueryGuestMsr(pVCpu, pMsr->u32Msr, &pMsr->u64Value);
1682 if (rcStrict == VINF_SUCCESS)
1683 continue;
1684
1685 /*
1686 * If we're in ring-0, we cannot handle returns to ring-3 at this point and continue the VM-exit.
1687 * If a nested hypervisor stores an MSR that requires ring-3 handling, we cause a VMX-abort,
1688 * recording the MSR index in the auxiliary info. field and indicating it further with our
1689 * own, specific diagnostic code. Later, we can try to implement handling of the MSR in ring-0
1690 * if possible, or come up with a better, generic solution.
1691 */
1692 pVCpu->cpum.GstCtx.hwvirt.vmx.uAbortAux = pMsr->u32Msr;
1693 VMXVDIAG const enmDiag = rcStrict == VINF_CPUM_R3_MSR_READ
1694 ? kVmxVDiag_Vmexit_MsrStoreRing3
1695 : kVmxVDiag_Vmexit_MsrStore;
1696 IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, enmDiag);
1697 }
1698 else
1699 {
1700 pVCpu->cpum.GstCtx.hwvirt.vmx.uAbortAux = pMsr->u32Msr;
1701 IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, kVmxVDiag_Vmexit_MsrStoreRsvd);
1702 }
1703 }
1704
1705 /*
1706 * Commit the VM-exit MSR-store area to guest memory.
1707 */
1708 int rc = PGMPhysSimpleWriteGCPhys(pVCpu->CTX_SUFF(pVM), GCPhysVmExitMsrStoreArea, pMsrArea, cMsrs * sizeof(VMXAUTOMSR));
1709 if (RT_SUCCESS(rc))
1710 return VINF_SUCCESS;
1711
1712 NOREF(uExitReason);
1713 NOREF(pszFailure);
1714
1715 AssertMsgFailed(("VM-exit: Failed to write MSR auto-store area at %#RGp, rc=%Rrc\n", GCPhysVmExitMsrStoreArea, rc));
1716 IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, kVmxVDiag_Vmexit_MsrStorePtrWritePhys);
1717}
1718
1719
1720/**
1721 * Performs a VMX abort (due to a fatal error during VM-exit).
1722 *
1723 * @returns Strict VBox status code.
1724 * @param pVCpu The cross context virtual CPU structure.
1725 * @param enmAbort The VMX abort reason.
1726 */
1727static VBOXSTRICTRC iemVmxAbort(PVMCPUCC pVCpu, VMXABORT enmAbort) RT_NOEXCEPT
1728{
1729 /*
1730 * Perform the VMX abort.
1731 * See Intel spec. 27.7 "VMX Aborts".
1732 */
1733 LogFunc(("enmAbort=%u (%s) -> RESET\n", enmAbort, VMXGetAbortDesc(enmAbort)));
1734
1735 /* We don't support SMX yet. */
1736 pVCpu->cpum.GstCtx.hwvirt.vmx.enmAbort = enmAbort;
1737 if (IEM_VMX_HAS_CURRENT_VMCS(pVCpu))
1738 {
1739 RTGCPHYS const GCPhysVmcs = IEM_VMX_GET_CURRENT_VMCS(pVCpu);
1740 uint32_t const offVmxAbort = RT_UOFFSETOF(VMXVVMCS, enmVmxAbort);
1741 PGMPhysSimpleWriteGCPhys(pVCpu->CTX_SUFF(pVM), GCPhysVmcs + offVmxAbort, &enmAbort, sizeof(enmAbort));
1742 }
1743
1744 return VINF_EM_TRIPLE_FAULT;
1745}
1746
1747
1748/**
1749 * Loads host control registers, debug registers and MSRs as part of VM-exit.
1750 *
1751 * @param pVCpu The cross context virtual CPU structure.
1752 */
1753static void iemVmxVmexitLoadHostControlRegsMsrs(PVMCPUCC pVCpu) RT_NOEXCEPT
1754{
1755 /*
1756 * Load host control registers, debug registers and MSRs.
1757 * See Intel spec. 27.5.1 "Loading Host Control Registers, Debug Registers, MSRs".
1758 */
1759 PCVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
1760 bool const fHostInLongMode = RT_BOOL(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE);
1761
1762 /* CR0. */
1763 {
1764 /* Bits 63:32, 28:19, 17, 15:6, ET, CD, NW and CR0 fixed bits are not modified. */
1765 uint64_t const fCr0IgnMask = VMX_EXIT_HOST_CR0_IGNORE_MASK;
1766 uint64_t const uHostCr0 = pVmcs->u64HostCr0.u;
1767 uint64_t const uGuestCr0 = pVCpu->cpum.GstCtx.cr0;
1768 uint64_t const uValidHostCr0 = (uHostCr0 & ~fCr0IgnMask) | (uGuestCr0 & fCr0IgnMask);
1769
1770 /* Verify we have not modified CR0 fixed bits in VMX operation. */
1771#ifdef VBOX_STRICT
1772 uint64_t const uCr0Mb1 = iemVmxGetCr0Fixed0(pVCpu, true /* fVmxNonRootMode */);
1773 bool const fUx = RT_BOOL(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_UNRESTRICTED_GUEST);
1774 AssertMsg( (uValidHostCr0 & uCr0Mb1) == uCr0Mb1
1775 && (uValidHostCr0 & ~VMX_V_CR0_FIXED1) == 0,
1776 ("host=%#RX64 guest=%#RX64 mb1=%#RX64 valid_host_cr0=%#RX64 fUx=%RTbool\n",
1777 uHostCr0, uGuestCr0, uCr0Mb1, uValidHostCr0, fUx));
1778#endif
1779 Assert(!(uValidHostCr0 >> 32));
1780 CPUMSetGuestCR0(pVCpu, uValidHostCr0);
1781 }
1782
1783 /* CR4. */
1784 {
1785 /* CR4 fixed bits are not modified. */
1786 uint64_t const uCr4Mb1 = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Cr4Fixed0;
1787 uint64_t const uCr4Mb0 = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Cr4Fixed1;
1788 uint64_t const uHostCr4 = pVmcs->u64HostCr4.u;
1789 uint64_t uValidHostCr4 = (uHostCr4 & uCr4Mb0) | uCr4Mb1;
1790 if (fHostInLongMode)
1791 uValidHostCr4 |= X86_CR4_PAE;
1792 else
1793 uValidHostCr4 &= ~(uint64_t)X86_CR4_PCIDE;
1794
1795 /* Verify we have not modified CR4 fixed bits in VMX non-root operation. */
1796 AssertMsg( (uValidHostCr4 & uCr4Mb1) == uCr4Mb1
1797 && (uValidHostCr4 & ~uCr4Mb0) == 0,
1798 ("host=%#RX64 guest=%#RX64, uCr4Mb1=%#RX64 uCr4Mb0=%#RX64 valid_host_cr4=%#RX64\n",
1799 uHostCr4, pVCpu->cpum.GstCtx.cr4, uCr4Mb1, uCr4Mb0, uValidHostCr4));
1800 CPUMSetGuestCR4(pVCpu, uValidHostCr4);
1801 }
1802
1803 /* CR3 (host value validated while checking host-state during VM-entry). */
1804 pVCpu->cpum.GstCtx.cr3 = pVmcs->u64HostCr3.u;
1805
1806 /* DR7. */
1807 pVCpu->cpum.GstCtx.dr[7] = X86_DR7_INIT_VAL;
1808
1809 /** @todo NSTVMX: Support IA32_DEBUGCTL MSR */
1810
1811 /* Save SYSENTER CS, ESP, EIP (host value validated while checking host-state during VM-entry). */
1812 pVCpu->cpum.GstCtx.SysEnter.eip = pVmcs->u64HostSysenterEip.u;
1813 pVCpu->cpum.GstCtx.SysEnter.esp = pVmcs->u64HostSysenterEsp.u;
1814 pVCpu->cpum.GstCtx.SysEnter.cs = pVmcs->u32HostSysenterCs;
1815
1816 /* FS, GS bases are loaded later while we load host segment registers. */
1817
1818 /* EFER MSR (host value validated while checking host-state during VM-entry). */
1819 if (pVmcs->u32ExitCtls & VMX_EXIT_CTLS_LOAD_EFER_MSR)
1820 pVCpu->cpum.GstCtx.msrEFER = pVmcs->u64HostEferMsr.u;
1821 else if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
1822 {
1823 if (fHostInLongMode)
1824 pVCpu->cpum.GstCtx.msrEFER |= (MSR_K6_EFER_LMA | MSR_K6_EFER_LME);
1825 else
1826 pVCpu->cpum.GstCtx.msrEFER &= ~(MSR_K6_EFER_LMA | MSR_K6_EFER_LME);
1827 }
1828
1829 /* We don't support IA32_PERF_GLOBAL_CTRL MSR yet. */
1830
1831 /* PAT MSR (host value is validated while checking host-state during VM-entry). */
1832 if (pVmcs->u32ExitCtls & VMX_EXIT_CTLS_LOAD_PAT_MSR)
1833 pVCpu->cpum.GstCtx.msrPAT = pVmcs->u64HostPatMsr.u;
1834
1835 /* We don't support IA32_BNDCFGS MSR yet. */
1836}
1837
1838
1839/**
1840 * Loads host segment registers, GDTR, IDTR, LDTR and TR as part of VM-exit.
1841 *
1842 * @param pVCpu The cross context virtual CPU structure.
1843 */
1844static void iemVmxVmexitLoadHostSegRegs(PVMCPUCC pVCpu) RT_NOEXCEPT
1845{
1846 /*
1847 * Load host segment registers, GDTR, IDTR, LDTR and TR.
1848 * See Intel spec. 27.5.2 "Loading Host Segment and Descriptor-Table Registers".
1849 *
1850 * Warning! Be careful to not touch fields that are reserved by VT-x,
1851 * e.g. segment limit high bits stored in segment attributes (in bits 11:8).
1852 */
1853 PCVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
1854 bool const fHostInLongMode = RT_BOOL(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE);
1855
1856 /* CS, SS, ES, DS, FS, GS. */
1857 for (unsigned iSegReg = 0; iSegReg < X86_SREG_COUNT; iSegReg++)
1858 {
1859 RTSEL const HostSel = iemVmxVmcsGetHostSelReg(pVmcs, iSegReg);
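        /* A null selector from the host-state area marks the segment as unusable; CS is never null
           here (enforced by VM-entry checks, see the assertion below). */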
1860 bool const fUnusable = RT_BOOL(HostSel == 0);
1861 PCPUMSELREG pSelReg = &pVCpu->cpum.GstCtx.aSRegs[iSegReg];
1862
1863 /* Selector. */
1864 pSelReg->Sel = HostSel;
1865 pSelReg->ValidSel = HostSel;
1866 pSelReg->fFlags = CPUMSELREG_FLAGS_VALID;
1867
1868 /* Limit. */
1869 pSelReg->u32Limit = 0xffffffff;
1870
1871 /* Base. */
1872 pSelReg->u64Base = 0;
1873
1874 /* Attributes. */
1875 if (iSegReg == X86_SREG_CS)
1876 {
1877 pSelReg->Attr.n.u4Type = X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ | X86_SEL_TYPE_ACCESSED;
1878 pSelReg->Attr.n.u1DescType = 1;
1879 pSelReg->Attr.n.u2Dpl = 0;
1880 pSelReg->Attr.n.u1Present = 1;
1881 pSelReg->Attr.n.u1Long = fHostInLongMode;
1882 pSelReg->Attr.n.u1DefBig = !fHostInLongMode;
1883 pSelReg->Attr.n.u1Granularity = 1;
1884 Assert(!pSelReg->Attr.n.u1Unusable);
1885 Assert(!fUnusable);
1886 }
1887 else
1888 {
1889 pSelReg->Attr.n.u4Type = X86_SEL_TYPE_RW | X86_SEL_TYPE_ACCESSED;
1890 pSelReg->Attr.n.u1DescType = 1;
1891 pSelReg->Attr.n.u2Dpl = 0;
1892 pSelReg->Attr.n.u1Present = 1;
1893 pSelReg->Attr.n.u1DefBig = 1;
1894 pSelReg->Attr.n.u1Granularity = 1;
1895 pSelReg->Attr.n.u1Unusable = fUnusable;
1896 }
1897 }
1898
1899 /* FS base. */
1900 if ( !pVCpu->cpum.GstCtx.fs.Attr.n.u1Unusable
1901 || fHostInLongMode)
1902 {
1903 Assert(X86_IS_CANONICAL(pVmcs->u64HostFsBase.u));
1904 pVCpu->cpum.GstCtx.fs.u64Base = pVmcs->u64HostFsBase.u;
1905 }
1906
1907 /* GS base. */
1908 if ( !pVCpu->cpum.GstCtx.gs.Attr.n.u1Unusable
1909 || fHostInLongMode)
1910 {
1911 Assert(X86_IS_CANONICAL(pVmcs->u64HostGsBase.u));
1912 pVCpu->cpum.GstCtx.gs.u64Base = pVmcs->u64HostGsBase.u;
1913 }
1914
1915 /* TR. */
1916 Assert(X86_IS_CANONICAL(pVmcs->u64HostTrBase.u));
1917 Assert(!pVCpu->cpum.GstCtx.tr.Attr.n.u1Unusable);
1918 pVCpu->cpum.GstCtx.tr.Sel = pVmcs->HostTr;
1919 pVCpu->cpum.GstCtx.tr.ValidSel = pVmcs->HostTr;
1920 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
1921 pVCpu->cpum.GstCtx.tr.u32Limit = X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN;
1922 pVCpu->cpum.GstCtx.tr.u64Base = pVmcs->u64HostTrBase.u;
1923 pVCpu->cpum.GstCtx.tr.Attr.n.u4Type = X86_SEL_TYPE_SYS_386_TSS_BUSY;
1924 pVCpu->cpum.GstCtx.tr.Attr.n.u1DescType = 0;
1925 pVCpu->cpum.GstCtx.tr.Attr.n.u2Dpl = 0;
1926 pVCpu->cpum.GstCtx.tr.Attr.n.u1Present = 1;
1927 pVCpu->cpum.GstCtx.tr.Attr.n.u1DefBig = 0;
1928 pVCpu->cpum.GstCtx.tr.Attr.n.u1Granularity = 0;
1929
1930 /* LDTR (Warning! do not touch the base and limits here). */
1931 pVCpu->cpum.GstCtx.ldtr.Sel = 0;
1932 pVCpu->cpum.GstCtx.ldtr.ValidSel = 0;
1933 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
1934 pVCpu->cpum.GstCtx.ldtr.Attr.u = X86DESCATTR_UNUSABLE;
1935
1936 /* GDTR. */
1937 Assert(X86_IS_CANONICAL(pVmcs->u64HostGdtrBase.u));
1938 pVCpu->cpum.GstCtx.gdtr.pGdt = pVmcs->u64HostGdtrBase.u;
1939 pVCpu->cpum.GstCtx.gdtr.cbGdt = 0xffff;
1940
1941 /* IDTR.*/
1942 Assert(X86_IS_CANONICAL(pVmcs->u64HostIdtrBase.u));
1943 pVCpu->cpum.GstCtx.idtr.pIdt = pVmcs->u64HostIdtrBase.u;
1944 pVCpu->cpum.GstCtx.idtr.cbIdt = 0xffff;
1945}
1946
1947
1948/**
1949 * Loads the host MSRs from the VM-exit MSR-load area as part of VM-exit.
1950 *
1951 * @returns VBox status code.
1952 * @param pVCpu The cross context virtual CPU structure.
1953 * @param   uExitReason   The VM-exit reason (for logging purposes).
1954 */
1955static int iemVmxVmexitLoadHostAutoMsrs(PVMCPUCC pVCpu, uint32_t uExitReason) RT_NOEXCEPT
1956{
1957 /*
1958 * Load host MSRs.
1959 * See Intel spec. 27.6 "Loading MSRs".
1960 */
1961 PCVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
1962 const char * const pszFailure = "VMX-abort";
1963
1964 /*
1965 * The VM-exit MSR-load area address need not be a valid guest-physical address if the
1966 * VM-exit MSR load count is 0. If this is the case, bail early without reading it.
1967 * See Intel spec. 24.7.2 "VM-Exit Controls for MSRs".
1968 */
1969 uint32_t const cMsrs = RT_MIN(pVmcs->u32ExitMsrLoadCount, RT_ELEMENTS(pVCpu->cpum.GstCtx.hwvirt.vmx.aExitMsrLoadArea));
1970 if (!cMsrs)
1971 return VINF_SUCCESS;
1972
1973 /*
1974 * Verify the MSR auto-load count. Physical CPUs can behave unpredictably if the count
1975 * is exceeded including possibly raising #MC exceptions during VMX transition. Our
1976 * implementation causes a VMX-abort followed by a triple-fault.
1977 */
1978 bool const fIsMsrCountValid = iemVmxIsAutoMsrCountValid(pVCpu, cMsrs);
1979 if (fIsMsrCountValid)
1980 { /* likely */ }
1981 else
1982 IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, kVmxVDiag_Vmexit_MsrLoadCount);
1983
1984 RTGCPHYS const GCPhysVmExitMsrLoadArea = pVmcs->u64AddrExitMsrLoad.u;
1985 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->cpum.GstCtx.hwvirt.vmx.aExitMsrLoadArea[0],
1986 GCPhysVmExitMsrLoadArea, cMsrs * sizeof(VMXAUTOMSR));
1987 if (RT_SUCCESS(rc))
1988 {
1989 PCVMXAUTOMSR pMsr = pVCpu->cpum.GstCtx.hwvirt.vmx.aExitMsrLoadArea;
1990 for (uint32_t idxMsr = 0; idxMsr < cMsrs; idxMsr++, pMsr++)
1991 {
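            /* FS/GS base, EFER, IA32_SMM_MONITOR_CTL, MSRs in the x2APIC range and entries with
               reserved bits set may not be loaded via the MSR-load area; such entries take the
               else-branch below and cause a VMX-abort. */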
1992 if ( !pMsr->u32Reserved
1993 && pMsr->u32Msr != MSR_K8_FS_BASE
1994 && pMsr->u32Msr != MSR_K8_GS_BASE
1995 && pMsr->u32Msr != MSR_K6_EFER
1996 && pMsr->u32Msr != MSR_IA32_SMM_MONITOR_CTL
1997 && pMsr->u32Msr >> 8 != MSR_IA32_X2APIC_START >> 8)
1998 {
1999 VBOXSTRICTRC rcStrict = CPUMSetGuestMsr(pVCpu, pMsr->u32Msr, pMsr->u64Value);
2000 if (rcStrict == VINF_SUCCESS)
2001 continue;
2002
2003 /*
2004 * If we're in ring-0, we cannot handle returns to ring-3 at this point and continue the VM-exit.
2005 * If a nested hypervisor loads an MSR that requires ring-3 handling, we cause a VMX-abort,
2006 * recording the MSR index in the auxiliary info. field and indicating it further with our
2007 * own, specific diagnostic code. Later, we can try to implement handling of the MSR in ring-0
2008 * if possible, or come up with a better, generic solution.
2009 */
2010 pVCpu->cpum.GstCtx.hwvirt.vmx.uAbortAux = pMsr->u32Msr;
2011 VMXVDIAG const enmDiag = rcStrict == VINF_CPUM_R3_MSR_WRITE
2012 ? kVmxVDiag_Vmexit_MsrLoadRing3
2013 : kVmxVDiag_Vmexit_MsrLoad;
2014 IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, enmDiag);
2015 }
2016 else
2017 IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, kVmxVDiag_Vmexit_MsrLoadRsvd);
2018 }
2019 }
2020 else
2021 {
2022 AssertMsgFailed(("VM-exit: Failed to read MSR auto-load area at %#RGp, rc=%Rrc\n", GCPhysVmExitMsrLoadArea, rc));
2023 IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, kVmxVDiag_Vmexit_MsrLoadPtrReadPhys);
2024 }
2025
2026 NOREF(uExitReason);
2027 NOREF(pszFailure);
2028 return VINF_SUCCESS;
2029}
2030
2031
2032/**
2033 * Loads the host state as part of VM-exit.
2034 *
2035 * @returns Strict VBox status code.
2036 * @param pVCpu The cross context virtual CPU structure.
2037 * @param uExitReason The VM-exit reason (for logging purposes).
2038 */
2039static VBOXSTRICTRC iemVmxVmexitLoadHostState(PVMCPUCC pVCpu, uint32_t uExitReason) RT_NOEXCEPT
2040{
2041 /*
2042 * Load host state.
2043 * See Intel spec. 27.5 "Loading Host State".
2044 */
2045 PCVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
2046 bool const fHostInLongMode = RT_BOOL(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE);
2047
2048 /* We cannot return from a long-mode guest to a host that is not in long mode. */
2049 if ( CPUMIsGuestInLongMode(pVCpu)
2050 && !fHostInLongMode)
2051 {
2052 Log(("VM-exit from long-mode guest to host not in long-mode -> VMX-Abort\n"));
2053 return iemVmxAbort(pVCpu, VMXABORT_HOST_NOT_IN_LONG_MODE);
2054 }
2055
2056 /*
2057 * Check host PAE PDPTEs prior to loading the host state.
2058 * See Intel spec. 26.5.4 "Checking and Loading Host Page-Directory-Pointer-Table Entries".
2059 */
2060 if ( (pVmcs->u64HostCr4.u & X86_CR4_PAE)
2061 && !fHostInLongMode
2062 && ( !CPUMIsGuestInPAEModeEx(&pVCpu->cpum.GstCtx)
2063 || pVmcs->u64HostCr3.u != pVCpu->cpum.GstCtx.cr3))
2064 {
2065 int const rc = PGMGstMapPaePdpesAtCr3(pVCpu, pVmcs->u64HostCr3.u);
2066 if (RT_SUCCESS(rc))
2067 { /* likely*/ }
2068 else
2069 {
2070 IEM_VMX_VMEXIT_FAILED(pVCpu, uExitReason, "VMX-abort", kVmxVDiag_Vmexit_HostPdpte);
2071            return iemVmxAbort(pVCpu, VMXABORT_HOST_PDPTE);
2072 }
2073 }
2074
2075 iemVmxVmexitLoadHostControlRegsMsrs(pVCpu);
2076 iemVmxVmexitLoadHostSegRegs(pVCpu);
2077
2078 /*
2079 * Load host RIP, RSP and RFLAGS.
2080 * See Intel spec. 27.5.3 "Loading Host RIP, RSP and RFLAGS"
2081 */
2082 pVCpu->cpum.GstCtx.rip = pVmcs->u64HostRip.u;
2083 pVCpu->cpum.GstCtx.rsp = pVmcs->u64HostRsp.u;
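    /* RFLAGS is cleared except for bit 1, which is always set (i.e. X86_EFL_1). */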
2084 pVCpu->cpum.GstCtx.rflags.u = X86_EFL_1;
2085
2086 /* Clear address range monitoring. */
2087 EMMonitorWaitClear(pVCpu);
2088
2089 /* Perform the VMX transition (PGM updates). */
2090 VBOXSTRICTRC rcStrict = iemVmxTransition(pVCpu, 0 /*cbInstr - whatever*/);
2091 if (rcStrict == VINF_SUCCESS)
2092 { /* likely */ }
2093 else if (RT_SUCCESS(rcStrict))
2094 {
2095 Log3(("VM-exit: iemVmxTransition returns %Rrc (uExitReason=%u) -> Setting passup status\n", VBOXSTRICTRC_VAL(rcStrict),
2096 uExitReason));
2097 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
2098 }
2099 else
2100 {
2101 Log3(("VM-exit: iemVmxTransition failed! rc=%Rrc (uExitReason=%u)\n", VBOXSTRICTRC_VAL(rcStrict), uExitReason));
2102 return VBOXSTRICTRC_VAL(rcStrict);
2103 }
2104
2105 Assert(rcStrict == VINF_SUCCESS);
2106
2107 /* Load MSRs from the VM-exit auto-load MSR area. */
2108 int rc = iemVmxVmexitLoadHostAutoMsrs(pVCpu, uExitReason);
2109 if (RT_FAILURE(rc))
2110 {
2111 Log(("VM-exit failed while loading host MSRs -> VMX-Abort\n"));
2112 return iemVmxAbort(pVCpu, VMXABORT_LOAD_HOST_MSR);
2113 }
2114 return VINF_SUCCESS;
2115}
2116
2117
2118/**
2119 * Gets VM-exit instruction information along with any displacement for an
2120 * instruction VM-exit.
2121 *
2122 * @returns The VM-exit instruction information.
2123 * @param pVCpu The cross context virtual CPU structure.
2124 * @param uExitReason The VM-exit reason.
2125 * @param uInstrId The VM-exit instruction identity (VMXINSTRID_XXX).
2126 * @param pGCPtrDisp Where to store the displacement field. Optional, can be
2127 * NULL.
2128 */
2129static uint32_t iemVmxGetExitInstrInfo(PVMCPUCC pVCpu, uint32_t uExitReason, VMXINSTRID uInstrId, PRTGCPTR pGCPtrDisp) RT_NOEXCEPT
2130{
2131 RTGCPTR GCPtrDisp;
2132 VMXEXITINSTRINFO ExitInstrInfo;
2133 ExitInstrInfo.u = 0;
2134
2135 /*
2136 * Get and parse the ModR/M byte from our decoded opcodes.
2137 */
2138 uint8_t bRm;
2139 uint8_t const offModRm = pVCpu->iem.s.offModRm;
2140 IEM_MODRM_GET_U8(pVCpu, bRm, offModRm);
2141 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
2142 {
2143 /*
2144 * ModR/M indicates register addressing.
2145 *
2146 * The primary/secondary register operands are reported in the iReg1 or iReg2
2147 * fields depending on whether it is a read/write form.
2148 */
2149 uint8_t idxReg1;
2150 uint8_t idxReg2;
2151 if (!VMXINSTRID_IS_MODRM_PRIMARY_OP_W(uInstrId))
2152 {
2153 idxReg1 = ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg;
2154 idxReg2 = (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB;
2155 }
2156 else
2157 {
2158 idxReg1 = (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB;
2159 idxReg2 = ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg;
2160 }
2161 ExitInstrInfo.All.u2Scaling = 0;
2162 ExitInstrInfo.All.iReg1 = idxReg1;
2163 ExitInstrInfo.All.u3AddrSize = pVCpu->iem.s.enmEffAddrMode;
2164 ExitInstrInfo.All.fIsRegOperand = 1;
2165 ExitInstrInfo.All.uOperandSize = pVCpu->iem.s.enmEffOpSize;
2166 ExitInstrInfo.All.iSegReg = 0;
2167 ExitInstrInfo.All.iIdxReg = 0;
2168 ExitInstrInfo.All.fIdxRegInvalid = 1;
2169 ExitInstrInfo.All.iBaseReg = 0;
2170 ExitInstrInfo.All.fBaseRegInvalid = 1;
2171 ExitInstrInfo.All.iReg2 = idxReg2;
2172
2173 /* Displacement not applicable for register addressing. */
2174 GCPtrDisp = 0;
2175 }
2176 else
2177 {
2178 /*
2179 * ModR/M indicates memory addressing.
2180 */
2181 uint8_t uScale = 0;
2182 bool fBaseRegValid = false;
2183 bool fIdxRegValid = false;
2184 uint8_t iBaseReg = 0;
2185 uint8_t iIdxReg = 0;
2186 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
2187 {
2188 /*
2189 * Parse the ModR/M, displacement for 16-bit addressing mode.
2190 * See Intel instruction spec. Table 2-1. "16-Bit Addressing Forms with the ModR/M Byte".
2191 */
2192 uint16_t u16Disp = 0;
2193 uint8_t const offDisp = offModRm + sizeof(bRm);
2194 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
2195 {
2196 /* Displacement without any registers. */
2197 IEM_DISP_GET_U16(pVCpu, u16Disp, offDisp);
2198 }
2199 else
2200 {
2201 /* Register (index and base). */
2202 switch (bRm & X86_MODRM_RM_MASK)
2203 {
2204 case 0: fBaseRegValid = true; iBaseReg = X86_GREG_xBX; fIdxRegValid = true; iIdxReg = X86_GREG_xSI; break;
2205 case 1: fBaseRegValid = true; iBaseReg = X86_GREG_xBX; fIdxRegValid = true; iIdxReg = X86_GREG_xDI; break;
2206 case 2: fBaseRegValid = true; iBaseReg = X86_GREG_xBP; fIdxRegValid = true; iIdxReg = X86_GREG_xSI; break;
2207 case 3: fBaseRegValid = true; iBaseReg = X86_GREG_xBP; fIdxRegValid = true; iIdxReg = X86_GREG_xDI; break;
2208 case 4: fIdxRegValid = true; iIdxReg = X86_GREG_xSI; break;
2209 case 5: fIdxRegValid = true; iIdxReg = X86_GREG_xDI; break;
2210 case 6: fBaseRegValid = true; iBaseReg = X86_GREG_xBP; break;
2211 case 7: fBaseRegValid = true; iBaseReg = X86_GREG_xBX; break;
2212 }
2213
2214 /* Register + displacement. */
2215 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
2216 {
2217 case 0: break;
2218 case 1: IEM_DISP_GET_S8_SX_U16(pVCpu, u16Disp, offDisp); break;
2219 case 2: IEM_DISP_GET_U16(pVCpu, u16Disp, offDisp); break;
2220 default:
2221 {
2222 /* Register addressing, handled at the beginning. */
2223 AssertMsgFailed(("ModR/M %#x implies register addressing, memory addressing expected!", bRm));
2224 break;
2225 }
2226 }
2227 }
2228
2229 Assert(!uScale); /* There's no scaling/SIB byte for 16-bit addressing. */
2230 GCPtrDisp = (int16_t)u16Disp; /* Sign-extend the displacement. */
2231 }
2232 else if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
2233 {
2234 /*
2235 * Parse the ModR/M, SIB, displacement for 32-bit addressing mode.
2236 * See Intel instruction spec. Table 2-2. "32-Bit Addressing Forms with the ModR/M Byte".
2237 */
2238 uint32_t u32Disp = 0;
2239 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
2240 {
2241 /* Displacement without any registers. */
2242 uint8_t const offDisp = offModRm + sizeof(bRm);
2243 IEM_DISP_GET_U32(pVCpu, u32Disp, offDisp);
2244 }
2245 else
2246 {
2247 /* Register (and perhaps scale, index and base). */
2248 uint8_t offDisp = offModRm + sizeof(bRm);
2249 iBaseReg = (bRm & X86_MODRM_RM_MASK);
2250 if (iBaseReg == 4)
2251 {
2252 /* An SIB byte follows the ModR/M byte, parse it. */
2253 uint8_t bSib;
2254 uint8_t const offSib = offModRm + sizeof(bRm);
2255 IEM_SIB_GET_U8(pVCpu, bSib, offSib);
2256
2257 /* A displacement may follow SIB, update its offset. */
2258 offDisp += sizeof(bSib);
2259
2260 /* Get the scale. */
2261 uScale = (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
2262
2263 /* Get the index register. */
2264 iIdxReg = (bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK;
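                    /* An index encoding of 4 (ESP) means no index register is used. */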
2265 fIdxRegValid = RT_BOOL(iIdxReg != 4);
2266
2267 /* Get the base register. */
2268 iBaseReg = bSib & X86_SIB_BASE_MASK;
2269 fBaseRegValid = true;
2270 if (iBaseReg == 5)
2271 {
2272 if ((bRm & X86_MODRM_MOD_MASK) == 0)
2273 {
2274 /* Mod is 0 implies a 32-bit displacement with no base. */
2275 fBaseRegValid = false;
2276 IEM_DISP_GET_U32(pVCpu, u32Disp, offDisp);
2277 }
2278 else
2279 {
2280 /* Mod is not 0 implies an 8-bit/32-bit displacement (handled below) with an EBP base. */
2281 iBaseReg = X86_GREG_xBP;
2282 }
2283 }
2284 }
2285
2286 /* Register + displacement. */
2287 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
2288 {
2289 case 0: /* Handled above */ break;
2290 case 1: IEM_DISP_GET_S8_SX_U32(pVCpu, u32Disp, offDisp); break;
2291 case 2: IEM_DISP_GET_U32(pVCpu, u32Disp, offDisp); break;
2292 default:
2293 {
2294 /* Register addressing, handled at the beginning. */
2295 AssertMsgFailed(("ModR/M %#x implies register addressing, memory addressing expected!", bRm));
2296 break;
2297 }
2298 }
2299 }
2300
2301 GCPtrDisp = (int32_t)u32Disp; /* Sign-extend the displacement. */
2302 }
2303 else
2304 {
2305 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT);
2306
2307 /*
2308 * Parse the ModR/M, SIB, displacement for 64-bit addressing mode.
2309 * See Intel instruction spec. 2.2 "IA-32e Mode".
2310 */
2311 uint64_t u64Disp = 0;
2312 bool const fRipRelativeAddr = RT_BOOL((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5);
2313 if (fRipRelativeAddr)
2314 {
2315 /*
2316 * RIP-relative addressing mode.
2317 *
2318 * The displacement is 32-bit signed implying an offset range of +/-2G.
2319 * See Intel instruction spec. 2.2.1.6 "RIP-Relative Addressing".
2320 */
2321 uint8_t const offDisp = offModRm + sizeof(bRm);
2322 IEM_DISP_GET_S32_SX_U64(pVCpu, u64Disp, offDisp);
2323 }
2324 else
2325 {
2326 uint8_t offDisp = offModRm + sizeof(bRm);
2327
2328 /*
2329 * Register (and perhaps scale, index and base).
2330 *
2331 * REX.B extends the most-significant bit of the base register. However, REX.B
2332 * is ignored while determining whether an SIB follows the opcode. Hence, we
2333 * shall OR any REX.B bit -after- inspecting for an SIB byte below.
2334 *
2335 * See Intel instruction spec. Table 2-5. "Special Cases of REX Encodings".
2336 */
2337 iBaseReg = (bRm & X86_MODRM_RM_MASK);
2338 if (iBaseReg == 4)
2339 {
2340 /* An SIB byte follows the ModR/M byte, parse it. Displacement (if any) follows SIB. */
2341 uint8_t bSib;
2342 uint8_t const offSib = offModRm + sizeof(bRm);
2343 IEM_SIB_GET_U8(pVCpu, bSib, offSib);
2344
2345 /* Displacement may follow SIB, update its offset. */
2346 offDisp += sizeof(bSib);
2347
2348 /* Get the scale. */
2349 uScale = (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
2350
2351 /* Get the index. */
2352 iIdxReg = ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex;
2353 fIdxRegValid = RT_BOOL(iIdxReg != 4); /* R12 -can- be used as an index register. */
2354
2355 /* Get the base. */
2356 iBaseReg = (bSib & X86_SIB_BASE_MASK);
2357 fBaseRegValid = true;
2358 if (iBaseReg == 5)
2359 {
2360 if ((bRm & X86_MODRM_MOD_MASK) == 0)
2361 {
2362 /* Mod is 0 implies a signed 32-bit displacement with no base. */
2363 IEM_DISP_GET_S32_SX_U64(pVCpu, u64Disp, offDisp);
2364 }
2365 else
2366 {
2367 /* Mod is non-zero implies an 8-bit/32-bit displacement (handled below) with RBP or R13 as base. */
2368 iBaseReg = pVCpu->iem.s.uRexB ? X86_GREG_x13 : X86_GREG_xBP;
2369 }
2370 }
2371 }
2372 iBaseReg |= pVCpu->iem.s.uRexB;
2373
2374 /* Register + displacement. */
2375 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
2376 {
2377 case 0: /* Handled above */ break;
2378 case 1: IEM_DISP_GET_S8_SX_U64(pVCpu, u64Disp, offDisp); break;
2379 case 2: IEM_DISP_GET_S32_SX_U64(pVCpu, u64Disp, offDisp); break;
2380 default:
2381 {
2382 /* Register addressing, handled at the beginning. */
2383 AssertMsgFailed(("ModR/M %#x implies register addressing, memory addressing expected!", bRm));
2384 break;
2385 }
2386 }
2387 }
2388
2389 GCPtrDisp = fRipRelativeAddr ? pVCpu->cpum.GstCtx.rip + u64Disp : u64Disp;
2390 }
2391
2392 /*
2393 * The primary or secondary register operand is reported in iReg2 depending
2394 * on whether the primary operand is in read/write form.
2395 */
2396 uint8_t idxReg2;
2397 if (!VMXINSTRID_IS_MODRM_PRIMARY_OP_W(uInstrId))
2398 {
2399 idxReg2 = bRm & X86_MODRM_RM_MASK;
2400 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
2401 idxReg2 |= pVCpu->iem.s.uRexB;
2402 }
2403 else
2404 {
2405 idxReg2 = (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK;
2406 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
2407 idxReg2 |= pVCpu->iem.s.uRexReg;
2408 }
2409 ExitInstrInfo.All.u2Scaling = uScale;
2410 ExitInstrInfo.All.iReg1 = 0; /* Not applicable for memory addressing. */
2411 ExitInstrInfo.All.u3AddrSize = pVCpu->iem.s.enmEffAddrMode;
2412 ExitInstrInfo.All.fIsRegOperand = 0;
2413 ExitInstrInfo.All.uOperandSize = pVCpu->iem.s.enmEffOpSize;
2414 ExitInstrInfo.All.iSegReg = pVCpu->iem.s.iEffSeg;
2415 ExitInstrInfo.All.iIdxReg = iIdxReg;
2416 ExitInstrInfo.All.fIdxRegInvalid = !fIdxRegValid;
2417 ExitInstrInfo.All.iBaseReg = iBaseReg;
2418        ExitInstrInfo.All.fBaseRegInvalid = !fBaseRegValid;
2419 ExitInstrInfo.All.iReg2 = idxReg2;
2420 }
2421
2422 /*
2423 * Handle exceptions to the norm for certain instructions.
2424 * (e.g. some instructions convey an instruction identity in place of iReg2).
2425 */
2426 switch (uExitReason)
2427 {
2428 case VMX_EXIT_GDTR_IDTR_ACCESS:
2429 {
2430 Assert(VMXINSTRID_IS_VALID(uInstrId));
2431 Assert(VMXINSTRID_GET_ID(uInstrId) == (uInstrId & 0x3));
2432 ExitInstrInfo.GdtIdt.u2InstrId = VMXINSTRID_GET_ID(uInstrId);
2433 ExitInstrInfo.GdtIdt.u2Undef0 = 0;
2434 break;
2435 }
2436
2437 case VMX_EXIT_LDTR_TR_ACCESS:
2438 {
2439 Assert(VMXINSTRID_IS_VALID(uInstrId));
2440 Assert(VMXINSTRID_GET_ID(uInstrId) == (uInstrId & 0x3));
2441 ExitInstrInfo.LdtTr.u2InstrId = VMXINSTRID_GET_ID(uInstrId);
2442 ExitInstrInfo.LdtTr.u2Undef0 = 0;
2443 break;
2444 }
2445
2446 case VMX_EXIT_RDRAND:
2447 case VMX_EXIT_RDSEED:
2448 {
2449 Assert(VMXINSTRID_IS_VALID(uInstrId));
2450 Assert(VMXINSTRID_GET_ID(uInstrId) == (uInstrId & 0x3));
2451 Assert(GCPtrDisp == 0);
2452 Assert(ExitInstrInfo.RdrandRdseed.u2OperandSize != 3);
2453 ExitInstrInfo.RdrandRdseed.u3Undef0 = 0;
2454 ExitInstrInfo.RdrandRdseed.u4Undef0 = 0;
2455 ExitInstrInfo.RdrandRdseed.u19Undef0 = 0;
2456 break;
2457 }
2458 }
2459
2460 /* Update displacement and return the constructed VM-exit instruction information field. */
2461 if (pGCPtrDisp)
2462 *pGCPtrDisp = GCPtrDisp;
2463
2464 return ExitInstrInfo.u;
2465}
2466
2467
2468/**
2469 * VMX VM-exit handler.
2470 *
2471 * @returns Strict VBox status code.
2472 * @retval VINF_VMX_VMEXIT when the VM-exit is successful.
2473 * @retval VINF_EM_TRIPLE_FAULT when VM-exit is unsuccessful and leads to a
2474 * triple-fault.
2475 *
2476 * @param pVCpu The cross context virtual CPU structure.
2477 * @param uExitReason The VM-exit reason.
2478 * @param u64ExitQual The Exit qualification.
2479 *
2480 * @remarks We need not necessarily have completed VM-entry before a VM-exit is
2481 * called. Failures during VM-entry can cause VM-exits as well, so we
2482 * -cannot- assert we're in VMX non-root mode here.
2483 */
2484VBOXSTRICTRC iemVmxVmexit(PVMCPUCC pVCpu, uint32_t uExitReason, uint64_t u64ExitQual) RT_NOEXCEPT
2485{
2486# if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && !defined(IN_RING3)
2487 RT_NOREF3(pVCpu, uExitReason, u64ExitQual);
2488 AssertMsgFailed(("VM-exit should only be invoked from ring-3 when nested-guest executes only in ring-3!\n"));
2489 return VERR_IEM_IPE_7;
2490# else
2491 PVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
2492
2493 /* Just count this as an exit and be done with that. */
2494 pVCpu->iem.s.cPotentialExits++;
2495
2496 /*
2497 * Import all the guest-CPU state.
2498 *
2499 * HM on returning to guest execution would have to reset up a whole lot of state
2500 * anyway, (e.g., VM-entry/VM-exit controls) and we do not ever import a part of
2501 * the state and flag reloading the entire state on re-entry. So import the entire
2502 * state here, see HMNotifyVmxNstGstVmexit() for more comments.
2503 */
2504 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_ALL);
2505
2506 /*
2507 * Ensure VM-entry interruption information valid bit is cleared.
2508 *
2509 * We do it here on every VM-exit so that even premature VM-exits (e.g. those caused
2510 * by invalid-guest state or machine-check exceptions) also clear this bit.
2511 *
2512 * See Intel spec. 27.2 "Recording VM-exit Information And Updating VM-entry control fields".
2513 */
2514 if (VMX_ENTRY_INT_INFO_IS_VALID(pVmcs->u32EntryIntInfo))
2515 pVmcs->u32EntryIntInfo &= ~VMX_ENTRY_INT_INFO_VALID;
2516
2517 /*
2518 * Update the VM-exit reason and Exit qualification.
2519 * Other VMCS read-only data fields are expected to be updated by the caller already.
2520 */
2521 pVmcs->u32RoExitReason = uExitReason;
2522 pVmcs->u64RoExitQual.u = u64ExitQual;
2523
2524 Log2(("vmexit: reason=%u qual=%#RX64 cs:rip=%04x:%08RX64 cr0=%#RX64 cr3=%#RX64 cr4=%#RX64 eflags=%#RX32\n", uExitReason,
2525 pVmcs->u64RoExitQual.u, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.cr0,
2526 pVCpu->cpum.GstCtx.cr3, pVCpu->cpum.GstCtx.cr4, pVCpu->cpum.GstCtx.eflags.u));
2527
2528 /*
2529 * Update the IDT-vectoring information fields if the VM-exit is triggered during delivery of an event.
2530 * See Intel spec. 27.2.4 "Information for VM Exits During Event Delivery".
2531 */
2532 {
2533 uint8_t uVector;
2534 uint32_t fFlags;
2535 uint32_t uErrCode;
2536 bool const fInEventDelivery = IEMGetCurrentXcpt(pVCpu, &uVector, &fFlags, &uErrCode, NULL /* puCr2 */);
2537 if (fInEventDelivery)
2538 {
2539 /*
2540 * A VM-exit is not considered to occur during event delivery when the VM-exit is
2541 * caused by a triple-fault or the original event results in a double-fault that
2542 * causes the VM exit directly (exception bitmap). Therefore, we must not set the
2543 * original event information into the IDT-vectoring information fields.
2544 *
2545 * See Intel spec. 27.2.4 "Information for VM Exits During Event Delivery".
2546 */
2547 if ( uExitReason != VMX_EXIT_TRIPLE_FAULT
2548 && ( uExitReason != VMX_EXIT_XCPT_OR_NMI
2549 || !VMX_EXIT_INT_INFO_IS_XCPT_DF(pVmcs->u32RoExitIntInfo)))
2550 {
2551 uint8_t const uIdtVectoringType = iemVmxGetEventType(uVector, fFlags);
2552 uint8_t const fErrCodeValid = RT_BOOL(fFlags & IEM_XCPT_FLAGS_ERR);
2553 uint32_t const uIdtVectoringInfo = RT_BF_MAKE(VMX_BF_IDT_VECTORING_INFO_VECTOR, uVector)
2554 | RT_BF_MAKE(VMX_BF_IDT_VECTORING_INFO_TYPE, uIdtVectoringType)
2555 | RT_BF_MAKE(VMX_BF_IDT_VECTORING_INFO_ERR_CODE_VALID, fErrCodeValid)
2556 | RT_BF_MAKE(VMX_BF_IDT_VECTORING_INFO_VALID, 1);
2557 iemVmxVmcsSetIdtVectoringInfo(pVCpu, uIdtVectoringInfo);
2558 iemVmxVmcsSetIdtVectoringErrCode(pVCpu, uErrCode);
2559 Log2(("vmexit: idt_info=%#RX32 idt_err_code=%#RX32 cr2=%#RX64\n", uIdtVectoringInfo, uErrCode,
2560 pVCpu->cpum.GstCtx.cr2));
2561 }
2562 }
2563 }
2564
2565 /* The following VMCS fields should always be zero since we don't support injecting SMIs into a guest. */
2566 Assert(pVmcs->u64RoIoRcx.u == 0);
2567 Assert(pVmcs->u64RoIoRsi.u == 0);
2568 Assert(pVmcs->u64RoIoRdi.u == 0);
2569 Assert(pVmcs->u64RoIoRip.u == 0);
2570
2571 /*
2572 * Save the guest state back into the VMCS.
2573 * We only need to save the state when the VM-entry was successful.
2574 */
2575 bool const fVmentryFailed = VMX_EXIT_REASON_HAS_ENTRY_FAILED(uExitReason);
2576 if (!fVmentryFailed)
2577 {
2578 /* We should not cause an NMI-window/interrupt-window VM-exit when injecting events as part of VM-entry. */
2579 if (!CPUMIsGuestVmxInterceptEvents(&pVCpu->cpum.GstCtx))
2580 {
2581 Assert(uExitReason != VMX_EXIT_NMI_WINDOW);
2582 Assert(uExitReason != VMX_EXIT_INT_WINDOW);
2583 }
2584
2585 /* For exception or NMI VM-exits, the VM-exit interruption info. field must be valid. */
2586 Assert(uExitReason != VMX_EXIT_XCPT_OR_NMI || VMX_EXIT_INT_INFO_IS_VALID(pVmcs->u32RoExitIntInfo));
2587
2588 /* For external interrupts that occur while "acknowledge interrupt on exit" VM-exit is set,
2589 the VM-exit interruption info. field must be valid. */
2590 Assert( uExitReason != VMX_EXIT_EXT_INT
2591 || !(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_ACK_EXT_INT)
2592 || VMX_EXIT_INT_INFO_IS_VALID(pVmcs->u32RoExitIntInfo));
2593
2594 /*
2595 * If we support storing EFER.LMA into IA32e-mode guest field on VM-exit, we need to do that now.
2596 * See Intel spec. 27.2 "Recording VM-exit Information And Updating VM-entry Control".
2597 *
2598 * It is not clear from the Intel spec. if this is done only when VM-entry succeeds.
2599 * If a VM-exit happens before loading guest EFER, we risk restoring the host EFER.LMA
2600 * as the guest-CPU state would not have been modified. Hence for now, we do this only when
2601 * the VM-entry succeeded.
2602 */
2603 /** @todo r=ramshankar: Figure out if this bit gets set to host EFER.LMA on real
2604 * hardware when VM-exit fails during VM-entry (e.g. VERR_VMX_INVALID_GUEST_STATE). */
2605 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fVmxExitSaveEferLma)
2606 {
2607 if (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LMA)
2608 pVmcs->u32EntryCtls |= VMX_ENTRY_CTLS_IA32E_MODE_GUEST;
2609 else
2610 pVmcs->u32EntryCtls &= ~VMX_ENTRY_CTLS_IA32E_MODE_GUEST;
2611 }
2612
2613 /*
2614 * The rest of the high bits of the VM-exit reason are only relevant when the VM-exit
2615 * occurs in enclave mode/SMM which we don't support yet.
2616 *
2617 * If we ever add support for it, we can pass just the lower bits to the functions
2618 * below, till then an assert should suffice.
2619 */
2620 Assert(!RT_HI_U16(uExitReason));
2621
2622 /* Save the guest state into the VMCS and restore guest MSRs from the auto-store guest MSR area. */
2623 iemVmxVmexitSaveGuestState(pVCpu, uExitReason);
2624 int rc = iemVmxVmexitSaveGuestAutoMsrs(pVCpu, uExitReason);
2625 if (RT_SUCCESS(rc))
2626 { /* likely */ }
2627 else
2628 return iemVmxAbort(pVCpu, VMXABORT_SAVE_GUEST_MSRS);
2629
2630 /* Clear any saved NMI-blocking state so we don't assert on next VM-entry (if it was in effect on the previous one). */
2631 pVCpu->cpum.GstCtx.hwvirt.fSavedInhibit &= ~CPUMCTX_INHIBIT_NMI;
2632 }
2633 else
2634 {
2635 /* Restore the NMI-blocking state if VM-entry failed due to invalid guest state or while loading MSRs. */
2636 uint32_t const uExitReasonBasic = VMX_EXIT_REASON_BASIC(uExitReason);
2637 if ( uExitReasonBasic == VMX_EXIT_ERR_INVALID_GUEST_STATE
2638 || uExitReasonBasic == VMX_EXIT_ERR_MSR_LOAD)
2639 iemVmxVmexitRestoreNmiBlockingFF(pVCpu);
2640 }
2641
2642 /*
2643 * Stop any running VMX-preemption timer if necessary.
2644 */
2645 if (pVmcs->u32PinCtls & VMX_PIN_CTLS_PREEMPT_TIMER)
2646 CPUMStopGuestVmxPremptTimer(pVCpu);
2647
2648 /*
2649 * Clear the state of "NMI unblocked due to IRET" as otherwise we risk
2650 * reporting a stale state on a subsequent VM-exit. This state will be
2651 * re-established while emulating IRET in VMX non-root mode.
2652 */
2653 pVCpu->cpum.GstCtx.hwvirt.vmx.fNmiUnblockingIret = false;
2654
2655 /*
2656 * Clear any pending VMX nested-guest force-flags.
2657 * These force-flags have no effect on (outer) guest execution and will
2658 * be re-evaluated and setup on the next nested-guest VM-entry.
2659 */
2660 VMCPU_FF_CLEAR_MASK(pVCpu, VMCPU_FF_VMX_ALL_MASK);
2661
2662 /*
2663 * We're no longer in nested-guest execution mode.
2664 *
2665 * It is important to do this prior to loading the host state because
2666 * PGM looks at fInVmxNonRootMode to determine if it needs to perform
2667 * second-level address translation while switching to host CR3.
2668 */
2669 pVCpu->cpum.GstCtx.hwvirt.vmx.fInVmxNonRootMode = false;
2670
2671 /* Restore the host (outer guest) state. */
2672 VBOXSTRICTRC rcStrict = iemVmxVmexitLoadHostState(pVCpu, uExitReason);
2673 if (RT_SUCCESS(rcStrict))
2674 {
2675 Assert(rcStrict == VINF_SUCCESS);
2676 rcStrict = VINF_VMX_VMEXIT;
2677 }
2678 else
2679 Log(("vmexit: Loading host-state failed. uExitReason=%u rc=%Rrc\n", uExitReason, VBOXSTRICTRC_VAL(rcStrict)));
2680
2681 /*
2682 * Restore non-zero Secondary-processor based VM-execution controls
2683 * when the "activate secondary controls" bit was not set.
2684 */
2685 if (pVmcs->u32RestoreProcCtls2)
2686 {
2687 Assert(!(pVmcs->u32ProcCtls & VMX_PROC_CTLS_USE_SECONDARY_CTLS));
2688 pVmcs->u32ProcCtls2 = pVmcs->u32RestoreProcCtls2;
2689 pVmcs->u32RestoreProcCtls2 = 0;
2690 }
2691
2692 if (VM_IS_HM_ENABLED(pVCpu->CTX_SUFF(pVM)))
2693 {
2694 /* Notify HM that the current VMCS fields have been modified. */
2695 HMNotifyVmxNstGstCurrentVmcsChanged(pVCpu);
2696
2697 /* Notify HM that we've completed the VM-exit. */
2698 HMNotifyVmxNstGstVmexit(pVCpu);
2699 }
2700
2701# if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && defined(IN_RING3)
2702 /* Revert any IEM-only nested-guest execution policy before returning rcStrict. */
2703 Log(("vmexit: Disabling IEM-only EM execution policy!\n"));
2704 int rcSched = EMR3SetExecutionPolicy(pVCpu->CTX_SUFF(pVM)->pUVM, EMEXECPOLICY_IEM_ALL, false);
2705 if (rcSched != VINF_SUCCESS)
2706 iemSetPassUpStatus(pVCpu, rcSched);
2707# endif
2708 return rcStrict;
2709# endif
2710}
2711
2712
2713/**
2714 * VMX VM-exit handler for VM-exits due to instruction execution.
2715 *
2716 * This is intended for instructions where the caller provides all the relevant
2717 * VM-exit information.
2718 *
2719 * @returns Strict VBox status code.
2720 * @param pVCpu The cross context virtual CPU structure.
2721 * @param pExitInfo Pointer to the VM-exit information.
2722 */
2723static VBOXSTRICTRC iemVmxVmexitInstrWithInfo(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo) RT_NOEXCEPT
2724{
2725 /*
2726 * For instructions where any of the following fields are not applicable:
2727 * - Exit qualification must be cleared.
2728 * - VM-exit instruction info. is undefined.
2729 * - Guest-linear address is undefined.
2730 * - Guest-physical address is undefined.
2731 *
2732 * The VM-exit instruction length is mandatory for all VM-exits that are caused by
2733 * instruction execution. For VM-exits that are not due to instruction execution this
2734 * field is undefined.
2735 *
2736 * In our implementation in IEM, all undefined fields are generally cleared. However,
2737 * if the caller supplies information (from say the physical CPU directly) it is
2738 * then possible that the undefined fields are not cleared.
2739 *
2740 * See Intel spec. 27.2.1 "Basic VM-Exit Information".
2741 * See Intel spec. 27.2.4 "Information for VM Exits Due to Instruction Execution".
2742 */
2743 Assert(pExitInfo);
2744 AssertMsg(pExitInfo->uReason <= VMX_EXIT_MAX, ("uReason=%u\n", pExitInfo->uReason));
2745 AssertMsg(pExitInfo->cbInstr >= 1 && pExitInfo->cbInstr <= 15,
2746 ("uReason=%u cbInstr=%u\n", pExitInfo->uReason, pExitInfo->cbInstr));
2747
2748 /* Update all the relevant fields from the VM-exit instruction information struct. */
2749 iemVmxVmcsSetExitInstrInfo(pVCpu, pExitInfo->InstrInfo.u);
2750 iemVmxVmcsSetExitGuestLinearAddr(pVCpu, pExitInfo->u64GuestLinearAddr);
2751 iemVmxVmcsSetExitGuestPhysAddr(pVCpu, pExitInfo->u64GuestPhysAddr);
2752 iemVmxVmcsSetExitInstrLen(pVCpu, pExitInfo->cbInstr);
2753
2754 /* Perform the VM-exit. */
2755 return iemVmxVmexit(pVCpu, pExitInfo->uReason, pExitInfo->u64Qual);
2756}
2757
2758
2759/**
2760 * VMX VM-exit handler for VM-exits due to instruction execution.
2761 *
2762 * This is intended for instructions that only provide the VM-exit instruction
2763 * length.
2764 *
2765 * @param pVCpu The cross context virtual CPU structure.
2766 * @param uExitReason The VM-exit reason.
2767 * @param cbInstr The instruction length in bytes.
2768 */
2769VBOXSTRICTRC iemVmxVmexitInstr(PVMCPUCC pVCpu, uint32_t uExitReason, uint8_t cbInstr) RT_NOEXCEPT
2770{
2771#ifdef VBOX_STRICT
2772 /*
2773 * To prevent us from shooting ourselves in the foot.
2774 * The follow instructions should convey more than just the instruction length.
2775 */
2776 switch (uExitReason)
2777 {
2778 case VMX_EXIT_INVEPT:
2779 case VMX_EXIT_INVPCID:
2780 case VMX_EXIT_INVVPID:
2781 case VMX_EXIT_LDTR_TR_ACCESS:
2782 case VMX_EXIT_GDTR_IDTR_ACCESS:
2783 case VMX_EXIT_VMCLEAR:
2784 case VMX_EXIT_VMPTRLD:
2785 case VMX_EXIT_VMPTRST:
2786 case VMX_EXIT_VMREAD:
2787 case VMX_EXIT_VMWRITE:
2788 case VMX_EXIT_VMXON:
2789 case VMX_EXIT_XRSTORS:
2790 case VMX_EXIT_XSAVES:
2791 case VMX_EXIT_RDRAND:
2792 case VMX_EXIT_RDSEED:
2793 case VMX_EXIT_IO_INSTR:
2794 AssertMsgFailedReturn(("Use iemVmxVmexitInstrNeedsInfo for uExitReason=%u\n", uExitReason), VERR_IEM_IPE_5);
2795 break;
2796 }
2797#endif
2798
2799 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_INSTR_LEN(uExitReason, cbInstr);
2800 return iemVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
2801}
2802
2803
2804/**
2805 * Interface for HM and EM to emulate VM-exit due to a triple-fault.
2806 *
2807 * @returns Strict VBox status code.
2808 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2809 * @thread EMT(pVCpu)
2810 */
2811VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitTripleFault(PVMCPUCC pVCpu)
2812{
2813 VBOXSTRICTRC rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_TRIPLE_FAULT, 0 /* u64ExitQual */);
2814 Assert(!pVCpu->iem.s.cActiveMappings);
2815 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
2816}
2817
2818
2819/**
2820 * Interface for HM and EM to emulate VM-exit due to startup-IPI (SIPI).
2821 *
2822 * @returns Strict VBox status code.
2823 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2824 * @param uVector The SIPI vector.
2825 * @thread EMT(pVCpu)
2826 */
2827VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitStartupIpi(PVMCPUCC pVCpu, uint8_t uVector)
2828{
2829 VBOXSTRICTRC rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_SIPI, uVector);
2830 Assert(!pVCpu->iem.s.cActiveMappings);
2831 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
2832}
2833
2834
2835/**
2836 * Interface for HM and EM to emulate a VM-exit.
2837 *
2838 * If a specialized version of a VM-exit handler exists, that must be used instead.
2839 *
2840 * @returns Strict VBox status code.
2841 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2842 * @param uExitReason The VM-exit reason.
2843 * @param u64ExitQual The Exit qualification.
2844 * @thread EMT(pVCpu)
2845 */
2846VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexit(PVMCPUCC pVCpu, uint32_t uExitReason, uint64_t u64ExitQual)
2847{
2848 VBOXSTRICTRC rcStrict = iemVmxVmexit(pVCpu, uExitReason, u64ExitQual);
2849 Assert(!pVCpu->iem.s.cActiveMappings);
2850 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
2851}
2852
2853
2854/**
2855 * Interface for HM and EM to emulate a VM-exit due to an instruction.
2856 *
2857 * This is meant to be used for those instructions for which VMX provides
2858 * additional decoding information beyond just the instruction length.
2859 *
2860 * @returns Strict VBox status code.
2861 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2862 * @param pExitInfo Pointer to the VM-exit information.
2863 * @thread EMT(pVCpu)
2864 */
2865VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitInstrWithInfo(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo)
2866{
2867 VBOXSTRICTRC rcStrict = iemVmxVmexitInstrWithInfo(pVCpu, pExitInfo);
2868 Assert(!pVCpu->iem.s.cActiveMappings);
2869 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
2870}
2871
2872
2873/**
2874 * Interface for HM and EM to emulate a VM-exit due to an instruction.
2875 *
2876 * This is meant to be used for those instructions for which VMX provides only
2877 * the instruction length.
2878 *
2879 * @returns Strict VBox status code.
2880 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2881 * @param uExitReason The VM-exit reason.
2882 * @param cbInstr The instruction length in bytes.
2883 * @thread EMT(pVCpu)
2884 */
2885VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitInstr(PVMCPUCC pVCpu, uint32_t uExitReason, uint8_t cbInstr)
2886{
2887 VBOXSTRICTRC rcStrict = iemVmxVmexitInstr(pVCpu, uExitReason, cbInstr);
2888 Assert(!pVCpu->iem.s.cActiveMappings);
2889 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
2890}
2891
2892
2893/**
2894 * VMX VM-exit handler for VM-exits due to instruction execution.
2895 *
2896 * This is intended for instructions that have a ModR/M byte; it updates the
2897 * VM-exit instruction information and Exit qualification fields.
2898 *
2899 * @param pVCpu The cross context virtual CPU structure.
2900 * @param uExitReason The VM-exit reason.
2901 * @param uInstrId The instruction identity (VMXINSTRID_XXX).
2902 * @param cbInstr The instruction length in bytes.
2903 *
2904 * @remarks Do not use this for the INS/OUTS instructions.
2905 */
2906VBOXSTRICTRC iemVmxVmexitInstrNeedsInfo(PVMCPUCC pVCpu, uint32_t uExitReason, VMXINSTRID uInstrId, uint8_t cbInstr) RT_NOEXCEPT
2907{
2908#ifdef VBOX_STRICT
2909 /*
2910 * To prevent us from shooting ourselves in the foot:
2911 * only the following instructions convey the extra information supplied by this handler; all others must use their dedicated handlers.
2912 */
2913 switch (uExitReason)
2914 {
2915 case VMX_EXIT_INVEPT:
2916 case VMX_EXIT_INVPCID:
2917 case VMX_EXIT_INVVPID:
2918 case VMX_EXIT_LDTR_TR_ACCESS:
2919 case VMX_EXIT_GDTR_IDTR_ACCESS:
2920 case VMX_EXIT_VMCLEAR:
2921 case VMX_EXIT_VMPTRLD:
2922 case VMX_EXIT_VMPTRST:
2923 case VMX_EXIT_VMREAD:
2924 case VMX_EXIT_VMWRITE:
2925 case VMX_EXIT_VMXON:
2926 case VMX_EXIT_XRSTORS:
2927 case VMX_EXIT_XSAVES:
2928 case VMX_EXIT_RDRAND:
2929 case VMX_EXIT_RDSEED:
2930 break;
2931 default:
2932 AssertMsgFailedReturn(("Use instruction-specific handler\n"), VERR_IEM_IPE_5);
2933 break;
2934 }
2935#endif
2936
2937 /*
2938 * Update the Exit qualification field with displacement bytes.
2939 * See Intel spec. 27.2.1 "Basic VM-Exit Information".
2940 */
2941 /* Construct the VM-exit instruction information. */
2942 RTGCPTR GCPtrDisp;
2943 uint32_t const uInstrInfo = iemVmxGetExitInstrInfo(pVCpu, uExitReason, uInstrId, &GCPtrDisp);
2944
2945 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO(uExitReason, GCPtrDisp, uInstrInfo, cbInstr);
2946 return iemVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
2947}
2948
2949
2950/**
2951 * VMX VM-exit handler for VM-exits due to INVLPG.
2952 *
2953 * @returns Strict VBox status code.
2954 * @param pVCpu The cross context virtual CPU structure.
2955 * @param GCPtrPage The guest-linear address of the page being invalidated.
2956 * @param cbInstr The instruction length in bytes.
2957 */
2958VBOXSTRICTRC iemVmxVmexitInstrInvlpg(PVMCPUCC pVCpu, RTGCPTR GCPtrPage, uint8_t cbInstr) RT_NOEXCEPT
2959{
2960 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN(VMX_EXIT_INVLPG, GCPtrPage, cbInstr);
2961 Assert(IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode || !RT_HI_U32(ExitInfo.u64Qual));
2962 return iemVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
2963}
2964
2965
2966/**
2967 * VMX VM-exit handler for VM-exits due to LMSW.
2968 *
2969 * @returns Strict VBox status code.
2970 * @param pVCpu The cross context virtual CPU structure.
2971 * @param uGuestCr0 The current guest CR0.
2972 * @param pu16NewMsw The machine-status word specified in LMSW's source
2973 * operand. This will be updated depending on the VMX
2974 * guest/host CR0 mask if LMSW is not intercepted.
2975 * @param GCPtrEffDst The guest-linear address of the source operand in case
2976 * of a memory operand. For register operand, pass
2977 * NIL_RTGCPTR.
2978 * @param cbInstr The instruction length in bytes.
2979 */
2980VBOXSTRICTRC iemVmxVmexitInstrLmsw(PVMCPUCC pVCpu, uint32_t uGuestCr0, uint16_t *pu16NewMsw,
2981 RTGCPTR GCPtrEffDst, uint8_t cbInstr) RT_NOEXCEPT
2982{
2983 Assert(pu16NewMsw);
2984
2985 uint16_t const uNewMsw = *pu16NewMsw;
2986 if (CPUMIsGuestVmxLmswInterceptSet(&pVCpu->cpum.GstCtx, uNewMsw))
2987 {
2988 Log2(("lmsw: Guest intercept -> VM-exit\n"));
2989 bool const fMemOperand = RT_BOOL(GCPtrEffDst != NIL_RTGCPTR);
2990 VMXVEXITINFO ExitInfo
2991 = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN(VMX_EXIT_MOV_CRX,
2992 RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_REGISTER, 0) /* CR0 */
2993 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_LMSW_OP, fMemOperand)
2994 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_LMSW_DATA, uNewMsw)
2995 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_ACCESS, VMX_EXIT_QUAL_CRX_ACCESS_LMSW),
2996 cbInstr);
2997 if (fMemOperand)
2998 {
2999 Assert(IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode || !RT_HI_U32(GCPtrEffDst));
3000 ExitInfo.u64GuestLinearAddr = GCPtrEffDst;
3001 }
3002 return iemVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
3003 }
3004
3005 /*
3006 * If LMSW did not cause a VM-exit, any CR0 bits in the range 0:3 that are set in the
3007 * CR0 guest/host mask must be left unmodified.
3008 *
3009 * See Intel Spec. 25.3 "Changes To Instruction Behavior In VMX Non-root Operation".
3010 */
3011 uint32_t const fGstHostMask = pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u64Cr0Mask.u;
3012 uint32_t const fGstHostLmswMask = fGstHostMask & (X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS);
3013 *pu16NewMsw = (uGuestCr0 & fGstHostLmswMask) | (uNewMsw & ~fGstHostLmswMask);
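    /* Illustrative example with hypothetical values: if the CR0 guest/host mask owns only
       TS (0x8), the guest CR0 has TS set and the LMSW source operand is 0 (trying to clear
       TS), the line above yields *pu16NewMsw = 0x8, i.e. the host-owned TS bit is preserved
       while the guest-owned PE/MP/EM bits are taken from the source operand. */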
3014
3015 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
3016}
3017
3018
3019/**
3020 * VMX VM-exit handler for VM-exits due to CLTS.
3021 *
3022 * @returns Strict VBox status code.
3023 * @retval VINF_VMX_MODIFIES_BEHAVIOR if the CLTS instruction did not cause a
3024 * VM-exit but must not modify the guest CR0.TS bit.
3025 * @retval VINF_VMX_INTERCEPT_NOT_ACTIVE if the CLTS instruction did not cause a
3026 * VM-exit and modification to the guest CR0.TS bit is allowed (subject to
3027 * CR0 fixed bits in VMX operation).
3028 * @param pVCpu The cross context virtual CPU structure.
3029 * @param cbInstr The instruction length in bytes.
3030 */
3031VBOXSTRICTRC iemVmxVmexitInstrClts(PVMCPUCC pVCpu, uint8_t cbInstr) RT_NOEXCEPT
3032{
3033 /*
3034 * If CR0.TS is owned by the host:
3035 * - If CR0.TS is set in the read-shadow, we must cause a VM-exit.
3036 * - If CR0.TS is cleared in the read-shadow, no VM-exit is caused and the
3037 * CLTS instruction completes without clearing CR0.TS.
3038 *
3039 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
3040 */
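    /* For instance (hypothetical VMCS values): with a CR0 guest/host mask owning TS and a
       read-shadow with TS set, the block below builds a MOV-CRX VM-exit; with the shadow
       TS clear, CLTS retires without modifying CR0.TS (VINF_VMX_MODIFIES_BEHAVIOR). */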
3041 uint32_t const fGstHostMask = pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u64Cr0Mask.u;
3042 if (fGstHostMask & X86_CR0_TS)
3043 {
3044 if (pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u64Cr0ReadShadow.u & X86_CR0_TS)
3045 {
3046 Log2(("clts: Guest intercept -> VM-exit\n"));
3047 VMXVEXITINFO const ExitInfo
3048 = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN(VMX_EXIT_MOV_CRX,
3049 RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_REGISTER, 0) /* CR0 */
3050 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_ACCESS,
3051 VMX_EXIT_QUAL_CRX_ACCESS_CLTS),
3052 cbInstr);
3053 return iemVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
3054 }
3055 return VINF_VMX_MODIFIES_BEHAVIOR;
3056 }
3057
3058 /*
3059 * If CR0.TS is not owned by the host, the CLTS instruction operates normally
3060 * and may modify CR0.TS (subject to CR0 fixed bits in VMX operation).
3061 */
3062 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
3063}
3064
3065
3066/**
3067 * VMX VM-exit handler for VM-exits due to 'Mov CR0,GReg' and 'Mov CR4,GReg'
3068 * (CR0/CR4 write).
3069 *
3070 * @returns Strict VBox status code.
3071 * @param pVCpu The cross context virtual CPU structure.
3072 * @param iCrReg The control register (either CR0 or CR4).
3074 * @param puNewCrX Pointer to the new CR0/CR4 value. Will be updated if no
3075 * VM-exit is caused.
3076 * @param iGReg The general register from which the CR0/CR4 value is being
3077 * loaded.
3078 * @param cbInstr The instruction length in bytes.
3079 */
3080VBOXSTRICTRC iemVmxVmexitInstrMovToCr0Cr4(PVMCPUCC pVCpu, uint8_t iCrReg, uint64_t *puNewCrX,
3081 uint8_t iGReg, uint8_t cbInstr) RT_NOEXCEPT
3082{
3083 Assert(puNewCrX);
3084 Assert(iCrReg == 0 || iCrReg == 4);
3085 Assert(iGReg < X86_GREG_COUNT);
3086
3087 uint64_t const uNewCrX = *puNewCrX;
3088 if (CPUMIsGuestVmxMovToCr0Cr4InterceptSet(&pVCpu->cpum.GstCtx, iCrReg, uNewCrX))
3089 {
3090 Log2(("mov_Cr_Rd: (CR%u) Guest intercept -> VM-exit\n", iCrReg));
3091 VMXVEXITINFO const ExitInfo
3092 = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN(VMX_EXIT_MOV_CRX,
3093 RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_REGISTER, iCrReg)
3094 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_GENREG, iGReg)
3095 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_ACCESS, VMX_EXIT_QUAL_CRX_ACCESS_WRITE),
3096 cbInstr);
3097 return iemVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
3098 }
3099
3100 /*
3101 * If the Mov-to-CR0/CR4 did not cause a VM-exit, any bits owned by the host
3102 * must not be modified by the instruction.
3103 *
3104 * See Intel Spec. 25.3 "Changes To Instruction Behavior In VMX Non-root Operation".
3105 */
3106 uint64_t uGuestCrX;
3107 uint64_t fGstHostMask;
3108 if (iCrReg == 0)
3109 {
3110 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
3111 uGuestCrX = pVCpu->cpum.GstCtx.cr0;
3112 fGstHostMask = pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u64Cr0Mask.u;
3113 }
3114 else
3115 {
3116 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
3117 uGuestCrX = pVCpu->cpum.GstCtx.cr4;
3118 fGstHostMask = pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u64Cr4Mask.u;
3119 }
3120
3121 *puNewCrX = (uGuestCrX & fGstHostMask) | (*puNewCrX & ~fGstHostMask);
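    /* Illustrative example with hypothetical values: for iCrReg=0 with the host owning
       CR0.CD in the guest/host mask, a guest attempt to set CD is dropped by the line
       above; CD retains its current guest value while all guest-owned bits are taken
       from the new value supplied in *puNewCrX. */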
3122 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
3123}
3124
3125
3126/**
3127 * VMX VM-exit handler for VM-exits due to 'Mov GReg,CR3' (CR3 read).
3128 *
3129 * @returns VBox strict status code.
3130 * @param pVCpu The cross context virtual CPU structure.
3131 * @param iGReg The general register to which the CR3 value is being stored.
3132 * @param cbInstr The instruction length in bytes.
3133 */
3134VBOXSTRICTRC iemVmxVmexitInstrMovFromCr3(PVMCPUCC pVCpu, uint8_t iGReg, uint8_t cbInstr) RT_NOEXCEPT
3135{
3136 Assert(iGReg < X86_GREG_COUNT);
3137 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR3);
3138
3139 /*
3140 * If the CR3-store exiting control is set, we must cause a VM-exit.
3141 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
3142 */
3143 if (pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32ProcCtls & VMX_PROC_CTLS_CR3_STORE_EXIT)
3144 {
3145 Log2(("mov_Rd_Cr: (CR3) Guest intercept -> VM-exit\n"));
3146 VMXVEXITINFO const ExitInfo
3147 = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN(VMX_EXIT_MOV_CRX,
3148 RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_REGISTER, 3) /* CR3 */
3149 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_GENREG, iGReg)
3150 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_ACCESS, VMX_EXIT_QUAL_CRX_ACCESS_READ),
3151 cbInstr);
3152 return iemVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
3153 }
3154 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
3155}
3156
3157
3158/**
3159 * VMX VM-exit handler for VM-exits due to 'Mov CR3,GReg' (CR3 write).
3160 *
3161 * @returns VBox strict status code.
3162 * @param pVCpu The cross context virtual CPU structure.
3163 * @param uNewCr3 The new CR3 value.
3164 * @param iGReg The general register from which the CR3 value is being
3165 * loaded.
3166 * @param cbInstr The instruction length in bytes.
3167 */
3168VBOXSTRICTRC iemVmxVmexitInstrMovToCr3(PVMCPUCC pVCpu, uint64_t uNewCr3, uint8_t iGReg, uint8_t cbInstr) RT_NOEXCEPT
3169{
3170 Assert(iGReg < X86_GREG_COUNT);
3171
3172 /*
3173 * If the CR3-load exiting control is set and the new CR3 value does not
3174 * match any of the CR3-target values in the VMCS, we must cause a VM-exit.
3175 *
3176 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
3177 */
3178 if (CPUMIsGuestVmxMovToCr3InterceptSet(pVCpu, uNewCr3))
3179 {
3180 Log2(("mov_Cr_Rd: (CR3) Guest intercept -> VM-exit\n"));
3181 VMXVEXITINFO const ExitInfo
3182 = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN(VMX_EXIT_MOV_CRX,
3183 RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_REGISTER, 3) /* CR3 */
3184 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_GENREG, iGReg)
3185 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_ACCESS,
3186 VMX_EXIT_QUAL_CRX_ACCESS_WRITE),
3187 cbInstr);
3188 return iemVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
3189 }
3190 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
3191}
3192
3193
3194/**
3195 * VMX VM-exit handler for VM-exits due to 'Mov GReg,CR8' (CR8 read).
3196 *
3197 * @returns VBox strict status code.
3198 * @param pVCpu The cross context virtual CPU structure.
3199 * @param iGReg The general register to which the CR8 value is being stored.
3200 * @param cbInstr The instruction length in bytes.
3201 */
3202VBOXSTRICTRC iemVmxVmexitInstrMovFromCr8(PVMCPUCC pVCpu, uint8_t iGReg, uint8_t cbInstr) RT_NOEXCEPT
3203{
3204 Assert(iGReg < X86_GREG_COUNT);
3205
3206 /*
3207 * If the CR8-store exiting control is set, we must cause a VM-exit.
3208 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
3209 */
3210 if (pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32ProcCtls & VMX_PROC_CTLS_CR8_STORE_EXIT)
3211 {
3212 Log2(("mov_Rd_Cr: (CR8) Guest intercept -> VM-exit\n"));
3213 VMXVEXITINFO const ExitInfo
3214 = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN(VMX_EXIT_MOV_CRX,
3215 RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_REGISTER, 8) /* CR8 */
3216 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_GENREG, iGReg)
3217 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_ACCESS, VMX_EXIT_QUAL_CRX_ACCESS_READ),
3218 cbInstr);
3219 return iemVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
3220 }
3221 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
3222}
3223
3224
3225/**
3226 * VMX VM-exit handler for VM-exits due to 'Mov CR8,GReg' (CR8 write).
3227 *
3228 * @returns VBox strict status code.
3229 * @param pVCpu The cross context virtual CPU structure.
3230 * @param iGReg The general register from which the CR8 value is being
3231 * loaded.
3232 * @param cbInstr The instruction length in bytes.
3233 */
3234VBOXSTRICTRC iemVmxVmexitInstrMovToCr8(PVMCPUCC pVCpu, uint8_t iGReg, uint8_t cbInstr) RT_NOEXCEPT
3235{
3236 Assert(iGReg < X86_GREG_COUNT);
3237
3238 /*
3239 * If the CR8-load exiting control is set, we must cause a VM-exit.
3240 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
3241 */
3242 if (pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32ProcCtls & VMX_PROC_CTLS_CR8_LOAD_EXIT)
3243 {
3244 Log2(("mov_Cr_Rd: (CR8) Guest intercept -> VM-exit\n"));
3245 VMXVEXITINFO const ExitInfo
3246 = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN(VMX_EXIT_MOV_CRX,
3247 RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_REGISTER, 8) /* CR8 */
3248 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_GENREG, iGReg)
3249 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_ACCESS, VMX_EXIT_QUAL_CRX_ACCESS_WRITE),
3250 cbInstr);
3251 return iemVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
3252 }
3253 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
3254}
3255
3256
3257/**
3258 * VMX VM-exit handler for VM-exits due to 'Mov DRx,GReg' (DRx write) and 'Mov
3259 * GReg,DRx' (DRx read).
3260 *
3261 * @returns VBox strict status code.
3262 * @param pVCpu The cross context virtual CPU structure.
3263 * @param uInstrId The instruction identity (VMXINSTRID_MOV_TO_DRX or
3264 * VMXINSTRID_MOV_FROM_DRX).
3265 * @param iDrReg The debug register being accessed.
3266 * @param iGReg The general register to/from which the DRx value is being
3267 * stored/loaded.
3268 * @param cbInstr The instruction length in bytes.
3269 */
3270VBOXSTRICTRC iemVmxVmexitInstrMovDrX(PVMCPUCC pVCpu, VMXINSTRID uInstrId, uint8_t iDrReg,
3271 uint8_t iGReg, uint8_t cbInstr) RT_NOEXCEPT
3272{
3273 Assert(iDrReg <= 7);
3274 Assert(uInstrId == VMXINSTRID_MOV_TO_DRX || uInstrId == VMXINSTRID_MOV_FROM_DRX);
3275 Assert(iGReg < X86_GREG_COUNT);
3276
3277 if (pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32ProcCtls & VMX_PROC_CTLS_MOV_DR_EXIT)
3278 {
3279 VMXVEXITINFO const ExitInfo
3280 = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN(VMX_EXIT_MOV_DRX,
3281 RT_BF_MAKE(VMX_BF_EXIT_QUAL_DRX_REGISTER, iDrReg)
3282 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_DRX_GENREG, iGReg)
3283 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_DRX_DIRECTION,
3284 uInstrId == VMXINSTRID_MOV_TO_DRX
3285 ? VMX_EXIT_QUAL_DRX_DIRECTION_WRITE
3286 : VMX_EXIT_QUAL_DRX_DIRECTION_READ),
3287 cbInstr);
3288 return iemVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
3289 }
3290
3291 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
3292}
3293
3294
3295/**
3296 * VMX VM-exit handler for VM-exits due to I/O instructions (IN and OUT).
3297 *
3298 * @returns VBox strict status code.
3299 * @param pVCpu The cross context virtual CPU structure.
3300 * @param uInstrId The VM-exit instruction identity (VMXINSTRID_IO_IN or
3301 * VMXINSTRID_IO_OUT).
3302 * @param u16Port The I/O port being accessed.
3303 * @param fImm Whether the I/O port was encoded using an immediate operand
3304 * or the implicit DX register.
3305 * @param cbAccess The size of the I/O access in bytes (1, 2 or 4 bytes).
3306 * @param cbInstr The instruction length in bytes.
3307 */
3308VBOXSTRICTRC iemVmxVmexitInstrIo(PVMCPUCC pVCpu, VMXINSTRID uInstrId, uint16_t u16Port,
3309 bool fImm, uint8_t cbAccess, uint8_t cbInstr) RT_NOEXCEPT
3310{
3311 Assert(uInstrId == VMXINSTRID_IO_IN || uInstrId == VMXINSTRID_IO_OUT);
3312 Assert(cbAccess == 1 || cbAccess == 2 || cbAccess == 4);
3313
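    /* Example (hypothetical instruction): "out 80h, al" gives uInstrId=VMXINSTRID_IO_OUT,
       u16Port=0x80, fImm=true and cbAccess=1; if intercepted, the Exit qualification built
       below encodes an access width of 0 (1 byte), the OUT direction, immediate port
       encoding and port number 0x80. */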
3314 if (CPUMIsGuestVmxIoInterceptSet(pVCpu, u16Port, cbAccess))
3315 {
3316 VMXVEXITINFO const ExitInfo
3317 = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN(VMX_EXIT_IO_INSTR,
3318 RT_BF_MAKE(VMX_BF_EXIT_QUAL_IO_WIDTH, cbAccess - 1)
3319 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_IO_ENCODING, fImm)
3320 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_IO_PORT, u16Port)
3321 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_IO_DIRECTION,
3322 uInstrId == VMXINSTRID_IO_IN
3323 ? VMX_EXIT_QUAL_IO_DIRECTION_IN
3324 : VMX_EXIT_QUAL_IO_DIRECTION_OUT),
3325 cbInstr);
3326 return iemVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
3327 }
3328 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
3329}
3330
3331
3332/**
3333 * VMX VM-exit handler for VM-exits due to string I/O instructions (INS and OUTS).
3334 *
3335 * @returns VBox strict status code.
3336 * @param pVCpu The cross context virtual CPU structure.
3337 * @param uInstrId The VM-exit instruction identity (VMXINSTRID_IO_INS or
3338 * VMXINSTRID_IO_OUTS).
3339 * @param u16Port The I/O port being accessed.
3340 * @param cbAccess The size of the I/O access in bytes (1, 2 or 4 bytes).
3341 * @param fRep Whether the instruction has a REP prefix or not.
3342 * @param ExitInstrInfo The VM-exit instruction info. field.
3343 * @param cbInstr The instruction length in bytes.
3344 */
3345VBOXSTRICTRC iemVmxVmexitInstrStrIo(PVMCPUCC pVCpu, VMXINSTRID uInstrId, uint16_t u16Port, uint8_t cbAccess,
3346 bool fRep, VMXEXITINSTRINFO ExitInstrInfo, uint8_t cbInstr) RT_NOEXCEPT
3347{
3348 Assert(uInstrId == VMXINSTRID_IO_INS || uInstrId == VMXINSTRID_IO_OUTS);
3349 Assert(cbAccess == 1 || cbAccess == 2 || cbAccess == 4);
3350 Assert(ExitInstrInfo.StrIo.iSegReg < X86_SREG_COUNT);
3351 Assert(ExitInstrInfo.StrIo.u3AddrSize == 0 || ExitInstrInfo.StrIo.u3AddrSize == 1 || ExitInstrInfo.StrIo.u3AddrSize == 2);
3352 Assert(uInstrId != VMXINSTRID_IO_INS || ExitInstrInfo.StrIo.iSegReg == X86_SREG_ES);
3353
3354 if (CPUMIsGuestVmxIoInterceptSet(pVCpu, u16Port, cbAccess))
3355 {
3356 /*
3357 * Figure out the guest-linear address and the direction bit (INS/OUTS).
3358 */
3359 /** @todo r=ramshankar: Is there something in IEM that already does this? */
3360 static uint64_t const s_auAddrSizeMasks[] = { UINT64_C(0xffff), UINT64_C(0xffffffff), UINT64_C(0xffffffffffffffff) };
3361 uint8_t const iSegReg = ExitInstrInfo.StrIo.iSegReg;
3362 uint8_t const uAddrSize = ExitInstrInfo.StrIo.u3AddrSize;
3363 uint64_t const uAddrSizeMask = s_auAddrSizeMasks[uAddrSize];
3364
3365 uint32_t uDirection;
3366 uint64_t uGuestLinearAddr;
3367 if (uInstrId == VMXINSTRID_IO_INS)
3368 {
3369 uDirection = VMX_EXIT_QUAL_IO_DIRECTION_IN;
3370 uGuestLinearAddr = pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base + (pVCpu->cpum.GstCtx.rdi & uAddrSizeMask);
3371 }
3372 else
3373 {
3374 uDirection = VMX_EXIT_QUAL_IO_DIRECTION_OUT;
3375 uGuestLinearAddr = pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base + (pVCpu->cpum.GstCtx.rsi & uAddrSizeMask);
3376 }
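        /* Illustrative example with hypothetical register values: a 16-bit "rep outsb"
           with DS.base=0x10000 and SI=0x0234 uses the 0xffff address-size mask, so
           uGuestLinearAddr is computed above as 0x10234. */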
3377
3378 /*
3379 * If the segment is unusable, the guest-linear address is undefined.
3380 * We shall clear it for consistency.
3381 *
3382 * See Intel spec. 27.2.1 "Basic VM-Exit Information".
3383 */
3384 if (pVCpu->cpum.GstCtx.aSRegs[iSegReg].Attr.n.u1Unusable)
3385 uGuestLinearAddr = 0;
3386
3387 VMXVEXITINFO const ExitInfo
3388 = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_AND_LIN_ADDR(VMX_EXIT_IO_INSTR,
3389 RT_BF_MAKE(VMX_BF_EXIT_QUAL_IO_WIDTH, cbAccess - 1)
3390 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_IO_DIRECTION, uDirection)
3391 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_IO_IS_STRING, 1)
3392 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_IO_IS_REP, fRep)
3393 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_IO_ENCODING,
3394 VMX_EXIT_QUAL_IO_ENCODING_DX)
3395 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_IO_PORT, u16Port),
3396 IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fVmxInsOutInfo
3397 ? ExitInstrInfo.u : 0,
3398 cbInstr,
3399 uGuestLinearAddr);
3400 return iemVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
3401 }
3402
3403 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
3404}
3405
3406
3407/**
3408 * VMX VM-exit handler for VM-exits due to MWAIT.
3409 *
3410 * @returns VBox strict status code.
3411 * @param pVCpu The cross context virtual CPU structure.
3412 * @param fMonitorHwArmed Whether the address-range monitor hardware is armed.
3413 * @param cbInstr The instruction length in bytes.
3414 */
3415VBOXSTRICTRC iemVmxVmexitInstrMwait(PVMCPUCC pVCpu, bool fMonitorHwArmed, uint8_t cbInstr) RT_NOEXCEPT
3416{
3417 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN(VMX_EXIT_MWAIT, fMonitorHwArmed, cbInstr);
3418 return iemVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
3419}
3420
3421
3422/**
3423 * VMX VM-exit handler for VM-exits due to PAUSE.
3424 *
3425 * @returns VBox strict status code.
3426 * @param pVCpu The cross context virtual CPU structure.
3427 * @param cbInstr The instruction length in bytes.
3428 */
3429static VBOXSTRICTRC iemVmxVmexitInstrPause(PVMCPUCC pVCpu, uint8_t cbInstr) RT_NOEXCEPT
3430{
3431 PCVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
3432
3433 /*
3434 * The PAUSE VM-exit is controlled by the "PAUSE exiting" control and the
3435 * "PAUSE-loop exiting" control.
3436 *
3437 * The PLE-Gap is the maximum number of TSC ticks between two successive executions of
3438 * the PAUSE instruction before we cause a VM-exit. The PLE-Window is the maximum amount
3439 * of TSC ticks the guest is allowed to execute in a pause loop before we must cause
3440 * a VM-exit.
3441 *
3442 * See Intel spec. 24.6.13 "Controls for PAUSE-Loop Exiting".
3443 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
3444 */
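    /* Worked example with hypothetical VMCS values: with a PLE-Gap of 128 TSC ticks and a
       PLE-Window of 4096 ticks, PAUSE instructions executed more than 128 ticks apart keep
       restarting the window below; once consecutive PAUSEs stay within the gap and more
       than 4096 ticks have elapsed since the first one, fIntercept is set and a PAUSE
       VM-exit is raised. */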
3445 bool fIntercept = false;
3446 if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_PAUSE_EXIT)
3447 fIntercept = true;
3448 else if ( (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_PAUSE_LOOP_EXIT)
3449 && IEM_GET_CPL(pVCpu) == 0)
3450 {
3451 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_HWVIRT);
3452
3453 /*
3454 * A previous-PAUSE-tick value of 0 is used to identify the first
3455 * execution of a PAUSE instruction after VM-entry at CPL 0. We must
3456 * consider this to be the first execution of PAUSE in a loop according
3457 * to the Intel spec.
3458 *
3459 * For all subsequent recordings of the previous-PAUSE-tick we ensure that
3460 * it cannot be zero by OR'ing in 1, ruling out TSC wrap-around cases at 0.
3461 */
3462 uint64_t *puFirstPauseLoopTick = &pVCpu->cpum.GstCtx.hwvirt.vmx.uFirstPauseLoopTick;
3463 uint64_t *puPrevPauseTick = &pVCpu->cpum.GstCtx.hwvirt.vmx.uPrevPauseTick;
3464 uint64_t const uTick = TMCpuTickGet(pVCpu);
3465 uint32_t const uPleGap = pVmcs->u32PleGap;
3466 uint32_t const uPleWindow = pVmcs->u32PleWindow;
3467 if ( *puPrevPauseTick == 0
3468 || uTick - *puPrevPauseTick > uPleGap)
3469 *puFirstPauseLoopTick = uTick;
3470 else if (uTick - *puFirstPauseLoopTick > uPleWindow)
3471 fIntercept = true;
3472
3473 *puPrevPauseTick = uTick | 1;
3474 }
3475
3476 if (fIntercept)
3477 return iemVmxVmexitInstr(pVCpu, VMX_EXIT_PAUSE, cbInstr);
3478
3479 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
3480}
3481
3482
3483/**
3484 * VMX VM-exit handler for VM-exits due to task switches.
3485 *
3486 * @returns VBox strict status code.
3487 * @param pVCpu The cross context virtual CPU structure.
3488 * @param enmTaskSwitch The cause of the task switch.
3489 * @param SelNewTss The selector of the new TSS.
3490 * @param cbInstr The instruction length in bytes.
3491 */
3492VBOXSTRICTRC iemVmxVmexitTaskSwitch(PVMCPUCC pVCpu, IEMTASKSWITCH enmTaskSwitch, RTSEL SelNewTss, uint8_t cbInstr) RT_NOEXCEPT
3493{
3494 /*
3495 * Task-switch VM-exits are unconditional and provide the Exit qualification.
3496 *
3497 * If the task switch is caused by execution of the CALL, IRET or JMP
3498 * instruction, or by delivery of an exception generated by one of these
3499 * instructions leading to a task switch through a task gate in the IDT, we
3500 * need to provide the VM-exit instruction length. Any other means of invoking
3501 * a task-switch VM-exit leaves the VM-exit instruction length field undefined.
3502 *
3503 * See Intel spec. 25.2 "Other Causes Of VM Exits".
3504 * See Intel spec. 27.2.4 "Information for VM Exits Due to Instruction Execution".
3505 */
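    /* Example (hypothetical): a software interrupt delivered through a task gate in the
       IDT arrives here as IEMTASKSWITCH_INT_XCPT and is encoded below as the IDT task
       switch source, with the new TSS selector placed in the Exit qualification. */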
3506 Assert(cbInstr <= 15);
3507
3508 uint8_t uType;
3509 switch (enmTaskSwitch)
3510 {
3511 case IEMTASKSWITCH_CALL: uType = VMX_EXIT_QUAL_TASK_SWITCH_TYPE_CALL; break;
3512 case IEMTASKSWITCH_IRET: uType = VMX_EXIT_QUAL_TASK_SWITCH_TYPE_IRET; break;
3513 case IEMTASKSWITCH_JUMP: uType = VMX_EXIT_QUAL_TASK_SWITCH_TYPE_JMP; break;
3514 case IEMTASKSWITCH_INT_XCPT: uType = VMX_EXIT_QUAL_TASK_SWITCH_TYPE_IDT; break;
3515 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3516 }
3517
3518 uint64_t const u64ExitQual = RT_BF_MAKE(VMX_BF_EXIT_QUAL_TASK_SWITCH_NEW_TSS, SelNewTss)
3519 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_TASK_SWITCH_SOURCE, uType);
3520 iemVmxVmcsSetExitInstrLen(pVCpu, cbInstr);
3521 return iemVmxVmexit(pVCpu, VMX_EXIT_TASK_SWITCH, u64ExitQual);
3522}
3523
3524
3525/**
3526 * VMX VM-exit handler for trap-like VM-exits.
3527 *
3528 * @returns VBox strict status code.
3529 * @param pVCpu The cross context virtual CPU structure.
3530 * @param pExitInfo Pointer to the VM-exit information.
3532 */
3533static VBOXSTRICTRC iemVmxVmexitTrapLikeWithInfo(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo) RT_NOEXCEPT
3534{
3535 Assert(VMXIsVmexitTrapLike(pExitInfo->uReason));
3536 iemVmxVmcsSetGuestPendingDbgXcpts(pVCpu, pExitInfo->u64GuestPendingDbgXcpts);
3537 return iemVmxVmexit(pVCpu, pExitInfo->uReason, pExitInfo->u64Qual);
3538}
3539
3540
3541/**
3542 * Interface for HM and EM to emulate a trap-like VM-exit (MTF, APIC-write,
3543 * Virtualized-EOI, TPR-below threshold).
3544 *
3545 * @returns Strict VBox status code.
3546 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
3547 * @param pExitInfo Pointer to the VM-exit information.
3548 * @thread EMT(pVCpu)
3549 */
3550VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitTrapLike(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo)
3551{
3552 Assert(pExitInfo);
3553 VBOXSTRICTRC rcStrict = iemVmxVmexitTrapLikeWithInfo(pVCpu, pExitInfo);
3554 Assert(!pVCpu->iem.s.cActiveMappings);
3555 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
3556}
3557
3558
3559/**
3560 * VMX VM-exit handler for VM-exits due to task switches.
3561 *
3562 * This is intended for task switches where the caller provides all the relevant
3563 * VM-exit information.
3564 *
3565 * @returns VBox strict status code.
3566 * @param pVCpu The cross context virtual CPU structure.
3567 * @param pExitInfo Pointer to the VM-exit information.
3568 * @param pExitEventInfo Pointer to the VM-exit event information.
3569 */
3570static VBOXSTRICTRC iemVmxVmexitTaskSwitchWithInfo(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo,
3571 PCVMXVEXITEVENTINFO pExitEventInfo) RT_NOEXCEPT
3572{
3573 Assert(pExitInfo->uReason == VMX_EXIT_TASK_SWITCH);
3574 iemVmxVmcsSetExitInstrLen(pVCpu, pExitInfo->cbInstr);
3575 iemVmxVmcsSetIdtVectoringInfo(pVCpu, pExitEventInfo->uIdtVectoringInfo);
3576 iemVmxVmcsSetIdtVectoringErrCode(pVCpu, pExitEventInfo->uIdtVectoringErrCode);
3577 return iemVmxVmexit(pVCpu, VMX_EXIT_TASK_SWITCH, pExitInfo->u64Qual);
3578}
3579
3580
3581/**
3582 * Interface for HM and EM to emulate a VM-exit due to a task switch.
3583 *
3584 * @returns Strict VBox status code.
3585 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
3586 * @param pExitInfo Pointer to the VM-exit information.
3587 * @param pExitEventInfo Pointer to the VM-exit event information.
3588 * @thread EMT(pVCpu)
3589 */
3590VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitTaskSwitch(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo, PCVMXVEXITEVENTINFO pExitEventInfo)
3591{
3592 Assert(pExitInfo);
3593 Assert(pExitEventInfo);
3594 Assert(pExitInfo->uReason == VMX_EXIT_TASK_SWITCH);
3595 VBOXSTRICTRC rcStrict = iemVmxVmexitTaskSwitchWithInfo(pVCpu, pExitInfo, pExitEventInfo);
3596 Assert(!pVCpu->iem.s.cActiveMappings);
3597 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
3598}
3599
3600
3601/**
3602 * VMX VM-exit handler for VM-exits due to expiring of the preemption timer.
3603 *
3604 * @returns VBox strict status code.
3605 * @param pVCpu The cross context virtual CPU structure.
3606 */
3607VBOXSTRICTRC iemVmxVmexitPreemptTimer(PVMCPUCC pVCpu) RT_NOEXCEPT
3608{
3609 Assert(VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER));
3610 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32PinCtls & VMX_PIN_CTLS_PREEMPT_TIMER);
3611
3612 /* Import the hardware virtualization state (for nested-guest VM-entry TSC-tick). */
3613 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_HWVIRT);
3614
3615 /* Save the VMX-preemption timer value (of 0) back in to the VMCS if the CPU supports this feature. */
3616 if (pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32ExitCtls & VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER)
3617 pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32PreemptTimer = 0;
3618
3619 /* Cause the VMX-preemption timer VM-exit. The Exit qualification MBZ. */
3620 return iemVmxVmexit(pVCpu, VMX_EXIT_PREEMPT_TIMER, 0 /* u64ExitQual */);
3621}
3622
3623
3624/**
3625 * Interface for HM and EM to emulate VM-exit due to expiry of the preemption timer.
3626 *
3627 * @returns Strict VBox status code.
3628 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
3629 * @thread EMT(pVCpu)
3630 */
3631VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitPreemptTimer(PVMCPUCC pVCpu)
3632{
3633 VBOXSTRICTRC rcStrict = iemVmxVmexitPreemptTimer(pVCpu);
3634 Assert(!pVCpu->iem.s.cActiveMappings);
3635 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
3636}
3637
3638
3639/**
3640 * VMX VM-exit handler for VM-exits due to external interrupts.
3641 *
3642 * @returns VBox strict status code.
3643 * @param pVCpu The cross context virtual CPU structure.
3644 * @param uVector The external interrupt vector (pass 0 if the interrupt
3645 * is still pending since we typically won't know the
3646 * vector).
3647 * @param fIntPending Whether the external interrupt is pending or
3648 * acknowledged in the interrupt controller.
3649 */
3650static VBOXSTRICTRC iemVmxVmexitExtInt(PVMCPUCC pVCpu, uint8_t uVector, bool fIntPending) RT_NOEXCEPT
3651{
3652 Assert(!fIntPending || uVector == 0);
3653
3654 /* The VM-exit is subject to "External interrupt exiting" being set. */
3655 if (pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32PinCtls & VMX_PIN_CTLS_EXT_INT_EXIT)
3656 {
3657 if (fIntPending)
3658 {
3659 /*
3660 * If the interrupt is pending and we don't need to acknowledge the
3661 * interrupt on VM-exit, cause the VM-exit immediately.
3662 *
3663 * See Intel spec 25.2 "Other Causes Of VM Exits".
3664 */
3665 if (!(pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32ExitCtls & VMX_EXIT_CTLS_ACK_EXT_INT))
3666 return iemVmxVmexit(pVCpu, VMX_EXIT_EXT_INT, 0 /* u64ExitQual */);
3667
3668 /*
3669 * If the interrupt is pending and we -do- need to acknowledge the interrupt
3670 * on VM-exit, postpone the VM-exit until the interrupt controller has
3671 * acknowledged that the interrupt has been consumed. Callers must then call
3672 * us again after retrieving the vector, this time with fIntPending set to false.
3673 */
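        /* A typical caller flow (sketch, not the only valid sequence): call once with
           fIntPending=true; on getting VINF_VMX_INTERCEPT_NOT_ACTIVE here, acknowledge the
           interrupt with the interrupt controller to obtain the vector, then call again
           with fIntPending=false so the vector can be recorded below. */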
3674 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
3675 }
3676
3677 /*
3678 * If the interrupt is no longer pending (i.e. it has been acknowledged) and the
3679 * "External interrupt exiting" and "Acknowledge interrupt on VM-exit" controls are
3680 * both set, we need to record the vector of the external interrupt in the
3681 * VM-exit interruption information field. Otherwise, mark this field as invalid.
3682 *
3683 * See Intel spec. 27.2.2 "Information for VM Exits Due to Vectored Events".
3684 */
3685 uint32_t uExitIntInfo;
3686 if (pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32ExitCtls & VMX_EXIT_CTLS_ACK_EXT_INT)
3687 {
3688 bool const fNmiUnblocking = pVCpu->cpum.GstCtx.hwvirt.vmx.fNmiUnblockingIret;
3689 uExitIntInfo = RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_VECTOR, uVector)
3690 | RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_EXT_INT)
3691 | RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_NMI_UNBLOCK_IRET, fNmiUnblocking)
3692 | RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_VALID, 1);
3693 }
3694 else
3695 uExitIntInfo = 0;
3696 iemVmxVmcsSetExitIntInfo(pVCpu, uExitIntInfo);
3697
3698 /*
3699 * Cause the VM-exit whether or not the vector has been stored
3700 * in the VM-exit interruption-information field.
3701 */
3702 return iemVmxVmexit(pVCpu, VMX_EXIT_EXT_INT, 0 /* u64ExitQual */);
3703 }
3704
3705 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
3706}
3707
3708
3709/**
3710 * Interface for HM and EM to emulate VM-exit due to external interrupts.
3711 *
3712 * @returns Strict VBox status code.
3713 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
3714 * @param uVector The external interrupt vector (pass 0 if the external
3715 * interrupt is still pending).
3716 * @param fIntPending Whether the external interrupt is pending or
3717 * acknowledged in the interrupt controller.
3718 * @thread EMT(pVCpu)
3719 */
3720VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitExtInt(PVMCPUCC pVCpu, uint8_t uVector, bool fIntPending)
3721{
3722 VBOXSTRICTRC rcStrict = iemVmxVmexitExtInt(pVCpu, uVector, fIntPending);
3723 Assert(!pVCpu->iem.s.cActiveMappings);
3724 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
3725}
3726
3727
3728/**
3729 * VMX VM-exit handler for VM-exits due to a double fault caused during delivery of
3730 * an event.
3731 *
3732 * @returns VBox strict status code.
3733 * @param pVCpu The cross context virtual CPU structure.
3734 */
3735VBOXSTRICTRC iemVmxVmexitEventDoubleFault(PVMCPUCC pVCpu) RT_NOEXCEPT
3736{
3737 uint32_t const fXcptBitmap = pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32XcptBitmap;
3738 if (fXcptBitmap & RT_BIT(X86_XCPT_DF))
3739 {
3740 /*
3741 * The NMI-unblocking due to IRET field need not be set for double faults.
3742 * See Intel spec. 31.7.1.2 "Resuming Guest Software After Handling An Exception".
3743 */
3744 uint32_t const uExitIntInfo = RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_VECTOR, X86_XCPT_DF)
3745 | RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3746 | RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_ERR_CODE_VALID, 1)
3747 | RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_NMI_UNBLOCK_IRET, 0)
3748 | RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_VALID, 1);
3749 iemVmxVmcsSetExitIntInfo(pVCpu, uExitIntInfo);
3750 return iemVmxVmexit(pVCpu, VMX_EXIT_XCPT_OR_NMI, 0 /* u64ExitQual */);
3751 }
3752
3753 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
3754}
3755
3756
3757/**
3758 * VMX VM-exit handler for VM-exits due to delivery of an event.
3759 *
3760 * This is intended for VM-exit due to exceptions or NMIs where the caller provides
3761 * all the relevant VM-exit information.
3762 *
3763 * @returns VBox strict status code.
3764 * @param pVCpu The cross context virtual CPU structure.
3765 * @param pExitInfo Pointer to the VM-exit information.
3766 * @param pExitEventInfo Pointer to the VM-exit event information.
3767 */
3768static VBOXSTRICTRC iemVmxVmexitEventWithInfo(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo, PCVMXVEXITEVENTINFO pExitEventInfo) RT_NOEXCEPT
3769{
3770 Assert(pExitInfo);
3771 Assert(pExitEventInfo);
3772 Assert(pExitInfo->uReason == VMX_EXIT_XCPT_OR_NMI);
3773 Assert(VMX_EXIT_INT_INFO_IS_VALID(pExitEventInfo->uExitIntInfo));
3774
3775 iemVmxVmcsSetExitInstrLen(pVCpu, pExitInfo->cbInstr);
3776 iemVmxVmcsSetExitIntInfo(pVCpu, pExitEventInfo->uExitIntInfo);
3777 iemVmxVmcsSetExitIntErrCode(pVCpu, pExitEventInfo->uExitIntErrCode);
3778 iemVmxVmcsSetIdtVectoringInfo(pVCpu, pExitEventInfo->uIdtVectoringInfo);
3779 iemVmxVmcsSetIdtVectoringErrCode(pVCpu, pExitEventInfo->uIdtVectoringErrCode);
3780 return iemVmxVmexit(pVCpu, VMX_EXIT_XCPT_OR_NMI, pExitInfo->u64Qual);
3781}
3782
3783
3784/**
3785 * Interface for HM and EM to emulate VM-exit due to NMIs.
3786 *
3787 * @returns Strict VBox status code.
3788 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
3789 * @thread EMT(pVCpu)
3790 */
3791VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitXcptNmi(PVMCPUCC pVCpu)
3792{
3793 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_ONLY_REASON(VMX_EXIT_XCPT_OR_NMI);
3794 VMXVEXITEVENTINFO const ExitEventInfo = VMXVEXITEVENTINFO_INIT_ONLY_INT( RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_VALID, 1)
3795 | RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_TYPE,
3796 VMX_EXIT_INT_INFO_TYPE_NMI)
3797 | RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_VECTOR,
3798 X86_XCPT_NMI),
3799 0);
3800 VBOXSTRICTRC rcStrict = iemVmxVmexitEventWithInfo(pVCpu, &ExitInfo, &ExitEventInfo);
3801 Assert(!pVCpu->iem.s.cActiveMappings);
3802 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
3803}
3804
3805
3806/**
3807 * Interface for HM and EM to emulate VM-exit due to exceptions.
3808 *
3809 * Exceptions include NMIs, software exceptions (those generated by INT3 or
3810 * INTO) and privileged software exceptions (those generated by INT1/ICEBP).
3811 *
3812 * @returns Strict VBox status code.
3813 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
3814 * @param pExitInfo Pointer to the VM-exit information.
3815 * @param pExitEventInfo Pointer to the VM-exit event information.
3816 * @thread EMT(pVCpu)
3817 */
3818VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitXcpt(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo, PCVMXVEXITEVENTINFO pExitEventInfo)
3819{
3820 Assert(pExitInfo);
3821 Assert(pExitEventInfo);
3822 VBOXSTRICTRC rcStrict = iemVmxVmexitEventWithInfo(pVCpu, pExitInfo, pExitEventInfo);
3823 Assert(!pVCpu->iem.s.cActiveMappings);
3824 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
3825}
3826
3827
3828/**
3829 * VMX VM-exit handler for VM-exits due to delivery of an event.
3830 *
3831 * @returns VBox strict status code.
3832 * @param pVCpu The cross context virtual CPU structure.
3833 * @param uVector The interrupt / exception vector.
3834 * @param fFlags The flags (see IEM_XCPT_FLAGS_XXX).
3835 * @param uErrCode The error code associated with the event.
3836 * @param uCr2 The CR2 value in case of a \#PF exception.
3837 * @param cbInstr The instruction length in bytes.
3838 */
3839VBOXSTRICTRC iemVmxVmexitEvent(PVMCPUCC pVCpu, uint8_t uVector, uint32_t fFlags, uint32_t uErrCode,
3840 uint64_t uCr2, uint8_t cbInstr) RT_NOEXCEPT
3841{
3842 /*
3843 * If the event is being injected as part of VM-entry, it is -not- subject to event
3844 * intercepts in the nested-guest. However, secondary exceptions that occur during
3845 * injection of any event -are- subject to event interception.
3846 *
3847 * See Intel spec. 26.5.1.2 "VM Exits During Event Injection".
3848 */
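    /* For instance (illustrative): a #GP injected via VM-entry event injection does not
       itself cause a VM-exit in this function, but a #PF raised while delivering that #GP
       is re-evaluated against the exception bitmap once interception is re-enabled below. */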
3849 if (!CPUMIsGuestVmxInterceptEvents(&pVCpu->cpum.GstCtx))
3850 {
3851 /*
3852 * If the event is a virtual-NMI (which is an NMI being injected during VM-entry),
3853 * virtual-NMI blocking must be put into effect rather than physical NMI blocking.
3854 *
3855 * See Intel spec. 24.6.1 "Pin-Based VM-Execution Controls".
3856 */
3857 if ( uVector == X86_XCPT_NMI
3858 && (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3859 && (pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32PinCtls & VMX_PIN_CTLS_VIRT_NMI))
3860 pVCpu->cpum.GstCtx.hwvirt.vmx.fVirtNmiBlocking = true;
3861 else
3862 Assert(!pVCpu->cpum.GstCtx.hwvirt.vmx.fVirtNmiBlocking);
3863
3864 CPUMSetGuestVmxInterceptEvents(&pVCpu->cpum.GstCtx, true);
3865 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
3866 }
3867
3868 /*
3869 * We are injecting an external interrupt; check whether we need to cause a VM-exit now.
3870 * If not, the caller will continue delivery of the external interrupt as it would
3871 * normally. The interrupt is no longer pending in the interrupt controller at this
3872 * point.
3873 */
3874 if (fFlags & IEM_XCPT_FLAGS_T_EXT_INT)
3875 {
3876 Assert(!VMX_IDT_VECTORING_INFO_IS_VALID(pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32RoIdtVectoringInfo));
3877 return iemVmxVmexitExtInt(pVCpu, uVector, false /* fIntPending */);
3878 }
3879
3880 /*
3881 * Evaluate intercepts for hardware exceptions, software exceptions (#BP, #OF),
3882 * and privileged software exceptions (#DB generated by INT1/ICEBP) and software
3883 * interrupts.
3884 */
3885 Assert(fFlags & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_T_SOFT_INT));
3886 bool fIntercept;
3887 if ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3888 || (fFlags & (IEM_XCPT_FLAGS_BP_INSTR | IEM_XCPT_FLAGS_OF_INSTR | IEM_XCPT_FLAGS_ICEBP_INSTR)))
3889 fIntercept = CPUMIsGuestVmxXcptInterceptSet(&pVCpu->cpum.GstCtx, uVector, uErrCode);
3890 else
3891 {
3892 /* Software interrupts cannot be intercepted and therefore do not cause a VM-exit. */
3893 fIntercept = false;
3894 }
3895
3896 /*
3897 * Now that we've determined whether the event causes a VM-exit, we need to construct the
3898 * relevant VM-exit information and cause the VM-exit.
3899 */
3900 if (fIntercept)
3901 {
3902 Assert(!(fFlags & IEM_XCPT_FLAGS_T_EXT_INT));
3903
3904 /* Construct the rest of the event related information fields and cause the VM-exit. */
3905 uint64_t u64ExitQual;
3906 if (uVector == X86_XCPT_PF)
3907 {
3908 Assert(fFlags & IEM_XCPT_FLAGS_CR2);
3909 u64ExitQual = uCr2;
3910 }
3911 else if (uVector == X86_XCPT_DB)
3912 {
3913 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR6);
3914 u64ExitQual = pVCpu->cpum.GstCtx.dr[6] & VMX_VMCS_EXIT_QUAL_VALID_MASK;
3915 }
3916 else
3917 u64ExitQual = 0;
3918
3919 uint8_t const fNmiUnblocking = pVCpu->cpum.GstCtx.hwvirt.vmx.fNmiUnblockingIret;
3920 bool const fErrCodeValid = RT_BOOL(fFlags & IEM_XCPT_FLAGS_ERR);
3921 uint8_t const uIntInfoType = iemVmxGetEventType(uVector, fFlags);
3922 uint32_t const uExitIntInfo = RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_VECTOR, uVector)
3923 | RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_TYPE, uIntInfoType)
3924 | RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_ERR_CODE_VALID, fErrCodeValid)
3925 | RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_NMI_UNBLOCK_IRET, fNmiUnblocking)
3926 | RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_VALID, 1);
3927 iemVmxVmcsSetExitIntInfo(pVCpu, uExitIntInfo);
3928 iemVmxVmcsSetExitIntErrCode(pVCpu, uErrCode);
3929
3930 /*
3931 * For VM-exits due to software exceptions (those generated by INT3 or INTO) or privileged
3932 * software exceptions (those generated by INT1/ICEBP) we need to supply the VM-exit instruction
3933 * length.
3934 */
3935 if ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3936 || (fFlags & (IEM_XCPT_FLAGS_BP_INSTR | IEM_XCPT_FLAGS_OF_INSTR | IEM_XCPT_FLAGS_ICEBP_INSTR)))
3937 iemVmxVmcsSetExitInstrLen(pVCpu, cbInstr);
3938 else
3939 iemVmxVmcsSetExitInstrLen(pVCpu, 0);
3940
3941 return iemVmxVmexit(pVCpu, VMX_EXIT_XCPT_OR_NMI, u64ExitQual);
3942 }
3943
3944 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
3945}
3946
3947
3948/**
3949 * VMX VM-exit handler for EPT misconfiguration.
3950 *
3951 * @param pVCpu The cross context virtual CPU structure.
3952 * @param GCPhysAddr The physical address causing the EPT misconfiguration.
3953 * This need not be page aligned (e.g. nested-guest in real
3954 * mode).
3955 */
3956static VBOXSTRICTRC iemVmxVmexitEptMisconfig(PVMCPUCC pVCpu, RTGCPHYS GCPhysAddr) RT_NOEXCEPT
3957{
3958 iemVmxVmcsSetExitGuestPhysAddr(pVCpu, GCPhysAddr);
3959 return iemVmxVmexit(pVCpu, VMX_EXIT_EPT_MISCONFIG, 0 /* u64ExitQual */);
3960}
3961
3962
3963/**
3964 * VMX VM-exit handler for EPT misconfiguration.
3965 *
3966 * This is intended for EPT misconfigurations where the caller provides all the
3967 * relevant VM-exit information.
3968 *
3969 * @param pVCpu The cross context virtual CPU structure.
3970 * @param GCPhysAddr The physical address causing the EPT misconfiguration.
3971 * This need not be page aligned (e.g. nested-guest in real
3972 * mode).
3973 * @param pExitEventInfo Pointer to the VM-exit event information.
3974 */
3975static VBOXSTRICTRC iemVmxVmexitEptMisconfigWithInfo(PVMCPUCC pVCpu, RTGCPHYS GCPhysAddr, PCVMXVEXITEVENTINFO pExitEventInfo) RT_NOEXCEPT
3976{
3977 Assert(pExitEventInfo);
3978 Assert(!VMX_EXIT_INT_INFO_IS_VALID(pExitEventInfo->uExitIntInfo));
3979 iemVmxVmcsSetIdtVectoringInfo(pVCpu, pExitEventInfo->uIdtVectoringInfo);
3980 iemVmxVmcsSetIdtVectoringErrCode(pVCpu, pExitEventInfo->uIdtVectoringErrCode);
3981 iemVmxVmcsSetExitGuestPhysAddr(pVCpu, GCPhysAddr);
3982 return iemVmxVmexit(pVCpu, VMX_EXIT_EPT_MISCONFIG, 0 /* u64ExitQual */);
3983}
3984
3985
3986/**
3987 * Interface for HM and EM to emulate a VM-exit due to an EPT misconfiguration.
3988 *
3989 * @returns Strict VBox status code.
3990 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
3991 * @param GCPhysAddr The nested-guest physical address causing the EPT
3992 * misconfiguration.
3993 * @param pExitEventInfo Pointer to the VM-exit event information.
3994 * @thread EMT(pVCpu)
3995 */
3996VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitEptMisconfig(PVMCPUCC pVCpu, RTGCPHYS GCPhysAddr, PCVMXVEXITEVENTINFO pExitEventInfo)
3997{
3998 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI);
3999
4000 iemInitExec(pVCpu, 0 /*fExecOpts*/);
4001 VBOXSTRICTRC rcStrict = iemVmxVmexitEptMisconfigWithInfo(pVCpu, GCPhysAddr, pExitEventInfo);
4002 Assert(!pVCpu->iem.s.cActiveMappings);
4003 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
4004}
4005
4006
4007/**
4008 * VMX VM-exit handler for EPT violation.
4009 *
4010 * @param pVCpu The cross context virtual CPU structure.
4011 * @param fAccess The access causing the EPT violation, IEM_ACCESS_XXX.
4012 * @param fSlatFail The SLAT failure info, IEM_SLAT_FAIL_XXX.
4013 * @param fEptAccess The EPT paging structure bits.
4014 * @param GCPhysAddr The physical address causing the EPT violation. This
4015 * need not be page aligned (e.g. nested-guest in real
4016 * mode).
4017 * @param fIsLinearAddrValid Whether translation of a linear address caused this
4018 * EPT violation. If @c false, GCPtrAddr must be 0.
4019 * @param GCPtrAddr The linear address causing the EPT violation.
4020 * @param cbInstr The VM-exit instruction length.
4021 */
4022static VBOXSTRICTRC iemVmxVmexitEptViolation(PVMCPUCC pVCpu, uint32_t fAccess, uint32_t fSlatFail,
4023 uint64_t fEptAccess, RTGCPHYS GCPhysAddr, bool fIsLinearAddrValid,
4024 uint64_t GCPtrAddr, uint8_t cbInstr) RT_NOEXCEPT
4025{
4026 /*
4027 * If the linear address isn't valid (can happen when loading PDPTEs
4028 * as part of MOV CR execution), the linear address field is undefined.
4029 * While we can leave it this way, it's preferable to zero it for consistency.
4030 */
4031 Assert(fIsLinearAddrValid || GCPtrAddr == 0);
4032
4033 uint64_t const fCaps = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64EptVpidCaps;
4034 bool const fSupportsAccessDirty = RT_BOOL(fCaps & MSR_IA32_VMX_EPT_VPID_CAP_ACCESS_DIRTY);
4035
4036 uint32_t const fDataRdMask = IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_READ;
4037 uint32_t const fDataWrMask = IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE;
4038 uint32_t const fInstrMask = IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_EXEC;
4039 bool const fDataRead = ((fAccess & fDataRdMask) == IEM_ACCESS_DATA_R) | fSupportsAccessDirty;
4040 bool const fDataWrite = ((fAccess & fDataWrMask) == IEM_ACCESS_DATA_W) | fSupportsAccessDirty;
4041 bool const fInstrFetch = ((fAccess & fInstrMask) == IEM_ACCESS_INSTRUCTION);
4042 bool const fEptRead = RT_BOOL(fEptAccess & EPT_E_READ);
4043 bool const fEptWrite = RT_BOOL(fEptAccess & EPT_E_WRITE);
4044 bool const fEptExec = RT_BOOL(fEptAccess & EPT_E_EXECUTE);
4045 bool const fNmiUnblocking = pVCpu->cpum.GstCtx.hwvirt.vmx.fNmiUnblockingIret;
4046 bool const fIsLinearToPhysAddr = fIsLinearAddrValid & RT_BOOL(fSlatFail & IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR);
4047
4048 uint64_t const u64ExitQual = RT_BF_MAKE(VMX_BF_EXIT_QUAL_EPT_ACCESS_READ, fDataRead)
4049 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_EPT_ACCESS_WRITE, fDataWrite)
4050 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_EPT_ACCESS_INSTR_FETCH, fInstrFetch)
4051 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_EPT_ENTRY_READ, fEptRead)
4052 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_EPT_ENTRY_WRITE, fEptWrite)
4053 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_EPT_ENTRY_EXECUTE, fEptExec)
4054 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_EPT_LINEAR_ADDR_VALID, fIsLinearAddrValid)
4055 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_EPT_LINEAR_TO_PHYS_ADDR, fIsLinearToPhysAddr)
4056 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_EPT_NMI_UNBLOCK_IRET, fNmiUnblocking);
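    /* Illustrative example (hypothetical, assuming the CPU reports no EPT A/D-bit support):
       a guest data write to a page whose EPT entry permits only reads yields
       ACCESS_WRITE=1, ENTRY_READ=1, ENTRY_WRITE=0 and ENTRY_EXECUTE=0 in the Exit
       qualification built above. */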
4057
4058#ifdef VBOX_STRICT
4059 uint64_t const fMiscCaps = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Misc;
4060 uint32_t const fProcCtls2 = pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32ProcCtls2;
4061 Assert(!(fCaps & MSR_IA32_VMX_EPT_VPID_CAP_ADVEXITINFO_EPT_VIOLATION)); /* Advanced VM-exit info. not supported */
4062 Assert(!(fCaps & MSR_IA32_VMX_EPT_VPID_CAP_SUPER_SHW_STACK)); /* Supervisor shadow stack control not supported. */
4063 Assert(!(RT_BF_GET(fMiscCaps, VMX_BF_MISC_INTEL_PT))); /* Intel PT not supported. */
4064 Assert(!(fProcCtls2 & VMX_PROC_CTLS2_MODE_BASED_EPT_PERM)); /* Mode-based execute control not supported. */
4065#endif
4066
4067 iemVmxVmcsSetExitGuestPhysAddr(pVCpu, GCPhysAddr);
4068 iemVmxVmcsSetExitGuestLinearAddr(pVCpu, GCPtrAddr);
4069 iemVmxVmcsSetExitInstrLen(pVCpu, cbInstr);
4070
4071 return iemVmxVmexit(pVCpu, VMX_EXIT_EPT_VIOLATION, u64ExitQual);
4072}
4073
4074
4075/**
4076 * VMX VM-exit handler for EPT violation.
4077 *
4078 * This is intended for EPT violations where the caller provides all the
4079 * relevant VM-exit information.
4080 *
4081 * @returns VBox strict status code.
4082 * @param pVCpu The cross context virtual CPU structure.
4083 * @param pExitInfo Pointer to the VM-exit information.
4084 * @param pExitEventInfo Pointer to the VM-exit event information.
4085 */
4086static VBOXSTRICTRC iemVmxVmexitEptViolationWithInfo(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo,
4087 PCVMXVEXITEVENTINFO pExitEventInfo) RT_NOEXCEPT
4088{
4089 Assert(pExitInfo);
4090 Assert(pExitEventInfo);
4091 Assert(pExitInfo->uReason == VMX_EXIT_EPT_VIOLATION);
4092 Assert(!VMX_EXIT_INT_INFO_IS_VALID(pExitEventInfo->uExitIntInfo));
4093
4094 iemVmxVmcsSetIdtVectoringInfo(pVCpu, pExitEventInfo->uIdtVectoringInfo);
4095 iemVmxVmcsSetIdtVectoringErrCode(pVCpu, pExitEventInfo->uIdtVectoringErrCode);
4096
4097 iemVmxVmcsSetExitGuestPhysAddr(pVCpu, pExitInfo->u64GuestPhysAddr);
4098 if (pExitInfo->u64Qual & VMX_BF_EXIT_QUAL_EPT_LINEAR_ADDR_VALID_MASK)
4099 iemVmxVmcsSetExitGuestLinearAddr(pVCpu, pExitInfo->u64GuestLinearAddr);
4100 else
4101 iemVmxVmcsSetExitGuestLinearAddr(pVCpu, 0);
4102 iemVmxVmcsSetExitInstrLen(pVCpu, pExitInfo->cbInstr);
4103 return iemVmxVmexit(pVCpu, VMX_EXIT_EPT_VIOLATION, pExitInfo->u64Qual);
4104}
4105
4106
4107/**
4108 * Interface for HM and EM to emulate a VM-exit due to an EPT violation.
4109 *
4110 * @returns Strict VBox status code.
4111 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
4112 * @param pExitInfo Pointer to the VM-exit information.
4113 * @param pExitEventInfo Pointer to the VM-exit event information.
4114 * @thread EMT(pVCpu)
4115 */
4116VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitEptViolation(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo,
4117 PCVMXVEXITEVENTINFO pExitEventInfo)
4118{
4119 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI);
4120
4121 iemInitExec(pVCpu, 0 /*fExecOpts*/);
4122 VBOXSTRICTRC rcStrict = iemVmxVmexitEptViolationWithInfo(pVCpu, pExitInfo, pExitEventInfo);
4123 Assert(!pVCpu->iem.s.cActiveMappings);
4124 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
4125}
4126
4127
4128/**
4129 * VMX VM-exit handler for EPT-induced VM-exits.
4130 *
4131 * @param pVCpu The cross context virtual CPU structure.
4132 * @param pWalk The page walk info.
4133 * @param fAccess The access causing the EPT event, IEM_ACCESS_XXX.
4134 * @param fSlatFail Additional SLAT info, IEM_SLAT_FAIL_XXX.
4135 * @param cbInstr The VM-exit instruction length if applicable. Pass 0 if not
4136 * applicable.
4137 */
4138VBOXSTRICTRC iemVmxVmexitEpt(PVMCPUCC pVCpu, PPGMPTWALKFAST pWalk, uint32_t fAccess, uint32_t fSlatFail, uint8_t cbInstr) RT_NOEXCEPT
4139{
4140 Assert(pWalk->fInfo & PGM_WALKINFO_IS_SLAT);
4141 Assert(pWalk->fFailed & PGM_WALKFAIL_EPT);
4142 Assert(!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fVmxEptXcptVe); /* #VE exceptions not supported. */
4143 Assert(!(pWalk->fFailed & PGM_WALKFAIL_EPT_VIOLATION_CONVERTIBLE)); /* Without #VE, convertible violations not possible. */
4144
4145 if (pWalk->fFailed & PGM_WALKFAIL_EPT_VIOLATION)
4146 {
4147 LogFlow(("EptViolation: cs:rip=%04x:%08RX64 fAccess=%#RX32\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, fAccess));
4148 uint64_t const fEptAccess = (pWalk->fEffective & PGM_PTATTRS_EPT_MASK) >> PGM_PTATTRS_EPT_SHIFT;
4149 return iemVmxVmexitEptViolation(pVCpu, fAccess, fSlatFail, fEptAccess, pWalk->GCPhysNested,
4150 RT_BOOL(pWalk->fInfo & PGM_WALKINFO_IS_LINEAR_ADDR_VALID),
4151 pWalk->GCPtr, cbInstr);
4152 }
4153
4154 LogFlow(("EptMisconfig: cs:rip=%04x:%08RX64 fAccess=%#RX32\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, fAccess));
4155 Assert(pWalk->fFailed & PGM_WALKFAIL_EPT_MISCONFIG);
4156 return iemVmxVmexitEptMisconfig(pVCpu, pWalk->GCPhysNested);
4157}
4158
4159
4160/**
4161 * VMX VM-exit handler for APIC accesses.
4162 *
4163 * @param pVCpu The cross context virtual CPU structure.
4164 * @param offAccess The offset of the register being accessed.
4165 * @param fAccess The type of access, see IEM_ACCESS_XXX.
4166 */
4167static VBOXSTRICTRC iemVmxVmexitApicAccess(PVMCPUCC pVCpu, uint16_t offAccess, uint32_t fAccess) RT_NOEXCEPT
4168{
4169 VMXAPICACCESS enmAccess;
4170 bool const fInEventDelivery = IEMGetCurrentXcpt(pVCpu, NULL, NULL, NULL, NULL);
4171 if (fInEventDelivery)
4172 enmAccess = VMXAPICACCESS_LINEAR_EVENT_DELIVERY;
4173 else if ((fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == IEM_ACCESS_INSTRUCTION)
4174 enmAccess = VMXAPICACCESS_LINEAR_INSTR_FETCH;
4175 else if (fAccess & IEM_ACCESS_TYPE_WRITE)
4176 enmAccess = VMXAPICACCESS_LINEAR_WRITE;
4177 else
4178 enmAccess = VMXAPICACCESS_LINEAR_READ;
4179
4180 uint64_t const u64ExitQual = RT_BF_MAKE(VMX_BF_EXIT_QUAL_APIC_ACCESS_OFFSET, offAccess)
4181 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_APIC_ACCESS_TYPE, enmAccess);
4182 return iemVmxVmexit(pVCpu, VMX_EXIT_APIC_ACCESS, u64ExitQual);
4183}
4184
4185
4186/**
4187 * VMX VM-exit handler for APIC accesses.
4188 *
4189 * This is intended for APIC accesses where the caller provides all the
4190 * relevant VM-exit information.
4191 *
4192 * @returns VBox strict status code.
4193 * @param pVCpu The cross context virtual CPU structure.
4194 * @param pExitInfo Pointer to the VM-exit information.
4195 * @param pExitEventInfo Pointer to the VM-exit event information.
4196 */
4197static VBOXSTRICTRC iemVmxVmexitApicAccessWithInfo(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo,
4198 PCVMXVEXITEVENTINFO pExitEventInfo) RT_NOEXCEPT
4199{
4200 /* VM-exit interruption information should not be valid for APIC-access VM-exits. */
4201 Assert(!VMX_EXIT_INT_INFO_IS_VALID(pExitEventInfo->uExitIntInfo));
4202 Assert(pExitInfo->uReason == VMX_EXIT_APIC_ACCESS);
4203 iemVmxVmcsSetExitIntInfo(pVCpu, 0);
4204 iemVmxVmcsSetExitIntErrCode(pVCpu, 0);
4205 iemVmxVmcsSetExitInstrLen(pVCpu, pExitInfo->cbInstr);
4206 iemVmxVmcsSetIdtVectoringInfo(pVCpu, pExitEventInfo->uIdtVectoringInfo);
4207 iemVmxVmcsSetIdtVectoringErrCode(pVCpu, pExitEventInfo->uIdtVectoringErrCode);
4208 return iemVmxVmexit(pVCpu, VMX_EXIT_APIC_ACCESS, pExitInfo->u64Qual);
4209}
4210
4211
4212/**
4213 * Interface for HM and EM to virtualize memory-mapped APIC accesses.
4214 *
4215 * @returns Strict VBox status code.
4216 * @retval VINF_VMX_MODIFIES_BEHAVIOR if the memory access was virtualized.
4217 * @retval VINF_VMX_VMEXIT if the access causes a VM-exit.
4218 *
4219 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
4220 * @param pExitInfo Pointer to the VM-exit information.
4221 * @param pExitEventInfo Pointer to the VM-exit event information.
4222 * @thread EMT(pVCpu)
4223 */
4224VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitApicAccess(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo, PCVMXVEXITEVENTINFO pExitEventInfo)
4225{
4226 Assert(pExitInfo);
4227 Assert(pExitEventInfo);
4228 VBOXSTRICTRC rcStrict = iemVmxVmexitApicAccessWithInfo(pVCpu, pExitInfo, pExitEventInfo);
4229 Assert(!pVCpu->iem.s.cActiveMappings);
4230 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
4231}
4232
4233
4234/**
4235 * VMX VM-exit handler for APIC-write VM-exits.
4236 *
4237 * @param pVCpu The cross context virtual CPU structure.
4238 * @param   offApic     The offset of the virtual-APIC page write that caused this
4239 * VM-exit.
4240 */
4241static VBOXSTRICTRC iemVmxVmexitApicWrite(PVMCPUCC pVCpu, uint16_t offApic) RT_NOEXCEPT
4242{
4243 Assert(offApic < XAPIC_OFF_END + 4);
4244 /* Write only bits 11:0 of the APIC offset into the Exit qualification field. */
4245 offApic &= UINT16_C(0xfff);
4246 return iemVmxVmexit(pVCpu, VMX_EXIT_APIC_WRITE, offApic);
4247}
4248
4249
4250/**
4251 * Clears any pending virtual-APIC write emulation.
4252 *
4253 * @returns The virtual-APIC offset that was written before clearing it.
4254 * @param pVCpu The cross context virtual CPU structure.
4255 */
4256DECLINLINE(uint16_t) iemVmxVirtApicClearPendingWrite(PVMCPUCC pVCpu)
4257{
4258 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_HWVIRT);
4259 uint16_t const offVirtApicWrite = pVCpu->cpum.GstCtx.hwvirt.vmx.offVirtApicWrite;
4260 pVCpu->cpum.GstCtx.hwvirt.vmx.offVirtApicWrite = 0;
4261 Assert(VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE));
4262 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_VMX_APIC_WRITE);
4263 return offVirtApicWrite;
4264}
4265
4266
4267/**
4268 * Reads a 32-bit register from the virtual-APIC page at the given offset.
4269 *
4270 * @returns The register from the virtual-APIC page.
4271 * @param pVCpu The cross context virtual CPU structure.
4272 * @param offReg The offset of the register being read.
4273 */
4274uint32_t iemVmxVirtApicReadRaw32(PVMCPUCC pVCpu, uint16_t offReg) RT_NOEXCEPT
4275{
4276 Assert(offReg <= VMX_V_VIRT_APIC_SIZE - sizeof(uint32_t));
4277
4278 uint32_t uReg = 0;
4279 RTGCPHYS const GCPhysVirtApic = pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u64AddrVirtApic.u;
4280 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &uReg, GCPhysVirtApic + offReg, sizeof(uReg));
4281 AssertMsgStmt(RT_SUCCESS(rc),
4282 ("Failed to read %u bytes at offset %#x of the virtual-APIC page at %#RGp: %Rrc\n",
4283 sizeof(uReg), offReg, GCPhysVirtApic, rc),
4284 uReg = 0);
4285 return uReg;
4286}
4287
4288
4289/**
4290 * Reads a 64-bit register from the virtual-APIC page at the given offset.
4291 *
4292 * @returns The register from the virtual-APIC page.
4293 * @param pVCpu The cross context virtual CPU structure.
4294 * @param offReg The offset of the register being read.
4295 */
4296static uint64_t iemVmxVirtApicReadRaw64(PVMCPUCC pVCpu, uint16_t offReg) RT_NOEXCEPT
4297{
4298 Assert(offReg <= VMX_V_VIRT_APIC_SIZE - sizeof(uint64_t));
4299
4300 uint64_t uReg = 0;
4301 RTGCPHYS const GCPhysVirtApic = pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u64AddrVirtApic.u;
4302 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &uReg, GCPhysVirtApic + offReg, sizeof(uReg));
4303 AssertMsgStmt(RT_SUCCESS(rc),
4304 ("Failed to read %u bytes at offset %#x of the virtual-APIC page at %#RGp: %Rrc\n",
4305 sizeof(uReg), offReg, GCPhysVirtApic, rc),
4306 uReg = 0);
4307 return uReg;
4308}
4309
4310
4311/**
4312 * Writes a 32-bit register to the virtual-APIC page at the given offset.
4313 *
4314 * @param pVCpu The cross context virtual CPU structure.
4315 * @param offReg The offset of the register being written.
4316 * @param uReg The register value to write.
4317 */
4318void iemVmxVirtApicWriteRaw32(PVMCPUCC pVCpu, uint16_t offReg, uint32_t uReg) RT_NOEXCEPT
4319{
4320 Assert(offReg <= VMX_V_VIRT_APIC_SIZE - sizeof(uint32_t));
4321
4322 RTGCPHYS const GCPhysVirtApic = pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u64AddrVirtApic.u;
4323 int rc = PGMPhysSimpleWriteGCPhys(pVCpu->CTX_SUFF(pVM), GCPhysVirtApic + offReg, &uReg, sizeof(uReg));
4324 AssertMsgRC(rc, ("Failed to write %u bytes at offset %#x of the virtual-APIC page at %#RGp: %Rrc\n",
4325 sizeof(uReg), offReg, GCPhysVirtApic, rc));
4326}
4327
4328
4329/**
4330 * Writes a 64-bit register to the virtual-APIC page at the given offset.
4331 *
4332 * @param pVCpu The cross context virtual CPU structure.
4333 * @param offReg The offset of the register being written.
4334 * @param uReg The register value to write.
4335 */
4336static void iemVmxVirtApicWriteRaw64(PVMCPUCC pVCpu, uint16_t offReg, uint64_t uReg) RT_NOEXCEPT
4337{
4338 Assert(offReg <= VMX_V_VIRT_APIC_SIZE - sizeof(uint64_t));
4339
4340 RTGCPHYS const GCPhysVirtApic = pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u64AddrVirtApic.u;
4341 int rc = PGMPhysSimpleWriteGCPhys(pVCpu->CTX_SUFF(pVM), GCPhysVirtApic + offReg, &uReg, sizeof(uReg));
4342 AssertMsgRC(rc, ("Failed to write %u bytes at offset %#x of the virtual-APIC page at %#RGp: %Rrc\n",
4343 sizeof(uReg), offReg, GCPhysVirtApic, rc));
4344}
4345
4346
4347/**
4348 * Sets the vector in a virtual-APIC 256-bit sparse register.
4349 *
4350 * @param pVCpu The cross context virtual CPU structure.
4351 * @param   offReg      The offset of the 256-bit sparse register.
4352 * @param uVector The vector to set.
4353 *
4354 * @remarks This is based on our APIC device code.
4355 */
4356static void iemVmxVirtApicSetVectorInReg(PVMCPUCC pVCpu, uint16_t offReg, uint8_t uVector) RT_NOEXCEPT
4357{
4358 /* Determine the vector offset within the chunk. */
4359 uint16_t const offVector = (uVector & UINT32_C(0xe0)) >> 1;
4360
4361 /* Read the chunk at the offset. */
4362 uint32_t uReg;
4363 RTGCPHYS const GCPhysVirtApic = pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u64AddrVirtApic.u;
4364 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &uReg, GCPhysVirtApic + offReg + offVector, sizeof(uReg));
4365 if (RT_SUCCESS(rc))
4366 {
4367 /* Modify the chunk. */
4368 uint16_t const idxVectorBit = uVector & UINT32_C(0x1f);
4369 uReg |= RT_BIT(idxVectorBit);
4370
4371 /* Write the chunk. */
4372 rc = PGMPhysSimpleWriteGCPhys(pVCpu->CTX_SUFF(pVM), GCPhysVirtApic + offReg + offVector, &uReg, sizeof(uReg));
4373 AssertMsgRC(rc, ("Failed to set vector %#x in 256-bit register at %#x of the virtual-APIC page at %#RGp: %Rrc\n",
4374 uVector, offReg, GCPhysVirtApic, rc));
4375 }
4376 else
4377 AssertMsgFailed(("Failed to get vector %#x in 256-bit register at %#x of the virtual-APIC page at %#RGp: %Rrc\n",
4378 uVector, offReg, GCPhysVirtApic, rc));
4379}
4380
4381
4382/**
4383 * Clears the vector in a virtual-APIC 256-bit sparse register.
4384 *
4385 * @param pVCpu The cross context virtual CPU structure.
4386 * @param   offReg      The offset of the 256-bit sparse register.
4387 * @param uVector The vector to clear.
4388 *
4389 * @remarks This is based on our APIC device code.
4390 */
4391static void iemVmxVirtApicClearVectorInReg(PVMCPUCC pVCpu, uint16_t offReg, uint8_t uVector) RT_NOEXCEPT
4392{
4393 /* Determine the vector offset within the chunk. */
4394 uint16_t const offVector = (uVector & UINT32_C(0xe0)) >> 1;
4395
4396 /* Read the chunk at the offset. */
4397 uint32_t uReg;
4398 RTGCPHYS const GCPhysVirtApic = pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u64AddrVirtApic.u;
4399 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &uReg, GCPhysVirtApic + offReg + offVector, sizeof(uReg));
4400 if (RT_SUCCESS(rc))
4401 {
4402 /* Modify the chunk. */
4403 uint16_t const idxVectorBit = uVector & UINT32_C(0x1f);
4404 uReg &= ~RT_BIT(idxVectorBit);
4405
4406 /* Write the chunk. */
4407 rc = PGMPhysSimpleWriteGCPhys(pVCpu->CTX_SUFF(pVM), GCPhysVirtApic + offReg + offVector, &uReg, sizeof(uReg));
4408 AssertMsgRC(rc, ("Failed to clear vector %#x in 256-bit register at %#x of the virtual-APIC page at %#RGp: %Rrc\n",
4409 uVector, offReg, GCPhysVirtApic, rc));
4410 }
4411 else
4412 AssertMsgFailed(("Failed to get vector %#x in 256-bit register at %#x of the virtual-APIC page at %#RGp: %Rrc\n",
4413 uVector, offReg, GCPhysVirtApic, rc));
4414}
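
/*
 * Worked example for the two sparse-register helpers above (illustrative only, the
 * arithmetic simply mirrors iemVmxVirtApicSetVectorInReg/iemVmxVirtApicClearVectorInReg):
 *
 *     uint8_t  const uVector      = 0x41;                            // vector 65
 *     uint16_t const offVector    = (uVector & UINT32_C(0xe0)) >> 1; // 0x40 >> 1 = 0x20
 *     uint16_t const idxVectorBit = uVector & UINT32_C(0x1f);        // 0x01
 *
 * Vector 0x41 thus lives in the 32-bit chunk at offReg + 0x20 (the third 16-byte
 * fragment), bit 1: bits 7:5 of the vector select the fragment, bits 4:0 the bit.
 */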
4415
4416
4417/**
4418 * Checks if a memory access to the APIC-access page must cause an APIC-access
4419 * VM-exit.
4420 *
4421 * @param pVCpu The cross context virtual CPU structure.
4422 * @param offAccess The offset of the register being accessed.
4423 * @param cbAccess The size of the access in bytes.
4424 * @param fAccess The type of access, see IEM_ACCESS_XXX.
4425 *
4426 * @remarks This must not be used for MSR-based APIC-access page accesses!
4427 * @sa iemVmxVirtApicAccessMsrWrite, iemVmxVirtApicAccessMsrRead.
4428 */
4429static bool iemVmxVirtApicIsMemAccessIntercepted(PVMCPUCC pVCpu, uint16_t offAccess, size_t cbAccess, uint32_t fAccess) RT_NOEXCEPT
4430{
4431 Assert(cbAccess > 0);
4432 PCVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
4433
4434 /*
4435 * We must cause a VM-exit if any of the following are true:
4436 * - TPR shadowing isn't active.
4437     *   - The access size exceeds 32 bits.
4438     *   - The access is not contained within the low 4 bytes of a 16-byte aligned offset.
4439 *
4440 * See Intel spec. 29.4.2 "Virtualizing Reads from the APIC-Access Page".
4441 * See Intel spec. 29.4.3.1 "Determining Whether a Write Access is Virtualized".
4442 */
4443 if ( !(pVmcs->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
4444 || cbAccess > sizeof(uint32_t)
4445 || ((offAccess + cbAccess - 1) & 0xc)
4446 || offAccess >= XAPIC_OFF_END + 4)
4447 return true;
4448
4449 /*
4450 * If the access is part of an operation where we have already
4451 * virtualized a virtual-APIC write, we must cause a VM-exit.
4452 */
4453 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
4454 return true;
4455
4456 /*
4457 * Check write accesses to the APIC-access page that cause VM-exits.
4458 */
4459 if (fAccess & IEM_ACCESS_TYPE_WRITE)
4460 {
4461 if (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_APIC_REG_VIRT)
4462 {
4463 /*
4464 * With APIC-register virtualization, a write access to any of the
4465             * following registers is virtualized. Accessing any other register
4466 * causes a VM-exit.
4467 */
4468 uint16_t const offAlignedAccess = offAccess & 0xfffc;
4469 switch (offAlignedAccess)
4470 {
4471 case XAPIC_OFF_ID:
4472 case XAPIC_OFF_TPR:
4473 case XAPIC_OFF_EOI:
4474 case XAPIC_OFF_LDR:
4475 case XAPIC_OFF_DFR:
4476 case XAPIC_OFF_SVR:
4477 case XAPIC_OFF_ESR:
4478 case XAPIC_OFF_ICR_LO:
4479 case XAPIC_OFF_ICR_HI:
4480 case XAPIC_OFF_LVT_TIMER:
4481 case XAPIC_OFF_LVT_THERMAL:
4482 case XAPIC_OFF_LVT_PERF:
4483 case XAPIC_OFF_LVT_LINT0:
4484 case XAPIC_OFF_LVT_LINT1:
4485 case XAPIC_OFF_LVT_ERROR:
4486 case XAPIC_OFF_TIMER_ICR:
4487 case XAPIC_OFF_TIMER_DCR:
4488 break;
4489 default:
4490 return true;
4491 }
4492 }
4493 else if (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY)
4494 {
4495 /*
4496 * With virtual-interrupt delivery, a write access to any of the
4497             * following registers is virtualized. Accessing any other register
4498 * causes a VM-exit.
4499 *
4500 * Note! The specification does not allow writing to offsets in-between
4501 * these registers (e.g. TPR + 1 byte) unlike read accesses.
4502 */
4503 switch (offAccess)
4504 {
4505 case XAPIC_OFF_TPR:
4506 case XAPIC_OFF_EOI:
4507 case XAPIC_OFF_ICR_LO:
4508 break;
4509 default:
4510 return true;
4511 }
4512 }
4513 else
4514 {
4515 /*
4516 * Without APIC-register virtualization or virtual-interrupt delivery,
4517 * only TPR accesses are virtualized.
4518 */
4519 if (offAccess == XAPIC_OFF_TPR)
4520 { /* likely */ }
4521 else
4522 return true;
4523 }
4524 }
4525 else
4526 {
4527 /*
4528 * Check read accesses to the APIC-access page that cause VM-exits.
4529 */
4530 if (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_APIC_REG_VIRT)
4531 {
4532 /*
4533 * With APIC-register virtualization, a read access to any of the
4534             * following registers is virtualized. Accessing any other register
4535 * causes a VM-exit.
4536 */
4537 uint16_t const offAlignedAccess = offAccess & 0xfffc;
4538 switch (offAlignedAccess)
4539 {
4540 /** @todo r=ramshankar: What about XAPIC_OFF_LVT_CMCI? */
4541 case XAPIC_OFF_ID:
4542 case XAPIC_OFF_VERSION:
4543 case XAPIC_OFF_TPR:
4544 case XAPIC_OFF_EOI:
4545 case XAPIC_OFF_LDR:
4546 case XAPIC_OFF_DFR:
4547 case XAPIC_OFF_SVR:
4548 case XAPIC_OFF_ISR0: case XAPIC_OFF_ISR1: case XAPIC_OFF_ISR2: case XAPIC_OFF_ISR3:
4549 case XAPIC_OFF_ISR4: case XAPIC_OFF_ISR5: case XAPIC_OFF_ISR6: case XAPIC_OFF_ISR7:
4550 case XAPIC_OFF_TMR0: case XAPIC_OFF_TMR1: case XAPIC_OFF_TMR2: case XAPIC_OFF_TMR3:
4551 case XAPIC_OFF_TMR4: case XAPIC_OFF_TMR5: case XAPIC_OFF_TMR6: case XAPIC_OFF_TMR7:
4552 case XAPIC_OFF_IRR0: case XAPIC_OFF_IRR1: case XAPIC_OFF_IRR2: case XAPIC_OFF_IRR3:
4553 case XAPIC_OFF_IRR4: case XAPIC_OFF_IRR5: case XAPIC_OFF_IRR6: case XAPIC_OFF_IRR7:
4554 case XAPIC_OFF_ESR:
4555 case XAPIC_OFF_ICR_LO:
4556 case XAPIC_OFF_ICR_HI:
4557 case XAPIC_OFF_LVT_TIMER:
4558 case XAPIC_OFF_LVT_THERMAL:
4559 case XAPIC_OFF_LVT_PERF:
4560 case XAPIC_OFF_LVT_LINT0:
4561 case XAPIC_OFF_LVT_LINT1:
4562 case XAPIC_OFF_LVT_ERROR:
4563 case XAPIC_OFF_TIMER_ICR:
4564 case XAPIC_OFF_TIMER_DCR:
4565 break;
4566 default:
4567 return true;
4568 }
4569 }
4570 else
4571 {
4572 /* Without APIC-register virtualization, only TPR accesses are virtualized. */
4573 if (offAccess == XAPIC_OFF_TPR)
4574 { /* likely */ }
4575 else
4576 return true;
4577 }
4578 }
4579
4580 /* The APIC access is virtualized, does not cause a VM-exit. */
4581 return false;
4582}
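
/*
 * Illustrative examples for the containment test above; the '(offAccess + cbAccess - 1) & 0xc'
 * expression is non-zero whenever the access spills outside the low 4 bytes of a 16-byte
 * aligned region:
 *
 *     4-byte read at offset 0x80 (TPR): (0x80 + 4 - 1) & 0xc = 0x83 & 0xc = 0 -> may be virtualized.
 *     2-byte read at offset 0x82:       (0x82 + 2 - 1) & 0xc = 0x83 & 0xc = 0 -> may be virtualized.
 *     4-byte read at offset 0x82:       (0x82 + 4 - 1) & 0xc = 0x85 & 0xc = 4 -> APIC-access VM-exit.
 */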
4583
4584
4585/**
4586 * Virtualizes a memory-based APIC access by certain instructions even though they
4587 * do not use the address to access memory.
4588 *
4589 * This is for instructions like MONITOR, CLFLUSH, CLFLUSHOPT, ENTER which may cause
4590 * page-faults but do not use the address to access memory.
4591 *
4592 * @param pVCpu The cross context virtual CPU structure.
4593 * @param pGCPhysAccess Pointer to the guest-physical address accessed.
4594 * @param cbAccess The size of the access in bytes.
4595 * @param fAccess The type of access, see IEM_ACCESS_XXX.
4596 */
4597VBOXSTRICTRC iemVmxVirtApicAccessUnused(PVMCPUCC pVCpu, PRTGCPHYS pGCPhysAccess, size_t cbAccess, uint32_t fAccess) RT_NOEXCEPT
4598{
4599 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS);
4600 Assert(pGCPhysAccess);
4601
4602 RTGCPHYS const GCPhysAccess = *pGCPhysAccess & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
4603 RTGCPHYS const GCPhysApic = pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u64AddrApicAccess.u;
4604 Assert(!(GCPhysApic & GUEST_PAGE_OFFSET_MASK));
4605
4606 if (GCPhysAccess == GCPhysApic)
4607 {
4608 uint16_t const offAccess = *pGCPhysAccess & GUEST_PAGE_OFFSET_MASK;
4609 bool const fIntercept = iemVmxVirtApicIsMemAccessIntercepted(pVCpu, offAccess, cbAccess, fAccess);
4610 if (fIntercept)
4611 return iemVmxVmexitApicAccess(pVCpu, offAccess, fAccess);
4612
4613 *pGCPhysAccess = GCPhysApic | offAccess;
4614 return VINF_VMX_MODIFIES_BEHAVIOR;
4615 }
4616
4617 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
4618}
4619
4620
4621/**
4622 * Virtualizes a memory-based APIC access.
4623 *
4624 * @returns VBox strict status code.
4625 * @retval VINF_VMX_MODIFIES_BEHAVIOR if the access was virtualized.
4626 * @retval VINF_VMX_VMEXIT if the access causes a VM-exit.
4627 *
4628 * @param pVCpu The cross context virtual CPU structure.
4629 * @param offAccess The offset of the register being accessed (within the
4630 * APIC-access page).
4631 * @param cbAccess The size of the access in bytes.
4632 * @param pvData Pointer to the data being written or where to store the data
4633 * being read.
4634 * @param fAccess The type of access, see IEM_ACCESS_XXX.
4635 */
4636static VBOXSTRICTRC iemVmxVirtApicAccessMem(PVMCPUCC pVCpu, uint16_t offAccess, size_t cbAccess,
4637 void *pvData, uint32_t fAccess) RT_NOEXCEPT
4638{
4639 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS);
4640 Assert(pvData);
4641
4642 bool const fIntercept = iemVmxVirtApicIsMemAccessIntercepted(pVCpu, offAccess, cbAccess, fAccess);
4643 if (fIntercept)
4644 return iemVmxVmexitApicAccess(pVCpu, offAccess, fAccess);
4645
4646 if (fAccess & IEM_ACCESS_TYPE_WRITE)
4647 {
4648 /*
4649 * A write access to the APIC-access page that is virtualized (rather than
4650 * causing a VM-exit) writes data to the virtual-APIC page.
4651 */
4652 uint32_t const u32Data = *(uint32_t *)pvData;
4653 iemVmxVirtApicWriteRaw32(pVCpu, offAccess, u32Data);
4654
4655 /*
4656 * Record the currently updated APIC offset, as we need this later for figuring
4657         * out whether to perform TPR, EOI or self-IPI virtualization as well
4658 * as for supplying the exit qualification when causing an APIC-write VM-exit.
4659 *
4660 * After completion of the current operation, we need to perform TPR virtualization,
4661 * EOI virtualization or APIC-write VM-exit depending on which register was written.
4662 *
4663 * The current operation may be a REP-prefixed string instruction, execution of any
4664 * other instruction, or delivery of an event through the IDT.
4665 *
4666 * Thus things like clearing bytes 3:1 of the VTPR, clearing VEOI are not to be
4667 * performed now but later after completion of the current operation.
4668 *
4669 * See Intel spec. 29.4.3.2 "APIC-Write Emulation".
4670 */
4671 iemVmxVirtApicSetPendingWrite(pVCpu, offAccess);
4672
4673 LogFlowFunc(("Write access at offset %#x not intercepted -> Wrote %#RX32\n", offAccess, u32Data));
4674 }
4675 else
4676 {
4677 /*
4678 * A read access from the APIC-access page that is virtualized (rather than
4679 * causing a VM-exit) returns data from the virtual-APIC page.
4680 *
4681 * See Intel spec. 29.4.2 "Virtualizing Reads from the APIC-Access Page".
4682 */
4683 Assert(fAccess & IEM_ACCESS_TYPE_READ);
4684
4685 Assert(cbAccess <= 4);
4686 Assert(offAccess < XAPIC_OFF_END + 4);
4687 static uint32_t const s_auAccessSizeMasks[] = { 0, 0xff, 0xffff, 0xffffff, 0xffffffff };
4688
4689 uint32_t u32Data = iemVmxVirtApicReadRaw32(pVCpu, offAccess);
4690 u32Data &= s_auAccessSizeMasks[cbAccess];
4691 *(uint32_t *)pvData = u32Data;
4692
4693 LogFlowFunc(("Read access at offset %#x not intercepted -> Read %#RX32\n", offAccess, u32Data));
4694 }
4695
4696 return VINF_VMX_MODIFIES_BEHAVIOR;
4697}
4698
4699
4700/**
4701 * Virtualizes an MSR-based APIC read access.
4702 *
4703 * @returns VBox strict status code.
4704 * @retval VINF_VMX_MODIFIES_BEHAVIOR if the MSR read was virtualized.
4705 * @retval VINF_VMX_INTERCEPT_NOT_ACTIVE if the MSR read access must be
4706 * handled by the x2APIC device.
4707 * @retval  VERR_OUT_OF_RANGE if the MSR read was supposed to be virtualized but was
4708 * not within the range of valid MSRs, caller must raise \#GP(0).
4709 * @param pVCpu The cross context virtual CPU structure.
4710 * @param idMsr The x2APIC MSR being read.
4711 * @param pu64Value Where to store the read x2APIC MSR value (only valid when
4712 * VINF_VMX_MODIFIES_BEHAVIOR is returned).
4713 */
4714static VBOXSTRICTRC iemVmxVirtApicAccessMsrRead(PVMCPUCC pVCpu, uint32_t idMsr, uint64_t *pu64Value) RT_NOEXCEPT
4715{
4716 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_X2APIC_MODE);
4717 Assert(pu64Value);
4718
4719 if (pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32ProcCtls2 & VMX_PROC_CTLS2_APIC_REG_VIRT)
4720 {
4721 if ( idMsr >= MSR_IA32_X2APIC_START
4722 && idMsr <= MSR_IA32_X2APIC_END)
4723 {
4724 uint16_t const offReg = (idMsr & 0xff) << 4;
4725 uint64_t const u64Value = iemVmxVirtApicReadRaw64(pVCpu, offReg);
4726 *pu64Value = u64Value;
4727 return VINF_VMX_MODIFIES_BEHAVIOR;
4728 }
4729 return VERR_OUT_OF_RANGE;
4730 }
4731
4732 if (idMsr == MSR_IA32_X2APIC_TPR)
4733 {
4734 uint16_t const offReg = (idMsr & 0xff) << 4;
4735 uint64_t const u64Value = iemVmxVirtApicReadRaw64(pVCpu, offReg);
4736 *pu64Value = u64Value;
4737 return VINF_VMX_MODIFIES_BEHAVIOR;
4738 }
4739
4740 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
4741}
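
/*
 * Illustrative note on the '(idMsr & 0xff) << 4' mapping used above: each x2APIC MSR maps
 * to a 16-byte aligned slot on the virtual-APIC page. Assuming the usual x2APIC MSR
 * numbering (0x800 based), e.g.:
 *
 *     MSR_IA32_X2APIC_TPR  (0x808) -> (0x08 << 4) = 0x080 = XAPIC_OFF_TPR
 *     MSR_IA32_X2APIC_EOI  (0x80b) -> (0x0b << 4) = 0x0b0 = XAPIC_OFF_EOI
 *     MSR_IA32_X2APIC_ISR0 (0x810) -> (0x10 << 4) = 0x100 = XAPIC_OFF_ISR0
 */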
4742
4743
4744/**
4745 * Virtualizes an MSR-based APIC write access.
4746 *
4747 * @returns VBox strict status code.
4748 * @retval VINF_VMX_MODIFIES_BEHAVIOR if the MSR write was virtualized.
4749 * @retval  VERR_OUT_OF_RANGE if the MSR write was supposed to be virtualized but was
4750 * not within the range of valid MSRs, caller must raise \#GP(0).
4751 * @retval VINF_VMX_INTERCEPT_NOT_ACTIVE if the MSR must be written normally.
4752 *
4753 * @param pVCpu The cross context virtual CPU structure.
4754 * @param idMsr The x2APIC MSR being written.
4755 * @param u64Value The value of the x2APIC MSR being written.
4756 */
4757static VBOXSTRICTRC iemVmxVirtApicAccessMsrWrite(PVMCPUCC pVCpu, uint32_t idMsr, uint64_t u64Value) RT_NOEXCEPT
4758{
4759 /*
4760 * Check if the access is to be virtualized.
4761 * See Intel spec. 29.5 "Virtualizing MSR-based APIC Accesses".
4762 */
4763 if ( idMsr == MSR_IA32_X2APIC_TPR
4764 || ( (pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY)
4765 && ( idMsr == MSR_IA32_X2APIC_EOI
4766 || idMsr == MSR_IA32_X2APIC_SELF_IPI)))
4767 {
4768 /* Validate the MSR write depending on the register. */
4769 switch (idMsr)
4770 {
4771 case MSR_IA32_X2APIC_TPR:
4772 case MSR_IA32_X2APIC_SELF_IPI:
4773 {
4774 if (u64Value & UINT64_C(0xffffffffffffff00))
4775 return VERR_OUT_OF_RANGE;
4776 break;
4777 }
4778 case MSR_IA32_X2APIC_EOI:
4779 {
4780 if (u64Value != 0)
4781 return VERR_OUT_OF_RANGE;
4782 break;
4783 }
4784 }
4785
4786 /* Write the MSR to the virtual-APIC page. */
4787 uint16_t const offReg = (idMsr & 0xff) << 4;
4788 iemVmxVirtApicWriteRaw64(pVCpu, offReg, u64Value);
4789
4790 /*
4791 * Record the currently updated APIC offset, as we need this later for figuring
4792         * out whether to perform TPR, EOI or self-IPI virtualization as well
4793 * as for supplying the exit qualification when causing an APIC-write VM-exit.
4794 */
4795 iemVmxVirtApicSetPendingWrite(pVCpu, offReg);
4796
4797 return VINF_VMX_MODIFIES_BEHAVIOR;
4798 }
4799
4800 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
4801}
4802
4803
4804/**
4805 * Interface for HM and EM to virtualize x2APIC MSR accesses.
4806 *
4807 * @returns Strict VBox status code.
4808 * @retval VINF_VMX_MODIFIES_BEHAVIOR if the MSR access was virtualized.
4809 * @retval VINF_VMX_INTERCEPT_NOT_ACTIVE if the MSR access must be handled by
4810 * the x2APIC device.
4811 * @retval  VERR_OUT_OF_RANGE if the caller must raise \#GP(0).
4812 *
4813 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
4814 * @param   idMsr       The MSR being read or written.
4815 * @param pu64Value Pointer to the value being written or where to store the
4816 * value being read.
4817 * @param fWrite Whether this is an MSR write or read access.
4818 * @thread EMT(pVCpu)
4819 */
4820VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVirtApicAccessMsr(PVMCPUCC pVCpu, uint32_t idMsr, uint64_t *pu64Value, bool fWrite)
4821{
4822 Assert(pu64Value);
4823
4824 VBOXSTRICTRC rcStrict;
4825 if (fWrite)
4826 rcStrict = iemVmxVirtApicAccessMsrWrite(pVCpu, idMsr, *pu64Value);
4827 else
4828 rcStrict = iemVmxVirtApicAccessMsrRead(pVCpu, idMsr, pu64Value);
4829 Assert(!pVCpu->iem.s.cActiveMappings);
4830 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
4832}
4833
4834
4835/**
4836 * Finds the most significant set bit in a virtual-APIC 256-bit sparse register.
4837 *
4838 * @returns VBox status code.
4839 * @retval VINF_SUCCESS when the highest set bit is found.
4840 * @retval VERR_NOT_FOUND when no bit is set.
4841 *
4842 * @param pVCpu The cross context virtual CPU structure.
4843 * @param offReg The offset of the APIC 256-bit sparse register.
4844 * @param pidxHighestBit Where to store the highest bit (most significant bit)
4845 * set in the register. Only valid when VINF_SUCCESS is
4846 * returned.
4847 *
4848 * @remarks The format of the 256-bit sparse register here mirrors that found in
4849 * real APIC hardware.
4850 */
4851static int iemVmxVirtApicGetHighestSetBitInReg(PVMCPUCC pVCpu, uint16_t offReg, uint8_t *pidxHighestBit)
4852{
4853 Assert(offReg < XAPIC_OFF_END + 4);
4854 Assert(pidxHighestBit);
4855
4856 /*
4857 * There are 8 contiguous fragments (of 16-bytes each) in the sparse register.
4858 * However, in each fragment only the first 4 bytes are used.
4859 */
4860 uint8_t const cFrags = 8;
4861    for (int8_t iFrag = cFrags - 1; iFrag >= 0; iFrag--)
4862 {
4863 uint16_t const offFrag = iFrag * 16;
4864 uint32_t const u32Frag = iemVmxVirtApicReadRaw32(pVCpu, offReg + offFrag);
4865 if (!u32Frag)
4866 continue;
4867
4868 unsigned idxHighestBit = ASMBitLastSetU32(u32Frag);
4869 Assert(idxHighestBit > 0);
4870 --idxHighestBit;
4871 Assert(idxHighestBit <= UINT8_MAX);
4872        *pidxHighestBit = (uint8_t)(iFrag * 32 + idxHighestBit); /* Bit index within the whole 256-bit register. */
4873 return VINF_SUCCESS;
4874 }
4875 return VERR_NOT_FOUND;
4876}
4877
4878
4879/**
4880 * Evaluates pending virtual interrupts.
4881 *
4882 * @param pVCpu The cross context virtual CPU structure.
4883 */
4884static void iemVmxEvalPendingVirtIntrs(PVMCPUCC pVCpu) RT_NOEXCEPT
4885{
4886 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY);
4887
4888 if (!(pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32ProcCtls & VMX_PROC_CTLS_INT_WINDOW_EXIT))
4889 {
4890 uint8_t const uRvi = RT_LO_U8(pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u16GuestIntStatus);
4891 uint8_t const uPpr = iemVmxVirtApicReadRaw32(pVCpu, XAPIC_OFF_PPR);
4892
4893 if ((uRvi >> 4) > (uPpr >> 4))
4894 {
4895 Log2(("eval_virt_intrs: uRvi=%#x uPpr=%#x - Signalling pending interrupt\n", uRvi, uPpr));
4896 VMCPU_FF_SET(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST);
4897 }
4898 else
4899 Log2(("eval_virt_intrs: uRvi=%#x uPpr=%#x - Nothing to do\n", uRvi, uPpr));
4900 }
4901}
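
/*
 * Illustrative example of the priority comparison above; only the upper nibble (the
 * priority class) of RVI and PPR matters:
 *
 *     uRvi = 0x51 (class 5), uPpr = 0x4f (class 4) -> 5 > 4  -> pending virtual interrupt signalled.
 *     uRvi = 0x45 (class 4), uPpr = 0x4f (class 4) -> 4 == 4 -> nothing to do.
 */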
4902
4903
4904/**
4905 * Performs PPR virtualization.
4906 *
4908 * @param pVCpu The cross context virtual CPU structure.
4909 */
4910static void iemVmxPprVirtualization(PVMCPUCC pVCpu) RT_NOEXCEPT
4911{
4912 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW);
4913 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY);
4914
4915 /*
4916 * PPR virtualization is caused in response to a VM-entry, TPR-virtualization,
4917 * or EOI-virtualization.
4918 *
4919 * See Intel spec. 29.1.3 "PPR Virtualization".
4920 */
4921 uint8_t const uTpr = iemVmxVirtApicReadRaw32(pVCpu, XAPIC_OFF_TPR);
4922 uint8_t const uSvi = RT_HI_U8(pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u16GuestIntStatus) & 0xf0;
4923
4924 uint32_t uPpr;
4925 if ((uTpr & 0xf0) >= uSvi)
4926 uPpr = uTpr;
4927 else
4928 uPpr = uSvi;
4929
4930 Log2(("ppr_virt: uTpr=%#x uSvi=%#x uPpr=%#x\n", uTpr, uSvi, uPpr));
4931 iemVmxVirtApicWriteRaw32(pVCpu, XAPIC_OFF_PPR, uPpr);
4932}
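
/*
 * Illustrative example of the PPR computation above; VPPR becomes whichever of the VTPR
 * and the in-service class (SVI bits 7:4) is higher:
 *
 *     uTpr = 0x30, uSvi & 0xf0 = 0x50 -> 0x30 <  0x50 -> VPPR = 0x50
 *     uTpr = 0x65, uSvi & 0xf0 = 0x50 -> 0x60 >= 0x50 -> VPPR = 0x65
 */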
4933
4934
4935/**
4936 * Performs VMX TPR virtualization.
4937 *
4938 * @returns VBox strict status code.
4939 * @param pVCpu The cross context virtual CPU structure.
4940 */
4941static VBOXSTRICTRC iemVmxTprVirtualization(PVMCPUCC pVCpu) RT_NOEXCEPT
4942{
4943 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW);
4944
4945 /*
4946 * We should have already performed the virtual-APIC write to the TPR offset
4947 * in the virtual-APIC page. We now perform TPR virtualization.
4948 *
4949 * See Intel spec. 29.1.2 "TPR Virtualization".
4950 */
4951 if (!(pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY))
4952 {
4953 uint32_t const uTprThreshold = pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32TprThreshold;
4954 uint32_t const uTpr = iemVmxVirtApicReadRaw32(pVCpu, XAPIC_OFF_TPR);
4955
4956 /*
4957 * If the VTPR falls below the TPR threshold, we must cause a VM-exit.
4958 * See Intel spec. 29.1.2 "TPR Virtualization".
4959 */
4960 if (((uTpr >> 4) & 0xf) < uTprThreshold)
4961 {
4962 Log2(("tpr_virt: uTpr=%u uTprThreshold=%u -> VM-exit\n", uTpr, uTprThreshold));
4963 return iemVmxVmexit(pVCpu, VMX_EXIT_TPR_BELOW_THRESHOLD, 0 /* u64ExitQual */);
4964 }
4965 }
4966 else
4967 {
4968 iemVmxPprVirtualization(pVCpu);
4969 iemVmxEvalPendingVirtIntrs(pVCpu);
4970 }
4971
4972 return VINF_SUCCESS;
4973}
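
/*
 * Illustrative example of the TPR-threshold check above (without virtual-interrupt
 * delivery); bits 7:4 of the VTPR are compared against the VMCS TPR-threshold field:
 *
 *     uTpr = 0x2f (class 2), uTprThreshold = 3 -> 2 <  3 -> TPR-below-threshold VM-exit.
 *     uTpr = 0x40 (class 4), uTprThreshold = 3 -> 4 >= 3 -> no VM-exit.
 */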
4974
4975
4976/**
4977 * Checks whether an EOI write for the given interrupt vector causes a VM-exit or
4978 * not.
4979 *
4980 * @returns @c true if the EOI write is intercepted, @c false otherwise.
4981 * @param pVCpu The cross context virtual CPU structure.
4982 * @param uVector The interrupt that was acknowledged using an EOI.
4983 */
4984static bool iemVmxIsEoiInterceptSet(PCVMCPU pVCpu, uint8_t uVector) RT_NOEXCEPT
4985{
4986 PCVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
4987 Assert(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY);
4988
4989 if (uVector < 64)
4990 return RT_BOOL(pVmcs->u64EoiExitBitmap0.u & RT_BIT_64(uVector));
4991    if (uVector < 128)
4992        return RT_BOOL(pVmcs->u64EoiExitBitmap1.u & RT_BIT_64(uVector - 64));
4993    if (uVector < 192)
4994        return RT_BOOL(pVmcs->u64EoiExitBitmap2.u & RT_BIT_64(uVector - 128));
4995    return RT_BOOL(pVmcs->u64EoiExitBitmap3.u & RT_BIT_64(uVector - 192));
4996}
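
/*
 * Illustrative example of the EOI-exit bitmap lookup above; the four 64-bit bitmap fields
 * together cover vectors 0..255, e.g. vector 0x45 (69) falls into EOI_EXIT_BITMAP1 and is
 * tested against RT_BIT_64(69 - 64) = RT_BIT_64(5).
 */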
4997
4998
4999/**
5000 * Performs EOI virtualization.
5001 *
5002 * @returns VBox strict status code.
5003 * @param pVCpu The cross context virtual CPU structure.
5004 */
5005static VBOXSTRICTRC iemVmxEoiVirtualization(PVMCPUCC pVCpu) RT_NOEXCEPT
5006{
5007 PVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
5008 Assert(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY);
5009
5010 /*
5011     * Clear the interrupt from the guest in-service register (ISR)
5012 * and get the next guest-interrupt that's in-service (if any).
5013 *
5014 * See Intel spec. 29.1.4 "EOI Virtualization".
5015 */
5016 uint8_t const uRvi = RT_LO_U8(pVmcs->u16GuestIntStatus);
5017 uint8_t const uSvi = RT_HI_U8(pVmcs->u16GuestIntStatus);
5018 Log2(("eoi_virt: uRvi=%#x uSvi=%#x\n", uRvi, uSvi));
5019
5020 uint8_t uVector = uSvi;
5021 iemVmxVirtApicClearVectorInReg(pVCpu, XAPIC_OFF_ISR0, uVector);
5022
5023 uVector = 0;
5024 iemVmxVirtApicGetHighestSetBitInReg(pVCpu, XAPIC_OFF_ISR0, &uVector);
5025
5026 if (uVector)
5027 Log2(("eoi_virt: next interrupt %#x\n", uVector));
5028 else
5029 Log2(("eoi_virt: no interrupt pending in ISR\n"));
5030
5031 /* Update guest-interrupt status SVI (leave RVI portion as it is) in the VMCS. */
5032 pVmcs->u16GuestIntStatus = RT_MAKE_U16(uRvi, uVector);
5033
5034 iemVmxPprVirtualization(pVCpu);
5035 if (iemVmxIsEoiInterceptSet(pVCpu, uVector))
5036 return iemVmxVmexit(pVCpu, VMX_EXIT_VIRTUALIZED_EOI, uVector);
5037 iemVmxEvalPendingVirtIntrs(pVCpu);
5038 return VINF_SUCCESS;
5039}
5040
5041
5042/**
5043 * Performs self-IPI virtualization.
5044 *
5045 * @returns VBox strict status code.
5046 * @param pVCpu The cross context virtual CPU structure.
5047 */
5048static VBOXSTRICTRC iemVmxSelfIpiVirtualization(PVMCPUCC pVCpu) RT_NOEXCEPT
5049{
5050 PVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
5051 Assert(pVmcs->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW);
5052
5053 /*
5054 * We should have already performed the virtual-APIC write to the self-IPI offset
5055 * in the virtual-APIC page. We now perform self-IPI virtualization.
5056 *
5057 * See Intel spec. 29.1.5 "Self-IPI Virtualization".
5058 */
5059 uint8_t const uVector = iemVmxVirtApicReadRaw32(pVCpu, XAPIC_OFF_ICR_LO);
5060 Log2(("self_ipi_virt: uVector=%#x\n", uVector));
5061 iemVmxVirtApicSetVectorInReg(pVCpu, XAPIC_OFF_IRR0, uVector);
5062 uint8_t const uRvi = RT_LO_U8(pVmcs->u16GuestIntStatus);
5063 uint8_t const uSvi = RT_HI_U8(pVmcs->u16GuestIntStatus);
5064 if (uVector > uRvi)
5065 pVmcs->u16GuestIntStatus = RT_MAKE_U16(uVector, uSvi);
5066 iemVmxEvalPendingVirtIntrs(pVCpu);
5067 return VINF_SUCCESS;
5068}
5069
5070
5071/**
5072 * Performs VMX APIC-write emulation.
5073 *
5074 * @returns VBox strict status code.
5075 * @param pVCpu The cross context virtual CPU structure.
5076 */
5077VBOXSTRICTRC iemVmxApicWriteEmulation(PVMCPUCC pVCpu) RT_NOEXCEPT
5078{
5079 PCVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
5080
5081 /* Import the virtual-APIC write offset (part of the hardware-virtualization state). */
5082 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_HWVIRT);
5083
5084 /*
5085 * Perform APIC-write emulation based on the virtual-APIC register written.
5086 * See Intel spec. 29.4.3.2 "APIC-Write Emulation".
5087 */
5088 uint16_t const offApicWrite = iemVmxVirtApicClearPendingWrite(pVCpu);
5089 VBOXSTRICTRC rcStrict;
5090 switch (offApicWrite)
5091 {
5092 case XAPIC_OFF_TPR:
5093 {
5094 /* Clear bytes 3:1 of the VTPR and perform TPR virtualization. */
5095 uint32_t uTpr = iemVmxVirtApicReadRaw32(pVCpu, XAPIC_OFF_TPR);
5096 uTpr &= UINT32_C(0x000000ff);
5097 iemVmxVirtApicWriteRaw32(pVCpu, XAPIC_OFF_TPR, uTpr);
5098 Log2(("iemVmxApicWriteEmulation: TPR write %#x\n", uTpr));
5099 rcStrict = iemVmxTprVirtualization(pVCpu);
5100 break;
5101 }
5102
5103 case XAPIC_OFF_EOI:
5104 {
5105 if (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY)
5106 {
5107 /* Clear VEOI and perform EOI virtualization. */
5108 iemVmxVirtApicWriteRaw32(pVCpu, XAPIC_OFF_EOI, 0);
5109 Log2(("iemVmxApicWriteEmulation: EOI write\n"));
5110 rcStrict = iemVmxEoiVirtualization(pVCpu);
5111 }
5112 else
5113 rcStrict = iemVmxVmexitApicWrite(pVCpu, offApicWrite);
5114 break;
5115 }
5116
5117 case XAPIC_OFF_ICR_LO:
5118 {
5119 if (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY)
5120 {
5121 /* If the ICR_LO is valid, write it and perform self-IPI virtualization. */
5122                uint32_t const uIcrLo = iemVmxVirtApicReadRaw32(pVCpu, XAPIC_OFF_ICR_LO);
5123 uint32_t const fIcrLoMb0 = UINT32_C(0xfffbb700);
5124 uint32_t const fIcrLoMb1 = UINT32_C(0x000000f0);
5125 if ( !(uIcrLo & fIcrLoMb0)
5126 && (uIcrLo & fIcrLoMb1))
5127 {
5128 Log2(("iemVmxApicWriteEmulation: Self-IPI virtualization with vector %#x\n", (uIcrLo & 0xff)));
5129 rcStrict = iemVmxSelfIpiVirtualization(pVCpu);
5130 }
5131 else
5132 rcStrict = iemVmxVmexitApicWrite(pVCpu, offApicWrite);
5133 }
5134 else
5135 rcStrict = iemVmxVmexitApicWrite(pVCpu, offApicWrite);
5136 break;
5137 }
5138
5139 case XAPIC_OFF_ICR_HI:
5140 {
5141 /* Clear bytes 2:0 of VICR_HI. No other virtualization or VM-exit must occur. */
5142 uint32_t uIcrHi = iemVmxVirtApicReadRaw32(pVCpu, XAPIC_OFF_ICR_HI);
5143 uIcrHi &= UINT32_C(0xff000000);
5144 iemVmxVirtApicWriteRaw32(pVCpu, XAPIC_OFF_ICR_HI, uIcrHi);
5145 rcStrict = VINF_SUCCESS;
5146 break;
5147 }
5148
5149 default:
5150 {
5151 /* Writes to any other virtual-APIC register causes an APIC-write VM-exit. */
5152 rcStrict = iemVmxVmexitApicWrite(pVCpu, offApicWrite);
5153 break;
5154 }
5155 }
5156
5157 return rcStrict;
5158}
5159
5160
5161/**
5162 * Interface for HM and EM to perform an APIC-write emulation which may cause a
5163 * VM-exit.
5164 *
5165 * @returns Strict VBox status code.
5166 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
5167 * @thread EMT(pVCpu)
5168 */
5169VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitApicWrite(PVMCPUCC pVCpu)
5170{
5171 VBOXSTRICTRC rcStrict = iemVmxApicWriteEmulation(pVCpu);
5172 Assert(!pVCpu->iem.s.cActiveMappings);
5173 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
5174}
5175
5176
5177/**
5178 * Checks guest control registers, debug registers and MSRs as part of VM-entry.
5179 *
5180 * @param pVCpu The cross context virtual CPU structure.
5181 * @param pszInstr The VMX instruction name (for logging purposes).
5182 */
5183DECLINLINE(int) iemVmxVmentryCheckGuestControlRegsMsrs(PVMCPUCC pVCpu, const char *pszInstr)
5184{
5185 /*
5186 * Guest Control Registers, Debug Registers, and MSRs.
5187 * See Intel spec. 26.3.1.1 "Checks on Guest Control Registers, Debug Registers, and MSRs".
5188 */
5189 PCVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
5190 const char * const pszFailure = "VM-exit";
5191 bool const fUnrestrictedGuest = RT_BOOL(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_UNRESTRICTED_GUEST);
5192
5193 /* CR0 reserved bits. */
5194 {
5195 /* CR0 MB1 bits. */
5196 uint64_t const u64Cr0Fixed0 = iemVmxGetCr0Fixed0(pVCpu, true /* fVmxNonRootMode */);
5197 if ((pVmcs->u64GuestCr0.u & u64Cr0Fixed0) == u64Cr0Fixed0)
5198 { /* likely */ }
5199 else
5200 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestCr0Fixed0);
5201
5202 /* CR0 MBZ bits. */
5203 uint64_t const u64Cr0Fixed1 = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Cr0Fixed1;
5204 if (!(pVmcs->u64GuestCr0.u & ~u64Cr0Fixed1))
5205 { /* likely */ }
5206 else
5207 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestCr0Fixed1);
5208
5209        /* Without unrestricted guest support, VT-x does not support unpaged protected mode. */
5210 if ( !fUnrestrictedGuest
5211 && (pVmcs->u64GuestCr0.u & X86_CR0_PG)
5212 && !(pVmcs->u64GuestCr0.u & X86_CR0_PE))
5213 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestCr0PgPe);
5214 }
5215
5216 /* CR4 reserved bits. */
5217 {
5218 /* CR4 MB1 bits. */
5219 uint64_t const u64Cr4Fixed0 = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Cr4Fixed0;
5220 if ((pVmcs->u64GuestCr4.u & u64Cr4Fixed0) == u64Cr4Fixed0)
5221 { /* likely */ }
5222 else
5223 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestCr4Fixed0);
5224
5225 /* CR4 MBZ bits. */
5226 uint64_t const u64Cr4Fixed1 = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Cr4Fixed1;
5227 if (!(pVmcs->u64GuestCr4.u & ~u64Cr4Fixed1))
5228 { /* likely */ }
5229 else
5230 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestCr4Fixed1);
5231 }
5232
5233 /* DEBUGCTL MSR. */
5234 if ( !(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
5235 || !(pVmcs->u64GuestDebugCtlMsr.u & ~MSR_IA32_DEBUGCTL_VALID_MASK_INTEL))
5236 { /* likely */ }
5237 else
5238 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestDebugCtl);
5239
5240 /* 64-bit CPU checks. */
5241 bool const fGstInLongMode = RT_BOOL(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
5242 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
5243 {
5244 if (fGstInLongMode)
5245 {
5246 /* PAE must be set. */
5247 if ( (pVmcs->u64GuestCr0.u & X86_CR0_PG)
5248                && (pVmcs->u64GuestCr4.u & X86_CR4_PAE))
5249 { /* likely */ }
5250 else
5251 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestPae);
5252 }
5253 else
5254 {
5255 /* PCIDE should not be set. */
5256 if (!(pVmcs->u64GuestCr4.u & X86_CR4_PCIDE))
5257 { /* likely */ }
5258 else
5259 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestPcide);
5260 }
5261
5262 /* CR3. */
5263 if (!(pVmcs->u64GuestCr3.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cMaxPhysAddrWidth))
5264 { /* likely */ }
5265 else
5266 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestCr3);
5267
5268 /* DR7. */
5269 if ( !(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
5270 || !(pVmcs->u64GuestDr7.u & X86_DR7_MBZ_MASK))
5271 { /* likely */ }
5272 else
5273 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestDr7);
5274
5275 /* SYSENTER ESP and SYSENTER EIP. */
5276 if ( X86_IS_CANONICAL(pVmcs->u64GuestSysenterEsp.u)
5277 && X86_IS_CANONICAL(pVmcs->u64GuestSysenterEip.u))
5278 { /* likely */ }
5279 else
5280 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSysenterEspEip);
5281 }
5282
5283 /* We don't support IA32_PERF_GLOBAL_CTRL MSR yet. */
5284 Assert(!(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PERF_MSR));
5285
5286 /* PAT MSR. */
5287 if ( !(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PAT_MSR)
5288 || CPUMIsPatMsrValid(pVmcs->u64GuestPatMsr.u))
5289 { /* likely */ }
5290 else
5291 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestPatMsr);
5292
5293 /* EFER MSR. */
5294 if (pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_EFER_MSR)
5295 {
5296 uint64_t const uValidEferMask = CPUMGetGuestEferMsrValidMask(pVCpu->CTX_SUFF(pVM));
5297 if (!(pVmcs->u64GuestEferMsr.u & ~uValidEferMask))
5298 { /* likely */ }
5299 else
5300 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestEferMsrRsvd);
5301
5302 bool const fGstLma = RT_BOOL(pVmcs->u64GuestEferMsr.u & MSR_K6_EFER_LMA);
5303 bool const fGstLme = RT_BOOL(pVmcs->u64GuestEferMsr.u & MSR_K6_EFER_LME);
5304 if ( fGstLma == fGstInLongMode
5305 && ( !(pVmcs->u64GuestCr0.u & X86_CR0_PG)
5306 || fGstLma == fGstLme))
5307 { /* likely */ }
5308 else
5309 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestEferMsr);
5310 }
5311
5312 /* We don't support IA32_BNDCFGS MSR yet. */
5313 Assert(!(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_BNDCFGS_MSR));
5314
5315 NOREF(pszInstr);
5316 NOREF(pszFailure);
5317 return VINF_SUCCESS;
5318}
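
/*
 * Illustrative sketch of the CR0/CR4 fixed-bits pattern used above (the example values
 * are assumptions, the real masks come from the guest's VMX capability MSRs):
 *
 *     fixed0 = 0x80000021 (PG, NE, PE must be one), fixed1 = 0xffffffff (no must-be-zero bits)
 *     (uGuestCr0 & fixed0) == fixed0  ->  every must-be-one bit is set
 *     (uGuestCr0 & ~fixed1) == 0      ->  no must-be-zero bit is set
 */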
5319
5320
5321/**
5322 * Checks guest segment registers, LDTR and TR as part of VM-entry.
5323 *
5324 * @param pVCpu The cross context virtual CPU structure.
5325 * @param pszInstr The VMX instruction name (for logging purposes).
5326 */
5327DECLINLINE(int) iemVmxVmentryCheckGuestSegRegs(PVMCPUCC pVCpu, const char *pszInstr)
5328{
5329 /*
5330 * Segment registers.
5331 * See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers".
5332 */
5333 PCVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
5334 const char * const pszFailure = "VM-exit";
5335 bool const fGstInV86Mode = RT_BOOL(pVmcs->u64GuestRFlags.u & X86_EFL_VM);
5336 bool const fUnrestrictedGuest = RT_BOOL(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_UNRESTRICTED_GUEST);
5337 bool const fGstInLongMode = RT_BOOL(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
5338
5339 /* Selectors. */
5340 if ( !fGstInV86Mode
5341 && !fUnrestrictedGuest
5342 && (pVmcs->GuestSs & X86_SEL_RPL) != (pVmcs->GuestCs & X86_SEL_RPL))
5343 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegSelCsSsRpl);
5344
5345 for (unsigned iSegReg = 0; iSegReg < X86_SREG_COUNT; iSegReg++)
5346 {
5347 CPUMSELREG SelReg;
5348 int rc = iemVmxVmcsGetGuestSegReg(pVmcs, iSegReg, &SelReg);
5349 if (RT_LIKELY(rc == VINF_SUCCESS))
5350 { /* likely */ }
5351 else
5352 return rc;
5353
5354 /*
5355 * Virtual-8086 mode checks.
5356 */
5357 if (fGstInV86Mode)
5358 {
5359 /* Base address. */
5360 if (SelReg.u64Base == (uint64_t)SelReg.Sel << 4)
5361 { /* likely */ }
5362 else
5363 {
5364 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegBaseV86(iSegReg);
5365 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
5366 }
5367
5368 /* Limit. */
5369 if (SelReg.u32Limit == 0xffff)
5370 { /* likely */ }
5371 else
5372 {
5373 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegLimitV86(iSegReg);
5374 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
5375 }
5376
5377 /* Attribute. */
5378 if (SelReg.Attr.u == 0xf3)
5379 { /* likely */ }
5380 else
5381 {
5382 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegAttrV86(iSegReg);
5383 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
5384 }
5385
5386 /* We're done; move to checking the next segment. */
5387 continue;
5388 }
5389
5390 /* Checks done by 64-bit CPUs. */
5391 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
5392 {
5393 /* Base address. */
5394 if ( iSegReg == X86_SREG_FS
5395 || iSegReg == X86_SREG_GS)
5396 {
5397 if (X86_IS_CANONICAL(SelReg.u64Base))
5398 { /* likely */ }
5399 else
5400 {
5401 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegBase(iSegReg);
5402 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
5403 }
5404 }
5405 else if (iSegReg == X86_SREG_CS)
5406 {
5407 if (!RT_HI_U32(SelReg.u64Base))
5408 { /* likely */ }
5409 else
5410 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegBaseCs);
5411 }
5412 else
5413 {
5414 if ( SelReg.Attr.n.u1Unusable
5415 || !RT_HI_U32(SelReg.u64Base))
5416 { /* likely */ }
5417 else
5418 {
5419 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegBase(iSegReg);
5420 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
5421 }
5422 }
5423 }
5424
5425 /*
5426 * Checks outside Virtual-8086 mode.
5427 */
5428 uint8_t const uSegType = SelReg.Attr.n.u4Type;
5429 uint8_t const fCodeDataSeg = SelReg.Attr.n.u1DescType;
5430 uint8_t const fUsable = !SelReg.Attr.n.u1Unusable;
5431 uint8_t const uDpl = SelReg.Attr.n.u2Dpl;
5432 uint8_t const fPresent = SelReg.Attr.n.u1Present;
5433 uint8_t const uGranularity = SelReg.Attr.n.u1Granularity;
5434 uint8_t const uDefBig = SelReg.Attr.n.u1DefBig;
5435 uint8_t const fSegLong = SelReg.Attr.n.u1Long;
5436
5437 /* Code or usable segment. */
5438 if ( iSegReg == X86_SREG_CS
5439 || fUsable)
5440 {
5441 /* Reserved bits (bits 31:17 and bits 11:8). */
5442 if (!(SelReg.Attr.u & 0xfffe0f00))
5443 { /* likely */ }
5444 else
5445 {
5446 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegAttrRsvd(iSegReg);
5447 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
5448 }
5449
5450 /* Descriptor type. */
5451 if (fCodeDataSeg)
5452 { /* likely */ }
5453 else
5454 {
5455 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegAttrDescType(iSegReg);
5456 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
5457 }
5458
5459 /* Present. */
5460 if (fPresent)
5461 { /* likely */ }
5462 else
5463 {
5464 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegAttrPresent(iSegReg);
5465 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
5466 }
5467
5468 /* Granularity. */
5469 if ( ((SelReg.u32Limit & 0x00000fff) == 0x00000fff || !uGranularity)
5470 && ((SelReg.u32Limit & 0xfff00000) == 0x00000000 || uGranularity))
5471 { /* likely */ }
5472 else
5473 {
5474 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegAttrGran(iSegReg);
5475 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
5476 }
5477 }
5478
5479 if (iSegReg == X86_SREG_CS)
5480 {
5481 /* Segment Type and DPL. */
5482 if ( uSegType == (X86_SEL_TYPE_RW | X86_SEL_TYPE_ACCESSED)
5483 && fUnrestrictedGuest)
5484 {
5485 if (uDpl == 0)
5486 { /* likely */ }
5487 else
5488 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrCsDplZero);
5489 }
5490 else if ( uSegType == (X86_SEL_TYPE_CODE | X86_SEL_TYPE_ACCESSED)
5491 || uSegType == (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ | X86_SEL_TYPE_ACCESSED))
5492 {
5493 X86DESCATTR AttrSs; AttrSs.u = pVmcs->u32GuestSsAttr;
5494 if (uDpl == AttrSs.n.u2Dpl)
5495 { /* likely */ }
5496 else
5497 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrCsDplEqSs);
5498 }
5499 else if ((uSegType & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF | X86_SEL_TYPE_ACCESSED))
5500 == (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF | X86_SEL_TYPE_ACCESSED))
5501 {
5502 X86DESCATTR AttrSs; AttrSs.u = pVmcs->u32GuestSsAttr;
5503 if (uDpl <= AttrSs.n.u2Dpl)
5504 { /* likely */ }
5505 else
5506 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrCsDplLtSs);
5507 }
5508 else
5509 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrCsType);
5510
5511 /* Def/Big. */
5512 if ( fGstInLongMode
5513 && fSegLong)
5514 {
5515 if (uDefBig == 0)
5516 { /* likely */ }
5517 else
5518 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrCsDefBig);
5519 }
5520 }
5521 else if (iSegReg == X86_SREG_SS)
5522 {
5523 /* Segment Type. */
5524 if ( !fUsable
5525 || uSegType == (X86_SEL_TYPE_RW | X86_SEL_TYPE_ACCESSED)
5526 || uSegType == (X86_SEL_TYPE_DOWN | X86_SEL_TYPE_RW | X86_SEL_TYPE_ACCESSED))
5527 { /* likely */ }
5528 else
5529 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrSsType);
5530
5531 /* DPL. */
5532 if (!fUnrestrictedGuest)
5533 {
5534 if (uDpl == (SelReg.Sel & X86_SEL_RPL))
5535 { /* likely */ }
5536 else
5537 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrSsDplEqRpl);
5538 }
5539 X86DESCATTR AttrCs; AttrCs.u = pVmcs->u32GuestCsAttr;
5540 if ( AttrCs.n.u4Type == (X86_SEL_TYPE_RW | X86_SEL_TYPE_ACCESSED)
5541 || !(pVmcs->u64GuestCr0.u & X86_CR0_PE))
5542 {
5543 if (uDpl == 0)
5544 { /* likely */ }
5545 else
5546 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrSsDplZero);
5547 }
5548 }
5549 else
5550 {
5551 /* DS, ES, FS, GS. */
5552 if (fUsable)
5553 {
5554 /* Segment type. */
5555 if (uSegType & X86_SEL_TYPE_ACCESSED)
5556 { /* likely */ }
5557 else
5558 {
5559 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegAttrTypeAcc(iSegReg);
5560 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
5561 }
5562
5563 if ( !(uSegType & X86_SEL_TYPE_CODE)
5564 || (uSegType & X86_SEL_TYPE_READ))
5565 { /* likely */ }
5566 else
5567 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrCsTypeRead);
5568
5569 /* DPL. */
5570 if ( !fUnrestrictedGuest
5571 && uSegType <= (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ | X86_SEL_TYPE_ACCESSED))
5572 {
5573 if (uDpl >= (SelReg.Sel & X86_SEL_RPL))
5574 { /* likely */ }
5575 else
5576 {
5577 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegAttrDplRpl(iSegReg);
5578 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
5579 }
5580 }
5581 }
5582 }
5583 }
5584
5585 /*
5586 * LDTR.
5587 */
5588 {
5589 CPUMSELREG Ldtr;
5590 Ldtr.Sel = pVmcs->GuestLdtr;
5591 Ldtr.u32Limit = pVmcs->u32GuestLdtrLimit;
5592 Ldtr.u64Base = pVmcs->u64GuestLdtrBase.u;
5593 Ldtr.Attr.u = pVmcs->u32GuestLdtrAttr;
5594
5595 if (!Ldtr.Attr.n.u1Unusable)
5596 {
5597 /* Selector. */
5598 if (!(Ldtr.Sel & X86_SEL_LDT))
5599 { /* likely */ }
5600 else
5601 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegSelLdtr);
5602
5603 /* Base. */
5604 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
5605 {
5606 if (X86_IS_CANONICAL(Ldtr.u64Base))
5607 { /* likely */ }
5608 else
5609 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegBaseLdtr);
5610 }
5611
5612 /* Attributes. */
5613 /* Reserved bits (bits 31:17 and bits 11:8). */
5614 if (!(Ldtr.Attr.u & 0xfffe0f00))
5615 { /* likely */ }
5616 else
5617 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrLdtrRsvd);
5618
5619 if (Ldtr.Attr.n.u4Type == X86_SEL_TYPE_SYS_LDT)
5620 { /* likely */ }
5621 else
5622 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrLdtrType);
5623
5624 if (!Ldtr.Attr.n.u1DescType)
5625 { /* likely */ }
5626 else
5627 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrLdtrDescType);
5628
5629 if (Ldtr.Attr.n.u1Present)
5630 { /* likely */ }
5631 else
5632 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrLdtrPresent);
5633
5634 if ( ((Ldtr.u32Limit & 0x00000fff) == 0x00000fff || !Ldtr.Attr.n.u1Granularity)
5635 && ((Ldtr.u32Limit & 0xfff00000) == 0x00000000 || Ldtr.Attr.n.u1Granularity))
5636 { /* likely */ }
5637 else
5638 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrLdtrGran);
5639 }
5640 }
5641
5642 /*
5643 * TR.
5644 */
5645 {
5646 CPUMSELREG Tr;
5647 Tr.Sel = pVmcs->GuestTr;
5648 Tr.u32Limit = pVmcs->u32GuestTrLimit;
5649 Tr.u64Base = pVmcs->u64GuestTrBase.u;
5650 Tr.Attr.u = pVmcs->u32GuestTrAttr;
5651
5652 /* Selector. */
5653 if (!(Tr.Sel & X86_SEL_LDT))
5654 { /* likely */ }
5655 else
5656 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegSelTr);
5657
5658 /* Base. */
5659 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
5660 {
5661 if (X86_IS_CANONICAL(Tr.u64Base))
5662 { /* likely */ }
5663 else
5664 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegBaseTr);
5665 }
5666
5667 /* Attributes. */
5668 /* Reserved bits (bits 31:17 and bits 11:8). */
5669 if (!(Tr.Attr.u & 0xfffe0f00))
5670 { /* likely */ }
5671 else
5672 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrTrRsvd);
5673
5674 if (!Tr.Attr.n.u1Unusable)
5675 { /* likely */ }
5676 else
5677 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrTrUnusable);
5678
5679 if ( Tr.Attr.n.u4Type == X86_SEL_TYPE_SYS_386_TSS_BUSY
5680 || ( !fGstInLongMode
5681 && Tr.Attr.n.u4Type == X86_SEL_TYPE_SYS_286_TSS_BUSY))
5682 { /* likely */ }
5683 else
5684 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrTrType);
5685
5686 if (!Tr.Attr.n.u1DescType)
5687 { /* likely */ }
5688 else
5689 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrTrDescType);
5690
5691 if (Tr.Attr.n.u1Present)
5692 { /* likely */ }
5693 else
5694 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrTrPresent);
5695
5696 if ( ((Tr.u32Limit & 0x00000fff) == 0x00000fff || !Tr.Attr.n.u1Granularity)
5697 && ((Tr.u32Limit & 0xfff00000) == 0x00000000 || Tr.Attr.n.u1Granularity))
5698 { /* likely */ }
5699 else
5700 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrTrGran);
5701 }
5702
5703 NOREF(pszInstr);
5704 NOREF(pszFailure);
5705 return VINF_SUCCESS;
5706}
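
/*
 * Illustrative examples for the granularity checks used above: if bits 11:0 of the limit
 * are not all ones the G bit must be 0, and if any of bits 31:20 are set the G bit must
 * be 1:
 *
 *     u32Limit = 0xffffffff -> bits 11:0 = 0xfff, bits 31:20 != 0 -> G must be 1.
 *     u32Limit = 0x0000ffff -> bits 11:0 = 0xfff, bits 31:20 == 0 -> G may be 0 or 1.
 *     u32Limit = 0x00010000 -> bits 11:0 = 0x000                  -> G must be 0.
 */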
5707
5708
5709/**
5710 * Checks guest GDTR and IDTR as part of VM-entry.
5711 *
5712 * @param pVCpu The cross context virtual CPU structure.
5713 * @param pszInstr The VMX instruction name (for logging purposes).
5714 */
5715DECLINLINE(int) iemVmxVmentryCheckGuestGdtrIdtr(PVMCPUCC pVCpu, const char *pszInstr)
5716{
5717 /*
5718 * GDTR and IDTR.
5719 * See Intel spec. 26.3.1.3 "Checks on Guest Descriptor-Table Registers".
5720 */
5721 PCVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
5722 const char *const pszFailure = "VM-exit";
5723
5724 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
5725 {
5726 /* Base. */
5727 if (X86_IS_CANONICAL(pVmcs->u64GuestGdtrBase.u))
5728 { /* likely */ }
5729 else
5730 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestGdtrBase);
5731
5732 if (X86_IS_CANONICAL(pVmcs->u64GuestIdtrBase.u))
5733 { /* likely */ }
5734 else
5735 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestIdtrBase);
5736 }
5737
5738 /* Limit. */
5739 if (!RT_HI_U16(pVmcs->u32GuestGdtrLimit))
5740 { /* likely */ }
5741 else
5742 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestGdtrLimit);
5743
5744 if (!RT_HI_U16(pVmcs->u32GuestIdtrLimit))
5745 { /* likely */ }
5746 else
5747 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestIdtrLimit);
5748
5749 NOREF(pszInstr);
5750 NOREF(pszFailure);
5751 return VINF_SUCCESS;
5752}
5753
5754
5755/**
5756 * Checks guest RIP and RFLAGS as part of VM-entry.
5757 *
5758 * @param pVCpu The cross context virtual CPU structure.
5759 * @param pszInstr The VMX instruction name (for logging purposes).
5760 */
5761DECLINLINE(int) iemVmxVmentryCheckGuestRipRFlags(PVMCPUCC pVCpu, const char *pszInstr)
5762{
5763 /*
5764 * RIP and RFLAGS.
5765 * See Intel spec. 26.3.1.4 "Checks on Guest RIP and RFLAGS".
5766 */
5767 PCVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
5768 const char *const pszFailure = "VM-exit";
5769 bool const fGstInLongMode = RT_BOOL(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
5770
5771 /* RIP. */
5772 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
5773 {
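        /* Outside 64-bit code (IA-32e mode guest not set or CS.L clear) the upper 32 bits of RIP must be zero; in 64-bit code RIP must be canonical. */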
5774 X86DESCATTR AttrCs;
5775 AttrCs.u = pVmcs->u32GuestCsAttr;
5776 if ( !fGstInLongMode
5777 || !AttrCs.n.u1Long)
5778 {
5779 if (!RT_HI_U32(pVmcs->u64GuestRip.u))
5780 { /* likely */ }
5781 else
5782 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestRipRsvd);
5783 }
5784
5785 if ( fGstInLongMode
5786 && AttrCs.n.u1Long)
5787 {
5788 Assert(IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cMaxLinearAddrWidth == 48); /* Canonical. */
5789 if ( IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cMaxLinearAddrWidth < 64
5790 && X86_IS_CANONICAL(pVmcs->u64GuestRip.u))
5791 { /* likely */ }
5792 else
5793 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestRip);
5794 }
5795 }
5796
5797 /* RFLAGS (bits 63:22 (or 31:22), bits 15, 5, 3 are reserved, bit 1 MB1). */
5798 uint64_t const uGuestRFlags = IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode ? pVmcs->u64GuestRFlags.u
5799 : pVmcs->u64GuestRFlags.s.Lo;
5800 if ( !(uGuestRFlags & ~(X86_EFL_LIVE_MASK | X86_EFL_RA1_MASK))
5801 && (uGuestRFlags & X86_EFL_RA1_MASK) == X86_EFL_RA1_MASK)
5802 { /* likely */ }
5803 else
5804 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestRFlagsRsvd);
5805
5806 if (!(uGuestRFlags & X86_EFL_VM))
5807 { /* likely */ }
5808 else
5809 {
5810 if ( fGstInLongMode
5811 || !(pVmcs->u64GuestCr0.u & X86_CR0_PE))
5812 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestRFlagsVm);
5813 }
5814
5815 if (VMX_ENTRY_INT_INFO_IS_EXT_INT(pVmcs->u32EntryIntInfo))
5816 {
5817 if (uGuestRFlags & X86_EFL_IF)
5818 { /* likely */ }
5819 else
5820 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestRFlagsIf);
5821 }
5822
5823 NOREF(pszInstr);
5824 NOREF(pszFailure);
5825 return VINF_SUCCESS;
5826}
5827
5828
5829/**
5830 * Checks guest non-register state as part of VM-entry.
5831 *
5832 * @param pVCpu The cross context virtual CPU structure.
5833 * @param pszInstr The VMX instruction name (for logging purposes).
5834 */
5835DECLINLINE(int) iemVmxVmentryCheckGuestNonRegState(PVMCPUCC pVCpu, const char *pszInstr)
5836{
5837 /*
5838 * Guest non-register state.
5839 * See Intel spec. 26.3.1.5 "Checks on Guest Non-Register State".
5840 */
5841 PVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
5842 const char *const pszFailure = "VM-exit";
5843
5844 /*
5845 * Activity state.
5846 */
5847 uint64_t const u64GuestVmxMiscMsr = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Misc;
5848 uint32_t const fActivityStateMask = RT_BF_GET(u64GuestVmxMiscMsr, VMX_BF_MISC_ACTIVITY_STATES);
5849    if (!(pVmcs->u32GuestActivityState & ~fActivityStateMask))
5850 { /* likely */ }
5851 else
5852 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestActStateRsvd);
5853
5854 X86DESCATTR AttrSs; AttrSs.u = pVmcs->u32GuestSsAttr;
5855 if ( !AttrSs.n.u2Dpl
5856 || pVmcs->u32GuestActivityState != VMX_VMCS_GUEST_ACTIVITY_HLT)
5857 { /* likely */ }
5858 else
5859 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestActStateSsDpl);
5860
5861 if ( pVmcs->u32GuestIntrState == VMX_VMCS_GUEST_INT_STATE_BLOCK_STI
5862 || pVmcs->u32GuestIntrState == VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS)
5863 {
5864 if (pVmcs->u32GuestActivityState == VMX_VMCS_GUEST_ACTIVITY_ACTIVE)
5865 { /* likely */ }
5866 else
5867 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestActStateStiMovSs);
5868 }
5869
5870 if (VMX_ENTRY_INT_INFO_IS_VALID(pVmcs->u32EntryIntInfo))
5871 {
5872 uint8_t const uType = VMX_ENTRY_INT_INFO_TYPE(pVmcs->u32EntryIntInfo);
5873 uint8_t const uVector = VMX_ENTRY_INT_INFO_VECTOR(pVmcs->u32EntryIntInfo);
5874 AssertCompile(VMX_V_GUEST_ACTIVITY_STATE_MASK == (VMX_VMCS_GUEST_ACTIVITY_HLT | VMX_VMCS_GUEST_ACTIVITY_SHUTDOWN));
5875 switch (pVmcs->u32GuestActivityState)
5876 {
5877 case VMX_VMCS_GUEST_ACTIVITY_HLT:
5878 {
5879 if ( uType == VMX_ENTRY_INT_INFO_TYPE_EXT_INT
5880 || uType == VMX_ENTRY_INT_INFO_TYPE_NMI
5881 || ( uType == VMX_ENTRY_INT_INFO_TYPE_HW_XCPT
5882 && ( uVector == X86_XCPT_DB
5883 || uVector == X86_XCPT_MC))
5884 || ( uType == VMX_ENTRY_INT_INFO_TYPE_OTHER_EVENT
5885 && uVector == VMX_ENTRY_INT_INFO_VECTOR_MTF))
5886 { /* likely */ }
5887 else
5888 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestActStateHlt);
5889 break;
5890 }
5891
5892 case VMX_VMCS_GUEST_ACTIVITY_SHUTDOWN:
5893 {
5894 if ( uType == VMX_ENTRY_INT_INFO_TYPE_NMI
5895 || ( uType == VMX_ENTRY_INT_INFO_TYPE_HW_XCPT
5896 && uVector == X86_XCPT_MC))
5897 { /* likely */ }
5898 else
5899 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestActStateShutdown);
5900 break;
5901 }
5902
5903 case VMX_VMCS_GUEST_ACTIVITY_ACTIVE:
5904 default:
5905 break;
5906 }
5907 }
5908
5909 /*
5910 * Interruptibility state.
5911 */
5912 if (!(pVmcs->u32GuestIntrState & ~VMX_VMCS_GUEST_INT_STATE_MASK))
5913 { /* likely */ }
5914 else
5915 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestIntStateRsvd);
5916
5917 if ((pVmcs->u32GuestIntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS | VMX_VMCS_GUEST_INT_STATE_BLOCK_STI))
5918 != (VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS | VMX_VMCS_GUEST_INT_STATE_BLOCK_STI))
5919 { /* likely */ }
5920 else
5921 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestIntStateStiMovSs);
5922
5923 if ( (pVmcs->u64GuestRFlags.u & X86_EFL_IF)
5924 || !(pVmcs->u32GuestIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI))
5925 { /* likely */ }
5926 else
5927 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestIntStateRFlagsSti);
5928
5929 if (VMX_ENTRY_INT_INFO_IS_VALID(pVmcs->u32EntryIntInfo))
5930 {
5931 uint8_t const uType = VMX_ENTRY_INT_INFO_TYPE(pVmcs->u32EntryIntInfo);
5932 if (uType == VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
5933 {
5934 if (!(pVmcs->u32GuestIntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS | VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)))
5935 { /* likely */ }
5936 else
5937 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestIntStateExtInt);
5938 }
5939 else if (uType == VMX_ENTRY_INT_INFO_TYPE_NMI)
5940 {
5941 if (!(pVmcs->u32GuestIntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS | VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)))
5942 { /* likely */ }
5943 else
5944 {
5945 /*
5946 * We don't support injecting NMIs when blocking-by-STI would be in effect.
5947 * We update the Exit qualification only when blocking-by-STI is set
5948                 * without blocking-by-MovSS being set. Although in practice it does not
5949                 * make much difference since the order of checks is implementation defined.
5950 */
5951 if (!(pVmcs->u32GuestIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS))
5952 iemVmxVmcsSetExitQual(pVCpu, VMX_ENTRY_FAIL_QUAL_NMI_INJECT);
5953 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestIntStateNmi);
5954 }
5955
5956 if ( !(pVmcs->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
5957 || !(pVmcs->u32GuestIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI))
5958 { /* likely */ }
5959 else
5960 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestIntStateVirtNmi);
5961 }
5962 }
5963
5964 /* We don't support SMM yet. So blocking-by-SMIs must not be set. */
5965 if (!(pVmcs->u32GuestIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI))
5966 { /* likely */ }
5967 else
5968 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestIntStateSmi);
5969
5970 /* We don't support SGX yet. So enclave-interruption must not be set. */
5971 if (!(pVmcs->u32GuestIntrState & VMX_VMCS_GUEST_INT_STATE_ENCLAVE))
5972 { /* likely */ }
5973 else
5974 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestIntStateEnclave);
5975
5976 /*
5977 * Pending debug exceptions.
5978 */
5979 uint64_t const uPendingDbgXcpts = IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode
5980 ? pVmcs->u64GuestPendingDbgXcpts.u
5981 : pVmcs->u64GuestPendingDbgXcpts.s.Lo;
5982 if (!(uPendingDbgXcpts & ~VMX_VMCS_GUEST_PENDING_DEBUG_VALID_MASK))
5983 { /* likely */ }
5984 else
5985 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestPndDbgXcptRsvd);
5986
5987 if ( (pVmcs->u32GuestIntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS | VMX_VMCS_GUEST_INT_STATE_BLOCK_STI))
5988 || pVmcs->u32GuestActivityState == VMX_VMCS_GUEST_ACTIVITY_HLT)
5989 {
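        /* When single-stepping (TF=1, BTF=0) the BS bit must be set in the pending debug exceptions, and it must be clear otherwise. */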
5990 if ( (pVmcs->u64GuestRFlags.u & X86_EFL_TF)
5991 && !(pVmcs->u64GuestDebugCtlMsr.u & MSR_IA32_DEBUGCTL_BTF)
5992 && !(uPendingDbgXcpts & VMX_VMCS_GUEST_PENDING_DEBUG_XCPT_BS))
5993 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestPndDbgXcptBsTf);
5994
5995 if ( ( !(pVmcs->u64GuestRFlags.u & X86_EFL_TF)
5996 || (pVmcs->u64GuestDebugCtlMsr.u & MSR_IA32_DEBUGCTL_BTF))
5997 && (uPendingDbgXcpts & VMX_VMCS_GUEST_PENDING_DEBUG_XCPT_BS))
5998 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestPndDbgXcptBsNoTf);
5999 }
6000
6001 /* We don't support RTM (Real-time Transactional Memory) yet. */
6002 if (!(uPendingDbgXcpts & VMX_VMCS_GUEST_PENDING_DEBUG_RTM))
6003 { /* likely */ }
6004 else
6005 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestPndDbgXcptRtm);
6006
6007 /*
6008 * VMCS link pointer.
6009 */
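    /* A VMCS link pointer of all ones (~0) means the field is not in use and no further checks are required. */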
6010 if (pVmcs->u64VmcsLinkPtr.u != UINT64_C(0xffffffffffffffff))
6011 {
6012 RTGCPHYS const GCPhysShadowVmcs = pVmcs->u64VmcsLinkPtr.u;
6013 /* We don't support SMM yet (so VMCS link pointer cannot be the current VMCS). */
6014 if (GCPhysShadowVmcs != IEM_VMX_GET_CURRENT_VMCS(pVCpu))
6015 { /* likely */ }
6016 else
6017 {
6018 iemVmxVmcsSetExitQual(pVCpu, VMX_ENTRY_FAIL_QUAL_VMCS_LINK_PTR);
6019 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VmcsLinkPtrCurVmcs);
6020 }
6021
6022 /* Validate the address. */
6023 if ( !(GCPhysShadowVmcs & X86_PAGE_4K_OFFSET_MASK)
6024 && !(GCPhysShadowVmcs >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
6025 && PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysShadowVmcs))
6026 { /* likely */ }
6027 else
6028 {
6029 iemVmxVmcsSetExitQual(pVCpu, VMX_ENTRY_FAIL_QUAL_VMCS_LINK_PTR);
6030 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrVmcsLinkPtr);
6031 }
6032 }
6033
6034 NOREF(pszInstr);
6035 NOREF(pszFailure);
6036 return VINF_SUCCESS;
6037}
6038
6039
6040#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6041/**
6042 * Checks guest PDPTEs as part of VM-entry.
6043 *
6044 * @param pVCpu The cross context virtual CPU structure.
6045 * @param pszInstr The VMX instruction name (for logging purposes).
6046 */
6047static int iemVmxVmentryCheckGuestPdptes(PVMCPUCC pVCpu, const char *pszInstr) RT_NOEXCEPT
6048{
6049 /*
6050 * Guest PDPTEs.
6051 * See Intel spec. 26.3.1.5 "Checks on Guest Page-Directory-Pointer-Table Entries".
6052 */
6053 PVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
6054 const char * const pszFailure = "VM-exit";
6055
6056 /*
6057 * When EPT is used, we need to validate the PAE PDPTEs provided in the VMCS.
6058 * Otherwise, we load any PAE PDPTEs referenced by CR3 at a later point.
6059 */
6060 if ( iemVmxVmcsIsGuestPaePagingEnabled(pVmcs)
6061 && (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_EPT))
6062 {
6063 /* Get PDPTEs from the VMCS. */
6064 X86PDPE aPaePdptes[X86_PG_PAE_PDPE_ENTRIES];
6065 aPaePdptes[0].u = pVmcs->u64GuestPdpte0.u;
6066 aPaePdptes[1].u = pVmcs->u64GuestPdpte1.u;
6067 aPaePdptes[2].u = pVmcs->u64GuestPdpte2.u;
6068 aPaePdptes[3].u = pVmcs->u64GuestPdpte3.u;
6069
6070 /* Check validity of the PDPTEs. */
6071 if (PGMGstArePaePdpesValid(pVCpu, &aPaePdptes[0]))
6072 { /* likely */ }
6073 else
6074 {
6075 iemVmxVmcsSetExitQual(pVCpu, VMX_ENTRY_FAIL_QUAL_PDPTE);
6076 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestPdpte);
6077 }
6078 }
6079
6080 NOREF(pszFailure);
6081 NOREF(pszInstr);
6082 return VINF_SUCCESS;
6083}
6084#endif /* VBOX_WITH_NESTED_HWVIRT_VMX_EPT */
6085
6086
6087/**
6088 * Checks guest-state as part of VM-entry.
6089 *
6090 * @returns VBox status code.
6091 * @param pVCpu The cross context virtual CPU structure.
6092 * @param pszInstr The VMX instruction name (for logging purposes).
6093 */
6094static int iemVmxVmentryCheckGuestState(PVMCPUCC pVCpu, const char *pszInstr) RT_NOEXCEPT
6095{
6096 int rc = iemVmxVmentryCheckGuestControlRegsMsrs(pVCpu, pszInstr);
6097 if (RT_SUCCESS(rc))
6098 {
6099 rc = iemVmxVmentryCheckGuestSegRegs(pVCpu, pszInstr);
6100 if (RT_SUCCESS(rc))
6101 {
6102 rc = iemVmxVmentryCheckGuestGdtrIdtr(pVCpu, pszInstr);
6103 if (RT_SUCCESS(rc))
6104 {
6105 rc = iemVmxVmentryCheckGuestRipRFlags(pVCpu, pszInstr);
6106 if (RT_SUCCESS(rc))
6107 {
6108 rc = iemVmxVmentryCheckGuestNonRegState(pVCpu, pszInstr);
6109#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6110 if (RT_SUCCESS(rc))
6111 rc = iemVmxVmentryCheckGuestPdptes(pVCpu, pszInstr);
6112#endif
6113 }
6114 }
6115 }
6116 }
6117 return rc;
6118}
6119
6120
6121/**
6122 * Checks host-state as part of VM-entry.
6123 *
6124 * @returns VBox status code.
6125 * @param pVCpu The cross context virtual CPU structure.
6126 * @param pszInstr The VMX instruction name (for logging purposes).
6127 */
6128static int iemVmxVmentryCheckHostState(PVMCPUCC pVCpu, const char *pszInstr) RT_NOEXCEPT
6129{
6130 /*
6131 * Host Control Registers and MSRs.
6132 * See Intel spec. 26.2.2 "Checks on Host Control Registers and MSRs".
6133 */
6134 PCVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
6135 const char * const pszFailure = "VMFail";
6136
6137 /* CR0 reserved bits. */
6138 {
6139 /* CR0 MB1 bits. */
6140 uint64_t const u64Cr0Fixed0 = iemVmxGetCr0Fixed0(pVCpu, true /* fVmxNonRootMode */);
6141 if ((pVmcs->u64HostCr0.u & u64Cr0Fixed0) == u64Cr0Fixed0)
6142 { /* likely */ }
6143 else
6144 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostCr0Fixed0);
6145
6146 /* CR0 MBZ bits. */
6147 uint64_t const u64Cr0Fixed1 = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Cr0Fixed1;
6148 if (!(pVmcs->u64HostCr0.u & ~u64Cr0Fixed1))
6149 { /* likely */ }
6150 else
6151 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostCr0Fixed1);
6152 }
6153
6154 /* CR4 reserved bits. */
6155 {
6156 /* CR4 MB1 bits. */
6157 uint64_t const u64Cr4Fixed0 = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Cr4Fixed0;
6158 if ((pVmcs->u64HostCr4.u & u64Cr4Fixed0) == u64Cr4Fixed0)
6159 { /* likely */ }
6160 else
6161 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostCr4Fixed0);
6162
6163 /* CR4 MBZ bits. */
6164 uint64_t const u64Cr4Fixed1 = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Cr4Fixed1;
6165 if (!(pVmcs->u64HostCr4.u & ~u64Cr4Fixed1))
6166 { /* likely */ }
6167 else
6168 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostCr4Fixed1);
6169 }
6170
6171 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
6172 {
6173 /* CR3 reserved bits. */
6174 if (!(pVmcs->u64HostCr3.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cMaxPhysAddrWidth))
6175 { /* likely */ }
6176 else
6177 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostCr3);
6178
6179 /* SYSENTER ESP and SYSENTER EIP. */
6180 if ( X86_IS_CANONICAL(pVmcs->u64HostSysenterEsp.u)
6181 && X86_IS_CANONICAL(pVmcs->u64HostSysenterEip.u))
6182 { /* likely */ }
6183 else
6184 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostSysenterEspEip);
6185 }
6186
6187 /* We don't support IA32_PERF_GLOBAL_CTRL MSR yet. */
6188 Assert(!(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_LOAD_PERF_MSR));
6189
6190 /* PAT MSR. */
6191 if ( !(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_LOAD_PAT_MSR)
6192 || CPUMIsPatMsrValid(pVmcs->u64HostPatMsr.u))
6193 { /* likely */ }
6194 else
6195 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostPatMsr);
6196
6197 /* EFER MSR. */
6198 bool const fHostInLongMode = RT_BOOL(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE);
6199 uint64_t const uValidEferMask = CPUMGetGuestEferMsrValidMask(pVCpu->CTX_SUFF(pVM));
6200 if (pVmcs->u32ExitCtls & VMX_EXIT_CTLS_LOAD_EFER_MSR)
6201 {
6202 if (!(pVmcs->u64HostEferMsr.u & ~uValidEferMask))
6203 { /* likely */ }
6204 else
6205 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostEferMsrRsvd);
6206
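        /* The host address-space size control must be consistent with both EFER.LMA and EFER.LME. */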
6207 bool const fHostLma = RT_BOOL(pVmcs->u64HostEferMsr.u & MSR_K6_EFER_LMA);
6208 bool const fHostLme = RT_BOOL(pVmcs->u64HostEferMsr.u & MSR_K6_EFER_LME);
6209 if ( fHostInLongMode == fHostLma
6210 && fHostInLongMode == fHostLme)
6211 { /* likely */ }
6212 else
6213 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostEferMsr);
6214 }
6215
6216 /*
6217 * Host Segment and Descriptor-Table Registers.
6218 * See Intel spec. 26.2.3 "Checks on Host Segment and Descriptor-Table Registers".
6219 */
6220 /* Selector RPL and TI. */
6221 if ( !(pVmcs->HostCs & (X86_SEL_RPL | X86_SEL_LDT))
6222 && !(pVmcs->HostSs & (X86_SEL_RPL | X86_SEL_LDT))
6223 && !(pVmcs->HostDs & (X86_SEL_RPL | X86_SEL_LDT))
6224 && !(pVmcs->HostEs & (X86_SEL_RPL | X86_SEL_LDT))
6225 && !(pVmcs->HostFs & (X86_SEL_RPL | X86_SEL_LDT))
6226 && !(pVmcs->HostGs & (X86_SEL_RPL | X86_SEL_LDT))
6227 && !(pVmcs->HostTr & (X86_SEL_RPL | X86_SEL_LDT)))
6228 { /* likely */ }
6229 else
6230 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostSel);
6231
6232 /* CS and TR selectors cannot be 0. */
6233 if ( pVmcs->HostCs
6234 && pVmcs->HostTr)
6235 { /* likely */ }
6236 else
6237 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostCsTr);
6238
6239 /* SS cannot be 0 if 32-bit host. */
6240 if ( fHostInLongMode
6241 || pVmcs->HostSs)
6242 { /* likely */ }
6243 else
6244 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostSs);
6245
6246 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
6247 {
6248 /* FS, GS, GDTR, IDTR, TR base address. */
6249 if ( X86_IS_CANONICAL(pVmcs->u64HostFsBase.u)
6250 && X86_IS_CANONICAL(pVmcs->u64HostGsBase.u)
6251 && X86_IS_CANONICAL(pVmcs->u64HostGdtrBase.u)
6252 && X86_IS_CANONICAL(pVmcs->u64HostIdtrBase.u)
6253 && X86_IS_CANONICAL(pVmcs->u64HostTrBase.u))
6254 { /* likely */ }
6255 else
6256 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostSegBase);
6257 }
6258
6259 /*
6260 * Host address-space size for 64-bit CPUs.
6261 * See Intel spec. 26.2.4 "Checks Related to Address-Space Size".
6262 */
6263 bool const fGstInLongMode = RT_BOOL(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
6264 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
6265 {
6266 bool const fCpuInLongMode = CPUMIsGuestInLongMode(pVCpu);
6267
6268 /* Logical processor in IA-32e mode. */
6269 if (fCpuInLongMode)
6270 {
6271 if (fHostInLongMode)
6272 {
6273 /* PAE must be set. */
6274 if (pVmcs->u64HostCr4.u & X86_CR4_PAE)
6275 { /* likely */ }
6276 else
6277 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostCr4Pae);
6278
6279 /* RIP must be canonical. */
6280 if (X86_IS_CANONICAL(pVmcs->u64HostRip.u))
6281 { /* likely */ }
6282 else
6283 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostRip);
6284 }
6285 else
6286 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostLongMode);
6287 }
6288 else
6289 {
6290 /* Logical processor is outside IA-32e mode. */
6291 if ( !fGstInLongMode
6292 && !fHostInLongMode)
6293 {
6294 /* PCIDE should not be set. */
6295 if (!(pVmcs->u64HostCr4.u & X86_CR4_PCIDE))
6296 { /* likely */ }
6297 else
6298 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostCr4Pcide);
6299
6300 /* The high 32-bits of RIP MBZ. */
6301 if (!pVmcs->u64HostRip.s.Hi)
6302 { /* likely */ }
6303 else
6304 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostRipRsvd);
6305 }
6306 else
6307 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostGuestLongMode);
6308 }
6309 }
6310 else
6311 {
6312 /* Host address-space size for 32-bit CPUs. */
6313 if ( !fGstInLongMode
6314 && !fHostInLongMode)
6315 { /* likely */ }
6316 else
6317 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostGuestLongModeNoCpu);
6318 }
6319
6320 NOREF(pszInstr);
6321 NOREF(pszFailure);
6322 return VINF_SUCCESS;
6323}
6324
6325
6326#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6327/**
6328 * Checks the EPT pointer VMCS field as part of VM-entry.
6329 *
6330 * @returns VBox status code.
6331 * @param pVCpu The cross context virtual CPU structure.
6332 * @param uEptPtr The EPT pointer to check.
6333 * @param penmVmxDiag Where to store the diagnostic reason on failure (not
6334 * updated on success). Optional, can be NULL.
6335 */
6336static int iemVmxVmentryCheckEptPtr(PVMCPUCC pVCpu, uint64_t uEptPtr, VMXVDIAG *penmVmxDiag) RT_NOEXCEPT
6337{
6338 VMXVDIAG enmVmxDiag;
6339
6340 /* Reserved bits. */
6341 uint8_t const cMaxPhysAddrWidth = IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cMaxPhysAddrWidth;
6342 uint64_t const fValidMask = VMX_EPTP_VALID_MASK & ~(UINT64_MAX << cMaxPhysAddrWidth);
6343    if (!(uEptPtr & ~fValidMask))
6344 {
6345 /* Memory Type. */
6346 uint64_t const fCaps = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64EptVpidCaps;
6347 uint8_t const fMemType = RT_BF_GET(uEptPtr, VMX_BF_EPTP_MEMTYPE);
6348 if ( ( fMemType == VMX_EPTP_MEMTYPE_WB
6349 && RT_BF_GET(fCaps, VMX_BF_EPT_VPID_CAP_MEMTYPE_WB))
6350 || ( fMemType == VMX_EPTP_MEMTYPE_UC
6351 && RT_BF_GET(fCaps, VMX_BF_EPT_VPID_CAP_MEMTYPE_UC)))
6352 {
6353 /*
6354 * Page walk length (PML4).
6355 * Intel used to specify bit 7 of IA32_VMX_EPT_VPID_CAP as page walk length
6356             * Intel used to specify bit 7 of IA32_VMX_EPT_VPID_CAP as a page-walk length
6357             * of 5, but that seems to have been removed from the latest specs, leaving only PML4
6358             * as the maximum supported page-walk level. Hence we hardcode it as 3 (1 less than 4).
6359 Assert(RT_BF_GET(fCaps, VMX_BF_EPT_VPID_CAP_PAGE_WALK_LENGTH_4));
6360 if (RT_BF_GET(uEptPtr, VMX_BF_EPTP_PAGE_WALK_LENGTH) == 3)
6361 {
6362 /* Access and dirty bits support in EPT structures. */
6363 if ( !RT_BF_GET(uEptPtr, VMX_BF_EPTP_ACCESS_DIRTY)
6364 || RT_BF_GET(fCaps, VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY))
6365 return VINF_SUCCESS;
6366
6367 enmVmxDiag = kVmxVDiag_Vmentry_EptpAccessDirty;
6368 }
6369 else
6370 enmVmxDiag = kVmxVDiag_Vmentry_EptpPageWalkLength;
6371 }
6372 else
6373 enmVmxDiag = kVmxVDiag_Vmentry_EptpMemType;
6374 }
6375 else
6376 enmVmxDiag = kVmxVDiag_Vmentry_EptpRsvd;
6377
6378 if (penmVmxDiag)
6379 *penmVmxDiag = enmVmxDiag;
6380 return VERR_VMX_VMENTRY_FAILED;
6381}
6382#endif
6383
6384
6385/**
6386 * Checks VMCS controls fields as part of VM-entry.
6387 *
6388 * @returns VBox status code.
6389 * @param pVCpu The cross context virtual CPU structure.
6390 * @param pszInstr The VMX instruction name (for logging purposes).
6391 *
6392 * @remarks This may update secondary processor-based VM-execution control fields
6393 * in the current VMCS if necessary.
6394 */
6395static int iemVmxVmentryCheckCtls(PVMCPUCC pVCpu, const char *pszInstr) RT_NOEXCEPT
6396{
6397 PVMXVVMCS pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
6398 const char * const pszFailure = "VMFail";
6399 bool const fVmxTrueMsrs = RT_BOOL(pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Basic & VMX_BF_BASIC_TRUE_CTLS_MASK);
6400
6401 /*
6402 * VM-execution controls.
6403 * See Intel spec. 26.2.1.1 "VM-Execution Control Fields".
6404 */
6405 {
6406 /* Pin-based VM-execution controls. */
6407 {
6408 VMXCTLSMSR const PinCtls = fVmxTrueMsrs ? pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.TruePinCtls
6409 : pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.PinCtls;
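            /* Allowed-0 settings: every bit set in allowed0 must be set in the control. Allowed-1 settings: no bit may be set that is clear in allowed1. */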
6410 if (!(~pVmcs->u32PinCtls & PinCtls.n.allowed0))
6411 { /* likely */ }
6412 else
6413 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_PinCtlsDisallowed0);
6414
6415 if (!(pVmcs->u32PinCtls & ~PinCtls.n.allowed1))
6416 { /* likely */ }
6417 else
6418 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_PinCtlsAllowed1);
6419 }
6420
6421 /* Processor-based VM-execution controls. */
6422 {
6423 VMXCTLSMSR const ProcCtls = fVmxTrueMsrs ? pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.TrueProcCtls
6424 : pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.ProcCtls;
6425 if (!(~pVmcs->u32ProcCtls & ProcCtls.n.allowed0))
6426 { /* likely */ }
6427 else
6428 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_ProcCtlsDisallowed0);
6429
6430 if (!(pVmcs->u32ProcCtls & ~ProcCtls.n.allowed1))
6431 { /* likely */ }
6432 else
6433 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_ProcCtlsAllowed1);
6434 }
6435
6436 /* Secondary processor-based VM-execution controls. */
6437 if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_USE_SECONDARY_CTLS)
6438 {
6439 VMXCTLSMSR const ProcCtls2 = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.ProcCtls2;
6440 if (!(~pVmcs->u32ProcCtls2 & ProcCtls2.n.allowed0))
6441 { /* likely */ }
6442 else
6443 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_ProcCtls2Disallowed0);
6444
6445 if (!(pVmcs->u32ProcCtls2 & ~ProcCtls2.n.allowed1))
6446 { /* likely */ }
6447 else
6448 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_ProcCtls2Allowed1);
6449 }
6450 else if (pVmcs->u32ProcCtls2)
6451 {
6452 /*
6453             * If the "activate secondary controls" control is clear, then the secondary processor-based
6454             * VM-execution controls are treated as 0.
6455 *
6456 * See Intel spec. 26.2.1.1 "VM-Execution Control Fields".
6457 *
6458 * Since this is a rather rare occurrence (only observed for a few VM-entries with Microsoft Hyper-V
6459 * enabled Windows Server 2008 R2 guest), it's not worth changing every place that reads this control to
6460 * also check the "activate secondary controls" bit. Instead, we temporarily save the guest programmed
6461 * control here, zero out the value the rest of our code uses and restore the guest programmed value
6462 * on VM-exit.
6463 */
6464 pVmcs->u32RestoreProcCtls2 = pVmcs->u32ProcCtls2;
6465 pVmcs->u32ProcCtls2 = 0;
6466 }
6467
6468 /* CR3-target count. */
6469 if (pVmcs->u32Cr3TargetCount <= VMX_V_CR3_TARGET_COUNT)
6470 { /* likely */ }
6471 else
6472 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_Cr3TargetCount);
6473
6474 /* I/O bitmaps physical addresses. */
6475 if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_USE_IO_BITMAPS)
6476 {
6477 RTGCPHYS const GCPhysIoBitmapA = pVmcs->u64AddrIoBitmapA.u;
6478 if ( !(GCPhysIoBitmapA & X86_PAGE_4K_OFFSET_MASK)
6479 && !(GCPhysIoBitmapA >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
6480 && PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysIoBitmapA))
6481 { /* likely */ }
6482 else
6483 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrIoBitmapA);
6484
6485 RTGCPHYS const GCPhysIoBitmapB = pVmcs->u64AddrIoBitmapB.u;
6486 if ( !(GCPhysIoBitmapB & X86_PAGE_4K_OFFSET_MASK)
6487 && !(GCPhysIoBitmapB >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
6488 && PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysIoBitmapB))
6489 { /* likely */ }
6490 else
6491 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrIoBitmapB);
6492 }
6493
6494 /* MSR bitmap physical address. */
6495 if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS)
6496 {
6497 RTGCPHYS const GCPhysMsrBitmap = pVmcs->u64AddrMsrBitmap.u;
6498 if ( !(GCPhysMsrBitmap & X86_PAGE_4K_OFFSET_MASK)
6499 && !(GCPhysMsrBitmap >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
6500 && PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysMsrBitmap))
6501 { /* likely */ }
6502 else
6503 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrMsrBitmap);
6504 }
6505
6506 /* TPR shadow related controls. */
6507 if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
6508 {
6509 /* Virtual-APIC page physical address. */
6510 RTGCPHYS const GCPhysVirtApic = pVmcs->u64AddrVirtApic.u;
6511 if ( !(GCPhysVirtApic & X86_PAGE_4K_OFFSET_MASK)
6512 && !(GCPhysVirtApic >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
6513 && PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVirtApic))
6514 { /* likely */ }
6515 else
6516 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrVirtApicPage);
6517
6518 /* TPR threshold bits 31:4 MBZ without virtual-interrupt delivery. */
6519 if ( !(pVmcs->u32TprThreshold & ~VMX_TPR_THRESHOLD_MASK)
6520 || (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY))
6521 { /* likely */ }
6522 else
6523 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_TprThresholdRsvd);
6524
6525            /* The remaining TPR-shadow related checks (TPR threshold vs. VTPR) are done while loading the VMCS referenced state; see iemVmxVmentryLoadGuestVmcsRefState. */
6526 }
6527 else
6528 {
6529 if ( !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_X2APIC_MODE)
6530 && !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_APIC_REG_VIRT)
6531 && !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY))
6532 { /* likely */ }
6533 else
6534 {
6535 if (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_X2APIC_MODE)
6536 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VirtX2ApicTprShadow);
6537 if (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_APIC_REG_VIRT)
6538 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_ApicRegVirt);
6539 Assert(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY);
6540 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VirtIntDelivery);
6541 }
6542 }
6543
6544 /* NMI exiting and virtual-NMIs. */
6545 if ( (pVmcs->u32PinCtls & VMX_PIN_CTLS_NMI_EXIT)
6546 || !(pVmcs->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI))
6547 { /* likely */ }
6548 else
6549 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VirtNmi);
6550
6551 /* Virtual-NMIs and NMI-window exiting. */
6552 if ( (pVmcs->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
6553 || !(pVmcs->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT))
6554 { /* likely */ }
6555 else
6556 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_NmiWindowExit);
6557
6558 /* Virtualize APIC accesses. */
6559 if (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS)
6560 {
6561 /* APIC-access physical address. */
6562 RTGCPHYS const GCPhysApicAccess = pVmcs->u64AddrApicAccess.u;
6563 if ( !(GCPhysApicAccess & X86_PAGE_4K_OFFSET_MASK)
6564 && !(GCPhysApicAccess >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
6565 && PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysApicAccess))
6566 { /* likely */ }
6567 else
6568 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrApicAccess);
6569
6570 /*
6571 * Disallow APIC-access page and virtual-APIC page from being the same address.
6572 * Note! This is not an Intel requirement, but one imposed by our implementation.
6573 * This is done primarily to simplify recursion scenarios while redirecting accesses
6574 * between the APIC-access page and the virtual-APIC page. If any nested hypervisor
6575             * requires this, we can implement it later.
6576 */
6577 if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
6578 {
6579 RTGCPHYS const GCPhysVirtApic = pVmcs->u64AddrVirtApic.u;
6580 if (GCPhysVirtApic != GCPhysApicAccess)
6581 { /* likely */ }
6582 else
6583 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrApicAccessEqVirtApic);
6584 }
6585 }
6586
6587 /* Virtualize-x2APIC mode is mutually exclusive with virtualize-APIC accesses. */
6588 if ( !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_X2APIC_MODE)
6589 || !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS))
6590 { /* likely */ }
6591 else
6592 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VirtX2ApicVirtApic);
6593
6594 /* Virtual-interrupt delivery requires external interrupt exiting. */
6595 if ( !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY)
6596 || (pVmcs->u32PinCtls & VMX_PIN_CTLS_EXT_INT_EXIT))
6597 { /* likely */ }
6598 else
6599 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VirtX2ApicVirtApic);
6600
6601 /* VPID. */
6602 if ( !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VPID)
6603 || pVmcs->u16Vpid != 0)
6604 { /* likely */ }
6605 else
6606 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_Vpid);
6607
6608#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6609 /* Extended-Page-Table Pointer (EPTP). */
6610 if (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_EPT)
6611 {
6612 VMXVDIAG enmVmxDiag;
6613 int const rc = iemVmxVmentryCheckEptPtr(pVCpu, pVmcs->u64EptPtr.u, &enmVmxDiag);
6614 if (RT_SUCCESS(rc))
6615 { /* likely */ }
6616 else
6617 IEM_VMX_VMENTRY_FAILED_RET_2(pVCpu, pszInstr, pszFailure, enmVmxDiag, rc);
6618 }
6619#else
6620 Assert(!(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_EPT));
6621 Assert(!(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_UNRESTRICTED_GUEST));
6622#endif
6623 Assert(!(pVmcs->u32PinCtls & VMX_PIN_CTLS_POSTED_INT)); /* We don't support posted interrupts yet. */
6624 Assert(!(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_PML)); /* We don't support PML yet. */
6625 Assert(!(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VMFUNC)); /* We don't support VM functions yet. */
6626 Assert(!(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_EPT_XCPT_VE)); /* We don't support EPT-violation #VE yet. */
6627 Assert(!(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_TSC_SCALING)); /* We don't support TSC-scaling yet. */
6628
6629 /* VMCS shadowing. */
6630 if (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING)
6631 {
6632 /* VMREAD-bitmap physical address. */
6633 RTGCPHYS const GCPhysVmreadBitmap = pVmcs->u64AddrVmreadBitmap.u;
6634 if ( !(GCPhysVmreadBitmap & X86_PAGE_4K_OFFSET_MASK)
6635 && !(GCPhysVmreadBitmap >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
6636 && PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmreadBitmap))
6637 { /* likely */ }
6638 else
6639 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrVmreadBitmap);
6640
6641 /* VMWRITE-bitmap physical address. */
6642            RTGCPHYS const GCPhysVmwriteBitmap = pVmcs->u64AddrVmwriteBitmap.u;
6643 if ( !(GCPhysVmwriteBitmap & X86_PAGE_4K_OFFSET_MASK)
6644 && !(GCPhysVmwriteBitmap >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
6645 && PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmwriteBitmap))
6646 { /* likely */ }
6647 else
6648 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrVmwriteBitmap);
6649 }
6650 }
6651
6652 /*
6653 * VM-exit controls.
6654 * See Intel spec. 26.2.1.2 "VM-Exit Control Fields".
6655 */
6656 {
6657 VMXCTLSMSR const ExitCtls = fVmxTrueMsrs ? pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.TrueExitCtls
6658 : pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.ExitCtls;
6659 if (!(~pVmcs->u32ExitCtls & ExitCtls.n.allowed0))
6660 { /* likely */ }
6661 else
6662 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_ExitCtlsDisallowed0);
6663
6664 if (!(pVmcs->u32ExitCtls & ~ExitCtls.n.allowed1))
6665 { /* likely */ }
6666 else
6667 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_ExitCtlsAllowed1);
6668
6669 /* Save preemption timer without activating it. */
6670 if ( (pVmcs->u32PinCtls & VMX_PIN_CTLS_PREEMPT_TIMER)
6671            || !(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER))
6672 { /* likely */ }
6673 else
6674 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_SavePreemptTimer);
6675
6676 /* VM-exit MSR-store count and VM-exit MSR-store area address. */
6677 if (pVmcs->u32ExitMsrStoreCount)
6678 {
6679 if ( !(pVmcs->u64AddrExitMsrStore.u & VMX_AUTOMSR_OFFSET_MASK)
6680 && !(pVmcs->u64AddrExitMsrStore.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
6681 && PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), pVmcs->u64AddrExitMsrStore.u))
6682 { /* likely */ }
6683 else
6684 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrExitMsrStore);
6685 }
6686
6687 /* VM-exit MSR-load count and VM-exit MSR-load area address. */
6688 if (pVmcs->u32ExitMsrLoadCount)
6689 {
6690 if ( !(pVmcs->u64AddrExitMsrLoad.u & VMX_AUTOMSR_OFFSET_MASK)
6691 && !(pVmcs->u64AddrExitMsrLoad.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
6692 && PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), pVmcs->u64AddrExitMsrLoad.u))
6693 { /* likely */ }
6694 else
6695 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrExitMsrLoad);
6696 }
6697 }
6698
6699 /*
6700 * VM-entry controls.
6701 * See Intel spec. 26.2.1.3 "VM-Entry Control Fields".
6702 */
6703 {
6704 VMXCTLSMSR const EntryCtls = fVmxTrueMsrs ? pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.TrueEntryCtls
6705 : pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.EntryCtls;
6706 if (!(~pVmcs->u32EntryCtls & EntryCtls.n.allowed0))
6707 { /* likely */ }
6708 else
6709 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_EntryCtlsDisallowed0);
6710
6711 if (!(pVmcs->u32EntryCtls & ~EntryCtls.n.allowed1))
6712 { /* likely */ }
6713 else
6714 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_EntryCtlsAllowed1);
6715
6716 /* Event injection. */
6717 uint32_t const uIntInfo = pVmcs->u32EntryIntInfo;
6718 if (RT_BF_GET(uIntInfo, VMX_BF_ENTRY_INT_INFO_VALID))
6719 {
6720 /* Type and vector. */
6721 uint8_t const uType = RT_BF_GET(uIntInfo, VMX_BF_ENTRY_INT_INFO_TYPE);
6722 uint8_t const uVector = RT_BF_GET(uIntInfo, VMX_BF_ENTRY_INT_INFO_VECTOR);
6723 uint8_t const uRsvd = RT_BF_GET(uIntInfo, VMX_BF_ENTRY_INT_INFO_RSVD_12_30);
6724 if ( !uRsvd
6725 && VMXIsEntryIntInfoTypeValid(IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fVmxMonitorTrapFlag, uType)
6726 && VMXIsEntryIntInfoVectorValid(uVector, uType))
6727 { /* likely */ }
6728 else
6729 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_EntryIntInfoTypeVecRsvd);
6730
6731 /* Exception error code. */
6732 if (RT_BF_GET(uIntInfo, VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID))
6733 {
6734 /* Delivery possible only in Unrestricted-guest mode when CR0.PE is set. */
6735 if ( !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_UNRESTRICTED_GUEST)
6736 || (pVmcs->u64GuestCr0.s.Lo & X86_CR0_PE))
6737 { /* likely */ }
6738 else
6739 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_EntryIntInfoErrCodePe);
6740
6741 /* Exceptions that provide an error code. */
6742 if (uType == VMX_ENTRY_INT_INFO_TYPE_HW_XCPT)
6743 {
6744 if ( uVector == X86_XCPT_DF
6745 || uVector == X86_XCPT_TS
6746 || uVector == X86_XCPT_NP
6747 || uVector == X86_XCPT_SS
6748 || uVector == X86_XCPT_GP
6749 || uVector == X86_XCPT_PF
6750 || uVector == X86_XCPT_AC)
6751 { /* likely */ }
6752 else
6753 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_EntryIntInfoErrCodeVec);
6754 }
6755
6756 /* Exception error-code reserved bits. */
6757 if (!(pVmcs->u32EntryXcptErrCode & ~VMX_ENTRY_INT_XCPT_ERR_CODE_VALID_MASK))
6758 { /* likely */ }
6759 else
6760 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_EntryXcptErrCodeRsvd);
6761
6762 /* Injecting a software interrupt, software exception or privileged software exception. */
6763 if ( uType == VMX_ENTRY_INT_INFO_TYPE_SW_INT
6764 || uType == VMX_ENTRY_INT_INFO_TYPE_SW_XCPT
6765 || uType == VMX_ENTRY_INT_INFO_TYPE_PRIV_SW_XCPT)
6766 {
6767 /* Instruction length must be in the range 0-15. */
6768 if (pVmcs->u32EntryInstrLen <= VMX_ENTRY_INSTR_LEN_MAX)
6769 { /* likely */ }
6770 else
6771 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_EntryInstrLen);
6772
6773                    /* However, an instruction length of 0 is allowed only when the corresponding CPU feature is present. */
6774 if ( pVmcs->u32EntryInstrLen != 0
6775 || IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fVmxEntryInjectSoftInt)
6776 { /* likely */ }
6777 else
6778 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_EntryInstrLenZero);
6779 }
6780 }
6781 }
6782
6783 /* VM-entry MSR-load count and VM-entry MSR-load area address. */
6784 if (pVmcs->u32EntryMsrLoadCount)
6785 {
6786 if ( !(pVmcs->u64AddrEntryMsrLoad.u & VMX_AUTOMSR_OFFSET_MASK)
6787 && !(pVmcs->u64AddrEntryMsrLoad.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
6788 && PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), pVmcs->u64AddrEntryMsrLoad.u))
6789 { /* likely */ }
6790 else
6791 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrEntryMsrLoad);
6792 }
6793
6794 Assert(!(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_ENTRY_TO_SMM)); /* We don't support SMM yet. */
6795 Assert(!(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_DEACTIVATE_DUAL_MON)); /* We don't support dual-monitor treatment yet. */
6796 }
6797
6798 NOREF(pszInstr);
6799 NOREF(pszFailure);
6800 return VINF_SUCCESS;
6801}
6802
6803
6804/**
6805 * Loads the guest control registers, debug register and some MSRs as part of
6806 * VM-entry.
6807 *
6808 * @param pVCpu The cross context virtual CPU structure.
6809 */
6810static void iemVmxVmentryLoadGuestControlRegsMsrs(PVMCPUCC pVCpu) RT_NOEXCEPT
6811{
6812 /*
6813 * Load guest control registers, debug registers and MSRs.
6814 * See Intel spec. 26.3.2.1 "Loading Guest Control Registers, Debug Registers and MSRs".
6815 */
6816 PCVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
6817
6818 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
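    /* CR0: bits in the ignore mask retain their current values; all other bits are taken from the VMCS. */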
6819 uint64_t const uGstCr0 = (pVmcs->u64GuestCr0.u & ~VMX_ENTRY_GUEST_CR0_IGNORE_MASK)
6820 | (pVCpu->cpum.GstCtx.cr0 & VMX_ENTRY_GUEST_CR0_IGNORE_MASK);
6821 pVCpu->cpum.GstCtx.cr0 = uGstCr0;
6822 pVCpu->cpum.GstCtx.cr4 = pVmcs->u64GuestCr4.u;
6823 pVCpu->cpum.GstCtx.cr3 = pVmcs->u64GuestCr3.u;
6824
6825 if (pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
6826 pVCpu->cpum.GstCtx.dr[7] = (pVmcs->u64GuestDr7.u & ~VMX_ENTRY_GUEST_DR7_MBZ_MASK) | VMX_ENTRY_GUEST_DR7_MB1_MASK;
6827
6828 pVCpu->cpum.GstCtx.SysEnter.eip = pVmcs->u64GuestSysenterEip.s.Lo;
6829 pVCpu->cpum.GstCtx.SysEnter.esp = pVmcs->u64GuestSysenterEsp.s.Lo;
6830 pVCpu->cpum.GstCtx.SysEnter.cs = pVmcs->u32GuestSysenterCS;
6831
6832 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
6833 {
6834 /* FS base and GS base are loaded while loading the rest of the guest segment registers. */
6835
6836 /* EFER MSR. */
6837 if (!(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_EFER_MSR))
6838 {
6839 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_EFER);
6840 uint64_t const uHostEfer = pVCpu->cpum.GstCtx.msrEFER;
6841 bool const fGstInLongMode = RT_BOOL(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
6842 bool const fGstPaging = RT_BOOL(uGstCr0 & X86_CR0_PG);
6843 if (fGstInLongMode)
6844 {
6845 /* If the nested-guest is in long mode, LMA and LME are both set. */
6846 Assert(fGstPaging);
6847 pVCpu->cpum.GstCtx.msrEFER = uHostEfer | (MSR_K6_EFER_LMA | MSR_K6_EFER_LME);
6848 }
6849 else
6850 {
6851 /*
6852 * If the nested-guest is outside long mode:
6853 * - With paging: LMA is cleared, LME is cleared.
6854 * - Without paging: LMA is cleared, LME is left unmodified.
6855 */
6856 uint64_t const fLmaLmeMask = MSR_K6_EFER_LMA | (fGstPaging ? MSR_K6_EFER_LME : 0);
6857 pVCpu->cpum.GstCtx.msrEFER = uHostEfer & ~fLmaLmeMask;
6858 }
6859 }
6860 /* else: see below. */
6861 }
6862
6863 /* PAT MSR. */
6864 if (pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PAT_MSR)
6865 pVCpu->cpum.GstCtx.msrPAT = pVmcs->u64GuestPatMsr.u;
6866
6867 /* EFER MSR. */
6868 if (pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_EFER_MSR)
6869 pVCpu->cpum.GstCtx.msrEFER = pVmcs->u64GuestEferMsr.u;
6870
6871 /* We don't support IA32_PERF_GLOBAL_CTRL MSR yet. */
6872 Assert(!(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PERF_MSR));
6873
6874 /* We don't support IA32_BNDCFGS MSR yet. */
6875 Assert(!(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_BNDCFGS_MSR));
6876
6877 /* Nothing to do for SMBASE register - We don't support SMM yet. */
6878}
6879
6880
6881/**
6882 * Loads the guest segment registers, GDTR, IDTR, LDTR and TR as part of VM-entry.
6883 *
6884 * @param pVCpu The cross context virtual CPU structure.
6885 */
6886static void iemVmxVmentryLoadGuestSegRegs(PVMCPUCC pVCpu) RT_NOEXCEPT
6887{
6888 /*
6889 * Load guest segment registers, GDTR, IDTR, LDTR and TR.
6890 * See Intel spec. 26.3.2.2 "Loading Guest Segment Registers and Descriptor-Table Registers".
6891 */
6892 /* CS, SS, ES, DS, FS, GS. */
6893 PCVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
6894 for (unsigned iSegReg = 0; iSegReg < X86_SREG_COUNT; iSegReg++)
6895 {
6896 PCPUMSELREG pGstSelReg = &pVCpu->cpum.GstCtx.aSRegs[iSegReg];
6897 CPUMSELREG VmcsSelReg;
6898 int rc = iemVmxVmcsGetGuestSegReg(pVmcs, iSegReg, &VmcsSelReg);
6899 AssertRC(rc); NOREF(rc);
6900 if (!(VmcsSelReg.Attr.u & X86DESCATTR_UNUSABLE))
6901 {
6902 pGstSelReg->Sel = VmcsSelReg.Sel;
6903 pGstSelReg->ValidSel = VmcsSelReg.Sel;
6904 pGstSelReg->fFlags = CPUMSELREG_FLAGS_VALID;
6905 pGstSelReg->u64Base = VmcsSelReg.u64Base;
6906 pGstSelReg->u32Limit = VmcsSelReg.u32Limit;
6907 pGstSelReg->Attr.u = VmcsSelReg.Attr.u;
6908 }
6909 else
6910 {
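            /* Unusable segment: the loaded base/limit/attributes depend on the register; see Intel spec. 26.3.2.2. */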
6911 pGstSelReg->Sel = VmcsSelReg.Sel;
6912 pGstSelReg->ValidSel = VmcsSelReg.Sel;
6913 pGstSelReg->fFlags = CPUMSELREG_FLAGS_VALID;
6914 switch (iSegReg)
6915 {
6916 case X86_SREG_CS:
6917 pGstSelReg->u64Base = VmcsSelReg.u64Base;
6918 pGstSelReg->u32Limit = VmcsSelReg.u32Limit;
6919 pGstSelReg->Attr.u = VmcsSelReg.Attr.u;
6920 break;
6921
6922 case X86_SREG_SS:
6923 pGstSelReg->u64Base = VmcsSelReg.u64Base & UINT32_C(0xfffffff0);
6924 pGstSelReg->u32Limit = 0;
6925 pGstSelReg->Attr.u = (VmcsSelReg.Attr.u & X86DESCATTR_DPL) | X86DESCATTR_D | X86DESCATTR_UNUSABLE;
6926 break;
6927
6928 case X86_SREG_ES:
6929 case X86_SREG_DS:
6930 pGstSelReg->u64Base = 0;
6931 pGstSelReg->u32Limit = 0;
6932 pGstSelReg->Attr.u = X86DESCATTR_UNUSABLE;
6933 break;
6934
6935 case X86_SREG_FS:
6936 case X86_SREG_GS:
6937 pGstSelReg->u64Base = VmcsSelReg.u64Base;
6938 pGstSelReg->u32Limit = 0;
6939 pGstSelReg->Attr.u = X86DESCATTR_UNUSABLE;
6940 break;
6941 }
6942 Assert(pGstSelReg->Attr.n.u1Unusable);
6943 }
6944 }
6945
6946 /* LDTR. */
6947 pVCpu->cpum.GstCtx.ldtr.Sel = pVmcs->GuestLdtr;
6948 pVCpu->cpum.GstCtx.ldtr.ValidSel = pVmcs->GuestLdtr;
6949 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
6950 if (!(pVmcs->u32GuestLdtrAttr & X86DESCATTR_UNUSABLE))
6951 {
6952 pVCpu->cpum.GstCtx.ldtr.u64Base = pVmcs->u64GuestLdtrBase.u;
6953 pVCpu->cpum.GstCtx.ldtr.u32Limit = pVmcs->u32GuestLdtrLimit;
6954 pVCpu->cpum.GstCtx.ldtr.Attr.u = pVmcs->u32GuestLdtrAttr;
6955 }
6956 else
6957 {
6958 pVCpu->cpum.GstCtx.ldtr.u64Base = 0;
6959 pVCpu->cpum.GstCtx.ldtr.u32Limit = 0;
6960 pVCpu->cpum.GstCtx.ldtr.Attr.u = X86DESCATTR_UNUSABLE;
6961 }
6962
6963 /* TR. */
6964 Assert(!(pVmcs->u32GuestTrAttr & X86DESCATTR_UNUSABLE));
6965 pVCpu->cpum.GstCtx.tr.Sel = pVmcs->GuestTr;
6966 pVCpu->cpum.GstCtx.tr.ValidSel = pVmcs->GuestTr;
6967 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
6968 pVCpu->cpum.GstCtx.tr.u64Base = pVmcs->u64GuestTrBase.u;
6969 pVCpu->cpum.GstCtx.tr.u32Limit = pVmcs->u32GuestTrLimit;
6970 pVCpu->cpum.GstCtx.tr.Attr.u = pVmcs->u32GuestTrAttr;
6971
6972 /* GDTR. */
6973 pVCpu->cpum.GstCtx.gdtr.cbGdt = pVmcs->u32GuestGdtrLimit;
6974 pVCpu->cpum.GstCtx.gdtr.pGdt = pVmcs->u64GuestGdtrBase.u;
6975
6976 /* IDTR. */
6977 pVCpu->cpum.GstCtx.idtr.cbIdt = pVmcs->u32GuestIdtrLimit;
6978 pVCpu->cpum.GstCtx.idtr.pIdt = pVmcs->u64GuestIdtrBase.u;
6979}
6980
6981
6982/**
6983 * Loads the guest MSRs from the VM-entry MSR-load area as part of VM-entry.
6984 *
6985 * @returns VBox status code.
6986 * @param pVCpu The cross context virtual CPU structure.
6987 * @param pszInstr The VMX instruction name (for logging purposes).
6988 */
6989static int iemVmxVmentryLoadGuestAutoMsrs(PVMCPUCC pVCpu, const char *pszInstr) RT_NOEXCEPT
6990{
6991 /*
6992 * Load guest MSRs.
6993 * See Intel spec. 26.4 "Loading MSRs".
6994 */
6995 PVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
6996 const char *const pszFailure = "VM-exit";
6997
6998 /*
6999 * The VM-entry MSR-load area address need not be a valid guest-physical address if the
7000 * VM-entry MSR load count is 0. If this is the case, bail early without reading it.
7001 * See Intel spec. 24.8.2 "VM-Entry Controls for MSRs".
7002 */
7003 uint32_t const cMsrs = RT_MIN(pVmcs->u32EntryMsrLoadCount, RT_ELEMENTS(pVCpu->cpum.GstCtx.hwvirt.vmx.aEntryMsrLoadArea));
7004 if (!cMsrs)
7005 return VINF_SUCCESS;
7006
7007 /*
7008 * Verify the MSR auto-load count. Physical CPUs can behave unpredictably if the count is
7009     * exceeded, including possibly raising #MC exceptions during the VMX transition. Our
7010     * implementation shall fail VM-entry with a VMX_EXIT_ERR_MSR_LOAD VM-exit.
7011 */
7012 bool const fIsMsrCountValid = iemVmxIsAutoMsrCountValid(pVCpu, cMsrs);
7013 if (fIsMsrCountValid)
7014 { /* likely */ }
7015 else
7016 {
7017 iemVmxVmcsSetExitQual(pVCpu, VMX_V_AUTOMSR_AREA_SIZE / sizeof(VMXAUTOMSR));
7018 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_MsrLoadCount);
7019 }
7020
7021 RTGCPHYS const GCPhysVmEntryMsrLoadArea = pVmcs->u64AddrEntryMsrLoad.u;
7022 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->cpum.GstCtx.hwvirt.vmx.aEntryMsrLoadArea[0],
7023 GCPhysVmEntryMsrLoadArea, cMsrs * sizeof(VMXAUTOMSR));
7024 if (RT_SUCCESS(rc))
7025 {
7026 PCVMXAUTOMSR pMsr = &pVCpu->cpum.GstCtx.hwvirt.vmx.aEntryMsrLoadArea[0];
7027 for (uint32_t idxMsr = 0; idxMsr < cMsrs; idxMsr++, pMsr++)
7028 {
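            /* MSRs with reserved bits set, FS/GS base, EFER, SMM monitor control and MSRs in the x2APIC range must not be loaded via the auto-load area and cause a VM-entry failure. */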
7029 if ( !pMsr->u32Reserved
7030 && pMsr->u32Msr != MSR_K8_FS_BASE
7031 && pMsr->u32Msr != MSR_K8_GS_BASE
7032 && pMsr->u32Msr != MSR_K6_EFER
7033 && pMsr->u32Msr != MSR_IA32_SMM_MONITOR_CTL
7034 && pMsr->u32Msr >> 8 != MSR_IA32_X2APIC_START >> 8)
7035 {
7036 VBOXSTRICTRC rcStrict = CPUMSetGuestMsr(pVCpu, pMsr->u32Msr, pMsr->u64Value);
7037 if (rcStrict == VINF_SUCCESS)
7038 continue;
7039
7040 /*
7041                 * If we're in ring-0, we cannot handle returns to ring-3 at this point and still continue VM-entry.
7042                 * If any nested hypervisor loads MSRs that require ring-3 handling, we cause a VM-entry failure,
7043                 * recording the MSR index in the Exit qualification (as per the Intel spec.) and indicating it
7044                 * further with our own, specific diagnostic code. Later, we can try to implement handling of the
7045                 * MSR in ring-0 if possible, or come up with a better, generic solution.
7046 */
7047 iemVmxVmcsSetExitQual(pVCpu, idxMsr);
7048 VMXVDIAG const enmDiag = rcStrict == VINF_CPUM_R3_MSR_WRITE
7049 ? kVmxVDiag_Vmentry_MsrLoadRing3
7050 : kVmxVDiag_Vmentry_MsrLoad;
7051 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
7052 }
7053 else
7054 {
7055 iemVmxVmcsSetExitQual(pVCpu, idxMsr);
7056 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_MsrLoadRsvd);
7057 }
7058 }
7059 }
7060 else
7061 {
7062 AssertMsgFailed(("%s: Failed to read MSR auto-load area at %#RGp, rc=%Rrc\n", pszInstr, GCPhysVmEntryMsrLoadArea, rc));
7063 IEM_VMX_VMENTRY_FAILED_RET_2(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_MsrLoadPtrReadPhys, rc);
7064 }
7065
7066 NOREF(pszInstr);
7067 NOREF(pszFailure);
7068 return VINF_SUCCESS;
7069}
7070
7071
7072/**
7073 * Loads the guest-state non-register state as part of VM-entry.
7074 *
7075 * @returns VBox status code.
7076 * @param pVCpu The cross context virtual CPU structure.
7077 * @param pszInstr The VMX instruction name (for logging purposes).
7078 *
7079 * @remarks This must be called only after loading the nested-guest register state
7080 * (especially nested-guest RIP).
7081 */
7082static int iemVmxVmentryLoadGuestNonRegState(PVMCPUCC pVCpu, const char *pszInstr) RT_NOEXCEPT
7083{
7084 /*
7085 * Load guest non-register state.
7086 * See Intel spec. 26.6 "Special Features of VM Entry"
7087 */
7088 const char *const pszFailure = "VM-exit";
7089 PCVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
7090
7091 /*
7092 * If VM-entry is not vectoring, block-by-STI and block-by-MovSS state must be loaded.
7093 * If VM-entry is vectoring, there is no block-by-STI or block-by-MovSS.
7094 *
7095 * See Intel spec. 26.6.1 "Interruptibility State".
7096 */
7097 bool const fEntryVectoring = VMXIsVmentryVectoring(pVmcs->u32EntryIntInfo, NULL /* puEntryIntInfoType */);
7098 if ( !fEntryVectoring
7099 && (pVmcs->u32GuestIntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS)))
7100 CPUMSetInInterruptShadowEx(&pVCpu->cpum.GstCtx, pVmcs->u64GuestRip.u);
7101 else
7102 Assert(!CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx));
7103
7104 /* NMI blocking. */
7105 if (pVmcs->u32GuestIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI)
7106 {
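        /* With virtual NMIs the blocking state is tracked separately (fVirtNmiBlocking); otherwise it maps onto real NMI inhibition. */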
7107 if (pVmcs->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
7108 {
7109 pVCpu->cpum.GstCtx.hwvirt.vmx.fVirtNmiBlocking = true;
7110 Assert(!CPUMAreInterruptsInhibitedByNmi(&pVCpu->cpum.GstCtx));
7111 }
7112 else
7113 {
7114 pVCpu->cpum.GstCtx.hwvirt.vmx.fVirtNmiBlocking = false;
7115 CPUMSetInterruptInhibitingByNmi(&pVCpu->cpum.GstCtx);
7116 }
7117 }
7118 else
7119 {
7120 pVCpu->cpum.GstCtx.hwvirt.vmx.fVirtNmiBlocking = false;
7121 Assert(!CPUMAreInterruptsInhibitedByNmi(&pVCpu->cpum.GstCtx));
7122 }
7123
7124 /* SMI blocking is irrelevant. We don't support SMIs yet. */
7125
7126 /*
7127 * Set PGM's copy of the EPT pointer.
7128 * The EPTP has already been validated while checking guest state.
7129 *
7130 * It is important to do this prior to mapping PAE PDPTEs (below).
7131 */
7132 if (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_EPT)
7133 PGMSetGuestEptPtr(pVCpu, pVmcs->u64EptPtr.u);
7134
7135 /*
7136 * Load the guest's PAE PDPTEs.
7137 */
7138 if (!iemVmxVmcsIsGuestPaePagingEnabled(pVmcs))
7139 {
7140 /*
7141 * When PAE paging is not used we clear the PAE PDPTEs for safety
7142 * in case we might be switching from a PAE host to a non-PAE guest.
7143 */
7144 pVCpu->cpum.GstCtx.aPaePdpes[0].u = 0;
7145 pVCpu->cpum.GstCtx.aPaePdpes[1].u = 0;
7146 pVCpu->cpum.GstCtx.aPaePdpes[2].u = 0;
7147 pVCpu->cpum.GstCtx.aPaePdpes[3].u = 0;
7148 }
7149 else if (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_EPT)
7150 {
7151 /*
7152 * With EPT and the nested-guest using PAE paging, we've already validated the PAE PDPTEs
7153 * while checking the guest state. We can load them into the nested-guest CPU state now.
7154 * They'll later be used while mapping CR3 and the PAE PDPTEs.
7155 */
7156 pVCpu->cpum.GstCtx.aPaePdpes[0].u = pVmcs->u64GuestPdpte0.u;
7157 pVCpu->cpum.GstCtx.aPaePdpes[1].u = pVmcs->u64GuestPdpte1.u;
7158 pVCpu->cpum.GstCtx.aPaePdpes[2].u = pVmcs->u64GuestPdpte2.u;
7159 pVCpu->cpum.GstCtx.aPaePdpes[3].u = pVmcs->u64GuestPdpte3.u;
7160 }
7161 else
7162 {
7163 /*
7164 * Without EPT and the nested-guest using PAE paging, we must load the PAE PDPTEs
7165 * referenced by CR3. This involves loading (and mapping) CR3 and validating them now.
7166 */
7167 int const rc = PGMGstMapPaePdpesAtCr3(pVCpu, pVmcs->u64GuestCr3.u);
7168 if (RT_SUCCESS(rc))
7169 { /* likely */ }
7170 else
7171 {
7172 iemVmxVmcsSetExitQual(pVCpu, VMX_ENTRY_FAIL_QUAL_PDPTE);
7173 IEM_VMX_VMENTRY_FAILED_RET_2(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestPdpte, rc);
7174 }
7175 }
7176
7177 /* VPID is irrelevant. We don't support VPID yet. */
7178
7179 /* Clear address-range monitoring. */
7180 EMMonitorWaitClear(pVCpu);
7181
7182 return VINF_SUCCESS;
7183}
7184
7185
7186/**
7187 * Loads the guest VMCS referenced state (such as MSR bitmaps, I/O bitmaps etc).
7188 *
7189 * @param pVCpu The cross context virtual CPU structure.
7190 * @param pszInstr The VMX instruction name (for logging purposes).
7191 *
7192 * @remarks This assumes various VMCS related data structure pointers have already
7193 * been verified prior to calling this function.
7194 */
7195static int iemVmxVmentryLoadGuestVmcsRefState(PVMCPUCC pVCpu, const char *pszInstr) RT_NOEXCEPT
7196{
7197 const char *const pszFailure = "VM-exit";
7198 PCVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
7199
7200 /*
7201 * Virtualize APIC accesses.
7202 */
7203 if (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS)
7204 {
7205 /* APIC-access physical address. */
7206 RTGCPHYS const GCPhysApicAccess = pVmcs->u64AddrApicAccess.u;
7207
7208 /*
7209 * Register the handler for the APIC-access page.
7210 *
7211 * We don't deregister the APIC-access page handler during the VM-exit as a different
7212 * nested-VCPU might be using the same guest-physical address for its APIC-access page.
7213 *
7214 * We leave the page registered until the first access that happens outside VMX non-root
7215 * mode. Guest software is allowed to access structures such as the APIC-access page
7216 * only when no logical processor with a current VMCS references it in VMX non-root mode,
7217 * otherwise it can lead to unpredictable behavior including guest triple-faults.
7218 *
7219 * See Intel spec. 24.11.4 "Software Access to Related Structures".
7220 */
7221 /** @todo r=bird: The lazy deregistration of the page is potentially slightly
7222 * problematic, as the guest may cause us to create lots of access
7223 * handler entries. However, any slowdown or similar effects should
7224 * only ever affect the guest itself, so not a big issue. Though, I
7225 * wish there was most recently used approach or something to tracking
7226     * wish there was a most-recently-used approach or something for tracking
7227 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
7228 int rc = PGMHandlerPhysicalRegisterVmxApicAccessPage(pVM, GCPhysApicAccess, pVM->iem.s.hVmxApicAccessPage);
7229 if (RT_SUCCESS(rc))
7230 { /* likely */ }
7231 else
7232 IEM_VMX_VMENTRY_FAILED_RET_2(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrApicAccessHandlerReg, rc);
7233 }
7234
7235 /*
7236 * VMCS shadowing.
7237 */
7238 if (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING)
7239 {
7240 /* Read the VMREAD-bitmap. */
7241 RTGCPHYS const GCPhysVmreadBitmap = pVmcs->u64AddrVmreadBitmap.u;
7242 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->cpum.GstCtx.hwvirt.vmx.abVmreadBitmap[0],
7243 GCPhysVmreadBitmap, sizeof(pVCpu->cpum.GstCtx.hwvirt.vmx.abVmreadBitmap));
7244 if (RT_SUCCESS(rc))
7245 { /* likely */ }
7246 else
7247 IEM_VMX_VMENTRY_FAILED_RET_2(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VmreadBitmapPtrReadPhys, rc);
7248
7249 /* Read the VMWRITE-bitmap. */
7250 RTGCPHYS const GCPhysVmwriteBitmap = pVmcs->u64AddrVmwriteBitmap.u;
7251 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->cpum.GstCtx.hwvirt.vmx.abVmwriteBitmap[0],
7252 GCPhysVmwriteBitmap, sizeof(pVCpu->cpum.GstCtx.hwvirt.vmx.abVmwriteBitmap));
7253 if (RT_SUCCESS(rc))
7254 { /* likely */ }
7255 else
7256 IEM_VMX_VMENTRY_FAILED_RET_2(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VmwriteBitmapPtrReadPhys, rc);
7257 }
7258
7259 /*
7260 * I/O bitmaps.
7261 */
7262 if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_USE_IO_BITMAPS)
7263 {
7264 /* Read the IO bitmap A. */
7265 RTGCPHYS const GCPhysIoBitmapA = pVmcs->u64AddrIoBitmapA.u;
7266 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->cpum.GstCtx.hwvirt.vmx.abIoBitmap[0],
7267 GCPhysIoBitmapA, VMX_V_IO_BITMAP_A_SIZE);
7268 if (RT_SUCCESS(rc))
7269 { /* likely */ }
7270 else
7271 IEM_VMX_VMENTRY_FAILED_RET_2(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_IoBitmapAPtrReadPhys, rc);
7272
7273 /* Read the IO bitmap B. */
7274 RTGCPHYS const GCPhysIoBitmapB = pVmcs->u64AddrIoBitmapB.u;
7275 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->cpum.GstCtx.hwvirt.vmx.abIoBitmap[VMX_V_IO_BITMAP_A_SIZE],
7276 GCPhysIoBitmapB, VMX_V_IO_BITMAP_B_SIZE);
7277 if (RT_SUCCESS(rc))
7278 { /* likely */ }
7279 else
7280 IEM_VMX_VMENTRY_FAILED_RET_2(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_IoBitmapBPtrReadPhys, rc);
7281 }
7282
7283 /*
7284 * TPR shadow and Virtual-APIC page.
7285 */
7286 if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
7287 {
7288 /* Verify TPR threshold and VTPR when neither virtualize-APIC accesses nor virtual-interrupt delivery are used. */
7289 if ( !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS)
7290 && !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY))
7291 {
7292 /* Read the VTPR from the virtual-APIC page. */
7293 RTGCPHYS const GCPhysVirtApic = pVmcs->u64AddrVirtApic.u;
7294 uint8_t u8VTpr;
7295 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &u8VTpr, GCPhysVirtApic + XAPIC_OFF_TPR, sizeof(u8VTpr));
7296 if (RT_SUCCESS(rc))
7297 { /* likely */ }
7298 else
7299 IEM_VMX_VMENTRY_FAILED_RET_2(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VirtApicPagePtrReadPhys, rc);
7300
7301 /* Bits 3:0 of the TPR-threshold must not be greater than bits 7:4 of VTPR. */
7302 if ((uint8_t)RT_BF_GET(pVmcs->u32TprThreshold, VMX_BF_TPR_THRESHOLD_TPR) <= (u8VTpr & 0xf0))
7303 { /* likely */ }
7304 else
7305 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_TprThresholdVTpr);
7306 }
7307 }
7308
7309 /*
7310 * VMCS link pointer.
7311 */
7312 if (pVmcs->u64VmcsLinkPtr.u != UINT64_C(0xffffffffffffffff))
7313 {
7314 /* Read the VMCS-link pointer from guest memory. */
7315 RTGCPHYS const GCPhysShadowVmcs = pVmcs->u64VmcsLinkPtr.u;
7316 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->cpum.GstCtx.hwvirt.vmx.ShadowVmcs,
7317 GCPhysShadowVmcs, sizeof(pVCpu->cpum.GstCtx.hwvirt.vmx.ShadowVmcs));
7318 if (RT_SUCCESS(rc))
7319 { /* likely */ }
7320 else
7321 {
7322 iemVmxVmcsSetExitQual(pVCpu, VMX_ENTRY_FAIL_QUAL_VMCS_LINK_PTR);
7323 IEM_VMX_VMENTRY_FAILED_RET_2(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VmcsLinkPtrReadPhys, rc);
7324 }
7325
7326 /* Verify the VMCS revision specified by the guest matches what we reported to the guest. */
7327 if (pVCpu->cpum.GstCtx.hwvirt.vmx.ShadowVmcs.u32VmcsRevId.n.u31RevisionId == VMX_V_VMCS_REVISION_ID)
7328 { /* likely */ }
7329 else
7330 {
7331 iemVmxVmcsSetExitQual(pVCpu, VMX_ENTRY_FAIL_QUAL_VMCS_LINK_PTR);
7332 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VmcsLinkPtrRevId);
7333 }
7334
7335 /* Verify the shadow bit is set if VMCS shadowing is enabled. */
7336 if ( !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING)
7337 || pVCpu->cpum.GstCtx.hwvirt.vmx.ShadowVmcs.u32VmcsRevId.n.fIsShadowVmcs)
7338 { /* likely */ }
7339 else
7340 {
7341 iemVmxVmcsSetExitQual(pVCpu, VMX_ENTRY_FAIL_QUAL_VMCS_LINK_PTR);
7342 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VmcsLinkPtrShadow);
7343 }
7344
7345 /* Update our cache of the guest physical address of the shadow VMCS. */
7346 pVCpu->cpum.GstCtx.hwvirt.vmx.GCPhysShadowVmcs = GCPhysShadowVmcs;
7347 }
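/*
 * Illustrative sketch (not part of the build): what the VMCS link pointer checks above expect
 * from the nested hypervisor when VMCS shadowing is used - the referenced region must carry
 * our VMCS revision identifier with the shadow-VMCS indicator (bit 31 of the revision dword)
 * set.  The helper name is made up for illustration.
 */
# if 0
static void vmxExampleMarkShadowVmcs(uint32_t *pu32ShadowVmcsPage)
{
    /* First dword of a VMCS region: bits 30:0 = revision id, bit 31 = shadow-VMCS indicator. */
    pu32ShadowVmcsPage[0] = VMX_V_VMCS_REVISION_ID | RT_BIT_32(31);
}
# endif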
7348
7349 /*
7350 * MSR bitmap.
7351 */
7352 if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS)
7353 {
7354 /* Read the MSR bitmap. */
7355 RTGCPHYS const GCPhysMsrBitmap = pVmcs->u64AddrMsrBitmap.u;
7356 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->cpum.GstCtx.hwvirt.vmx.abMsrBitmap[0],
7357 GCPhysMsrBitmap, sizeof(pVCpu->cpum.GstCtx.hwvirt.vmx.abMsrBitmap));
7358 if (RT_SUCCESS(rc))
7359 { /* likely */ }
7360 else
7361 IEM_VMX_VMENTRY_FAILED_RET_2(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_MsrBitmapPtrReadPhys, rc);
7362 }
7363
7364 NOREF(pszFailure);
7365 NOREF(pszInstr);
7366 return VINF_SUCCESS;
7367}
7368
7369
7370/**
7371 * Loads the guest-state as part of VM-entry.
7372 *
7373 * @returns VBox status code.
7374 * @param pVCpu The cross context virtual CPU structure.
7375 * @param pszInstr The VMX instruction name (for logging purposes).
7376 *
7377 * @remarks This must be done after all the necessary steps prior to loading of
7378 * guest-state (e.g. checking various VMCS state).
7379 */
7380static int iemVmxVmentryLoadGuestState(PVMCPUCC pVCpu, const char *pszInstr) RT_NOEXCEPT
7381{
7382 /* Load guest control registers, MSRs (that are directly part of the VMCS). */
7383 iemVmxVmentryLoadGuestControlRegsMsrs(pVCpu);
7384
7385 /* Load guest segment registers. */
7386 iemVmxVmentryLoadGuestSegRegs(pVCpu);
7387
7388 /*
7389 * Load guest RIP, RSP and RFLAGS.
7390 * See Intel spec. 26.3.2.3 "Loading Guest RIP, RSP and RFLAGS".
7391 */
7392 PCVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
7393 pVCpu->cpum.GstCtx.rsp = pVmcs->u64GuestRsp.u;
7394 pVCpu->cpum.GstCtx.rip = pVmcs->u64GuestRip.u;
7395 pVCpu->cpum.GstCtx.rflags.u = pVmcs->u64GuestRFlags.u;
7396
7397 /* Initialize the PAUSE-loop controls as part of VM-entry. */
7398 pVCpu->cpum.GstCtx.hwvirt.vmx.uFirstPauseLoopTick = 0;
7399 pVCpu->cpum.GstCtx.hwvirt.vmx.uPrevPauseTick = 0;
7400
7401 /* Load guest non-register state (such as interrupt shadows, NMI blocking etc). */
7402 int rc = iemVmxVmentryLoadGuestNonRegState(pVCpu, pszInstr);
7403 if (rc == VINF_SUCCESS)
7404 { /* likely */ }
7405 else
7406 return rc;
7407
7408 /* Load VMX related structures and state referenced by the VMCS. */
7409 rc = iemVmxVmentryLoadGuestVmcsRefState(pVCpu, pszInstr);
7410 if (rc == VINF_SUCCESS)
7411 { /* likely */ }
7412 else
7413 return rc;
7414
7415 NOREF(pszInstr);
7416 return VINF_SUCCESS;
7417}
7418
7419
7420/**
7421 * Returns whether there is a pending debug exception on VM-entry.
7422 *
7423 * @param pVCpu The cross context virtual CPU structure.
7424 * @param pszInstr The VMX instruction name (for logging purposes).
7425 */
7426static bool iemVmxVmentryIsPendingDebugXcpt(PVMCPUCC pVCpu, const char *pszInstr) RT_NOEXCEPT
7427{
7428 /*
7429 * Pending debug exceptions.
7430 * See Intel spec. 26.6.3 "Delivery of Pending Debug Exceptions after VM Entry".
7431 */
7432 PCVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
7433 Assert(pVmcs);
7434
7435 bool fPendingDbgXcpt = RT_BOOL(pVmcs->u64GuestPendingDbgXcpts.u & ( VMX_VMCS_GUEST_PENDING_DEBUG_XCPT_BS
7436 | VMX_VMCS_GUEST_PENDING_DEBUG_XCPT_EN_BP));
7437 if (fPendingDbgXcpt)
7438 {
7439 uint8_t uEntryIntInfoType;
7440 bool const fEntryVectoring = VMXIsVmentryVectoring(pVmcs->u32EntryIntInfo, &uEntryIntInfoType);
7441 if (fEntryVectoring)
7442 {
7443 switch (uEntryIntInfoType)
7444 {
7445 case VMX_ENTRY_INT_INFO_TYPE_EXT_INT:
7446 case VMX_ENTRY_INT_INFO_TYPE_NMI:
7447 case VMX_ENTRY_INT_INFO_TYPE_HW_XCPT:
7448 case VMX_ENTRY_INT_INFO_TYPE_PRIV_SW_XCPT:
7449 fPendingDbgXcpt = false;
7450 break;
7451
7452 case VMX_ENTRY_INT_INFO_TYPE_SW_XCPT:
7453 {
7454 /*
7455 * Whether the pending debug exception for software exceptions other than
7456 * #BP and #OF is delivered after injecting the exception, or is discarded,
7457 * is CPU implementation specific. We will discard them (easier).
7458 */
7459 uint8_t const uVector = VMX_ENTRY_INT_INFO_VECTOR(pVmcs->u32EntryIntInfo);
7460 if ( uVector != X86_XCPT_BP
7461 && uVector != X86_XCPT_OF)
7462 fPendingDbgXcpt = false;
7463 RT_FALL_THRU();
7464 }
7465 case VMX_ENTRY_INT_INFO_TYPE_SW_INT:
7466 {
7467 if (!(pVmcs->u32GuestIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS))
7468 fPendingDbgXcpt = false;
7469 break;
7470 }
7471 }
7472 }
7473 else
7474 {
7475 /*
7476 * When the VM-entry is not vectoring but there is blocking-by-MovSS, whether the
7477 * pending debug exception is held pending or is discarded is CPU implementation
7478 * specific. We will discard them (easier).
7479 */
7480 if (pVmcs->u32GuestIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS)
7481 fPendingDbgXcpt = false;
7482
7483 /* There's no pending debug exception in the shutdown or wait-for-SIPI state. */
7484 if (pVmcs->u32GuestActivityState & (VMX_VMCS_GUEST_ACTIVITY_SHUTDOWN | VMX_VMCS_GUEST_ACTIVITY_SIPI_WAIT))
7485 fPendingDbgXcpt = false;
7486 }
7487 }
7488
7489 NOREF(pszInstr);
7490 return fPendingDbgXcpt;
7491}
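/*
 * Illustrative sketch (not part of the build): a state for which the function above reports a
 * deliverable pending debug exception - a single-step trap (BS) left pending, with a
 * non-vectoring VM-entry, no MOV-SS blocking and the active guest activity state.  The helper
 * name is made up for illustration.
 */
# if 0
static bool vmxExamplePendingSingleStep(PVMCPUCC pVCpu)
{
    PVMXVVMCS pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
    pVmcs->u64GuestPendingDbgXcpts.u = VMX_VMCS_GUEST_PENDING_DEBUG_XCPT_BS;    /* pending #DB (single-step) */
    pVmcs->u32EntryIntInfo           = 0;                                       /* not a vectoring VM-entry  */
    pVmcs->u32GuestIntrState         = 0;                                       /* no MOV-SS/STI blocking    */
    pVmcs->u32GuestActivityState     = 0;                                       /* active state              */
    return iemVmxVmentryIsPendingDebugXcpt(pVCpu, "example");                   /* returns true              */
}
# endif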
7492
7493
7494/**
7495 * Sets up the monitor-trap flag (MTF).
7496 *
7497 * @param pVCpu The cross context virtual CPU structure.
7498 * @param pszInstr The VMX instruction name (for logging purposes).
7499 */
7500static void iemVmxVmentrySetupMtf(PVMCPUCC pVCpu, const char *pszInstr) RT_NOEXCEPT
7501{
7502 PCVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
7503 Assert(pVmcs);
7504 if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_MONITOR_TRAP_FLAG)
7505 {
7506 VMCPU_FF_SET(pVCpu, VMCPU_FF_VMX_MTF);
7507 Log(("%s: Monitor-trap flag set on VM-entry\n", pszInstr));
7508 }
7509 else
7510 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF));
7511 NOREF(pszInstr);
7512}
7513
7514
7515/**
7516 * Sets up NMI-window exiting.
7517 *
7518 * @param pVCpu The cross context virtual CPU structure.
7519 * @param pszInstr The VMX instruction name (for logging purposes).
7520 */
7521static void iemVmxVmentrySetupNmiWindow(PVMCPUCC pVCpu, const char *pszInstr) RT_NOEXCEPT
7522{
7523 PCVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
7524 Assert(pVmcs);
7525 if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT)
7526 {
7527 Assert(pVmcs->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI);
7528 VMCPU_FF_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW);
7529 Log(("%s: NMI-window set on VM-entry\n", pszInstr));
7530 }
7531 else
7532 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW));
7533 NOREF(pszInstr);
7534}
7535
7536
7537/**
7538 * Sets up interrupt-window exiting.
7539 *
7540 * @param pVCpu The cross context virtual CPU structure.
7541 * @param pszInstr The VMX instruction name (for logging purposes).
7542 */
7543static void iemVmxVmentrySetupIntWindow(PVMCPUCC pVCpu, const char *pszInstr) RT_NOEXCEPT
7544{
7545 PCVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
7546 Assert(pVmcs);
7547 if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_INT_WINDOW_EXIT)
7548 {
7549 VMCPU_FF_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW);
7550 Log(("%s: Interrupt-window set on VM-entry\n", pszInstr));
7551 }
7552 else
7553 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW));
7554 NOREF(pszInstr);
7555}
7556
7557
7558/**
7559 * Sets up the VMX-preemption timer.
7560 *
7561 * @param pVCpu The cross context virtual CPU structure.
7562 * @param pszInstr The VMX instruction name (for logging purposes).
7563 */
7564static void iemVmxVmentrySetupPreemptTimer(PVMCPUCC pVCpu, const char *pszInstr) RT_NOEXCEPT
7565{
7566 PCVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
7567 Assert(pVmcs);
7568 if (pVmcs->u32PinCtls & VMX_PIN_CTLS_PREEMPT_TIMER)
7569 {
7570 /*
7571 * If the timer is 0, we must cause a VM-exit before executing the first
7572 * nested-guest instruction. So we flag it as though the timer has already
7573 * expired, and we will check and cause a VM-exit at the right priority elsewhere
7574 * in the code.
7575 */
7576 uint64_t uEntryTick;
7577 uint32_t const uPreemptTimer = pVmcs->u32PreemptTimer;
7578 if (uPreemptTimer)
7579 {
7580 int rc = CPUMStartGuestVmxPremptTimer(pVCpu, uPreemptTimer, VMX_V_PREEMPT_TIMER_SHIFT, &uEntryTick);
7581 AssertRC(rc);
7582 Log(("%s: VM-entry set up VMX-preemption timer at %#RX64\n", pszInstr, uEntryTick));
7583 }
7584 else
7585 {
7586 uEntryTick = TMCpuTickGetNoCheck(pVCpu);
7587 VMCPU_FF_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER);
7588 Log(("%s: VM-entry set up VMX-preemption timer at %#RX64 to expire immediately!\n", pszInstr, uEntryTick));
7589 }
7590
7591 pVCpu->cpum.GstCtx.hwvirt.vmx.uEntryTick = uEntryTick;
7592 }
7593 else
7594 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER));
7595
7596 NOREF(pszInstr);
7597}
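/*
 * Illustrative sketch (not part of the build): the VMX-preemption timer counts down at the TSC
 * rate divided by 2^shift, the shift being what is reported to the guest in IA32_VMX_MISC[4:0]
 * (assumed to be VMX_V_PREEMPT_TIMER_SHIFT here).  A rough expiry point in TSC ticks for the
 * timer programmed above would thus be:
 */
# if 0
static uint64_t vmxExamplePreemptTimerDeadline(uint64_t uEntryTick, uint32_t uPreemptTimer)
{
    return uEntryTick + ((uint64_t)uPreemptTimer << VMX_V_PREEMPT_TIMER_SHIFT);
}
# endif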
7598
7599
7600/**
7601 * Injects an event using TRPM given a VM-entry interruption info and related
7602 * fields.
7603 *
7604 * @param pVCpu The cross context virtual CPU structure.
7605 * @param pszInstr The VMX instruction name (for logging purposes).
7606 * @param uEntryIntInfo The VM-entry interruption info.
7607 * @param uErrCode The error code associated with the event if any.
7608 * @param cbInstr The VM-entry instruction length (for software
7609 * interrupts and software exceptions). Pass 0
7610 * otherwise.
7611 * @param GCPtrFaultAddress The guest CR2 if this is a \#PF event.
7612 */
7613static void iemVmxVmentryInjectTrpmEvent(PVMCPUCC pVCpu, const char *pszInstr, uint32_t uEntryIntInfo, uint32_t uErrCode,
7614 uint32_t cbInstr, RTGCUINTPTR GCPtrFaultAddress) RT_NOEXCEPT
7615{
7616 Assert(VMX_ENTRY_INT_INFO_IS_VALID(uEntryIntInfo));
7617
7618 uint8_t const uType = VMX_ENTRY_INT_INFO_TYPE(uEntryIntInfo);
7619 uint8_t const uVector = VMX_ENTRY_INT_INFO_VECTOR(uEntryIntInfo);
7620 TRPMEVENT const enmTrpmEvent = HMVmxEventTypeToTrpmEventType(uEntryIntInfo);
7621
7622 Assert(uType != VMX_ENTRY_INT_INFO_TYPE_OTHER_EVENT);
7623
7624 int rc = TRPMAssertTrap(pVCpu, uVector, enmTrpmEvent);
7625 AssertRC(rc);
7626 Log(("%s: Injecting: vector=%#x type=%#x (%s)\n", pszInstr, uVector, uType, VMXGetEntryIntInfoTypeDesc(uType)));
7627
7628 if (VMX_ENTRY_INT_INFO_IS_ERROR_CODE_VALID(uEntryIntInfo))
7629 {
7630 TRPMSetErrorCode(pVCpu, uErrCode);
7631 Log(("%s: Injecting: err_code=%#x\n", pszInstr, uErrCode));
7632 }
7633
7634 if (VMX_ENTRY_INT_INFO_IS_XCPT_PF(uEntryIntInfo))
7635 {
7636 TRPMSetFaultAddress(pVCpu, GCPtrFaultAddress);
7637 Log(("%s: Injecting: fault_addr=%RGp\n", pszInstr, GCPtrFaultAddress));
7638 }
7639 else
7640 {
7641 switch (uType)
7642 {
7643 case VMX_ENTRY_INT_INFO_TYPE_PRIV_SW_XCPT:
7644 TRPMSetTrapDueToIcebp(pVCpu);
7645 Log(("%s: Injecting: icebp\n", pszInstr));
7646 RT_FALL_THRU();
7647 case VMX_ENTRY_INT_INFO_TYPE_SW_INT:
7648 case VMX_ENTRY_INT_INFO_TYPE_SW_XCPT:
7649 TRPMSetInstrLength(pVCpu, cbInstr);
7650 Log(("%s: Injecting: instr_len=%u\n", pszInstr, cbInstr));
7651 break;
7652 }
7653 }
7654
7655 NOREF(pszInstr);
7656}
7657
7658
7659/**
7660 * Performs event injection (if any) as part of VM-entry.
7661 *
7662 * @param pVCpu The cross context virtual CPU structure.
7663 * @param pszInstr The VMX instruction name (for logging purposes).
7664 */
7665static void iemVmxVmentryInjectEvent(PVMCPUCC pVCpu, const char *pszInstr) RT_NOEXCEPT
7666{
7667 PVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
7668
7669 /*
7670 * Inject events.
7671 * The event that is going to be made pending for injection is not subject to VMX intercepts,
7672 * thus we flag ignoring of intercepts. However, recursive exceptions, if any, during delivery
7673 * of the current event -are- subject to intercepts, hence this flag will be flipped during
7674 * the actual delivery of this event.
7675 *
7676 * See Intel spec. 26.5 "Event Injection".
7677 */
7678 uint32_t const uEntryIntInfo = pVmcs->u32EntryIntInfo;
7679 bool const fEntryIntInfoValid = VMX_ENTRY_INT_INFO_IS_VALID(uEntryIntInfo);
7680
7681 CPUMSetGuestVmxInterceptEvents(&pVCpu->cpum.GstCtx, !fEntryIntInfoValid);
7682 if (fEntryIntInfoValid)
7683 {
7684 if (VMX_ENTRY_INT_INFO_TYPE(uEntryIntInfo) != VMX_ENTRY_INT_INFO_TYPE_OTHER_EVENT)
7685 iemVmxVmentryInjectTrpmEvent(pVCpu, pszInstr, uEntryIntInfo, pVmcs->u32EntryXcptErrCode, pVmcs->u32EntryInstrLen,
7686 pVCpu->cpum.GstCtx.cr2);
7687 else
7688 {
7689 Assert(VMX_ENTRY_INT_INFO_VECTOR(uEntryIntInfo) == VMX_ENTRY_INT_INFO_VECTOR_MTF);
7690 VMCPU_FF_SET(pVCpu, VMCPU_FF_VMX_MTF);
7691 }
7692
7693 /*
7694 * We need to clear the VM-entry interruption information field's valid bit on VM-exit.
7695 *
7696 * However, we do it here on VM-entry as well because while it isn't visible to guest
7697 * software until VM-exit, when and if HM looks at the VMCS to continue nested-guest
7698 * execution using hardware-assisted VMX, it will not try to inject the event again.
7699 *
7700 * See Intel spec. 24.8.3 "VM-Entry Controls for Event Injection".
7701 */
7702 pVmcs->u32EntryIntInfo &= ~VMX_ENTRY_INT_INFO_VALID;
7703 }
7704 else
7705 {
7706 /*
7707 * Inject any pending guest debug exception.
7708 * Unlike injecting events, this #DB injection on VM-entry is subject to #DB VMX intercept.
7709 * See Intel spec. 26.6.3 "Delivery of Pending Debug Exceptions after VM Entry".
7710 */
7711 bool const fPendingDbgXcpt = iemVmxVmentryIsPendingDebugXcpt(pVCpu, pszInstr);
7712 if (fPendingDbgXcpt)
7713 {
7714 uint32_t const uDbgXcptInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_DB)
7715 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_HW_XCPT)
7716 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
7717 iemVmxVmentryInjectTrpmEvent(pVCpu, pszInstr, uDbgXcptInfo, 0 /* uErrCode */, pVmcs->u32EntryInstrLen,
7718 0 /* GCPtrFaultAddress */);
7719 }
7720 }
7721
7722 NOREF(pszInstr);
7723}
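/*
 * Illustrative sketch (not part of the build): the VM-entry interruption-information layout a
 * nested hypervisor would use to have a #PF with an error code injected, mirroring the #DB
 * value constructed above.  VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID is assumed to follow the
 * naming pattern of the other bit-field macros used in this file.
 */
# if 0
uint32_t const uExamplePfIntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR,         X86_XCPT_PF)
                                 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE,           VMX_ENTRY_INT_INFO_TYPE_HW_XCPT)
                                 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
                                 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID,          1);
/* iemVmxVmentryInjectTrpmEvent() would then receive the error code and CR2 as the fault address. */
# endif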
7724
7725
7726/**
7727 * Initializes all read-only VMCS fields as part of VM-entry.
7728 *
7729 * @param pVCpu The cross context virtual CPU structure.
7730 */
7731static void iemVmxVmentryInitReadOnlyFields(PVMCPUCC pVCpu) RT_NOEXCEPT
7732{
7733 /*
7734 * Any VMCS field which we do not establish on every VM-exit but may potentially
7735 * be used on the VM-exit path of a nested hypervisor -and- is not explicitly
7736 * specified to be undefined, needs to be initialized here.
7737 *
7738 * Thus, it is especially important to clear the Exit qualification field
7739 * since it must be zero for VM-exits where it is not used. Similarly, the
7740 * VM-exit interruption information field's valid bit needs to be cleared for
7741 * the same reasons.
7742 */
7743 PVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
7744 Assert(pVmcs);
7745
7746 /* 16-bit (none currently). */
7747 /* 32-bit. */
7748 pVmcs->u32RoVmInstrError = 0;
7749 pVmcs->u32RoExitReason = 0;
7750 pVmcs->u32RoExitIntInfo = 0;
7751 pVmcs->u32RoExitIntErrCode = 0;
7752 pVmcs->u32RoIdtVectoringInfo = 0;
7753 pVmcs->u32RoIdtVectoringErrCode = 0;
7754 pVmcs->u32RoExitInstrLen = 0;
7755 pVmcs->u32RoExitInstrInfo = 0;
7756
7757 /* 64-bit. */
7758 pVmcs->u64RoGuestPhysAddr.u = 0;
7759
7760 /* Natural-width. */
7761 pVmcs->u64RoExitQual.u = 0;
7762 pVmcs->u64RoIoRcx.u = 0;
7763 pVmcs->u64RoIoRsi.u = 0;
7764 pVmcs->u64RoIoRdi.u = 0;
7765 pVmcs->u64RoIoRip.u = 0;
7766 pVmcs->u64RoGuestLinearAddr.u = 0;
7767}
7768
7769
7770/**
7771 * VMLAUNCH/VMRESUME instruction execution worker.
7772 *
7773 * @returns Strict VBox status code.
7774 * @param pVCpu The cross context virtual CPU structure.
7775 * @param cbInstr The instruction length in bytes.
7776 * @param uInstrId The instruction identity (VMXINSTRID_VMLAUNCH or
7777 * VMXINSTRID_VMRESUME).
7778 *
7779 * @remarks Common VMX instruction checks are already expected to have been done by the caller,
7780 * i.e. CR4.VMXE, Real/V86 mode, EFER/CS.L checks.
7781 */
7782static VBOXSTRICTRC iemVmxVmlaunchVmresume(PVMCPUCC pVCpu, uint8_t cbInstr, VMXINSTRID uInstrId) RT_NOEXCEPT
7783{
7784# if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && !defined(IN_RING3)
7785 RT_NOREF3(pVCpu, cbInstr, uInstrId);
7786 return VINF_EM_RAW_EMULATE_INSTR;
7787# else
7788 Assert( uInstrId == VMXINSTRID_VMLAUNCH
7789 || uInstrId == VMXINSTRID_VMRESUME);
7790 const char * const pszInstr = uInstrId == VMXINSTRID_VMRESUME ? "vmresume" : "vmlaunch";
7791
7792 /* Nested-guest intercept. */
7793 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
7794 return iemVmxVmexitInstr(pVCpu, uInstrId == VMXINSTRID_VMRESUME ? VMX_EXIT_VMRESUME : VMX_EXIT_VMLAUNCH, cbInstr);
7795
7796 Assert(IEM_VMX_IS_ROOT_MODE(pVCpu));
7797
7798 /*
7799 * Basic VM-entry checks.
7800 * The order of the CPL, current and shadow VMCS and block-by-MovSS checks is important.
7801 * The checks following that do not have to follow a specific order.
7802 *
7803 * See Intel spec. 26.1 "Basic VM-entry Checks".
7804 */
7805
7806 /* CPL. */
7807 if (IEM_GET_CPL(pVCpu) == 0)
7808 { /* likely */ }
7809 else
7810 {
7811 Log(("%s: CPL %u -> #GP(0)\n", pszInstr, IEM_GET_CPL(pVCpu)));
7812 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmentry_Cpl;
7813 return iemRaiseGeneralProtectionFault0(pVCpu);
7814 }
7815
7816 /* Current VMCS valid. */
7817 if (IEM_VMX_HAS_CURRENT_VMCS(pVCpu))
7818 { /* likely */ }
7819 else
7820 {
7821 Log(("%s: VMCS pointer %#RGp invalid -> VMFailInvalid\n", pszInstr, IEM_VMX_GET_CURRENT_VMCS(pVCpu)));
7822 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmentry_PtrInvalid;
7823 iemVmxVmFailInvalid(pVCpu);
7824 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
7825 }
7826
7827 /* Current VMCS is not a shadow VMCS. */
7828 PVMXVVMCS pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
7829 if (!pVmcs->u32VmcsRevId.n.fIsShadowVmcs)
7830 { /* likely */ }
7831 else
7832 {
7833 Log(("%s: VMCS pointer %#RGp is a shadow VMCS -> VMFailInvalid\n", pszInstr, IEM_VMX_GET_CURRENT_VMCS(pVCpu)));
7834 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmentry_PtrShadowVmcs;
7835 iemVmxVmFailInvalid(pVCpu);
7836 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
7837 }
7838
7839 /** @todo Distinguish block-by-MovSS from block-by-STI. Currently we
7840 * use block-by-STI here which is not quite correct. */
7841 if (!CPUMIsInInterruptShadowWithUpdate(&pVCpu->cpum.GstCtx))
7842 { /* likely */ }
7843 else
7844 {
7845 Log(("%s: VM entry with events blocked by MOV SS -> VMFail\n", pszInstr));
7846 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmentry_BlocKMovSS;
7847 iemVmxVmFail(pVCpu, VMXINSTRERR_VMENTRY_BLOCK_MOVSS);
7848 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
7849 }
7850
7851 if (uInstrId == VMXINSTRID_VMLAUNCH)
7852 {
7853 /* VMLAUNCH with non-clear VMCS. */
7854 if (pVmcs->fVmcsState == VMX_V_VMCS_LAUNCH_STATE_CLEAR)
7855 { /* likely */ }
7856 else if (pVmcs->fVmcsState == VMX_V_VMCS_LAUNCH_STATE_CLEAR_LEGACY)
7857 {
7858 /* Convert legacy launch-state value to current value, see @bugref{10318#c114} for reasons. */
7859 pVmcs->fVmcsState = VMX_V_VMCS_LAUNCH_STATE_CLEAR;
7860 Log(("vmlaunch: Updated legacy 'VMCLEAR' VMCS launch-state bit to current\n"));
7861 }
7862 else
7863 {
7864 Log(("vmlaunch: VMLAUNCH with non-clear VMCS %RGp -> VMFail\n", pVCpu->cpum.GstCtx.hwvirt.vmx.GCPhysVmcs));
7865 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmentry_VmcsClear;
7866 iemVmxVmFail(pVCpu, VMXINSTRERR_VMLAUNCH_NON_CLEAR_VMCS);
7867 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
7868 }
7869 }
7870 else
7871 {
7872 /* VMRESUME with non-launched VMCS. */
7873 if (pVmcs->fVmcsState == VMX_V_VMCS_LAUNCH_STATE_LAUNCHED)
7874 { /* likely */ }
7875 else
7876 {
7877 Log(("vmresume: VMRESUME with non-launched VMCS -> VMFail\n"));
7878 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmentry_VmcsLaunch;
7879 iemVmxVmFail(pVCpu, VMXINSTRERR_VMRESUME_NON_LAUNCHED_VMCS);
7880 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
7881 }
7882 }
7883
7884 /*
7885 * We are allowed to cache VMCS related data structures (such as I/O bitmaps, MSR bitmaps)
7886 * while entering VMX non-root mode. We do some of this while checking VM-execution
7887 * controls. The nested hypervisor should not make assumptions and cannot expect
7888 * predictable behavior if changes to these structures are made in guest memory while
7889 * executing in VMX non-root mode. As far as VirtualBox is concerned, the guest cannot
7890 * modify them anyway as we cache them in host memory.
7891 *
7892 * See Intel spec. 24.11.4 "Software Access to Related Structures".
7893 */
7894 Assert(IEM_VMX_HAS_CURRENT_VMCS(pVCpu));
7895
7896 int rc = iemVmxVmentryCheckCtls(pVCpu, pszInstr);
7897 if (RT_SUCCESS(rc))
7898 {
7899 rc = iemVmxVmentryCheckHostState(pVCpu, pszInstr);
7900 if (RT_SUCCESS(rc))
7901 {
7902 /*
7903 * Initialize read-only VMCS fields before VM-entry since we don't update all of them
7904 * for every VM-exit. This needs to be done before invoking a VM-exit (even those
7905 * ones that may occur during VM-entry below).
7906 */
7907 iemVmxVmentryInitReadOnlyFields(pVCpu);
7908
7909 /*
7910 * Blocking of NMIs needs to be restored if VM-entry fails due to invalid-guest state.
7911 * So we save the VMCPU_FF_BLOCK_NMI force-flag here so we can restore it on
7912 * VM-exit when required.
7913 * See Intel spec. 26.7 "VM-entry Failures During or After Loading Guest State"
7914 */
7915 iemVmxVmentrySaveNmiBlockingFF(pVCpu);
7916
7917 rc = iemVmxVmentryCheckGuestState(pVCpu, pszInstr);
7918 if (RT_SUCCESS(rc))
7919 {
7920 /*
7921 * We've now entered nested-guest execution.
7922 *
7923 * It is important to do this prior to loading the guest state because
7924 * as part of loading the guest state, PGM (and perhaps other components
7925 * in the future) relies on detecting whether VMX non-root mode has been
7926 * entered.
7927 */
7928 pVCpu->cpum.GstCtx.hwvirt.vmx.fInVmxNonRootMode = true;
7929
7930 rc = iemVmxVmentryLoadGuestState(pVCpu, pszInstr);
7931 if (RT_SUCCESS(rc))
7932 {
7933 rc = iemVmxVmentryLoadGuestAutoMsrs(pVCpu, pszInstr);
7934 if (RT_SUCCESS(rc))
7935 {
7936 Assert(rc != VINF_CPUM_R3_MSR_WRITE);
7937
7938 /* VMLAUNCH instruction must update the VMCS launch state. */
7939 if (uInstrId == VMXINSTRID_VMLAUNCH)
7940 pVmcs->fVmcsState = VMX_V_VMCS_LAUNCH_STATE_LAUNCHED;
7941
7942 /* Perform the VMX transition (PGM updates). */
7943 VBOXSTRICTRC rcStrict = iemVmxTransition(pVCpu, cbInstr);
7944 if (rcStrict == VINF_SUCCESS)
7945 { /* likely */ }
7946 else if (RT_SUCCESS(rcStrict))
7947 {
7948 Log3(("%s: iemVmxTransition returns %Rrc -> Setting passup status\n", pszInstr,
7949 VBOXSTRICTRC_VAL(rcStrict)));
7950 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
7951 }
7952 else
7953 {
7954 Log3(("%s: iemVmxTransition failed! rc=%Rrc\n", pszInstr, VBOXSTRICTRC_VAL(rcStrict)));
7955 return rcStrict;
7956 }
7957
7958 /* Paranoia. */
7959 Assert(rcStrict == VINF_SUCCESS);
7960
7961 /*
7962 * The priority of potential VM-exits during VM-entry is important.
7963 * The priorities of VM-exits and events are listed from highest
7964 * to lowest as follows:
7965 *
7966 * 1. Event injection.
7967 * 2. Trap on task-switch (T flag set in TSS).
7968 * 3. TPR below threshold / APIC-write.
7969 * 4. SMI, INIT.
7970 * 5. MTF exit.
7971 * 6. Debug-trap exceptions (EFLAGS.TF), pending debug exceptions.
7972 * 7. VMX-preemption timer.
7973 * 9. NMI-window exit.
7974 * 10. NMI injection.
7975 * 11. Interrupt-window exit.
7976 * 12. Virtual-interrupt injection.
7977 * 13. Interrupt injection.
7978 * 14. Process next instruction (fetch, decode, execute).
7979 */
7980
7981 /* Setup VMX-preemption timer. */
7982 iemVmxVmentrySetupPreemptTimer(pVCpu, pszInstr);
7983
7984 /* Setup monitor-trap flag. */
7985 iemVmxVmentrySetupMtf(pVCpu, pszInstr);
7986
7987 /* Setup NMI-window exiting. */
7988 iemVmxVmentrySetupNmiWindow(pVCpu, pszInstr);
7989
7990 /* Setup interrupt-window exiting. */
7991 iemVmxVmentrySetupIntWindow(pVCpu, pszInstr);
7992
7993 /*
7994 * Inject any event that the nested hypervisor wants to inject.
7995 * Note! We cannot immediately perform the event injection here as we may have
7996 * pending PGM operations to perform due to switching page tables and/or
7997 * mode.
7998 */
7999 iemVmxVmentryInjectEvent(pVCpu, pszInstr);
8000
8001# if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && defined(IN_RING3)
8002 /* Reschedule to IEM-only execution of the nested-guest. */
8003 LogFlow(("%s: Enabling IEM-only EM execution policy!\n", pszInstr));
8004 int rcSched = EMR3SetExecutionPolicy(pVCpu->CTX_SUFF(pVM)->pUVM, EMEXECPOLICY_IEM_ALL, true);
8005 if (rcSched != VINF_SUCCESS)
8006 iemSetPassUpStatus(pVCpu, rcSched);
8007# endif
8008
8009 /* Finally, done. */
8010 Log2(("vmentry: %s: cs:rip=%04x:%08RX64 cr0=%#RX64 (%#RX64) cr4=%#RX64 (%#RX64) efer=%#RX64 (%#RX64)\n",
8011 pszInstr, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.cr0,
8012 pVmcs->u64Cr0ReadShadow.u, pVCpu->cpum.GstCtx.cr4, pVmcs->u64Cr4ReadShadow.u,
8013 pVCpu->cpum.GstCtx.msrEFER, pVmcs->u64GuestEferMsr.u));
8014 return VINF_SUCCESS;
8015 }
8016 return iemVmxVmexit(pVCpu, VMX_EXIT_ERR_MSR_LOAD | VMX_EXIT_REASON_ENTRY_FAILED, pVmcs->u64RoExitQual.u);
8017 }
8018 }
8019 return iemVmxVmexit(pVCpu, VMX_EXIT_ERR_INVALID_GUEST_STATE | VMX_EXIT_REASON_ENTRY_FAILED, pVmcs->u64RoExitQual.u);
8020 }
8021
8022 iemVmxVmFail(pVCpu, VMXINSTRERR_VMENTRY_INVALID_HOST_STATE);
8023 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
8024 }
8025
8026 iemVmxVmFail(pVCpu, VMXINSTRERR_VMENTRY_INVALID_CTLS);
8027 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
8028# endif
8029}
8030
8031
8032/**
8033 * Interface for HM and EM to emulate the VMLAUNCH/VMRESUME instruction.
8034 *
8035 * @returns Strict VBox status code.
8036 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
8037 * @param cbInstr The instruction length in bytes.
8038 * @param uInstrId The instruction ID (VMXINSTRID_VMLAUNCH or
8039 * VMXINSTRID_VMRESUME).
8040 * @thread EMT(pVCpu)
8041 */
8042VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmlaunchVmresume(PVMCPUCC pVCpu, uint8_t cbInstr, VMXINSTRID uInstrId)
8043{
8044 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
8045 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_VMX_VMENTRY_MASK);
8046
8047 iemInitExec(pVCpu, 0 /*fExecOpts*/);
8048 VBOXSTRICTRC rcStrict = iemVmxVmlaunchVmresume(pVCpu, cbInstr, uInstrId);
8049 Assert(!pVCpu->iem.s.cActiveMappings);
8050 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
8051}
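/*
 * Illustrative sketch (not part of the build): how a caller such as HM, falling back to
 * emulation for a VMLAUNCH it cannot handle itself, might use the interface above.  The
 * instruction length comes from the caller's decoder; the helper name is made up.
 */
# if 0
static VBOXSTRICTRC vmxExampleEmulateVmlaunch(PVMCPUCC pVCpu, uint8_t cbInstr)
{
    return IEMExecDecodedVmlaunchVmresume(pVCpu, cbInstr, VMXINSTRID_VMLAUNCH);
}
# endif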
8052
8053
8054/**
8055 * Checks whether an RDMSR or WRMSR instruction for the given MSR is intercepted
8056 * (causes a VM-exit) or not.
8057 *
8058 * @returns @c true if the instruction is intercepted, @c false otherwise.
8059 * @param pVCpu The cross context virtual CPU structure.
8060 * @param uExitReason The VM-exit reason (VMX_EXIT_RDMSR or
8061 * VMX_EXIT_WRMSR).
8062 * @param idMsr The MSR.
8063 */
8064bool iemVmxIsRdmsrWrmsrInterceptSet(PCVMCPU pVCpu, uint32_t uExitReason, uint32_t idMsr) RT_NOEXCEPT
8065{
8066 Assert(IEM_VMX_IS_NON_ROOT_MODE(pVCpu));
8067 Assert( uExitReason == VMX_EXIT_RDMSR
8068 || uExitReason == VMX_EXIT_WRMSR);
8069
8070 /* Consult the MSR bitmap if the feature is supported. */
8071 PCVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
8072 Assert(pVmcs);
8073 if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS)
8074 {
8075 uint32_t const fMsrpm = CPUMGetVmxMsrPermission(pVCpu->cpum.GstCtx.hwvirt.vmx.abMsrBitmap, idMsr);
8076 if (uExitReason == VMX_EXIT_RDMSR)
8077 return RT_BOOL(fMsrpm & VMXMSRPM_EXIT_RD);
8078 return RT_BOOL(fMsrpm & VMXMSRPM_EXIT_WR);
8079 }
8080
8081 /* Without MSR bitmaps, all MSR accesses are intercepted. */
8082 return true;
8083}
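/*
 * Illustrative sketch (not part of the build): the lookup CPUMGetVmxMsrPermission() encapsulates.
 * The 4K MSR bitmap consists of four 1K regions - read/low at 0x000, read/high at 0x400,
 * write/low at 0x800 and write/high at 0xc00 - where "low" covers MSRs 0x00000000..0x00001fff
 * and "high" covers 0xc0000000..0xc0001fff; a set bit means the access causes a VM-exit.
 * The helper name is made up for illustration.
 */
# if 0
static bool vmxExampleIsMsrReadIntercepted(uint8_t const *pbMsrBitmap, uint32_t idMsr)
{
    uint32_t offRegion;
    if (idMsr <= UINT32_C(0x00001fff))
        offRegion = UINT32_C(0x000);                    /* read bitmap, low MSRs  */
    else if (idMsr - UINT32_C(0xc0000000) <= UINT32_C(0x1fff))
        offRegion = UINT32_C(0x400);                    /* read bitmap, high MSRs */
    else
        return true;                                    /* out-of-range MSRs always cause VM-exits */
    uint32_t const iBit = idMsr & UINT32_C(0x1fff);
    return RT_BOOL(pbMsrBitmap[offRegion + (iBit >> 3)] & RT_BIT_32(iBit & 7));
}
# endif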
8084
8085
8086/**
8087 * VMREAD instruction execution worker that does not perform any validation checks.
8088 *
8089 * Callers are expected to have performed the necessary checks and to ensure the
8090 * VMREAD will succeed.
8091 *
8092 * @param pVmcs Pointer to the virtual VMCS.
8093 * @param pu64Dst Where to write the VMCS value.
8094 * @param u64VmcsField The VMCS field.
8095 *
8096 * @remarks May be called with interrupts disabled.
8097 */
8098static void iemVmxVmreadNoCheck(PCVMXVVMCS pVmcs, uint64_t *pu64Dst, uint64_t u64VmcsField) RT_NOEXCEPT
8099{
8100 VMXVMCSFIELD VmcsField;
8101 VmcsField.u = u64VmcsField;
8102 uint8_t const uWidth = RT_BF_GET(VmcsField.u, VMX_BF_VMCSFIELD_WIDTH);
8103 uint8_t const uType = RT_BF_GET(VmcsField.u, VMX_BF_VMCSFIELD_TYPE);
8104 uint8_t const uWidthType = (uWidth << 2) | uType;
8105 uint8_t const uIndex = RT_BF_GET(VmcsField.u, VMX_BF_VMCSFIELD_INDEX);
8106 Assert(uIndex <= VMX_V_VMCS_MAX_INDEX);
8107 uint16_t const offField = g_aoffVmcsMap[uWidthType][uIndex];
8108 AssertMsg(offField < VMX_V_VMCS_SIZE, ("off=%u field=%#RX64 width=%#x type=%#x index=%#x (%u)\n", offField, u64VmcsField,
8109 uWidth, uType, uIndex, uIndex));
8110 AssertCompile(VMX_V_SHADOW_VMCS_SIZE == VMX_V_VMCS_SIZE);
8111
8112 /*
8113 * Read the VMCS component based on the field's effective width.
8114 *
8115 * The effective width of 64-bit fields is adjusted to 32 bits if the access type
8116 * indicates the high bits (little endian).
8117 *
8118 * Note! The caller is responsible for trimming the result and updating registers
8119 * or memory locations as required. Here we just zero-extend to the largest
8120 * type (i.e. 64 bits).
8121 */
8122 uint8_t const *pbVmcs = (uint8_t const *)pVmcs;
8123 uint8_t const *pbField = pbVmcs + offField;
8124 uint8_t const uEffWidth = VMXGetVmcsFieldWidthEff(VmcsField.u);
8125 switch (uEffWidth)
8126 {
8127 case VMX_VMCSFIELD_WIDTH_64BIT:
8128 case VMX_VMCSFIELD_WIDTH_NATURAL: *pu64Dst = *(uint64_t const *)pbField; break;
8129 case VMX_VMCSFIELD_WIDTH_32BIT: *pu64Dst = *(uint32_t const *)pbField; break;
8130 case VMX_VMCSFIELD_WIDTH_16BIT: *pu64Dst = *(uint16_t const *)pbField; break;
8131 }
8132}
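/*
 * Illustrative sketch (not part of the build): a worked example of the field-encoding decode
 * done above, using the guest RIP field (encoding 0x681e).  Bit 0 (access type) is 0, bits 9:1
 * give index 15, bits 11:10 give type 2 (guest-state) and bits 14:13 give width 3 (natural),
 * so the offset lookup ends up at g_aoffVmcsMap[(3 << 2) | 2][15].
 */
# if 0
uint64_t const uExampleVmcsField = UINT64_C(0x681e);                                     /* guest RIP   */
uint8_t  const uExampleWidth     = RT_BF_GET(uExampleVmcsField, VMX_BF_VMCSFIELD_WIDTH); /* 3 = natural */
uint8_t  const uExampleType      = RT_BF_GET(uExampleVmcsField, VMX_BF_VMCSFIELD_TYPE);  /* 2 = guest   */
uint8_t  const uExampleIndex     = RT_BF_GET(uExampleVmcsField, VMX_BF_VMCSFIELD_INDEX); /* 15          */
# endif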
8133
8134
8135/**
8136 * Interface for HM and EM to read a VMCS field from the nested-guest VMCS.
8137 *
8138 * It is ASSUMED the caller knows what they're doing. No VMREAD instruction checks
8139 * are performed. Bounds checks are performed in strict builds only.
8140 *
8141 * @param pVmcs Pointer to the virtual VMCS.
8142 * @param u64VmcsField The VMCS field.
8143 * @param pu64Dst Where to store the VMCS value.
8144 *
8145 * @remarks May be called with interrupts disabled.
8146 * @todo This should probably be moved to CPUM someday.
8147 */
8148VMM_INT_DECL(void) IEMReadVmxVmcsField(PCVMXVVMCS pVmcs, uint64_t u64VmcsField, uint64_t *pu64Dst)
8149{
8150 AssertPtr(pVmcs);
8151 AssertPtr(pu64Dst);
8152 iemVmxVmreadNoCheck(pVmcs, pu64Dst, u64VmcsField);
8153}
8154
8155
8156/**
8157 * VMREAD common (memory/register) instruction execution worker.
8158 *
8159 * @returns Strict VBox status code.
8160 * @param pVCpu The cross context virtual CPU structure.
8161 * @param cbInstr The instruction length in bytes.
8162 * @param pu64Dst Where to write the VMCS value (only updated when
8163 * VINF_SUCCESS is returned).
8164 * @param u64VmcsField The VMCS field.
8165 * @param pExitInfo Pointer to the VM-exit information. Optional, can be
8166 * NULL.
8167 */
8168static VBOXSTRICTRC iemVmxVmreadCommon(PVMCPUCC pVCpu, uint8_t cbInstr, uint64_t *pu64Dst,
8169 uint64_t u64VmcsField, PCVMXVEXITINFO pExitInfo) RT_NOEXCEPT
8170{
8171 /* Nested-guest intercept. */
8172 if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
8173 && CPUMIsGuestVmxVmreadVmwriteInterceptSet(pVCpu, VMX_EXIT_VMREAD, u64VmcsField))
8174 {
8175 if (pExitInfo)
8176 return iemVmxVmexitInstrWithInfo(pVCpu, pExitInfo);
8177 return iemVmxVmexitInstrNeedsInfo(pVCpu, VMX_EXIT_VMREAD, VMXINSTRID_VMREAD, cbInstr);
8178 }
8179
8180 /* CPL. */
8181 if (IEM_GET_CPL(pVCpu) == 0)
8182 { /* likely */ }
8183 else
8184 {
8185 Log(("vmread: CPL %u -> #GP(0)\n", IEM_GET_CPL(pVCpu)));
8186 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmread_Cpl;
8187 return iemRaiseGeneralProtectionFault0(pVCpu);
8188 }
8189
8190 pVCpu->iem.s.cPotentialExits++;
8191
8192 /* VMCS pointer in root mode. */
8193 if ( !IEM_VMX_IS_ROOT_MODE(pVCpu)
8194 || IEM_VMX_HAS_CURRENT_VMCS(pVCpu))
8195 { /* likely */ }
8196 else
8197 {
8198 Log(("vmread: VMCS pointer %#RGp invalid -> VMFailInvalid\n", IEM_VMX_GET_CURRENT_VMCS(pVCpu)));
8199 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmread_PtrInvalid;
8200 iemVmxVmFailInvalid(pVCpu);
8201 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
8202 }
8203
8204 /* VMCS-link pointer in non-root mode. */
8205 if ( !IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
8206 || IEM_VMX_HAS_SHADOW_VMCS(pVCpu))
8207 { /* likely */ }
8208 else
8209 {
8210 Log(("vmread: VMCS-link pointer %#RGp invalid -> VMFailInvalid\n", IEM_VMX_GET_SHADOW_VMCS(pVCpu)));
8211 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmread_LinkPtrInvalid;
8212 iemVmxVmFailInvalid(pVCpu);
8213 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
8214 }
8215
8216 /* Supported VMCS field. */
8217 if (CPUMIsGuestVmxVmcsFieldValid(pVCpu->CTX_SUFF(pVM), u64VmcsField))
8218 { /* likely */ }
8219 else
8220 {
8221 Log(("vmread: VMCS field %#RX64 invalid -> VMFail\n", u64VmcsField));
8222 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmread_FieldInvalid;
8223 pVCpu->cpum.GstCtx.hwvirt.vmx.uDiagAux = u64VmcsField;
8224 iemVmxVmFail(pVCpu, VMXINSTRERR_VMREAD_INVALID_COMPONENT);
8225 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
8226 }
8227
8228 /*
8229 * Reading from the current or shadow VMCS.
8230 */
8231 PCVMXVVMCS pVmcs = !IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
8232 ? &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs
8233 : &pVCpu->cpum.GstCtx.hwvirt.vmx.ShadowVmcs;
8234 iemVmxVmreadNoCheck(pVmcs, pu64Dst, u64VmcsField);
8235 Log4(("vmread %#RX64 => %#RX64\n", u64VmcsField, *pu64Dst));
8236 return VINF_SUCCESS;
8237}
8238
8239
8240/**
8241 * VMREAD (64-bit register) instruction execution worker.
8242 *
8243 * @returns Strict VBox status code.
8244 * @param pVCpu The cross context virtual CPU structure.
8245 * @param cbInstr The instruction length in bytes.
8246 * @param pu64Dst Where to store the VMCS field's value.
8247 * @param u64VmcsField The VMCS field.
8248 * @param pExitInfo Pointer to the VM-exit information. Optional, can be
8249 * NULL.
8250 */
8251static VBOXSTRICTRC iemVmxVmreadReg64(PVMCPUCC pVCpu, uint8_t cbInstr, uint64_t *pu64Dst,
8252 uint64_t u64VmcsField, PCVMXVEXITINFO pExitInfo) RT_NOEXCEPT
8253{
8254 VBOXSTRICTRC rcStrict = iemVmxVmreadCommon(pVCpu, cbInstr, pu64Dst, u64VmcsField, pExitInfo);
8255 if (rcStrict == VINF_SUCCESS)
8256 {
8257 iemVmxVmSucceed(pVCpu);
8258 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
8259 }
8260
8261 Log(("vmread/reg: iemVmxVmreadCommon failed rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
8262 return rcStrict;
8263}
8264
8265
8266/**
8267 * VMREAD (32-bit register) instruction execution worker.
8268 *
8269 * @returns Strict VBox status code.
8270 * @param pVCpu The cross context virtual CPU structure.
8271 * @param cbInstr The instruction length in bytes.
8272 * @param pu32Dst Where to store the VMCS field's value.
8273 * @param u32VmcsField The VMCS field.
8274 * @param pExitInfo Pointer to the VM-exit information. Optional, can be
8275 * NULL.
8276 */
8277static VBOXSTRICTRC iemVmxVmreadReg32(PVMCPUCC pVCpu, uint8_t cbInstr, uint32_t *pu32Dst,
8278 uint64_t u32VmcsField, PCVMXVEXITINFO pExitInfo) RT_NOEXCEPT
8279{
8280 uint64_t u64Dst;
8281 VBOXSTRICTRC rcStrict = iemVmxVmreadCommon(pVCpu, cbInstr, &u64Dst, u32VmcsField, pExitInfo);
8282 if (rcStrict == VINF_SUCCESS)
8283 {
8284 *pu32Dst = u64Dst;
8285 iemVmxVmSucceed(pVCpu);
8286 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
8287 }
8288
8289 Log(("vmread/reg: iemVmxVmreadCommon failed rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
8290 return rcStrict;
8291}
8292
8293
8294/**
8295 * VMREAD (memory) instruction execution worker.
8296 *
8297 * @returns Strict VBox status code.
8298 * @param pVCpu The cross context virtual CPU structure.
8299 * @param cbInstr The instruction length in bytes.
8300 * @param iEffSeg The effective segment register to use with @a GCPtrDst.
8302 * @param GCPtrDst The guest linear address to store the VMCS field's
8303 * value.
8304 * @param u64VmcsField The VMCS field.
8305 * @param pExitInfo Pointer to the VM-exit information. Optional, can be
8306 * NULL.
8307 */
8308static VBOXSTRICTRC iemVmxVmreadMem(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPTR GCPtrDst,
8309 uint64_t u64VmcsField, PCVMXVEXITINFO pExitInfo) RT_NOEXCEPT
8310{
8311 uint64_t u64Dst;
8312 VBOXSTRICTRC rcStrict = iemVmxVmreadCommon(pVCpu, cbInstr, &u64Dst, u64VmcsField, pExitInfo);
8313 if (rcStrict == VINF_SUCCESS)
8314 {
8315 /*
8316 * Write the VMCS field's value to the location specified in guest-memory.
8317 */
8318 if (IEM_IS_64BIT_CODE(pVCpu))
8319 rcStrict = iemMemStoreDataU64(pVCpu, iEffSeg, GCPtrDst, u64Dst);
8320 else
8321 rcStrict = iemMemStoreDataU32(pVCpu, iEffSeg, GCPtrDst, u64Dst);
8322 if (rcStrict == VINF_SUCCESS)
8323 {
8324 iemVmxVmSucceed(pVCpu);
8325 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
8326 }
8327
8328 Log(("vmread/mem: Failed to write to memory operand at %#RGv, rc=%Rrc\n", GCPtrDst, VBOXSTRICTRC_VAL(rcStrict)));
8329 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmread_PtrMap;
8330 pVCpu->cpum.GstCtx.hwvirt.vmx.uDiagAux = GCPtrDst;
8331 return rcStrict;
8332 }
8333
8334 Log(("vmread/mem: iemVmxVmreadCommon failed rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
8335 return rcStrict;
8336}
8337
8338
8339/**
8340 * Interface for HM and EM to emulate the VMREAD instruction.
8341 *
8342 * @returns Strict VBox status code.
8343 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
8344 * @param pExitInfo Pointer to the VM-exit information.
8345 * @thread EMT(pVCpu)
8346 */
8347VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmread(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo)
8348{
8349 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
8350 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI);
8351 Assert(pExitInfo);
8352
8353 iemInitExec(pVCpu, 0 /*fExecOpts*/);
8354
8355 VBOXSTRICTRC rcStrict;
8356 uint8_t const cbInstr = pExitInfo->cbInstr;
8357 bool const fIs64BitMode = RT_BOOL(IEM_IS_64BIT_CODE(pVCpu));
8358 uint64_t const u64FieldEnc = fIs64BitMode
8359 ? iemGRegFetchU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg2)
8360 : iemGRegFetchU32(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg2);
8361 if (pExitInfo->InstrInfo.VmreadVmwrite.fIsRegOperand)
8362 {
8363 if (fIs64BitMode)
8364 {
8365 uint64_t *pu64Dst = iemGRegRefU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg1);
8366 rcStrict = iemVmxVmreadReg64(pVCpu, cbInstr, pu64Dst, u64FieldEnc, pExitInfo);
8367 }
8368 else
8369 {
8370 uint32_t *pu32Dst = iemGRegRefU32(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg1);
8371 rcStrict = iemVmxVmreadReg32(pVCpu, cbInstr, pu32Dst, u64FieldEnc, pExitInfo);
8372 }
8373 }
8374 else
8375 {
8376 RTGCPTR const GCPtrDst = pExitInfo->GCPtrEffAddr;
8377 uint8_t const iEffSeg = pExitInfo->InstrInfo.VmreadVmwrite.iSegReg;
8378 rcStrict = iemVmxVmreadMem(pVCpu, cbInstr, iEffSeg, GCPtrDst, u64FieldEnc, pExitInfo);
8379 }
8380 Assert(!pVCpu->iem.s.cActiveMappings);
8381 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
8382}
8383
8384
8385/**
8386 * VMWRITE instruction execution worker that does not perform any validation
8387 * checks.
8388 *
8389 * Callers are expected to have performed the necessary checks and to ensure the
8390 * VMWRITE will succeed.
8391 *
8392 * @param pVmcs Pointer to the virtual VMCS.
8393 * @param u64Val The value to write.
8394 * @param u64VmcsField The VMCS field.
8395 *
8396 * @remarks May be called with interrupts disabled.
8397 */
8398static void iemVmxVmwriteNoCheck(PVMXVVMCS pVmcs, uint64_t u64Val, uint64_t u64VmcsField) RT_NOEXCEPT
8399{
8400 VMXVMCSFIELD VmcsField;
8401 VmcsField.u = u64VmcsField;
8402 uint8_t const uWidth = RT_BF_GET(VmcsField.u, VMX_BF_VMCSFIELD_WIDTH);
8403 uint8_t const uType = RT_BF_GET(VmcsField.u, VMX_BF_VMCSFIELD_TYPE);
8404 uint8_t const uWidthType = (uWidth << 2) | uType;
8405 uint8_t const uIndex = RT_BF_GET(VmcsField.u, VMX_BF_VMCSFIELD_INDEX);
8406 Assert(uIndex <= VMX_V_VMCS_MAX_INDEX);
8407 uint16_t const offField = g_aoffVmcsMap[uWidthType][uIndex];
8408 Assert(offField < VMX_V_VMCS_SIZE);
8409 AssertCompile(VMX_V_SHADOW_VMCS_SIZE == VMX_V_VMCS_SIZE);
8410
8411 /*
8412 * Write the VMCS component based on the field's effective width.
8413 *
8414 * The effective width of 64-bit fields is adjusted to 32 bits if the access type
8415 * indicates the high bits (little endian).
8416 */
8417 uint8_t *pbVmcs = (uint8_t *)pVmcs;
8418 uint8_t *pbField = pbVmcs + offField;
8419 uint8_t const uEffWidth = VMXGetVmcsFieldWidthEff(VmcsField.u);
8420 switch (uEffWidth)
8421 {
8422 case VMX_VMCSFIELD_WIDTH_64BIT:
8423 case VMX_VMCSFIELD_WIDTH_NATURAL: *(uint64_t *)pbField = u64Val; break;
8424 case VMX_VMCSFIELD_WIDTH_32BIT: *(uint32_t *)pbField = u64Val; break;
8425 case VMX_VMCSFIELD_WIDTH_16BIT: *(uint16_t *)pbField = u64Val; break;
8426 }
8427}
8428
8429
8430/**
8431 * Interface for HM and EM to write a VMCS field in the nested-guest VMCS.
8432 *
8433 * It is ASSUMED the caller knows what they're doing. No VMWRITE instruction checks
8434 * are performed. Bounds checks are performed in strict builds only.
8435 *
8436 * @param pVmcs Pointer to the virtual VMCS.
8437 * @param u64VmcsField The VMCS field.
8438 * @param u64Val The value to write.
8439 *
8440 * @remarks May be called with interrupts disabled.
8441 * @todo This should probably be moved to CPUM someday.
8442 */
8443VMM_INT_DECL(void) IEMWriteVmxVmcsField(PVMXVVMCS pVmcs, uint64_t u64VmcsField, uint64_t u64Val)
8444{
8445 AssertPtr(pVmcs);
8446 iemVmxVmwriteNoCheck(pVmcs, u64Val, u64VmcsField);
8447}
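/*
 * Illustrative sketch (not part of the build): how HM might use the two interfaces above to
 * read and patch a nested-guest VMCS field (0x681e is the guest RIP field encoding).  The
 * helper name is made up for illustration.
 */
# if 0
static void vmxExampleAdvanceNestedGuestRip(PVMXVVMCS pVmcs, uint8_t cbInstr)
{
    uint64_t uGuestRip;
    IEMReadVmxVmcsField(pVmcs, UINT64_C(0x681e), &uGuestRip);
    IEMWriteVmxVmcsField(pVmcs, UINT64_C(0x681e), uGuestRip + cbInstr);
}
# endif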
8448
8449
8450/**
8451 * VMWRITE instruction execution worker.
8452 *
8453 * @returns Strict VBox status code.
8454 * @param pVCpu The cross context virtual CPU structure.
8455 * @param cbInstr The instruction length in bytes.
8456 * @param iEffSeg The effective segment register to use with @a u64Val.
8457 * Pass UINT8_MAX if it is a register access.
8458 * @param u64Val The value to write (or the guest linear address of the
8459 * value); @a iEffSeg indicates whether it is a memory
8460 * operand.
8461 * @param u64VmcsField The VMCS field.
8462 * @param pExitInfo Pointer to the VM-exit information. Optional, can be
8463 * NULL.
8464 */
8465static VBOXSTRICTRC iemVmxVmwrite(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iEffSeg, uint64_t u64Val,
8466 uint64_t u64VmcsField, PCVMXVEXITINFO pExitInfo) RT_NOEXCEPT
8467{
8468 /* Nested-guest intercept. */
8469 if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
8470 && CPUMIsGuestVmxVmreadVmwriteInterceptSet(pVCpu, VMX_EXIT_VMWRITE, u64VmcsField))
8471 {
8472 if (pExitInfo)
8473 return iemVmxVmexitInstrWithInfo(pVCpu, pExitInfo);
8474 return iemVmxVmexitInstrNeedsInfo(pVCpu, VMX_EXIT_VMWRITE, VMXINSTRID_VMWRITE, cbInstr);
8475 }
8476
8477 /* CPL. */
8478 if (IEM_GET_CPL(pVCpu) == 0)
8479 { /* likely */ }
8480 else
8481 {
8482 Log(("vmwrite: CPL %u -> #GP(0)\n", IEM_GET_CPL(pVCpu)));
8483 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmwrite_Cpl;
8484 return iemRaiseGeneralProtectionFault0(pVCpu);
8485 }
8486
8487 pVCpu->iem.s.cPotentialExits++;
8488
8489 /* VMCS pointer in root mode. */
8490 if ( !IEM_VMX_IS_ROOT_MODE(pVCpu)
8491 || IEM_VMX_HAS_CURRENT_VMCS(pVCpu))
8492 { /* likely */ }
8493 else
8494 {
8495 Log(("vmwrite: VMCS pointer %#RGp invalid -> VMFailInvalid\n", IEM_VMX_GET_CURRENT_VMCS(pVCpu)));
8496 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmwrite_PtrInvalid;
8497 iemVmxVmFailInvalid(pVCpu);
8498 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
8499 }
8500
8501 /* VMCS-link pointer in non-root mode. */
8502 if ( !IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
8503 || IEM_VMX_HAS_SHADOW_VMCS(pVCpu))
8504 { /* likely */ }
8505 else
8506 {
8507 Log(("vmwrite: VMCS-link pointer %#RGp invalid -> VMFailInvalid\n", IEM_VMX_GET_SHADOW_VMCS(pVCpu)));
8508 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmwrite_LinkPtrInvalid;
8509 iemVmxVmFailInvalid(pVCpu);
8510 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
8511 }
8512
8513 /* If the VMWRITE instruction references memory, access the specified memory operand. */
8514 bool const fIsRegOperand = iEffSeg == UINT8_MAX;
8515 if (!fIsRegOperand)
8516 {
8517 /* Read the value from the specified guest memory location. */
8518 VBOXSTRICTRC rcStrict;
8519 RTGCPTR const GCPtrVal = u64Val;
8520 if (IEM_IS_64BIT_CODE(pVCpu))
8521 rcStrict = iemMemFetchDataU64(pVCpu, &u64Val, iEffSeg, GCPtrVal);
8522 else
8523 rcStrict = iemMemFetchDataU32_ZX_U64(pVCpu, &u64Val, iEffSeg, GCPtrVal);
8524 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
8525 {
8526 Log(("vmwrite: Failed to read value from memory operand at %#RGv, rc=%Rrc\n", GCPtrVal, VBOXSTRICTRC_VAL(rcStrict)));
8527 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmwrite_PtrMap;
8528 pVCpu->cpum.GstCtx.hwvirt.vmx.uDiagAux = GCPtrVal;
8529 return rcStrict;
8530 }
8531 }
8532 else
8533 Assert(!pExitInfo || pExitInfo->InstrInfo.VmreadVmwrite.fIsRegOperand);
8534
8535 /* Supported VMCS field. */
8536 if (CPUMIsGuestVmxVmcsFieldValid(pVCpu->CTX_SUFF(pVM), u64VmcsField))
8537 { /* likely */ }
8538 else
8539 {
8540 Log(("vmwrite: VMCS field %#RX64 invalid -> VMFail\n", u64VmcsField));
8541 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmwrite_FieldInvalid;
8542 pVCpu->cpum.GstCtx.hwvirt.vmx.uDiagAux = u64VmcsField;
8543 iemVmxVmFail(pVCpu, VMXINSTRERR_VMWRITE_INVALID_COMPONENT);
8544 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
8545 }
8546
8547 /* Read-only VMCS field. */
8548 bool const fIsFieldReadOnly = VMXIsVmcsFieldReadOnly(u64VmcsField);
8549 if ( !fIsFieldReadOnly
8550 || IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fVmxVmwriteAll)
8551 { /* likely */ }
8552 else
8553 {
8554 Log(("vmwrite: Write to read-only VMCS component %#RX64 -> VMFail\n", u64VmcsField));
8555 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmwrite_FieldRo;
8556 pVCpu->cpum.GstCtx.hwvirt.vmx.uDiagAux = u64VmcsField;
8557 iemVmxVmFail(pVCpu, VMXINSTRERR_VMWRITE_RO_COMPONENT);
8558 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
8559 }
8560
8561 /*
8562 * Write to the current or shadow VMCS.
8563 */
8564 bool const fInVmxNonRootMode = IEM_VMX_IS_NON_ROOT_MODE(pVCpu);
8565 PVMXVVMCS pVmcs = !fInVmxNonRootMode
8566 ? &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs
8567 : &pVCpu->cpum.GstCtx.hwvirt.vmx.ShadowVmcs;
8568 iemVmxVmwriteNoCheck(pVmcs, u64Val, u64VmcsField);
8569 Log4(("vmwrite %#RX64 <= %#RX64\n", u64VmcsField, u64Val));
8570
8571 if ( !fInVmxNonRootMode
8572 && VM_IS_HM_ENABLED(pVCpu->CTX_SUFF(pVM)))
8573 {
8574 /* Notify HM that the VMCS content might have changed. */
8575 HMNotifyVmxNstGstCurrentVmcsChanged(pVCpu);
8576 }
8577
8578 iemVmxVmSucceed(pVCpu);
8579 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
8580}
8581
8582
8583/**
8584 * Interface for HM and EM to emulate the VMWRITE instruction.
8585 *
8586 * @returns Strict VBox status code.
8587 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
8588 * @param pExitInfo Pointer to the VM-exit information.
8589 * @thread EMT(pVCpu)
8590 */
8591VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmwrite(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo)
8592{
8593 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
8594 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI);
8595 Assert(pExitInfo);
8596
8597 iemInitExec(pVCpu, 0 /*fExecOpts*/);
8598
8599 uint64_t u64Val;
8600 uint8_t iEffSeg;
8601 if (pExitInfo->InstrInfo.VmreadVmwrite.fIsRegOperand)
8602 {
8603 u64Val = iemGRegFetchU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg1);
8604 iEffSeg = UINT8_MAX;
8605 }
8606 else
8607 {
8608 u64Val = pExitInfo->GCPtrEffAddr;
8609 iEffSeg = pExitInfo->InstrInfo.VmreadVmwrite.iSegReg;
8610 }
8611 uint8_t const cbInstr = pExitInfo->cbInstr;
8612 uint64_t const u64FieldEnc = IEM_IS_64BIT_CODE(pVCpu)
8613 ? iemGRegFetchU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg2)
8614 : iemGRegFetchU32(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg2);
8615 VBOXSTRICTRC rcStrict = iemVmxVmwrite(pVCpu, cbInstr, iEffSeg, u64Val, u64FieldEnc, pExitInfo);
8616 Assert(!pVCpu->iem.s.cActiveMappings);
8617 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
8618}
8619
8620
8621/**
8622 * VMCLEAR instruction execution worker.
8623 *
8624 * @returns Strict VBox status code.
8625 * @param pVCpu The cross context virtual CPU structure.
8626 * @param cbInstr The instruction length in bytes.
8627 * @param iEffSeg The effective segment register to use with @a GCPtrVmcs.
8628 * @param GCPtrVmcs The linear address of the VMCS pointer.
8629 * @param pExitInfo Pointer to the VM-exit information. Optional, can be NULL.
8630 *
8631 * @remarks Common VMX instruction checks are already expected to have been done by the caller,
8632 * i.e. VMX operation, CR4.VMXE, Real/V86 mode, EFER/CS.L checks.
8633 */
8634static VBOXSTRICTRC iemVmxVmclear(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iEffSeg,
8635 RTGCPHYS GCPtrVmcs, PCVMXVEXITINFO pExitInfo) RT_NOEXCEPT
8636{
8637 /* Nested-guest intercept. */
8638 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
8639 {
8640 if (pExitInfo)
8641 return iemVmxVmexitInstrWithInfo(pVCpu, pExitInfo);
8642 return iemVmxVmexitInstrNeedsInfo(pVCpu, VMX_EXIT_VMCLEAR, VMXINSTRID_NONE, cbInstr);
8643 }
8644
8645 Assert(IEM_VMX_IS_ROOT_MODE(pVCpu));
8646
8647 /* CPL. */
8648 if (IEM_GET_CPL(pVCpu) == 0)
8649 { /* likely */ }
8650 else
8651 {
8652 Log(("vmclear: CPL %u -> #GP(0)\n", IEM_GET_CPL(pVCpu)));
8653 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmclear_Cpl;
8654 return iemRaiseGeneralProtectionFault0(pVCpu);
8655 }
8656
8657 /* Get the VMCS pointer from the location specified by the source memory operand. */
8658 RTGCPHYS GCPhysVmcs;
8659 VBOXSTRICTRC rcStrict = iemMemFetchDataU64(pVCpu, &GCPhysVmcs, iEffSeg, GCPtrVmcs);
8660 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
8661 { /* likely */ }
8662 else
8663 {
8664 Log(("vmclear: Failed to read VMCS physaddr from %#RGv, rc=%Rrc\n", GCPtrVmcs, VBOXSTRICTRC_VAL(rcStrict)));
8665 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmclear_PtrMap;
8666 pVCpu->cpum.GstCtx.hwvirt.vmx.uDiagAux = GCPtrVmcs;
8667 return rcStrict;
8668 }
8669
8670 /* VMCS pointer alignment. */
8671 if (!(GCPhysVmcs & X86_PAGE_4K_OFFSET_MASK))
8672 { /* likely */ }
8673 else
8674 {
8675 Log(("vmclear: VMCS pointer not page-aligned -> VMFail()\n"));
8676 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmclear_PtrAlign;
8677 pVCpu->cpum.GstCtx.hwvirt.vmx.uDiagAux = GCPhysVmcs;
8678 iemVmxVmFail(pVCpu, VMXINSTRERR_VMCLEAR_INVALID_PHYSADDR);
8679 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
8680 }
8681
8682 /* VMCS physical-address width limits. */
8683 if (!(GCPhysVmcs >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth))
8684 { /* likely */ }
8685 else
8686 {
8687 Log(("vmclear: VMCS pointer extends beyond physical-address width -> VMFail()\n"));
8688 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmclear_PtrWidth;
8689 pVCpu->cpum.GstCtx.hwvirt.vmx.uDiagAux = GCPhysVmcs;
8690 iemVmxVmFail(pVCpu, VMXINSTRERR_VMCLEAR_INVALID_PHYSADDR);
8691 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
8692 }
8693
8694 /* VMCS is not the VMXON region. */
8695 if (GCPhysVmcs != pVCpu->cpum.GstCtx.hwvirt.vmx.GCPhysVmxon)
8696 { /* likely */ }
8697 else
8698 {
8699 Log(("vmclear: VMCS pointer cannot be identical to VMXON region pointer -> VMFail()\n"));
8700 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmclear_PtrVmxon;
8701 pVCpu->cpum.GstCtx.hwvirt.vmx.uDiagAux = GCPhysVmcs;
8702 iemVmxVmFail(pVCpu, VMXINSTRERR_VMCLEAR_VMXON_PTR);
8703 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
8704 }
8705
8706 /* Ensure VMCS is not MMIO, ROM etc. This is not an Intel requirement but a
8707 restriction imposed by our implementation. */
8708 if (PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmcs))
8709 { /* likely */ }
8710 else
8711 {
8712 Log(("vmclear: VMCS not normal memory -> VMFail()\n"));
8713 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmclear_PtrAbnormal;
8714 pVCpu->cpum.GstCtx.hwvirt.vmx.uDiagAux = GCPhysVmcs;
8715 iemVmxVmFail(pVCpu, VMXINSTRERR_VMCLEAR_INVALID_PHYSADDR);
8716 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
8717 }
8718
8719 /*
8720 * VMCLEAR allows committing and clearing any valid VMCS pointer.
8721 *
8722 * If the current VMCS is the one being cleared, set its state to 'clear' and commit
8723 * to guest memory. Otherwise, set the state of the VMCS referenced in guest memory
8724 * to 'clear'.
8725 */
8726 uint8_t const fVmcsLaunchStateClear = VMX_V_VMCS_LAUNCH_STATE_CLEAR;
8727 if ( IEM_VMX_HAS_CURRENT_VMCS(pVCpu)
8728 && IEM_VMX_GET_CURRENT_VMCS(pVCpu) == GCPhysVmcs)
8729 {
8730 pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.fVmcsState = fVmcsLaunchStateClear;
8731 iemVmxWriteCurrentVmcsToGstMem(pVCpu);
8732 IEM_VMX_CLEAR_CURRENT_VMCS(pVCpu);
8733 }
8734 else
8735 {
8736 AssertCompileMemberSize(VMXVVMCS, fVmcsState, sizeof(fVmcsLaunchStateClear));
8737 rcStrict = PGMPhysSimpleWriteGCPhys(pVCpu->CTX_SUFF(pVM), GCPhysVmcs + RT_UOFFSETOF(VMXVVMCS, fVmcsState),
8738 (const void *)&fVmcsLaunchStateClear, sizeof(fVmcsLaunchStateClear));
8739 if (RT_FAILURE(rcStrict))
8740 return rcStrict;
8741 }
8742
8743 iemVmxVmSucceed(pVCpu);
8744 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
8745}
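
/*
 * Illustrative only: in a typical guest, VMCLEAR is the first operation performed on a
 * fresh VMCS region before it is made current and launched (generic sequence, names are
 * not part of this file):
 *
 *     VMCLEAR [GCPhysVmcs]   ; initialize the launch state and flush any cached VMCS data
 *     VMPTRLD [GCPhysVmcs]   ; make it the current VMCS
 *     VMWRITE ...            ; configure control, host-state and guest-state fields
 *     VMLAUNCH               ; first VM-entry; later entries use VMRESUME
 */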
8746
8747
8748/**
8749 * Interface for HM and EM to emulate the VMCLEAR instruction.
8750 *
8751 * @returns Strict VBox status code.
8752 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
8753 * @param pExitInfo Pointer to the VM-exit information.
8754 * @thread EMT(pVCpu)
8755 */
8756VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmclear(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo)
8757{
8758 Assert(pExitInfo);
8759 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
8760 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI);
8761
8762 iemInitExec(pVCpu, 0 /*fExecOpts*/);
8763
8764 uint8_t const iEffSeg = pExitInfo->InstrInfo.VmxXsave.iSegReg;
8765 uint8_t const cbInstr = pExitInfo->cbInstr;
8766 RTGCPTR const GCPtrVmcs = pExitInfo->GCPtrEffAddr;
8767 VBOXSTRICTRC rcStrict = iemVmxVmclear(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, pExitInfo);
8768 Assert(!pVCpu->iem.s.cActiveMappings);
8769 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
8770}
8771
8772
8773/**
8774 * VMPTRST instruction execution worker.
8775 *
8776 * @returns Strict VBox status code.
8777 * @param pVCpu The cross context virtual CPU structure.
8778 * @param cbInstr The instruction length in bytes.
8779 * @param iEffSeg The effective segment register to use with @a GCPtrVmcs.
8780 * @param GCPtrVmcs The linear address of where to store the current VMCS
8781 * pointer.
8782 * @param pExitInfo Pointer to the VM-exit information. Optional, can be NULL.
8783 *
8784 * @remarks Common VMX instruction checks are already expected to be done by the caller,
8785 * i.e. VMX operation, CR4.VMXE, Real/V86 mode, EFER/CS.L checks.
8786 */
8787static VBOXSTRICTRC iemVmxVmptrst(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iEffSeg,
8788 RTGCPHYS GCPtrVmcs, PCVMXVEXITINFO pExitInfo) RT_NOEXCEPT
8789{
8790 /* Nested-guest intercept. */
8791 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
8792 {
8793 if (pExitInfo)
8794 return iemVmxVmexitInstrWithInfo(pVCpu, pExitInfo);
8795 return iemVmxVmexitInstrNeedsInfo(pVCpu, VMX_EXIT_VMPTRST, VMXINSTRID_NONE, cbInstr);
8796 }
8797
8798 Assert(IEM_VMX_IS_ROOT_MODE(pVCpu));
8799
8800 /* CPL. */
8801 if (IEM_GET_CPL(pVCpu) == 0)
8802 { /* likely */ }
8803 else
8804 {
8805 Log(("vmptrst: CPL %u -> #GP(0)\n", IEM_GET_CPL(pVCpu)));
8806 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrst_Cpl;
8807 return iemRaiseGeneralProtectionFault0(pVCpu);
8808 }
8809
8810 /* Set the VMCS pointer to the location specified by the destination memory operand. */
8811 AssertCompile(NIL_RTGCPHYS == ~(RTGCPHYS)0U);
8812 VBOXSTRICTRC rcStrict = iemMemStoreDataU64(pVCpu, iEffSeg, GCPtrVmcs, IEM_VMX_GET_CURRENT_VMCS(pVCpu));
8813 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
8814 {
8815 iemVmxVmSucceed(pVCpu);
8816 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
8817 }
8818
8819 Log(("vmptrst: Failed to store VMCS pointer to memory at destination operand %#Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
8820 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrst_PtrMap;
8821 pVCpu->cpum.GstCtx.hwvirt.vmx.uDiagAux = GCPtrVmcs;
8822 return rcStrict;
8823}
8824
8825
8826/**
8827 * Interface for HM and EM to emulate the VMPTRST instruction.
8828 *
8829 * @returns Strict VBox status code.
8830 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
8831 * @param pExitInfo Pointer to the VM-exit information.
8832 * @thread EMT(pVCpu)
8833 */
8834VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmptrst(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo)
8835{
8836 Assert(pExitInfo);
8837 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
8838 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI);
8839
8840 iemInitExec(pVCpu, 0 /*fExecOpts*/);
8841
8842 uint8_t const iEffSeg = pExitInfo->InstrInfo.VmxXsave.iSegReg;
8843 uint8_t const cbInstr = pExitInfo->cbInstr;
8844 RTGCPTR const GCPtrVmcs = pExitInfo->GCPtrEffAddr;
8845 VBOXSTRICTRC rcStrict = iemVmxVmptrst(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, pExitInfo);
8846 Assert(!pVCpu->iem.s.cActiveMappings);
8847 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
8848}
8849
8850
8851/**
8852 * VMPTRLD instruction execution worker.
8853 *
8854 * @returns Strict VBox status code.
8855 * @param pVCpu The cross context virtual CPU structure.
8856 * @param cbInstr The instruction length in bytes.
 * @param iEffSeg The effective segment register to use with @a GCPtrVmcs.
8857 * @param GCPtrVmcs The linear address of the current VMCS pointer.
8858 * @param pExitInfo Pointer to the VM-exit information. Optional, can be NULL.
8859 *
8860 * @remarks Common VMX instruction checks are already expected to be done by the caller,
8861 * i.e. VMX operation, CR4.VMXE, Real/V86 mode, EFER/CS.L checks.
8862 */
8863static VBOXSTRICTRC iemVmxVmptrld(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iEffSeg,
8864 RTGCPHYS GCPtrVmcs, PCVMXVEXITINFO pExitInfo) RT_NOEXCEPT
8865{
8866 /* Nested-guest intercept. */
8867 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
8868 {
8869 if (pExitInfo)
8870 return iemVmxVmexitInstrWithInfo(pVCpu, pExitInfo);
8871 return iemVmxVmexitInstrNeedsInfo(pVCpu, VMX_EXIT_VMPTRLD, VMXINSTRID_NONE, cbInstr);
8872 }
8873
8874 Assert(IEM_VMX_IS_ROOT_MODE(pVCpu));
8875
8876 /* CPL. */
8877 if (IEM_GET_CPL(pVCpu) == 0)
8878 { /* likely */ }
8879 else
8880 {
8881 Log(("vmptrld: CPL %u -> #GP(0)\n", IEM_GET_CPL(pVCpu)));
8882 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrld_Cpl;
8883 return iemRaiseGeneralProtectionFault0(pVCpu);
8884 }
8885
8886 /* Get the VMCS pointer from the location specified by the source memory operand. */
8887 RTGCPHYS GCPhysVmcs;
8888 VBOXSTRICTRC rcStrict = iemMemFetchDataU64(pVCpu, &GCPhysVmcs, iEffSeg, GCPtrVmcs);
8889 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
8890 { /* likely */ }
8891 else
8892 {
8893 Log(("vmptrld: Failed to read VMCS physaddr from %#RGv, rc=%Rrc\n", GCPtrVmcs, VBOXSTRICTRC_VAL(rcStrict)));
8894 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrld_PtrMap;
8895 pVCpu->cpum.GstCtx.hwvirt.vmx.uDiagAux = GCPtrVmcs;
8896 return rcStrict;
8897 }
8898
8899 /* VMCS pointer alignment. */
8900 if (!(GCPhysVmcs & X86_PAGE_4K_OFFSET_MASK))
8901 { /* likely */ }
8902 else
8903 {
8904 Log(("vmptrld: VMCS pointer not page-aligned -> VMFail()\n"));
8905 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrld_PtrAlign;
8906 pVCpu->cpum.GstCtx.hwvirt.vmx.uDiagAux = GCPhysVmcs;
8907 iemVmxVmFail(pVCpu, VMXINSTRERR_VMPTRLD_INVALID_PHYSADDR);
8908 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
8909 }
8910
8911 /* VMCS physical-address width limits. */
8912 if (!(GCPhysVmcs >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth))
8913 { /* likely */ }
8914 else
8915 {
8916 Log(("vmptrld: VMCS pointer extends beyond physical-address width -> VMFail()\n"));
8917 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrld_PtrWidth;
8918 pVCpu->cpum.GstCtx.hwvirt.vmx.uDiagAux = GCPhysVmcs;
8919 iemVmxVmFail(pVCpu, VMXINSTRERR_VMPTRLD_INVALID_PHYSADDR);
8920 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
8921 }
8922
8923 /* VMCS is not the VMXON region. */
8924 if (GCPhysVmcs != pVCpu->cpum.GstCtx.hwvirt.vmx.GCPhysVmxon)
8925 { /* likely */ }
8926 else
8927 {
8928 Log(("vmptrld: VMCS pointer cannot be identical to VMXON region pointer -> VMFail()\n"));
8929 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrld_PtrVmxon;
8930 pVCpu->cpum.GstCtx.hwvirt.vmx.uDiagAux = GCPhysVmcs;
8931 iemVmxVmFail(pVCpu, VMXINSTRERR_VMPTRLD_VMXON_PTR);
8932 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
8933 }
8934
8935 /* Ensure VMCS is not MMIO, ROM etc. This is not an Intel requirement but a
8936 restriction imposed by our implementation. */
8937 if (PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmcs))
8938 { /* likely */ }
8939 else
8940 {
8941 Log(("vmptrld: VMCS not normal memory -> VMFail()\n"));
8942 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrld_PtrAbnormal;
8943 pVCpu->cpum.GstCtx.hwvirt.vmx.uDiagAux = GCPhysVmcs;
8944 iemVmxVmFail(pVCpu, VMXINSTRERR_VMPTRLD_INVALID_PHYSADDR);
8945 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
8946 }
8947
8948 /* Read just the VMCS revision from the VMCS. */
8949 VMXVMCSREVID VmcsRevId;
8950 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &VmcsRevId, GCPhysVmcs, sizeof(VmcsRevId));
8951 if (RT_SUCCESS(rc))
8952 { /* likely */ }
8953 else
8954 {
8955 Log(("vmptrld: Failed to read revision identifier from VMCS at %#RGp, rc=%Rrc\n", GCPhysVmcs, rc));
8956 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrld_RevPtrReadPhys;
8957 pVCpu->cpum.GstCtx.hwvirt.vmx.uDiagAux = GCPhysVmcs;
8958 return rc;
8959 }
8960
8961 /*
8962 * Verify the VMCS revision specified by the guest matches what we reported to the guest.
8963 * Verify the VMCS is not a shadow VMCS, if the VMCS shadowing feature is supported.
8964 */
8965 if ( VmcsRevId.n.u31RevisionId == VMX_V_VMCS_REVISION_ID
8966 && ( !VmcsRevId.n.fIsShadowVmcs
8967 || IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fVmxVmcsShadowing))
8968 { /* likely */ }
8969 else
8970 {
8971 if (VmcsRevId.n.u31RevisionId != VMX_V_VMCS_REVISION_ID)
8972 {
8973 Log(("vmptrld: VMCS revision mismatch, expected %#RX32 got %#RX32, GCPtrVmcs=%#RGv GCPhysVmcs=%#RGp -> VMFail()\n",
8974 VMX_V_VMCS_REVISION_ID, VmcsRevId.n.u31RevisionId, GCPtrVmcs, GCPhysVmcs));
8975 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrld_VmcsRevId;
8976 }
8977 else
8978 {
8979 Log(("vmptrld: Shadow VMCS -> VMFail()\n"));
8980 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrld_ShadowVmcs;
8981 }
8982 iemVmxVmFail(pVCpu, VMXINSTRERR_VMPTRLD_INCORRECT_VMCS_REV);
8983 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
8984 }
8985
8986 /*
8987 * We cache only the current VMCS in CPUMCTX. Therefore, VMPTRLD should always flush
8988 * the cache of an existing, current VMCS back to guest memory before loading a new,
8989 * different current VMCS.
8990 */
8991 if (IEM_VMX_GET_CURRENT_VMCS(pVCpu) != GCPhysVmcs)
8992 {
8993 if (IEM_VMX_HAS_CURRENT_VMCS(pVCpu))
8994 {
8995 iemVmxWriteCurrentVmcsToGstMem(pVCpu);
8996 IEM_VMX_CLEAR_CURRENT_VMCS(pVCpu);
8997 }
8998
8999 /* Set the new VMCS as the current VMCS and read it from guest memory. */
9000 IEM_VMX_SET_CURRENT_VMCS(pVCpu, GCPhysVmcs);
9001 rc = iemVmxReadCurrentVmcsFromGstMem(pVCpu);
9002 if (RT_SUCCESS(rc))
9003 {
9004 /* Notify HM that a new, current VMCS is loaded. */
9005 if (VM_IS_HM_ENABLED(pVCpu->CTX_SUFF(pVM)))
9006 HMNotifyVmxNstGstCurrentVmcsChanged(pVCpu);
9007
9008 /* Convert legacy launch-state value to current value, see @bugref{10318#c114} for reasons. */
9009 if (pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.fVmcsState == VMX_V_VMCS_LAUNCH_STATE_CLEAR_LEGACY)
9010 {
9011 pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.fVmcsState = VMX_V_VMCS_LAUNCH_STATE_CLEAR;
9012 Log(("vmptrld: Updated legacy 'VMCLEAR' VMCS launch-state bit to current\n"));
9013 }
9014 }
9015 else
9016 {
9017 Log(("vmptrld: Failed to read VMCS at %#RGp, rc=%Rrc\n", GCPhysVmcs, rc));
9018 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrld_PtrReadPhys;
9019 pVCpu->cpum.GstCtx.hwvirt.vmx.uDiagAux = GCPhysVmcs;
9020 return rc;
9021 }
9022 }
9023 else if ( IEM_VMX_HAS_CURRENT_VMCS(pVCpu)
9024 && pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.fVmcsState == VMX_V_VMCS_LAUNCH_STATE_CLEAR_LEGACY)
9025 {
9026 /* Convert legacy launch-state value to current value, see @bugref{10318#c114} for reasons. */
9027 pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.fVmcsState = VMX_V_VMCS_LAUNCH_STATE_CLEAR;
9028 Log(("vmptrld: Updated legacy VMCLEAR launch-state bit to current\n"));
9029 }
9030
9031 Assert(IEM_VMX_HAS_CURRENT_VMCS(pVCpu));
9032 iemVmxVmSucceed(pVCpu);
9033 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
9034}
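
/*
 * Note on the VMCS launch state: VMCLEAR sets it to 'clear', a successful VMLAUNCH marks
 * the VMCS as launched, and VMRESUME is only valid for a launched VMCS. VMPTRLD itself
 * does not modify the launch state; it only selects the current VMCS (and, above, migrates
 * the legacy 'clear' encoding to the current one).
 */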
9035
9036
9037/**
9038 * Interface for HM and EM to emulate the VMPTRLD instruction.
9039 *
9040 * @returns Strict VBox status code.
9041 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9042 * @param pExitInfo Pointer to the VM-exit information.
9043 * @thread EMT(pVCpu)
9044 */
9045VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmptrld(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo)
9046{
9047 Assert(pExitInfo);
9048 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
9049 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI);
9050
9051 iemInitExec(pVCpu, 0 /*fExecOpts*/);
9052
9053 uint8_t const iEffSeg = pExitInfo->InstrInfo.VmxXsave.iSegReg;
9054 uint8_t const cbInstr = pExitInfo->cbInstr;
9055 RTGCPTR const GCPtrVmcs = pExitInfo->GCPtrEffAddr;
9056 VBOXSTRICTRC rcStrict = iemVmxVmptrld(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, pExitInfo);
9057 Assert(!pVCpu->iem.s.cActiveMappings);
9058 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
9059}
9060
9061
9062/**
9063 * INVVPID instruction execution worker.
9064 *
9065 * @returns Strict VBox status code.
9066 * @param pVCpu The cross context virtual CPU structure.
9067 * @param cbInstr The instruction length in bytes.
9068 * @param iEffSeg The segment of the invvpid descriptor.
9069 * @param GCPtrInvvpidDesc The address of the INVVPID descriptor.
9070 * @param u64InvvpidType The invalidation type.
9071 * @param pExitInfo Pointer to the VM-exit information. Optional, can be
9072 * NULL.
9073 *
9074 * @remarks Common VMX instruction checks are already expected to be done by the caller,
9075 * i.e. VMX operation, CR4.VMXE, Real/V86 mode, EFER/CS.L checks.
9076 */
9077VBOXSTRICTRC iemVmxInvvpid(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPTR GCPtrInvvpidDesc,
9078 uint64_t u64InvvpidType, PCVMXVEXITINFO pExitInfo) RT_NOEXCEPT
9079{
9080 /* Check if INVVPID instruction is supported, otherwise raise #UD. */
9081 if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fVmxVpid)
9082 return iemRaiseUndefinedOpcode(pVCpu);
9083
9084 /* Nested-guest intercept. */
9085 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
9086 {
9087 if (pExitInfo)
9088 return iemVmxVmexitInstrWithInfo(pVCpu, pExitInfo);
9089 return iemVmxVmexitInstrNeedsInfo(pVCpu, VMX_EXIT_INVVPID, VMXINSTRID_NONE, cbInstr);
9090 }
9091
9092 /* CPL. */
9093 if (IEM_GET_CPL(pVCpu) != 0)
9094 {
9095 Log(("invvpid: CPL != 0 -> #GP(0)\n"));
9096 return iemRaiseGeneralProtectionFault0(pVCpu);
9097 }
9098
9099 /*
9100 * Validate INVVPID invalidation type.
9101 *
9102 * The instruction specifies exactly ONE of the supported invalidation types.
9103 *
9104 * Each of the types has a bit in IA32_VMX_EPT_VPID_CAP MSR specifying if it is
9105 * supported. In theory, it's possible for a CPU to not support flushing individual
9106 * addresses while supporting all the other types, or any other combination. We do not
9107 * take any shortcuts here by assuming only the types we currently expose to the guest.
9108 */
9109 uint64_t const fCaps = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64EptVpidCaps;
9110 bool const fInvvpidSupported = RT_BF_GET(fCaps, VMX_BF_EPT_VPID_CAP_INVVPID);
9111 bool const fTypeIndivAddr = RT_BF_GET(fCaps, VMX_BF_EPT_VPID_CAP_INVVPID_INDIV_ADDR);
9112 bool const fTypeSingleCtx = RT_BF_GET(fCaps, VMX_BF_EPT_VPID_CAP_INVVPID_SINGLE_CTX);
9113 bool const fTypeAllCtx = RT_BF_GET(fCaps, VMX_BF_EPT_VPID_CAP_INVVPID_ALL_CTX);
9114 bool const fTypeSingleCtxRetainGlobals = RT_BF_GET(fCaps, VMX_BF_EPT_VPID_CAP_INVVPID_SINGLE_CTX_RETAIN_GLOBALS);
9115
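    /* Index corresponds to the INVVPID type encoding (Intel SDM): 0 = individual-address,
       1 = single-context, 2 = all-contexts, 3 = single-context-retaining-globals. */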
9116 bool afSupportedTypes[4];
9117 afSupportedTypes[0] = fTypeIndivAddr;
9118 afSupportedTypes[1] = fTypeSingleCtx;
9119 afSupportedTypes[2] = fTypeAllCtx;
9120 afSupportedTypes[3] = fTypeSingleCtxRetainGlobals;
9121
9122 if ( fInvvpidSupported
9123 && !(u64InvvpidType & ~(uint64_t)VMX_INVVPID_VALID_MASK)
9124 && afSupportedTypes[u64InvvpidType & 3])
9125 { /* likely */ }
9126 else
9127 {
9128 Log(("invvpid: invalid/unsupported invvpid type %#x -> VMFail\n", u64InvvpidType));
9129 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Invvpid_TypeInvalid;
9130 pVCpu->cpum.GstCtx.hwvirt.vmx.uDiagAux = u64InvvpidType;
9131 iemVmxVmFail(pVCpu, VMXINSTRERR_INVEPT_INVVPID_INVALID_OPERAND);
9132 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
9133 }
9134
9135 /*
9136 * Fetch the invvpid descriptor from guest memory.
9137 */
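    /* The INVVPID descriptor is 128 bits wide: bits 15:0 hold the VPID, bits 63:16 are
       reserved (must be zero) and bits 127:64 hold the linear address (used only by the
       individual-address type). */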
9138 RTUINT128U uDesc;
9139 VBOXSTRICTRC rcStrict = iemMemFetchDataU128(pVCpu, &uDesc, iEffSeg, GCPtrInvvpidDesc);
9140 if (rcStrict == VINF_SUCCESS)
9141 {
9142 /*
9143 * Validate the descriptor.
9144 */
9145 if (uDesc.s.Lo <= 0xffff)
9146 { /* likely */ }
9147 else
9148 {
9149 Log(("invvpid: reserved bits set in invvpid descriptor %#RX64 -> VMFail\n", uDesc.s.Lo));
9150 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Invvpid_DescRsvd;
9151 pVCpu->cpum.GstCtx.hwvirt.vmx.uDiagAux = uDesc.s.Lo;
9152 iemVmxVmFail(pVCpu, VMXINSTRERR_INVEPT_INVVPID_INVALID_OPERAND);
9153 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
9154 }
9155
9156 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR3);
9157 RTGCUINTPTR64 const GCPtrInvAddr = uDesc.s.Hi;
9158 uint16_t const uVpid = uDesc.Words.w0;
9159 uint64_t const uCr3 = pVCpu->cpum.GstCtx.cr3;
9160 switch (u64InvvpidType)
9161 {
9162 case VMXTLBFLUSHVPID_INDIV_ADDR:
9163 {
9164 if (uVpid != 0)
9165 {
9166 if (IEM_IS_CANONICAL(GCPtrInvAddr))
9167 {
9168 /* Invalidate mappings for the linear address tagged with VPID. */
9169 /** @todo PGM support for VPID? Currently just flush everything. */
9170 PGMFlushTLB(pVCpu, uCr3, true /* fGlobal */);
9171 iemVmxVmSucceed(pVCpu);
9172 }
9173 else
9174 {
9175 Log(("invvpid: invalidation address %#RGv is not canonical -> VMFail\n", GCPtrInvAddr));
9176 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Invvpid_Type0InvalidAddr;
9177 pVCpu->cpum.GstCtx.hwvirt.vmx.uDiagAux = GCPtrInvAddr;
9178 iemVmxVmFail(pVCpu, VMXINSTRERR_INVEPT_INVVPID_INVALID_OPERAND);
9179 }
9180 }
9181 else
9182 {
9183 Log(("invvpid: invalid VPID %#x for invalidation type %u -> VMFail\n", uVpid, u64InvvpidType));
9184 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Invvpid_Type0InvalidVpid;
9185 pVCpu->cpum.GstCtx.hwvirt.vmx.uDiagAux = u64InvvpidType;
9186 iemVmxVmFail(pVCpu, VMXINSTRERR_INVEPT_INVVPID_INVALID_OPERAND);
9187 }
9188 break;
9189 }
9190
9191 case VMXTLBFLUSHVPID_SINGLE_CONTEXT:
9192 {
9193 if (uVpid != 0)
9194 {
9195 /* Invalidate all mappings with VPID. */
9196 /** @todo PGM support for VPID? Currently just flush everything. */
9197 PGMFlushTLB(pVCpu, uCr3, true /* fGlobal */);
9198 iemVmxVmSucceed(pVCpu);
9199 }
9200 else
9201 {
9202 Log(("invvpid: invalid VPID %#x for invalidation type %u -> VMFail\n", uVpid, u64InvvpidType));
9203 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Invvpid_Type1InvalidVpid;
9204 pVCpu->cpum.GstCtx.hwvirt.vmx.uDiagAux = u64InvvpidType;
9205 iemVmxVmFail(pVCpu, VMXINSTRERR_INVEPT_INVVPID_INVALID_OPERAND);
9206 }
9207 break;
9208 }
9209
9210 case VMXTLBFLUSHVPID_ALL_CONTEXTS:
9211 {
9212 /* Invalidate all mappings with non-zero VPIDs. */
9213 /** @todo PGM support for VPID? Currently just flush everything. */
9214 PGMFlushTLB(pVCpu, uCr3, true /* fGlobal */);
9215 iemVmxVmSucceed(pVCpu);
9216 break;
9217 }
9218
9219 case VMXTLBFLUSHVPID_SINGLE_CONTEXT_RETAIN_GLOBALS:
9220 {
9221 if (uVpid != 0)
9222 {
9223 /* Invalidate all mappings with VPID except global translations. */
9224 /** @todo PGM support for VPID? Currently just flush everything. */
9225 PGMFlushTLB(pVCpu, uCr3, true /* fGlobal */);
9226 iemVmxVmSucceed(pVCpu);
9227 }
9228 else
9229 {
9230 Log(("invvpid: invalid VPID %#x for invalidation type %u -> VMFail\n", uVpid, u64InvvpidType));
9231 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Invvpid_Type3InvalidVpid;
9232 pVCpu->cpum.GstCtx.hwvirt.vmx.uDiagAux = uVpid;
9233 iemVmxVmFail(pVCpu, VMXINSTRERR_INVEPT_INVVPID_INVALID_OPERAND);
9234 }
9235 break;
9236 }
9237 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9238 }
9239 rcStrict = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
9240 }
9241 return rcStrict;
9242}
9243
9244
9245/**
9246 * Interface for HM and EM to emulate the INVVPID instruction.
9247 *
9248 * @returns Strict VBox status code.
9249 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9250 * @param pExitInfo Pointer to the VM-exit information.
9251 * @thread EMT(pVCpu)
9252 */
9253VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvvpid(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo)
9254{
9255 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 4);
9256 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI);
9257 Assert(pExitInfo);
9258
9259 iemInitExec(pVCpu, 0 /*fExecOpts*/);
9260
9261 uint8_t const iEffSeg = pExitInfo->InstrInfo.Inv.iSegReg;
9262 uint8_t const cbInstr = pExitInfo->cbInstr;
9263 RTGCPTR const GCPtrInvvpidDesc = pExitInfo->GCPtrEffAddr;
9264 uint64_t const u64InvvpidType = IEM_IS_64BIT_CODE(pVCpu)
9265 ? iemGRegFetchU64(pVCpu, pExitInfo->InstrInfo.Inv.iReg2)
9266 : iemGRegFetchU32(pVCpu, pExitInfo->InstrInfo.Inv.iReg2);
9267 VBOXSTRICTRC rcStrict = iemVmxInvvpid(pVCpu, cbInstr, iEffSeg, GCPtrInvvpidDesc, u64InvvpidType, pExitInfo);
9268 Assert(!pVCpu->iem.s.cActiveMappings);
9269 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
9270}
9271
9272#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
9273
9274/**
9275 * INVEPT instruction execution worker.
9276 *
9277 * @returns Strict VBox status code.
9278 * @param pVCpu The cross context virtual CPU structure.
9279 * @param cbInstr The instruction length in bytes.
9280 * @param iEffSeg The segment of the invept descriptor.
9281 * @param GCPtrInveptDesc The address of the INVEPT descriptor.
9282 * @param u64InveptType The invalidation type.
9283 * @param pExitInfo Pointer to the VM-exit information. Optional, can be
9284 * NULL.
9285 *
9286 * @remarks Common VMX instruction checks are already expected to be done by the caller,
9287 * i.e. VMX operation, CR4.VMXE, Real/V86 mode, EFER/CS.L checks.
9288 */
9289static VBOXSTRICTRC iemVmxInvept(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPTR GCPtrInveptDesc,
9290 uint64_t u64InveptType, PCVMXVEXITINFO pExitInfo) RT_NOEXCEPT
9291{
9292 /* Check if EPT is supported, otherwise raise #UD. */
9293 if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fVmxEpt)
9294 return iemRaiseUndefinedOpcode(pVCpu);
9295
9296 /* Nested-guest intercept. */
9297 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
9298 {
9299 if (pExitInfo)
9300 return iemVmxVmexitInstrWithInfo(pVCpu, pExitInfo);
9301 return iemVmxVmexitInstrNeedsInfo(pVCpu, VMX_EXIT_INVEPT, VMXINSTRID_NONE, cbInstr);
9302 }
9303
9304 /* CPL. */
9305 if (IEM_GET_CPL(pVCpu) != 0)
9306 {
9307 Log(("invept: CPL != 0 -> #GP(0)\n"));
9308 return iemRaiseGeneralProtectionFault0(pVCpu);
9309 }
9310
9311 /*
9312 * Validate INVEPT invalidation type.
9313 *
9314 * The instruction specifies exactly ONE of the supported invalidation types.
9315 *
9316 * Each of the types has a bit in IA32_VMX_EPT_VPID_CAP MSR specifying if it is
9317 * supported. In theory, it's possible for a CPU to support only a subset of the
9318 * types, or any other combination. We do not take any shortcuts here by assuming
9319 * only the types we currently expose to the guest.
9320 */
9321 uint64_t const fCaps = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64EptVpidCaps;
9322 bool const fInveptSupported = RT_BF_GET(fCaps, VMX_BF_EPT_VPID_CAP_INVEPT);
9323 bool const fTypeSingleCtx = RT_BF_GET(fCaps, VMX_BF_EPT_VPID_CAP_INVEPT_SINGLE_CTX);
9324 bool const fTypeAllCtx = RT_BF_GET(fCaps, VMX_BF_EPT_VPID_CAP_INVEPT_ALL_CTX);
9325
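    /* Index corresponds to the INVEPT type encoding (Intel SDM): only 1 = single-context
       and 2 = all-contexts are defined; types 0 and 3 are reserved and never supported. */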
9326 bool afSupportedTypes[4];
9327 afSupportedTypes[0] = false;
9328 afSupportedTypes[1] = fTypeSingleCtx;
9329 afSupportedTypes[2] = fTypeAllCtx;
9330 afSupportedTypes[3] = false;
9331
9332 if ( fInveptSupported
9333 && !(u64InveptType & ~(uint64_t)VMX_INVEPT_VALID_MASK)
9334 && afSupportedTypes[u64InveptType & 3])
9335 { /* likely */ }
9336 else
9337 {
9338 Log(("invept: invalid/unsupported invept type %#x -> VMFail\n", u64InveptType));
9339 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Invept_TypeInvalid;
9340 pVCpu->cpum.GstCtx.hwvirt.vmx.uDiagAux = u64InveptType;
9341 iemVmxVmFail(pVCpu, VMXINSTRERR_INVEPT_INVVPID_INVALID_OPERAND);
9342 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
9343 }
9344
9345 /*
9346 * Fetch the invept descriptor from guest memory.
9347 */
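    /* The INVEPT descriptor is 128 bits wide: bits 63:0 hold the EPT pointer (EPTP) and
       bits 127:64 are reserved. */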
9348 RTUINT128U uDesc;
9349 VBOXSTRICTRC rcStrict = iemMemFetchDataU128(pVCpu, &uDesc, iEffSeg, GCPtrInveptDesc);
9350 if (rcStrict == VINF_SUCCESS)
9351 {
9352 /*
9353 * Validate the descriptor.
9354 *
9355 * The Intel spec. does not explicitly say the INVEPT instruction fails when reserved
9356 * bits in the descriptor are set, but it -does- for INVVPID. Until we test on real
9357 * hardware, it's assumed INVEPT behaves the same as INVVPID in this regard. It's
9358 * better to be strict in our emulation until proven otherwise.
9359 *
9360 * UPDATE: Hyper-V enabled Windows 10 Pro guests do NOT clear the reserved bits in
9361 * the descriptor. Hence, I've disabled this check for now, see @bugref{10318#c122}.
9362 */
9363#if 0
9364 if (uDesc.s.Hi)
9365 {
9366 Log(("invept: reserved bits set in invept descriptor %#RX64 -> VMFail\n", uDesc.s.Hi));
9367 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Invept_DescRsvd;
9368 pVCpu->cpum.GstCtx.hwvirt.vmx.uDiagAux = uDesc.s.Hi;
9369 iemVmxVmFail(pVCpu, VMXINSTRERR_INVEPT_INVVPID_INVALID_OPERAND);
9370 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
9371 }
9372#endif
9373
9374 /*
9375 * Flush TLB mappings based on the EPT type.
9376 */
9377 if (u64InveptType == VMXTLBFLUSHEPT_SINGLE_CONTEXT)
9378 {
9379 uint64_t const GCPhysEptPtr = uDesc.s.Lo;
9380 int const rc = iemVmxVmentryCheckEptPtr(pVCpu, GCPhysEptPtr, NULL /* enmDiag */);
9381 if (RT_SUCCESS(rc))
9382 { /* likely */ }
9383 else
9384 {
9385 Log(("invept: EPTP invalid %#RX64 -> VMFail\n", GCPhysEptPtr));
9386 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Invept_EptpInvalid;
9387 pVCpu->cpum.GstCtx.hwvirt.vmx.uDiagAux = GCPhysEptPtr;
9388 iemVmxVmFail(pVCpu, VMXINSTRERR_INVEPT_INVVPID_INVALID_OPERAND);
9389 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
9390 }
9391 }
9392
9393 /** @todo PGM support for EPT tags? Currently just flush everything. */
9394 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR3);
9395 uint64_t const uCr3 = pVCpu->cpum.GstCtx.cr3;
9396 PGMFlushTLB(pVCpu, uCr3, true /* fGlobal */);
9397
9398 iemVmxVmSucceed(pVCpu);
9399 rcStrict = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
9400 }
9401
9402 return rcStrict;
9403}
9404
9405
9406/**
9407 * Interface for HM and EM to emulate the INVEPT instruction.
9408 *
9409 * @returns Strict VBox status code.
9410 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9411 * @param pExitInfo Pointer to the VM-exit information.
9412 * @thread EMT(pVCpu)
9413 */
9414VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvept(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo)
9415{
9416 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 4);
9417 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI);
9418 Assert(pExitInfo);
9419
9420 iemInitExec(pVCpu, 0 /*fExecOpts*/);
9421
9422 uint8_t const iEffSeg = pExitInfo->InstrInfo.Inv.iSegReg;
9423 uint8_t const cbInstr = pExitInfo->cbInstr;
9424 RTGCPTR const GCPtrInveptDesc = pExitInfo->GCPtrEffAddr;
9425 uint64_t const u64InveptType = IEM_IS_64BIT_CODE(pVCpu)
9426 ? iemGRegFetchU64(pVCpu, pExitInfo->InstrInfo.Inv.iReg2)
9427 : iemGRegFetchU32(pVCpu, pExitInfo->InstrInfo.Inv.iReg2);
9428 VBOXSTRICTRC rcStrict = iemVmxInvept(pVCpu, cbInstr, iEffSeg, GCPtrInveptDesc, u64InveptType, pExitInfo);
9429 Assert(!pVCpu->iem.s.cActiveMappings);
9430 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
9431}
9432
9433#endif /* VBOX_WITH_NESTED_HWVIRT_VMX_EPT */
9434
9435/**
9436 * VMXON instruction execution worker.
9437 *
9438 * @returns Strict VBox status code.
9439 * @param pVCpu The cross context virtual CPU structure.
9440 * @param cbInstr The instruction length in bytes.
9441 * @param iEffSeg The effective segment register to use with @a
9442 * GCPtrVmxon.
9443 * @param GCPtrVmxon The linear address of the VMXON pointer.
9444 * @param pExitInfo Pointer to the VM-exit information. Optional, can be NULL.
9445 *
9446 * @remarks Common VMX instruction checks are already expected to be done by the caller,
9447 * i.e. CR4.VMXE, Real/V86 mode, EFER/CS.L checks.
9448 */
9449static VBOXSTRICTRC iemVmxVmxon(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iEffSeg,
9450 RTGCPHYS GCPtrVmxon, PCVMXVEXITINFO pExitInfo) RT_NOEXCEPT
9451{
9452 if (!IEM_VMX_IS_ROOT_MODE(pVCpu))
9453 {
9454 /* CPL. */
9455 if (IEM_GET_CPL(pVCpu) == 0)
9456 { /* likely */ }
9457 else
9458 {
9459 Log(("vmxon: CPL %u -> #GP(0)\n", IEM_GET_CPL(pVCpu)));
9460 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_Cpl;
9461 return iemRaiseGeneralProtectionFault0(pVCpu);
9462 }
9463
9464 /* A20M (A20 Masked) mode. */
9465 if (PGMPhysIsA20Enabled(pVCpu))
9466 { /* likely */ }
9467 else
9468 {
9469 Log(("vmxon: A20M mode -> #GP(0)\n"));
9470 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_A20M;
9471 return iemRaiseGeneralProtectionFault0(pVCpu);
9472 }
9473
9474 /* CR0. */
9475 {
9476 /*
9477 * CR0 MB1 bits.
9478 *
9479 * We use VMX_V_CR0_FIXED0 below to ensure CR0.PE and CR0.PG are always set
9480 * while executing VMXON. CR0.PE and CR0.PG are only allowed to be clear
9481 * when the guest is running in VMX non-root mode with unrestricted-guest control
9482 * enabled in the VMCS.
9483 */
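            /* In other words, CR0 is valid for VMXON iff (CR0 & Fixed0) == Fixed0 and
               (CR0 & ~Fixed1) == 0; the same pattern is applied to CR4 below. */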
9484 uint64_t const uCr0Fixed0 = VMX_V_CR0_FIXED0;
9485 if ((pVCpu->cpum.GstCtx.cr0 & uCr0Fixed0) == uCr0Fixed0)
9486 { /* likely */ }
9487 else
9488 {
9489 Log(("vmxon: CR0 fixed0 bits cleared -> #GP(0)\n"));
9490 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_Cr0Fixed0;
9491 return iemRaiseGeneralProtectionFault0(pVCpu);
9492 }
9493
9494 /* CR0 MBZ bits. */
9495 uint64_t const uCr0Fixed1 = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Cr0Fixed1;
9496 if (!(pVCpu->cpum.GstCtx.cr0 & ~uCr0Fixed1))
9497 { /* likely */ }
9498 else
9499 {
9500 Log(("vmxon: CR0 fixed1 bits set -> #GP(0)\n"));
9501 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_Cr0Fixed1;
9502 return iemRaiseGeneralProtectionFault0(pVCpu);
9503 }
9504 }
9505
9506 /* CR4. */
9507 {
9508 /* CR4 MB1 bits. */
9509 uint64_t const uCr4Fixed0 = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Cr4Fixed0;
9510 if ((pVCpu->cpum.GstCtx.cr4 & uCr4Fixed0) == uCr4Fixed0)
9511 { /* likely */ }
9512 else
9513 {
9514 Log(("vmxon: CR4 fixed0 bits cleared -> #GP(0)\n"));
9515 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_Cr4Fixed0;
9516 return iemRaiseGeneralProtectionFault0(pVCpu);
9517 }
9518
9519 /* CR4 MBZ bits. */
9520 uint64_t const uCr4Fixed1 = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Cr4Fixed1;
9521 if (!(pVCpu->cpum.GstCtx.cr4 & ~uCr4Fixed1))
9522 { /* likely */ }
9523 else
9524 {
9525 Log(("vmxon: CR4 fixed1 bits set -> #GP(0)\n"));
9526 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_Cr4Fixed1;
9527 return iemRaiseGeneralProtectionFault0(pVCpu);
9528 }
9529 }
9530
9531 /* Feature control MSR's LOCK and VMXON bits. */
9532 uint64_t const uMsrFeatCtl = CPUMGetGuestIa32FeatCtrl(pVCpu);
9533 if ((uMsrFeatCtl & (MSR_IA32_FEATURE_CONTROL_LOCK | MSR_IA32_FEATURE_CONTROL_VMXON))
9534 == (MSR_IA32_FEATURE_CONTROL_LOCK | MSR_IA32_FEATURE_CONTROL_VMXON))
9535 { /* likely */ }
9536 else
9537 {
9538 Log(("vmxon: Feature control lock bit or VMXON bit cleared -> #GP(0)\n"));
9539 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_MsrFeatCtl;
9540 return iemRaiseGeneralProtectionFault0(pVCpu);
9541 }
9542
9543 /* Get the VMXON pointer from the location specified by the source memory operand. */
9544 RTGCPHYS GCPhysVmxon;
9545 VBOXSTRICTRC rcStrict = iemMemFetchDataU64(pVCpu, &GCPhysVmxon, iEffSeg, GCPtrVmxon);
9546 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9547 { /* likely */ }
9548 else
9549 {
9550 Log(("vmxon: Failed to read VMXON region physaddr from %#RGv, rc=%Rrc\n", GCPtrVmxon, VBOXSTRICTRC_VAL(rcStrict)));
9551 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_PtrMap;
9552 pVCpu->cpum.GstCtx.hwvirt.vmx.uDiagAux = GCPtrVmxon;
9553 return rcStrict;
9554 }
9555
9556 /* VMXON region pointer alignment. */
9557 if (!(GCPhysVmxon & X86_PAGE_4K_OFFSET_MASK))
9558 { /* likely */ }
9559 else
9560 {
9561 Log(("vmxon: VMXON region pointer not page-aligned -> VMFailInvalid\n"));
9562 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_PtrAlign;
9563 pVCpu->cpum.GstCtx.hwvirt.vmx.uDiagAux = GCPhysVmxon;
9564 iemVmxVmFailInvalid(pVCpu);
9565 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
9566 }
9567
9568 /* VMXON physical-address width limits. */
9569 if (!(GCPhysVmxon >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth))
9570 { /* likely */ }
9571 else
9572 {
9573 Log(("vmxon: VMXON region pointer extends beyond physical-address width -> VMFailInvalid\n"));
9574 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_PtrWidth;
9575 pVCpu->cpum.GstCtx.hwvirt.vmx.uDiagAux = GCPhysVmxon;
9576 iemVmxVmFailInvalid(pVCpu);
9577 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
9578 }
9579
9580 /* Ensure VMXON region is not MMIO, ROM etc. This is not an Intel requirement but a
9581 restriction imposed by our implementation. */
9582 if (PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmxon))
9583 { /* likely */ }
9584 else
9585 {
9586 Log(("vmxon: VMXON region not normal memory -> VMFailInvalid\n"));
9587 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_PtrAbnormal;
9588 pVCpu->cpum.GstCtx.hwvirt.vmx.uDiagAux = GCPhysVmxon;
9589 iemVmxVmFailInvalid(pVCpu);
9590 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
9591 }
9592
9593 /* Read the VMCS revision ID from the VMXON region. */
9594 VMXVMCSREVID VmcsRevId;
9595 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &VmcsRevId, GCPhysVmxon, sizeof(VmcsRevId));
9596 if (RT_SUCCESS(rc))
9597 { /* likely */ }
9598 else
9599 {
9600 Log(("vmxon: Failed to read VMXON region at %#RGp, rc=%Rrc\n", GCPhysVmxon, rc));
9601 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_PtrReadPhys;
9602 return rc;
9603 }
9604
9605 /* Verify the VMCS revision specified by the guest matches what we reported to the guest. */
9606 if (RT_LIKELY(VmcsRevId.u == VMX_V_VMCS_REVISION_ID))
9607 { /* likely */ }
9608 else
9609 {
9610 /* Revision ID mismatch. */
9611 if (!VmcsRevId.n.fIsShadowVmcs)
9612 {
9613 Log(("vmxon: VMCS revision mismatch, expected %#RX32 got %#RX32 -> VMFailInvalid\n", VMX_V_VMCS_REVISION_ID,
9614 VmcsRevId.n.u31RevisionId));
9615 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_VmcsRevId;
9616 iemVmxVmFailInvalid(pVCpu);
9617 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
9618 }
9619
9620 /* Shadow VMCS disallowed. */
9621 Log(("vmxon: Shadow VMCS -> VMFailInvalid\n"));
9622 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_ShadowVmcs;
9623 iemVmxVmFailInvalid(pVCpu);
9624 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
9625 }
9626
9627 /*
9628 * Record that we're in VMX root operation, block INIT, block and disable A20M.
9629 */
9630 pVCpu->cpum.GstCtx.hwvirt.vmx.GCPhysVmxon = GCPhysVmxon;
9631 IEM_VMX_CLEAR_CURRENT_VMCS(pVCpu);
9632 pVCpu->cpum.GstCtx.hwvirt.vmx.fInVmxRootMode = true;
9633
9634 /* Clear address-range monitoring. */
9635 EMMonitorWaitClear(pVCpu);
9636 /** @todo NSTVMX: Intel PT. */
9637
9638 iemVmxVmSucceed(pVCpu);
9639 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
9640 }
9641 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
9642 {
9643 /* Nested-guest intercept. */
9644 if (pExitInfo)
9645 return iemVmxVmexitInstrWithInfo(pVCpu, pExitInfo);
9646 return iemVmxVmexitInstrNeedsInfo(pVCpu, VMX_EXIT_VMXON, VMXINSTRID_NONE, cbInstr);
9647 }
9648
9649 Assert(IEM_VMX_IS_ROOT_MODE(pVCpu));
9650
9651 /* CPL. */
9652 if (IEM_GET_CPL(pVCpu) > 0)
9653 {
9654 Log(("vmxon: In VMX root mode: CPL %u -> #GP(0)\n", IEM_GET_CPL(pVCpu)));
9655 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_VmxRootCpl;
9656 return iemRaiseGeneralProtectionFault0(pVCpu);
9657 }
9658
9659 /* VMXON when already in VMX root mode. */
9660 iemVmxVmFail(pVCpu, VMXINSTRERR_VMXON_IN_VMXROOTMODE);
9661 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_VmxAlreadyRoot;
9662 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
9663}
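
/*
 * Illustrative only: a guest typically enters VMX root operation with a sequence along
 * these lines (generic steps, names are not part of this file):
 *
 *     1. Program IA32_FEATURE_CONTROL with the lock and VMXON-enable bits (if not locked).
 *     2. Set CR4.VMXE and make CR0/CR4 satisfy the fixed-bit MSRs.
 *     3. Allocate a 4K-aligned VMXON region and store the VMCS revision ID reported by
 *        IA32_VMX_BASIC at offset 0.
 *     4. Execute VMXON with the physical address of that region as the operand.
 *
 * The checks above mirror each of these requirements.
 */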
9664
9665
9666/**
9667 * Interface for HM and EM to emulate the VMXON instruction.
9668 *
9669 * @returns Strict VBox status code.
9670 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9671 * @param pExitInfo Pointer to the VM-exit information.
9672 * @thread EMT(pVCpu)
9673 */
9674VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmxon(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo)
9675{
9676 Assert(pExitInfo);
9677 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
9678 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI);
9679
9680 iemInitExec(pVCpu, 0 /*fExecOpts*/);
9681
9682 uint8_t const iEffSeg = pExitInfo->InstrInfo.VmxXsave.iSegReg;
9683 uint8_t const cbInstr = pExitInfo->cbInstr;
9684 RTGCPTR const GCPtrVmxon = pExitInfo->GCPtrEffAddr;
9685 VBOXSTRICTRC rcStrict = iemVmxVmxon(pVCpu, cbInstr, iEffSeg, GCPtrVmxon, pExitInfo);
9686 Assert(!pVCpu->iem.s.cActiveMappings);
9687 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
9688}
9689
9690
9691/**
9692 * Implements 'VMXOFF'.
9693 *
9694 * @remarks Common VMX instruction checks are already expected to be done by the caller,
9695 * i.e. CR4.VMXE, Real/V86 mode, EFER/CS.L checks.
9696 */
9697IEM_CIMPL_DEF_0(iemCImpl_vmxoff)
9698{
9699 /* Nested-guest intercept. */
9700 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
9701 return iemVmxVmexitInstr(pVCpu, VMX_EXIT_VMXOFF, cbInstr);
9702
9703 /* CPL. */
9704 if (IEM_GET_CPL(pVCpu) == 0)
9705 { /* likely */ }
9706 else
9707 {
9708 Log(("vmxoff: CPL %u -> #GP(0)\n", IEM_GET_CPL(pVCpu)));
9709 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxoff_Cpl;
9710 return iemRaiseGeneralProtectionFault0(pVCpu);
9711 }
9712
9713 /* Dual monitor treatment of SMIs and SMM. */
9714 uint64_t const fSmmMonitorCtl = CPUMGetGuestIa32SmmMonitorCtl(pVCpu);
9715 if (!(fSmmMonitorCtl & MSR_IA32_SMM_MONITOR_VALID))
9716 { /* likely */ }
9717 else
9718 {
9719 iemVmxVmFail(pVCpu, VMXINSTRERR_VMXOFF_DUAL_MON);
9720 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
9721 }
9722
9723 /* Record that we're no longer in VMX root operation, unblock INIT, unblock and enable A20M. */
9724 pVCpu->cpum.GstCtx.hwvirt.vmx.fInVmxRootMode = false;
9725 Assert(!pVCpu->cpum.GstCtx.hwvirt.vmx.fInVmxNonRootMode);
9726
9727 if (fSmmMonitorCtl & MSR_IA32_SMM_MONITOR_VMXOFF_UNBLOCK_SMI)
9728 { /** @todo NSTVMX: Unblock SMI. */ }
9729
9730 EMMonitorWaitClear(pVCpu);
9731 /** @todo NSTVMX: Unblock and enable A20M. */
9732
9733 iemVmxVmSucceed(pVCpu);
9734 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
9735}
9736
9737
9738/**
9739 * Interface for HM and EM to emulate the VMXOFF instruction.
9740 *
9741 * @returns Strict VBox status code.
9742 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9743 * @param cbInstr The instruction length in bytes.
9744 * @thread EMT(pVCpu)
9745 */
9746VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmxoff(PVMCPUCC pVCpu, uint8_t cbInstr)
9747{
9748 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
9749 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI);
9750
9751 iemInitExec(pVCpu, 0 /*fExecOpts*/);
9752 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmxoff);
9753 Assert(!pVCpu->iem.s.cActiveMappings);
9754 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
9755}
9756
9757
9758/**
9759 * Implements 'VMXON'.
9760 */
9761IEM_CIMPL_DEF_2(iemCImpl_vmxon, uint8_t, iEffSeg, RTGCPTR, GCPtrVmxon)
9762{
9763 return iemVmxVmxon(pVCpu, cbInstr, iEffSeg, GCPtrVmxon, NULL /* pExitInfo */);
9764}
9765
9766
9767/**
9768 * Implements 'VMLAUNCH'.
9769 */
9770IEM_CIMPL_DEF_0(iemCImpl_vmlaunch)
9771{
9772 return iemVmxVmlaunchVmresume(pVCpu, cbInstr, VMXINSTRID_VMLAUNCH);
9773}
9774
9775
9776/**
9777 * Implements 'VMRESUME'.
9778 */
9779IEM_CIMPL_DEF_0(iemCImpl_vmresume)
9780{
9781 return iemVmxVmlaunchVmresume(pVCpu, cbInstr, VMXINSTRID_VMRESUME);
9782}
9783
9784
9785/**
9786 * Implements 'VMPTRLD'.
9787 */
9788IEM_CIMPL_DEF_2(iemCImpl_vmptrld, uint8_t, iEffSeg, RTGCPTR, GCPtrVmcs)
9789{
9790 return iemVmxVmptrld(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, NULL /* pExitInfo */);
9791}
9792
9793
9794/**
9795 * Implements 'VMPTRST'.
9796 */
9797IEM_CIMPL_DEF_2(iemCImpl_vmptrst, uint8_t, iEffSeg, RTGCPTR, GCPtrVmcs)
9798{
9799 return iemVmxVmptrst(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, NULL /* pExitInfo */);
9800}
9801
9802
9803/**
9804 * Implements 'VMCLEAR'.
9805 */
9806IEM_CIMPL_DEF_2(iemCImpl_vmclear, uint8_t, iEffSeg, RTGCPTR, GCPtrVmcs)
9807{
9808 return iemVmxVmclear(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, NULL /* pExitInfo */);
9809}
9810
9811
9812/**
9813 * Implements 'VMWRITE' register.
9814 */
9815IEM_CIMPL_DEF_2(iemCImpl_vmwrite_reg, uint64_t, u64Val, uint64_t, u64VmcsField)
9816{
9817 return iemVmxVmwrite(pVCpu, cbInstr, UINT8_MAX /* iEffSeg */, u64Val, u64VmcsField, NULL /* pExitInfo */);
9818}
9819
9820
9821/**
9822 * Implements 'VMWRITE' memory.
9823 */
9824IEM_CIMPL_DEF_3(iemCImpl_vmwrite_mem, uint8_t, iEffSeg, RTGCPTR, GCPtrVal, uint32_t, u64VmcsField)
9825{
9826 return iemVmxVmwrite(pVCpu, cbInstr, iEffSeg, GCPtrVal, u64VmcsField, NULL /* pExitInfo */);
9827}
9828
9829
9830/**
9831 * Implements 'VMREAD' register (64-bit).
9832 */
9833IEM_CIMPL_DEF_2(iemCImpl_vmread_reg64, uint64_t *, pu64Dst, uint64_t, u64VmcsField)
9834{
9835 return iemVmxVmreadReg64(pVCpu, cbInstr, pu64Dst, u64VmcsField, NULL /* pExitInfo */);
9836}
9837
9838
9839/**
9840 * Implements 'VMREAD' register (32-bit).
9841 */
9842IEM_CIMPL_DEF_2(iemCImpl_vmread_reg32, uint64_t *, pu64Dst, uint32_t, u32VmcsField)
9843{
9844 VBOXSTRICTRC const rcStrict = iemVmxVmreadReg32(pVCpu, cbInstr, (uint32_t *)pu64Dst, u32VmcsField, NULL /* pExitInfo */);
9845 /* Zero the high part of the register on success. */
9846 if (rcStrict == VINF_SUCCESS)
9847 *pu64Dst = (uint32_t)*pu64Dst;
9848 return rcStrict;
9849}
9850
9851
9852/**
9853 * Implements 'VMREAD' memory, 64-bit register.
9854 */
9855IEM_CIMPL_DEF_3(iemCImpl_vmread_mem_reg64, uint8_t, iEffSeg, RTGCPTR, GCPtrDst, uint32_t, u64VmcsField)
9856{
9857 return iemVmxVmreadMem(pVCpu, cbInstr, iEffSeg, GCPtrDst, u64VmcsField, NULL /* pExitInfo */);
9858}
9859
9860
9861/**
9862 * Implements 'VMREAD' memory, 32-bit register.
9863 */
9864IEM_CIMPL_DEF_3(iemCImpl_vmread_mem_reg32, uint8_t, iEffSeg, RTGCPTR, GCPtrDst, uint32_t, u32VmcsField)
9865{
9866 return iemVmxVmreadMem(pVCpu, cbInstr, iEffSeg, GCPtrDst, u32VmcsField, NULL /* pExitInfo */);
9867}
9868
9869
9870/**
9871 * Implements 'INVVPID'.
9872 */
9873IEM_CIMPL_DEF_3(iemCImpl_invvpid, uint8_t, iEffSeg, RTGCPTR, GCPtrInvvpidDesc, uint64_t, uInvvpidType)
9874{
9875 return iemVmxInvvpid(pVCpu, cbInstr, iEffSeg, GCPtrInvvpidDesc, uInvvpidType, NULL /* pExitInfo */);
9876}
9877
9878
9879#if defined(VBOX_WITH_NESTED_HWVIRT_VMX_EPT) || defined(VBOX_WITH_IEM_RECOMPILER) /* HACK ALERT: Linking trick. */
9880/**
9881 * Implements 'INVEPT'.
9882 */
9883IEM_CIMPL_DEF_3(iemCImpl_invept, uint8_t, iEffSeg, RTGCPTR, GCPtrInveptDesc, uint64_t, uInveptType)
9884{
9885# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
9886 return iemVmxInvept(pVCpu, cbInstr, iEffSeg, GCPtrInveptDesc, uInveptType, NULL /* pExitInfo */);
9887# else
9888 RT_NOREF(pVCpu, cbInstr, iEffSeg, GCPtrInveptDesc, uInveptType);
9889 AssertFailedReturn(VERR_IEM_ASPECT_NOT_IMPLEMENTED);
9890# endif
9891}
9892#endif
9893
9894
9895/**
9896 * Implements VMX's implementation of PAUSE.
9897 */
9898IEM_CIMPL_DEF_0(iemCImpl_vmx_pause)
9899{
9900 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
9901 {
9902 VBOXSTRICTRC rcStrict = iemVmxVmexitInstrPause(pVCpu, cbInstr);
9903 if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
9904 return rcStrict;
9905 }
9906
9907 /*
9908 * Outside VMX non-root operation or if the PAUSE instruction does not cause
9909 * a VM-exit, the instruction operates normally.
9910 */
9911 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
9912}
9913
9914
9915/**
9916 * @callback_method_impl{FNPGMPHYSHANDLER, VMX APIC-access page accesses}
9917 *
9918 * @remarks The @a uUser argument is currently unused.
9919 */
9920DECLCALLBACK(VBOXSTRICTRC) iemVmxApicAccessPageHandler(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhysFault, void *pvPhys,
9921 void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType,
9922 PGMACCESSORIGIN enmOrigin, uint64_t uUser)
9923{
9924 RT_NOREF3(pvPhys, enmOrigin, uUser);
9925
9926 RTGCPHYS const GCPhysAccessBase = GCPhysFault & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
9927 if (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
9928 {
9929 Assert(CPUMIsGuestVmxProcCtls2Set(IEM_GET_CTX(pVCpu), VMX_PROC_CTLS2_VIRT_APIC_ACCESS));
9930 Assert(CPUMGetGuestVmxApicAccessPageAddrEx(IEM_GET_CTX(pVCpu)) == GCPhysAccessBase);
9931
9932 uint32_t const fAccess = enmAccessType == PGMACCESSTYPE_WRITE ? IEM_ACCESS_DATA_W : IEM_ACCESS_DATA_R;
9933 uint16_t const offAccess = GCPhysFault & GUEST_PAGE_OFFSET_MASK;
9934
9935 LogFlowFunc(("Fault at %#RGp (cbBuf=%u fAccess=%#x)\n", GCPhysFault, cbBuf, fAccess));
9936 VBOXSTRICTRC rcStrict = iemVmxVirtApicAccessMem(pVCpu, offAccess, cbBuf, pvBuf, fAccess);
9937 if (RT_FAILURE(rcStrict))
9938 return rcStrict;
9939
9940 /* Any access on this APIC-access page has been handled, caller should not carry out the access. */
9941 return VINF_SUCCESS;
9942 }
9943
9944 LogFunc(("Accessed outside VMX non-root mode, deregistering page handler for %#RGp\n", GCPhysAccessBase));
9945 int rc = PGMHandlerPhysicalDeregister(pVM, GCPhysAccessBase);
9946 if (RT_FAILURE(rc))
9947 return rc;
9948
9949 /* Instruct the caller of this handler to perform the read/write as normal memory. */
9950 return VINF_PGM_HANDLER_DO_DEFAULT;
9951}
9952
9953
9954# ifndef IN_RING3
9955/**
9956 * @callback_method_impl{FNPGMRZPHYSPFHANDLER,
9957 * \#PF access handler callback for guest VMX APIC-access page.}
9958 */
9959DECLCALLBACK(VBOXSTRICTRC) iemVmxApicAccessPagePfHandler(PVMCC pVM, PVMCPUCC pVCpu, RTGCUINT uErr, PCPUMCTX pCtx,
9960 RTGCPTR pvFault, RTGCPHYS GCPhysFault, uint64_t uUser)
9961
9962{
9963 RT_NOREF3(pVM, pCtx, uUser);
9964
9965 /*
9966 * Handle the VMX APIC-access page only when the guest is in VMX non-root mode.
9967 * Otherwise we must deregister the page and allow regular RAM access.
9968 * Failing to do so lands us with endless EPT VM-exits.
9969 */
9970 RTGCPHYS const GCPhysPage = GCPhysFault & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
9971 if (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
9972 {
9973 Assert(CPUMIsGuestVmxProcCtls2Set(IEM_GET_CTX(pVCpu), VMX_PROC_CTLS2_VIRT_APIC_ACCESS));
9974 Assert(CPUMGetGuestVmxApicAccessPageAddrEx(IEM_GET_CTX(pVCpu)) == GCPhysPage);
9975
9976 /*
9977 * Check if the access causes an APIC-access VM-exit.
9978 */
9979 uint32_t fAccess;
9980 if (uErr & X86_TRAP_PF_ID)
9981 fAccess = IEM_ACCESS_INSTRUCTION;
9982 else if (uErr & X86_TRAP_PF_RW)
9983 fAccess = IEM_ACCESS_DATA_W;
9984 else
9985 fAccess = IEM_ACCESS_DATA_R;
9986
9987 RTGCPHYS const GCPhysNestedFault = (RTGCPHYS)pvFault;
9988 uint16_t const offAccess = GCPhysNestedFault & GUEST_PAGE_OFFSET_MASK;
9989 bool const fIntercept = iemVmxVirtApicIsMemAccessIntercepted(pVCpu, offAccess, 1 /* cbAccess */, fAccess);
9990 LogFlowFunc(("#PF at %#RGp (GCPhysNestedFault=%#RGp offAccess=%#x)\n", GCPhysFault, GCPhysNestedFault, offAccess));
9991 if (fIntercept)
9992 {
9993 /*
9994 * Query the source VM-exit (from the execution engine) that caused this access
9995 * within the APIC-access page. Currently only HM is supported.
9996 */
9997 AssertMsg(VM_IS_HM_ENABLED(pVM),
9998 ("VM-exit auxiliary info. fetching not supported for execution engine %d\n", pVM->bMainExecutionEngine));
9999
10000 HMEXITAUX HmExitAux;
10001 RT_ZERO(HmExitAux);
10002 int const rc = HMR0GetExitAuxInfo(pVCpu, &HmExitAux, HMVMX_READ_EXIT_INSTR_LEN
10003 | HMVMX_READ_EXIT_QUALIFICATION
10004 | HMVMX_READ_IDT_VECTORING_INFO
10005 | HMVMX_READ_IDT_VECTORING_ERROR_CODE);
10006 AssertRC(rc);
10007
10008 /*
10009 * Verify the VM-exit reason must be an EPT violation.
10010 * Other accesses should go through the other handler (iemVmxApicAccessPageHandler).
10011 * Refer to @bugref{10092#c33s} for a more detailed explanation.
10012 */
10013 AssertMsgReturn(HmExitAux.Vmx.uReason == VMX_EXIT_EPT_VIOLATION,
10014 ("Unexpected call to APIC-access page #PF handler for %#RGp offAccess=%u uErr=%#RGx uReason=%u\n",
10015 GCPhysPage, offAccess, uErr, HmExitAux.Vmx.uReason), VERR_IEM_IPE_7);
10016
10017 /*
10018 * Construct the virtual APIC-access VM-exit.
10019 */
10020 VMXAPICACCESS enmAccess;
10021 if (HmExitAux.Vmx.u64Qual & VMX_EXIT_QUAL_EPT_LINEAR_ADDR_VALID)
10022 {
10023 if (VMX_IDT_VECTORING_INFO_IS_VALID(HmExitAux.Vmx.uIdtVectoringInfo))
10024 enmAccess = VMXAPICACCESS_LINEAR_EVENT_DELIVERY;
10025 else if (fAccess == IEM_ACCESS_INSTRUCTION)
10026 enmAccess = VMXAPICACCESS_LINEAR_INSTR_FETCH;
10027 else if (fAccess & IEM_ACCESS_TYPE_WRITE)
10028 enmAccess = VMXAPICACCESS_LINEAR_WRITE;
10029 else
10030 enmAccess = VMXAPICACCESS_LINEAR_READ;
10031
10032 /* For linear-address accesses the instruction length must be valid. */
10033 AssertMsg(HmExitAux.Vmx.cbInstr > 0,
10034 ("Invalid APIC-access VM-exit instruction length. cbInstr=%u\n", HmExitAux.Vmx.cbInstr));
10035 }
10036 else
10037 {
10038 if (VMX_IDT_VECTORING_INFO_IS_VALID(HmExitAux.Vmx.uIdtVectoringInfo))
10039 enmAccess = VMXAPICACCESS_PHYSICAL_EVENT_DELIVERY;
10040 else
10041 {
10042 /** @todo How to distinguish between monitoring/trace vs other instructions
10043 * here? */
10044 enmAccess = VMXAPICACCESS_PHYSICAL_INSTR;
10045 }
10046
10047 /* For physical accesses the instruction length is undefined, so we zero it for safety and consistency. */
10048 HmExitAux.Vmx.cbInstr = 0;
10049 }
10050
10051 /*
10052 * Raise the APIC-access VM-exit.
10053 */
10054 LogFlowFunc(("Raising APIC-access VM-exit from #PF handler at offset %#x\n", offAccess));
10055 VMXVEXITINFO const ExitInfo
10056 = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN(VMX_EXIT_APIC_ACCESS,
10057 RT_BF_MAKE(VMX_BF_EXIT_QUAL_APIC_ACCESS_OFFSET, offAccess)
10058 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_APIC_ACCESS_TYPE, enmAccess),
10059 HmExitAux.Vmx.cbInstr);
10060 VMXVEXITEVENTINFO const ExitEventInfo = VMXVEXITEVENTINFO_INIT_ONLY_IDT(HmExitAux.Vmx.uIdtVectoringInfo,
10061 HmExitAux.Vmx.uIdtVectoringErrCode);
10062 VBOXSTRICTRC const rcStrict = iemVmxVmexitApicAccessWithInfo(pVCpu, &ExitInfo, &ExitEventInfo);
10063 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
10064 }
10065
10066 /*
10067 * The access isn't intercepted, which means it needs to be virtualized.
10068 *
10069 * This requires emulating the instruction because we need the bytes being
10070 * read/written by the instruction not just the offset being accessed within
10071 * the APIC-access page (which we derive from the faulting address).
10072 */
10073 LogFlowFunc(("Access at offset %#x not intercepted -> VINF_EM_RAW_EMULATE_INSTR\n", offAccess));
10074 return VINF_EM_RAW_EMULATE_INSTR;
10075 }
10076
10077 /** @todo This isn't ideal but works for now as nested-hypervisors generally play
10078 * nice because the spec states that this page should be modified only when
10079 * no CPU refers to it in VMX non-root mode. Nonetheless, we could use an atomic
10080 * reference counter to ensure the aforementioned condition before
10081 * de-registering the page. */
10082 LogFunc(("Accessed outside VMX non-root mode, deregistering page handler for %#RGp\n", GCPhysPage));
10083 int const rc = PGMHandlerPhysicalDeregister(pVM, GCPhysPage);
10084 if (RT_FAILURE(rc))
10085 return rc;
10086
10087 return VINF_SUCCESS;
10088}
10089# endif /* !IN_RING3 */
10090
10091#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
10092
10093
10094/**
10095 * Implements 'VMCALL'.
10096 */
10097IEM_CIMPL_DEF_0(iemCImpl_vmcall)
10098{
10099 pVCpu->iem.s.cPotentialExits++;
10100
10101#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
10102 /* Nested-guest intercept. */
10103 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
10104 return iemVmxVmexitInstr(pVCpu, VMX_EXIT_VMCALL, cbInstr);
10105#endif
10106
10107 /* Join forces with vmmcall. */
10108 return IEM_CIMPL_CALL_1(iemCImpl_Hypercall, OP_VMCALL);
10109}
10110