VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllCImplVmxInstr.cpp.h@ 74431

Last change on this file since 74431 was 74429, checked in by vboxsync, 6 years ago

VMM/IEM: Nested VMX: bugref:9180 VM-exit bits.

1/* $Id: IEMAllCImplVmxInstr.cpp.h 74429 2018-09-24 05:08:48Z vboxsync $ */
2/** @file
3 * IEM - VT-x instruction implementation.
4 */
5
6/*
7 * Copyright (C) 2011-2018 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/**
20 * Implements 'VMCALL'.
21 */
22IEM_CIMPL_DEF_0(iemCImpl_vmcall)
23{
24 /** @todo NSTVMX: intercept. */
25
26 /* Join forces with vmmcall. */
27 return IEM_CIMPL_CALL_1(iemCImpl_Hypercall, OP_VMCALL);
28}
29
30#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
31
32/**
33 * Map of VMCS field encodings to their virtual-VMCS structure offsets.
34 *
35 * The first array dimension is the VMCS field encoding's Width (shifted left by 2) OR'ed with its
36 * Type, and the second dimension is the Index; see VMXVMCSFIELDENC.
37 */
38uint16_t const g_aoffVmcsMap[16][VMX_V_VMCS_MAX_INDEX + 1] =
39{
40 /* VMX_VMCS_ENC_WIDTH_16BIT | VMX_VMCS_ENC_TYPE_CONTROL: */
41 {
42 /* 0 */ RT_OFFSETOF(VMXVVMCS, u16Vpid),
43 /* 1 */ RT_OFFSETOF(VMXVVMCS, u16PostIntNotifyVector),
44 /* 2 */ RT_OFFSETOF(VMXVVMCS, u16EptpIndex),
45 /* 3-10 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
46 /* 11-18 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
47 /* 19-25 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX
48 },
49 /* VMX_VMCS_ENC_WIDTH_16BIT | VMX_VMCS_ENC_TYPE_VMEXIT_INFO: */
50 {
51 /* 0-7 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
52 /* 8-15 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
53 /* 16-23 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
54 /* 24-25 */ UINT16_MAX, UINT16_MAX
55 },
56 /* VMX_VMCS_ENC_WIDTH_16BIT | VMX_VMCS_ENC_TYPE_GUEST_STATE: */
57 {
58 /* 0 */ RT_OFFSETOF(VMXVVMCS, GuestEs),
59 /* 1 */ RT_OFFSETOF(VMXVVMCS, GuestCs),
60 /* 2 */ RT_OFFSETOF(VMXVVMCS, GuestSs),
61 /* 3 */ RT_OFFSETOF(VMXVVMCS, GuestDs),
62 /* 4 */ RT_OFFSETOF(VMXVVMCS, GuestFs),
63 /* 5 */ RT_OFFSETOF(VMXVVMCS, GuestGs),
64 /* 6 */ RT_OFFSETOF(VMXVVMCS, GuestLdtr),
65 /* 7 */ RT_OFFSETOF(VMXVVMCS, GuestTr),
66 /* 8 */ RT_OFFSETOF(VMXVVMCS, u16GuestIntStatus),
67 /* 9 */ RT_OFFSETOF(VMXVVMCS, u16PmlIndex),
68 /* 10-17 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
69 /* 18-25 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX
70 },
71 /* VMX_VMCS_ENC_WIDTH_16BIT | VMX_VMCS_ENC_TYPE_HOST_STATE: */
72 {
73 /* 0 */ RT_OFFSETOF(VMXVVMCS, HostEs),
74 /* 1 */ RT_OFFSETOF(VMXVVMCS, HostCs),
75 /* 2 */ RT_OFFSETOF(VMXVVMCS, HostSs),
76 /* 3 */ RT_OFFSETOF(VMXVVMCS, HostDs),
77 /* 4 */ RT_OFFSETOF(VMXVVMCS, HostFs),
78 /* 5 */ RT_OFFSETOF(VMXVVMCS, HostGs),
79 /* 6 */ RT_OFFSETOF(VMXVVMCS, HostTr),
80 /* 7-14 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
81 /* 15-22 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
82 /* 23-25 */ UINT16_MAX, UINT16_MAX, UINT16_MAX
83 },
84 /* VMX_VMCS_ENC_WIDTH_64BIT | VMX_VMCS_ENC_TYPE_CONTROL: */
85 {
86 /* 0 */ RT_OFFSETOF(VMXVVMCS, u64AddrIoBitmapA),
87 /* 1 */ RT_OFFSETOF(VMXVVMCS, u64AddrIoBitmapB),
88 /* 2 */ RT_OFFSETOF(VMXVVMCS, u64AddrMsrBitmap),
89 /* 3 */ RT_OFFSETOF(VMXVVMCS, u64AddrExitMsrStore),
90 /* 4 */ RT_OFFSETOF(VMXVVMCS, u64AddrExitMsrLoad),
91 /* 5 */ RT_OFFSETOF(VMXVVMCS, u64AddrEntryMsrLoad),
92 /* 6 */ RT_OFFSETOF(VMXVVMCS, u64ExecVmcsPtr),
93 /* 7 */ RT_OFFSETOF(VMXVVMCS, u64AddrPml),
94 /* 8 */ RT_OFFSETOF(VMXVVMCS, u64TscOffset),
95 /* 9 */ RT_OFFSETOF(VMXVVMCS, u64AddrVirtApic),
96 /* 10 */ RT_OFFSETOF(VMXVVMCS, u64AddrApicAccess),
97 /* 11 */ RT_OFFSETOF(VMXVVMCS, u64AddrPostedIntDesc),
98 /* 12 */ RT_OFFSETOF(VMXVVMCS, u64VmFuncCtls),
99 /* 13 */ RT_OFFSETOF(VMXVVMCS, u64EptpPtr),
100 /* 14 */ RT_OFFSETOF(VMXVVMCS, u64EoiExitBitmap0),
101 /* 15 */ RT_OFFSETOF(VMXVVMCS, u64EoiExitBitmap1),
102 /* 16 */ RT_OFFSETOF(VMXVVMCS, u64EoiExitBitmap2),
103 /* 17 */ RT_OFFSETOF(VMXVVMCS, u64EoiExitBitmap3),
104 /* 18 */ RT_OFFSETOF(VMXVVMCS, u64AddrEptpList),
105 /* 19 */ RT_OFFSETOF(VMXVVMCS, u64AddrVmreadBitmap),
106 /* 20 */ RT_OFFSETOF(VMXVVMCS, u64AddrVmwriteBitmap),
107 /* 21 */ RT_OFFSETOF(VMXVVMCS, u64AddrXcptVeInfo),
108 /* 22 */ RT_OFFSETOF(VMXVVMCS, u64AddrXssBitmap),
109 /* 23 */ RT_OFFSETOF(VMXVVMCS, u64AddrEnclsBitmap),
110 /* 24 */ UINT16_MAX,
111 /* 25 */ RT_OFFSETOF(VMXVVMCS, u64TscMultiplier)
112 },
113 /* VMX_VMCS_ENC_WIDTH_64BIT | VMX_VMCS_ENC_TYPE_VMEXIT_INFO: */
114 {
115 /* 0 */ RT_OFFSETOF(VMXVVMCS, u64GuestPhysAddr),
116 /* 1-8 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
117 /* 9-16 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
118 /* 17-24 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
119 /* 25 */ UINT16_MAX
120 },
121 /* VMX_VMCS_ENC_WIDTH_64BIT | VMX_VMCS_ENC_TYPE_GUEST_STATE: */
122 {
123 /* 0 */ RT_OFFSETOF(VMXVVMCS, u64VmcsLinkPtr),
124 /* 1 */ RT_OFFSETOF(VMXVVMCS, u64GuestDebugCtlMsr),
125 /* 2 */ RT_OFFSETOF(VMXVVMCS, u64GuestPatMsr),
126 /* 3 */ RT_OFFSETOF(VMXVVMCS, u64GuestEferMsr),
127 /* 4 */ RT_OFFSETOF(VMXVVMCS, u64GuestPerfGlobalCtlMsr),
128 /* 5 */ RT_OFFSETOF(VMXVVMCS, u64GuestPdpte0),
129 /* 6 */ RT_OFFSETOF(VMXVVMCS, u64GuestPdpte1),
130 /* 7 */ RT_OFFSETOF(VMXVVMCS, u64GuestPdpte2),
131 /* 8 */ RT_OFFSETOF(VMXVVMCS, u64GuestPdpte3),
132 /* 9 */ RT_OFFSETOF(VMXVVMCS, u64GuestBndcfgsMsr),
133 /* 10-17 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
134 /* 18-25 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX
135 },
136 /* VMX_VMCS_ENC_WIDTH_64BIT | VMX_VMCS_ENC_TYPE_HOST_STATE: */
137 {
138 /* 0 */ RT_OFFSETOF(VMXVVMCS, u64HostPatMsr),
139 /* 1 */ RT_OFFSETOF(VMXVVMCS, u64HostEferMsr),
140 /* 2 */ RT_OFFSETOF(VMXVVMCS, u64HostPerfGlobalCtlMsr),
141 /* 3-10 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
142 /* 11-18 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
143 /* 19-25 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX
144 },
145 /* VMX_VMCS_ENC_WIDTH_32BIT | VMX_VMCS_ENC_TYPE_CONTROL: */
146 {
147 /* 0 */ RT_OFFSETOF(VMXVVMCS, u32PinCtls),
148 /* 1 */ RT_OFFSETOF(VMXVVMCS, u32ProcCtls),
149 /* 2 */ RT_OFFSETOF(VMXVVMCS, u32XcptBitmap),
150 /* 3 */ RT_OFFSETOF(VMXVVMCS, u32XcptPFMask),
151 /* 4 */ RT_OFFSETOF(VMXVVMCS, u32XcptPFMatch),
152 /* 5 */ RT_OFFSETOF(VMXVVMCS, u32Cr3TargetCount),
153 /* 6 */ RT_OFFSETOF(VMXVVMCS, u32ExitCtls),
154 /* 7 */ RT_OFFSETOF(VMXVVMCS, u32ExitMsrStoreCount),
155 /* 8 */ RT_OFFSETOF(VMXVVMCS, u32ExitMsrLoadCount),
156 /* 9 */ RT_OFFSETOF(VMXVVMCS, u32EntryCtls),
157 /* 10 */ RT_OFFSETOF(VMXVVMCS, u32EntryMsrLoadCount),
158 /* 11 */ RT_OFFSETOF(VMXVVMCS, u32EntryIntInfo),
159 /* 12 */ RT_OFFSETOF(VMXVVMCS, u32EntryXcptErrCode),
160 /* 13 */ RT_OFFSETOF(VMXVVMCS, u32EntryInstrLen),
161 /* 14 */ RT_OFFSETOF(VMXVVMCS, u32TprThreshold),
162 /* 15 */ RT_OFFSETOF(VMXVVMCS, u32ProcCtls2),
163 /* 16 */ RT_OFFSETOF(VMXVVMCS, u32PleGap),
164 /* 17 */ RT_OFFSETOF(VMXVVMCS, u32PleWindow),
165 /* 18-25 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX
166 },
167 /* VMX_VMCS_ENC_WIDTH_32BIT | VMX_VMCS_ENC_TYPE_VMEXIT_INFO: */
168 {
169 /* 0 */ RT_OFFSETOF(VMXVVMCS, u32RoVmInstrError),
170 /* 1 */ RT_OFFSETOF(VMXVVMCS, u32RoExitReason),
171 /* 2 */ RT_OFFSETOF(VMXVVMCS, u32RoExitIntInfo),
172 /* 3 */ RT_OFFSETOF(VMXVVMCS, u32RoExitErrCode),
173 /* 4 */ RT_OFFSETOF(VMXVVMCS, u32RoIdtVectoringInfo),
174 /* 5 */ RT_OFFSETOF(VMXVVMCS, u32RoIdtVectoringErrCode),
175 /* 6 */ RT_OFFSETOF(VMXVVMCS, u32RoExitInstrLen),
176 /* 7 */ RT_OFFSETOF(VMXVVMCS, u32RoExitInstrInfo),
177 /* 8-15 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
178 /* 16-23 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
179 /* 24-25 */ UINT16_MAX, UINT16_MAX
180 },
181 /* VMX_VMCS_ENC_WIDTH_32BIT | VMX_VMCS_ENC_TYPE_GUEST_STATE: */
182 {
183 /* 0 */ RT_OFFSETOF(VMXVVMCS, u32GuestEsLimit),
184 /* 1 */ RT_OFFSETOF(VMXVVMCS, u32GuestCsLimit),
185 /* 2 */ RT_OFFSETOF(VMXVVMCS, u32GuestSsLimit),
186 /* 3 */ RT_OFFSETOF(VMXVVMCS, u32GuestDsLimit),
187 /* 4 */ RT_OFFSETOF(VMXVVMCS, u32GuestFsLimit),
188 /* 5 */ RT_OFFSETOF(VMXVVMCS, u32GuestGsLimit),
189 /* 6 */ RT_OFFSETOF(VMXVVMCS, u32GuestLdtrLimit),
190 /* 7 */ RT_OFFSETOF(VMXVVMCS, u32GuestTrLimit),
191 /* 8 */ RT_OFFSETOF(VMXVVMCS, u32GuestGdtrLimit),
192 /* 9 */ RT_OFFSETOF(VMXVVMCS, u32GuestIdtrLimit),
193 /* 10 */ RT_OFFSETOF(VMXVVMCS, u32GuestEsAttr),
194 /* 11 */ RT_OFFSETOF(VMXVVMCS, u32GuestCsAttr),
195 /* 12 */ RT_OFFSETOF(VMXVVMCS, u32GuestSsAttr),
196 /* 13 */ RT_OFFSETOF(VMXVVMCS, u32GuestDsAttr),
197 /* 14 */ RT_OFFSETOF(VMXVVMCS, u32GuestFsAttr),
198 /* 15 */ RT_OFFSETOF(VMXVVMCS, u32GuestGsAttr),
199 /* 16 */ RT_OFFSETOF(VMXVVMCS, u32GuestLdtrAttr),
200 /* 17 */ RT_OFFSETOF(VMXVVMCS, u32GuestTrAttr),
201 /* 18 */ RT_OFFSETOF(VMXVVMCS, u32GuestIntrState),
202 /* 19 */ RT_OFFSETOF(VMXVVMCS, u32GuestActivityState),
203 /* 20 */ RT_OFFSETOF(VMXVVMCS, u32GuestSmBase),
204 /* 21 */ RT_OFFSETOF(VMXVVMCS, u32GuestSysenterCS),
205 /* 22 */ UINT16_MAX,
206 /* 23 */ RT_OFFSETOF(VMXVVMCS, u32PreemptTimer),
207 /* 24-25 */ UINT16_MAX, UINT16_MAX
208 },
209 /* VMX_VMCS_ENC_WIDTH_32BIT | VMX_VMCS_ENC_TYPE_HOST_STATE: */
210 {
211 /* 0 */ RT_OFFSETOF(VMXVVMCS, u32HostSysenterCs),
212 /* 1-8 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
213 /* 9-16 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
214 /* 17-24 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
215 /* 25 */ UINT16_MAX
216 },
217 /* VMX_VMCS_ENC_WIDTH_NATURAL | VMX_VMCS_ENC_TYPE_CONTROL: */
218 {
219 /* 0 */ RT_OFFSETOF(VMXVVMCS, u64Cr0Mask),
220 /* 1 */ RT_OFFSETOF(VMXVVMCS, u64Cr4Mask),
221 /* 2 */ RT_OFFSETOF(VMXVVMCS, u64Cr0ReadShadow),
222 /* 3 */ RT_OFFSETOF(VMXVVMCS, u64Cr4ReadShadow),
223 /* 4 */ RT_OFFSETOF(VMXVVMCS, u64Cr3Target0),
224 /* 5 */ RT_OFFSETOF(VMXVVMCS, u64Cr3Target1),
225 /* 6 */ RT_OFFSETOF(VMXVVMCS, u64Cr3Target2),
226 /* 7 */ RT_OFFSETOF(VMXVVMCS, u64Cr3Target3),
227 /* 8-15 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
228 /* 16-23 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
229 /* 24-25 */ UINT16_MAX, UINT16_MAX
230 },
231 /* VMX_VMCS_ENC_WIDTH_NATURAL | VMX_VMCS_ENC_TYPE_VMEXIT_INFO: */
232 {
233 /* 0 */ RT_OFFSETOF(VMXVVMCS, u64ExitQual),
234 /* 1 */ RT_OFFSETOF(VMXVVMCS, u64IoRcx),
235 /* 2 */ RT_OFFSETOF(VMXVVMCS, u64IoRsi),
236 /* 3 */ RT_OFFSETOF(VMXVVMCS, u64IoRdi),
237 /* 4 */ RT_OFFSETOF(VMXVVMCS, u64IoRip),
238 /* 5 */ RT_OFFSETOF(VMXVVMCS, u64GuestLinearAddr),
239 /* 6-13 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
240 /* 14-21 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
241 /* 22-25 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX
242 },
243 /* VMX_VMCS_ENC_WIDTH_NATURAL | VMX_VMCS_ENC_TYPE_GUEST_STATE: */
244 {
245 /* 0 */ RT_OFFSETOF(VMXVVMCS, u64GuestCr0),
246 /* 1 */ RT_OFFSETOF(VMXVVMCS, u64GuestCr3),
247 /* 2 */ RT_OFFSETOF(VMXVVMCS, u64GuestCr4),
248 /* 3 */ RT_OFFSETOF(VMXVVMCS, u64GuestEsBase),
249 /* 4 */ RT_OFFSETOF(VMXVVMCS, u64GuestCsBase),
250 /* 5 */ RT_OFFSETOF(VMXVVMCS, u64GuestSsBase),
251 /* 6 */ RT_OFFSETOF(VMXVVMCS, u64GuestDsBase),
252 /* 7 */ RT_OFFSETOF(VMXVVMCS, u64GuestFsBase),
253 /* 8 */ RT_OFFSETOF(VMXVVMCS, u64GuestGsBase),
254 /* 9 */ RT_OFFSETOF(VMXVVMCS, u64GuestLdtrBase),
255 /* 10 */ RT_OFFSETOF(VMXVVMCS, u64GuestTrBase),
256 /* 11 */ RT_OFFSETOF(VMXVVMCS, u64GuestGdtrBase),
257 /* 12 */ RT_OFFSETOF(VMXVVMCS, u64GuestIdtrBase),
258 /* 13 */ RT_OFFSETOF(VMXVVMCS, u64GuestDr7),
259 /* 14 */ RT_OFFSETOF(VMXVVMCS, u64GuestRsp),
260 /* 15 */ RT_OFFSETOF(VMXVVMCS, u64GuestRip),
261 /* 16 */ RT_OFFSETOF(VMXVVMCS, u64GuestRFlags),
262 /* 17 */ RT_OFFSETOF(VMXVVMCS, u64GuestPendingDbgXcpt),
263 /* 18 */ RT_OFFSETOF(VMXVVMCS, u64GuestSysenterEsp),
264 /* 19 */ RT_OFFSETOF(VMXVVMCS, u64GuestSysenterEip),
265 /* 20-25 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX
266 },
267 /* VMX_VMCS_ENC_WIDTH_NATURAL | VMX_VMCS_ENC_TYPE_HOST_STATE: */
268 {
269 /* 0 */ RT_OFFSETOF(VMXVVMCS, u64HostCr0),
270 /* 1 */ RT_OFFSETOF(VMXVVMCS, u64HostCr3),
271 /* 2 */ RT_OFFSETOF(VMXVVMCS, u64HostCr4),
272 /* 3 */ RT_OFFSETOF(VMXVVMCS, u64HostFsBase),
273 /* 4 */ RT_OFFSETOF(VMXVVMCS, u64HostGsBase),
274 /* 5 */ RT_OFFSETOF(VMXVVMCS, u64HostTrBase),
275 /* 6 */ RT_OFFSETOF(VMXVVMCS, u64HostGdtrBase),
276 /* 7 */ RT_OFFSETOF(VMXVVMCS, u64HostIdtrBase),
277 /* 8 */ RT_OFFSETOF(VMXVVMCS, u64HostSysenterEsp),
278 /* 9 */ RT_OFFSETOF(VMXVVMCS, u64HostSysenterEip),
279 /* 10 */ RT_OFFSETOF(VMXVVMCS, u64HostRsp),
280 /* 11 */ RT_OFFSETOF(VMXVVMCS, u64HostRip),
281 /* 12-19 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
282 /* 20-25 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX
283 }
284};
285
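/*
 * Example (illustration only, not part of the original file): a minimal sketch of how a VMCS
 * field encoding is decomposed and used to index g_aoffVmcsMap.  The helper name below is
 * hypothetical; the real code open-codes this pattern in iemVmxVmcsGetHostSelReg,
 * iemVmxVmcsSetGuestSegReg and iemVmxVmreadCommon further down in this file.
 */
#if 0 /* illustration only */
DECLINLINE(uint16_t) iemVmxVmcsFieldOffsetExample(uint32_t uFieldEnc)
{
    VMXVMCSFIELDENC FieldEnc;
    FieldEnc.u = uFieldEnc;
    uint8_t const uWidthType = (FieldEnc.n.u2Width << 2) | FieldEnc.n.u2Type; /* first map dimension */
    uint8_t const uIndex     = FieldEnc.n.u8Index;                            /* second map dimension */
    Assert(uIndex <= VMX_V_VMCS_MAX_INDEX);
    return g_aoffVmcsMap[uWidthType][uIndex]; /* UINT16_MAX marks an unmapped/unsupported field. */
}
#endif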
286
287/**
288 * Gets the ModR/M, SIB and displacement byte(s) from decoded opcodes given their
289 * relative offsets.
290 */
291# ifdef IEM_WITH_CODE_TLB
292# define IEM_MODRM_GET_U8(a_pVCpu, a_bModRm, a_offModRm) do { } while (0)
293# define IEM_SIB_GET_U8(a_pVCpu, a_bSib, a_offSib) do { } while (0)
294# define IEM_DISP_GET_U16(a_pVCpu, a_u16Disp, a_offDisp) do { } while (0)
295# define IEM_DISP_GET_S8_SX_U16(a_pVCpu, a_u16Disp, a_offDisp) do { } while (0)
296# define IEM_DISP_GET_U32(a_pVCpu, a_u32Disp, a_offDisp) do { } while (0)
297# define IEM_DISP_GET_S8_SX_U32(a_pVCpu, a_u32Disp, a_offDisp) do { } while (0)
298# define IEM_DISP_GET_S32_SX_U64(a_pVCpu, a_u64Disp, a_offDisp) do { } while (0)
299# define IEM_DISP_GET_S8_SX_U64(a_pVCpu, a_u64Disp, a_offDisp) do { } while (0)
300# error "Implement me: Getting ModR/M, SIB, displacement needs to work even when instruction crosses a page boundary."
301# else /* !IEM_WITH_CODE_TLB */
302# define IEM_MODRM_GET_U8(a_pVCpu, a_bModRm, a_offModRm) \
303 do \
304 { \
305 Assert((a_offModRm) < (a_pVCpu)->iem.s.cbOpcode); \
306 (a_bModRm) = (a_pVCpu)->iem.s.abOpcode[(a_offModRm)]; \
307 } while (0)
308
309# define IEM_SIB_GET_U8(a_pVCpu, a_bSib, a_offSib) IEM_MODRM_GET_U8(a_pVCpu, a_bSib, a_offSib)
310
311# define IEM_DISP_GET_U16(a_pVCpu, a_u16Disp, a_offDisp) \
312 do \
313 { \
314 Assert((a_offDisp) + 1 < (a_pVCpu)->iem.s.cbOpcode); \
315 uint8_t const bTmpLo = (a_pVCpu)->iem.s.abOpcode[(a_offDisp)]; \
316 uint8_t const bTmpHi = (a_pVCpu)->iem.s.abOpcode[(a_offDisp) + 1]; \
317 (a_u16Disp) = RT_MAKE_U16(bTmpLo, bTmpHi); \
318 } while (0)
319
320# define IEM_DISP_GET_S8_SX_U16(a_pVCpu, a_u16Disp, a_offDisp) \
321 do \
322 { \
323 Assert((a_offDisp) < (a_pVCpu)->iem.s.cbOpcode); \
324 (a_u16Disp) = (int8_t)((a_pVCpu)->iem.s.abOpcode[(a_offDisp)]); \
325 } while (0)
326
327# define IEM_DISP_GET_U32(a_pVCpu, a_u32Disp, a_offDisp) \
328 do \
329 { \
330 Assert((a_offDisp) + 3 < (a_pVCpu)->iem.s.cbOpcode); \
331 uint8_t const bTmp0 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp)]; \
332 uint8_t const bTmp1 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp) + 1]; \
333 uint8_t const bTmp2 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp) + 2]; \
334 uint8_t const bTmp3 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp) + 3]; \
335 (a_u32Disp) = RT_MAKE_U32_FROM_U8(bTmp0, bTmp1, bTmp2, bTmp3); \
336 } while (0)
337
338# define IEM_DISP_GET_S8_SX_U32(a_pVCpu, a_u32Disp, a_offDisp) \
339 do \
340 { \
341 Assert((a_offDisp) + 1 < (a_pVCpu)->iem.s.cbOpcode); \
342 (a_u32Disp) = (int8_t)((a_pVCpu)->iem.s.abOpcode[(a_offDisp)]); \
343 } while (0)
344
345# define IEM_DISP_GET_S8_SX_U64(a_pVCpu, a_u64Disp, a_offDisp) \
346 do \
347 { \
348 Assert((a_offDisp) + 1 < (a_pVCpu)->iem.s.cbOpcode); \
349 (a_u64Disp) = (int8_t)((a_pVCpu)->iem.s.abOpcode[(a_offDisp)]); \
350 } while (0)
351
352# define IEM_DISP_GET_S32_SX_U64(a_pVCpu, a_u64Disp, a_offDisp) \
353 do \
354 { \
355 Assert((a_offDisp) + 3 < (a_pVCpu)->iem.s.cbOpcode); \
356 uint8_t const bTmp0 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp)]; \
357 uint8_t const bTmp1 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp) + 1]; \
358 uint8_t const bTmp2 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp) + 2]; \
359 uint8_t const bTmp3 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp) + 3]; \
360 (a_u64Disp) = (int32_t)RT_MAKE_U32_FROM_U8(bTmp0, bTmp1, bTmp2, bTmp3); \
361 } while (0)
362# endif /* !IEM_WITH_CODE_TLB */
363
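/*
 * Usage sketch (illustration only, not part of the original file): reading the ModR/M byte and
 * a 16-bit displacement that follows it from the decoded opcode bytes, the same way
 * iemVmxGetExitInstrInfo below uses these macros.  Assumes a PVMCPU pVCpu is in scope.
 */
#if 0 /* illustration only */
{
    uint8_t       bRm;
    uint8_t const offModRm = pVCpu->iem.s.offModRm;
    IEM_MODRM_GET_U8(pVCpu, bRm, offModRm);

    uint16_t      u16Disp = 0;
    uint8_t const offDisp = offModRm + sizeof(bRm); /* Displacement immediately follows ModR/M. */
    IEM_DISP_GET_U16(pVCpu, u16Disp, offDisp);
}
#endif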
364/** Gets the guest-physical address of the shadow VMCS for the given VCPU. */
365#define IEM_VMX_GET_SHADOW_VMCS(a_pVCpu) ((a_pVCpu)->cpum.GstCtx.hwvirt.vmx.GCPhysShadowVmcs)
366
367/** Whether a shadow VMCS is present for the given VCPU. */
368#define IEM_VMX_HAS_SHADOW_VMCS(a_pVCpu) RT_BOOL(IEM_VMX_GET_SHADOW_VMCS(a_pVCpu) != NIL_RTGCPHYS)
369
370/** Gets the VMXON region pointer. */
371#define IEM_VMX_GET_VMXON_PTR(a_pVCpu) ((a_pVCpu)->cpum.GstCtx.hwvirt.vmx.GCPhysVmxon)
372
373/** Gets the guest-physical address of the current VMCS for the given VCPU. */
374#define IEM_VMX_GET_CURRENT_VMCS(a_pVCpu) ((a_pVCpu)->cpum.GstCtx.hwvirt.vmx.GCPhysVmcs)
375
376/** Whether a current VMCS is present for the given VCPU. */
377#define IEM_VMX_HAS_CURRENT_VMCS(a_pVCpu) RT_BOOL(IEM_VMX_GET_CURRENT_VMCS(a_pVCpu) != NIL_RTGCPHYS)
378
379/** Assigns the guest-physical address of the current VMCS for the given VCPU. */
380#define IEM_VMX_SET_CURRENT_VMCS(a_pVCpu, a_GCPhysVmcs) \
381 do \
382 { \
383 Assert((a_GCPhysVmcs) != NIL_RTGCPHYS); \
384 (a_pVCpu)->cpum.GstCtx.hwvirt.vmx.GCPhysVmcs = (a_GCPhysVmcs); \
385 } while (0)
386
387/** Clears any current VMCS for the given VCPU. */
388#define IEM_VMX_CLEAR_CURRENT_VMCS(a_pVCpu) \
389 do \
390 { \
391 (a_pVCpu)->cpum.GstCtx.hwvirt.vmx.GCPhysVmcs = NIL_RTGCPHYS; \
392 } while (0)
393
394/** Checks for VMX instructions that require the CPU to be in VMX operation.
395 * @note For any changes here, check whether IEMOP_HLP_IN_VMX_OPERATION needs updating. */
396#define IEM_VMX_IN_VMX_OPERATION(a_pVCpu, a_szInstr, a_InsDiagPrefix) \
397 do \
398 { \
399 if (IEM_IS_VMX_ROOT_MODE(a_pVCpu)) \
400 { /* likely */ } \
401 else \
402 { \
403 Log((a_szInstr ": Not in VMX operation (root mode) -> #UD\n")); \
404 (a_pVCpu)->cpum.GstCtx.hwvirt.vmx.enmDiag = a_InsDiagPrefix##_VmxRoot; \
405 return iemRaiseUndefinedOpcode(a_pVCpu); \
406 } \
407 } while (0)
408
409/** Marks a VM-entry failure with a diagnostic reason, logs and returns. */
410#define IEM_VMX_VMENTRY_FAILED_RET(a_pVCpu, a_pszInstr, a_pszFailure, a_VmxDiag) \
411 do \
412 { \
413 Log(("%s: VM-entry failed! enmDiag=%u (%s) -> %s\n", (a_pszInstr), (a_VmxDiag), \
414 HMVmxGetDiagDesc(a_VmxDiag), (a_pszFailure))); \
415 (a_pVCpu)->cpum.GstCtx.hwvirt.vmx.enmDiag = (a_VmxDiag); \
416 return VERR_VMX_VMENTRY_FAILED; \
417 } while (0)
418
419/** Marks a VM-exit failure with a diagnostic reason, logs and returns. */
420#define IEM_VMX_VMEXIT_FAILED_RET(a_pVCpu, a_uExitReason, a_pszFailure, a_VmxDiag) \
421 do \
422 { \
423 Log(("VM-exit failed! uExitReason=%u enmDiag=%u (%s) -> %s\n", (a_uExitReason), (a_VmxDiag), \
424 HMVmxGetDiagDesc(a_VmxDiag), (a_pszFailure))); \
425 (a_pVCpu)->cpum.GstCtx.hwvirt.vmx.enmDiag = (a_VmxDiag); \
426 return VERR_VMX_VMEXIT_FAILED; \
427 } while (0)
428
429
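/*
 * Usage sketch (illustration only, not part of the original file): how the failure macros above
 * are typically invoked from a VM-entry check.  The condition, failure string and diagnostic
 * enumerator are made-up placeholders; real diagnostics look like kVmxVDiag_Vmread_Cpl used later
 * in this file.  Assumes pVCpu and pszInstr are in scope.
 */
#if 0 /* illustration only */
if (!fSomeEntryCheckPassed)     /* placeholder condition */
    IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, "some-check", kVmxVDiag_Vmentry_SomeCheck);
#endif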
430
431/**
432 * Returns whether the given VMCS field is valid and supported by our emulation.
433 *
434 * @param pVCpu The cross context virtual CPU structure.
435 * @param u64FieldEnc The VMCS field encoding.
436 *
437 * @remarks This takes into account the CPU features exposed to the guest.
438 */
439IEM_STATIC bool iemVmxIsVmcsFieldValid(PVMCPU pVCpu, uint64_t u64FieldEnc)
440{
441 uint32_t const uFieldEncHi = RT_HI_U32(u64FieldEnc);
442 uint32_t const uFieldEncLo = RT_LO_U32(u64FieldEnc);
443 if (!uFieldEncHi)
444 { /* likely */ }
445 else
446 return false;
447
448 PCCPUMFEATURES pFeat = IEM_GET_GUEST_CPU_FEATURES(pVCpu);
449 switch (uFieldEncLo)
450 {
451 /*
452 * 16-bit fields.
453 */
454 /* Control fields. */
455 case VMX_VMCS16_VPID: return pFeat->fVmxVpid;
456 case VMX_VMCS16_POSTED_INT_NOTIFY_VECTOR: return pFeat->fVmxPostedInt;
457 case VMX_VMCS16_EPTP_INDEX: return pFeat->fVmxEptXcptVe;
458
459 /* Guest-state fields. */
460 case VMX_VMCS16_GUEST_ES_SEL:
461 case VMX_VMCS16_GUEST_CS_SEL:
462 case VMX_VMCS16_GUEST_SS_SEL:
463 case VMX_VMCS16_GUEST_DS_SEL:
464 case VMX_VMCS16_GUEST_FS_SEL:
465 case VMX_VMCS16_GUEST_GS_SEL:
466 case VMX_VMCS16_GUEST_LDTR_SEL:
467 case VMX_VMCS16_GUEST_TR_SEL:
468 case VMX_VMCS16_GUEST_INTR_STATUS: return pFeat->fVmxVirtIntDelivery;
469 case VMX_VMCS16_GUEST_PML_INDEX: return pFeat->fVmxPml;
470
471 /* Host-state fields. */
472 case VMX_VMCS16_HOST_ES_SEL:
473 case VMX_VMCS16_HOST_CS_SEL:
474 case VMX_VMCS16_HOST_SS_SEL:
475 case VMX_VMCS16_HOST_DS_SEL:
476 case VMX_VMCS16_HOST_FS_SEL:
477 case VMX_VMCS16_HOST_GS_SEL:
478 case VMX_VMCS16_HOST_TR_SEL: return true;
479
480 /*
481 * 64-bit fields.
482 */
483 /* Control fields. */
484 case VMX_VMCS64_CTRL_IO_BITMAP_A_FULL:
485 case VMX_VMCS64_CTRL_IO_BITMAP_A_HIGH:
486 case VMX_VMCS64_CTRL_IO_BITMAP_B_FULL:
487 case VMX_VMCS64_CTRL_IO_BITMAP_B_HIGH: return pFeat->fVmxUseIoBitmaps;
488 case VMX_VMCS64_CTRL_MSR_BITMAP_FULL:
489 case VMX_VMCS64_CTRL_MSR_BITMAP_HIGH: return pFeat->fVmxUseMsrBitmaps;
490 case VMX_VMCS64_CTRL_EXIT_MSR_STORE_FULL:
491 case VMX_VMCS64_CTRL_EXIT_MSR_STORE_HIGH:
492 case VMX_VMCS64_CTRL_EXIT_MSR_LOAD_FULL:
493 case VMX_VMCS64_CTRL_EXIT_MSR_LOAD_HIGH:
494 case VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_FULL:
495 case VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_HIGH:
496 case VMX_VMCS64_CTRL_EXEC_VMCS_PTR_FULL:
497 case VMX_VMCS64_CTRL_EXEC_VMCS_PTR_HIGH: return true;
498 case VMX_VMCS64_CTRL_EXEC_PML_ADDR_FULL:
499 case VMX_VMCS64_CTRL_EXEC_PML_ADDR_HIGH: return pFeat->fVmxPml;
500 case VMX_VMCS64_CTRL_TSC_OFFSET_FULL:
501 case VMX_VMCS64_CTRL_TSC_OFFSET_HIGH: return true;
502 case VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_FULL:
503 case VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_HIGH: return pFeat->fVmxUseTprShadow;
504 case VMX_VMCS64_CTRL_APIC_ACCESSADDR_FULL:
505 case VMX_VMCS64_CTRL_APIC_ACCESSADDR_HIGH: return pFeat->fVmxVirtApicAccess;
506 case VMX_VMCS64_CTRL_POSTED_INTR_DESC_FULL:
507 case VMX_VMCS64_CTRL_POSTED_INTR_DESC_HIGH: return pFeat->fVmxPostedInt;
508 case VMX_VMCS64_CTRL_VMFUNC_CTRLS_FULL:
509 case VMX_VMCS64_CTRL_VMFUNC_CTRLS_HIGH: return pFeat->fVmxVmFunc;
510 case VMX_VMCS64_CTRL_EPTP_FULL:
511 case VMX_VMCS64_CTRL_EPTP_HIGH: return pFeat->fVmxEpt;
512 case VMX_VMCS64_CTRL_EOI_BITMAP_0_FULL:
513 case VMX_VMCS64_CTRL_EOI_BITMAP_0_HIGH:
514 case VMX_VMCS64_CTRL_EOI_BITMAP_1_FULL:
515 case VMX_VMCS64_CTRL_EOI_BITMAP_1_HIGH:
516 case VMX_VMCS64_CTRL_EOI_BITMAP_2_FULL:
517 case VMX_VMCS64_CTRL_EOI_BITMAP_2_HIGH:
518 case VMX_VMCS64_CTRL_EOI_BITMAP_3_FULL:
519 case VMX_VMCS64_CTRL_EOI_BITMAP_3_HIGH: return pFeat->fVmxVirtIntDelivery;
520 case VMX_VMCS64_CTRL_EPTP_LIST_FULL:
521 case VMX_VMCS64_CTRL_EPTP_LIST_HIGH:
522 {
523 uint64_t const uVmFuncMsr = CPUMGetGuestIa32VmxVmFunc(pVCpu);
524 return RT_BOOL(RT_BF_GET(uVmFuncMsr, VMX_BF_VMFUNC_EPTP_SWITCHING));
525 }
526 case VMX_VMCS64_CTRL_VMREAD_BITMAP_FULL:
527 case VMX_VMCS64_CTRL_VMREAD_BITMAP_HIGH:
528 case VMX_VMCS64_CTRL_VMWRITE_BITMAP_FULL:
529 case VMX_VMCS64_CTRL_VMWRITE_BITMAP_HIGH: return pFeat->fVmxVmcsShadowing;
530 case VMX_VMCS64_CTRL_VIRTXCPT_INFO_ADDR_FULL:
531 case VMX_VMCS64_CTRL_VIRTXCPT_INFO_ADDR_HIGH: return pFeat->fVmxEptXcptVe;
532 case VMX_VMCS64_CTRL_XSS_EXITING_BITMAP_FULL:
533 case VMX_VMCS64_CTRL_XSS_EXITING_BITMAP_HIGH: return pFeat->fVmxXsavesXrstors;
534 case VMX_VMCS64_CTRL_ENCLS_EXITING_BITMAP_FULL:
535 case VMX_VMCS64_CTRL_ENCLS_EXITING_BITMAP_HIGH: return false;
536 case VMX_VMCS64_CTRL_TSC_MULTIPLIER_FULL:
537 case VMX_VMCS64_CTRL_TSC_MULTIPLIER_HIGH: return pFeat->fVmxUseTscScaling;
538
539 /* Read-only data fields. */
540 case VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL:
541 case VMX_VMCS64_RO_GUEST_PHYS_ADDR_HIGH: return pFeat->fVmxEpt;
542
543 /* Guest-state fields. */
544 case VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL:
545 case VMX_VMCS64_GUEST_VMCS_LINK_PTR_HIGH:
546 case VMX_VMCS64_GUEST_DEBUGCTL_FULL:
547 case VMX_VMCS64_GUEST_DEBUGCTL_HIGH: return true;
548 case VMX_VMCS64_GUEST_PAT_FULL:
549 case VMX_VMCS64_GUEST_PAT_HIGH: return pFeat->fVmxEntryLoadPatMsr || pFeat->fVmxExitSavePatMsr;
550 case VMX_VMCS64_GUEST_EFER_FULL:
551 case VMX_VMCS64_GUEST_EFER_HIGH: return pFeat->fVmxEntryLoadEferMsr || pFeat->fVmxExitSaveEferMsr;
552 case VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL:
553 case VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_HIGH: return false;
554 case VMX_VMCS64_GUEST_PDPTE0_FULL:
555 case VMX_VMCS64_GUEST_PDPTE0_HIGH:
556 case VMX_VMCS64_GUEST_PDPTE1_FULL:
557 case VMX_VMCS64_GUEST_PDPTE1_HIGH:
558 case VMX_VMCS64_GUEST_PDPTE2_FULL:
559 case VMX_VMCS64_GUEST_PDPTE2_HIGH:
560 case VMX_VMCS64_GUEST_PDPTE3_FULL:
561 case VMX_VMCS64_GUEST_PDPTE3_HIGH: return pFeat->fVmxEpt;
562 case VMX_VMCS64_GUEST_BNDCFGS_FULL:
563 case VMX_VMCS64_GUEST_BNDCFGS_HIGH: return false;
564
565 /* Host-state fields. */
566 case VMX_VMCS64_HOST_PAT_FULL:
567 case VMX_VMCS64_HOST_PAT_HIGH: return pFeat->fVmxExitLoadPatMsr;
568 case VMX_VMCS64_HOST_EFER_FULL:
569 case VMX_VMCS64_HOST_EFER_HIGH: return pFeat->fVmxExitLoadEferMsr;
570 case VMX_VMCS64_HOST_PERF_GLOBAL_CTRL_FULL:
571 case VMX_VMCS64_HOST_PERF_GLOBAL_CTRL_HIGH: return false;
572
573 /*
574 * 32-bit fields.
575 */
576 /* Control fields. */
577 case VMX_VMCS32_CTRL_PIN_EXEC:
578 case VMX_VMCS32_CTRL_PROC_EXEC:
579 case VMX_VMCS32_CTRL_EXCEPTION_BITMAP:
580 case VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK:
581 case VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH:
582 case VMX_VMCS32_CTRL_CR3_TARGET_COUNT:
583 case VMX_VMCS32_CTRL_EXIT:
584 case VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT:
585 case VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT:
586 case VMX_VMCS32_CTRL_ENTRY:
587 case VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT:
588 case VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO:
589 case VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE:
590 case VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH: return true;
591 case VMX_VMCS32_CTRL_TPR_THRESHOLD: return pFeat->fVmxUseTprShadow;
592 case VMX_VMCS32_CTRL_PROC_EXEC2: return pFeat->fVmxSecondaryExecCtls;
593 case VMX_VMCS32_CTRL_PLE_GAP:
594 case VMX_VMCS32_CTRL_PLE_WINDOW: return pFeat->fVmxPauseLoopExit;
595
596 /* Read-only data fields. */
597 case VMX_VMCS32_RO_VM_INSTR_ERROR:
598 case VMX_VMCS32_RO_EXIT_REASON:
599 case VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO:
600 case VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE:
601 case VMX_VMCS32_RO_IDT_VECTORING_INFO:
602 case VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE:
603 case VMX_VMCS32_RO_EXIT_INSTR_LENGTH:
604 case VMX_VMCS32_RO_EXIT_INSTR_INFO: return true;
605
606 /* Guest-state fields. */
607 case VMX_VMCS32_GUEST_ES_LIMIT:
608 case VMX_VMCS32_GUEST_CS_LIMIT:
609 case VMX_VMCS32_GUEST_SS_LIMIT:
610 case VMX_VMCS32_GUEST_DS_LIMIT:
611 case VMX_VMCS32_GUEST_FS_LIMIT:
612 case VMX_VMCS32_GUEST_GS_LIMIT:
613 case VMX_VMCS32_GUEST_LDTR_LIMIT:
614 case VMX_VMCS32_GUEST_TR_LIMIT:
615 case VMX_VMCS32_GUEST_GDTR_LIMIT:
616 case VMX_VMCS32_GUEST_IDTR_LIMIT:
617 case VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS:
618 case VMX_VMCS32_GUEST_CS_ACCESS_RIGHTS:
619 case VMX_VMCS32_GUEST_SS_ACCESS_RIGHTS:
620 case VMX_VMCS32_GUEST_DS_ACCESS_RIGHTS:
621 case VMX_VMCS32_GUEST_FS_ACCESS_RIGHTS:
622 case VMX_VMCS32_GUEST_GS_ACCESS_RIGHTS:
623 case VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS:
624 case VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS:
625 case VMX_VMCS32_GUEST_INT_STATE:
626 case VMX_VMCS32_GUEST_ACTIVITY_STATE:
627 case VMX_VMCS32_GUEST_SMBASE:
628 case VMX_VMCS32_GUEST_SYSENTER_CS: return true;
629 case VMX_VMCS32_PREEMPT_TIMER_VALUE: return pFeat->fVmxPreemptTimer;
630
631 /* Host-state fields. */
632 case VMX_VMCS32_HOST_SYSENTER_CS: return true;
633
634 /*
635 * Natural-width fields.
636 */
637 /* Control fields. */
638 case VMX_VMCS_CTRL_CR0_MASK:
639 case VMX_VMCS_CTRL_CR4_MASK:
640 case VMX_VMCS_CTRL_CR0_READ_SHADOW:
641 case VMX_VMCS_CTRL_CR4_READ_SHADOW:
642 case VMX_VMCS_CTRL_CR3_TARGET_VAL0:
643 case VMX_VMCS_CTRL_CR3_TARGET_VAL1:
644 case VMX_VMCS_CTRL_CR3_TARGET_VAL2:
645 case VMX_VMCS_CTRL_CR3_TARGET_VAL3: return true;
646
647 /* Read-only data fields. */
648 case VMX_VMCS_RO_EXIT_QUALIFICATION:
649 case VMX_VMCS_RO_IO_RCX:
650 case VMX_VMCS_RO_IO_RSX:
651 case VMX_VMCS_RO_IO_RDI:
652 case VMX_VMCS_RO_IO_RIP:
653 case VMX_VMCS_RO_EXIT_GUEST_LINEAR_ADDR: return true;
654
655 /* Guest-state fields. */
656 case VMX_VMCS_GUEST_CR0:
657 case VMX_VMCS_GUEST_CR3:
658 case VMX_VMCS_GUEST_CR4:
659 case VMX_VMCS_GUEST_ES_BASE:
660 case VMX_VMCS_GUEST_CS_BASE:
661 case VMX_VMCS_GUEST_SS_BASE:
662 case VMX_VMCS_GUEST_DS_BASE:
663 case VMX_VMCS_GUEST_FS_BASE:
664 case VMX_VMCS_GUEST_GS_BASE:
665 case VMX_VMCS_GUEST_LDTR_BASE:
666 case VMX_VMCS_GUEST_TR_BASE:
667 case VMX_VMCS_GUEST_GDTR_BASE:
668 case VMX_VMCS_GUEST_IDTR_BASE:
669 case VMX_VMCS_GUEST_DR7:
670 case VMX_VMCS_GUEST_RSP:
671 case VMX_VMCS_GUEST_RIP:
672 case VMX_VMCS_GUEST_RFLAGS:
673 case VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS:
674 case VMX_VMCS_GUEST_SYSENTER_ESP:
675 case VMX_VMCS_GUEST_SYSENTER_EIP: return true;
676
677 /* Host-state fields. */
678 case VMX_VMCS_HOST_CR0:
679 case VMX_VMCS_HOST_CR3:
680 case VMX_VMCS_HOST_CR4:
681 case VMX_VMCS_HOST_FS_BASE:
682 case VMX_VMCS_HOST_GS_BASE:
683 case VMX_VMCS_HOST_TR_BASE:
684 case VMX_VMCS_HOST_GDTR_BASE:
685 case VMX_VMCS_HOST_IDTR_BASE:
686 case VMX_VMCS_HOST_SYSENTER_ESP:
687 case VMX_VMCS_HOST_SYSENTER_EIP:
688 case VMX_VMCS_HOST_RSP:
689 case VMX_VMCS_HOST_RIP: return true;
690 }
691
692 return false;
693}
694
695
696/**
697 * Gets a host selector from the VMCS.
698 *
699 * @param pVmcs Pointer to the virtual VMCS.
700 * @param iSegReg The index of the segment register (X86_SREG_XXX).
701 */
702DECLINLINE(RTSEL) iemVmxVmcsGetHostSelReg(PCVMXVVMCS pVmcs, uint8_t iSegReg)
703{
704 Assert(iSegReg < X86_SREG_COUNT);
705 RTSEL HostSel;
706 uint8_t const uWidth = VMX_VMCS_ENC_WIDTH_16BIT;
707 uint8_t const uType = VMX_VMCS_ENC_TYPE_HOST_STATE;
708 uint8_t const uWidthType = (uWidth << 2) | uType;
709 uint8_t const uIndex = iSegReg + RT_BF_GET(VMX_VMCS16_HOST_ES_SEL, VMX_BF_VMCS_ENC_INDEX);
710 Assert(uIndex <= VMX_V_VMCS_MAX_INDEX);
711 uint16_t const offField = g_aoffVmcsMap[uWidthType][uIndex];
712 uint8_t const *pbVmcs = (uint8_t *)pVmcs;
713 uint8_t const *pbField = pbVmcs + offField;
714 HostSel = *(uint16_t *)pbField;
715 return HostSel;
716}
717
718
719/**
720 * Sets a guest segment register in the VMCS.
721 *
722 * @param pVmcs Pointer to the virtual VMCS.
723 * @param iSegReg The index of the segment register (X86_SREG_XXX).
724 * @param pSelReg Pointer to the segment register.
725 */
726IEM_STATIC void iemVmxVmcsSetGuestSegReg(PCVMXVVMCS pVmcs, uint8_t iSegReg, PCCPUMSELREG pSelReg)
727{
728 Assert(pSelReg);
729 Assert(iSegReg < X86_SREG_COUNT);
730
731 /* Selector. */
732 {
733 uint8_t const uWidth = VMX_VMCS_ENC_WIDTH_16BIT;
734 uint8_t const uType = VMX_VMCS_ENC_TYPE_GUEST_STATE;
735 uint8_t const uWidthType = (uWidth << 2) | uType;
736 uint8_t const uIndex = iSegReg + RT_BF_GET(VMX_VMCS16_GUEST_ES_SEL, VMX_BF_VMCS_ENC_INDEX);
737 Assert(uIndex <= VMX_V_VMCS_MAX_INDEX);
738 uint16_t const offField = g_aoffVmcsMap[uWidthType][uIndex];
739 uint8_t *pbVmcs = (uint8_t *)pVmcs;
740 uint8_t *pbField = pbVmcs + offField;
741 *(uint16_t *)pbField = pSelReg->Sel;
742 }
743
744 /* Limit. */
745 {
746 uint8_t const uWidth = VMX_VMCS_ENC_WIDTH_32BIT;
747 uint8_t const uType = VMX_VMCS_ENC_TYPE_GUEST_STATE;
748 uint8_t const uWidthType = (uWidth << 2) | uType;
749 uint8_t const uIndex = iSegReg + RT_BF_GET(VMX_VMCS32_GUEST_ES_LIMIT, VMX_BF_VMCS_ENC_INDEX);
750 Assert(uIndex <= VMX_V_VMCS_MAX_INDEX);
751 uint16_t const offField = g_aoffVmcsMap[uWidthType][uIndex];
752 uint8_t *pbVmcs = (uint8_t *)pVmcs;
753 uint8_t *pbField = pbVmcs + offField;
754 *(uint32_t *)pbField = pSelReg->u32Limit;
755 }
756
757 /* Base. */
758 {
759 uint8_t const uWidth = VMX_VMCS_ENC_WIDTH_NATURAL;
760 uint8_t const uType = VMX_VMCS_ENC_TYPE_GUEST_STATE;
761 uint8_t const uWidthType = (uWidth << 2) | uType;
762 uint8_t const uIndex = iSegReg + RT_BF_GET(VMX_VMCS_GUEST_ES_BASE, VMX_BF_VMCS_ENC_INDEX);
763 Assert(uIndex <= VMX_V_VMCS_MAX_INDEX);
764 uint16_t const offField = g_aoffVmcsMap[uWidthType][uIndex];
765 uint8_t *pbVmcs = (uint8_t *)pVmcs;
766 uint8_t *pbField = pbVmcs + offField;
767 *(uint64_t *)pbField = pSelReg->u64Base;
768 }
769
770 /* Attributes. */
771 {
772 uint32_t const fValidAttrMask = X86DESCATTR_TYPE | X86DESCATTR_DT | X86DESCATTR_DPL | X86DESCATTR_P
773 | X86DESCATTR_AVL | X86DESCATTR_L | X86DESCATTR_D | X86DESCATTR_G
774 | X86DESCATTR_UNUSABLE;
775 uint8_t const uWidth = VMX_VMCS_ENC_WIDTH_32BIT;
776 uint8_t const uType = VMX_VMCS_ENC_TYPE_GUEST_STATE;
777 uint8_t const uWidthType = (uWidth << 2) | uType;
778 uint8_t const uIndex = iSegReg + RT_BF_GET(VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS, VMX_BF_VMCS_ENC_INDEX);
779 Assert(uIndex <= VMX_V_VMCS_MAX_INDEX);
780 uint16_t const offField = g_aoffVmcsMap[uWidthType][uIndex];
781 uint8_t *pbVmcs = (uint8_t *)pVmcs;
782 uint8_t *pbField = pbVmcs + offField;
783 *(uint32_t *)pbField = pSelReg->Attr.u & fValidAttrMask;
784 }
785}
786
787
788/**
789 * Gets a guest segment register from the VMCS.
790 *
791 * @returns VBox status code.
792 * @param pVmcs Pointer to the virtual VMCS.
793 * @param iSegReg The index of the segment register (X86_SREG_XXX).
794 * @param pSelReg Where to store the segment register (only updated when
795 * VINF_SUCCESS is returned).
796 *
797 * @remarks Warning! This does not validate the contents of the retrieved segment
798 * register.
799 */
800IEM_STATIC int iemVmxVmcsGetGuestSegReg(PCVMXVVMCS pVmcs, uint8_t iSegReg, PCPUMSELREG pSelReg)
801{
802 Assert(pSelReg);
803 Assert(iSegReg < X86_SREG_COUNT);
804
805 /* Selector. */
806 uint16_t u16Sel;
807 {
808 uint8_t const uWidth = VMX_VMCS_ENC_WIDTH_16BIT;
809 uint8_t const uType = VMX_VMCS_ENC_TYPE_GUEST_STATE;
810 uint8_t const uWidthType = (uWidth << 2) | uType;
811 uint8_t const uIndex = iSegReg + RT_BF_GET(VMX_VMCS16_GUEST_ES_SEL, VMX_BF_VMCS_ENC_INDEX);
812 AssertReturn(uIndex <= VMX_V_VMCS_MAX_INDEX, VERR_IEM_IPE_3);
813 uint16_t const offField = g_aoffVmcsMap[uWidthType][uIndex];
814 uint8_t const *pbVmcs = (uint8_t *)pVmcs;
815 uint8_t const *pbField = pbVmcs + offField;
816 u16Sel = *(uint16_t *)pbField;
817 }
818
819 /* Limit. */
820 uint32_t u32Limit;
821 {
822 uint8_t const uWidth = VMX_VMCS_ENC_WIDTH_32BIT;
823 uint8_t const uType = VMX_VMCS_ENC_TYPE_GUEST_STATE;
824 uint8_t const uWidthType = (uWidth << 2) | uType;
825 uint8_t const uIndex = iSegReg + RT_BF_GET(VMX_VMCS32_GUEST_ES_LIMIT, VMX_BF_VMCS_ENC_INDEX);
826 AssertReturn(uIndex <= VMX_V_VMCS_MAX_INDEX, VERR_IEM_IPE_3);
827 uint16_t const offField = g_aoffVmcsMap[uWidthType][uIndex];
828 uint8_t const *pbVmcs = (uint8_t *)pVmcs;
829 uint8_t const *pbField = pbVmcs + offField;
830 u32Limit = *(uint32_t *)pbField;
831 }
832
833 /* Base. */
834 uint64_t u64Base;
835 {
836 uint8_t const uWidth = VMX_VMCS_ENC_WIDTH_NATURAL;
837 uint8_t const uType = VMX_VMCS_ENC_TYPE_GUEST_STATE;
838 uint8_t const uWidthType = (uWidth << 2) | uType;
839 uint8_t const uIndex = iSegReg + RT_BF_GET(VMX_VMCS_GUEST_ES_BASE, VMX_BF_VMCS_ENC_INDEX);
840 AssertReturn(uIndex <= VMX_V_VMCS_MAX_INDEX, VERR_IEM_IPE_3);
841 uint16_t const offField = g_aoffVmcsMap[uWidthType][uIndex];
842 uint8_t const *pbVmcs = (uint8_t *)pVmcs;
843 uint8_t const *pbField = pbVmcs + offField;
844 u64Base = *(uint64_t *)pbField;
845 /** @todo NSTVMX: Should we zero out high bits here for 32-bit virtual CPUs? */
846 }
847
848 /* Attributes. */
849 uint32_t u32Attr;
850 {
851 uint8_t const uWidth = VMX_VMCS_ENC_WIDTH_32BIT;
852 uint8_t const uType = VMX_VMCS_ENC_TYPE_GUEST_STATE;
853 uint8_t const uWidthType = (uWidth << 2) | uType;
854 uint8_t const uIndex = iSegReg + RT_BF_GET(VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS, VMX_BF_VMCS_ENC_INDEX);
855 AssertReturn(uIndex <= VMX_V_VMCS_MAX_INDEX, VERR_IEM_IPE_3);
856 uint16_t const offField = g_aoffVmcsMap[uWidthType][uIndex];
857 uint8_t const *pbVmcs = (uint8_t *)pVmcs;
858 uint8_t const *pbField = pbVmcs + offField;
859 u32Attr = *(uint32_t *)pbField;
860 }
861
862 pSelReg->Sel = u16Sel;
863 pSelReg->ValidSel = u16Sel;
864 pSelReg->fFlags = CPUMSELREG_FLAGS_VALID;
865 pSelReg->u32Limit = u32Limit;
866 pSelReg->u64Base = u64Base;
867 pSelReg->Attr.u = u32Attr;
868 return VINF_SUCCESS;
869}
870
871
872/**
873 * Gets VM-exit instruction information along with any displacement for an
874 * instruction VM-exit.
875 *
876 * @returns The VM-exit instruction information.
877 * @param pVCpu The cross context virtual CPU structure.
878 * @param uExitReason The VM-exit reason.
879 * @param uInstrId The VM-exit instruction identity (VMXINSTRID_XXX) if
880 * any. Pass VMXINSTRID_NONE otherwise.
881 * @param fPrimaryOpRead If the primary operand of the ModR/M byte (bits 0:3) is
882 * a read or write.
883 * @param pGCPtrDisp Where to store the displacement field. Optional, can be
884 * NULL.
885 */
886IEM_STATIC uint32_t iemVmxGetExitInstrInfo(PVMCPU pVCpu, uint32_t uExitReason, VMXINSTRID uInstrId, bool fPrimaryOpRead,
887 PRTGCPTR pGCPtrDisp)
888{
889 RTGCPTR GCPtrDisp;
890 VMXEXITINSTRINFO ExitInstrInfo;
891 ExitInstrInfo.u = 0;
892
893 /*
894 * Get and parse the ModR/M byte from our decoded opcodes.
895 */
896 uint8_t bRm;
897 uint8_t const offModRm = pVCpu->iem.s.offModRm;
898 IEM_MODRM_GET_U8(pVCpu, bRm, offModRm);
899 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
900 {
901 /*
902 * ModR/M indicates register addressing.
903 *
904 * The primary/secondary register operands are reported in the iReg1 or iReg2
905 * fields depending on whether it is a read/write form.
906 */
907 uint8_t idxReg1;
908 uint8_t idxReg2;
909 if (fPrimaryOpRead)
910 {
911 idxReg1 = ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg;
912 idxReg2 = (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB;
913 }
914 else
915 {
916 idxReg1 = (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB;
917 idxReg2 = ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg;
918 }
919 ExitInstrInfo.All.u2Scaling = 0;
920 ExitInstrInfo.All.iReg1 = idxReg1;
921 ExitInstrInfo.All.u3AddrSize = pVCpu->iem.s.enmEffAddrMode;
922 ExitInstrInfo.All.fIsRegOperand = 1;
923 ExitInstrInfo.All.uOperandSize = pVCpu->iem.s.enmEffOpSize;
924 ExitInstrInfo.All.iSegReg = 0;
925 ExitInstrInfo.All.iIdxReg = 0;
926 ExitInstrInfo.All.fIdxRegInvalid = 1;
927 ExitInstrInfo.All.iBaseReg = 0;
928 ExitInstrInfo.All.fBaseRegInvalid = 1;
929 ExitInstrInfo.All.iReg2 = idxReg2;
930
931 /* Displacement not applicable for register addressing. */
932 GCPtrDisp = 0;
933 }
934 else
935 {
936 /*
937 * ModR/M indicates memory addressing.
938 */
939 uint8_t uScale = 0;
940 bool fBaseRegValid = false;
941 bool fIdxRegValid = false;
942 uint8_t iBaseReg = 0;
943 uint8_t iIdxReg = 0;
944 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
945 {
946 /*
947 * Parse the ModR/M, displacement for 16-bit addressing mode.
948 * See Intel instruction spec. Table 2-1. "16-Bit Addressing Forms with the ModR/M Byte".
949 */
950 uint16_t u16Disp = 0;
951 uint8_t const offDisp = offModRm + sizeof(bRm);
952 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
953 {
954 /* Displacement without any registers. */
955 IEM_DISP_GET_U16(pVCpu, u16Disp, offDisp);
956 }
957 else
958 {
959 /* Register (index and base). */
960 switch (bRm & X86_MODRM_RM_MASK)
961 {
962 case 0: fBaseRegValid = true; iBaseReg = X86_GREG_xBX; fIdxRegValid = true; iIdxReg = X86_GREG_xSI; break;
963 case 1: fBaseRegValid = true; iBaseReg = X86_GREG_xBX; fIdxRegValid = true; iIdxReg = X86_GREG_xDI; break;
964 case 2: fBaseRegValid = true; iBaseReg = X86_GREG_xBP; fIdxRegValid = true; iIdxReg = X86_GREG_xSI; break;
965 case 3: fBaseRegValid = true; iBaseReg = X86_GREG_xBP; fIdxRegValid = true; iIdxReg = X86_GREG_xDI; break;
966 case 4: fIdxRegValid = true; iIdxReg = X86_GREG_xSI; break;
967 case 5: fIdxRegValid = true; iIdxReg = X86_GREG_xDI; break;
968 case 6: fBaseRegValid = true; iBaseReg = X86_GREG_xBP; break;
969 case 7: fBaseRegValid = true; iBaseReg = X86_GREG_xBX; break;
970 }
971
972 /* Register + displacement. */
973 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
974 {
975 case 0: break;
976 case 1: IEM_DISP_GET_S8_SX_U16(pVCpu, u16Disp, offDisp); break;
977 case 2: IEM_DISP_GET_U16(pVCpu, u16Disp, offDisp); break;
978 default:
979 {
980 /* Register addressing, handled at the beginning. */
981 AssertMsgFailed(("ModR/M %#x implies register addressing, memory addressing expected!", bRm));
982 break;
983 }
984 }
985 }
986
987 Assert(!uScale); /* There's no scaling/SIB byte for 16-bit addressing. */
988 GCPtrDisp = (int16_t)u16Disp; /* Sign-extend the displacement. */
989 }
990 else if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
991 {
992 /*
993 * Parse the ModR/M, SIB, displacement for 32-bit addressing mode.
994 * See Intel instruction spec. Table 2-2. "32-Bit Addressing Forms with the ModR/M Byte".
995 */
996 uint32_t u32Disp = 0;
997 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
998 {
999 /* Displacement without any registers. */
1000 uint8_t const offDisp = offModRm + sizeof(bRm);
1001 IEM_DISP_GET_U32(pVCpu, u32Disp, offDisp);
1002 }
1003 else
1004 {
1005 /* Register (and perhaps scale, index and base). */
1006 uint8_t offDisp = offModRm + sizeof(bRm);
1007 iBaseReg = (bRm & X86_MODRM_RM_MASK);
1008 if (iBaseReg == 4)
1009 {
1010 /* An SIB byte follows the ModR/M byte, parse it. */
1011 uint8_t bSib;
1012 uint8_t const offSib = offModRm + sizeof(bRm);
1013 IEM_SIB_GET_U8(pVCpu, bSib, offSib);
1014
1015 /* A displacement may follow SIB, update its offset. */
1016 offDisp += sizeof(bSib);
1017
1018 /* Get the scale. */
1019 uScale = (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
1020
1021 /* Get the index register. */
1022 iIdxReg = (bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK;
1023 fIdxRegValid = RT_BOOL(iIdxReg != 4);
1024
1025 /* Get the base register. */
1026 iBaseReg = bSib & X86_SIB_BASE_MASK;
1027 fBaseRegValid = true;
1028 if (iBaseReg == 5)
1029 {
1030 if ((bRm & X86_MODRM_MOD_MASK) == 0)
1031 {
1032 /* Mod is 0 implies a 32-bit displacement with no base. */
1033 fBaseRegValid = false;
1034 IEM_DISP_GET_U32(pVCpu, u32Disp, offDisp);
1035 }
1036 else
1037 {
1038 /* Mod is not 0 implies an 8-bit/32-bit displacement (handled below) with an EBP base. */
1039 iBaseReg = X86_GREG_xBP;
1040 }
1041 }
1042 }
1043
1044 /* Register + displacement. */
1045 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
1046 {
1047 case 0: /* Handled above */ break;
1048 case 1: IEM_DISP_GET_S8_SX_U32(pVCpu, u32Disp, offDisp); break;
1049 case 2: IEM_DISP_GET_U32(pVCpu, u32Disp, offDisp); break;
1050 default:
1051 {
1052 /* Register addressing, handled at the beginning. */
1053 AssertMsgFailed(("ModR/M %#x implies register addressing, memory addressing expected!", bRm));
1054 break;
1055 }
1056 }
1057 }
1058
1059 GCPtrDisp = (int32_t)u32Disp; /* Sign-extend the displacement. */
1060 }
1061 else
1062 {
1063 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT);
1064
1065 /*
1066 * Parse the ModR/M, SIB, displacement for 64-bit addressing mode.
1067 * See Intel instruction spec. 2.2 "IA-32e Mode".
1068 */
1069 uint64_t u64Disp = 0;
1070 bool const fRipRelativeAddr = RT_BOOL((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5);
1071 if (fRipRelativeAddr)
1072 {
1073 /*
1074 * RIP-relative addressing mode.
1075 *
1076 * The displacement is a signed 32-bit value, implying an offset range of +/-2G.
1077 * See Intel instruction spec. 2.2.1.6 "RIP-Relative Addressing".
1078 */
1079 uint8_t const offDisp = offModRm + sizeof(bRm);
1080 IEM_DISP_GET_S32_SX_U64(pVCpu, u64Disp, offDisp);
1081 }
1082 else
1083 {
1084 uint8_t offDisp = offModRm + sizeof(bRm);
1085
1086 /*
1087 * Register (and perhaps scale, index and base).
1088 *
1089 * REX.B extends the most-significant bit of the base register. However, REX.B
1090 * is ignored while determining whether an SIB follows the opcode. Hence, we
1091 * shall OR any REX.B bit -after- inspecting for an SIB byte below.
1092 *
1093 * See Intel instruction spec. Table 2-5. "Special Cases of REX Encodings".
1094 */
1095 iBaseReg = (bRm & X86_MODRM_RM_MASK);
1096 if (iBaseReg == 4)
1097 {
1098 /* An SIB byte follows the ModR/M byte, parse it. Displacement (if any) follows SIB. */
1099 uint8_t bSib;
1100 uint8_t const offSib = offModRm + sizeof(bRm);
1101 IEM_SIB_GET_U8(pVCpu, bSib, offSib);
1102
1103 /* Displacement may follow SIB, update its offset. */
1104 offDisp += sizeof(bSib);
1105
1106 /* Get the scale. */
1107 uScale = (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
1108
1109 /* Get the index. */
1110 iIdxReg = ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex;
1111 fIdxRegValid = RT_BOOL(iIdxReg != 4); /* R12 -can- be used as an index register. */
1112
1113 /* Get the base. */
1114 iBaseReg = (bSib & X86_SIB_BASE_MASK);
1115 fBaseRegValid = true;
1116 if (iBaseReg == 5)
1117 {
1118 if ((bRm & X86_MODRM_MOD_MASK) == 0)
1119 {
1120 /* Mod is 0 implies a signed 32-bit displacement with no base. */
1121 IEM_DISP_GET_S32_SX_U64(pVCpu, u64Disp, offDisp);
1122 }
1123 else
1124 {
1125 /* Mod is non-zero implies an 8-bit/32-bit displacement (handled below) with RBP or R13 as base. */
1126 iBaseReg = pVCpu->iem.s.uRexB ? X86_GREG_x13 : X86_GREG_xBP;
1127 }
1128 }
1129 }
1130 iBaseReg |= pVCpu->iem.s.uRexB;
1131
1132 /* Register + displacement. */
1133 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
1134 {
1135 case 0: /* Handled above */ break;
1136 case 1: IEM_DISP_GET_S8_SX_U64(pVCpu, u64Disp, offDisp); break;
1137 case 2: IEM_DISP_GET_S32_SX_U64(pVCpu, u64Disp, offDisp); break;
1138 default:
1139 {
1140 /* Register addressing, handled at the beginning. */
1141 AssertMsgFailed(("ModR/M %#x implies register addressing, memory addressing expected!", bRm));
1142 break;
1143 }
1144 }
1145 }
1146
1147 GCPtrDisp = fRipRelativeAddr ? pVCpu->cpum.GstCtx.rip + u64Disp : u64Disp;
1148 }
1149
1150 /*
1151 * The primary or secondary register operand is reported in iReg2 depending
1152 * on whether the primary operand is in read/write form.
1153 */
1154 uint8_t idxReg2;
1155 if (fPrimaryOpRead)
1156 {
1157 idxReg2 = bRm & X86_MODRM_RM_MASK;
1158 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
1159 idxReg2 |= pVCpu->iem.s.uRexB;
1160 }
1161 else
1162 {
1163 idxReg2 = (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK;
1164 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
1165 idxReg2 |= pVCpu->iem.s.uRexReg;
1166 }
1167 ExitInstrInfo.All.u2Scaling = uScale;
1168 ExitInstrInfo.All.iReg1 = 0; /* Not applicable for memory addressing. */
1169 ExitInstrInfo.All.u3AddrSize = pVCpu->iem.s.enmEffAddrMode;
1170 ExitInstrInfo.All.fIsRegOperand = 0;
1171 ExitInstrInfo.All.uOperandSize = pVCpu->iem.s.enmEffOpSize;
1172 ExitInstrInfo.All.iSegReg = pVCpu->iem.s.iEffSeg;
1173 ExitInstrInfo.All.iIdxReg = iIdxReg;
1174 ExitInstrInfo.All.fIdxRegInvalid = !fIdxRegValid;
1175 ExitInstrInfo.All.iBaseReg = iBaseReg;
1176 ExitInstrInfo.All.fBaseRegInvalid = !fBaseRegValid;
1177 ExitInstrInfo.All.iReg2 = idxReg2;
1178 }
1179
1180 /*
1181 * Handle exceptions to the norm for certain instructions.
1182 * (e.g. some instructions convey an instruction identity in place of iReg2).
1183 */
1184 switch (uExitReason)
1185 {
1186 case VMX_EXIT_GDTR_IDTR_ACCESS:
1187 {
1188 Assert(VMXINSTRID_IS_VALID(uInstrId));
1189 ExitInstrInfo.GdtIdt.u2InstrId = VMXINSTRID_GET_ID(uInstrId);
1190 ExitInstrInfo.GdtIdt.u2Undef0 = 0;
1191 break;
1192 }
1193
1194 case VMX_EXIT_LDTR_TR_ACCESS:
1195 {
1196 Assert(VMXINSTRID_IS_VALID(uInstrId));
1197 ExitInstrInfo.LdtTr.u2InstrId = VMXINSTRID_GET_ID(uInstrId);
1198 ExitInstrInfo.LdtTr.u2Undef0 = 0;
1199 break;
1200 }
1201
1202 case VMX_EXIT_RDRAND:
1203 case VMX_EXIT_RDSEED:
1204 {
1205 Assert(ExitInstrInfo.RdrandRdseed.u2OperandSize != 3);
1206 break;
1207 }
1208 }
1209
1210 /* Update displacement and return the constructed VM-exit instruction information field. */
1211 if (pGCPtrDisp)
1212 *pGCPtrDisp = GCPtrDisp;
1213 return ExitInstrInfo.u;
1214}
1215
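/*
 * Worked example (illustration only, not part of the original file): with a register-form
 * ModR/M byte of 0xC1 (mod=11b, reg=000b, rm=001b), no REX prefixes, and assuming the caller
 * passes fPrimaryOpRead=false (i.e. the r/m operand is the one being written), the code above
 * yields:
 *   iReg1          = 1 (RCX, the primary/destination register operand)
 *   iReg2          = 0 (RAX, the secondary register operand)
 *   fIsRegOperand  = 1, u2Scaling = 0, fIdxRegInvalid = fBaseRegInvalid = 1
 *   *pGCPtrDisp    = 0 (no displacement for register addressing)
 */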
1216
1217/**
1218 * Sets the VM-instruction error VMCS field.
1219 *
1220 * @param pVCpu The cross context virtual CPU structure.
1221 * @param enmInsErr The VM-instruction error.
1222 */
1223DECL_FORCE_INLINE(void) iemVmxVmcsSetVmInstrErr(PVMCPU pVCpu, VMXINSTRERR enmInsErr)
1224{
1225 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
1226 pVmcs->u32RoVmInstrError = enmInsErr;
1227}
1228
1229
1230/**
1231 * Implements VMSucceed for VMX instruction success.
1232 *
1233 * @param pVCpu The cross context virtual CPU structure.
1234 */
1235DECL_FORCE_INLINE(void) iemVmxVmSucceed(PVMCPU pVCpu)
1236{
1237 pVCpu->cpum.GstCtx.eflags.u32 &= ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF);
1238}
1239
1240
1241/**
1242 * Implements VMFailInvalid for VMX instruction failure.
1243 *
1244 * @param pVCpu The cross context virtual CPU structure.
1245 */
1246DECL_FORCE_INLINE(void) iemVmxVmFailInvalid(PVMCPU pVCpu)
1247{
1248 pVCpu->cpum.GstCtx.eflags.u32 &= ~(X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF);
1249 pVCpu->cpum.GstCtx.eflags.u32 |= X86_EFL_CF;
1250}
1251
1252
1253/**
1254 * Implements VMFailValid for VMX instruction failure.
1255 *
1256 * @param pVCpu The cross context virtual CPU structure.
1257 * @param enmInsErr The VM instruction error.
1258 */
1259DECL_FORCE_INLINE(void) iemVmxVmFailValid(PVMCPU pVCpu, VMXINSTRERR enmInsErr)
1260{
1261 if (IEM_VMX_HAS_CURRENT_VMCS(pVCpu))
1262 {
1263 pVCpu->cpum.GstCtx.eflags.u32 &= ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF);
1264 pVCpu->cpum.GstCtx.eflags.u32 |= X86_EFL_ZF;
1265 iemVmxVmcsSetVmInstrErr(pVCpu, enmInsErr);
1266 }
1267}
1268
1269
1270/**
1271 * Implements VMFail for VMX instruction failure.
1272 *
1273 * @param pVCpu The cross context virtual CPU structure.
1274 * @param enmInsErr The VM instruction error.
1275 */
1276DECL_FORCE_INLINE(void) iemVmxVmFail(PVMCPU pVCpu, VMXINSTRERR enmInsErr)
1277{
1278 if (IEM_VMX_HAS_CURRENT_VMCS(pVCpu))
1279 iemVmxVmFailValid(pVCpu, enmInsErr);
1280 else
1281 iemVmxVmFailInvalid(pVCpu);
1282}
1283
1284
1285/**
1286 * Checks if the given auto-load/store MSR area count is valid for the
1287 * implementation.
1288 *
1289 * @returns @c true if it's within the valid limit, @c false otherwise.
1290 * @param pVCpu The cross context virtual CPU structure.
1291 * @param uMsrCount The MSR area count to check.
1292 */
1293DECL_FORCE_INLINE(bool) iemVmxIsAutoMsrCountValid(PVMCPU pVCpu, uint32_t uMsrCount)
1294{
1295 uint64_t const u64VmxMiscMsr = CPUMGetGuestIa32VmxMisc(pVCpu);
1296 uint32_t const cMaxSupportedMsrs = VMX_MISC_MAX_MSRS(u64VmxMiscMsr);
1297 Assert(cMaxSupportedMsrs <= VMX_V_AUTOMSR_AREA_SIZE / sizeof(VMXAUTOMSR));
1298 if (uMsrCount <= cMaxSupportedMsrs)
1299 return true;
1300 return false;
1301}
1302
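/*
 * Example (illustration only, not part of the original file): assuming the virtual
 * IA32_VMX_MISC MSR reports the architectural minimum for its MSR-count field
 * (bits 27:25 = 0, i.e. 512 * (0 + 1) entries), counts up to 512 are accepted and
 * anything larger is rejected.
 */
#if 0 /* illustration only */
Assert( iemVmxIsAutoMsrCountValid(pVCpu, 512));
Assert(!iemVmxIsAutoMsrCountValid(pVCpu, 513));
#endif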
1303
1304/**
1305 * Flushes the current VMCS contents back to guest memory.
1306 *
1307 * @returns VBox status code.
1308 * @param pVCpu The cross context virtual CPU structure.
1309 */
1310DECL_FORCE_INLINE(int) iemVmxCommitCurrentVmcsToMemory(PVMCPU pVCpu)
1311{
1312 Assert(IEM_VMX_HAS_CURRENT_VMCS(pVCpu));
1313 int rc = PGMPhysSimpleWriteGCPhys(pVCpu->CTX_SUFF(pVM), IEM_VMX_GET_CURRENT_VMCS(pVCpu),
1314 pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs), sizeof(VMXVVMCS));
1315 IEM_VMX_CLEAR_CURRENT_VMCS(pVCpu);
1316 return rc;
1317}
1318
1319
1320/**
1321 * Implements VMSucceed for the VMREAD instruction and increments the guest RIP.
1322 *
1323 * @param pVCpu The cross context virtual CPU structure.
 * @param cbInstr The instruction length.
1324 */
1325DECL_FORCE_INLINE(void) iemVmxVmreadSuccess(PVMCPU pVCpu, uint8_t cbInstr)
1326{
1327 iemVmxVmSucceed(pVCpu);
1328 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1329}
1330
1331
1332/**
1333 * VMREAD common (memory/register) instruction execution worker.
1334 *
1335 * @returns Strict VBox status code.
1336 * @param pVCpu The cross context virtual CPU structure.
1337 * @param cbInstr The instruction length.
1338 * @param pu64Dst Where to write the VMCS value (only updated when
1339 * VINF_SUCCESS is returned).
1340 * @param u64FieldEnc The VMCS field encoding.
1341 * @param pExitInfo Pointer to the VM-exit information struct. Optional, can
1342 * be NULL.
1343 */
1344IEM_STATIC VBOXSTRICTRC iemVmxVmreadCommon(PVMCPU pVCpu, uint8_t cbInstr, uint64_t *pu64Dst, uint64_t u64FieldEnc,
1345 PCVMXVEXITINFO pExitInfo)
1346{
1347 if (IEM_IS_VMX_NON_ROOT_MODE(pVCpu))
1348 {
1349 RT_NOREF(pExitInfo); RT_NOREF(cbInstr);
1350 /** @todo NSTVMX: intercept. */
1351 /** @todo NSTVMX: VMCS shadowing intercept (VMREAD bitmap). */
1352 }
1353
1354 /* CPL. */
1355 if (pVCpu->iem.s.uCpl > 0)
1356 {
1357 Log(("vmread: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
1358 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmread_Cpl;
1359 return iemRaiseGeneralProtectionFault0(pVCpu);
1360 }
1361
1362 /* VMCS pointer in root mode. */
1363 if ( IEM_IS_VMX_ROOT_MODE(pVCpu)
1364 && !IEM_VMX_HAS_CURRENT_VMCS(pVCpu))
1365 {
1366 Log(("vmread: VMCS pointer %#RGp invalid -> VMFailInvalid\n", IEM_VMX_GET_CURRENT_VMCS(pVCpu)));
1367 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmread_PtrInvalid;
1368 iemVmxVmFailInvalid(pVCpu);
1369 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1370 return VINF_SUCCESS;
1371 }
1372
1373 /* VMCS-link pointer in non-root mode. */
1374 if ( IEM_IS_VMX_NON_ROOT_MODE(pVCpu)
1375 && !IEM_VMX_HAS_SHADOW_VMCS(pVCpu))
1376 {
1377 Log(("vmread: VMCS-link pointer %#RGp invalid -> VMFailInvalid\n", IEM_VMX_GET_SHADOW_VMCS(pVCpu)));
1378 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmread_LinkPtrInvalid;
1379 iemVmxVmFailInvalid(pVCpu);
1380 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1381 return VINF_SUCCESS;
1382 }
1383
1384 /* Supported VMCS field. */
1385 if (!iemVmxIsVmcsFieldValid(pVCpu, u64FieldEnc))
1386 {
1387 Log(("vmread: VMCS field %#RX64 invalid -> VMFail\n", u64FieldEnc));
1388 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmread_FieldInvalid;
1389 iemVmxVmFail(pVCpu, VMXINSTRERR_VMREAD_INVALID_COMPONENT);
1390 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1391 return VINF_SUCCESS;
1392 }
1393
1394 /*
1395 * Setup reading from the current or shadow VMCS.
1396 */
1397 uint8_t *pbVmcs;
1398 if (IEM_IS_VMX_NON_ROOT_MODE(pVCpu))
1399 pbVmcs = (uint8_t *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pShadowVmcs);
1400 else
1401 pbVmcs = (uint8_t *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
1402 Assert(pbVmcs);
1403
1404 VMXVMCSFIELDENC FieldEnc;
1405 FieldEnc.u = RT_LO_U32(u64FieldEnc);
1406 uint8_t const uWidth = FieldEnc.n.u2Width;
1407 uint8_t const uType = FieldEnc.n.u2Type;
1408 uint8_t const uWidthType = (uWidth << 2) | uType;
1409 uint8_t const uIndex = FieldEnc.n.u8Index;
1410 AssertReturn(uIndex <= VMX_V_VMCS_MAX_INDEX, VERR_IEM_IPE_2);
1411 uint16_t const offField = g_aoffVmcsMap[uWidthType][uIndex];
1412
1413 /*
1414 * Read the VMCS component based on the field's effective width.
1415 *
1416 * The effective width is 64-bit fields adjusted to 32-bits if the access-type
1417 * indicates high bits (little endian).
1418 *
1419 * Note! The caller is responsible to trim the result and update registers
1420 * or memory locations are required. Here we just zero-extend to the largest
1421 * type (i.e. 64-bits).
1422 */
1423 uint8_t *pbField = pbVmcs + offField;
1424 uint8_t const uEffWidth = HMVmxGetVmcsFieldWidthEff(FieldEnc.u);
1425 switch (uEffWidth)
1426 {
1427 case VMX_VMCS_ENC_WIDTH_64BIT:
1428 case VMX_VMCS_ENC_WIDTH_NATURAL: *pu64Dst = *(uint64_t *)pbField; break;
1429 case VMX_VMCS_ENC_WIDTH_32BIT: *pu64Dst = *(uint32_t *)pbField; break;
1430 case VMX_VMCS_ENC_WIDTH_16BIT: *pu64Dst = *(uint16_t *)pbField; break;
1431 }
1432 return VINF_SUCCESS;
1433}
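
/*
 * Illustration only (a minimal sketch, not compiled): how iemVmxVmreadCommon above and
 * iemVmxVmwrite below turn a VMCS field encoding into an offset within the virtual-VMCS
 * structure. The all-zero encoding is assumed here to denote the 16-bit VPID control
 * field (width 0, type 0, index 0), i.e. row 0, column 0 of g_aoffVmcsMap.
 */
#if 0
    VMXVMCSFIELDENC FieldEnc;
    FieldEnc.u = 0;                                                               /* Assumed: 16-bit width, control type, index 0 (VPID). */
    uint8_t const  uWidthType = (FieldEnc.n.u2Width << 2) | FieldEnc.n.u2Type;    /* Selects the row of g_aoffVmcsMap. */
    uint16_t const offField   = g_aoffVmcsMap[uWidthType][FieldEnc.n.u8Index];    /* Here: RT_OFFSETOF(VMXVVMCS, u16Vpid). */
    Assert(offField != UINT16_MAX);                                               /* UINT16_MAX marks encodings without a mapping. */
#endif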
1434
1435
1436/**
1437 * VMREAD (64-bit register) instruction execution worker.
1438 *
1439 * @returns Strict VBox status code.
1440 * @param pVCpu The cross context virtual CPU structure.
1441 * @param cbInstr The instruction length.
1442 * @param pu64Dst Where to store the VMCS field's value.
1443 * @param u64FieldEnc The VMCS field encoding.
1444 * @param pExitInfo Pointer to the VM-exit information struct. Optional, can
1445 * be NULL.
1446 */
1447IEM_STATIC VBOXSTRICTRC iemVmxVmreadReg64(PVMCPU pVCpu, uint8_t cbInstr, uint64_t *pu64Dst, uint64_t u64FieldEnc,
1448 PCVMXVEXITINFO pExitInfo)
1449{
1450 VBOXSTRICTRC rcStrict = iemVmxVmreadCommon(pVCpu, cbInstr, pu64Dst, u64FieldEnc, pExitInfo);
1451 if (rcStrict == VINF_SUCCESS)
1452 {
1453 iemVmxVmreadSuccess(pVCpu, cbInstr);
1454 return VINF_SUCCESS;
1455 }
1456
1457 Log(("vmread/reg: iemVmxVmreadCommon failed rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1458 return rcStrict;
1459}
1460
1461
1462/**
1463 * VMREAD (32-bit register) instruction execution worker.
1464 *
1465 * @returns Strict VBox status code.
1466 * @param pVCpu The cross context virtual CPU structure.
1467 * @param cbInstr The instruction length.
1468 * @param pu32Dst Where to store the VMCS field's value.
1469 * @param u32FieldEnc The VMCS field encoding.
1470 * @param pExitInfo Pointer to the VM-exit information struct. Optional, can
1471 * be NULL.
1472 */
1473IEM_STATIC VBOXSTRICTRC iemVmxVmreadReg32(PVMCPU pVCpu, uint8_t cbInstr, uint32_t *pu32Dst, uint64_t u32FieldEnc,
1474 PCVMXVEXITINFO pExitInfo)
1475{
1476 uint64_t u64Dst;
1477 VBOXSTRICTRC rcStrict = iemVmxVmreadCommon(pVCpu, cbInstr, &u64Dst, u32FieldEnc, pExitInfo);
1478 if (rcStrict == VINF_SUCCESS)
1479 {
1480 *pu32Dst = u64Dst;
1481 iemVmxVmreadSuccess(pVCpu, cbInstr);
1482 return VINF_SUCCESS;
1483 }
1484
1485 Log(("vmread/reg: iemVmxVmreadCommon failed rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1486 return rcStrict;
1487}
1488
1489
1490/**
1491 * VMREAD (memory) instruction execution worker.
1492 *
1493 * @returns Strict VBox status code.
1494 * @param pVCpu The cross context virtual CPU structure.
1495 * @param cbInstr The instruction length.
1496 * @param   iEffSeg         The effective segment register to use with @a
1497 *                          GCPtrDst.
1498 * @param enmEffAddrMode The effective addressing mode (only used with memory
1499 * operand).
1500 * @param GCPtrDst The guest linear address to store the VMCS field's
1501 * value.
1502 * @param u64FieldEnc The VMCS field encoding.
1503 * @param pExitInfo Pointer to the VM-exit information struct. Optional, can
1504 * be NULL.
1505 */
1506IEM_STATIC VBOXSTRICTRC iemVmxVmreadMem(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iEffSeg, IEMMODE enmEffAddrMode,
1507 RTGCPTR GCPtrDst, uint64_t u64FieldEnc, PCVMXVEXITINFO pExitInfo)
1508{
1509 uint64_t u64Dst;
1510 VBOXSTRICTRC rcStrict = iemVmxVmreadCommon(pVCpu, cbInstr, &u64Dst, u64FieldEnc, pExitInfo);
1511 if (rcStrict == VINF_SUCCESS)
1512 {
1513 /*
1514 * Write the VMCS field's value to the location specified in guest-memory.
1515 *
1516 * The pointer size depends on the address size (address-size prefix allowed).
1517 * The operand size depends on IA-32e mode (operand-size prefix not allowed).
1518 */
1519 static uint64_t const s_auAddrSizeMasks[] = { UINT64_C(0xffff), UINT64_C(0xffffffff), UINT64_C(0xffffffffffffffff) };
1520 Assert(enmEffAddrMode < RT_ELEMENTS(s_auAddrSizeMasks));
1521 GCPtrDst &= s_auAddrSizeMasks[enmEffAddrMode];
1522
1523 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1524 rcStrict = iemMemStoreDataU64(pVCpu, iEffSeg, GCPtrDst, u64Dst);
1525 else
1526 rcStrict = iemMemStoreDataU32(pVCpu, iEffSeg, GCPtrDst, u64Dst);
1527 if (rcStrict == VINF_SUCCESS)
1528 {
1529 iemVmxVmreadSuccess(pVCpu, cbInstr);
1530 return VINF_SUCCESS;
1531 }
1532
1533 Log(("vmread/mem: Failed to write to memory operand at %#RGv, rc=%Rrc\n", GCPtrDst, VBOXSTRICTRC_VAL(rcStrict)));
1534 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmread_PtrMap;
1535 return rcStrict;
1536 }
1537
1538 Log(("vmread/mem: iemVmxVmreadCommon failed rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1539 return rcStrict;
1540}
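
/*
 * Illustration only (a minimal sketch, not compiled): the address-size masking used by
 * iemVmxVmreadMem above and iemVmxVmwrite below. The mask table ordering implies the
 * IEMMODE_16BIT/32BIT/64BIT enumerators have the values 0/1/2; the sample pointer value
 * is an arbitrary placeholder.
 */
#if 0
    static uint64_t const s_auAddrSizeMasks[] = { UINT64_C(0xffff), UINT64_C(0xffffffff), UINT64_C(0xffffffffffffffff) };
    RTGCPTR GCPtrDst = UINT64_C(0x0000000012345678);
    GCPtrDst &= s_auAddrSizeMasks[IEMMODE_16BIT];     /* 16-bit addressing truncates the effective address to 0x5678. */
#endif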
1541
1542
1543/**
1544 * VMWRITE instruction execution worker.
1545 *
1546 * @returns Strict VBox status code.
1547 * @param pVCpu The cross context virtual CPU structure.
1548 * @param cbInstr The instruction length.
1549 * @param iEffSeg The effective segment register to use with @a u64Val.
1550 * Pass UINT8_MAX if it is a register access.
1551 * @param enmEffAddrMode The effective addressing mode (only used with memory
1552 * operand).
1553 * @param   u64Val          The value to write (or the guest linear address of the
1554 *                          value); @a iEffSeg indicates whether it is a memory
1555 *                          operand.
1556 * @param u64FieldEnc The VMCS field encoding.
1557 * @param pExitInfo Pointer to the VM-exit information struct. Optional, can
1558 * be NULL.
1559 */
1560IEM_STATIC VBOXSTRICTRC iemVmxVmwrite(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iEffSeg, IEMMODE enmEffAddrMode, uint64_t u64Val,
1561 uint64_t u64FieldEnc, PCVMXVEXITINFO pExitInfo)
1562{
1563 if (IEM_IS_VMX_NON_ROOT_MODE(pVCpu))
1564 {
1565 RT_NOREF(pExitInfo);
1566 /** @todo NSTVMX: intercept. */
1567 /** @todo NSTVMX: VMCS shadowing intercept (VMWRITE bitmap). */
1568 }
1569
1570 /* CPL. */
1571 if (pVCpu->iem.s.uCpl > 0)
1572 {
1573 Log(("vmwrite: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
1574 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmwrite_Cpl;
1575 return iemRaiseGeneralProtectionFault0(pVCpu);
1576 }
1577
1578 /* VMCS pointer in root mode. */
1579 if ( IEM_IS_VMX_ROOT_MODE(pVCpu)
1580 && !IEM_VMX_HAS_CURRENT_VMCS(pVCpu))
1581 {
1582 Log(("vmwrite: VMCS pointer %#RGp invalid -> VMFailInvalid\n", IEM_VMX_GET_CURRENT_VMCS(pVCpu)));
1583 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmwrite_PtrInvalid;
1584 iemVmxVmFailInvalid(pVCpu);
1585 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1586 return VINF_SUCCESS;
1587 }
1588
1589 /* VMCS-link pointer in non-root mode. */
1590 if ( IEM_IS_VMX_NON_ROOT_MODE(pVCpu)
1591 && !IEM_VMX_HAS_SHADOW_VMCS(pVCpu))
1592 {
1593 Log(("vmwrite: VMCS-link pointer %#RGp invalid -> VMFailInvalid\n", IEM_VMX_GET_SHADOW_VMCS(pVCpu)));
1594 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmwrite_LinkPtrInvalid;
1595 iemVmxVmFailInvalid(pVCpu);
1596 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1597 return VINF_SUCCESS;
1598 }
1599
1600 /* If the VMWRITE instruction references memory, access the specified memory operand. */
1601 bool const fIsRegOperand = iEffSeg == UINT8_MAX;
1602 if (!fIsRegOperand)
1603 {
1604 static uint64_t const s_auAddrSizeMasks[] = { UINT64_C(0xffff), UINT64_C(0xffffffff), UINT64_C(0xffffffffffffffff) };
1605 Assert(enmEffAddrMode < RT_ELEMENTS(s_auAddrSizeMasks));
1606 RTGCPTR const GCPtrVal = u64Val & s_auAddrSizeMasks[enmEffAddrMode];
1607
1608 /* Read the value from the specified guest memory location. */
1609 VBOXSTRICTRC rcStrict;
1610 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1611 rcStrict = iemMemFetchDataU64(pVCpu, &u64Val, iEffSeg, GCPtrVal);
1612 else
1613 {
1614 uint32_t u32Val;
1615 rcStrict = iemMemFetchDataU32(pVCpu, &u32Val, iEffSeg, GCPtrVal);
1616 u64Val = u32Val;
1617 }
1618 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
1619 {
1620 Log(("vmwrite: Failed to read value from memory operand at %#RGv, rc=%Rrc\n", GCPtrVal, VBOXSTRICTRC_VAL(rcStrict)));
1621 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmwrite_PtrMap;
1622 return rcStrict;
1623 }
1624 }
1625 else
1626 Assert(!pExitInfo || pExitInfo->InstrInfo.VmreadVmwrite.fIsRegOperand);
1627
1628 /* Supported VMCS field. */
1629 if (!iemVmxIsVmcsFieldValid(pVCpu, u64FieldEnc))
1630 {
1631 Log(("vmwrite: VMCS field %#RX64 invalid -> VMFail\n", u64FieldEnc));
1632 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmwrite_FieldInvalid;
1633 iemVmxVmFail(pVCpu, VMXINSTRERR_VMWRITE_INVALID_COMPONENT);
1634 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1635 return VINF_SUCCESS;
1636 }
1637
1638 /* Read-only VMCS field. */
1639 bool const fReadOnlyField = HMVmxIsVmcsFieldReadOnly(u64FieldEnc);
1640 if ( fReadOnlyField
1641 && !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fVmxVmwriteAll)
1642 {
1643 Log(("vmwrite: Write to read-only VMCS component %#RX64 -> VMFail\n", u64FieldEnc));
1644 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmwrite_FieldRo;
1645 iemVmxVmFail(pVCpu, VMXINSTRERR_VMWRITE_RO_COMPONENT);
1646 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1647 return VINF_SUCCESS;
1648 }
1649
1650 /*
1651 * Setup writing to the current or shadow VMCS.
1652 */
1653 uint8_t *pbVmcs;
1654 if (IEM_IS_VMX_NON_ROOT_MODE(pVCpu))
1655 pbVmcs = (uint8_t *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pShadowVmcs);
1656 else
1657 pbVmcs = (uint8_t *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
1658 Assert(pbVmcs);
1659
1660 VMXVMCSFIELDENC FieldEnc;
1661 FieldEnc.u = RT_LO_U32(u64FieldEnc);
1662 uint8_t const uWidth = FieldEnc.n.u2Width;
1663 uint8_t const uType = FieldEnc.n.u2Type;
1664 uint8_t const uWidthType = (uWidth << 2) | uType;
1665 uint8_t const uIndex = FieldEnc.n.u8Index;
1666 AssertReturn(uIndex <= VMX_V_VMCS_MAX_INDEX, VERR_IEM_IPE_2);
1667 uint16_t const offField = g_aoffVmcsMap[uWidthType][uIndex];
1668
1669 /*
1670 * Write the VMCS component based on the field's effective width.
1671 *
1672     * The effective width of a 64-bit field is adjusted to 32 bits if the
1673     * access-type indicates the high part of the field (little endian).
1674 */
1675 uint8_t *pbField = pbVmcs + offField;
1676 uint8_t const uEffWidth = HMVmxGetVmcsFieldWidthEff(FieldEnc.u);
1677 switch (uEffWidth)
1678 {
1679 case VMX_VMCS_ENC_WIDTH_64BIT:
1680 case VMX_VMCS_ENC_WIDTH_NATURAL: *(uint64_t *)pbField = u64Val; break;
1681 case VMX_VMCS_ENC_WIDTH_32BIT: *(uint32_t *)pbField = u64Val; break;
1682 case VMX_VMCS_ENC_WIDTH_16BIT: *(uint16_t *)pbField = u64Val; break;
1683 }
1684
1685 iemVmxVmSucceed(pVCpu);
1686 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1687 return VINF_SUCCESS;
1688}
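
/*
 * Illustration only (a minimal sketch, not compiled): how a decoder-level caller would be
 * expected to invoke iemVmxVmwrite for a register operand versus a memory operand, given
 * the iEffSeg == UINT8_MAX convention above. cbInstr, u64FieldEnc and GCPtrVal are
 * placeholders taken from the caller's context.
 */
#if 0
    /* Register operand: iEffSeg is UINT8_MAX and u64Val holds the value itself. */
    VBOXSTRICTRC rcStrict1 = iemVmxVmwrite(pVCpu, cbInstr, UINT8_MAX /*iEffSeg*/, IEMMODE_64BIT,
                                           pVCpu->cpum.GstCtx.rax, u64FieldEnc, NULL /*pExitInfo*/);
    /* Memory operand: iEffSeg is the effective segment and u64Val holds the guest linear address. */
    VBOXSTRICTRC rcStrict2 = iemVmxVmwrite(pVCpu, cbInstr, X86_SREG_DS, IEMMODE_64BIT,
                                           GCPtrVal, u64FieldEnc, NULL /*pExitInfo*/);
#endif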
1689
1690
1691/**
1692 * VMCLEAR instruction execution worker.
1693 *
1694 * @returns Strict VBox status code.
1695 * @param pVCpu The cross context virtual CPU structure.
1696 * @param cbInstr The instruction length.
1697 * @param iEffSeg The effective segment register to use with @a GCPtrVmcs.
1698 * @param GCPtrVmcs The linear address of the VMCS pointer.
1699 * @param pExitInfo Pointer to the VM-exit information struct. Optional, can
1700 * be NULL.
1701 *
1702 * @remarks Common VMX instruction checks are already expected to have been done
1703 *          by the caller, i.e. VMX operation, CR4.VMXE, Real/V86 mode, EFER/CS.L checks.
1704 */
1705IEM_STATIC VBOXSTRICTRC iemVmxVmclear(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPHYS GCPtrVmcs,
1706 PCVMXVEXITINFO pExitInfo)
1707{
1708 if (IEM_IS_VMX_NON_ROOT_MODE(pVCpu))
1709 {
1710 RT_NOREF(pExitInfo);
1711 /** @todo NSTVMX: intercept. */
1712 }
1713 Assert(IEM_IS_VMX_ROOT_MODE(pVCpu));
1714
1715 /* CPL. */
1716 if (pVCpu->iem.s.uCpl > 0)
1717 {
1718 Log(("vmclear: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
1719 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmclear_Cpl;
1720 return iemRaiseGeneralProtectionFault0(pVCpu);
1721 }
1722
1723 /* Get the VMCS pointer from the location specified by the source memory operand. */
1724 RTGCPHYS GCPhysVmcs;
1725 VBOXSTRICTRC rcStrict = iemMemFetchDataU64(pVCpu, &GCPhysVmcs, iEffSeg, GCPtrVmcs);
1726 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
1727 {
1728 Log(("vmclear: Failed to read VMCS physaddr from %#RGv, rc=%Rrc\n", GCPtrVmcs, VBOXSTRICTRC_VAL(rcStrict)));
1729 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmclear_PtrMap;
1730 return rcStrict;
1731 }
1732
1733 /* VMCS pointer alignment. */
1734 if (GCPhysVmcs & X86_PAGE_4K_OFFSET_MASK)
1735 {
1736 Log(("vmclear: VMCS pointer not page-aligned -> VMFail()\n"));
1737 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmclear_PtrAlign;
1738 iemVmxVmFail(pVCpu, VMXINSTRERR_VMCLEAR_INVALID_PHYSADDR);
1739 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1740 return VINF_SUCCESS;
1741 }
1742
1743 /* VMCS physical-address width limits. */
1744 if (GCPhysVmcs >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
1745 {
1746 Log(("vmclear: VMCS pointer extends beyond physical-address width -> VMFail()\n"));
1747 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmclear_PtrWidth;
1748 iemVmxVmFail(pVCpu, VMXINSTRERR_VMCLEAR_INVALID_PHYSADDR);
1749 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1750 return VINF_SUCCESS;
1751 }
1752
1753 /* VMCS is not the VMXON region. */
1754 if (GCPhysVmcs == pVCpu->cpum.GstCtx.hwvirt.vmx.GCPhysVmxon)
1755 {
1756 Log(("vmclear: VMCS pointer cannot be identical to VMXON region pointer -> VMFail()\n"));
1757 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmclear_PtrVmxon;
1758 iemVmxVmFail(pVCpu, VMXINSTRERR_VMCLEAR_VMXON_PTR);
1759 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1760 return VINF_SUCCESS;
1761 }
1762
1763 /* Ensure VMCS is not MMIO, ROM etc. This is not an Intel requirement but a
1764 restriction imposed by our implementation. */
1765 if (!PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmcs))
1766 {
1767 Log(("vmclear: VMCS not normal memory -> VMFail()\n"));
1768 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmclear_PtrAbnormal;
1769 iemVmxVmFail(pVCpu, VMXINSTRERR_VMCLEAR_INVALID_PHYSADDR);
1770 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1771 return VINF_SUCCESS;
1772 }
1773
1774 /*
1775 * VMCLEAR allows committing and clearing any valid VMCS pointer.
1776 *
1777 * If the current VMCS is the one being cleared, set its state to 'clear' and commit
1778 * to guest memory. Otherwise, set the state of the VMCS referenced in guest memory
1779 * to 'clear'.
1780 */
1781 uint8_t const fVmcsStateClear = VMX_V_VMCS_STATE_CLEAR;
1782 if (IEM_VMX_GET_CURRENT_VMCS(pVCpu) == GCPhysVmcs)
1783 {
1784 Assert(GCPhysVmcs != NIL_RTGCPHYS); /* Paranoia. */
1785 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs));
1786 pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs)->fVmcsState = fVmcsStateClear;
1787 iemVmxCommitCurrentVmcsToMemory(pVCpu);
1788 Assert(!IEM_VMX_HAS_CURRENT_VMCS(pVCpu));
1789 }
1790 else
1791 {
1792         rcStrict = PGMPhysSimpleWriteGCPhys(pVCpu->CTX_SUFF(pVM), GCPhysVmcs + RT_OFFSETOF(VMXVVMCS, fVmcsState),
1793 (const void *)&fVmcsStateClear, sizeof(fVmcsStateClear));
1794 }
1795
1796 iemVmxVmSucceed(pVCpu);
1797 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1798 return rcStrict;
1799}
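
/*
 * Illustration only (a minimal sketch, not compiled): clearing a VMCS that is not the
 * current VMCS boils down to the single launch-state byte write performed above.
 * GCPhysOtherVmcs is a hypothetical, valid, page-aligned VMCS address distinct from the
 * current VMCS.
 */
#if 0
    uint8_t const fVmcsStateClear = VMX_V_VMCS_STATE_CLEAR;
    int rc = PGMPhysSimpleWriteGCPhys(pVCpu->CTX_SUFF(pVM), GCPhysOtherVmcs + RT_OFFSETOF(VMXVVMCS, fVmcsState),
                                      (const void *)&fVmcsStateClear, sizeof(fVmcsStateClear));
#endif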
1800
1801
1802/**
1803 * VMPTRST instruction execution worker.
1804 *
1805 * @returns Strict VBox status code.
1806 * @param pVCpu The cross context virtual CPU structure.
1807 * @param cbInstr The instruction length.
1808 * @param iEffSeg The effective segment register to use with @a GCPtrVmcs.
1809 * @param GCPtrVmcs The linear address of where to store the current VMCS
1810 * pointer.
1811 * @param pExitInfo Pointer to the VM-exit information struct. Optional, can
1812 * be NULL.
1813 *
1814 * @remarks Common VMX instruction checks are already expected to have been done
1815 *          by the caller, i.e. VMX operation, CR4.VMXE, Real/V86 mode, EFER/CS.L checks.
1816 */
1817IEM_STATIC VBOXSTRICTRC iemVmxVmptrst(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPHYS GCPtrVmcs,
1818 PCVMXVEXITINFO pExitInfo)
1819{
1820 if (IEM_IS_VMX_NON_ROOT_MODE(pVCpu))
1821 {
1822 RT_NOREF(pExitInfo);
1823 /** @todo NSTVMX: intercept. */
1824 }
1825 Assert(IEM_IS_VMX_ROOT_MODE(pVCpu));
1826
1827 /* CPL. */
1828 if (pVCpu->iem.s.uCpl > 0)
1829 {
1830 Log(("vmptrst: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
1831 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrst_Cpl;
1832 return iemRaiseGeneralProtectionFault0(pVCpu);
1833 }
1834
1835    /* Store the current VMCS pointer to the location specified by the destination memory operand. */
1836 AssertCompile(NIL_RTGCPHYS == ~(RTGCPHYS)0U);
1837 VBOXSTRICTRC rcStrict = iemMemStoreDataU64(pVCpu, iEffSeg, GCPtrVmcs, IEM_VMX_GET_CURRENT_VMCS(pVCpu));
1838 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1839 {
1840 iemVmxVmSucceed(pVCpu);
1841 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1842 return rcStrict;
1843 }
1844
1845 Log(("vmptrst: Failed to store VMCS pointer to memory at destination operand %#Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1846 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrst_PtrMap;
1847 return rcStrict;
1848}
1849
1850
1851/**
1852 * VMPTRLD instruction execution worker.
1853 *
1854 * @returns Strict VBox status code.
1855 * @param pVCpu The cross context virtual CPU structure.
1856 * @param cbInstr The instruction length.
 * @param   iEffSeg     The effective segment register to use with @a GCPtrVmcs.
1857 * @param   GCPtrVmcs   The linear address of the VMCS pointer to load.
1858 * @param pExitInfo Pointer to the VM-exit information struct. Optional, can
1859 * be NULL.
1860 *
1861 * @remarks Common VMX instruction checks are already expected to have been done
1862 *          by the caller, i.e. VMX operation, CR4.VMXE, Real/V86 mode, EFER/CS.L checks.
1863 */
1864IEM_STATIC VBOXSTRICTRC iemVmxVmptrld(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPHYS GCPtrVmcs,
1865 PCVMXVEXITINFO pExitInfo)
1866{
1867 if (IEM_IS_VMX_NON_ROOT_MODE(pVCpu))
1868 {
1869 RT_NOREF(pExitInfo);
1870 /** @todo NSTVMX: intercept. */
1871 }
1872 Assert(IEM_IS_VMX_ROOT_MODE(pVCpu));
1873
1874 /* CPL. */
1875 if (pVCpu->iem.s.uCpl > 0)
1876 {
1877 Log(("vmptrld: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
1878 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrld_Cpl;
1879 return iemRaiseGeneralProtectionFault0(pVCpu);
1880 }
1881
1882 /* Get the VMCS pointer from the location specified by the source memory operand. */
1883 RTGCPHYS GCPhysVmcs;
1884 VBOXSTRICTRC rcStrict = iemMemFetchDataU64(pVCpu, &GCPhysVmcs, iEffSeg, GCPtrVmcs);
1885 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
1886 {
1887 Log(("vmptrld: Failed to read VMCS physaddr from %#RGv, rc=%Rrc\n", GCPtrVmcs, VBOXSTRICTRC_VAL(rcStrict)));
1888 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrld_PtrMap;
1889 return rcStrict;
1890 }
1891
1892 /* VMCS pointer alignment. */
1893 if (GCPhysVmcs & X86_PAGE_4K_OFFSET_MASK)
1894 {
1895 Log(("vmptrld: VMCS pointer not page-aligned -> VMFail()\n"));
1896 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrld_PtrAlign;
1897 iemVmxVmFail(pVCpu, VMXINSTRERR_VMPTRLD_INVALID_PHYSADDR);
1898 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1899 return VINF_SUCCESS;
1900 }
1901
1902 /* VMCS physical-address width limits. */
1903 if (GCPhysVmcs >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
1904 {
1905 Log(("vmptrld: VMCS pointer extends beyond physical-address width -> VMFail()\n"));
1906 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrld_PtrWidth;
1907 iemVmxVmFail(pVCpu, VMXINSTRERR_VMPTRLD_INVALID_PHYSADDR);
1908 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1909 return VINF_SUCCESS;
1910 }
1911
1912 /* VMCS is not the VMXON region. */
1913 if (GCPhysVmcs == pVCpu->cpum.GstCtx.hwvirt.vmx.GCPhysVmxon)
1914 {
1915 Log(("vmptrld: VMCS pointer cannot be identical to VMXON region pointer -> VMFail()\n"));
1916 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrld_PtrVmxon;
1917 iemVmxVmFail(pVCpu, VMXINSTRERR_VMPTRLD_VMXON_PTR);
1918 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1919 return VINF_SUCCESS;
1920 }
1921
1922 /* Ensure VMCS is not MMIO, ROM etc. This is not an Intel requirement but a
1923 restriction imposed by our implementation. */
1924 if (!PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmcs))
1925 {
1926 Log(("vmptrld: VMCS not normal memory -> VMFail()\n"));
1927 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrld_PtrAbnormal;
1928 iemVmxVmFail(pVCpu, VMXINSTRERR_VMPTRLD_INVALID_PHYSADDR);
1929 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1930 return VINF_SUCCESS;
1931 }
1932
1933 /* Read the VMCS revision ID from the VMCS. */
1934 VMXVMCSREVID VmcsRevId;
1935 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &VmcsRevId, GCPhysVmcs, sizeof(VmcsRevId));
1936 if (RT_FAILURE(rc))
1937 {
1938 Log(("vmptrld: Failed to read VMCS at %#RGp, rc=%Rrc\n", GCPhysVmcs, rc));
1939 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrld_PtrReadPhys;
1940 return rc;
1941 }
1942
1943    /* Verify that the VMCS revision specified by the guest matches what we reported to the guest,
1944       and also check the VMCS-shadowing feature. */
1945 if ( VmcsRevId.n.u31RevisionId != VMX_V_VMCS_REVISION_ID
1946 || ( VmcsRevId.n.fIsShadowVmcs
1947 && !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fVmxVmcsShadowing))
1948 {
1949 if (VmcsRevId.n.u31RevisionId != VMX_V_VMCS_REVISION_ID)
1950 {
1951 Log(("vmptrld: VMCS revision mismatch, expected %#RX32 got %#RX32 -> VMFail()\n", VMX_V_VMCS_REVISION_ID,
1952 VmcsRevId.n.u31RevisionId));
1953 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrld_VmcsRevId;
1954 iemVmxVmFail(pVCpu, VMXINSTRERR_VMPTRLD_INCORRECT_VMCS_REV);
1955 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1956 return VINF_SUCCESS;
1957 }
1958
1959 Log(("vmptrld: Shadow VMCS -> VMFail()\n"));
1960 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrld_ShadowVmcs;
1961 iemVmxVmFail(pVCpu, VMXINSTRERR_VMPTRLD_INCORRECT_VMCS_REV);
1962 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1963 return VINF_SUCCESS;
1964 }
1965
1966 /*
1967     * We only maintain the current VMCS in our virtual CPU context (CPUMCTX). Therefore,
1968 * VMPTRLD shall always flush any existing current VMCS back to guest memory before loading
1969 * a new VMCS as current.
1970 */
1971 if (IEM_VMX_GET_CURRENT_VMCS(pVCpu) != GCPhysVmcs)
1972 {
1973 iemVmxCommitCurrentVmcsToMemory(pVCpu);
1974 IEM_VMX_SET_CURRENT_VMCS(pVCpu, GCPhysVmcs);
1975 }
1976
1977 iemVmxVmSucceed(pVCpu);
1978 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1979 return VINF_SUCCESS;
1980}
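
/*
 * Illustration only (a minimal sketch, not compiled): the layout of the VMCS revision
 * dword that VMPTRLD above and VMXON below validate. The named bitfields indicate bits
 * 30:0 hold the revision identifier and bit 31 flags a shadow VMCS; a guest would
 * normally seed this from the value we report in IA32_VMX_BASIC.
 */
#if 0
    VMXVMCSREVID VmcsRevId;
    VmcsRevId.u               = 0;
    VmcsRevId.n.u31RevisionId = VMX_V_VMCS_REVISION_ID;     /* The revision we report to the guest. */
    VmcsRevId.n.fIsShadowVmcs = 0;                          /* An ordinary VMCS; 1 requires VMCS shadowing. */
    Assert(VmcsRevId.u == VMX_V_VMCS_REVISION_ID);          /* Passes the VMXON/VMPTRLD revision checks. */
#endif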
1981
1982
1983/**
1984 * VMXON instruction execution worker.
1985 *
1986 * @returns Strict VBox status code.
1987 * @param pVCpu The cross context virtual CPU structure.
1988 * @param cbInstr The instruction length.
1989 * @param iEffSeg The effective segment register to use with @a
1990 * GCPtrVmxon.
1991 * @param GCPtrVmxon The linear address of the VMXON pointer.
1992 * @param pExitInfo Pointer to the VM-exit instruction information struct.
1993 * Optional, can be NULL.
1994 *
1995 * @remarks Common VMX instruction checks are already expected to have been done
1996 *          by the caller, i.e. CR4.VMXE, Real/V86 mode, EFER/CS.L checks.
1997 */
1998IEM_STATIC VBOXSTRICTRC iemVmxVmxon(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPHYS GCPtrVmxon,
1999 PCVMXVEXITINFO pExitInfo)
2000{
2001#if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && !defined(IN_RING3)
2002 RT_NOREF5(pVCpu, cbInstr, iEffSeg, GCPtrVmxon, pExitInfo);
2003 return VINF_EM_RAW_EMULATE_INSTR;
2004#else
2005 if (!IEM_IS_VMX_ROOT_MODE(pVCpu))
2006 {
2007 /* CPL. */
2008 if (pVCpu->iem.s.uCpl > 0)
2009 {
2010 Log(("vmxon: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
2011 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_Cpl;
2012 return iemRaiseGeneralProtectionFault0(pVCpu);
2013 }
2014
2015 /* A20M (A20 Masked) mode. */
2016 if (!PGMPhysIsA20Enabled(pVCpu))
2017 {
2018 Log(("vmxon: A20M mode -> #GP(0)\n"));
2019 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_A20M;
2020 return iemRaiseGeneralProtectionFault0(pVCpu);
2021 }
2022
2023 /* CR0 MB1 bits. */
2024 uint64_t const uCr0Fixed0 = CPUMGetGuestIa32VmxCr0Fixed0(pVCpu);
2025 if (~pVCpu->cpum.GstCtx.cr0 & uCr0Fixed0)
2026 {
2027 Log(("vmxon: CR0 fixed0 bits cleared -> #GP(0)\n"));
2028 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_Cr0Fixed0;
2029 return iemRaiseGeneralProtectionFault0(pVCpu);
2030 }
2031
2032 /* CR4 MB1 bits. */
2033 uint64_t const uCr4Fixed0 = CPUMGetGuestIa32VmxCr4Fixed0(pVCpu);
2034 if (~pVCpu->cpum.GstCtx.cr4 & uCr4Fixed0)
2035 {
2036 Log(("vmxon: CR4 fixed0 bits cleared -> #GP(0)\n"));
2037 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_Cr4Fixed0;
2038 return iemRaiseGeneralProtectionFault0(pVCpu);
2039 }
2040
2041 /* Feature control MSR's LOCK and VMXON bits. */
2042 uint64_t const uMsrFeatCtl = CPUMGetGuestIa32FeatureControl(pVCpu);
2043 if (!(uMsrFeatCtl & (MSR_IA32_FEATURE_CONTROL_LOCK | MSR_IA32_FEATURE_CONTROL_VMXON)))
2044 {
2045 Log(("vmxon: Feature control lock bit or VMXON bit cleared -> #GP(0)\n"));
2046 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_MsrFeatCtl;
2047 return iemRaiseGeneralProtectionFault0(pVCpu);
2048 }
2049
2050 /* Get the VMXON pointer from the location specified by the source memory operand. */
2051 RTGCPHYS GCPhysVmxon;
2052 VBOXSTRICTRC rcStrict = iemMemFetchDataU64(pVCpu, &GCPhysVmxon, iEffSeg, GCPtrVmxon);
2053 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2054 {
2055 Log(("vmxon: Failed to read VMXON region physaddr from %#RGv, rc=%Rrc\n", GCPtrVmxon, VBOXSTRICTRC_VAL(rcStrict)));
2056 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_PtrMap;
2057 return rcStrict;
2058 }
2059
2060 /* VMXON region pointer alignment. */
2061 if (GCPhysVmxon & X86_PAGE_4K_OFFSET_MASK)
2062 {
2063 Log(("vmxon: VMXON region pointer not page-aligned -> VMFailInvalid\n"));
2064 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_PtrAlign;
2065 iemVmxVmFailInvalid(pVCpu);
2066 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
2067 return VINF_SUCCESS;
2068 }
2069
2070 /* VMXON physical-address width limits. */
2071 if (GCPhysVmxon >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
2072 {
2073 Log(("vmxon: VMXON region pointer extends beyond physical-address width -> VMFailInvalid\n"));
2074 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_PtrWidth;
2075 iemVmxVmFailInvalid(pVCpu);
2076 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
2077 return VINF_SUCCESS;
2078 }
2079
2080 /* Ensure VMXON region is not MMIO, ROM etc. This is not an Intel requirement but a
2081 restriction imposed by our implementation. */
2082 if (!PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmxon))
2083 {
2084 Log(("vmxon: VMXON region not normal memory -> VMFailInvalid\n"));
2085 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_PtrAbnormal;
2086 iemVmxVmFailInvalid(pVCpu);
2087 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
2088 return VINF_SUCCESS;
2089 }
2090
2091 /* Read the VMCS revision ID from the VMXON region. */
2092 VMXVMCSREVID VmcsRevId;
2093 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &VmcsRevId, GCPhysVmxon, sizeof(VmcsRevId));
2094 if (RT_FAILURE(rc))
2095 {
2096 Log(("vmxon: Failed to read VMXON region at %#RGp, rc=%Rrc\n", GCPhysVmxon, rc));
2097 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_PtrReadPhys;
2098 return rc;
2099 }
2100
2101 /* Verify the VMCS revision specified by the guest matches what we reported to the guest. */
2102 if (RT_UNLIKELY(VmcsRevId.u != VMX_V_VMCS_REVISION_ID))
2103 {
2104 /* Revision ID mismatch. */
2105 if (!VmcsRevId.n.fIsShadowVmcs)
2106 {
2107 Log(("vmxon: VMCS revision mismatch, expected %#RX32 got %#RX32 -> VMFailInvalid\n", VMX_V_VMCS_REVISION_ID,
2108 VmcsRevId.n.u31RevisionId));
2109 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_VmcsRevId;
2110 iemVmxVmFailInvalid(pVCpu);
2111 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
2112 return VINF_SUCCESS;
2113 }
2114
2115 /* Shadow VMCS disallowed. */
2116 Log(("vmxon: Shadow VMCS -> VMFailInvalid\n"));
2117 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_ShadowVmcs;
2118 iemVmxVmFailInvalid(pVCpu);
2119 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
2120 return VINF_SUCCESS;
2121 }
2122
2123 /*
2124 * Record that we're in VMX operation, block INIT, block and disable A20M.
2125 */
2126 pVCpu->cpum.GstCtx.hwvirt.vmx.GCPhysVmxon = GCPhysVmxon;
2127 IEM_VMX_CLEAR_CURRENT_VMCS(pVCpu);
2128 pVCpu->cpum.GstCtx.hwvirt.vmx.fInVmxRootMode = true;
2129
2130 EMMonitorWaitClear(pVCpu);
2131 /** @todo NSTVMX: Intel PT. */
2132
2133 iemVmxVmSucceed(pVCpu);
2134 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
2135# if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && defined(IN_RING3)
2136 return EMR3SetExecutionPolicy(pVCpu->CTX_SUFF(pVM)->pUVM, EMEXECPOLICY_IEM_ALL, true);
2137# else
2138 return VINF_SUCCESS;
2139# endif
2140 }
2141 else if (IEM_IS_VMX_NON_ROOT_MODE(pVCpu))
2142 {
2143 RT_NOREF(pExitInfo);
2144 /** @todo NSTVMX: intercept. */
2145 }
2146
2147 Assert(IEM_IS_VMX_ROOT_MODE(pVCpu));
2148
2149 /* CPL. */
2150 if (pVCpu->iem.s.uCpl > 0)
2151 {
2152 Log(("vmxon: In VMX root mode: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
2153 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_VmxRootCpl;
2154 return iemRaiseGeneralProtectionFault0(pVCpu);
2155 }
2156
2157 /* VMXON when already in VMX root mode. */
2158 iemVmxVmFail(pVCpu, VMXINSTRERR_VMXON_IN_VMXROOTMODE);
2159 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_VmxAlreadyRoot;
2160 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
2161 return VINF_SUCCESS;
2162#endif
2163}
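
/*
 * Illustration only (a minimal sketch, not compiled): the two pointer sanity checks
 * applied above to the VMXON region (and, in the same way, to VMCS pointers). The
 * example address is an arbitrary placeholder.
 */
#if 0
    RTGCPHYS const GCPhys            = UINT64_C(0x0000000123456000);
    uint8_t const  cMaxPhysAddrWidth = IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth;
    bool const     fAligned          = !(GCPhys & X86_PAGE_4K_OFFSET_MASK);     /* Must be 4K page aligned. */
    bool const     fWithinWidth      = !(GCPhys >> cMaxPhysAddrWidth);          /* No bits beyond the supported physical-address width. */
#endif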
2164
2165
2166/**
2167 * Gets the instruction diagnostic for segment base checks during VM-entry of a
2168 * nested-guest.
2169 *
2170 * @param iSegReg The segment index (X86_SREG_XXX).
2171 */
2172IEM_STATIC VMXVDIAG iemVmxGetDiagVmentrySegBase(unsigned iSegReg)
2173{
2174 switch (iSegReg)
2175 {
2176 case X86_SREG_CS: return kVmxVDiag_Vmentry_GuestSegBaseCs;
2177 case X86_SREG_DS: return kVmxVDiag_Vmentry_GuestSegBaseDs;
2178 case X86_SREG_ES: return kVmxVDiag_Vmentry_GuestSegBaseEs;
2179 case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegBaseFs;
2180 case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegBaseGs;
2181 case X86_SREG_SS: return kVmxVDiag_Vmentry_GuestSegBaseSs;
2182 IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_1);
2183 }
2184}
2185
2186
2187/**
2188 * Gets the instruction diagnostic for segment base checks during VM-entry of a
2189 * nested-guest that is in Virtual-8086 mode.
2190 *
2191 * @param iSegReg The segment index (X86_SREG_XXX).
2192 */
2193IEM_STATIC VMXVDIAG iemVmxGetDiagVmentrySegBaseV86(unsigned iSegReg)
2194{
2195 switch (iSegReg)
2196 {
2197 case X86_SREG_CS: return kVmxVDiag_Vmentry_GuestSegBaseV86Cs;
2198 case X86_SREG_DS: return kVmxVDiag_Vmentry_GuestSegBaseV86Ds;
2199 case X86_SREG_ES: return kVmxVDiag_Vmentry_GuestSegBaseV86Es;
2200 case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegBaseV86Fs;
2201 case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegBaseV86Gs;
2202 case X86_SREG_SS: return kVmxVDiag_Vmentry_GuestSegBaseV86Ss;
2203 IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_2);
2204 }
2205}
2206
2207
2208/**
2209 * Gets the instruction diagnostic for segment limit checks during VM-entry of a
2210 * nested-guest that is in Virtual-8086 mode.
2211 *
2212 * @param iSegReg The segment index (X86_SREG_XXX).
2213 */
2214IEM_STATIC VMXVDIAG iemVmxGetDiagVmentrySegLimitV86(unsigned iSegReg)
2215{
2216 switch (iSegReg)
2217 {
2218 case X86_SREG_CS: return kVmxVDiag_Vmentry_GuestSegLimitV86Cs;
2219 case X86_SREG_DS: return kVmxVDiag_Vmentry_GuestSegLimitV86Ds;
2220 case X86_SREG_ES: return kVmxVDiag_Vmentry_GuestSegLimitV86Es;
2221 case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegLimitV86Fs;
2222 case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegLimitV86Gs;
2223 case X86_SREG_SS: return kVmxVDiag_Vmentry_GuestSegLimitV86Ss;
2224 IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_3);
2225 }
2226}
2227
2228
2229/**
2230 * Gets the instruction diagnostic for segment attribute checks during VM-entry of a
2231 * nested-guest that is in Virtual-8086 mode.
2232 *
2233 * @param iSegReg The segment index (X86_SREG_XXX).
2234 */
2235IEM_STATIC VMXVDIAG iemVmxGetDiagVmentrySegAttrV86(unsigned iSegReg)
2236{
2237 switch (iSegReg)
2238 {
2239 case X86_SREG_CS: return kVmxVDiag_Vmentry_GuestSegAttrV86Cs;
2240 case X86_SREG_DS: return kVmxVDiag_Vmentry_GuestSegAttrV86Ds;
2241 case X86_SREG_ES: return kVmxVDiag_Vmentry_GuestSegAttrV86Es;
2242 case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegAttrV86Fs;
2243 case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegAttrV86Gs;
2244 case X86_SREG_SS: return kVmxVDiag_Vmentry_GuestSegAttrV86Ss;
2245 IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_4);
2246 }
2247}
2248
2249
2250/**
2251 * Gets the instruction diagnostic for segment attributes reserved bits failure
2252 * during VM-entry of a nested-guest.
2253 *
2254 * @param iSegReg The segment index (X86_SREG_XXX).
2255 */
2256IEM_STATIC VMXVDIAG iemVmxGetDiagVmentrySegAttrRsvd(unsigned iSegReg)
2257{
2258 switch (iSegReg)
2259 {
2260 case X86_SREG_CS: return kVmxVDiag_Vmentry_GuestSegAttrRsvdCs;
2261 case X86_SREG_DS: return kVmxVDiag_Vmentry_GuestSegAttrRsvdDs;
2262 case X86_SREG_ES: return kVmxVDiag_Vmentry_GuestSegAttrRsvdEs;
2263 case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegAttrRsvdFs;
2264 case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegAttrRsvdGs;
2265 case X86_SREG_SS: return kVmxVDiag_Vmentry_GuestSegAttrRsvdSs;
2266 IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_5);
2267 }
2268}
2269
2270
2271/**
2272 * Gets the instruction diagnostic for segment attributes descriptor-type
2273 * (code/data or system) failure during VM-entry of a nested-guest.
2274 *
2275 * @param iSegReg The segment index (X86_SREG_XXX).
2276 */
2277IEM_STATIC VMXVDIAG iemVmxGetDiagVmentrySegAttrDescType(unsigned iSegReg)
2278{
2279 switch (iSegReg)
2280 {
2281 case X86_SREG_CS: return kVmxVDiag_Vmentry_GuestSegAttrDescTypeCs;
2282 case X86_SREG_DS: return kVmxVDiag_Vmentry_GuestSegAttrDescTypeDs;
2283 case X86_SREG_ES: return kVmxVDiag_Vmentry_GuestSegAttrDescTypeEs;
2284 case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegAttrDescTypeFs;
2285 case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegAttrDescTypeGs;
2286 case X86_SREG_SS: return kVmxVDiag_Vmentry_GuestSegAttrDescTypeSs;
2287 IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_6);
2288 }
2289}
2290
2291
2292/**
2293 * Gets the instruction diagnostic for the segment attribute present-bit failure
2294 * during VM-entry of a nested-guest.
2295 *
2296 * @param iSegReg The segment index (X86_SREG_XXX).
2297 */
2298IEM_STATIC VMXVDIAG iemVmxGetDiagVmentrySegAttrPresent(unsigned iSegReg)
2299{
2300 switch (iSegReg)
2301 {
2302 case X86_SREG_CS: return kVmxVDiag_Vmentry_GuestSegAttrPresentCs;
2303 case X86_SREG_DS: return kVmxVDiag_Vmentry_GuestSegAttrPresentDs;
2304 case X86_SREG_ES: return kVmxVDiag_Vmentry_GuestSegAttrPresentEs;
2305 case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegAttrPresentFs;
2306 case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegAttrPresentGs;
2307 case X86_SREG_SS: return kVmxVDiag_Vmentry_GuestSegAttrPresentSs;
2308 IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_7);
2309 }
2310}
2311
2312
2313/**
2314 * Gets the instruction diagnostic for segment attribute granularity failure during
2315 * VM-entry of a nested-guest.
2316 *
2317 * @param iSegReg The segment index (X86_SREG_XXX).
2318 */
2319IEM_STATIC VMXVDIAG iemVmxGetDiagVmentrySegAttrGran(unsigned iSegReg)
2320{
2321 switch (iSegReg)
2322 {
2323 case X86_SREG_CS: return kVmxVDiag_Vmentry_GuestSegAttrGranCs;
2324 case X86_SREG_DS: return kVmxVDiag_Vmentry_GuestSegAttrGranDs;
2325 case X86_SREG_ES: return kVmxVDiag_Vmentry_GuestSegAttrGranEs;
2326 case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegAttrGranFs;
2327 case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegAttrGranGs;
2328 case X86_SREG_SS: return kVmxVDiag_Vmentry_GuestSegAttrGranSs;
2329 IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_8);
2330 }
2331}
2332
2333/**
2334 * Gets the instruction diagnostic for segment attribute DPL/RPL failure during
2335 * VM-entry of a nested-guest.
2336 *
2337 * @param iSegReg The segment index (X86_SREG_XXX).
2338 */
2339IEM_STATIC VMXVDIAG iemVmxGetDiagVmentrySegAttrDplRpl(unsigned iSegReg)
2340{
2341 switch (iSegReg)
2342 {
2343 case X86_SREG_CS: return kVmxVDiag_Vmentry_GuestSegAttrDplRplCs;
2344 case X86_SREG_DS: return kVmxVDiag_Vmentry_GuestSegAttrDplRplDs;
2345 case X86_SREG_ES: return kVmxVDiag_Vmentry_GuestSegAttrDplRplEs;
2346 case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegAttrDplRplFs;
2347 case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegAttrDplRplGs;
2348 case X86_SREG_SS: return kVmxVDiag_Vmentry_GuestSegAttrDplRplSs;
2349 IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_9);
2350 }
2351}
2352
2353
2354/**
2355 * Gets the instruction diagnostic for segment attribute type accessed failure
2356 * during VM-entry of a nested-guest.
2357 *
2358 * @param iSegReg The segment index (X86_SREG_XXX).
2359 */
2360IEM_STATIC VMXVDIAG iemVmxGetDiagVmentrySegAttrTypeAcc(unsigned iSegReg)
2361{
2362 switch (iSegReg)
2363 {
2364 case X86_SREG_CS: return kVmxVDiag_Vmentry_GuestSegAttrTypeAccCs;
2365 case X86_SREG_DS: return kVmxVDiag_Vmentry_GuestSegAttrTypeAccDs;
2366 case X86_SREG_ES: return kVmxVDiag_Vmentry_GuestSegAttrTypeAccEs;
2367 case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegAttrTypeAccFs;
2368 case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegAttrTypeAccGs;
2369 case X86_SREG_SS: return kVmxVDiag_Vmentry_GuestSegAttrTypeAccSs;
2370 IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_10);
2371 }
2372}
2373
2374
2375/**
2376 * Gets the instruction diagnostic for guest CR3 referenced PDPTE reserved bits
2377 * failure during VM-entry of a nested-guest.
2378 *
2379 * @param   iPdpte      The PDPTE entry index.
2380 */
2381IEM_STATIC VMXVDIAG iemVmxGetDiagVmentryPdpteRsvd(unsigned iPdpte)
2382{
2383 Assert(iPdpte < X86_PG_PAE_PDPE_ENTRIES);
2384 switch (iPdpte)
2385 {
2386 case 0: return kVmxVDiag_Vmentry_GuestPdpte0Rsvd;
2387 case 1: return kVmxVDiag_Vmentry_GuestPdpte1Rsvd;
2388 case 2: return kVmxVDiag_Vmentry_GuestPdpte2Rsvd;
2389 case 3: return kVmxVDiag_Vmentry_GuestPdpte3Rsvd;
2390 IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_11);
2391 }
2392}
2393
2394
2395/**
2396 * Gets the instruction diagnostic for host CR3 referenced PDPTE reserved bits
2397 * failure during VM-exit of a nested-guest.
2398 *
2399 * @param   iPdpte      The PDPTE entry index.
2400 */
2401IEM_STATIC VMXVDIAG iemVmxGetDiagVmexitPdpteRsvd(unsigned iPdpte)
2402{
2403 Assert(iPdpte < X86_PG_PAE_PDPE_ENTRIES);
2404 switch (iPdpte)
2405 {
2406 case 0: return kVmxVDiag_Vmexit_HostPdpte0Rsvd;
2407 case 1: return kVmxVDiag_Vmexit_HostPdpte1Rsvd;
2408 case 2: return kVmxVDiag_Vmexit_HostPdpte2Rsvd;
2409 case 3: return kVmxVDiag_Vmexit_HostPdpte3Rsvd;
2410 IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_12);
2411 }
2412}
2413
2414
2415/**
2416 * Checks guest control registers, debug registers and MSRs as part of VM-entry.
2417 *
2418 * @param pVCpu The cross context virtual CPU structure.
2419 * @param pszInstr The VMX instruction name (for logging purposes).
2420 */
2421IEM_STATIC int iemVmxVmentryCheckGuestControlRegsMsrs(PVMCPU pVCpu, const char *pszInstr)
2422{
2423 /*
2424 * Guest Control Registers, Debug Registers, and MSRs.
2425 * See Intel spec. 26.3.1.1 "Checks on Guest Control Registers, Debug Registers, and MSRs".
2426 */
2427 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
2428 bool const fUnrestrictedGuest = RT_BOOL(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_UNRESTRICTED_GUEST);
2429 const char *const pszFailure = "VM-exit";
2430
2431 /* CR0 reserved bits. */
2432 {
2433 /* CR0 MB1 bits. */
2434 uint64_t u64Cr0Fixed0 = CPUMGetGuestIa32VmxCr0Fixed0(pVCpu);
2435 Assert(!(u64Cr0Fixed0 & (X86_CR0_NW | X86_CR0_CD)));
2436 if (fUnrestrictedGuest)
2437 u64Cr0Fixed0 &= ~(X86_CR0_PE | X86_CR0_PG);
2438 if (~pVmcs->u64GuestCr0.u & u64Cr0Fixed0)
2439 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestCr0Fixed0);
2440
2441 /* CR0 MBZ bits. */
2442 uint64_t const u64Cr0Fixed1 = CPUMGetGuestIa32VmxCr0Fixed1(pVCpu);
2443 if (pVmcs->u64GuestCr0.u & ~u64Cr0Fixed1)
2444 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestCr0Fixed1);
2445
2446         /* Without unrestricted-guest support, VT-x does not support unpaged protected mode. */
2447 if ( !fUnrestrictedGuest
2448 && (pVmcs->u64GuestCr0.u & X86_CR0_PG)
2449 && !(pVmcs->u64GuestCr0.u & X86_CR0_PE))
2450 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestCr0PgPe);
2451 }
2452
2453 /* CR4 reserved bits. */
2454 {
2455 /* CR4 MB1 bits. */
2456 uint64_t const u64Cr4Fixed0 = CPUMGetGuestIa32VmxCr4Fixed0(pVCpu);
2457 if (~pVmcs->u64GuestCr4.u & u64Cr4Fixed0)
2458 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestCr4Fixed0);
2459
2460 /* CR4 MBZ bits. */
2461 uint64_t const u64Cr4Fixed1 = CPUMGetGuestIa32VmxCr4Fixed1(pVCpu);
2462 if (pVmcs->u64GuestCr4.u & ~u64Cr4Fixed1)
2463 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestCr4Fixed1);
2464 }
2465
2466 /* DEBUGCTL MSR. */
2467 if ( (pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
2468 && (pVmcs->u64GuestDebugCtlMsr.u & ~MSR_IA32_DEBUGCTL_VALID_MASK_INTEL))
2469 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestDebugCtl);
2470
2471 /* 64-bit CPU checks. */
2472 bool const fGstInLongMode = RT_BOOL(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
2473 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
2474 {
2475 if (fGstInLongMode)
2476 {
2477 /* PAE must be set. */
2478 if ( (pVmcs->u64GuestCr0.u & X86_CR0_PG)
2479                 && (pVmcs->u64GuestCr4.u & X86_CR4_PAE))
2480 { /* likely */ }
2481 else
2482 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestPae);
2483 }
2484 else
2485 {
2486 /* PCIDE should not be set. */
2487 if (!(pVmcs->u64GuestCr4.u & X86_CR4_PCIDE))
2488 { /* likely */ }
2489 else
2490 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestPcide);
2491 }
2492
2493 /* CR3. */
2494 if (!(pVmcs->u64GuestCr3.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cMaxPhysAddrWidth))
2495 { /* likely */ }
2496 else
2497 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestCr3);
2498
2499 /* DR7. */
2500 if ( (pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
2501 && (pVmcs->u64GuestDr7.u & X86_DR7_MBZ_MASK))
2502 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestDr7);
2503
2504 /* SYSENTER ESP and SYSENTER EIP. */
2505 if ( X86_IS_CANONICAL(pVmcs->u64GuestSysenterEsp.u)
2506 && X86_IS_CANONICAL(pVmcs->u64GuestSysenterEip.u))
2507 { /* likely */ }
2508 else
2509 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSysenterEspEip);
2510 }
2511
2512 /* We don't support IA32_PERF_GLOBAL_CTRL MSR yet. */
2513 Assert(!(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PERF_MSR));
2514
2515 /* PAT MSR. */
2516 if ( (pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PAT_MSR)
2517 && !CPUMIsPatMsrValid(pVmcs->u64GuestPatMsr.u))
2518 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestPatMsr);
2519
2520 /* EFER MSR. */
2521 uint64_t const uValidEferMask = CPUMGetGuestEferMsrValidMask(pVCpu->CTX_SUFF(pVM));
2522 if ( (pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_EFER_MSR)
2523 && (pVmcs->u64GuestEferMsr.u & ~uValidEferMask))
2524 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestEferMsrRsvd);
2525
2526     bool const fGstLma = RT_BOOL(pVmcs->u64GuestEferMsr.u & MSR_K6_EFER_BIT_LMA);
2527     bool const fGstLme = RT_BOOL(pVmcs->u64GuestEferMsr.u & MSR_K6_EFER_BIT_LME);
2528 if ( fGstInLongMode == fGstLma
2529 && ( !(pVmcs->u64GuestCr0.u & X86_CR0_PG)
2530 || fGstLma == fGstLme))
2531 { /* likely */ }
2532 else
2533 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestEferMsr);
2534
2535 /* We don't support IA32_BNDCFGS MSR yet. */
2536 Assert(!(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_BNDCFGS_MSR));
2537
2538 NOREF(pszInstr);
2539 NOREF(pszFailure);
2540 return VINF_SUCCESS;
2541}
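
/*
 * Illustration only (a minimal sketch, not compiled): the fixed-0/fixed-1 idiom used by
 * the CR0/CR4 checks above and by VMXON. Bits set in the fixed-0 MSR must be 1 in the
 * register (MB1) and bits clear in the fixed-1 MSR must be 0 (MBZ). pVmcs refers to the
 * current virtual VMCS as in the function above.
 */
#if 0
    uint64_t const uCr4       = pVmcs->u64GuestCr4.u;
    uint64_t const uCr4Fixed0 = CPUMGetGuestIa32VmxCr4Fixed0(pVCpu);
    uint64_t const uCr4Fixed1 = CPUMGetGuestIa32VmxCr4Fixed1(pVCpu);
    bool const     fMb1BitsOk = !(~uCr4 & uCr4Fixed0);      /* No required-one bit is clear. */
    bool const     fMbzBitsOk = !(uCr4 & ~uCr4Fixed1);      /* No required-zero bit is set.  */
#endif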
2542
2543
2544/**
2545 * Checks guest segment registers, LDTR and TR as part of VM-entry.
2546 *
2547 * @param pVCpu The cross context virtual CPU structure.
2548 * @param pszInstr The VMX instruction name (for logging purposes).
2549 */
2550IEM_STATIC int iemVmxVmentryCheckGuestSegRegs(PVMCPU pVCpu, const char *pszInstr)
2551{
2552 /*
2553 * Segment registers.
2554 * See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers".
2555 */
2556 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
2557 bool const fGstInV86Mode = RT_BOOL(pVmcs->u64GuestRFlags.u & X86_EFL_VM);
2558 bool const fUnrestrictedGuest = RT_BOOL(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_UNRESTRICTED_GUEST);
2559 bool const fGstInLongMode = RT_BOOL(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
2560 const char *const pszFailure = "VM-exit";
2561
2562 /* Selectors. */
2563 if ( !fGstInV86Mode
2564 && !fUnrestrictedGuest
2565 && (pVmcs->GuestSs & X86_SEL_RPL) != (pVmcs->GuestCs & X86_SEL_RPL))
2566 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegSelCsSsRpl);
2567
2568 for (unsigned iSegReg = 0; iSegReg < X86_SREG_COUNT; iSegReg++)
2569 {
2570 CPUMSELREG SelReg;
2571 int rc = iemVmxVmcsGetGuestSegReg(pVmcs, iSegReg, &SelReg);
2572 if (RT_LIKELY(rc == VINF_SUCCESS))
2573 { /* likely */ }
2574 else
2575 return rc;
2576
2577 /*
2578 * Virtual-8086 mode checks.
2579 */
2580 if (fGstInV86Mode)
2581 {
2582 /* Base address. */
2583 if (SelReg.u64Base == (uint64_t)SelReg.Sel << 4)
2584 { /* likely */ }
2585 else
2586 {
2587 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegBaseV86(iSegReg);
2588 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
2589 }
2590
2591 /* Limit. */
2592 if (SelReg.u32Limit == 0xffff)
2593 { /* likely */ }
2594 else
2595 {
2596 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegLimitV86(iSegReg);
2597 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
2598 }
2599
2600 /* Attribute. */
2601 if (SelReg.Attr.u == 0xf3)
2602 { /* likely */ }
2603 else
2604 {
2605 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegAttrV86(iSegReg);
2606 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
2607 }
2608
2609 /* We're done; move to checking the next segment. */
2610 continue;
2611 }
2612
2613 /* Checks done by 64-bit CPUs. */
2614 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
2615 {
2616 /* Base address. */
2617 if ( iSegReg == X86_SREG_FS
2618 || iSegReg == X86_SREG_GS)
2619 {
2620 if (X86_IS_CANONICAL(SelReg.u64Base))
2621 { /* likely */ }
2622 else
2623 {
2624 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegBase(iSegReg);
2625 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
2626 }
2627 }
2628 else if (iSegReg == X86_SREG_CS)
2629 {
2630 if (!RT_HI_U32(SelReg.u64Base))
2631 { /* likely */ }
2632 else
2633 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegBaseCs);
2634 }
2635 else
2636 {
2637 if ( SelReg.Attr.n.u1Unusable
2638 || !RT_HI_U32(SelReg.u64Base))
2639 { /* likely */ }
2640 else
2641 {
2642 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegBase(iSegReg);
2643 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
2644 }
2645 }
2646 }
2647
2648 /*
2649 * Checks outside Virtual-8086 mode.
2650 */
2651 uint8_t const uSegType = SelReg.Attr.n.u4Type;
2652 uint8_t const fCodeDataSeg = SelReg.Attr.n.u1DescType;
2653 uint8_t const fUsable = !SelReg.Attr.n.u1Unusable;
2654 uint8_t const uDpl = SelReg.Attr.n.u2Dpl;
2655 uint8_t const fPresent = SelReg.Attr.n.u1Present;
2656 uint8_t const uGranularity = SelReg.Attr.n.u1Granularity;
2657 uint8_t const uDefBig = SelReg.Attr.n.u1DefBig;
2658 uint8_t const fSegLong = SelReg.Attr.n.u1Long;
2659
2660 /* Code or usable segment. */
2661 if ( iSegReg == X86_SREG_CS
2662 || fUsable)
2663 {
2664 /* Reserved bits (bits 31:17 and bits 11:8). */
2665 if (!(SelReg.Attr.u & 0xfffe0f00))
2666 { /* likely */ }
2667 else
2668 {
2669 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegAttrRsvd(iSegReg);
2670 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
2671 }
2672
2673 /* Descriptor type. */
2674 if (fCodeDataSeg)
2675 { /* likely */ }
2676 else
2677 {
2678 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegAttrDescType(iSegReg);
2679 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
2680 }
2681
2682 /* Present. */
2683 if (fPresent)
2684 { /* likely */ }
2685 else
2686 {
2687 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegAttrPresent(iSegReg);
2688 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
2689 }
2690
2691 /* Granularity. */
2692 if ( ((SelReg.u32Limit & 0x00000fff) == 0x00000fff || !uGranularity)
2693 && ((SelReg.u32Limit & 0xfff00000) == 0x00000000 || uGranularity))
2694 { /* likely */ }
2695 else
2696 {
2697 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegAttrGran(iSegReg);
2698 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
2699 }
2700 }
2701
2702 if (iSegReg == X86_SREG_CS)
2703 {
2704 /* Segment Type and DPL. */
2705 if ( uSegType == (X86_SEL_TYPE_RW | X86_SEL_TYPE_ACCESSED)
2706 && fUnrestrictedGuest)
2707 {
2708 if (uDpl == 0)
2709 { /* likely */ }
2710 else
2711 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrCsDplZero);
2712 }
2713 else if ( uSegType == (X86_SEL_TYPE_CODE | X86_SEL_TYPE_ACCESSED)
2714 || uSegType == (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ | X86_SEL_TYPE_ACCESSED))
2715 {
2716 X86DESCATTR AttrSs; AttrSs.u = pVmcs->u32GuestSsAttr;
2717 if (uDpl == AttrSs.n.u2Dpl)
2718 { /* likely */ }
2719 else
2720 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrCsDplEqSs);
2721 }
2722 else if ((uSegType & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF | X86_SEL_TYPE_ACCESSED))
2723 == (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF | X86_SEL_TYPE_ACCESSED))
2724 {
2725 X86DESCATTR AttrSs; AttrSs.u = pVmcs->u32GuestSsAttr;
2726 if (uDpl <= AttrSs.n.u2Dpl)
2727 { /* likely */ }
2728 else
2729 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrCsDplLtSs);
2730 }
2731 else
2732 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrCsType);
2733
2734 /* Def/Big. */
2735 if ( fGstInLongMode
2736 && fSegLong)
2737 {
2738 if (uDefBig == 0)
2739 { /* likely */ }
2740 else
2741 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrCsDefBig);
2742 }
2743 }
2744 else if (iSegReg == X86_SREG_SS)
2745 {
2746 /* Segment Type. */
2747 if ( !fUsable
2748 || uSegType == (X86_SEL_TYPE_RW | X86_SEL_TYPE_ACCESSED)
2749 || uSegType == (X86_SEL_TYPE_DOWN | X86_SEL_TYPE_RW | X86_SEL_TYPE_ACCESSED))
2750 { /* likely */ }
2751 else
2752 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrSsType);
2753
2754 /* DPL. */
2755             if (!fUnrestrictedGuest)
2756 {
2757 if (uDpl == (SelReg.Sel & X86_SEL_RPL))
2758 { /* likely */ }
2759 else
2760 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrSsDplEqRpl);
2761 }
2762 X86DESCATTR AttrCs; AttrCs.u = pVmcs->u32GuestCsAttr;
2763             if (   AttrCs.n.u4Type == (X86_SEL_TYPE_RW | X86_SEL_TYPE_ACCESSED)
2764                 || !(pVmcs->u64GuestCr0.u & X86_CR0_PE))
2765 {
2766 if (uDpl == 0)
2767 { /* likely */ }
2768 else
2769 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrSsDplZero);
2770 }
2771 }
2772 else
2773 {
2774 /* DS, ES, FS, GS. */
2775 if (fUsable)
2776 {
2777 /* Segment type. */
2778 if (uSegType & X86_SEL_TYPE_ACCESSED)
2779 { /* likely */ }
2780 else
2781 {
2782 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegAttrTypeAcc(iSegReg);
2783 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
2784 }
2785
2786 if ( !(uSegType & X86_SEL_TYPE_CODE)
2787 || (uSegType & X86_SEL_TYPE_READ))
2788 { /* likely */ }
2789 else
2790 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrCsTypeRead);
2791
2792 /* DPL. */
2793 if ( !fUnrestrictedGuest
2794 && uSegType <= (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ | X86_SEL_TYPE_ACCESSED))
2795 {
2796 if (uDpl >= (SelReg.Sel & X86_SEL_RPL))
2797 { /* likely */ }
2798 else
2799 {
2800 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegAttrDplRpl(iSegReg);
2801 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
2802 }
2803 }
2804 }
2805 }
2806 }
2807
2808 /*
2809 * LDTR.
2810 */
2811 {
2812 CPUMSELREG Ldtr;
2813 Ldtr.Sel = pVmcs->GuestLdtr;
2814 Ldtr.u32Limit = pVmcs->u32GuestLdtrLimit;
2815 Ldtr.u64Base = pVmcs->u64GuestLdtrBase.u;
2816         Ldtr.Attr.u   = pVmcs->u32GuestLdtrAttr;
2817
2818 if (!Ldtr.Attr.n.u1Unusable)
2819 {
2820 /* Selector. */
2821 if (!(Ldtr.Sel & X86_SEL_LDT))
2822 { /* likely */ }
2823 else
2824 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegSelLdtr);
2825
2826 /* Base. */
2827 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
2828 {
2829 if (X86_IS_CANONICAL(Ldtr.u64Base))
2830 { /* likely */ }
2831 else
2832 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegBaseLdtr);
2833 }
2834
2835 /* Attributes. */
2836 /* Reserved bits (bits 31:17 and bits 11:8). */
2837 if (!(Ldtr.Attr.u & 0xfffe0f00))
2838 { /* likely */ }
2839 else
2840 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrLdtrRsvd);
2841
2842 if (Ldtr.Attr.n.u4Type == X86_SEL_TYPE_SYS_LDT)
2843 { /* likely */ }
2844 else
2845 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrLdtrType);
2846
2847 if (!Ldtr.Attr.n.u1DescType)
2848 { /* likely */ }
2849 else
2850 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrLdtrDescType);
2851
2852 if (Ldtr.Attr.n.u1Present)
2853 { /* likely */ }
2854 else
2855 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrLdtrPresent);
2856
2857 if ( ((Ldtr.u32Limit & 0x00000fff) == 0x00000fff || !Ldtr.Attr.n.u1Granularity)
2858 && ((Ldtr.u32Limit & 0xfff00000) == 0x00000000 || Ldtr.Attr.n.u1Granularity))
2859 { /* likely */ }
2860 else
2861 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrLdtrGran);
2862 }
2863 }
2864
2865 /*
2866 * TR.
2867 */
2868 {
2869 CPUMSELREG Tr;
2870 Tr.Sel = pVmcs->GuestTr;
2871 Tr.u32Limit = pVmcs->u32GuestTrLimit;
2872 Tr.u64Base = pVmcs->u64GuestTrBase.u;
2873         Tr.Attr.u   = pVmcs->u32GuestTrAttr;
2874
2875 /* Selector. */
2876 if (!(Tr.Sel & X86_SEL_LDT))
2877 { /* likely */ }
2878 else
2879 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegSelTr);
2880
2881 /* Base. */
2882 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
2883 {
2884 if (X86_IS_CANONICAL(Tr.u64Base))
2885 { /* likely */ }
2886 else
2887 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegBaseTr);
2888 }
2889
2890 /* Attributes. */
2891 /* Reserved bits (bits 31:17 and bits 11:8). */
2892 if (!(Tr.Attr.u & 0xfffe0f00))
2893 { /* likely */ }
2894 else
2895 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrTrRsvd);
2896
2897 if (!Tr.Attr.n.u1Unusable)
2898 { /* likely */ }
2899 else
2900 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrTrUnusable);
2901
2902 if ( Tr.Attr.n.u4Type == X86_SEL_TYPE_SYS_386_TSS_BUSY
2903 || ( !fGstInLongMode
2904 && Tr.Attr.n.u4Type == X86_SEL_TYPE_SYS_286_TSS_BUSY))
2905 { /* likely */ }
2906 else
2907 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrTrType);
2908
2909 if (!Tr.Attr.n.u1DescType)
2910 { /* likely */ }
2911 else
2912 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrTrDescType);
2913
2914 if (Tr.Attr.n.u1Present)
2915 { /* likely */ }
2916 else
2917 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrTrPresent);
2918
2919 if ( ((Tr.u32Limit & 0x00000fff) == 0x00000fff || !Tr.Attr.n.u1Granularity)
2920 && ((Tr.u32Limit & 0xfff00000) == 0x00000000 || Tr.Attr.n.u1Granularity))
2921 { /* likely */ }
2922 else
2923 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrTrGran);
2924 }
2925
2926 NOREF(pszInstr);
2927 NOREF(pszFailure);
2928 return VINF_SUCCESS;
2929}
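
/*
 * Illustration only (a minimal sketch, not compiled): the limit/granularity consistency
 * rule applied to CS/SS/DS/ES/FS/GS, LDTR and TR above. With G=0 the top 12 limit bits
 * must be zero; with G=1 the low 12 limit bits must all be ones. The helper name is
 * hypothetical.
 */
#if 0
    DECLINLINE(bool) iemVmxExampleIsSegLimitGranValid(uint32_t u32Limit, bool fGranularity)
    {
        return ((u32Limit & 0x00000fff) == 0x00000fff || !fGranularity)
            && ((u32Limit & 0xfff00000) == 0x00000000 ||  fGranularity);
    }
#endif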
2930
2931
2932/**
2933 * Checks guest GDTR and IDTR as part of VM-entry.
2934 *
2935 * @param pVCpu The cross context virtual CPU structure.
2936 * @param pszInstr The VMX instruction name (for logging purposes).
2937 */
2938IEM_STATIC int iemVmxVmentryCheckGuestGdtrIdtr(PVMCPU pVCpu, const char *pszInstr)
2939{
2940 /*
2941 * GDTR and IDTR.
2942 * See Intel spec. 26.3.1.3 "Checks on Guest Descriptor-Table Registers".
2943 */
2944 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
2945 const char *const pszFailure = "VM-exit";
2946 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
2947 {
2948 /* Base. */
2949 if (X86_IS_CANONICAL(pVmcs->u64GuestGdtrBase.u))
2950 { /* likely */ }
2951 else
2952 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestGdtrBase);
2953
2954 if (X86_IS_CANONICAL(pVmcs->u64GuestIdtrBase.u))
2955 { /* likely */ }
2956 else
2957 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestIdtrBase);
2958 }
2959
2960 /* Limit. */
2961 if (!RT_HI_U16(pVmcs->u32GuestGdtrLimit))
2962 { /* likely */ }
2963 else
2964 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestGdtrLimit);
2965
2966 if (!RT_HI_U16(pVmcs->u32GuestIdtrLimit))
2967 { /* likely */ }
2968 else
2969 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestIdtrLimit);
2970
2971 NOREF(pszInstr);
2972 NOREF(pszFailure);
2973 return VINF_SUCCESS;
2974}
2975
2976
2977/**
2978 * Checks guest RIP and RFLAGS as part of VM-entry.
2979 *
2980 * @param pVCpu The cross context virtual CPU structure.
2981 * @param pszInstr The VMX instruction name (for logging purposes).
2982 */
2983IEM_STATIC int iemVmxVmentryCheckGuestRipRFlags(PVMCPU pVCpu, const char *pszInstr)
2984{
2985 /*
2986 * RIP and RFLAGS.
2987 * See Intel spec. 26.3.1.4 "Checks on Guest RIP and RFLAGS".
2988 */
2989 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
2990 const char *const pszFailure = "VM-exit";
2991 bool const fGstInLongMode = RT_BOOL(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
2992
2993 /* RIP. */
2994 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
2995 {
2996 X86DESCATTR AttrCs; AttrCs.u = pVmcs->u32GuestCsAttr;
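        /* Bits 63:32 of RIP must be zero unless the guest will be executing in 64-bit mode (IA-32e mode with CS.L set). */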
2997 if ( !fGstInLongMode
2998 || !AttrCs.n.u1Long)
2999 {
3000 if (!RT_HI_U32(pVmcs->u64GuestRip.u))
3001 { /* likely */ }
3002 else
3003 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestRipRsvd);
3004 }
3005
3006 if ( fGstInLongMode
3007 && AttrCs.n.u1Long)
3008 {
3009 Assert(IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cMaxLinearAddrWidth == 48); /* Canonical. */
3010 if ( IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cMaxLinearAddrWidth < 64
3011 && X86_IS_CANONICAL(pVmcs->u64GuestRip.u))
3012 { /* likely */ }
3013 else
3014 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestRip);
3015 }
3016 }
3017
3018 /* RFLAGS (bits 63:22 (or 31:22), bits 15, 5, 3 are reserved, bit 1 MB1). */
3019 uint64_t const uGuestRFlags = IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode ? pVmcs->u64GuestRFlags.u
3020 : pVmcs->u64GuestRFlags.s.Lo;
3021 if ( !(uGuestRFlags & ~(X86_EFL_LIVE_MASK | X86_EFL_RA1_MASK))
3022 && (uGuestRFlags & X86_EFL_RA1_MASK) == X86_EFL_RA1_MASK)
3023 { /* likely */ }
3024 else
3025 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestRFlagsRsvd);
3026
3027 if ( fGstInLongMode
3028 || !(pVmcs->u64GuestCr0.u & X86_CR0_PE))
3029 {
3030 if (!(uGuestRFlags & X86_EFL_VM))
3031 { /* likely */ }
3032 else
3033 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestRFlagsVm);
3034 }
3035
3036 if ( VMX_ENTRY_INT_INFO_IS_VALID(pVmcs->u32EntryIntInfo)
3037 && VMX_ENTRY_INT_INFO_TYPE(pVmcs->u32EntryIntInfo) == VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
3038 {
3039 if (uGuestRFlags & X86_EFL_IF)
3040 { /* likely */ }
3041 else
3042 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestRFlagsIf);
3043 }
3044
3045 NOREF(pszInstr);
3046 NOREF(pszFailure);
3047 return VINF_SUCCESS;
3048}
3049
3050
3051/**
3052 * Checks guest non-register state as part of VM-entry.
3053 *
3054 * @param pVCpu The cross context virtual CPU structure.
3055 * @param pszInstr The VMX instruction name (for logging purposes).
3056 */
3057IEM_STATIC int iemVmxVmentryCheckGuestNonRegState(PVMCPU pVCpu, const char *pszInstr)
3058{
3059 /*
3060 * Guest non-register state.
3061 * See Intel spec. 26.3.1.5 "Checks on Guest Non-Register State".
3062 */
3063 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
3064 const char *const pszFailure = "VM-exit";
3065
3066 /*
3067 * Activity state.
3068 */
3069 uint64_t const u64GuestVmxMiscMsr = CPUMGetGuestIa32VmxMisc(pVCpu);
3070 uint32_t const fActivityStateMask = RT_BF_GET(u64GuestVmxMiscMsr, VMX_BF_MISC_ACTIVITY_STATES);
3071 if (!(pVmcs->u32GuestActivityState & fActivityStateMask))
3072 { /* likely */ }
3073 else
3074 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestActStateRsvd);
3075
3076 X86DESCATTR AttrSs; AttrSs.u = pVmcs->u32GuestSsAttr;
3077 if ( !AttrSs.n.u2Dpl
3078 || pVmcs->u32GuestActivityState != VMX_VMCS_GUEST_ACTIVITY_HLT)
3079 { /* likely */ }
3080 else
3081 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestActStateSsDpl);
3082
3083 if ( pVmcs->u32GuestIntrState == VMX_VMCS_GUEST_INT_STATE_BLOCK_STI
3084 || pVmcs->u32GuestIntrState == VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS)
3085 {
3086 if (pVmcs->u32GuestActivityState == VMX_VMCS_GUEST_ACTIVITY_ACTIVE)
3087 { /* likely */ }
3088 else
3089 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestActStateStiMovSs);
3090 }
3091
3092 if (VMX_ENTRY_INT_INFO_IS_VALID(pVmcs->u32EntryIntInfo))
3093 {
3094 uint8_t const uIntType = VMX_ENTRY_INT_INFO_TYPE(pVmcs->u32EntryIntInfo);
3095 uint8_t const uVector = VMX_ENTRY_INT_INFO_VECTOR(pVmcs->u32EntryIntInfo);
3096 AssertCompile(VMX_V_GUEST_ACTIVITY_STATE_MASK == (VMX_VMCS_GUEST_ACTIVITY_HLT | VMX_VMCS_GUEST_ACTIVITY_SHUTDOWN));
3097 switch (pVmcs->u32GuestActivityState)
3098 {
3099 case VMX_VMCS_GUEST_ACTIVITY_HLT:
3100 {
3101 if ( uIntType == VMX_ENTRY_INT_INFO_TYPE_EXT_INT
3102 || uIntType == VMX_ENTRY_INT_INFO_TYPE_NMI
3103 || ( uIntType == VMX_ENTRY_INT_INFO_TYPE_HW_XCPT
3104 && ( uVector == X86_XCPT_DB
3105 || uVector == X86_XCPT_MC))
3106 || ( uIntType == VMX_ENTRY_INT_INFO_TYPE_OTHER_EVENT
3107 && uVector == VMX_ENTRY_INT_INFO_VECTOR_MTF))
3108 { /* likely */ }
3109 else
3110 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestActStateHlt);
3111 break;
3112 }
3113
3114 case VMX_VMCS_GUEST_ACTIVITY_SHUTDOWN:
3115 {
3116 if ( uIntType == VMX_ENTRY_INT_INFO_TYPE_NMI
3117 || ( uIntType == VMX_ENTRY_INT_INFO_TYPE_HW_XCPT
3118 && uVector == X86_XCPT_MC))
3119 { /* likely */ }
3120 else
3121 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestActStateShutdown);
3122 break;
3123 }
3124
3125 case VMX_VMCS_GUEST_ACTIVITY_ACTIVE:
3126 default:
3127 break;
3128 }
3129 }
3130
3131 /*
3132 * Interruptibility state.
3133 */
3134 if (!(pVmcs->u32GuestIntrState & ~VMX_VMCS_GUEST_INT_STATE_MASK))
3135 { /* likely */ }
3136 else
3137 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestIntStateRsvd);
3138
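    /* Blocking by STI and blocking by MOV-SS must not both be set at the same time. */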
3139 if ((pVmcs->u32GuestIntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS | VMX_VMCS_GUEST_INT_STATE_BLOCK_STI))
3140 != (VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS | VMX_VMCS_GUEST_INT_STATE_BLOCK_STI))
3141 { /* likely */ }
3142 else
3143 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestIntStateStiMovSs);
3144
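    /* Blocking by STI requires RFLAGS.IF to be set. */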
3145 if ( (pVmcs->u64GuestRFlags.u & X86_EFL_IF)
3146 || !(pVmcs->u32GuestIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI))
3147 { /* likely */ }
3148 else
3149 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestIntStateRFlagsSti);
3150
3151 if (VMX_ENTRY_INT_INFO_IS_VALID(pVmcs->u32EntryIntInfo))
3152 {
3153 uint8_t const uIntType = VMX_ENTRY_INT_INFO_TYPE(pVmcs->u32EntryIntInfo);
3154 if (uIntType == VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
3155 {
3156 if (!(pVmcs->u32GuestIntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS | VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)))
3157 { /* likely */ }
3158 else
3159 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestIntStateExtInt);
3160 }
3161 else if (uIntType == VMX_ENTRY_INT_INFO_TYPE_NMI)
3162 {
3163 if (!(pVmcs->u32GuestIntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS | VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)))
3164 { /* likely */ }
3165 else
3166 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestIntStateNmi);
3167
3168 if ( !(pVmcs->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
3169 || !(pVmcs->u32GuestIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI))
3170 { /* likely */ }
3171 else
3172 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestIntStateVirtNmi);
3173 }
3174 }
3175
3176 /* We don't support SMM yet. So blocking-by-SMIs must not be set. */
3177 if (!(pVmcs->u32GuestIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI))
3178 { /* likely */ }
3179 else
3180 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestIntStateSmi);
3181
3182 /* We don't support SGX yet. So enclave-interruption must not be set. */
3183 if (!(pVmcs->u32GuestIntrState & VMX_VMCS_GUEST_INT_STATE_ENCLAVE))
3184 { /* likely */ }
3185 else
3186 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestIntStateEnclave);
3187
3188 /*
3189 * Pending debug exceptions.
3190 */
3191 uint64_t const uPendingDbgXcpt = IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode
3192 ? pVmcs->u64GuestPendingDbgXcpt.u
3193 : pVmcs->u64GuestPendingDbgXcpt.s.Lo;
3194 if (!(uPendingDbgXcpt & ~VMX_VMCS_GUEST_PENDING_DEBUG_VALID_MASK))
3195 { /* likely */ }
3196 else
3197 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestPndDbgXcptRsvd);
3198
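    /* When interrupts are inhibited by STI/MOV-SS or the activity state is HLT, the BS (single-step) pending
       debug exception must be consistent with RFLAGS.TF and IA32_DEBUGCTL.BTF. */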
3199 if ( (pVmcs->u32GuestIntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS | VMX_VMCS_GUEST_INT_STATE_BLOCK_STI))
3200 || pVmcs->u32GuestActivityState == VMX_VMCS_GUEST_ACTIVITY_HLT)
3201 {
3202 if ( (pVmcs->u64GuestRFlags.u & X86_EFL_TF)
3203 && !(pVmcs->u64GuestDebugCtlMsr.u & MSR_IA32_DEBUGCTL_BTF)
3204 && !(uPendingDbgXcpt & VMX_VMCS_GUEST_PENDING_DEBUG_XCPT_BS))
3205 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestPndDbgXcptBsTf);
3206
3207 if ( ( !(pVmcs->u64GuestRFlags.u & X86_EFL_TF)
3208 || (pVmcs->u64GuestDebugCtlMsr.u & MSR_IA32_DEBUGCTL_BTF))
3209 && (uPendingDbgXcpt & VMX_VMCS_GUEST_PENDING_DEBUG_XCPT_BS))
3210 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestPndDbgXcptBsNoTf);
3211 }
3212
3213 /* We don't support RTM (Restricted Transactional Memory) yet. */
3214 if (uPendingDbgXcpt & VMX_VMCS_GUEST_PENDING_DEBUG_RTM)
3215 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestPndDbgXcptRtm);
3216
3217 /*
3218 * VMCS link pointer.
3219 */
3220 if (pVmcs->u64VmcsLinkPtr.u != UINT64_C(0xffffffffffffffff))
3221 {
3222 RTGCPHYS const GCPhysShadowVmcs = pVmcs->u64VmcsLinkPtr.u;
3223 /* We don't support SMM yet (so VMCS link pointer cannot be the current VMCS). */
3224 if (GCPhysShadowVmcs != IEM_VMX_GET_CURRENT_VMCS(pVCpu))
3225 { /* likely */ }
3226 else
3227 {
3228 pVmcs->u64ExitQual.u = VMX_ENTRY_FAIL_QUAL_VMCS_LINK_PTR;
3229 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VmcsLinkPtrCurVmcs);
3230 }
3231
3232 /* Validate the address. */
3233 if ( (GCPhysShadowVmcs & X86_PAGE_4K_OFFSET_MASK)
3234 || (GCPhysShadowVmcs >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
3235 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysShadowVmcs))
3236 {
3237 pVmcs->u64ExitQual.u = VMX_ENTRY_FAIL_QUAL_VMCS_LINK_PTR;
3238 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrVmcsLinkPtr);
3239 }
3240
3241 /* Read the VMCS-link pointer from guest memory. */
3242 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pShadowVmcs));
3243 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pShadowVmcs),
3244 GCPhysShadowVmcs, VMX_V_VMCS_SIZE);
3245 if (RT_FAILURE(rc))
3246 {
3247 pVmcs->u64ExitQual.u = VMX_ENTRY_FAIL_QUAL_VMCS_LINK_PTR;
3248 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VmcsLinkPtrReadPhys);
3249 }
3250
3251 /* Verify the VMCS revision specified by the guest matches what we reported to the guest. */
3252 if (pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pShadowVmcs)->u32VmcsRevId.n.u31RevisionId == VMX_V_VMCS_REVISION_ID)
3253 { /* likely */ }
3254 else
3255 {
3256 pVmcs->u64ExitQual.u = VMX_ENTRY_FAIL_QUAL_VMCS_LINK_PTR;
3257 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VmcsLinkPtrRevId);
3258 }
3259
3260 /* Verify the shadow bit is set if VMCS shadowing is enabled. */
3261 if ( !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING)
3262 || pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pShadowVmcs)->u32VmcsRevId.n.fIsShadowVmcs)
3263 { /* likely */ }
3264 else
3265 {
3266 pVmcs->u64ExitQual.u = VMX_ENTRY_FAIL_QUAL_VMCS_LINK_PTR;
3267 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VmcsLinkPtrShadow);
3268 }
3269
3270 /* Finally update our cache of the guest physical address of the shadow VMCS. */
3271 pVCpu->cpum.GstCtx.hwvirt.vmx.GCPhysShadowVmcs = GCPhysShadowVmcs;
3272 }
3273
3274 NOREF(pszInstr);
3275 NOREF(pszFailure);
3276 return VINF_SUCCESS;
3277}
3278
3279
3280/**
3281 * Checks if the PDPTEs referenced by the nested-guest CR3 are valid as part of
3282 * VM-entry.
3283 *
3284 * @returns VBox status code.
3285 * @param pVCpu The cross context virtual CPU structure.
3286 * @param pszInstr The VMX instruction name (for logging purposes).
3287 * @param pVmcs Pointer to the virtual VMCS.
3288 */
3289IEM_STATIC int iemVmxVmentryCheckGuestPdptesForCr3(PVMCPU pVCpu, const char *pszInstr, PVMXVVMCS pVmcs)
3290{
3291 /*
3292 * Check PDPTEs.
3293 * See Intel spec. 4.4.1 "PDPTE Registers".
3294 */
3295 uint64_t const uGuestCr3 = pVmcs->u64GuestCr3.u & X86_CR3_PAE_PAGE_MASK;
3296 const char *const pszFailure = "VM-exit";
3297
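    /* With PAE paging, CR3 references a 32-byte aligned table of 4 PDPTEs; read all of them from guest memory. */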
3298 X86PDPE aPdptes[X86_PG_PAE_PDPE_ENTRIES];
3299 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), (void *)&aPdptes[0], uGuestCr3, sizeof(aPdptes));
3300 if (RT_SUCCESS(rc))
3301 {
3302 for (unsigned iPdpte = 0; iPdpte < RT_ELEMENTS(aPdptes); iPdpte++)
3303 {
3304 if ( !(aPdptes[iPdpte].u & X86_PDPE_P)
3305 || !(aPdptes[iPdpte].u & X86_PDPE_PAE_MBZ_MASK))
3306 { /* likely */ }
3307 else
3308 {
3309 pVmcs->u64ExitQual.u = VMX_ENTRY_FAIL_QUAL_PDPTE;
3310 VMXVDIAG const enmDiag = iemVmxGetDiagVmentryPdpteRsvd(iPdpte);
3311 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
3312 }
3313 }
3314 }
3315 else
3316 {
3317 pVmcs->u64ExitQual.u = VMX_ENTRY_FAIL_QUAL_PDPTE;
3318 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestPdpteCr3ReadPhys);
3319 }
3320
3321 NOREF(pszFailure);
3322 return rc;
3323}
3324
3325
3326/**
3327 * Checks guest PDPTEs as part of VM-entry.
3328 *
3329 * @param pVCpu The cross context virtual CPU structure.
3330 * @param pszInstr The VMX instruction name (for logging purposes).
3331 */
3332IEM_STATIC int iemVmxVmentryCheckGuestPdptes(PVMCPU pVCpu, const char *pszInstr)
3333{
3334 /*
3335 * Guest PDPTEs.
3336 * See Intel spec. 26.3.1.5 "Checks on Guest Page-Directory-Pointer-Table Entries".
3337 */
3338 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
3339 bool const fGstInLongMode = RT_BOOL(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
3340
3341 /* Check PDPTEs if the VM-entry is to a guest using PAE paging. */
3342 int rc;
3343 if ( !fGstInLongMode
3344 && (pVmcs->u64GuestCr4.u & X86_CR4_PAE)
3345 && (pVmcs->u64GuestCr0.u & X86_CR0_PG))
3346 {
3347 /*
3348 * We don't support nested-paging for nested-guests yet.
3349 *
3350 * Without nested-paging for nested-guests, PDPTEs in the VMCS are not used,
3351 * rather we need to check the PDPTEs referenced by the guest CR3.
3352 */
3353 rc = iemVmxVmentryCheckGuestPdptesForCr3(pVCpu, pszInstr, pVmcs);
3354 }
3355 else
3356 rc = VINF_SUCCESS;
3357 return rc;
3358}
3359
3360
3361/**
3362 * Checks guest-state as part of VM-entry.
3363 *
3364 * @returns VBox status code.
3365 * @param pVCpu The cross context virtual CPU structure.
3366 * @param pszInstr The VMX instruction name (for logging purposes).
3367 */
3368IEM_STATIC int iemVmxVmentryCheckGuestState(PVMCPU pVCpu, const char *pszInstr)
3369{
3370 /* Check control registers, debug registers and MSRs. */
3371 int rc = iemVmxVmentryCheckGuestControlRegsMsrs(pVCpu, pszInstr);
3372 if (RT_SUCCESS(rc))
3373 {
3374 /* Check guest segment registers, LDTR, TR. */
3375 rc = iemVmxVmentryCheckGuestSegRegs(pVCpu, pszInstr);
3376 if (RT_SUCCESS(rc))
3377 {
3378 /* Check guest GDTR and IDTR. */
3379 rc = iemVmxVmentryCheckGuestGdtrIdtr(pVCpu, pszInstr);
3380 if (RT_SUCCESS(rc))
3381 {
3382 /* Check guest RIP and RFLAGS. */
3383 rc = iemVmxVmentryCheckGuestRipRFlags(pVCpu, pszInstr);
3384 if (RT_SUCCESS(rc))
3385 {
3386 /* Check guest non-register state. */
3387 rc = iemVmxVmentryCheckGuestNonRegState(pVCpu, pszInstr);
3388 if (RT_SUCCESS(rc))
3389 {
3390 /* Check guest PDPTEs. */
3391 return iemVmxVmentryCheckGuestPdptes(pVCpu, pszInstr);
3392 }
3393 }
3394 }
3395 }
3396 }
3397 return rc;
3398}
3399
3400
3401/**
3402 * Checks host-state as part of VM-entry.
3403 *
3404 * @returns VBox status code.
3405 * @param pVCpu The cross context virtual CPU structure.
3406 * @param pszInstr The VMX instruction name (for logging purposes).
3407 */
3408IEM_STATIC int iemVmxVmentryCheckHostState(PVMCPU pVCpu, const char *pszInstr)
3409{
3410 /*
3411 * Host Control Registers and MSRs.
3412 * See Intel spec. 26.2.2 "Checks on Host Control Registers and MSRs".
3413 */
3414 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
3415 const char * const pszFailure = "VMFail";
3416
3417 /* CR0 reserved bits. */
3418 {
3419 /* CR0 MB1 bits. */
3420 uint64_t const u64Cr0Fixed0 = CPUMGetGuestIa32VmxCr0Fixed0(pVCpu);
3421 if (~pVmcs->u64HostCr0.u & u64Cr0Fixed0)
3422 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostCr0Fixed0);
3423
3424 /* CR0 MBZ bits. */
3425 uint64_t const u64Cr0Fixed1 = CPUMGetGuestIa32VmxCr0Fixed1(pVCpu);
3426 if (pVmcs->u64HostCr0.u & ~u64Cr0Fixed1)
3427 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostCr0Fixed1);
3428 }
3429
3430 /* CR4 reserved bits. */
3431 {
3432 /* CR4 MB1 bits. */
3433 uint64_t const u64Cr4Fixed0 = CPUMGetGuestIa32VmxCr4Fixed0(pVCpu);
3434 if (~pVmcs->u64HostCr4.u & u64Cr4Fixed0)
3435 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostCr4Fixed0);
3436
3437 /* CR4 MBZ bits. */
3438 uint64_t const u64Cr4Fixed1 = CPUMGetGuestIa32VmxCr4Fixed1(pVCpu);
3439 if (pVmcs->u64HostCr4.u & ~u64Cr4Fixed1)
3440 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostCr4Fixed1);
3441 }
3442
3443 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
3444 {
3445 /* CR3 reserved bits. */
3446 if (!(pVmcs->u64HostCr3.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cMaxPhysAddrWidth))
3447 { /* likely */ }
3448 else
3449 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostCr3);
3450
3451 /* SYSENTER ESP and SYSENTER EIP. */
3452 if ( X86_IS_CANONICAL(pVmcs->u64HostSysenterEsp.u)
3453 && X86_IS_CANONICAL(pVmcs->u64HostSysenterEip.u))
3454 { /* likely */ }
3455 else
3456 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostSysenterEspEip);
3457 }
3458
3459 /* We don't support IA32_PERF_GLOBAL_CTRL MSR yet. */
3460 Assert(!(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_LOAD_PERF_MSR));
3461
3462 /* PAT MSR. */
3463 if ( !(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_LOAD_PAT_MSR)
3464 || CPUMIsPatMsrValid(pVmcs->u64HostPatMsr.u))
3465 { /* likely */ }
3466 else
3467 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostPatMsr);
3468
3469 /* EFER MSR. */
3470 uint64_t const uValidEferMask = CPUMGetGuestEferMsrValidMask(pVCpu->CTX_SUFF(pVM));
3471 if ( !(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_LOAD_EFER_MSR)
3472 || !(pVmcs->u64HostEferMsr.u & ~uValidEferMask))
3473 { /* likely */ }
3474 else
3475 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostEferMsrRsvd);
3476
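    /* The host address-space size VM-exit control must be consistent with EFER.LMA and EFER.LME in the host EFER field. */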
3477 bool const fHostInLongMode = RT_BOOL(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE);
3478 bool const fHostLma = RT_BOOL(pVmcs->u64HostEferMsr.u & MSR_K6_EFER_BIT_LMA);
3479 bool const fHostLme = RT_BOOL(pVmcs->u64HostEferMsr.u & MSR_K6_EFER_BIT_LME);
3480 if ( fHostInLongMode == fHostLma
3481 && fHostInLongMode == fHostLme)
3482 { /* likely */ }
3483 else
3484 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostEferMsr);
3485
3486 /*
3487 * Host Segment and Descriptor-Table Registers.
3488 * See Intel spec. 26.2.3 "Checks on Host Segment and Descriptor-Table Registers".
3489 */
3490 /* Selector RPL and TI. */
3491 if ( !(pVmcs->HostCs & (X86_SEL_RPL | X86_SEL_LDT))
3492 && !(pVmcs->HostSs & (X86_SEL_RPL | X86_SEL_LDT))
3493 && !(pVmcs->HostDs & (X86_SEL_RPL | X86_SEL_LDT))
3494 && !(pVmcs->HostEs & (X86_SEL_RPL | X86_SEL_LDT))
3495 && !(pVmcs->HostFs & (X86_SEL_RPL | X86_SEL_LDT))
3496 && !(pVmcs->HostGs & (X86_SEL_RPL | X86_SEL_LDT))
3497 && !(pVmcs->HostTr & (X86_SEL_RPL | X86_SEL_LDT)))
3498 { /* likely */ }
3499 else
3500 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostSel);
3501
3502 /* CS and TR selectors cannot be 0. */
3503 if ( pVmcs->HostCs
3504 && pVmcs->HostTr)
3505 { /* likely */ }
3506 else
3507 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostCsTr);
3508
3509 /* SS cannot be 0 if 32-bit host. */
3510 if ( fHostInLongMode
3511 || pVmcs->HostSs)
3512 { /* likely */ }
3513 else
3514 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostSs);
3515
3516 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
3517 {
3518 /* FS, GS, GDTR, IDTR, TR base address. */
3519 if ( X86_IS_CANONICAL(pVmcs->u64HostFsBase.u)
3520 && X86_IS_CANONICAL(pVmcs->u64HostGsBase.u)
3521 && X86_IS_CANONICAL(pVmcs->u64HostGdtrBase.u)
3522 && X86_IS_CANONICAL(pVmcs->u64HostIdtrBase.u)
3523 && X86_IS_CANONICAL(pVmcs->u64HostTrBase.u))
3524 { /* likely */ }
3525 else
3526 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostSegBase);
3527 }
3528
3529 /*
3530 * Host address-space size for 64-bit CPUs.
3531 * See Intel spec. 26.2.4 "Checks Related to Address-Space Size".
3532 */
3533 bool const fGstInLongMode = RT_BOOL(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
3534 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
3535 {
3536 bool const fCpuInLongMode = CPUMIsGuestInLongMode(pVCpu);
3537
3538 /* Logical processor in IA-32e mode. */
3539 if (fCpuInLongMode)
3540 {
3541 if (fHostInLongMode)
3542 {
3543 /* PAE must be set. */
3544 if (pVmcs->u64HostCr4.u & X86_CR4_PAE)
3545 { /* likely */ }
3546 else
3547 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostCr4Pae);
3548
3549 /* RIP must be canonical. */
3550 if (X86_IS_CANONICAL(pVmcs->u64HostRip.u))
3551 { /* likely */ }
3552 else
3553 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostRip);
3554 }
3555 else
3556 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostLongMode);
3557 }
3558 else
3559 {
3560 /* Logical processor is outside IA-32e mode. */
3561 if ( !fGstInLongMode
3562 && !fHostInLongMode)
3563 {
3564 /* PCIDE should not be set. */
3565 if (!(pVmcs->u64HostCr4.u & X86_CR4_PCIDE))
3566 { /* likely */ }
3567 else
3568 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostCr4Pcide);
3569
3570 /* The high 32-bits of RIP MBZ. */
3571 if (!pVmcs->u64HostRip.s.Hi)
3572 { /* likely */ }
3573 else
3574 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostRipRsvd);
3575 }
3576 else
3577 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostGuestLongMode);
3578 }
3579 }
3580 else
3581 {
3582 /* Host address-space size for 32-bit CPUs. */
3583 if ( !fGstInLongMode
3584 && !fHostInLongMode)
3585 { /* likely */ }
3586 else
3587 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostGuestLongModeNoCpu);
3588 }
3589
3590 NOREF(pszInstr);
3591 NOREF(pszFailure);
3592 return VINF_SUCCESS;
3593}
3594
3595
3596/**
3597 * Checks VM-entry controls fields as part of VM-entry.
3598 * See Intel spec. 26.2.1.3 "VM-Entry Control Fields".
3599 *
3600 * @returns VBox status code.
3601 * @param pVCpu The cross context virtual CPU structure.
3602 * @param pszInstr The VMX instruction name (for logging purposes).
3603 */
3604IEM_STATIC int iemVmxVmentryCheckEntryCtls(PVMCPU pVCpu, const char *pszInstr)
3605{
3606 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
3607 const char * const pszFailure = "VMFail";
3608
3609 /* VM-entry controls. */
3610 VMXCTLSMSR EntryCtls;
3611 EntryCtls.u = CPUMGetGuestIa32VmxEntryCtls(pVCpu);
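    /* Bits that are fixed to 1 (disallowed-0) must be set and bits outside the allowed-1 mask must be clear. */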
3612 if (~pVmcs->u32EntryCtls & EntryCtls.n.disallowed0)
3613 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_EntryCtlsDisallowed0);
3614
3615 if (pVmcs->u32EntryCtls & ~EntryCtls.n.allowed1)
3616 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_EntryCtlsAllowed1);
3617
3618 /* Event injection. */
3619 uint32_t const uIntInfo = pVmcs->u32EntryIntInfo;
3620 if (RT_BF_GET(uIntInfo, VMX_BF_ENTRY_INT_INFO_VALID))
3621 {
3622 /* Type and vector. */
3623 uint8_t const uType = RT_BF_GET(uIntInfo, VMX_BF_ENTRY_INT_INFO_TYPE);
3624 uint8_t const uVector = RT_BF_GET(uIntInfo, VMX_BF_ENTRY_INT_INFO_VECTOR);
3625 uint8_t const uRsvd = RT_BF_GET(uIntInfo, VMX_BF_ENTRY_INT_INFO_RSVD_12_30);
3626 if ( !uRsvd
3627 && HMVmxIsEntryIntInfoTypeValid(IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fVmxMonitorTrapFlag, uType)
3628 && HMVmxIsEntryIntInfoVectorValid(uVector, uType))
3629 { /* likely */ }
3630 else
3631 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_EntryIntInfoTypeVecRsvd);
3632
3633 /* Exception error code. */
3634 if (RT_BF_GET(uIntInfo, VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID))
3635 {
3636 /* Delivery possible only in Unrestricted-guest mode when CR0.PE is set. */
3637 if ( !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_UNRESTRICTED_GUEST)
3638 || (pVmcs->u64GuestCr0.s.Lo & X86_CR0_PE))
3639 { /* likely */ }
3640 else
3641 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_EntryIntInfoErrCodePe);
3642
3643 /* Exceptions that provide an error code. */
3644 if ( uType == VMX_ENTRY_INT_INFO_TYPE_HW_XCPT
3645 && ( uVector == X86_XCPT_DF
3646 || uVector == X86_XCPT_TS
3647 || uVector == X86_XCPT_NP
3648 || uVector == X86_XCPT_SS
3649 || uVector == X86_XCPT_GP
3650 || uVector == X86_XCPT_PF
3651 || uVector == X86_XCPT_AC))
3652 { /* likely */ }
3653 else
3654 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_EntryIntInfoErrCodeVec);
3655
3656 /* Exception error-code reserved bits. */
3657 if (!(pVmcs->u32EntryXcptErrCode & ~VMX_ENTRY_INT_XCPT_ERR_CODE_VALID_MASK))
3658 { /* likely */ }
3659 else
3660 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_EntryXcptErrCodeRsvd);
3661
3662 /* Injecting a software interrupt, software exception or privileged software exception. */
3663 if ( uType == VMX_ENTRY_INT_INFO_TYPE_SW_INT
3664 || uType == VMX_ENTRY_INT_INFO_TYPE_SW_XCPT
3665 || uType == VMX_ENTRY_INT_INFO_TYPE_PRIV_SW_XCPT)
3666 {
3667 /* Instruction length must be in the range 0-15. */
3668 if (pVmcs->u32EntryInstrLen <= VMX_ENTRY_INSTR_LEN_MAX)
3669 { /* likely */ }
3670 else
3671 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_EntryInstrLen);
3672
3673 /* Instruction length of 0 is allowed only when its CPU feature is present. */
3674 if ( pVmcs->u32EntryInstrLen == 0
3675 && !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fVmxEntryInjectSoftInt)
3676 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_EntryInstrLenZero);
3677 }
3678 }
3679 }
3680
3681 /* VM-entry MSR-load count and VM-entry MSR-load area address. */
3682 if (pVmcs->u32EntryMsrLoadCount)
3683 {
3684 if ( (pVmcs->u64AddrEntryMsrLoad.u & VMX_AUTOMSR_OFFSET_MASK)
3685 || (pVmcs->u64AddrEntryMsrLoad.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
3686 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), pVmcs->u64AddrEntryMsrLoad.u))
3687 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrEntryMsrLoad);
3688 }
3689
3690 Assert(!(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_ENTRY_TO_SMM)); /* We don't support SMM yet. */
3691 Assert(!(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_DEACTIVATE_DUAL_MON)); /* We don't support dual-monitor treatment yet. */
3692
3693 NOREF(pszInstr);
3694 NOREF(pszFailure);
3695 return VINF_SUCCESS;
3696}
3697
3698
3699/**
3700 * Checks VM-exit controls fields as part of VM-entry.
3701 * See Intel spec. 26.2.1.2 "VM-Exit Control Fields".
3702 *
3703 * @returns VBox status code.
3704 * @param pVCpu The cross context virtual CPU structure.
3705 * @param pszInstr The VMX instruction name (for logging purposes).
3706 */
3707IEM_STATIC int iemVmxVmentryCheckExitCtls(PVMCPU pVCpu, const char *pszInstr)
3708{
3709 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
3710 const char * const pszFailure = "VMFail";
3711
3712 /* VM-exit controls. */
3713 VMXCTLSMSR ExitCtls;
3714 ExitCtls.u = CPUMGetGuestIa32VmxExitCtls(pVCpu);
3715 if (~pVmcs->u32ExitCtls & ExitCtls.n.disallowed0)
3716 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_ExitCtlsDisallowed0);
3717
3718 if (pVmcs->u32ExitCtls & ~ExitCtls.n.allowed1)
3719 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_ExitCtlsAllowed1);
3720
3721 /* Save preemption timer without activating it. */
3722 if ( !(pVmcs->u32PinCtls & VMX_PIN_CTLS_PREEMPT_TIMER)
3723 && (pVmcs->u32ExitCtls & VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER))
3724 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_SavePreemptTimer);
3725
3726 /* VM-exit MSR-store count and VM-exit MSR-store area address. */
3727 if (pVmcs->u32ExitMsrStoreCount)
3728 {
3729 if ( (pVmcs->u64AddrExitMsrStore.u & VMX_AUTOMSR_OFFSET_MASK)
3730 || (pVmcs->u64AddrExitMsrStore.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
3731 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), pVmcs->u64AddrExitMsrStore.u))
3732 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrExitMsrStore);
3733 }
3734
3735 /* VM-exit MSR-load count and VM-exit MSR-load area address. */
3736 if (pVmcs->u32ExitMsrLoadCount)
3737 {
3738 if ( (pVmcs->u64AddrExitMsrLoad.u & VMX_AUTOMSR_OFFSET_MASK)
3739 || (pVmcs->u64AddrExitMsrLoad.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
3740 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), pVmcs->u64AddrExitMsrLoad.u))
3741 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrExitMsrLoad);
3742 }
3743
3744 NOREF(pszInstr);
3745 NOREF(pszFailure);
3746 return VINF_SUCCESS;
3747}
3748
3749
3750/**
3751 * Checks VM-execution controls fields as part of VM-entry.
3752 * See Intel spec. 26.2.1.1 "VM-Execution Control Fields".
3753 *
3754 * @returns VBox status code.
3755 * @param pVCpu The cross context virtual CPU structure.
3756 * @param pszInstr The VMX instruction name (for logging purposes).
3757 *
3758 * @remarks This may update secondary processor-based VM-execution control fields
3759 * in the current VMCS if necessary.
3760 */
3761IEM_STATIC int iemVmxVmentryCheckExecCtls(PVMCPU pVCpu, const char *pszInstr)
3762{
3763 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
3764 const char * const pszFailure = "VMFail";
3765
3766 /* Pin-based VM-execution controls. */
3767 {
3768 VMXCTLSMSR PinCtls;
3769 PinCtls.u = CPUMGetGuestIa32VmxPinbasedCtls(pVCpu);
3770 if (~pVmcs->u32PinCtls & PinCtls.n.disallowed0)
3771 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_PinCtlsDisallowed0);
3772
3773 if (pVmcs->u32PinCtls & ~PinCtls.n.allowed1)
3774 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_PinCtlsAllowed1);
3775 }
3776
3777 /* Processor-based VM-execution controls. */
3778 {
3779 VMXCTLSMSR ProcCtls;
3780 ProcCtls.u = CPUMGetGuestIa32VmxProcbasedCtls(pVCpu);
3781 if (~pVmcs->u32ProcCtls & ProcCtls.n.disallowed0)
3782 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_ProcCtlsDisallowed0);
3783
3784 if (pVmcs->u32ProcCtls & ~ProcCtls.n.allowed1)
3785 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_ProcCtlsAllowed1);
3786 }
3787
3788 /* Secondary processor-based VM-execution controls. */
3789 if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_USE_SECONDARY_CTLS)
3790 {
3791 VMXCTLSMSR ProcCtls2;
3792 ProcCtls2.u = CPUMGetGuestIa32VmxProcbasedCtls2(pVCpu);
3793 if (~pVmcs->u32ProcCtls2 & ProcCtls2.n.disallowed0)
3794 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_ProcCtls2Disallowed0);
3795
3796 if (pVmcs->u32ProcCtls2 & ~ProcCtls2.n.allowed1)
3797 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_ProcCtls2Allowed1);
3798 }
3799 else
3800 Assert(!pVmcs->u32ProcCtls2);
3801
3802 /* CR3-target count. */
3803 if (pVmcs->u32Cr3TargetCount <= VMX_V_CR3_TARGET_COUNT)
3804 { /* likely */ }
3805 else
3806 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_Cr3TargetCount);
3807
3808 /* IO bitmaps physical addresses. */
3809 if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_USE_IO_BITMAPS)
3810 {
3811 if ( (pVmcs->u64AddrIoBitmapA.u & X86_PAGE_4K_OFFSET_MASK)
3812 || (pVmcs->u64AddrIoBitmapA.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
3813 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), pVmcs->u64AddrIoBitmapA.u))
3814 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrIoBitmapA);
3815
3816 if ( (pVmcs->u64AddrIoBitmapB.u & X86_PAGE_4K_OFFSET_MASK)
3817 || (pVmcs->u64AddrIoBitmapB.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
3818 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), pVmcs->u64AddrIoBitmapB.u))
3819 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrIoBitmapB);
3820 }
3821
3822 /* MSR bitmap physical address. */
3823 if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS)
3824 {
3825 if ( (pVmcs->u64AddrMsrBitmap.u & X86_PAGE_4K_OFFSET_MASK)
3826 || (pVmcs->u64AddrMsrBitmap.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
3827 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), pVmcs->u64AddrMsrBitmap.u))
3828 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrMsrBitmap);
3829 }
3830
3831 /* TPR shadow related controls. */
3832 if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
3833 {
3834 /* Virtual-APIC page physical address. */
3835 RTGCPHYS GCPhysVirtApic = pVmcs->u64AddrVirtApic.u;
3836 if ( (GCPhysVirtApic & X86_PAGE_4K_OFFSET_MASK)
3837 || (GCPhysVirtApic >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
3838 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVirtApic))
3839 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrVirtApicPage);
3840
3841 /* Read the Virtual-APIC page. */
3842 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVirtApicPage));
3843 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVirtApicPage),
3844 GCPhysVirtApic, VMX_V_VIRT_APIC_PAGES);
3845 if (RT_FAILURE(rc))
3846 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VirtApicPagePtrReadPhys);
3847
3848 /* TPR threshold without virtual-interrupt delivery. */
3849 if ( !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY)
3850 && (pVmcs->u32TprThreshold & ~VMX_TPR_THRESHOLD_MASK))
3851 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_TprThresholdRsvd);
3852
3853 /* TPR threshold and VTPR. */
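        /* Bits 3:0 of the TPR threshold must not be greater than VTPR[7:4] when neither virtualized APIC
           accesses nor virtual-interrupt delivery are enabled. */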
3854 uint8_t const *pbVirtApic = (uint8_t *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVirtApicPage);
3855 uint8_t const u8VTpr = *(pbVirtApic + XAPIC_OFF_TPR);
3856 if ( !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS)
3857 && !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY)
3858 && RT_BF_GET(pVmcs->u32TprThreshold, VMX_BF_TPR_THRESHOLD_TPR) > ((u8VTpr >> 4) & UINT32_C(0xf)) /* Bits 4:7 */)
3859 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_TprThresholdVTpr);
3860 }
3861 else
3862 {
3863 if ( !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_X2APIC_MODE)
3864 && !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_APIC_REG_VIRT)
3865 && !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY))
3866 { /* likely */ }
3867 else
3868 {
3869 if (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_X2APIC_MODE)
3870 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VirtX2ApicTprShadow);
3871 if (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_APIC_REG_VIRT)
3872 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_ApicRegVirt);
3873 Assert(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY);
3874 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VirtIntDelivery);
3875 }
3876 }
3877
3878 /* NMI exiting and virtual-NMIs. */
3879 if ( !(pVmcs->u32PinCtls & VMX_PIN_CTLS_NMI_EXIT)
3880 && (pVmcs->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI))
3881 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VirtNmi);
3882
3883 /* Virtual-NMIs and NMI-window exiting. */
3884 if ( !(pVmcs->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
3885 && (pVmcs->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT))
3886 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_NmiWindowExit);
3887
3888 /* Virtualize APIC accesses. */
3889 if (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS)
3890 {
3891 /* APIC-access physical address. */
3892 RTGCPHYS GCPhysApicAccess = pVmcs->u64AddrApicAccess.u;
3893 if ( (GCPhysApicAccess & X86_PAGE_4K_OFFSET_MASK)
3894 || (GCPhysApicAccess >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
3895 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysApicAccess))
3896 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrApicAccess);
3897 }
3898
3899 /* Virtualize-x2APIC mode is mutually exclusive with virtualize-APIC accesses. */
3900 if ( (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_X2APIC_MODE)
3901 && (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS))
3902 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VirtX2ApicVirtApic);
3903
3904 /* Virtual-interrupt delivery requires external interrupt exiting. */
3905 if ( (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY)
3906 && !(pVmcs->u32PinCtls & VMX_PIN_CTLS_EXT_INT_EXIT))
3907 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VirtX2ApicVirtApic);
3908
3909 /* VPID. */
3910 if ( !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VPID)
3911 || pVmcs->u16Vpid != 0)
3912 { /* likely */ }
3913 else
3914 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_Vpid);
3915
3916 Assert(!(pVmcs->u32PinCtls & VMX_PIN_CTLS_POSTED_INT)); /* We don't support posted interrupts yet. */
3917 Assert(!(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_EPT)); /* We don't support EPT yet. */
3918 Assert(!(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_PML)); /* We don't support PML yet. */
3919 Assert(!(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_UNRESTRICTED_GUEST)); /* We don't support Unrestricted-guests yet. */
3920 Assert(!(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VMFUNC)); /* We don't support VM functions yet. */
3921 Assert(!(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_EPT_VE)); /* We don't support EPT-violation #VE yet. */
3922 Assert(!(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_PAUSE_LOOP_EXIT)); /* We don't support Pause-loop exiting yet. */
3923
3924 /* VMCS shadowing. */
3925 if (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING)
3926 {
3927 /* VMREAD-bitmap physical address. */
3928 RTGCPHYS GCPhysVmreadBitmap = pVmcs->u64AddrVmreadBitmap.u;
3929 if ( ( GCPhysVmreadBitmap & X86_PAGE_4K_OFFSET_MASK)
3930 || ( GCPhysVmreadBitmap >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
3931 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmreadBitmap))
3932 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrVmreadBitmap);
3933
3934 /* VMWRITE-bitmap physical address. */
3935 RTGCPHYS GCPhysVmwriteBitmap = pVmcs->u64AddrVmwriteBitmap.u;
3936 if ( ( GCPhysVmwriteBitmap & X86_PAGE_4K_OFFSET_MASK)
3937 || ( GCPhysVmwriteBitmap >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
3938 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmwriteBitmap))
3939 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrVmwriteBitmap);
3940
3941 /* Read the VMREAD-bitmap. */
3942 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVmreadBitmap));
3943 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVmreadBitmap),
3944 GCPhysVmreadBitmap, VMX_V_VMREAD_VMWRITE_BITMAP_SIZE);
3945 if (RT_FAILURE(rc))
3946 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VmreadBitmapPtrReadPhys);
3947
3948 /* Read the VMWRITE-bitmap. */
3949 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVmwriteBitmap));
3950 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVmwriteBitmap),
3951 GCPhysVmwriteBitmap, VMX_V_VMREAD_VMWRITE_BITMAP_SIZE);
3952 if (RT_FAILURE(rc))
3953 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VmwriteBitmapPtrReadPhys);
3954 }
3955
3956 NOREF(pszInstr);
3957 NOREF(pszFailure);
3958 return VINF_SUCCESS;
3959}
3960
3961
3962/**
3963 * Loads the guest control registers, debug register and some MSRs as part of
3964 * VM-entry.
3965 *
3966 * @param pVCpu The cross context virtual CPU structure.
3967 */
3968IEM_STATIC void iemVmxVmentryLoadGuestControlRegsMsrs(PVMCPU pVCpu)
3969{
3970 /*
3971 * Load guest control registers, debug registers and MSRs.
3972 * See Intel spec. 26.3.2.1 "Loading Guest Control Registers, Debug Registers and MSRs".
3973 */
3974 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
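    /* CR0 bits in VMX_ENTRY_CR0_IGNORE_MASK keep their current values; all other bits are taken from the VMCS. */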
3975 uint64_t const uGstCr0 = (pVmcs->u64GuestCr0.u & ~VMX_ENTRY_CR0_IGNORE_MASK)
3976 | (pVCpu->cpum.GstCtx.cr0 & VMX_ENTRY_CR0_IGNORE_MASK);
3977 CPUMSetGuestCR0(pVCpu, uGstCr0);
3978 CPUMSetGuestCR4(pVCpu, pVmcs->u64GuestCr4.u);
3979 pVCpu->cpum.GstCtx.cr3 = pVmcs->u64GuestCr3.u;
3980
3981 if (pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
3982 pVCpu->cpum.GstCtx.dr[7] = (pVmcs->u64GuestDr7.u & ~VMX_ENTRY_DR7_MBZ_MASK) | VMX_ENTRY_DR7_MB1_MASK;
3983
3984 pVCpu->cpum.GstCtx.SysEnter.eip = pVmcs->u64GuestSysenterEip.s.Lo;
3985 pVCpu->cpum.GstCtx.SysEnter.esp = pVmcs->u64GuestSysenterEsp.s.Lo;
3986 pVCpu->cpum.GstCtx.SysEnter.cs = pVmcs->u32GuestSysenterCS;
3987
3988 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
3989 {
3990 /* FS base and GS base are loaded while loading the rest of the guest segment registers. */
3991
3992 /* EFER MSR. */
3993 if (!(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_EFER_MSR))
3994 {
3995 bool const fGstInLongMode = RT_BOOL(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
3996 bool const fGstPaging = RT_BOOL(uGstCr0 & X86_CR0_PG);
3997 uint64_t const uHostEfer = pVCpu->cpum.GstCtx.msrEFER;
3998 if (fGstInLongMode)
3999 {
4000 /* If the nested-guest is in long mode, LMA and LME are both set. */
4001 Assert(fGstPaging);
4002 pVCpu->cpum.GstCtx.msrEFER = uHostEfer | (MSR_K6_EFER_LMA | MSR_K6_EFER_LME);
4003 }
4004 else
4005 {
4006 /*
4007 * If the nested-guest is outside long mode:
4008 * - With paging: LMA is cleared, LME is cleared.
4009 * - Without paging: LMA is cleared, LME is left unmodified.
4010 */
4011 uint64_t const fLmaLmeMask = MSR_K6_EFER_LMA | (fGstPaging ? MSR_K6_EFER_LME : 0);
4012 pVCpu->cpum.GstCtx.msrEFER = uHostEfer & ~fLmaLmeMask;
4013 }
4014 }
4015 /* else: see below. */
4016 }
4017
4018 /* PAT MSR. */
4019 if (pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PAT_MSR)
4020 pVCpu->cpum.GstCtx.msrPAT = pVmcs->u64GuestPatMsr.u;
4021
4022 /* EFER MSR. */
4023 if (pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_EFER_MSR)
4024 pVCpu->cpum.GstCtx.msrEFER = pVmcs->u64GuestEferMsr.u;
4025
4026 /* We don't support IA32_PERF_GLOBAL_CTRL MSR yet. */
4027 Assert(!(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PERF_MSR));
4028
4029 /* We don't support IA32_BNDCFGS MSR yet. */
4030 Assert(!(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_BNDCFGS_MSR));
4031
4032 /* Nothing to do for SMBASE register - We don't support SMM yet. */
4033}
4034
4035
4036/**
4037 * Loads the guest segment registers, GDTR, IDTR, LDTR and TR as part of VM-entry.
4038 *
4039 * @param pVCpu The cross context virtual CPU structure.
4040 */
4041IEM_STATIC void iemVmxVmentryLoadGuestSegRegs(PVMCPU pVCpu)
4042{
4043 /*
4044 * Load guest segment registers, GDTR, IDTR, LDTR and TR.
4045 * See Intel spec. 26.3.2.2 "Loading Guest Segment Registers and Descriptor-Table Registers".
4046 */
4047 /* CS, SS, ES, DS, FS, GS. */
4048 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
4049 for (unsigned iSegReg = 0; iSegReg < X86_SREG_COUNT; iSegReg++)
4050 {
4051 PCPUMSELREG pGstSelReg = &pVCpu->cpum.GstCtx.aSRegs[iSegReg];
4052 CPUMSELREG VmcsSelReg;
4053 int rc = iemVmxVmcsGetGuestSegReg(pVmcs, iSegReg, &VmcsSelReg);
4054 AssertRC(rc); NOREF(rc);
4055 if (!(VmcsSelReg.Attr.u & X86DESCATTR_UNUSABLE))
4056 {
4057 pGstSelReg->Sel = VmcsSelReg.Sel;
4058 pGstSelReg->ValidSel = VmcsSelReg.Sel;
4059 pGstSelReg->fFlags = CPUMSELREG_FLAGS_VALID;
4060 pGstSelReg->u64Base = VmcsSelReg.u64Base;
4061 pGstSelReg->u32Limit = VmcsSelReg.u32Limit;
4062 pGstSelReg->Attr.u = VmcsSelReg.Attr.u;
4063 }
4064 else
4065 {
4066 pGstSelReg->Sel = VmcsSelReg.Sel;
4067 pGstSelReg->ValidSel = VmcsSelReg.Sel;
4068 pGstSelReg->fFlags = CPUMSELREG_FLAGS_VALID;
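            /* The segment is unusable; apply the register-specific treatment of unusable segments,
               see Intel spec. 26.3.2.2 "Loading Guest Segment Registers and Descriptor-Table Registers". */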
4069 switch (iSegReg)
4070 {
4071 case X86_SREG_CS:
4072 pGstSelReg->u64Base = VmcsSelReg.u64Base;
4073 pGstSelReg->u32Limit = VmcsSelReg.u32Limit;
4074 pGstSelReg->Attr.u = VmcsSelReg.Attr.u;
4075 break;
4076
4077 case X86_SREG_SS:
4078 pGstSelReg->u64Base = VmcsSelReg.u64Base & UINT32_C(0xfffffff0);
4079 pGstSelReg->u32Limit = 0;
4080 pGstSelReg->Attr.u = (VmcsSelReg.Attr.u & X86DESCATTR_DPL) | X86DESCATTR_D | X86DESCATTR_UNUSABLE;
4081 break;
4082
4083 case X86_SREG_ES:
4084 case X86_SREG_DS:
4085 pGstSelReg->u64Base = 0;
4086 pGstSelReg->u32Limit = 0;
4087 pGstSelReg->Attr.u = X86DESCATTR_UNUSABLE;
4088 break;
4089
4090 case X86_SREG_FS:
4091 case X86_SREG_GS:
4092 pGstSelReg->u64Base = VmcsSelReg.u64Base;
4093 pGstSelReg->u32Limit = 0;
4094 pGstSelReg->Attr.u = X86DESCATTR_UNUSABLE;
4095 break;
4096 }
4097 Assert(pGstSelReg->Attr.n.u1Unusable);
4098 }
4099 }
4100
4101 /* LDTR. */
4102 pVCpu->cpum.GstCtx.ldtr.Sel = pVmcs->GuestLdtr;
4103 pVCpu->cpum.GstCtx.ldtr.ValidSel = pVmcs->GuestLdtr;
4104 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
4105 if (!(pVmcs->u32GuestLdtrAttr & X86DESCATTR_UNUSABLE))
4106 {
4107 pVCpu->cpum.GstCtx.ldtr.u64Base = pVmcs->u64GuestLdtrBase.u;
4108 pVCpu->cpum.GstCtx.ldtr.u32Limit = pVmcs->u32GuestLdtrLimit;
4109 pVCpu->cpum.GstCtx.ldtr.Attr.u = pVmcs->u32GuestLdtrAttr;
4110 }
4111 else
4112 {
4113 pVCpu->cpum.GstCtx.ldtr.u64Base = 0;
4114 pVCpu->cpum.GstCtx.ldtr.u32Limit = 0;
4115 pVCpu->cpum.GstCtx.ldtr.Attr.u = X86DESCATTR_UNUSABLE;
4116 }
4117
4118 /* TR. */
4119 Assert(!(pVmcs->u32GuestTrAttr & X86DESCATTR_UNUSABLE));
4120 pVCpu->cpum.GstCtx.tr.Sel = pVmcs->GuestTr;
4121 pVCpu->cpum.GstCtx.tr.ValidSel = pVmcs->GuestTr;
4122 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
4123 pVCpu->cpum.GstCtx.tr.u64Base = pVmcs->u64GuestTrBase.u;
4124 pVCpu->cpum.GstCtx.tr.u32Limit = pVmcs->u32GuestTrLimit;
4125 pVCpu->cpum.GstCtx.tr.Attr.u = pVmcs->u32GuestTrAttr;
4126
4127 /* GDTR. */
4128 pVCpu->cpum.GstCtx.gdtr.cbGdt = pVmcs->u32GuestGdtrLimit;
4129 pVCpu->cpum.GstCtx.gdtr.pGdt = pVmcs->u64GuestGdtrBase.u;
4130
4131 /* IDTR. */
4132 pVCpu->cpum.GstCtx.idtr.cbIdt = pVmcs->u32GuestIdtrLimit;
4133 pVCpu->cpum.GstCtx.idtr.pIdt = pVmcs->u64GuestIdtrBase.u;
4134}
4135
4136
4137/**
4138 * Loads the guest MSRs from the VM-entry auto-load MSRs as part of VM-entry.
4139 *
4140 * @returns VBox status code.
4141 * @param pVCpu The cross context virtual CPU structure.
4142 * @param pszInstr The VMX instruction name (for logging purposes).
4143 */
4144IEM_STATIC int iemVmxVmentryLoadGuestAutoMsrs(PVMCPU pVCpu, const char *pszInstr)
4145{
4146 /*
4147 * Load guest MSRs.
4148 * See Intel spec. 26.4 "Loading MSRs".
4149 */
4150 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
4151 const char *const pszFailure = "VM-exit";
4152
4153 /*
4154 * The VM-entry MSR-load area address need not be a valid guest-physical address if the
4155 * VM-entry MSR load count is 0. If this is the case, bail early without reading it.
4156 * See Intel spec. 24.8.2 "VM-Entry Controls for MSRs".
4157 */
4158 uint32_t const cMsrs = pVmcs->u32EntryMsrLoadCount;
4159 if (!cMsrs)
4160 return VINF_SUCCESS;
4161
4162 /*
4163 * Verify the MSR auto-load count. Physical CPUs can behave unpredictably if the count is
4164 * exceeded, including possibly raising #MC exceptions during VMX transition. Our
4165 * implementation shall fail VM-entry with a VMX_EXIT_ERR_MSR_LOAD VM-exit.
4166 */
4167 bool const fIsMsrCountValid = iemVmxIsAutoMsrCountValid(pVCpu, cMsrs);
4168 if (fIsMsrCountValid)
4169 { /* likely */ }
4170 else
4171 {
4172 pVmcs->u64ExitQual.u = VMX_V_AUTOMSR_AREA_SIZE / sizeof(VMXAUTOMSR);
4173 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_MsrLoadCount);
4174 }
4175
4176 RTGCPHYS const GCPhysAutoMsrArea = pVmcs->u64AddrEntryMsrLoad.u;
4177 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pAutoMsrArea),
4178 GCPhysAutoMsrArea, VMX_V_AUTOMSR_AREA_SIZE);
4179 if (RT_SUCCESS(rc))
4180 {
4181 PVMXAUTOMSR pMsr = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pAutoMsrArea);
4182 Assert(pMsr);
4183 for (uint32_t idxMsr = 0; idxMsr < cMsrs; idxMsr++, pMsr++)
4184 {
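            /* Reserved bits must be clear; FS/GS base, EFER, the x2APIC MSR range and the SMM monitor control
               MSR must not be loaded through the auto-load area, otherwise the VM-entry fails. */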
4185 if ( !pMsr->u32Reserved
4186 && pMsr->u32Msr != MSR_K8_FS_BASE
4187 && pMsr->u32Msr != MSR_K8_GS_BASE
4188 && pMsr->u32Msr != MSR_K6_EFER
4189 && pMsr->u32Msr >> 8 != MSR_IA32_X2APIC_START >> 8
4190 && pMsr->u32Msr != MSR_IA32_SMM_MONITOR_CTL)
4191 {
4192 VBOXSTRICTRC rcStrict = CPUMSetGuestMsr(pVCpu, pMsr->u32Msr, pMsr->u64Value);
4193 if (rcStrict == VINF_SUCCESS)
4194 continue;
4195
4196 /*
4197 * If we're in ring-0, we cannot handle returns to ring-3 at this point and continue VM-entry.
4198 * If any guest hypervisor loads MSRs that require ring-3 handling, we cause a VM-entry failure
4199 * recording the MSR index in the VM-exit qualification (as per the Intel spec.) and indicate it
4200 * further by our own, specific diagnostic code. Later, we can try to implement handling of the
4201 * MSR in ring-0 if possible, or come up with a better, generic solution.
4202 */
4203 pVmcs->u64ExitQual.u = idxMsr;
4204 VMXVDIAG const enmDiag = rcStrict == VINF_CPUM_R3_MSR_WRITE
4205 ? kVmxVDiag_Vmentry_MsrLoadRing3
4206 : kVmxVDiag_Vmentry_MsrLoad;
4207 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
4208 }
4209 else
4210 {
4211 pVmcs->u64ExitQual.u = idxMsr;
4212 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_MsrLoadRsvd);
4213 }
4214 }
4215 }
4216 else
4217 {
4218 AssertMsgFailed(("%s: Failed to read MSR auto-load area at %#RGp, rc=%Rrc\n", pszInstr, GCPhysAutoMsrArea, rc));
4219 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_MsrLoadPtrReadPhys);
4220 }
4221
4222 NOREF(pszInstr);
4223 NOREF(pszFailure);
4224 return VINF_SUCCESS;
4225}
4226
4227
4228/**
4229 * Loads the guest-state non-register state as part of VM-entry.
4230 *
4232 * @param pVCpu The cross context virtual CPU structure.
4233 *
4234 * @remarks This must be called only after loading the nested-guest register state
4235 * (especially nested-guest RIP).
4236 */
4237IEM_STATIC void iemVmxVmentryLoadGuestNonRegState(PVMCPU pVCpu)
4238{
4239 /*
4240 * Load guest non-register state.
4241 * See Intel spec. 26.6 "Special Features of VM Entry"
4242 */
4243 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
4244 uint32_t const uEntryIntInfo = pVmcs->u32EntryIntInfo;
4245 if (VMX_ENTRY_INT_INFO_IS_VALID(uEntryIntInfo))
4246 {
4247 /** @todo NSTVMX: Pending debug exceptions. */
4248 Assert(!(pVmcs->u64GuestPendingDbgXcpt.u));
4249
4250 if (pVmcs->u32GuestIntrState == VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI)
4251 {
4252 /** @todo NSTVMX: Virtual-NMIs doesn't affect NMI blocking in the normal sense.
4253 * We probably need a different force flag for virtual-NMI
4254 * pending/blocking. */
4255 Assert(!(pVmcs->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI));
4256 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
4257 }
4258 else if ( pVmcs->u32GuestIntrState == VMX_VMCS_GUEST_INT_STATE_BLOCK_STI
4259 || pVmcs->u32GuestIntrState == VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS)
4260 {
4261 EMSetInhibitInterruptsPC(pVCpu, pVCpu->cpum.GstCtx.rip);
4262 }
4263
4264 /* SMI blocking is irrelevant. We don't support SMIs yet. */
4265 }
4266
4267 /* Loading PDPTEs will be taken care when we switch modes. We don't support EPT yet. */
4268 Assert(!(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_EPT));
4269
4270 /* VPID is irrelevant. We don't support VPID yet. */
4271
4272 /* Clear address-range monitoring. */
4273 EMMonitorWaitClear(pVCpu);
4274}
4275
4276
4277/**
4278 * Saves the guest force-flags in preparation of entering the nested-guest.
4279 *
4280 * @param pVCpu The cross context virtual CPU structure.
4281 */
4282IEM_STATIC void iemVmxVmentrySaveForceFlags(PVMCPU pVCpu)
4283{
4284 /* Assert that we are not called multiple times during VM-entry. */
4285 Assert(pVCpu->cpum.GstCtx.hwvirt.fLocalForcedActions == 0);
4286
4287 /*
4288 * Preserve the required force-flags.
4289 *
4290 * We only preserve the force-flags that would affect the execution of the
4291 * nested-guest (or the guest).
4292 *
4293 * - VMCPU_FF_INHIBIT_INTERRUPTS need not be preserved as VM-exit explicitly
4294 * clears interrupt-inhibition and on VM-entry the guest-interruptibility
4295 * state provides the inhibition if any.
4296 *
4297 * - VMCPU_FF_BLOCK_NMIS need not be preserved as VM-entry does not discard
4298 * any NMI blocking. VM-exits caused directly by NMIs (intercepted by the
4299 * exception bitmap) do block subsequent NMIs.
4300 *
4301 * - MTF need not be preserved as it's used only in VMX non-root mode and
4302 * is supplied on VM-entry through the VM-execution controls.
4303 *
4304 * The remaining FFs (e.g. timers) can stay in place so that we will be able to
4305 * generate interrupts that should cause #VMEXITs for the nested-guest.
4306 */
4307 uint32_t const fDiscardMask = VMCPU_FF_INHIBIT_INTERRUPTS | VMCPU_FF_MTF | VMCPU_FF_BLOCK_NMIS;
4308 pVCpu->cpum.GstCtx.hwvirt.fLocalForcedActions = pVCpu->fLocalForcedActions & fDiscardMask;
4309 VMCPU_FF_CLEAR(pVCpu, fDiscardMask);
4310}
4311
4312
4313/**
4314 * Restores the guest force-flags in preparation of exiting the nested-guest.
4315 *
4316 * @param pVCpu The cross context virtual CPU structure.
4317 */
4318IEM_STATIC void iemVmxVmexitRestoreForceFlags(PVMCPU pVCpu)
4319{
4320 if (pVCpu->cpum.GstCtx.hwvirt.fLocalForcedActions)
4321 {
4322 VMCPU_FF_SET(pVCpu, pVCpu->cpum.GstCtx.hwvirt.fLocalForcedActions);
4323 pVCpu->cpum.GstCtx.hwvirt.fLocalForcedActions = 0;
4324 }
4325}
4326
4327
4328/**
4329 * Loads the guest-state as part of VM-entry.
4330 *
4331 * @returns VBox status code.
4332 * @param pVCpu The cross context virtual CPU structure.
4333 * @param pszInstr The VMX instruction name (for logging purposes).
4334 *
4335 * @remarks This must be done after all the necessary steps prior to loading of
4336 * guest-state (e.g. checking various VMCS state).
4337 */
4338IEM_STATIC int iemVmxVmentryLoadGuestState(PVMCPU pVCpu, const char *pszInstr)
4339{
4340 /*
4341 * Load guest control, debug, segment, descriptor-table registers and some MSRs.
4342 */
4343 iemVmxVmentryLoadGuestControlRegsMsrs(pVCpu);
4344 iemVmxVmentryLoadGuestSegRegs(pVCpu);
4345
4346 /*
4347 * Load guest RIP, RSP and RFLAGS.
4348 * See Intel spec. 26.3.2.3 "Loading Guest RIP, RSP and RFLAGS".
4349 */
4350 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
4351 pVCpu->cpum.GstCtx.rsp = pVmcs->u64GuestRsp.u;
4352 pVCpu->cpum.GstCtx.rip = pVmcs->u64GuestRip.u;
4353 pVCpu->cpum.GstCtx.rflags.u = pVmcs->u64GuestRFlags.u;
4354
4355 /* Load guest non-register state. */
4356 iemVmxVmentryLoadGuestNonRegState(pVCpu);
4357
4358 NOREF(pszInstr);
4359 return VINF_SUCCESS;
4360}
4361
4362
4363/**
4364 * Performs event injection (if any) as part of VM-entry.
4365 *
4366 * @param pVCpu The cross context virtual CPU structure.
4367 * @param pszInstr The VMX instruction name (for logging purposes).
4368 */
4369IEM_STATIC int iemVmxVmentryInjectEvent(PVMCPU pVCpu, const char *pszInstr)
4370{
4371 /*
4372 * Inject events.
4373 * See Intel spec. 26.5 "Event Injection".
4374 */
4375 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
4376 uint32_t const uEntryIntInfo = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs)->u32EntryIntInfo;
4377 if (VMX_ENTRY_INT_INFO_IS_VALID(uEntryIntInfo))
4378 {
4379 /*
4380 * The event that is going to be made pending for injection is not subject to VMX intercepts,
4381 * thus we flag ignoring of intercepts. However, recursive exceptions, if any, during delivery
4382 * of the current event -are- subject to intercepts, hence this flag will be flipped during
4383 * the actual delivery of this event.
4384 */
4385 pVCpu->cpum.GstCtx.hwvirt.vmx.fInterceptEvents = false;
4386
4387 uint8_t const uType = VMX_ENTRY_INT_INFO_TYPE(uEntryIntInfo);
4388 if (uType == VMX_ENTRY_INT_INFO_TYPE_OTHER_EVENT)
4389 {
4390 Assert(VMX_ENTRY_INT_INFO_VECTOR(uEntryIntInfo) == VMX_ENTRY_INT_INFO_VECTOR_MTF);
4391 VMCPU_FF_SET(pVCpu, VMCPU_FF_MTF);
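 /* An injected MTF event means an MTF VM-exit becomes pending on the instruction boundary following
    VM-entry; setting VMCPU_FF_MTF above models that pending VM-exit (see Intel spec. 26.5 "Event Injection"). */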
4392 return VINF_SUCCESS;
4393 }
4394
4395 int rc = HMVmxEntryIntInfoInjectTrpmEvent(pVCpu, uEntryIntInfo, pVmcs->u32EntryXcptErrCode, pVmcs->u32EntryInstrLen,
4396 pVCpu->cpum.GstCtx.cr2);
4397 AssertRCReturn(rc, rc);
4398 }
4399
4400 NOREF(pszInstr);
4401 return VINF_SUCCESS;
4402}
4403
4404
4405/**
4406 * Performs a VMX transition, updating PGM, IEM and CPUM.
4407 *
4408 * @param pVCpu The cross context virtual CPU structure.
4409 */
4410IEM_STATIC int iemVmxWorldSwitch(PVMCPU pVCpu)
4411{
4412 /*
4413 * Inform PGM about paging mode changes.
4414 * We include X86_CR0_PE because PGM doesn't handle paged-real mode yet,
4415 * see comment in iemMemPageTranslateAndCheckAccess().
4416 */
4417 int rc = PGMChangeMode(pVCpu, pVCpu->cpum.GstCtx.cr0 | X86_CR0_PE, pVCpu->cpum.GstCtx.cr4, pVCpu->cpum.GstCtx.msrEFER);
4418# ifdef IN_RING3
4419 Assert(rc != VINF_PGM_CHANGE_MODE);
4420# endif
4421 AssertRCReturn(rc, rc);
4422
4423 /* Inform CPUM (recompiler), can later be removed. */
4424 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
4425
4426 /*
4427 * Flush the TLB with new CR3. This is required in case the PGM mode change
4428 * above doesn't actually change anything.
4429 */
4430 if (rc == VINF_SUCCESS)
4431 {
4432 rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, true);
4433 AssertRCReturn(rc, rc);
4434 }
4435
4436 /* Re-initialize IEM cache/state after the drastic mode switch. */
4437 iemReInitExec(pVCpu);
4438 return rc;
4439}
4440
4441
4442/**
4443 * VMLAUNCH/VMRESUME instruction execution worker.
4444 *
4445 * @returns Strict VBox status code.
4446 * @param pVCpu The cross context virtual CPU structure.
4447 * @param cbInstr The instruction length.
4448 * @param uInstrId The instruction identity (either VMXINSTRID_VMLAUNCH or
4449 * VMXINSTRID_VMRESUME).
4450 * @param pExitInfo Pointer to the VM-exit instruction information struct.
4451 * Optional, can be NULL.
4452 *
4453 * @remarks Common VMX instruction checks are already expected to have been done by the caller,
4454 * i.e. CR4.VMXE, Real/V86 mode, EFER/CS.L checks.
4455 */
4456IEM_STATIC VBOXSTRICTRC iemVmxVmlaunchVmresume(PVMCPU pVCpu, uint8_t cbInstr, VMXINSTRID uInstrId, PCVMXVEXITINFO pExitInfo)
4457{
4458 Assert( uInstrId == VMXINSTRID_VMLAUNCH
4459 || uInstrId == VMXINSTRID_VMRESUME);
4460
4461 const char *pszInstr = uInstrId == VMXINSTRID_VMLAUNCH ? "vmlaunch" : "vmresume";
4462 if (IEM_IS_VMX_NON_ROOT_MODE(pVCpu))
4463 {
4464 RT_NOREF(pExitInfo);
4465 /** @todo NSTVMX: intercept. */
4466 }
4467 Assert(IEM_IS_VMX_ROOT_MODE(pVCpu));
4468
4469 /* CPL. */
4470 if (pVCpu->iem.s.uCpl > 0)
4471 {
4472 Log(("%s: CPL %u -> #GP(0)\n", pszInstr, pVCpu->iem.s.uCpl));
4473 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmentry_Cpl;
4474 return iemRaiseGeneralProtectionFault0(pVCpu);
4475 }
4476
4477 /* Current VMCS valid. */
4478 if (!IEM_VMX_HAS_CURRENT_VMCS(pVCpu))
4479 {
4480 Log(("%s: VMCS pointer %#RGp invalid -> VMFailInvalid\n", pszInstr, IEM_VMX_GET_CURRENT_VMCS(pVCpu)));
4481 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmentry_PtrInvalid;
4482 iemVmxVmFailInvalid(pVCpu);
4483 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
4484 return VINF_SUCCESS;
4485 }
4486
4487 /** @todo Distinguish block-by-MOV-SS from block-by-STI. Currently we
4488 * use block-by-STI here which is not quite correct. */
4489 if ( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
4490 && pVCpu->cpum.GstCtx.rip == EMGetInhibitInterruptsPC(pVCpu))
4491 {
4492 Log(("%s: VM entry with events blocked by MOV SS -> VMFail\n", pszInstr));
4493 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmentry_BlocKMovSS;
4494 iemVmxVmFail(pVCpu, VMXINSTRERR_VMENTRY_BLOCK_MOVSS);
4495 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
4496 return VINF_SUCCESS;
4497 }
4498
4499 if (uInstrId == VMXINSTRID_VMLAUNCH)
4500 {
4501 /* VMLAUNCH with non-clear VMCS. */
4502 if (pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs)->fVmcsState == VMX_V_VMCS_STATE_CLEAR)
4503 { /* likely */ }
4504 else
4505 {
4506 Log(("vmlaunch: VMLAUNCH with non-clear VMCS -> VMFail\n"));
4507 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmentry_VmcsClear;
4508 iemVmxVmFail(pVCpu, VMXINSTRERR_VMLAUNCH_NON_CLEAR_VMCS);
4509 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
4510 return VINF_SUCCESS;
4511 }
4512 }
4513 else
4514 {
4515 /* VMRESUME with non-launched VMCS. */
4516 if (pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs)->fVmcsState == VMX_V_VMCS_STATE_LAUNCHED)
4517 { /* likely */ }
4518 else
4519 {
4520 Log(("vmresume: VMRESUME with non-launched VMCS -> VMFail\n"));
4521 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmentry_VmcsLaunch;
4522 iemVmxVmFail(pVCpu, VMXINSTRERR_VMRESUME_NON_LAUNCHED_VMCS);
4523 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
4524 return VINF_SUCCESS;
4525 }
4526 }
4527
4528 /*
4529 * Load the current VMCS.
4530 */
4531 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs));
4532 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs),
4533 IEM_VMX_GET_CURRENT_VMCS(pVCpu), VMX_V_VMCS_SIZE);
4534 if (RT_FAILURE(rc))
4535 {
4536 Log(("%s: Failed to read VMCS at %#RGp, rc=%Rrc\n", pszInstr, IEM_VMX_GET_CURRENT_VMCS(pVCpu), rc));
4537 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmentry_PtrReadPhys;
4538 return rc;
4539 }
4540
4541 /* Check VM-execution control fields. */
4542 rc = iemVmxVmentryCheckExecCtls(pVCpu, pszInstr);
4543 if (RT_SUCCESS(rc))
4544 {
4545 /* Check VM-exit control fields. */
4546 rc = iemVmxVmentryCheckExitCtls(pVCpu, pszInstr);
4547 if (RT_SUCCESS(rc))
4548 {
4549 /* Check VM-entry control fields. */
4550 rc = iemVmxVmentryCheckEntryCtls(pVCpu, pszInstr);
4551 if (RT_SUCCESS(rc))
4552 {
4553 /* Check host-state fields. */
4554 rc = iemVmxVmentryCheckHostState(pVCpu, pszInstr);
4555 if (RT_SUCCESS(rc))
4556 {
4557 /* Save the (outer) guest force-flags as VM-exits can occur from this point on. */
4558 iemVmxVmentrySaveForceFlags(pVCpu);
4559
4560 /* Check guest-state fields. */
4561 rc = iemVmxVmentryCheckGuestState(pVCpu, pszInstr);
4562 if (RT_SUCCESS(rc))
4563 {
4564 /* Load guest-state fields. */
4565 rc = iemVmxVmentryLoadGuestState(pVCpu, pszInstr);
4566 if (RT_SUCCESS(rc))
4567 {
4568 /* Load MSRs from the VM-entry auto-load MSR area. */
4569 rc = iemVmxVmentryLoadGuestAutoMsrs(pVCpu, pszInstr);
4570 if (RT_SUCCESS(rc))
4571 {
4572 Assert(rc != VINF_CPUM_R3_MSR_WRITE);
4573
4574 /* VMLAUNCH instruction must update the VMCS launch state. */
4575 if (uInstrId == VMXINSTRID_VMLAUNCH)
4576 pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs)->fVmcsState = VMX_V_VMCS_STATE_LAUNCHED;
4577
4578 /* Perform the VMX transition (PGM updates). */
4579 VBOXSTRICTRC rcStrict = iemVmxWorldSwitch(pVCpu);
4580 if (rcStrict == VINF_SUCCESS)
4581 { /* likely */ }
4582 else if (RT_SUCCESS(rcStrict))
4583 {
4584 Log3(("%s: iemVmxWorldSwitch returns %Rrc -> Setting passup status\n", pszInstr,
4585 VBOXSTRICTRC_VAL(rcStrict)));
4586 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
4587 }
4588 else
4589 {
4590 Log3(("%s: iemVmxWorldSwitch failed! rc=%Rrc\n", pszInstr, VBOXSTRICTRC_VAL(rcStrict)));
4591 return rcStrict;
4592 }
4593
4594 /* We've now entered nested-guest execution. */
4595 pVCpu->cpum.GstCtx.hwvirt.vmx.fInVmxNonRootMode = true;
4596
4597 /* Event injection. */
4598 iemVmxVmentryInjectEvent(pVCpu, pszInstr);
4599
4600 /** @todo NSTVMX: Setup VMX preemption timer */
4601 /** @todo NSTVMX: TPR thresholding. */
4602
4603 return VINF_SUCCESS;
4604 }
4605 /** @todo NSTVMX: VMExit with VMX_EXIT_ERR_MSR_LOAD and set
4606 * VMX_BF_EXIT_REASON_ENTRY_FAILED. */
4607 }
4608 }
4609 /** @todo NSTVMX: VMExit with VMX_EXIT_ERR_INVALID_GUEST_STATE and set
4610 * VMX_BF_EXIT_REASON_ENTRY_FAILED. */
4611 return VINF_SUCCESS;
4612 }
4613
4614 iemVmxVmFail(pVCpu, VMXINSTRERR_VMENTRY_INVALID_HOST_STATE);
4615 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
4616 return VINF_SUCCESS;
4617 }
4618 }
4619 }
4620
4621 iemVmxVmFail(pVCpu, VMXINSTRERR_VMENTRY_INVALID_CTLS);
4622 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
4623 return VINF_SUCCESS;
4624}
4625
4626
4627/**
4628 * Saves the guest control registers, debug registers and some MSRs as part of
4629 * VM-exit.
4630 *
4631 * @param pVCpu The cross context virtual CPU structure.
4632 */
4633IEM_STATIC void iemVmxVmexitSaveGuestControlRegsMsrs(PVMCPU pVCpu)
4634{
4635 /*
4636 * Saves the guest control registers, debug registers and some MSRs.
4637 * See Intel spec. 27.3.1 "Saving Control Registers, Debug Registers and MSRs".
4638 */
4639 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
4640
4641 /* Save control registers. */
4642 pVmcs->u64GuestCr0.u = pVCpu->cpum.GstCtx.cr0;
4643 pVmcs->u64GuestCr3.u = pVCpu->cpum.GstCtx.cr3;
4644 pVmcs->u64GuestCr4.u = pVCpu->cpum.GstCtx.cr4;
4645
4646 /* Save SYSENTER CS, ESP, EIP. */
4647 pVmcs->u32GuestSysenterCS = pVCpu->cpum.GstCtx.SysEnter.cs;
4648 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
4649 {
4650 pVmcs->u64GuestSysenterEsp.u = pVCpu->cpum.GstCtx.SysEnter.esp;
4651 pVmcs->u64GuestSysenterEip.u = pVCpu->cpum.GstCtx.SysEnter.eip;
4652 }
4653 else
4654 {
4655 pVmcs->u64GuestSysenterEsp.s.Lo = pVCpu->cpum.GstCtx.SysEnter.esp;
4656 pVmcs->u64GuestSysenterEip.s.Lo = pVCpu->cpum.GstCtx.SysEnter.eip;
4657 }
4658
4659 /* Save debug registers (DR7 and IA32_DEBUGCTL MSR). */
4660 if (pVmcs->u32ExitCtls & VMX_EXIT_CTLS_SAVE_DEBUG)
4661 {
4662 pVmcs->u64GuestDr7.u = pVCpu->cpum.GstCtx.dr[7];
4663 /** @todo NSTVMX: Support IA32_DEBUGCTL MSR */
4664 }
4665
4666 /* Save PAT MSR. */
4667 if (pVmcs->u32ExitCtls & VMX_EXIT_CTLS_SAVE_PAT_MSR)
4668 pVmcs->u64GuestPatMsr.u = pVCpu->cpum.GstCtx.msrPAT;
4669
4670 /* Save EFER MSR. */
4671 if (pVmcs->u32ExitCtls & VMX_EXIT_CTLS_SAVE_EFER_MSR)
4672 pVmcs->u64GuestEferMsr.u = pVCpu->cpum.GstCtx.msrEFER;
4673
4674 /* We don't support clearing IA32_BNDCFGS MSR yet. */
4675 Assert(!(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_CLEAR_BNDCFGS_MSR));
4676
4677 /* Nothing to do for SMBASE register - We don't support SMM yet. */
4678}
4679
4680
4681/**
4682 * Saves guest segment registers, GDTR, IDTR, LDTR, TR as part of VM-exit.
4683 *
4684 * @param pVCpu The cross context virtual CPU structure.
4685 */
4686IEM_STATIC void iemVmxVmexitSaveGuestSegRegs(PVMCPU pVCpu)
4687{
4688 /*
4689 * Save guest segment registers, GDTR, IDTR, LDTR, TR.
4690 * See Intel spec 27.3.2 "Saving Segment Registers and Descriptor-Table Registers".
4691 */
4692 /* CS, SS, ES, DS, FS, GS. */
4693 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
4694 for (unsigned iSegReg = 0; iSegReg < X86_SREG_COUNT; iSegReg++)
4695 {
4696 PCCPUMSELREG pSelReg = &pVCpu->cpum.GstCtx.aSRegs[iSegReg];
4697 if (!pSelReg->Attr.n.u1Unusable)
4698 iemVmxVmcsSetGuestSegReg(pVmcs, iSegReg, pSelReg);
4699 else
4700 {
4701 /*
4702 * For unusable segments the attributes are undefined except for CS and SS.
4703 * For the rest we don't bother preserving anything but the unusable bit.
4704 */
4705 switch (iSegReg)
4706 {
4707 case X86_SREG_CS:
4708 pVmcs->GuestCs = pSelReg->Sel;
4709 pVmcs->u64GuestCsBase.u = pSelReg->u64Base;
4710 pVmcs->u32GuestCsLimit = pSelReg->u32Limit;
4711 pVmcs->u32GuestCsAttr = pSelReg->Attr.u & ( X86DESCATTR_L | X86DESCATTR_D | X86DESCATTR_G
4712 | X86DESCATTR_UNUSABLE);
4713 break;
4714
4715 case X86_SREG_SS:
4716 pVmcs->GuestSs = pSelReg->Sel;
4717 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
4718 pVmcs->u64GuestSsBase.u &= UINT32_C(0xffffffff);
4719 pVmcs->u32GuestSsAttr = pSelReg->Attr.u & (X86DESCATTR_DPL | X86DESCATTR_UNUSABLE);
4720 break;
4721
4722 case X86_SREG_DS:
4723 pVmcs->GuestDs = pSelReg->Sel;
4724 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
4725 pVmcs->u64GuestDsBase.u &= UINT32_C(0xffffffff);
4726 pVmcs->u32GuestDsAttr = X86DESCATTR_UNUSABLE;
4727 break;
4728
4729 case X86_SREG_ES:
4730 pVmcs->GuestEs = pSelReg->Sel;
4731 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
4732 pVmcs->u64GuestEsBase.u &= UINT32_C(0xffffffff);
4733 pVmcs->u32GuestEsAttr = X86DESCATTR_UNUSABLE;
4734 break;
4735
4736 case X86_SREG_FS:
4737 pVmcs->GuestFs = pSelReg->Sel;
4738 pVmcs->u64GuestFsBase.u = pSelReg->u64Base;
4739 pVmcs->u32GuestFsAttr = X86DESCATTR_UNUSABLE;
4740 break;
4741
4742 case X86_SREG_GS:
4743 pVmcs->GuestGs = pSelReg->Sel;
4744 pVmcs->u64GuestGsBase.u = pSelReg->u64Base;
4745 pVmcs->u32GuestGsAttr = X86DESCATTR_UNUSABLE;
4746 break;
4747 }
4748 }
4749 }
4750
4751 /* Segment attribute bits 31:17 and 11:8 MBZ. */
4752 uint32_t const fValidAttrMask = X86DESCATTR_TYPE | X86DESCATTR_DT | X86DESCATTR_DPL | X86DESCATTR_P
4753 | X86DESCATTR_AVL | X86DESCATTR_L | X86DESCATTR_D | X86DESCATTR_G | X86DESCATTR_UNUSABLE;
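 /* I.e. type (3:0), S (4), DPL (6:5), P (7), AVL (12), L (13), D/B (14), G (15) and the unusable bit (16)
    of the VMX segment access-rights format; reserved bits 11:8 and 31:17 remain zero. */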
4754 /* LDTR. */
4755 {
4756 PCCPUMSELREG pSelReg = &pVCpu->cpum.GstCtx.ldtr;
4757 pVmcs->GuestLdtr = pSelReg->Sel;
4758 pVmcs->u64GuestLdtrBase.u = pSelReg->u64Base;
4759 Assert(X86_IS_CANONICAL(pSelReg->u64Base));
4760 pVmcs->u32GuestLdtrLimit = pSelReg->u32Limit;
4761 pVmcs->u32GuestLdtrAttr = pSelReg->Attr.u & fValidAttrMask;
4762 }
4763
4764 /* TR. */
4765 {
4766 PCCPUMSELREG pSelReg = &pVCpu->cpum.GstCtx.tr;
4767 pVmcs->GuestTr = pSelReg->Sel;
4768 pVmcs->u64GuestTrBase.u = pSelReg->u64Base;
4769 pVmcs->u32GuestTrLimit = pSelReg->u32Limit;
4770 pVmcs->u32GuestTrAttr = pSelReg->Attr.u & fValidAttrMask;
4771 }
4772
4773 /* GDTR. */
4774 pVmcs->u64GuestGdtrBase.u = pVCpu->cpum.GstCtx.gdtr.pGdt;
4775 pVmcs->u32GuestGdtrLimit = pVCpu->cpum.GstCtx.gdtr.cbGdt;
4776
4777 /* IDTR. */
4778 pVmcs->u64GuestIdtrBase.u = pVCpu->cpum.GstCtx.idtr.pIdt;
4779 pVmcs->u32GuestIdtrLimit = pVCpu->cpum.GstCtx.idtr.cbIdt;
4780}
4781
4782
4783/**
4784 * Saves guest non-register state as part of VM-exit.
4785 *
4786 * @param pVCpu The cross context virtual CPU structure.
4787 * @param uExitReason The VM-exit reason.
4788 */
4789IEM_STATIC void iemVmxVmexitSaveGuestNonRegState(PVMCPU pVCpu, uint32_t uExitReason)
4790{
4791 /*
4792 * Save guest non-register state.
4793 * See Intel spec. 27.3.4 "Saving Non-Register State".
4794 */
4795 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
4796
4797 /*
4798 * Activity-state: VM-exits occur before changing the activity state
4799 * of the processor and hence we shouldn't need to change it.
4800 */
4801
4802 /* Interruptibility-state. */
4803 pVmcs->u32GuestIntrState = 0;
4804 if (pVmcs->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
4805 { /** @todo NSTVMX: Virtual-NMI blocking. */ }
4806 else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS))
4807 pVmcs->u32GuestIntrState |= VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI;
4808
4809 if ( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
4810 && pVCpu->cpum.GstCtx.rip == EMGetInhibitInterruptsPC(pVCpu))
4811 {
4812 /** @todo NSTVMX: We can't distinguish between blocking-by-MovSS and blocking-by-STI
4813 * currently. */
4814 pVmcs->u32GuestIntrState |= VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
4815 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
4816 }
4817 /* Nothing to do for SMI/enclave. We don't support enclaves or SMM yet. */
4818
4819 /* Pending debug exceptions. */
4820 if ( uExitReason != VMX_EXIT_INIT_SIGNAL
4821 && uExitReason != VMX_EXIT_SMI
4822 && uExitReason != VMX_EXIT_ERR_MACHINE_CHECK
4823 && !HMVmxIsTrapLikeVmexit(uExitReason))
4824 {
4825 /** @todo NSTVMX: also must exclude VM-exits caused by debug exceptions when
4826 * block-by-MovSS is in effect. */
4827 pVmcs->u64GuestPendingDbgXcpt.u = 0;
4828 }
4829
4830 /** @todo NSTVMX: Save VMX preemption timer value. */
4831
4832 /* PDPTEs. */
4833 /* We don't support EPT yet. */
4834 Assert(!(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_EPT));
4835 pVmcs->u64GuestPdpte0.u = 0;
4836 pVmcs->u64GuestPdpte1.u = 0;
4837 pVmcs->u64GuestPdpte2.u = 0;
4838 pVmcs->u64GuestPdpte3.u = 0;
4839}
4840
4841
4842/**
4843 * Saves the guest-state as part of VM-exit.
4844 *
4845 *
4846 * @param pVCpu The cross context virtual CPU structure.
4847 * @param uExitReason The VM-exit reason.
4848 */
4849IEM_STATIC void iemVmxVmexitSaveGuestState(PVMCPU pVCpu, uint32_t uExitReason)
4850{
4851 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
4852 Assert(pVmcs);
4853
4854 /*
4855 * Save guest control, debug, segment, descriptor-table registers and some MSRs.
4856 */
4857 iemVmxVmexitSaveGuestControlRegsMsrs(pVCpu);
4858 iemVmxVmexitSaveGuestSegRegs(pVCpu);
4859
4860 /*
4861 * Save guest RIP, RSP and RFLAGS.
4862 */
4863 /* We don't support enclave mode yet. */
4864 pVmcs->u64GuestRip.u = pVCpu->cpum.GstCtx.rip;
4865 pVmcs->u64GuestRsp.u = pVCpu->cpum.GstCtx.rsp;
4866 pVmcs->u64GuestRFlags.u = pVCpu->cpum.GstCtx.rflags.u; /** @todo NSTVMX: Check RFLAGS.RF handling. */
4867
4868 /* Save guest non-register state. */
4869 iemVmxVmexitSaveGuestNonRegState(pVCpu, uExitReason);
4870}
4871
4872
4873/**
4874 * Saves the guest MSRs into the VM-exit auto-store MSRs area as part of VM-exit.
4875 *
4876 * @returns VBox status code.
4877 * @param pVCpu The cross context virtual CPU structure.
4878 * @param uExitReason The VM-exit reason (for diagnostic purposes).
4879 */
4880IEM_STATIC int iemVmxVmexitSaveGuestAutoMsrs(PVMCPU pVCpu, uint32_t uExitReason)
4881{
4882 /*
4883 * Save guest MSRs.
4884 * See Intel spec. 27.4 "Saving MSRs".
4885 */
4886 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
4887 const char *const pszFailure = "VMX-abort";
4888
4889 /*
4890 * The VM-exit MSR-store area address need not be a valid guest-physical address if the
4891 * VM-exit MSR-store count is 0. If this is the case, bail early without reading it.
4892 * See Intel spec. 24.7.2 "VM-Exit Controls for MSRs".
4893 */
4894 uint32_t const cMsrs = pVmcs->u32ExitMsrStoreCount;
4895 if (!cMsrs)
4896 return VINF_SUCCESS;
4897
4898 /*
4899 * Verify the MSR auto-store count. Physical CPUs can behave unpredictably if the count
4900 * is exceeded including possibly raising #MC exceptions during VMX transition. Our
4901 * implementation causes a VMX-abort followed by a triple-fault.
4902 */
4903 bool const fIsMsrCountValid = iemVmxIsAutoMsrCountValid(pVCpu, cMsrs);
4904 if (fIsMsrCountValid)
4905 { /* likely */ }
4906 else
4907 IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, kVmxVDiag_Vmexit_MsrStoreCount);
4908
4909 PVMXAUTOMSR pMsr = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pAutoMsrArea);
4910 Assert(pMsr);
4911 for (uint32_t idxMsr = 0; idxMsr < cMsrs; idxMsr++, pMsr++)
4912 {
4913 if ( !pMsr->u32Reserved
4914 && pMsr->u32Msr >> 8 != MSR_IA32_X2APIC_START >> 8
4915 && pMsr->u32Msr != MSR_IA32_SMBASE)
4916 {
4917 VBOXSTRICTRC rcStrict = CPUMQueryGuestMsr(pVCpu, pMsr->u32Msr, &pMsr->u64Value);
4918 if (rcStrict == VINF_SUCCESS)
4919 continue;
4920
4921 /*
4922 * If we're in ring-0, we cannot handle returns to ring-3 at this point and still continue the VM-exit.
4923 * If any guest hypervisor accesses MSRs that require ring-3 handling, we cause a VMX-abort,
4924 * recording the MSR index in the auxiliary info. field and indicating it further by our
4925 * own, specific diagnostic code. Later, we can try to implement handling of the MSR in ring-0
4926 * if possible, or come up with a better, generic solution.
4927 */
4928 pVCpu->cpum.GstCtx.hwvirt.vmx.uAbortAux = pMsr->u32Msr;
4929 VMXVDIAG const enmDiag = rcStrict == VINF_CPUM_R3_MSR_READ
4930 ? kVmxVDiag_Vmexit_MsrStoreRing3
4931 : kVmxVDiag_Vmexit_MsrStore;
4932 IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, enmDiag);
4933 }
4934 else
4935 {
4936 pVCpu->cpum.GstCtx.hwvirt.vmx.uAbortAux = pMsr->u32Msr;
4937 IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, kVmxVDiag_Vmexit_MsrStoreRsvd);
4938 }
4939 }
4940
4941 RTGCPHYS const GCPhysAutoMsrArea = pVmcs->u64AddrExitMsrStore.u;
4942 int rc = PGMPhysSimpleWriteGCPhys(pVCpu->CTX_SUFF(pVM), GCPhysAutoMsrArea,
4943 pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pAutoMsrArea), VMX_V_AUTOMSR_AREA_SIZE);
4944 if (RT_SUCCESS(rc))
4945 { /* likely */ }
4946 else
4947 {
4948 AssertMsgFailed(("VM-exit: Failed to write MSR auto-store area at %#RGp, rc=%Rrc\n", GCPhysAutoMsrArea, rc));
4949 IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, kVmxVDiag_Vmexit_MsrStorePtrWritePhys);
4950 }
4951
4952 NOREF(uExitReason);
4953 NOREF(pszFailure);
4954 return VINF_SUCCESS;
4955}
4956
4957
4958/**
4959 * Performs a VMX abort (due to a fatal error during VM-exit).
4960 *
4961 * @returns VBox status code.
4962 * @param pVCpu The cross context virtual CPU structure.
4963 * @param enmAbort The VMX abort reason.
4964 */
4965IEM_STATIC int iemVmxAbort(PVMCPU pVCpu, VMXABORT enmAbort)
4966{
4967 /*
4968 * Perform the VMX abort.
4969 * See Intel spec. 27.7 "VMX Aborts".
4970 */
4971 LogFunc(("enmAbort=%u (%s) -> RESET\n", enmAbort, HMVmxGetAbortDesc(enmAbort)));
4972
4973 /* We don't support SMX yet. */
4974 pVCpu->cpum.GstCtx.hwvirt.vmx.enmAbort = enmAbort;
4975 if (IEM_VMX_HAS_CURRENT_VMCS(pVCpu))
4976 {
4977 RTGCPHYS const GCPhysVmcs = IEM_VMX_GET_CURRENT_VMCS(pVCpu);
4978 uint32_t const offVmxAbort = RT_OFFSETOF(VMXVVMCS, u32VmxAbortId);
4979 PGMPhysSimpleWriteGCPhys(pVCpu->CTX_SUFF(pVM), GCPhysVmcs + offVmxAbort, &enmAbort, sizeof(enmAbort));
4980 }
4981
4982 return VINF_EM_TRIPLE_FAULT;
4983}
4984
4985
4986/**
4987 * Loads host control registers, debug registers and MSRs as part of VM-exit.
4988 *
4989 * @param pVCpu The cross context virtual CPU structure.
4990 */
4991IEM_STATIC void iemVmxVmexitLoadHostControlRegsMsrs(PVMCPU pVCpu)
4992{
4993 /*
4994 * Load host control registers, debug registers and MSRs.
4995 * See Intel spec. 27.5.1 "Loading Host Control Registers, Debug Registers, MSRs".
4996 */
4997 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
4998 bool const fHostInLongMode = RT_BOOL(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE);
4999
5000 /* CR0. */
5001 {
5002 /* Bits 63:32, 28:19, 17, 15:6, ET, CD, NW and CR0 MB1 bits are not modified. */
5003 uint64_t const uCr0Fixed0 = CPUMGetGuestIa32VmxCr0Fixed0(pVCpu);
5004 uint64_t const fCr0IgnMask = UINT64_C(0xffffffff1ffaffc0) | X86_CR0_ET | X86_CR0_CD | X86_CR0_NW | uCr0Fixed0;
5005 uint64_t const uHostCr0 = pVmcs->u64HostCr0.u;
5006 uint64_t const uGuestCr0 = pVCpu->cpum.GstCtx.cr0;
5007 uint64_t const uValidCr0 = (uHostCr0 & ~fCr0IgnMask) | (uGuestCr0 & fCr0IgnMask);
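 /* Bits within fCr0IgnMask keep their current guest values; all other bits are taken from the host CR0 field. */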
5008 CPUMSetGuestCR0(pVCpu, uValidCr0);
5009 }
5010
5011 /* CR4. */
5012 {
5013 /* CR4 MB1 bits are not modified. */
5014 uint64_t const fCr4IgnMask = CPUMGetGuestIa32VmxCr4Fixed0(pVCpu);
5015 uint64_t const uHostCr4 = pVmcs->u64HostCr4.u;
5016 uint64_t const uGuestCr4 = pVCpu->cpum.GstCtx.cr4;
5017 uint64_t uValidCr4 = (uHostCr4 & ~fCr4IgnMask) | (uGuestCr4 & fCr4IgnMask);
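 /* Per Intel spec. 27.5.1, CR4.PAE is forced set when the host is in long mode and CR4.PCIDE is cleared otherwise. */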
5018 if (fHostInLongMode)
5019 uValidCr4 |= X86_CR4_PAE;
5020 else
5021 uValidCr4 &= ~X86_CR4_PCIDE;
5022 CPUMSetGuestCR4(pVCpu, uValidCr4);
5023 }
5024
5025 /* CR3 (host value validated while checking host-state during VM-entry). */
5026 pVCpu->cpum.GstCtx.cr3 = pVmcs->u64HostCr3.u;
5027
5028 /* DR7. */
5029 pVCpu->cpum.GstCtx.dr[7] = X86_DR7_INIT_VAL;
5030
5031 /** @todo NSTVMX: Support IA32_DEBUGCTL MSR */
5032
5033 /* Save SYSENTER CS, ESP, EIP (host value validated while checking host-state during VM-entry). */
5034 pVCpu->cpum.GstCtx.SysEnter.eip = pVmcs->u64HostSysenterEip.u;
5035 pVCpu->cpum.GstCtx.SysEnter.esp = pVmcs->u64HostSysenterEsp.u;
5036 pVCpu->cpum.GstCtx.SysEnter.cs = pVmcs->u32HostSysenterCs;
5037
5038 /* FS, GS bases are loaded later while we load host segment registers. */
5039
5040 /* EFER MSR (host value validated while checking host-state during VM-entry). */
5041 if (pVmcs->u32ExitCtls & VMX_EXIT_CTLS_LOAD_EFER_MSR)
5042 pVCpu->cpum.GstCtx.msrEFER = pVmcs->u64HostEferMsr.u;
5043 else if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
5044 {
5045 if (fHostInLongMode)
5046 pVCpu->cpum.GstCtx.msrEFER |= (MSR_K6_EFER_LMA | MSR_K6_EFER_LME);
5047 else
5048 pVCpu->cpum.GstCtx.msrEFER &= ~(MSR_K6_EFER_LMA | MSR_K6_EFER_LME);
5049 }
5050
5051 /* We don't support IA32_PERF_GLOBAL_CTRL MSR yet. */
5052
5053 /* PAT MSR (host value is validated while checking host-state during VM-entry). */
5054 if (pVmcs->u32ExitCtls & VMX_EXIT_CTLS_LOAD_PAT_MSR)
5055 pVCpu->cpum.GstCtx.msrPAT = pVmcs->u64HostPatMsr.u;
5056
5057 /* We don't support IA32_BNDCFGS MSR yet. */
5058}
5059
5060
5061/**
5062 * Loads host segment registers, GDTR, IDTR, LDTR and TR as part of VM-exit.
5063 *
5064 * @param pVCpu The cross context virtual CPU structure.
5065 */
5066IEM_STATIC void iemVmxVmexitLoadHostSegRegs(PVMCPU pVCpu)
5067{
5068 /*
5069 * Load host segment registers, GDTR, IDTR, LDTR and TR.
5070 * See Intel spec. 27.5.2 "Loading Host Segment and Descriptor-Table Registers".
5071 *
5072 * Warning! Be careful to not touch fields that are reserved by VT-x,
5073 * e.g. segment limit high bits stored in segment attributes (in bits 11:8).
5074 */
5075 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
5076 bool const fHostInLongMode = RT_BOOL(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE);
5077
5078 /* CS, SS, ES, DS, FS, GS. */
5079 for (unsigned iSegReg = 0; iSegReg < X86_SREG_COUNT; iSegReg++)
5080 {
5081 RTSEL const HostSel = iemVmxVmcsGetHostSelReg(pVmcs, iSegReg);
5082 bool const fUnusable = RT_BOOL(HostSel == 0);
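 /* A null host selector marks the segment unusable; the VM-entry host-state checks guarantee that CS and TR
    are never null here (and SS only when the host is in long mode). */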
5083
5084 /* Selector. */
5085 pVCpu->cpum.GstCtx.aSRegs[iSegReg].Sel = HostSel;
5086 pVCpu->cpum.GstCtx.aSRegs[iSegReg].ValidSel = HostSel;
5087 pVCpu->cpum.GstCtx.aSRegs[iSegReg].fFlags = CPUMSELREG_FLAGS_VALID;
5088
5089 /* Limit. */
5090 pVCpu->cpum.GstCtx.aSRegs[iSegReg].u32Limit = 0xffffffff;
5091
5092 /* Base and Attributes. */
5093 switch (iSegReg)
5094 {
5095 case X86_SREG_CS:
5096 {
5097 pVCpu->cpum.GstCtx.cs.u64Base = 0;
5098 pVCpu->cpum.GstCtx.cs.Attr.n.u4Type = X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ | X86_SEL_TYPE_ACCESSED;
5099 pVCpu->cpum.GstCtx.cs.Attr.n.u1DescType = 1;
5100 pVCpu->cpum.GstCtx.cs.Attr.n.u2Dpl = 0;
5101 pVCpu->cpum.GstCtx.cs.Attr.n.u1Present = 1;
5102 pVCpu->cpum.GstCtx.cs.Attr.n.u1Long = fHostInLongMode;
5103 pVCpu->cpum.GstCtx.cs.Attr.n.u1DefBig = !fHostInLongMode;
5104 pVCpu->cpum.GstCtx.cs.Attr.n.u1Granularity = 1;
5105 Assert(!pVCpu->cpum.GstCtx.cs.Attr.n.u1Unusable);
5106 Assert(!fUnusable);
5107 break;
5108 }
5109
5110 case X86_SREG_SS:
5111 case X86_SREG_ES:
5112 case X86_SREG_DS:
5113 {
5114 pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base = 0;
5115 pVCpu->cpum.GstCtx.aSRegs[iSegReg].Attr.n.u4Type = X86_SEL_TYPE_RW | X86_SEL_TYPE_ACCESSED;
5116 pVCpu->cpum.GstCtx.aSRegs[iSegReg].Attr.n.u1DescType = 1;
5117 pVCpu->cpum.GstCtx.aSRegs[iSegReg].Attr.n.u2Dpl = 0;
5118 pVCpu->cpum.GstCtx.aSRegs[iSegReg].Attr.n.u1Present = 1;
5119 pVCpu->cpum.GstCtx.aSRegs[iSegReg].Attr.n.u1DefBig = 1;
5120 pVCpu->cpum.GstCtx.aSRegs[iSegReg].Attr.n.u1Granularity = 1;
5121 pVCpu->cpum.GstCtx.aSRegs[iSegReg].Attr.n.u1Unusable = fUnusable;
5122 break;
5123 }
5124
5125 case X86_SREG_FS:
5126 {
5127 Assert(X86_IS_CANONICAL(pVmcs->u64HostFsBase.u));
5128 pVCpu->cpum.GstCtx.fs.u64Base = !fUnusable ? pVmcs->u64HostFsBase.u : 0;
5129 pVCpu->cpum.GstCtx.fs.Attr.n.u4Type = X86_SEL_TYPE_RW | X86_SEL_TYPE_ACCESSED;
5130 pVCpu->cpum.GstCtx.fs.Attr.n.u1DescType = 1;
5131 pVCpu->cpum.GstCtx.fs.Attr.n.u2Dpl = 0;
5132 pVCpu->cpum.GstCtx.fs.Attr.n.u1Present = 1;
5133 pVCpu->cpum.GstCtx.fs.Attr.n.u1DefBig = 1;
5134 pVCpu->cpum.GstCtx.fs.Attr.n.u1Granularity = 1;
5135 pVCpu->cpum.GstCtx.fs.Attr.n.u1Unusable = fUnusable;
5136 break;
5137 }
5138
5139 case X86_SREG_GS:
5140 {
5141 Assert(X86_IS_CANONICAL(pVmcs->u64HostGsBase.u));
5142 pVCpu->cpum.GstCtx.gs.u64Base = !fUnusable ? pVmcs->u64HostGsBase.u : 0;
5143 pVCpu->cpum.GstCtx.gs.Attr.n.u4Type = X86_SEL_TYPE_RW | X86_SEL_TYPE_ACCESSED;
5144 pVCpu->cpum.GstCtx.gs.Attr.n.u1DescType = 1;
5145 pVCpu->cpum.GstCtx.gs.Attr.n.u2Dpl = 0;
5146 pVCpu->cpum.GstCtx.gs.Attr.n.u1Present = 1;
5147 pVCpu->cpum.GstCtx.gs.Attr.n.u1DefBig = 1;
5148 pVCpu->cpum.GstCtx.gs.Attr.n.u1Granularity = 1;
5149 pVCpu->cpum.GstCtx.gs.Attr.n.u1Unusable = fUnusable;
5150 break;
5151 }
5152 }
5153 }
5154
5155 /* TR. */
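 /* Per Intel spec. 27.5.2, TR is loaded as a present, busy 32-bit TSS with DPL 0 and a limit of 67H. */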
5156 Assert(X86_IS_CANONICAL(pVmcs->u64HostTrBase.u));
5157 Assert(!pVCpu->cpum.GstCtx.tr.Attr.n.u1Unusable);
5158 pVCpu->cpum.GstCtx.tr.Sel = pVmcs->HostTr;
5159 pVCpu->cpum.GstCtx.tr.ValidSel = pVmcs->HostTr;
5160 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
5161 pVCpu->cpum.GstCtx.tr.u32Limit = X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN;
5162 pVCpu->cpum.GstCtx.tr.u64Base = pVmcs->u64HostTrBase.u;
5163 pVCpu->cpum.GstCtx.tr.Attr.n.u4Type = X86_SEL_TYPE_SYS_386_TSS_BUSY;
5164 pVCpu->cpum.GstCtx.tr.Attr.n.u1DescType = 0;
5165 pVCpu->cpum.GstCtx.tr.Attr.n.u2Dpl = 0;
5166 pVCpu->cpum.GstCtx.tr.Attr.n.u1Present = 1;
5167 pVCpu->cpum.GstCtx.tr.Attr.n.u1DefBig = 0;
5168 pVCpu->cpum.GstCtx.tr.Attr.n.u1Granularity = 0;
5169
5170 /* LDTR. */
5171 pVCpu->cpum.GstCtx.ldtr.Sel = 0;
5172 pVCpu->cpum.GstCtx.ldtr.ValidSel = 0;
5173 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
5174 pVCpu->cpum.GstCtx.ldtr.u32Limit = 0;
5175 pVCpu->cpum.GstCtx.ldtr.u64Base = 0;
5176 pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Unusable = 1;
5177
5178 /* GDTR. */
5179 Assert(X86_IS_CANONICAL(pVmcs->u64HostGdtrBase.u));
5180 pVCpu->cpum.GstCtx.gdtr.pGdt = pVmcs->u64HostGdtrBase.u;
5181 pVCpu->cpum.GstCtx.gdtr.cbGdt = 0xffff;
5182
5183 /* IDTR.*/
5184 Assert(X86_IS_CANONICAL(pVmcs->u64HostIdtrBase.u));
5185 pVCpu->cpum.GstCtx.idtr.pIdt = pVmcs->u64HostIdtrBase.u;
5186 pVCpu->cpum.GstCtx.idtr.cbIdt = 0xffff;
5187}
5188
5189
5190/**
5191 * Checks host PDPTEs as part of VM-exit.
5192 *
5193 * @param pVCpu The cross context virtual CPU structure.
5194 * @param uExitReason The VM-exit reason (for logging purposes).
5195 */
5196IEM_STATIC int iemVmxVmexitCheckHostPdptes(PVMCPU pVCpu, uint32_t uExitReason)
5197{
5198 /*
5199 * Check host PDPTEs.
5200 * See Intel spec. 27.5.4 "Checking and Loading Host Page-Directory-Pointer-Table Entries".
5201 */
5202 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
5203 const char *const pszFailure = "VMX-abort";
5204 bool const fHostInLongMode = RT_BOOL(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE);
5205
5206 if ( (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
5207 && !fHostInLongMode)
5208 {
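 /* PDPTEs are only relevant when the host returns to PAE paging, i.e. CR4.PAE set while not in long mode;
    in long mode or with non-PAE paging they are not used and need no checking. */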
5209 uint64_t const uHostCr3 = pVCpu->cpum.GstCtx.cr3 & X86_CR3_PAE_PAGE_MASK;
5210 X86PDPE aPdptes[X86_PG_PAE_PDPE_ENTRIES];
5211 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), (void *)&aPdptes[0], uHostCr3, sizeof(aPdptes));
5212 if (RT_SUCCESS(rc))
5213 {
5214 for (unsigned iPdpte = 0; iPdpte < RT_ELEMENTS(aPdptes); iPdpte++)
5215 {
5216 if ( !(aPdptes[iPdpte].u & X86_PDPE_P)
5217 || !(aPdptes[iPdpte].u & X86_PDPE_PAE_MBZ_MASK))
5218 { /* likely */ }
5219 else
5220 {
5221 VMXVDIAG const enmDiag = iemVmxGetDiagVmexitPdpteRsvd(iPdpte);
5222 IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, enmDiag);
5223 }
5224 }
5225 }
5226 else
5227 IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, kVmxVDiag_Vmexit_HostPdpteCr3ReadPhys);
5228 }
5229
5230 NOREF(pszFailure);
5231 NOREF(uExitReason);
5232 return VINF_SUCCESS;
5233}
5234
5235
5236/**
5237 * Loads the host MSRs from the VM-exit auto-load MSRs area as part of VM-exit.
5238 *
5239 * @returns VBox status code.
5240 * @param pVCpu The cross context virtual CPU structure.
5241 * @param uExitReason The VM-exit reason (for logging purposes).
5242 */
5243IEM_STATIC int iemVmxVmexitLoadHostAutoMsrs(PVMCPU pVCpu, uint32_t uExitReason)
5244{
5245 /*
5246 * Load host MSRs.
5247 * See Intel spec. 27.6 "Loading MSRs".
5248 */
5249 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
5250 const char *const pszFailure = "VMX-abort";
5251
5252 /*
5253 * The VM-exit MSR-load area address need not be a valid guest-physical address if the
5254 * VM-exit MSR load count is 0. If this is the case, bail early without reading it.
5255 * See Intel spec. 24.7.2 "VM-Exit Controls for MSRs".
5256 */
5257 uint32_t const cMsrs = pVmcs->u32ExitMsrLoadCount;
5258 if (!cMsrs)
5259 return VINF_SUCCESS;
5260
5261 /*
5262 * Verify the MSR auto-load count. Physical CPUs can behave unpredictably if the count
5263 * is exceeded including possibly raising #MC exceptions during VMX transition. Our
5264 * implementation causes a VMX-abort followed by a triple-fault.
5265 */
5266 bool const fIsMsrCountValid = iemVmxIsAutoMsrCountValid(pVCpu, cMsrs);
5267 if (fIsMsrCountValid)
5268 { /* likely */ }
5269 else
5270 IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, kVmxVDiag_Vmexit_MsrLoadCount);
5271
5272 RTGCPHYS const GCPhysAutoMsrArea = pVmcs->u64AddrExitMsrLoad.u;
5273 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pAutoMsrArea),
5274 GCPhysAutoMsrArea, VMX_V_AUTOMSR_AREA_SIZE);
5275 if (RT_SUCCESS(rc))
5276 {
5277 PCVMXAUTOMSR pMsr = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pAutoMsrArea);
5278 Assert(pMsr);
5279 for (uint32_t idxMsr = 0; idxMsr < cMsrs; idxMsr++, pMsr++)
5280 {
5281 if ( !pMsr->u32Reserved
5282 && pMsr->u32Msr != MSR_K8_FS_BASE
5283 && pMsr->u32Msr != MSR_K8_GS_BASE
5284 && pMsr->u32Msr != MSR_K6_EFER
5285 && pMsr->u32Msr >> 8 != MSR_IA32_X2APIC_START >> 8
5286 && pMsr->u32Msr != MSR_IA32_SMM_MONITOR_CTL)
5287 {
5288 VBOXSTRICTRC rcStrict = CPUMSetGuestMsr(pVCpu, pMsr->u32Msr, pMsr->u64Value);
5289 if (rcStrict == VINF_SUCCESS)
5290 continue;
5291
5292 /*
5293 * If we're in ring-0, we cannot handle returns to ring-3 at this point and still continue the VM-exit.
5294 * If any guest hypervisor loads MSRs that require ring-3 handling, we cause a VMX-abort,
5295 * recording the MSR index in the auxiliary info. field and indicating it further by our
5296 * own, specific diagnostic code. Later, we can try to implement handling of the MSR in ring-0
5297 * if possible, or come up with a better, generic solution.
5298 */
5299 pVCpu->cpum.GstCtx.hwvirt.vmx.uAbortAux = pMsr->u32Msr;
5300 VMXVDIAG const enmDiag = rcStrict == VINF_CPUM_R3_MSR_WRITE
5301 ? kVmxVDiag_Vmexit_MsrLoadRing3
5302 : kVmxVDiag_Vmexit_MsrLoad;
5303 IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, enmDiag);
5304 }
5305 else
5306 IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, kVmxVDiag_Vmexit_MsrLoadRsvd);
5307 }
5308 }
5309 else
5310 {
5311 AssertMsgFailed(("VM-exit: Failed to read MSR auto-load area at %#RGp, rc=%Rrc\n", GCPhysAutoMsrArea, rc));
5312 IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, kVmxVDiag_Vmexit_MsrLoadPtrReadPhys);
5313 }
5314
5315 NOREF(uExitReason);
5316 NOREF(pszFailure);
5317 return VINF_SUCCESS;
5318}
5319
5320
5321/**
5322 * Loads the host state as part of VM-exit.
5323 *
5324 * @returns VBox status code.
5325 * @param pVCpu The cross context virtual CPU structure.
5326 * @param uExitReason The VM-exit reason (for logging purposes).
5327 */
5328IEM_STATIC int iemVmxVmexitLoadHostState(PVMCPU pVCpu, uint32_t uExitReason)
5329{
5330 /*
5331 * Load host state.
5332 * See Intel spec. 27.5 "Loading Host State".
5333 */
5334 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
5335 bool const fHostInLongMode = RT_BOOL(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE);
5336
5337 /* We cannot return from a long-mode guest to a host that is not in long mode. */
5338 if ( CPUMIsGuestInLongMode(pVCpu)
5339 && !fHostInLongMode)
5340 {
5341 Log(("VM-exit from long-mode guest to host not in long-mode -> VMX-Abort\n"));
5342 return iemVmxAbort(pVCpu, VMXABORT_HOST_NOT_IN_LONG_MODE);
5343 }
5344
5345 /*
5346 * Load host control, debug, segment, descriptor-table registers and some MSRs.
5347 */
5348 iemVmxVmexitLoadHostControlRegsMsrs(pVCpu);
5349 iemVmxVmexitLoadHostSegRegs(pVCpu);
5350
5351 /*
5352 * Load host RIP, RSP and RFLAGS.
5353 * See Intel spec. 27.5.3 "Loading Host RIP, RSP and RFLAGS"
5354 */
5355 pVCpu->cpum.GstCtx.rip = pVmcs->u64HostRip.u;
5356 pVCpu->cpum.GstCtx.rsp = pVmcs->u64HostRsp.u;
5357 pVCpu->cpum.GstCtx.rflags.u = X86_EFL_1;
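 /* Per Intel spec. 27.5.3, RFLAGS is cleared on VM-exit except bit 1 which is always 1 (X86_EFL_1). */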
5358
5359 /* Update non-register state. */
5360 iemVmxVmexitRestoreForceFlags(pVCpu);
5361
5362 /* Clear address range monitoring. */
5363 EMMonitorWaitClear(pVCpu);
5364
5365 /* Perform the VMX transition (PGM updates). */
5366 VBOXSTRICTRC rcStrict = iemVmxWorldSwitch(pVCpu);
5367 if (rcStrict == VINF_SUCCESS)
5368 {
5369 /* Check host PDPTEs. */
5370 /** @todo r=ramshankar: I don't know if PGM does this for us already or not... */
5371 int rc = iemVmxVmexitCheckHostPdptes(pVCpu, uExitReason);
5372 if (RT_FAILURE(rc))
5373 {
5374 Log(("VM-exit failed while restoring host PDPTEs -> VMX-Abort\n"));
5375 return iemVmxAbort(pVCpu, VMXBOART_HOST_PDPTE);
5376 }
5377 }
5378 else if (RT_SUCCESS(rcStrict))
5379 {
5380 Log3(("VM-exit: iemVmxWorldSwitch returns %Rrc (uExitReason=%u) -> Setting passup status\n", VBOXSTRICTRC_VAL(rcStrict),
5381 uExitReason));
5382 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5383 }
5384 else
5385 {
5386 Log3(("VM-exit: iemVmxWorldSwitch failed! rc=%Rrc (uExitReason=%u)\n", VBOXSTRICTRC_VAL(rcStrict), uExitReason));
5387 return rcStrict;
5388 }
5389
5390 Assert(rcStrict == VINF_SUCCESS);
5391
5392 /* Load MSRs from the VM-exit auto-load MSR area. */
5393 int rc = iemVmxVmexitLoadHostAutoMsrs(pVCpu, uExitReason);
5394 if (RT_FAILURE(rc))
5395 {
5396 Log(("VM-exit failed while loading host MSRs -> VMX-Abort\n"));
5397 return iemVmxAbort(pVCpu, VMXABORT_LOAD_HOST_MSR);
5398 }
5399
5400 return VINF_SUCCESS;
5401}
5402
5403
5404/**
5405 * VMX VM-exit handler.
5406 *
5407 * @returns Strict VBox status code.
5408 * @param pVCpu The cross context virtual CPU structure.
5409 * @param uExitReason The VM-exit reason.
5410 * @param cbInstr The instruction length.
5411 */
5412IEM_STATIC VBOXSTRICTRC iemVmxVmexit(PVMCPU pVCpu, uint32_t uExitReason, uint32_t cbInstr)
5413{
5414 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
5415 Assert(pVmcs);
5416
5417 pVmcs->u32RoExitReason = uExitReason;
5418 pVmcs->u32RoExitInstrLen = cbInstr;
5419
5420 /** @todo NSTVMX: IEMGetCurrentXcpt will be VM-exit interruption info. */
5421 /** @todo NSTVMX: The source event should be recorded in IDT-vectoring info
5422 * during injection. */
5423
5424 /*
5425 * Save the guest state back into the VMCS.
5426 * We only need to save the state when the VM-entry was successful.
5427 */
5428 if ( uExitReason != VMX_EXIT_ERR_INVALID_GUEST_STATE
5429 && uExitReason != VMX_EXIT_ERR_MSR_LOAD
5430 && uExitReason != VMX_EXIT_ERR_MACHINE_CHECK)
5431 {
5432 iemVmxVmexitSaveGuestState(pVCpu, uExitReason);
5433 int rc = iemVmxVmexitSaveGuestAutoMsrs(pVCpu, uExitReason);
5434 if (RT_SUCCESS(rc))
5435 { /* likely */ }
5436 else
5437 return iemVmxAbort(pVCpu, VMXABORT_SAVE_GUEST_MSRS);
5438 }
5439
5440 int rc = iemVmxVmexitLoadHostState(pVCpu, uExitReason);
5441 if (RT_FAILURE(rc))
5442 return rc;
5443
5444 /** @todo NSTVMX: rest of VM-exit. */
5445
5446 /* We're no longer in nested-guest execution mode. */
5447 pVCpu->cpum.GstCtx.hwvirt.vmx.fInVmxNonRootMode = false;
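 /* We are back in VMX root operation, resuming the guest hypervisor at the host RIP/RSP loaded above. */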
5448
5449 return VINF_SUCCESS;
5450}
5451
5452/**
5453 * Implements 'VMXON'.
5454 */
5455IEM_CIMPL_DEF_2(iemCImpl_vmxon, uint8_t, iEffSeg, RTGCPTR, GCPtrVmxon)
5456{
5457 return iemVmxVmxon(pVCpu, cbInstr, iEffSeg, GCPtrVmxon, NULL /* pExitInfo */);
5458}
5459
5460
5461/**
5462 * Implements 'VMXOFF'.
5463 *
5464 * @remarks Common VMX instruction checks are already expected to have been done by the caller,
5465 * i.e. CR4.VMXE, Real/V86 mode, EFER/CS.L checks.
5466 */
5467IEM_CIMPL_DEF_0(iemCImpl_vmxoff)
5468{
5469# if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && !defined(IN_RING3)
5470 RT_NOREF2(pVCpu, cbInstr);
5471 return VINF_EM_RAW_EMULATE_INSTR;
5472# else
5473 if (IEM_IS_VMX_NON_ROOT_MODE(pVCpu))
5474 {
5475 /** @todo NSTVMX: intercept. */
5476 }
5477
5478 /* CPL. */
5479 if (pVCpu->iem.s.uCpl > 0)
5480 {
5481 Log(("vmxoff: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
5482 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxoff_Cpl;
5483 return iemRaiseGeneralProtectionFault0(pVCpu);
5484 }
5485
5486 /* Dual monitor treatment of SMIs and SMM. */
5487 uint64_t const fSmmMonitorCtl = CPUMGetGuestIa32SmmMonitorCtl(pVCpu);
5488 if (fSmmMonitorCtl & MSR_IA32_SMM_MONITOR_VALID)
5489 {
5490 iemVmxVmFail(pVCpu, VMXINSTRERR_VMXOFF_DUAL_MON);
5491 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
5492 return VINF_SUCCESS;
5493 }
5494
5495 /*
5496 * Record that we're no longer in VMX root operation, unblock INIT and unblock/enable A20M.
5497 */
5498 pVCpu->cpum.GstCtx.hwvirt.vmx.fInVmxRootMode = false;
5499 Assert(!pVCpu->cpum.GstCtx.hwvirt.vmx.fInVmxNonRootMode);
5500
5501 if (fSmmMonitorCtl & MSR_IA32_SMM_MONITOR_VMXOFF_UNBLOCK_SMI)
5502 { /** @todo NSTVMX: Unblock SMI. */ }
5503
5504 EMMonitorWaitClear(pVCpu);
5505 /** @todo NSTVMX: Unblock and enable A20M. */
5506
5507 iemVmxVmSucceed(pVCpu);
5508 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
5509# if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && defined(IN_RING3)
5510 return EMR3SetExecutionPolicy(pVCpu->CTX_SUFF(pVM)->pUVM, EMEXECPOLICY_IEM_ALL, false);
5511# else
5512 return VINF_SUCCESS;
5513# endif
5514# endif
5515}
5516
5517
5518/**
5519 * Implements 'VMLAUNCH'.
5520 */
5521IEM_CIMPL_DEF_0(iemCImpl_vmlaunch)
5522{
5523 return iemVmxVmlaunchVmresume(pVCpu, cbInstr, VMXINSTRID_VMLAUNCH, NULL /* pExitInfo */);
5524}
5525
5526
5527/**
5528 * Implements 'VMRESUME'.
5529 */
5530IEM_CIMPL_DEF_0(iemCImpl_vmresume)
5531{
5532 return iemVmxVmlaunchVmresume(pVCpu, cbInstr, VMXINSTRID_VMRESUME, NULL /* pExitInfo */);
5533}
5534
5535
5536/**
5537 * Implements 'VMPTRLD'.
5538 */
5539IEM_CIMPL_DEF_2(iemCImpl_vmptrld, uint8_t, iEffSeg, RTGCPTR, GCPtrVmcs)
5540{
5541 return iemVmxVmptrld(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, NULL /* pExitInfo */);
5542}
5543
5544
5545/**
5546 * Implements 'VMPTRST'.
5547 */
5548IEM_CIMPL_DEF_2(iemCImpl_vmptrst, uint8_t, iEffSeg, RTGCPTR, GCPtrVmcs)
5549{
5550 return iemVmxVmptrst(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, NULL /* pExitInfo */);
5551}
5552
5553
5554/**
5555 * Implements 'VMCLEAR'.
5556 */
5557IEM_CIMPL_DEF_2(iemCImpl_vmclear, uint8_t, iEffSeg, RTGCPTR, GCPtrVmcs)
5558{
5559 return iemVmxVmclear(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, NULL /* pExitInfo */);
5560}
5561
5562
5563/**
5564 * Implements 'VMWRITE' register.
5565 */
5566IEM_CIMPL_DEF_2(iemCImpl_vmwrite_reg, uint64_t, u64Val, uint64_t, u64FieldEnc)
5567{
5568 return iemVmxVmwrite(pVCpu, cbInstr, UINT8_MAX /* iEffSeg */, IEMMODE_64BIT /* N/A */, u64Val, u64FieldEnc,
5569 NULL /* pExitInfo */);
5570}
5571
5572
5573/**
5574 * Implements 'VMWRITE' memory.
5575 */
5576IEM_CIMPL_DEF_4(iemCImpl_vmwrite_mem, uint8_t, iEffSeg, IEMMODE, enmEffAddrMode, RTGCPTR, GCPtrVal, uint32_t, u64FieldEnc)
5577{
5578 return iemVmxVmwrite(pVCpu, cbInstr, iEffSeg, enmEffAddrMode, GCPtrVal, u64FieldEnc, NULL /* pExitInfo */);
5579}
5580
5581
5582/**
5583 * Implements 'VMREAD' 64-bit register.
5584 */
5585IEM_CIMPL_DEF_2(iemCImpl_vmread64_reg, uint64_t *, pu64Dst, uint64_t, u64FieldEnc)
5586{
5587 return iemVmxVmreadReg64(pVCpu, cbInstr, pu64Dst, u64FieldEnc, NULL /* pExitInfo */);
5588}
5589
5590
5591/**
5592 * Implements 'VMREAD' 32-bit register.
5593 */
5594IEM_CIMPL_DEF_2(iemCImpl_vmread32_reg, uint32_t *, pu32Dst, uint32_t, u32FieldEnc)
5595{
5596 return iemVmxVmreadReg32(pVCpu, cbInstr, pu32Dst, u32FieldEnc, NULL /* pExitInfo */);
5597}
5598
5599
5600/**
5601 * Implements 'VMREAD' memory.
5602 */
5603IEM_CIMPL_DEF_4(iemCImpl_vmread_mem, uint8_t, iEffSeg, IEMMODE, enmEffAddrMode, RTGCPTR, GCPtrDst, uint32_t, u64FieldEnc)
5604{
5605 return iemVmxVmreadMem(pVCpu, cbInstr, iEffSeg, enmEffAddrMode, GCPtrDst, u64FieldEnc, NULL /* pExitInfo */);
5606}
5607
5608#endif
5609