VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/HMVMXAll.cpp@ 97145

Last change on this file since 97145 was 96407, checked in by vboxsync, 2 years ago

scm copyright and license note update

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 79.1 KB
Line 
1/* $Id: HMVMXAll.cpp 96407 2022-08-22 17:43:14Z vboxsync $ */
2/** @file
3 * HM VMX (VT-x) - All contexts.
4 */
5
6/*
7 * Copyright (C) 2018-2022 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/*********************************************************************************************************************************
30* Header Files *
31*********************************************************************************************************************************/
32#define LOG_GROUP LOG_GROUP_HM
33#define VMCPU_INCL_CPUM_GST_CTX
34#include "HMInternal.h"
35#include <VBox/vmm/hmvmxinline.h>
36#include <VBox/vmm/vmcc.h>
37#include <VBox/vmm/pdmapi.h>
38#include <iprt/errcore.h>
39
40#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
41# include <iprt/asm-amd64-x86.h> /* ASMCpuId_EAX */
42#endif
43
44
45/*********************************************************************************************************************************
46* Global Variables *
47*********************************************************************************************************************************/
48#define VMXV_DIAG_DESC(a_Def, a_Desc) #a_Def " - " #a_Desc
49/** VMX virtual-instructions and VM-exit diagnostics. */
50static const char * const g_apszVmxVDiagDesc[] =
51{
52 /* Internal processing errors. */
53 VMXV_DIAG_DESC(kVmxVDiag_None , "None" ),
54 VMXV_DIAG_DESC(kVmxVDiag_Ipe_1 , "Ipe_1" ),
55 VMXV_DIAG_DESC(kVmxVDiag_Ipe_2 , "Ipe_2" ),
56 VMXV_DIAG_DESC(kVmxVDiag_Ipe_3 , "Ipe_3" ),
57 VMXV_DIAG_DESC(kVmxVDiag_Ipe_4 , "Ipe_4" ),
58 VMXV_DIAG_DESC(kVmxVDiag_Ipe_5 , "Ipe_5" ),
59 VMXV_DIAG_DESC(kVmxVDiag_Ipe_6 , "Ipe_6" ),
60 VMXV_DIAG_DESC(kVmxVDiag_Ipe_7 , "Ipe_7" ),
61 VMXV_DIAG_DESC(kVmxVDiag_Ipe_8 , "Ipe_8" ),
62 VMXV_DIAG_DESC(kVmxVDiag_Ipe_9 , "Ipe_9" ),
63 VMXV_DIAG_DESC(kVmxVDiag_Ipe_10 , "Ipe_10" ),
64 VMXV_DIAG_DESC(kVmxVDiag_Ipe_11 , "Ipe_11" ),
65 VMXV_DIAG_DESC(kVmxVDiag_Ipe_12 , "Ipe_12" ),
66 VMXV_DIAG_DESC(kVmxVDiag_Ipe_13 , "Ipe_13" ),
67 VMXV_DIAG_DESC(kVmxVDiag_Ipe_14 , "Ipe_14" ),
68 VMXV_DIAG_DESC(kVmxVDiag_Ipe_15 , "Ipe_15" ),
69 VMXV_DIAG_DESC(kVmxVDiag_Ipe_16 , "Ipe_16" ),
70 /* VMXON. */
71 VMXV_DIAG_DESC(kVmxVDiag_Vmxon_A20M , "A20M" ),
72 VMXV_DIAG_DESC(kVmxVDiag_Vmxon_Cpl , "Cpl" ),
73 VMXV_DIAG_DESC(kVmxVDiag_Vmxon_Cr0Fixed0 , "Cr0Fixed0" ),
74 VMXV_DIAG_DESC(kVmxVDiag_Vmxon_Cr0Fixed1 , "Cr0Fixed1" ),
75 VMXV_DIAG_DESC(kVmxVDiag_Vmxon_Cr4Fixed0 , "Cr4Fixed0" ),
76 VMXV_DIAG_DESC(kVmxVDiag_Vmxon_Cr4Fixed1 , "Cr4Fixed1" ),
77 VMXV_DIAG_DESC(kVmxVDiag_Vmxon_Intercept , "Intercept" ),
78 VMXV_DIAG_DESC(kVmxVDiag_Vmxon_LongModeCS , "LongModeCS" ),
79 VMXV_DIAG_DESC(kVmxVDiag_Vmxon_MsrFeatCtl , "MsrFeatCtl" ),
80 VMXV_DIAG_DESC(kVmxVDiag_Vmxon_PtrAbnormal , "PtrAbnormal" ),
81 VMXV_DIAG_DESC(kVmxVDiag_Vmxon_PtrAlign , "PtrAlign" ),
82 VMXV_DIAG_DESC(kVmxVDiag_Vmxon_PtrMap , "PtrMap" ),
83 VMXV_DIAG_DESC(kVmxVDiag_Vmxon_PtrReadPhys , "PtrReadPhys" ),
84 VMXV_DIAG_DESC(kVmxVDiag_Vmxon_PtrWidth , "PtrWidth" ),
85 VMXV_DIAG_DESC(kVmxVDiag_Vmxon_RealOrV86Mode , "RealOrV86Mode" ),
86 VMXV_DIAG_DESC(kVmxVDiag_Vmxon_ShadowVmcs , "ShadowVmcs" ),
87 VMXV_DIAG_DESC(kVmxVDiag_Vmxon_VmxAlreadyRoot , "VmxAlreadyRoot" ),
88 VMXV_DIAG_DESC(kVmxVDiag_Vmxon_Vmxe , "Vmxe" ),
89 VMXV_DIAG_DESC(kVmxVDiag_Vmxon_VmcsRevId , "VmcsRevId" ),
90 VMXV_DIAG_DESC(kVmxVDiag_Vmxon_VmxRootCpl , "VmxRootCpl" ),
91 /* VMXOFF. */
92 VMXV_DIAG_DESC(kVmxVDiag_Vmxoff_Cpl , "Cpl" ),
93 VMXV_DIAG_DESC(kVmxVDiag_Vmxoff_Intercept , "Intercept" ),
94 VMXV_DIAG_DESC(kVmxVDiag_Vmxoff_LongModeCS , "LongModeCS" ),
95 VMXV_DIAG_DESC(kVmxVDiag_Vmxoff_RealOrV86Mode , "RealOrV86Mode" ),
96 VMXV_DIAG_DESC(kVmxVDiag_Vmxoff_Vmxe , "Vmxe" ),
97 VMXV_DIAG_DESC(kVmxVDiag_Vmxoff_VmxRoot , "VmxRoot" ),
98 /* VMPTRLD. */
99 VMXV_DIAG_DESC(kVmxVDiag_Vmptrld_Cpl , "Cpl" ),
100 VMXV_DIAG_DESC(kVmxVDiag_Vmptrld_LongModeCS , "LongModeCS" ),
101 VMXV_DIAG_DESC(kVmxVDiag_Vmptrld_PtrAbnormal , "PtrAbnormal" ),
102 VMXV_DIAG_DESC(kVmxVDiag_Vmptrld_PtrAlign , "PtrAlign" ),
103 VMXV_DIAG_DESC(kVmxVDiag_Vmptrld_PtrMap , "PtrMap" ),
104 VMXV_DIAG_DESC(kVmxVDiag_Vmptrld_PtrReadPhys , "PtrReadPhys" ),
105 VMXV_DIAG_DESC(kVmxVDiag_Vmptrld_PtrVmxon , "PtrVmxon" ),
106 VMXV_DIAG_DESC(kVmxVDiag_Vmptrld_PtrWidth , "PtrWidth" ),
107 VMXV_DIAG_DESC(kVmxVDiag_Vmptrld_RealOrV86Mode , "RealOrV86Mode" ),
108 VMXV_DIAG_DESC(kVmxVDiag_Vmptrld_RevPtrReadPhys , "RevPtrReadPhys" ),
109 VMXV_DIAG_DESC(kVmxVDiag_Vmptrld_ShadowVmcs , "ShadowVmcs" ),
110 VMXV_DIAG_DESC(kVmxVDiag_Vmptrld_VmcsRevId , "VmcsRevId" ),
111 VMXV_DIAG_DESC(kVmxVDiag_Vmptrld_VmxRoot , "VmxRoot" ),
112 /* VMPTRST. */
113 VMXV_DIAG_DESC(kVmxVDiag_Vmptrst_Cpl , "Cpl" ),
114 VMXV_DIAG_DESC(kVmxVDiag_Vmptrst_LongModeCS , "LongModeCS" ),
115 VMXV_DIAG_DESC(kVmxVDiag_Vmptrst_PtrMap , "PtrMap" ),
116 VMXV_DIAG_DESC(kVmxVDiag_Vmptrst_RealOrV86Mode , "RealOrV86Mode" ),
117 VMXV_DIAG_DESC(kVmxVDiag_Vmptrst_VmxRoot , "VmxRoot" ),
118 /* VMCLEAR. */
119 VMXV_DIAG_DESC(kVmxVDiag_Vmclear_Cpl , "Cpl" ),
120 VMXV_DIAG_DESC(kVmxVDiag_Vmclear_LongModeCS , "LongModeCS" ),
121 VMXV_DIAG_DESC(kVmxVDiag_Vmclear_PtrAbnormal , "PtrAbnormal" ),
122 VMXV_DIAG_DESC(kVmxVDiag_Vmclear_PtrAlign , "PtrAlign" ),
123 VMXV_DIAG_DESC(kVmxVDiag_Vmclear_PtrMap , "PtrMap" ),
124 VMXV_DIAG_DESC(kVmxVDiag_Vmclear_PtrReadPhys , "PtrReadPhys" ),
125 VMXV_DIAG_DESC(kVmxVDiag_Vmclear_PtrVmxon , "PtrVmxon" ),
126 VMXV_DIAG_DESC(kVmxVDiag_Vmclear_PtrWidth , "PtrWidth" ),
127 VMXV_DIAG_DESC(kVmxVDiag_Vmclear_RealOrV86Mode , "RealOrV86Mode" ),
128 VMXV_DIAG_DESC(kVmxVDiag_Vmclear_VmxRoot , "VmxRoot" ),
129 /* VMWRITE. */
130 VMXV_DIAG_DESC(kVmxVDiag_Vmwrite_Cpl , "Cpl" ),
131 VMXV_DIAG_DESC(kVmxVDiag_Vmwrite_FieldInvalid , "FieldInvalid" ),
132 VMXV_DIAG_DESC(kVmxVDiag_Vmwrite_FieldRo , "FieldRo" ),
133 VMXV_DIAG_DESC(kVmxVDiag_Vmwrite_LinkPtrInvalid , "LinkPtrInvalid" ),
134 VMXV_DIAG_DESC(kVmxVDiag_Vmwrite_LongModeCS , "LongModeCS" ),
135 VMXV_DIAG_DESC(kVmxVDiag_Vmwrite_PtrInvalid , "PtrInvalid" ),
136 VMXV_DIAG_DESC(kVmxVDiag_Vmwrite_PtrMap , "PtrMap" ),
137 VMXV_DIAG_DESC(kVmxVDiag_Vmwrite_RealOrV86Mode , "RealOrV86Mode" ),
138 VMXV_DIAG_DESC(kVmxVDiag_Vmwrite_VmxRoot , "VmxRoot" ),
139 /* VMREAD. */
140 VMXV_DIAG_DESC(kVmxVDiag_Vmread_Cpl , "Cpl" ),
141 VMXV_DIAG_DESC(kVmxVDiag_Vmread_FieldInvalid , "FieldInvalid" ),
142 VMXV_DIAG_DESC(kVmxVDiag_Vmread_LinkPtrInvalid , "LinkPtrInvalid" ),
143 VMXV_DIAG_DESC(kVmxVDiag_Vmread_LongModeCS , "LongModeCS" ),
144 VMXV_DIAG_DESC(kVmxVDiag_Vmread_PtrInvalid , "PtrInvalid" ),
145 VMXV_DIAG_DESC(kVmxVDiag_Vmread_PtrMap , "PtrMap" ),
146 VMXV_DIAG_DESC(kVmxVDiag_Vmread_RealOrV86Mode , "RealOrV86Mode" ),
147 VMXV_DIAG_DESC(kVmxVDiag_Vmread_VmxRoot , "VmxRoot" ),
148 /* INVVPID. */
149 VMXV_DIAG_DESC(kVmxVDiag_Invvpid_Cpl , "Cpl" ),
150 VMXV_DIAG_DESC(kVmxVDiag_Invvpid_DescRsvd , "DescRsvd" ),
151 VMXV_DIAG_DESC(kVmxVDiag_Invvpid_LongModeCS , "LongModeCS" ),
152 VMXV_DIAG_DESC(kVmxVDiag_Invvpid_RealOrV86Mode , "RealOrV86Mode" ),
153 VMXV_DIAG_DESC(kVmxVDiag_Invvpid_TypeInvalid , "TypeInvalid" ),
154 VMXV_DIAG_DESC(kVmxVDiag_Invvpid_Type0InvalidAddr , "Type0InvalidAddr" ),
155 VMXV_DIAG_DESC(kVmxVDiag_Invvpid_Type0InvalidVpid , "Type0InvalidVpid" ),
156 VMXV_DIAG_DESC(kVmxVDiag_Invvpid_Type1InvalidVpid , "Type1InvalidVpid" ),
157 VMXV_DIAG_DESC(kVmxVDiag_Invvpid_Type3InvalidVpid , "Type3InvalidVpid" ),
158 VMXV_DIAG_DESC(kVmxVDiag_Invvpid_VmxRoot , "VmxRoot" ),
159 /* INVEPT. */
160 VMXV_DIAG_DESC(kVmxVDiag_Invept_Cpl , "Cpl" ),
161 VMXV_DIAG_DESC(kVmxVDiag_Invept_DescRsvd , "DescRsvd" ),
162 VMXV_DIAG_DESC(kVmxVDiag_Invept_EptpInvalid , "EptpInvalid" ),
163 VMXV_DIAG_DESC(kVmxVDiag_Invept_LongModeCS , "LongModeCS" ),
164 VMXV_DIAG_DESC(kVmxVDiag_Invept_RealOrV86Mode , "RealOrV86Mode" ),
165 VMXV_DIAG_DESC(kVmxVDiag_Invept_TypeInvalid , "TypeInvalid" ),
166 VMXV_DIAG_DESC(kVmxVDiag_Invept_VmxRoot , "VmxRoot" ),
167 /* VMLAUNCH/VMRESUME. */
168 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_AddrApicAccess , "AddrApicAccess" ),
169 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_AddrApicAccessEqVirtApic , "AddrApicAccessEqVirtApic" ),
170 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_AddrApicAccessHandlerReg , "AddrApicAccessHandlerReg" ),
171 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_AddrEntryMsrLoad , "AddrEntryMsrLoad" ),
172 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_AddrExitMsrLoad , "AddrExitMsrLoad" ),
173 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_AddrExitMsrStore , "AddrExitMsrStore" ),
174 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_AddrIoBitmapA , "AddrIoBitmapA" ),
175 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_AddrIoBitmapB , "AddrIoBitmapB" ),
176 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_AddrMsrBitmap , "AddrMsrBitmap" ),
177 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_AddrVirtApicPage , "AddrVirtApicPage" ),
178 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_AddrVmcsLinkPtr , "AddrVmcsLinkPtr" ),
179 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_AddrVmreadBitmap , "AddrVmreadBitmap" ),
180 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_AddrVmwriteBitmap , "AddrVmwriteBitmap" ),
181 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_ApicRegVirt , "ApicRegVirt" ),
182 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_BlocKMovSS , "BlockMovSS" ),
183 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_Cpl , "Cpl" ),
184 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_Cr3TargetCount , "Cr3TargetCount" ),
185 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_EntryCtlsAllowed1 , "EntryCtlsAllowed1" ),
186 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_EntryCtlsDisallowed0 , "EntryCtlsDisallowed0" ),
187 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_EntryInstrLen , "EntryInstrLen" ),
188 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_EntryInstrLenZero , "EntryInstrLenZero" ),
189 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_EntryIntInfoErrCodePe , "EntryIntInfoErrCodePe" ),
190 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_EntryIntInfoErrCodeVec , "EntryIntInfoErrCodeVec" ),
191 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_EntryIntInfoTypeVecRsvd , "EntryIntInfoTypeVecRsvd" ),
192 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_EntryXcptErrCodeRsvd , "EntryXcptErrCodeRsvd" ),
193 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_EptpAccessDirty , "EptpAccessDirty" ),
194 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_EptpPageWalkLength , "EptpPageWalkLength" ),
195 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_EptpMemType , "EptpMemType" ),
196 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_EptpRsvd , "EptpRsvd" ),
197 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_ExitCtlsAllowed1 , "ExitCtlsAllowed1" ),
198 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_ExitCtlsDisallowed0 , "ExitCtlsDisallowed0" ),
199 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestActStateHlt , "GuestActStateHlt" ),
200 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestActStateRsvd , "GuestActStateRsvd" ),
201 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestActStateShutdown , "GuestActStateShutdown" ),
202 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestActStateSsDpl , "GuestActStateSsDpl" ),
203 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestActStateStiMovSs , "GuestActStateStiMovSs" ),
204 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestCr0Fixed0 , "GuestCr0Fixed0" ),
205 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestCr0Fixed1 , "GuestCr0Fixed1" ),
206 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestCr0PgPe , "GuestCr0PgPe" ),
207 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestCr3 , "GuestCr3" ),
208 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestCr4Fixed0 , "GuestCr4Fixed0" ),
209 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestCr4Fixed1 , "GuestCr4Fixed1" ),
210 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestDebugCtl , "GuestDebugCtl" ),
211 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestDr7 , "GuestDr7" ),
212 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestEferMsr , "GuestEferMsr" ),
213 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestEferMsrRsvd , "GuestEferMsrRsvd" ),
214 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestGdtrBase , "GuestGdtrBase" ),
215 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestGdtrLimit , "GuestGdtrLimit" ),
216 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestIdtrBase , "GuestIdtrBase" ),
217 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestIdtrLimit , "GuestIdtrLimit" ),
218 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestIntStateEnclave , "GuestIntStateEnclave" ),
219 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestIntStateExtInt , "GuestIntStateExtInt" ),
220 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestIntStateNmi , "GuestIntStateNmi" ),
221 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestIntStateRFlagsSti , "GuestIntStateRFlagsSti" ),
222 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestIntStateRsvd , "GuestIntStateRsvd" ),
223 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestIntStateSmi , "GuestIntStateSmi" ),
224 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestIntStateStiMovSs , "GuestIntStateStiMovSs" ),
225 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestIntStateVirtNmi , "GuestIntStateVirtNmi" ),
226 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestPae , "GuestPae" ),
227 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestPatMsr , "GuestPatMsr" ),
228 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestPcide , "GuestPcide" ),
229 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestPdpte , "GuestPdpteRsvd" ),
230 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestPndDbgXcptBsNoTf , "GuestPndDbgXcptBsNoTf" ),
231 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestPndDbgXcptBsTf , "GuestPndDbgXcptBsTf" ),
232 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestPndDbgXcptRsvd , "GuestPndDbgXcptRsvd" ),
233 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestPndDbgXcptRtm , "GuestPndDbgXcptRtm" ),
234 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestRip , "GuestRip" ),
235 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestRipRsvd , "GuestRipRsvd" ),
236 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestRFlagsIf , "GuestRFlagsIf" ),
237 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestRFlagsRsvd , "GuestRFlagsRsvd" ),
238 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestRFlagsVm , "GuestRFlagsVm" ),
239 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegAttrCsDefBig , "GuestSegAttrCsDefBig" ),
240 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegAttrCsDplEqSs , "GuestSegAttrCsDplEqSs" ),
241 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegAttrCsDplLtSs , "GuestSegAttrCsDplLtSs" ),
242 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegAttrCsDplZero , "GuestSegAttrCsDplZero" ),
243 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegAttrCsType , "GuestSegAttrCsType" ),
244 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegAttrCsTypeRead , "GuestSegAttrCsTypeRead" ),
245 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegAttrDescTypeCs , "GuestSegAttrDescTypeCs" ),
246 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegAttrDescTypeDs , "GuestSegAttrDescTypeDs" ),
247 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegAttrDescTypeEs , "GuestSegAttrDescTypeEs" ),
248 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegAttrDescTypeFs , "GuestSegAttrDescTypeFs" ),
249 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegAttrDescTypeGs , "GuestSegAttrDescTypeGs" ),
250 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegAttrDescTypeSs , "GuestSegAttrDescTypeSs" ),
251 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegAttrDplRplCs , "GuestSegAttrDplRplCs" ),
252 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegAttrDplRplDs , "GuestSegAttrDplRplDs" ),
253 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegAttrDplRplEs , "GuestSegAttrDplRplEs" ),
254 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegAttrDplRplFs , "GuestSegAttrDplRplFs" ),
255 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegAttrDplRplGs , "GuestSegAttrDplRplGs" ),
256 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegAttrDplRplSs , "GuestSegAttrDplRplSs" ),
257 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegAttrGranCs , "GuestSegAttrGranCs" ),
258 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegAttrGranDs , "GuestSegAttrGranDs" ),
259 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegAttrGranEs , "GuestSegAttrGranEs" ),
260 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegAttrGranFs , "GuestSegAttrGranFs" ),
261 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegAttrGranGs , "GuestSegAttrGranGs" ),
262 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegAttrGranSs , "GuestSegAttrGranSs" ),
263 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegAttrLdtrDescType , "GuestSegAttrLdtrDescType" ),
264 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegAttrLdtrGran , "GuestSegAttrLdtrGran" ),
265 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegAttrLdtrPresent , "GuestSegAttrLdtrPresent" ),
266 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegAttrLdtrRsvd , "GuestSegAttrLdtrRsvd" ),
267 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegAttrLdtrType , "GuestSegAttrLdtrType" ),
268 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegAttrPresentCs , "GuestSegAttrPresentCs" ),
269 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegAttrPresentDs , "GuestSegAttrPresentDs" ),
270 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegAttrPresentEs , "GuestSegAttrPresentEs" ),
271 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegAttrPresentFs , "GuestSegAttrPresentFs" ),
272 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegAttrPresentGs , "GuestSegAttrPresentGs" ),
273 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegAttrPresentSs , "GuestSegAttrPresentSs" ),
274 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegAttrRsvdCs , "GuestSegAttrRsvdCs" ),
275 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegAttrRsvdDs , "GuestSegAttrRsvdDs" ),
276 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegAttrRsvdEs , "GuestSegAttrRsvdEs" ),
277 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegAttrRsvdFs , "GuestSegAttrRsvdFs" ),
278 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegAttrRsvdGs , "GuestSegAttrRsvdGs" ),
279 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegAttrRsvdSs , "GuestSegAttrRsvdSs" ),
280 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegAttrSsDplEqRpl , "GuestSegAttrSsDplEqRpl" ),
281 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegAttrSsDplZero , "GuestSegAttrSsDplZero " ),
282 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegAttrSsType , "GuestSegAttrSsType" ),
283 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegAttrTrDescType , "GuestSegAttrTrDescType" ),
284 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegAttrTrGran , "GuestSegAttrTrGran" ),
285 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegAttrTrPresent , "GuestSegAttrTrPresent" ),
286 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegAttrTrRsvd , "GuestSegAttrTrRsvd" ),
287 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegAttrTrType , "GuestSegAttrTrType" ),
288 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegAttrTrUnusable , "GuestSegAttrTrUnusable" ),
289 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegAttrTypeAccCs , "GuestSegAttrTypeAccCs" ),
290 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegAttrTypeAccDs , "GuestSegAttrTypeAccDs" ),
291 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegAttrTypeAccEs , "GuestSegAttrTypeAccEs" ),
292 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegAttrTypeAccFs , "GuestSegAttrTypeAccFs" ),
293 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegAttrTypeAccGs , "GuestSegAttrTypeAccGs" ),
294 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegAttrTypeAccSs , "GuestSegAttrTypeAccSs" ),
295 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegAttrV86Cs , "GuestSegAttrV86Cs" ),
296 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegAttrV86Ds , "GuestSegAttrV86Ds" ),
297 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegAttrV86Es , "GuestSegAttrV86Es" ),
298 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegAttrV86Fs , "GuestSegAttrV86Fs" ),
299 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegAttrV86Gs , "GuestSegAttrV86Gs" ),
300 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegAttrV86Ss , "GuestSegAttrV86Ss" ),
301 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegBaseCs , "GuestSegBaseCs" ),
302 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegBaseDs , "GuestSegBaseDs" ),
303 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegBaseEs , "GuestSegBaseEs" ),
304 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegBaseFs , "GuestSegBaseFs" ),
305 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegBaseGs , "GuestSegBaseGs" ),
306 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegBaseLdtr , "GuestSegBaseLdtr" ),
307 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegBaseSs , "GuestSegBaseSs" ),
308 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegBaseTr , "GuestSegBaseTr" ),
309 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegBaseV86Cs , "GuestSegBaseV86Cs" ),
310 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegBaseV86Ds , "GuestSegBaseV86Ds" ),
311 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegBaseV86Es , "GuestSegBaseV86Es" ),
312 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegBaseV86Fs , "GuestSegBaseV86Fs" ),
313 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegBaseV86Gs , "GuestSegBaseV86Gs" ),
314 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegBaseV86Ss , "GuestSegBaseV86Ss" ),
315 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegLimitV86Cs , "GuestSegLimitV86Cs" ),
316 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegLimitV86Ds , "GuestSegLimitV86Ds" ),
317 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegLimitV86Es , "GuestSegLimitV86Es" ),
318 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegLimitV86Fs , "GuestSegLimitV86Fs" ),
319 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegLimitV86Gs , "GuestSegLimitV86Gs" ),
320 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegLimitV86Ss , "GuestSegLimitV86Ss" ),
321 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegSelCsSsRpl , "GuestSegSelCsSsRpl" ),
322 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegSelLdtr , "GuestSegSelLdtr" ),
323 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegSelTr , "GuestSegSelTr" ),
324 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSysenterEspEip , "GuestSysenterEspEip" ),
325 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_VmcsLinkPtrCurVmcs , "VmcsLinkPtrCurVmcs" ),
326 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_VmcsLinkPtrReadPhys , "VmcsLinkPtrReadPhys" ),
327 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_VmcsLinkPtrRevId , "VmcsLinkPtrRevId" ),
328 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_VmcsLinkPtrShadow , "VmcsLinkPtrShadow" ),
329 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_HostCr0Fixed0 , "HostCr0Fixed0" ),
330 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_HostCr0Fixed1 , "HostCr0Fixed1" ),
331 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_HostCr3 , "HostCr3" ),
332 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_HostCr4Fixed0 , "HostCr4Fixed0" ),
333 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_HostCr4Fixed1 , "HostCr4Fixed1" ),
334 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_HostCr4Pae , "HostCr4Pae" ),
335 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_HostCr4Pcide , "HostCr4Pcide" ),
336 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_HostCsTr , "HostCsTr" ),
337 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_HostEferMsr , "HostEferMsr" ),
338 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_HostEferMsrRsvd , "HostEferMsrRsvd" ),
339 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_HostGuestLongMode , "HostGuestLongMode" ),
340 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_HostGuestLongModeNoCpu , "HostGuestLongModeNoCpu" ),
341 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_HostLongMode , "HostLongMode" ),
342 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_HostPatMsr , "HostPatMsr" ),
343 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_HostRip , "HostRip" ),
344 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_HostRipRsvd , "HostRipRsvd" ),
345 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_HostSel , "HostSel" ),
346 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_HostSegBase , "HostSegBase" ),
347 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_HostSs , "HostSs" ),
348 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_HostSysenterEspEip , "HostSysenterEspEip" ),
349 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_IoBitmapAPtrReadPhys , "IoBitmapAPtrReadPhys" ),
350 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_IoBitmapBPtrReadPhys , "IoBitmapBPtrReadPhys" ),
351 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_LongModeCS , "LongModeCS" ),
352 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_MsrBitmapPtrReadPhys , "MsrBitmapPtrReadPhys" ),
353 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_MsrLoad , "MsrLoad" ),
354 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_MsrLoadCount , "MsrLoadCount" ),
355 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_MsrLoadPtrReadPhys , "MsrLoadPtrReadPhys" ),
356 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_MsrLoadRing3 , "MsrLoadRing3" ),
357 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_MsrLoadRsvd , "MsrLoadRsvd" ),
358 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_NmiWindowExit , "NmiWindowExit" ),
359 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_PinCtlsAllowed1 , "PinCtlsAllowed1" ),
360 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_PinCtlsDisallowed0 , "PinCtlsDisallowed0" ),
361 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_ProcCtlsAllowed1 , "ProcCtlsAllowed1" ),
362 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_ProcCtlsDisallowed0 , "ProcCtlsDisallowed0" ),
363 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_ProcCtls2Allowed1 , "ProcCtls2Allowed1" ),
364 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_ProcCtls2Disallowed0 , "ProcCtls2Disallowed0" ),
365 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_PtrInvalid , "PtrInvalid" ),
366 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_PtrShadowVmcs , "PtrShadowVmcs" ),
367 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_RealOrV86Mode , "RealOrV86Mode" ),
368 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_SavePreemptTimer , "SavePreemptTimer" ),
369 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_TprThresholdRsvd , "TprThresholdRsvd" ),
370 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_TprThresholdVTpr , "TprThresholdVTpr" ),
371 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_VirtApicPagePtrReadPhys , "VirtApicPageReadPhys" ),
372 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_VirtIntDelivery , "VirtIntDelivery" ),
373 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_VirtNmi , "VirtNmi" ),
374 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_VirtX2ApicTprShadow , "VirtX2ApicTprShadow" ),
375 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_VirtX2ApicVirtApic , "VirtX2ApicVirtApic" ),
376 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_VmcsClear , "VmcsClear" ),
377 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_VmcsLaunch , "VmcsLaunch" ),
378 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_VmreadBitmapPtrReadPhys , "VmreadBitmapPtrReadPhys" ),
379 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_VmwriteBitmapPtrReadPhys , "VmwriteBitmapPtrReadPhys" ),
380 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_VmxRoot , "VmxRoot" ),
381 VMXV_DIAG_DESC(kVmxVDiag_Vmentry_Vpid , "Vpid" ),
382 VMXV_DIAG_DESC(kVmxVDiag_Vmexit_HostPdpte , "HostPdpte" ),
383 VMXV_DIAG_DESC(kVmxVDiag_Vmexit_MsrLoad , "MsrLoad" ),
384 VMXV_DIAG_DESC(kVmxVDiag_Vmexit_MsrLoadCount , "MsrLoadCount" ),
385 VMXV_DIAG_DESC(kVmxVDiag_Vmexit_MsrLoadPtrReadPhys , "MsrLoadPtrReadPhys" ),
386 VMXV_DIAG_DESC(kVmxVDiag_Vmexit_MsrLoadRing3 , "MsrLoadRing3" ),
387 VMXV_DIAG_DESC(kVmxVDiag_Vmexit_MsrLoadRsvd , "MsrLoadRsvd" ),
388 VMXV_DIAG_DESC(kVmxVDiag_Vmexit_MsrStore , "MsrStore" ),
389 VMXV_DIAG_DESC(kVmxVDiag_Vmexit_MsrStoreCount , "MsrStoreCount" ),
390 VMXV_DIAG_DESC(kVmxVDiag_Vmexit_MsrStorePtrReadPhys , "MsrStorePtrReadPhys" ),
391 VMXV_DIAG_DESC(kVmxVDiag_Vmexit_MsrStorePtrWritePhys , "MsrStorePtrWritePhys" ),
392 VMXV_DIAG_DESC(kVmxVDiag_Vmexit_MsrStoreRing3 , "MsrStoreRing3" ),
393 VMXV_DIAG_DESC(kVmxVDiag_Vmexit_MsrStoreRsvd , "MsrStoreRsvd" ),
394 VMXV_DIAG_DESC(kVmxVDiag_Vmexit_VirtApicPagePtrWritePhys , "VirtApicPagePtrWritePhys" )
395 /* kVmxVDiag_End */
396};
397AssertCompile(RT_ELEMENTS(g_apszVmxVDiagDesc) == kVmxVDiag_End);
398#undef VMXV_DIAG_DESC
399
400
401/**
402 * Gets the descriptive name of a VMX instruction/VM-exit diagnostic code.
403 *
404 * @returns The descriptive string.
405 * @param enmDiag The VMX diagnostic.
406 */
407VMM_INT_DECL(const char *) HMGetVmxDiagDesc(VMXVDIAG enmDiag)
408{
409 if (RT_LIKELY((unsigned)enmDiag < RT_ELEMENTS(g_apszVmxVDiagDesc)))
410 return g_apszVmxVDiagDesc[enmDiag];
411 return "Unknown/invalid";
412}
413
414
/**
 * Checks if a code selector (CS) is suitable for execution using hardware-assisted
 * VMX when unrestricted execution isn't available.
 *
 * @returns true if selector is suitable for VMX, otherwise
 *        false.
 * @param   pSel        Pointer to the selector to check (CS).
 * @param   uStackDpl   The CPL, aka the DPL of the stack segment.
 */
static bool hmVmxIsCodeSelectorOk(PCCPUMSELREG pSel, unsigned uStackDpl)
{
    /*
     * Segment must be an accessed code segment, it must be present and it must
     * be usable.
     * Note! These are all standard requirements and if CS holds anything else
     *       we've got buggy code somewhere!
     */
    AssertCompile(X86DESCATTR_TYPE == 0xf);
    AssertMsgReturn(   (pSel->Attr.u & (X86_SEL_TYPE_ACCESSED | X86_SEL_TYPE_CODE | X86DESCATTR_DT | X86DESCATTR_P | X86DESCATTR_UNUSABLE))
                    == (X86_SEL_TYPE_ACCESSED | X86_SEL_TYPE_CODE | X86DESCATTR_DT | X86DESCATTR_P),
                    ("%#x\n", pSel->Attr.u),
                    false);

    /*
     * For conforming segments, CS.DPL must be <= SS.DPL, while CS.DPL must equal
     * SS.DPL for non-conforming segments.
     * Note! This is also a hard requirement like above.
     */
    AssertMsgReturn(  pSel->Attr.n.u4Type & X86_SEL_TYPE_CONF
                    ? pSel->Attr.n.u2Dpl <= uStackDpl
                    : pSel->Attr.n.u2Dpl == uStackDpl,
                    ("u4Type=%#x u2Dpl=%u uStackDpl=%u\n", pSel->Attr.n.u4Type, pSel->Attr.n.u2Dpl, uStackDpl),
                    false);

    /*
     * The following two requirements are VT-x specific:
     *   - G bit must be set if any high limit bits are set.
     *   - G bit must be clear if any low limit bits are clear.
     */
    if (   ((pSel->u32Limit & 0xfff00000) == 0x00000000 ||  pSel->Attr.n.u1Granularity)
        && ((pSel->u32Limit & 0x00000fff) == 0x00000fff || !pSel->Attr.n.u1Granularity))
        return true;
    return false;
}
459
460
461/**
462 * Checks if a data selector (DS/ES/FS/GS) is suitable for execution using
463 * hardware-assisted VMX when unrestricted execution isn't available.
464 *
465 * @returns true if selector is suitable for VMX, otherwise
466 * false.
467 * @param pSel Pointer to the selector to check
468 * (DS/ES/FS/GS).
469 */
470static bool hmVmxIsDataSelectorOk(PCCPUMSELREG pSel)
471{
472 /*
473 * Unusable segments are OK. These days they should be marked as such, as
474 * but as an alternative we for old saved states and AMD<->VT-x migration
475 * we also treat segments with all the attributes cleared as unusable.
476 */
477 if (pSel->Attr.n.u1Unusable || !pSel->Attr.u)
478 return true;
479
480 /** @todo tighten these checks. Will require CPUM load adjusting. */
481
482 /* Segment must be accessed. */
483 if (pSel->Attr.u & X86_SEL_TYPE_ACCESSED)
484 {
485 /* Code segments must also be readable. */
486 if ( !(pSel->Attr.u & X86_SEL_TYPE_CODE)
487 || (pSel->Attr.u & X86_SEL_TYPE_READ))
488 {
489 /* The S bit must be set. */
490 if (pSel->Attr.n.u1DescType)
491 {
492 /* Except for conforming segments, DPL >= RPL. */
493 if ( pSel->Attr.n.u2Dpl >= (pSel->Sel & X86_SEL_RPL)
494 || pSel->Attr.n.u4Type >= X86_SEL_TYPE_ER_ACC)
495 {
496 /* Segment must be present. */
497 if (pSel->Attr.n.u1Present)
498 {
499 /*
500 * The following two requirements are VT-x specific:
501 * - G bit must be set if any high limit bits are set.
502 * - G bit must be clear if any low limit bits are clear.
503 */
504 if ( ((pSel->u32Limit & 0xfff00000) == 0x00000000 || pSel->Attr.n.u1Granularity)
505 && ((pSel->u32Limit & 0x00000fff) == 0x00000fff || !pSel->Attr.n.u1Granularity))
506 return true;
507 }
508 }
509 }
510 }
511 }
512
513 return false;
514}
515
516
/**
 * Checks if the stack selector (SS) is suitable for execution using
 * hardware-assisted VMX when unrestricted execution isn't available.
 *
 * @returns true if selector is suitable for VMX, otherwise
 *        false.
 * @param   pSel        Pointer to the selector to check (SS).
 */
static bool hmVmxIsStackSelectorOk(PCCPUMSELREG pSel)
{
    /*
     * Unusable segments are OK.  They should normally be marked as such; as a
     * fallback for old saved states and AMD<->VT-x migration we also treat
     * segments with all attribute bits cleared as unusable.
     */
    /** @todo r=bird: actually all zeroes isn't gonna cut it... SS.DPL == CPL. */
    if (pSel->Attr.n.u1Unusable || !pSel->Attr.u)
        return true;

    /*
     * Segment must be an accessed writable segment, it must be present.
     * Note! These are all standard requirements and if SS holds anything else
     *       we've got buggy code somewhere!
     */
    AssertCompile(X86DESCATTR_TYPE == 0xf);
    AssertMsgReturn(   (pSel->Attr.u & (X86_SEL_TYPE_ACCESSED | X86_SEL_TYPE_WRITE | X86DESCATTR_DT | X86DESCATTR_P | X86_SEL_TYPE_CODE))
                    == (X86_SEL_TYPE_ACCESSED | X86_SEL_TYPE_WRITE | X86DESCATTR_DT | X86DESCATTR_P),
                    ("%#x\n", pSel->Attr.u), false);

    /*
     * DPL must equal RPL. But in real mode or soon after enabling protected
     * mode, it might not be.
     */
    if (pSel->Attr.n.u2Dpl == (pSel->Sel & X86_SEL_RPL))
    {
        /*
         * The following two requirements are VT-x specific:
         *   - G bit must be set if any high limit bits are set.
         *   - G bit must be clear if any low limit bits are clear.
         */
        if (   ((pSel->u32Limit & 0xfff00000) == 0x00000000 ||  pSel->Attr.n.u1Granularity)
            && ((pSel->u32Limit & 0x00000fff) == 0x00000fff || !pSel->Attr.n.u1Granularity))
            return true;
    }
    return false;
}
563
564
565#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
566/**
567 * Checks if the CPU is subject to the "VMX-Preemption Timer Does Not Count Down at
568 * the Rate Specified" erratum.
569 *
570 * Errata names and related steppings:
571 * - BA86 - D0.
572 * - AAX65 - C2.
573 * - AAU65 - C2, K0.
574 * - AAO95 - B1.
575 * - AAT59 - C2.
576 * - AAK139 - D0.
577 * - AAM126 - C0, C1, D0.
578 * - AAN92 - B1.
579 * - AAJ124 - C0, D0.
580 * - AAP86 - B1.
581 *
582 * Steppings: B1, C0, C1, C2, D0, K0.
583 *
584 * @returns @c true if subject to it, @c false if not.
585 */
586VMM_INT_DECL(bool) HMIsSubjectToVmxPreemptTimerErratum(void)
587{
588 uint32_t u = ASMCpuId_EAX(1);
589 u &= ~(RT_BIT_32(14) | RT_BIT_32(15) | RT_BIT_32(28) | RT_BIT_32(29) | RT_BIT_32(30) | RT_BIT_32(31));
590 if ( u == 0x000206E6 /* 323344.pdf - BA86 - D0 - Xeon Processor 7500 Series */
591 || u == 0x00020652 /* 323056.pdf - AAX65 - C2 - Xeon Processor L3406 */
592 /* 322814.pdf - AAT59 - C2 - CoreTM i7-600, i5-500, i5-400 and i3-300 Mobile Processor Series */
593 /* 322911.pdf - AAU65 - C2 - CoreTM i5-600, i3-500 Desktop Processor Series and Intel Pentium Processor G6950 */
594 || u == 0x00020655 /* 322911.pdf - AAU65 - K0 - CoreTM i5-600, i3-500 Desktop Processor Series and Intel Pentium Processor G6950 */
595 || u == 0x000106E5 /* 322373.pdf - AAO95 - B1 - Xeon Processor 3400 Series */
596 /* 322166.pdf - AAN92 - B1 - CoreTM i7-800 and i5-700 Desktop Processor Series */
597 /* 320767.pdf - AAP86 - B1 - Core i7-900 Mobile Processor Extreme Edition Series, Intel Core i7-800 and i7-700 Mobile Processor Series */
598 || u == 0x000106A0 /* 321333.pdf - AAM126 - C0 - Xeon Processor 3500 Series Specification */
599 || u == 0x000106A1 /* 321333.pdf - AAM126 - C1 - Xeon Processor 3500 Series Specification */
600 || u == 0x000106A4 /* 320836.pdf - AAJ124 - C0 - Core i7-900 Desktop Processor Extreme Edition Series and Intel Core i7-900 Desktop Processor Series */
601 || u == 0x000106A5 /* 321333.pdf - AAM126 - D0 - Xeon Processor 3500 Series Specification */
602 /* 321324.pdf - AAK139 - D0 - Xeon Processor 5500 Series Specification */
603 /* 320836.pdf - AAJ124 - D0 - Core i7-900 Desktop Processor Extreme Edition Series and Intel Core i7-900 Desktop Processor Series */
604 || u == 0x000306A8 /* ?????????? - ?????? - ?? - Xeon E3-1220 v2 */
605 )
606 return true;
607 return false;
608}
609#endif
610
611
612/**
613 * Checks if the guest is in a suitable state for hardware-assisted VMX execution.
614 *
615 * @returns @c true if it is suitable, @c false otherwise.
616 * @param pVM The cross context VM structure.
617 * @param pVCpu The cross context virtual CPU structure.
618 * @param pCtx Pointer to the guest CPU context.
619 *
620 * @remarks @a pCtx can be a partial context and thus may not be necessarily the
621 * same as pVCpu->cpum.GstCtx! Thus don't eliminate the @a pCtx parameter.
622 * Secondly, if additional checks are added that require more of the CPU
623 * state, make sure REM (which supplies a partial state) is updated.
624 */
VMM_INT_DECL(bool) HMCanExecuteVmxGuest(PVMCC pVM, PVMCPUCC pVCpu, PCCPUMCTX pCtx)
{
    Assert(HMIsEnabled(pVM));
    bool const fUnrestrictedGuest = CTX_EXPR(pVM->hm.s.vmx.fUnrestrictedGuestCfg, pVM->hmr0.s.vmx.fUnrestrictedGuest, RT_NOTHING);
    /* A real-mode TSS is only set up when unrestricted guest execution is unavailable. */
    Assert(   ( fUnrestrictedGuest && !pVM->hm.s.vmx.pRealModeTSS)
           || (!fUnrestrictedGuest && pVM->hm.s.vmx.pRealModeTSS));

    /* Assume the state is unsuitable until all the checks below have passed. */
    pVCpu->hm.s.fActive = false;

    bool const fSupportsRealMode = fUnrestrictedGuest || PDMVmmDevHeapIsEnabled(pVM);
    if (!fUnrestrictedGuest)
    {
        /*
         * The VMM device heap is a requirement for emulating real mode or protected mode without
         * paging when the unrestricted guest execution feature is missing (VT-x only).
         */
        if (fSupportsRealMode)
        {
            if (CPUMIsGuestInRealModeEx(pCtx))
            {
                /*
                 * In V86 mode (VT-x or not), the CPU enforces real-mode compatible selector
                 * bases, limits, and attributes, i.e. limit must be 64K, base must be selector * 16,
                 * and attributes must be 0x9b for code and 0x93 for data segments.
                 * If this is not true, we cannot execute real mode as V86 and have to fall
                 * back to emulation.
                 */
                if (   pCtx->cs.Sel != (pCtx->cs.u64Base >> 4)
                    || pCtx->ds.Sel != (pCtx->ds.u64Base >> 4)
                    || pCtx->es.Sel != (pCtx->es.u64Base >> 4)
                    || pCtx->ss.Sel != (pCtx->ss.u64Base >> 4)
                    || pCtx->fs.Sel != (pCtx->fs.u64Base >> 4)
                    || pCtx->gs.Sel != (pCtx->gs.u64Base >> 4))
                {
                    STAM_COUNTER_INC(&pVCpu->hm.s.StatVmxCheckBadRmSelBase);
                    return false;
                }
                if (   (pCtx->cs.u32Limit != 0xffff)
                    || (pCtx->ds.u32Limit != 0xffff)
                    || (pCtx->es.u32Limit != 0xffff)
                    || (pCtx->ss.u32Limit != 0xffff)
                    || (pCtx->fs.u32Limit != 0xffff)
                    || (pCtx->gs.u32Limit != 0xffff))
                {
                    STAM_COUNTER_INC(&pVCpu->hm.s.StatVmxCheckBadRmSelLimit);
                    return false;
                }
                if (   (pCtx->cs.Attr.u != 0x9b)
                    || (pCtx->ds.Attr.u != 0x93)
                    || (pCtx->es.Attr.u != 0x93)
                    || (pCtx->ss.Attr.u != 0x93)
                    || (pCtx->fs.Attr.u != 0x93)
                    || (pCtx->gs.Attr.u != 0x93))
                {
                    STAM_COUNTER_INC(&pVCpu->hm.s.StatVmxCheckBadRmSelAttr);
                    return false;
                }
                STAM_COUNTER_INC(&pVCpu->hm.s.StatVmxCheckRmOk);
            }
            else
            {
                /*
                 * Verify the requirements for executing code in protected mode. VT-x can't
                 * handle the CPU state right after a switch from real to protected mode
                 * (all sorts of RPL & DPL assumptions).
                 */
                PCVMXVMCSINFOSHARED pVmcsInfo = hmGetVmxActiveVmcsInfoShared(pVCpu);
                if (pVmcsInfo->fWasInRealMode)
                {
                    if (!CPUMIsGuestInV86ModeEx(pCtx))
                    {
                        /* The guest switched to protected mode, check if the state is suitable for VT-x. */
                        if ((pCtx->cs.Sel & X86_SEL_RPL) != (pCtx->ss.Sel & X86_SEL_RPL))
                        {
                            STAM_COUNTER_INC(&pVCpu->hm.s.StatVmxCheckBadRpl);
                            return false;
                        }
                        if (   !hmVmxIsCodeSelectorOk(&pCtx->cs, pCtx->ss.Attr.n.u2Dpl)
                            || !hmVmxIsDataSelectorOk(&pCtx->ds)
                            || !hmVmxIsDataSelectorOk(&pCtx->es)
                            || !hmVmxIsDataSelectorOk(&pCtx->fs)
                            || !hmVmxIsDataSelectorOk(&pCtx->gs)
                            || !hmVmxIsStackSelectorOk(&pCtx->ss))
                        {
                            STAM_COUNTER_INC(&pVCpu->hm.s.StatVmxCheckBadSel);
                            return false;
                        }
                    }
                    else
                    {
                        /* The guest switched to V86 mode, check if the state is suitable for VT-x. */
                        if (   pCtx->cs.Sel != (pCtx->cs.u64Base >> 4)
                            || pCtx->ds.Sel != (pCtx->ds.u64Base >> 4)
                            || pCtx->es.Sel != (pCtx->es.u64Base >> 4)
                            || pCtx->ss.Sel != (pCtx->ss.u64Base >> 4)
                            || pCtx->fs.Sel != (pCtx->fs.u64Base >> 4)
                            || pCtx->gs.Sel != (pCtx->gs.u64Base >> 4))
                        {
                            STAM_COUNTER_INC(&pVCpu->hm.s.StatVmxCheckBadV86SelBase);
                            return false;
                        }
                        if (   pCtx->cs.u32Limit != 0xffff
                            || pCtx->ds.u32Limit != 0xffff
                            || pCtx->es.u32Limit != 0xffff
                            || pCtx->ss.u32Limit != 0xffff
                            || pCtx->fs.u32Limit != 0xffff
                            || pCtx->gs.u32Limit != 0xffff)
                        {
                            STAM_COUNTER_INC(&pVCpu->hm.s.StatVmxCheckBadV86SelLimit);
                            return false;
                        }
                        /* 0xf3 = accessed writable data, S=1, DPL=3, present -- the V86 attribute pattern. */
                        if (   pCtx->cs.Attr.u != 0xf3
                            || pCtx->ds.Attr.u != 0xf3
                            || pCtx->es.Attr.u != 0xf3
                            || pCtx->ss.Attr.u != 0xf3
                            || pCtx->fs.Attr.u != 0xf3
                            || pCtx->gs.Attr.u != 0xf3)
                        {
                            STAM_COUNTER_INC(&pVCpu->hm.s.StatVmxCheckBadV86SelAttr);
                            return false;
                        }
                    }
                }
            }
        }
        else
        {
            if (!CPUMIsGuestInLongModeEx(pCtx))
            {
                if (/* Requires a fake PD for real *and* protected mode without paging - stored in the VMM device heap: */
                       !CTX_EXPR(pVM->hm.s.fNestedPagingCfg, pVM->hmr0.s.fNestedPaging, RT_NOTHING)
                    /* Requires a fake TSS for real mode - stored in the VMM device heap: */
                    || CPUMIsGuestInRealModeEx(pCtx))
                    return false;

                /* Too early for VT-x; Solaris guests will fail with a guru meditation otherwise; same for XP. */
                if (pCtx->idtr.pIdt == 0 || pCtx->idtr.cbIdt == 0 || pCtx->tr.Sel == 0)
                    return false;

                /*
                 * The guest is about to complete the switch to protected mode. Wait a bit longer.
                 * Windows XP; switch to protected mode; all selectors are marked not present
                 * in the hidden registers (possible recompiler bug; see load_seg_vm).
                 */
                /** @todo Is this supposed recompiler bug still relevant with IEM? */
                if (pCtx->cs.Attr.n.u1Present == 0)
                    return false;
                if (pCtx->ss.Attr.n.u1Present == 0)
                    return false;

                /*
                 * Windows XP: possible same as above, but new recompiler requires new
                 * heuristics? VT-x doesn't seem to like something about the guest state and
                 * this stuff avoids it.
                 */
                /** @todo This check is actually wrong, it doesn't take the direction of the
                 *        stack segment into account. But, it does the job for now. */
                if (pCtx->rsp >= pCtx->ss.u32Limit)
                    return false;
            }
        }
    }

    if (pVM->hm.s.vmx.fEnabled)
    {
        /* If bit N is set in cr0_fixed0, then it must be set in the guest's cr0. */
        uint32_t uCr0Mask = (uint32_t)CTX_EXPR(pVM->hm.s.ForR3.vmx.Msrs.u64Cr0Fixed0, g_HmMsrs.u.vmx.u64Cr0Fixed0, RT_NOTHING);

        /* We ignore the NE bit here on purpose; see HMR0.cpp for details. */
        uCr0Mask &= ~X86_CR0_NE;

        if (fSupportsRealMode)
        {
            /* We ignore the PE & PG bits here on purpose; we emulate real and protected mode without paging. */
            uCr0Mask &= ~(X86_CR0_PG | X86_CR0_PE);
        }
        else
        {
            /* We support protected mode without paging using identity mapping. */
            uCr0Mask &= ~X86_CR0_PG;
        }
        if ((pCtx->cr0 & uCr0Mask) != uCr0Mask)
            return false;

        /* If bit N is cleared in cr0_fixed1, then it must be zero in the guest's cr0. */
        uCr0Mask = (uint32_t)~CTX_EXPR(pVM->hm.s.ForR3.vmx.Msrs.u64Cr0Fixed1, g_HmMsrs.u.vmx.u64Cr0Fixed1, RT_NOTHING);
        if ((pCtx->cr0 & uCr0Mask) != 0)
            return false;

        /* If bit N is set in cr4_fixed0, then it must be set in the guest's cr4. */
        uCr0Mask = (uint32_t)CTX_EXPR(pVM->hm.s.ForR3.vmx.Msrs.u64Cr4Fixed0, g_HmMsrs.u.vmx.u64Cr4Fixed0, RT_NOTHING);
        /* VMXE is owned by the host while the guest runs under VT-x, so don't require it of the guest. */
        uCr0Mask &= ~X86_CR4_VMXE;
        if ((pCtx->cr4 & uCr0Mask) != uCr0Mask)
            return false;

        /* If bit N is cleared in cr4_fixed1, then it must be zero in the guest's cr4. */
        uCr0Mask = (uint32_t)~CTX_EXPR(pVM->hm.s.ForR3.vmx.Msrs.u64Cr4Fixed1, g_HmMsrs.u.vmx.u64Cr4Fixed1, RT_NOTHING);
        if ((pCtx->cr4 & uCr0Mask) != 0)
            return false;

        pVCpu->hm.s.fActive = true;
        return true;
    }

    return false;
}
831
832
833/**
834 * Dumps the virtual VMCS state to the release log.
835 *
836 * This is a purely a convenience function to output to the release log because
837 * cpumR3InfoVmxVmcs dumps only to the debug console and isn't always easy to use in
838 * case of a crash.
839 *
840 * @param pVCpu The cross context virtual CPU structure.
841 */
842VMM_INT_DECL(void) HMDumpHwvirtVmxState(PVMCPU pVCpu)
843{
844 /* The string width of -4 used in the macros below to cover 'LDTR', 'GDTR', 'IDTR. */
845#define HMVMX_DUMP_HOST_XDTR(a_pVmcs, a_Seg, a_SegName, a_pszPrefix) \
846 do { \
847 LogRel((" %s%-4s = {base=%016RX64}\n", \
848 (a_pszPrefix), (a_SegName), (a_pVmcs)->u64Host##a_Seg##Base.u)); \
849 } while (0)
850#define HMVMX_DUMP_HOST_FS_GS_TR(a_pVmcs, a_Seg, a_SegName, a_pszPrefix) \
851 do { \
852 LogRel((" %s%-4s = {%04x base=%016RX64}\n", \
853 (a_pszPrefix), (a_SegName), (a_pVmcs)->Host##a_Seg, (a_pVmcs)->u64Host##a_Seg##Base.u)); \
854 } while (0)
855#define HMVMX_DUMP_GUEST_SEGREG(a_pVmcs, a_Seg, a_SegName, a_pszPrefix) \
856 do { \
857 LogRel((" %s%-4s = {%04x base=%016RX64 limit=%08x flags=%04x}\n", \
858 (a_pszPrefix), (a_SegName), (a_pVmcs)->Guest##a_Seg, (a_pVmcs)->u64Guest##a_Seg##Base.u, \
859 (a_pVmcs)->u32Guest##a_Seg##Limit, (a_pVmcs)->u32Guest##a_Seg##Attr)); \
860 } while (0)
861#define HMVMX_DUMP_GUEST_XDTR(a_pVmcs, a_Seg, a_SegName, a_pszPrefix) \
862 do { \
863 LogRel((" %s%-4s = {base=%016RX64 limit=%08x}\n", \
864 (a_pszPrefix), (a_SegName), (a_pVmcs)->u64Guest##a_Seg##Base.u, (a_pVmcs)->u32Guest##a_Seg##Limit)); \
865 } while (0)
866
867 PCCPUMCTX const pCtx = &pVCpu->cpum.GstCtx;
868 PCVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
869 if (!pVmcs)
870 {
871 LogRel(("Virtual VMCS not allocated\n"));
872 return;
873 }
874 LogRel(("GCPhysVmxon = %#RGp\n", pCtx->hwvirt.vmx.GCPhysVmxon));
875 LogRel(("GCPhysVmcs = %#RGp\n", pCtx->hwvirt.vmx.GCPhysVmcs));
876 LogRel(("GCPhysShadowVmcs = %#RGp\n", pCtx->hwvirt.vmx.GCPhysShadowVmcs));
877 LogRel(("enmDiag = %u (%s)\n", pCtx->hwvirt.vmx.enmDiag, HMGetVmxDiagDesc(pCtx->hwvirt.vmx.enmDiag)));
878 LogRel(("uDiagAux = %#RX64\n", pCtx->hwvirt.vmx.uDiagAux));
879 LogRel(("enmAbort = %u (%s)\n", pCtx->hwvirt.vmx.enmAbort, VMXGetAbortDesc(pCtx->hwvirt.vmx.enmAbort)));
880 LogRel(("uAbortAux = %u (%#x)\n", pCtx->hwvirt.vmx.uAbortAux, pCtx->hwvirt.vmx.uAbortAux));
881 LogRel(("fInVmxRootMode = %RTbool\n", pCtx->hwvirt.vmx.fInVmxRootMode));
882 LogRel(("fInVmxNonRootMode = %RTbool\n", pCtx->hwvirt.vmx.fInVmxNonRootMode));
883 LogRel(("fInterceptEvents = %RTbool\n", pCtx->hwvirt.vmx.fInterceptEvents));
884 LogRel(("fNmiUnblockingIret = %RTbool\n", pCtx->hwvirt.vmx.fNmiUnblockingIret));
885 LogRel(("uFirstPauseLoopTick = %RX64\n", pCtx->hwvirt.vmx.uFirstPauseLoopTick));
886 LogRel(("uPrevPauseTick = %RX64\n", pCtx->hwvirt.vmx.uPrevPauseTick));
887 LogRel(("uEntryTick = %RX64\n", pCtx->hwvirt.vmx.uEntryTick));
888 LogRel(("offVirtApicWrite = %#RX16\n", pCtx->hwvirt.vmx.offVirtApicWrite));
889 LogRel(("fVirtNmiBlocking = %RTbool\n", pCtx->hwvirt.vmx.fVirtNmiBlocking));
890 LogRel(("VMCS cache:\n"));
891
892 const char *pszPrefix = " ";
893 /* Header. */
894 {
895 LogRel(("%sHeader:\n", pszPrefix));
896 LogRel((" %sVMCS revision id = %#RX32\n", pszPrefix, pVmcs->u32VmcsRevId));
897 LogRel((" %sVMX-abort id = %#RX32 (%s)\n", pszPrefix, pVmcs->enmVmxAbort, VMXGetAbortDesc(pVmcs->enmVmxAbort)));
898 LogRel((" %sVMCS state = %#x (%s)\n", pszPrefix, pVmcs->fVmcsState, VMXGetVmcsStateDesc(pVmcs->fVmcsState)));
899 }
900
901 /* Control fields. */
902 {
903 /* 16-bit. */
904 LogRel(("%sControl:\n", pszPrefix));
905 LogRel((" %sVPID = %#RX16\n", pszPrefix, pVmcs->u16Vpid));
906 LogRel((" %sPosted intr notify vector = %#RX16\n", pszPrefix, pVmcs->u16PostIntNotifyVector));
907 LogRel((" %sEPTP index = %#RX16\n", pszPrefix, pVmcs->u16EptpIndex));
908
909 /* 32-bit. */
910 LogRel((" %sPin ctls = %#RX32\n", pszPrefix, pVmcs->u32PinCtls));
911 LogRel((" %sProcessor ctls = %#RX32\n", pszPrefix, pVmcs->u32ProcCtls));
912 LogRel((" %sSecondary processor ctls = %#RX32\n", pszPrefix, pVmcs->u32ProcCtls2));
913 LogRel((" %sVM-exit ctls = %#RX32\n", pszPrefix, pVmcs->u32ExitCtls));
914 LogRel((" %sVM-entry ctls = %#RX32\n", pszPrefix, pVmcs->u32EntryCtls));
915 LogRel((" %sException bitmap = %#RX32\n", pszPrefix, pVmcs->u32XcptBitmap));
916 LogRel((" %sPage-fault mask = %#RX32\n", pszPrefix, pVmcs->u32XcptPFMask));
917 LogRel((" %sPage-fault match = %#RX32\n", pszPrefix, pVmcs->u32XcptPFMatch));
918 LogRel((" %sCR3-target count = %RU32\n", pszPrefix, pVmcs->u32Cr3TargetCount));
919 LogRel((" %sVM-exit MSR store count = %RU32\n", pszPrefix, pVmcs->u32ExitMsrStoreCount));
920 LogRel((" %sVM-exit MSR load count = %RU32\n", pszPrefix, pVmcs->u32ExitMsrLoadCount));
921 LogRel((" %sVM-entry MSR load count = %RU32\n", pszPrefix, pVmcs->u32EntryMsrLoadCount));
922 LogRel((" %sVM-entry interruption info = %#RX32\n", pszPrefix, pVmcs->u32EntryIntInfo));
923 {
924 uint32_t const fInfo = pVmcs->u32EntryIntInfo;
925 uint8_t const uType = VMX_ENTRY_INT_INFO_TYPE(fInfo);
926 LogRel((" %sValid = %RTbool\n", pszPrefix, VMX_ENTRY_INT_INFO_IS_VALID(fInfo)));
927 LogRel((" %sType = %#x (%s)\n", pszPrefix, uType, VMXGetEntryIntInfoTypeDesc(uType)));
928 LogRel((" %sVector = %#x\n", pszPrefix, VMX_ENTRY_INT_INFO_VECTOR(fInfo)));
929 LogRel((" %sNMI-unblocking-IRET = %RTbool\n", pszPrefix, VMX_ENTRY_INT_INFO_IS_NMI_UNBLOCK_IRET(fInfo)));
930 LogRel((" %sError-code valid = %RTbool\n", pszPrefix, VMX_ENTRY_INT_INFO_IS_ERROR_CODE_VALID(fInfo)));
931 }
932 LogRel((" %sVM-entry xcpt error-code = %#RX32\n", pszPrefix, pVmcs->u32EntryXcptErrCode));
933 LogRel((" %sVM-entry instr length = %u byte(s)\n", pszPrefix, pVmcs->u32EntryInstrLen));
934 LogRel((" %sTPR threshold = %#RX32\n", pszPrefix, pVmcs->u32TprThreshold));
935 LogRel((" %sPLE gap = %#RX32\n", pszPrefix, pVmcs->u32PleGap));
936 LogRel((" %sPLE window = %#RX32\n", pszPrefix, pVmcs->u32PleWindow));
937
938 /* 64-bit. */
939 LogRel((" %sIO-bitmap A addr = %#RX64\n", pszPrefix, pVmcs->u64AddrIoBitmapA.u));
940 LogRel((" %sIO-bitmap B addr = %#RX64\n", pszPrefix, pVmcs->u64AddrIoBitmapB.u));
941 LogRel((" %sMSR-bitmap addr = %#RX64\n", pszPrefix, pVmcs->u64AddrMsrBitmap.u));
942 LogRel((" %sVM-exit MSR store addr = %#RX64\n", pszPrefix, pVmcs->u64AddrExitMsrStore.u));
943 LogRel((" %sVM-exit MSR load addr = %#RX64\n", pszPrefix, pVmcs->u64AddrExitMsrLoad.u));
944 LogRel((" %sVM-entry MSR load addr = %#RX64\n", pszPrefix, pVmcs->u64AddrEntryMsrLoad.u));
945 LogRel((" %sExecutive VMCS ptr = %#RX64\n", pszPrefix, pVmcs->u64ExecVmcsPtr.u));
946 LogRel((" %sPML addr = %#RX64\n", pszPrefix, pVmcs->u64AddrPml.u));
947 LogRel((" %sTSC offset = %#RX64\n", pszPrefix, pVmcs->u64TscOffset.u));
948 LogRel((" %sVirtual-APIC addr = %#RX64\n", pszPrefix, pVmcs->u64AddrVirtApic.u));
949 LogRel((" %sAPIC-access addr = %#RX64\n", pszPrefix, pVmcs->u64AddrApicAccess.u));
950 LogRel((" %sPosted-intr desc addr = %#RX64\n", pszPrefix, pVmcs->u64AddrPostedIntDesc.u));
951 LogRel((" %sVM-functions control = %#RX64\n", pszPrefix, pVmcs->u64VmFuncCtls.u));
952 LogRel((" %sEPTP ptr = %#RX64\n", pszPrefix, pVmcs->u64EptPtr.u));
953 LogRel((" %sEOI-exit bitmap 0 = %#RX64\n", pszPrefix, pVmcs->u64EoiExitBitmap0.u));
954 LogRel((" %sEOI-exit bitmap 1 = %#RX64\n", pszPrefix, pVmcs->u64EoiExitBitmap1.u));
955 LogRel((" %sEOI-exit bitmap 2 = %#RX64\n", pszPrefix, pVmcs->u64EoiExitBitmap2.u));
956 LogRel((" %sEOI-exit bitmap 3 = %#RX64\n", pszPrefix, pVmcs->u64EoiExitBitmap3.u));
957 LogRel((" %sEPTP-list addr = %#RX64\n", pszPrefix, pVmcs->u64AddrEptpList.u));
958 LogRel((" %sVMREAD-bitmap addr = %#RX64\n", pszPrefix, pVmcs->u64AddrVmreadBitmap.u));
959 LogRel((" %sVMWRITE-bitmap addr = %#RX64\n", pszPrefix, pVmcs->u64AddrVmwriteBitmap.u));
960 LogRel((" %sVirt-Xcpt info addr = %#RX64\n", pszPrefix, pVmcs->u64AddrXcptVeInfo.u));
961 LogRel((" %sXSS-exiting bitmap = %#RX64\n", pszPrefix, pVmcs->u64XssExitBitmap.u));
962 LogRel((" %sENCLS-exiting bitmap = %#RX64\n", pszPrefix, pVmcs->u64EnclsExitBitmap.u));
963 LogRel((" %sSPP table pointer = %#RX64\n", pszPrefix, pVmcs->u64SppTablePtr.u));
964 LogRel((" %sTSC multiplier = %#RX64\n", pszPrefix, pVmcs->u64TscMultiplier.u));
965 LogRel((" %sENCLV-exiting bitmap = %#RX64\n", pszPrefix, pVmcs->u64EnclvExitBitmap.u));
966
967 /* Natural width. */
968 LogRel((" %sCR0 guest/host mask = %#RX64\n", pszPrefix, pVmcs->u64Cr0Mask.u));
969 LogRel((" %sCR4 guest/host mask = %#RX64\n", pszPrefix, pVmcs->u64Cr4Mask.u));
970 LogRel((" %sCR0 read shadow = %#RX64\n", pszPrefix, pVmcs->u64Cr0ReadShadow.u));
971 LogRel((" %sCR4 read shadow = %#RX64\n", pszPrefix, pVmcs->u64Cr4ReadShadow.u));
972 LogRel((" %sCR3-target 0 = %#RX64\n", pszPrefix, pVmcs->u64Cr3Target0.u));
973 LogRel((" %sCR3-target 1 = %#RX64\n", pszPrefix, pVmcs->u64Cr3Target1.u));
974 LogRel((" %sCR3-target 2 = %#RX64\n", pszPrefix, pVmcs->u64Cr3Target2.u));
975 LogRel((" %sCR3-target 3 = %#RX64\n", pszPrefix, pVmcs->u64Cr3Target3.u));
976 }
977
978 /* Guest state. */
979 {
980 LogRel(("%sGuest state:\n", pszPrefix));
981
982 /* 16-bit. */
983 HMVMX_DUMP_GUEST_SEGREG(pVmcs, Cs, "cs", pszPrefix);
984 HMVMX_DUMP_GUEST_SEGREG(pVmcs, Ss, "ss", pszPrefix);
985 HMVMX_DUMP_GUEST_SEGREG(pVmcs, Es, "es", pszPrefix);
986 HMVMX_DUMP_GUEST_SEGREG(pVmcs, Ds, "ds", pszPrefix);
987 HMVMX_DUMP_GUEST_SEGREG(pVmcs, Fs, "fs", pszPrefix);
988 HMVMX_DUMP_GUEST_SEGREG(pVmcs, Gs, "gs", pszPrefix);
989 HMVMX_DUMP_GUEST_SEGREG(pVmcs, Ldtr, "ldtr", pszPrefix);
990 HMVMX_DUMP_GUEST_SEGREG(pVmcs, Tr, "tr", pszPrefix);
991 HMVMX_DUMP_GUEST_XDTR( pVmcs, Gdtr, "gdtr", pszPrefix);
992 HMVMX_DUMP_GUEST_XDTR( pVmcs, Idtr, "idtr", pszPrefix);
993 LogRel((" %sInterrupt status = %#RX16\n", pszPrefix, pVmcs->u16GuestIntStatus));
994 LogRel((" %sPML index = %#RX16\n", pszPrefix, pVmcs->u16PmlIndex));
995
996 /* 32-bit. */
997 LogRel((" %sInterruptibility state = %#RX32\n", pszPrefix, pVmcs->u32GuestIntrState));
998 LogRel((" %sActivity state = %#RX32\n", pszPrefix, pVmcs->u32GuestActivityState));
999 LogRel((" %sSMBASE = %#RX32\n", pszPrefix, pVmcs->u32GuestSmBase));
1000 LogRel((" %sSysEnter CS = %#RX32\n", pszPrefix, pVmcs->u32GuestSysenterCS));
1001 LogRel((" %sVMX-preemption timer value = %#RX32\n", pszPrefix, pVmcs->u32PreemptTimer));
1002
1003 /* 64-bit. */
1004 LogRel((" %sVMCS link ptr = %#RX64\n", pszPrefix, pVmcs->u64VmcsLinkPtr.u));
1005 LogRel((" %sDBGCTL = %#RX64\n", pszPrefix, pVmcs->u64GuestDebugCtlMsr.u));
1006 LogRel((" %sPAT = %#RX64\n", pszPrefix, pVmcs->u64GuestPatMsr.u));
1007 LogRel((" %sEFER = %#RX64\n", pszPrefix, pVmcs->u64GuestEferMsr.u));
1008 LogRel((" %sPERFGLOBALCTRL = %#RX64\n", pszPrefix, pVmcs->u64GuestPerfGlobalCtlMsr.u));
1009 LogRel((" %sPDPTE 0 = %#RX64\n", pszPrefix, pVmcs->u64GuestPdpte0.u));
1010 LogRel((" %sPDPTE 1 = %#RX64\n", pszPrefix, pVmcs->u64GuestPdpte1.u));
1011 LogRel((" %sPDPTE 2 = %#RX64\n", pszPrefix, pVmcs->u64GuestPdpte2.u));
1012 LogRel((" %sPDPTE 3 = %#RX64\n", pszPrefix, pVmcs->u64GuestPdpte3.u));
1013 LogRel((" %sBNDCFGS = %#RX64\n", pszPrefix, pVmcs->u64GuestBndcfgsMsr.u));
1014 LogRel((" %sRTIT_CTL = %#RX64\n", pszPrefix, pVmcs->u64GuestRtitCtlMsr.u));
1015
1016 /* Natural width. */
1017 LogRel((" %scr0 = %#RX64\n", pszPrefix, pVmcs->u64GuestCr0.u));
1018 LogRel((" %scr3 = %#RX64\n", pszPrefix, pVmcs->u64GuestCr3.u));
1019 LogRel((" %scr4 = %#RX64\n", pszPrefix, pVmcs->u64GuestCr4.u));
1020 LogRel((" %sdr7 = %#RX64\n", pszPrefix, pVmcs->u64GuestDr7.u));
1021 LogRel((" %srsp = %#RX64\n", pszPrefix, pVmcs->u64GuestRsp.u));
1022 LogRel((" %srip = %#RX64\n", pszPrefix, pVmcs->u64GuestRip.u));
1023 LogRel((" %srflags = %#RX64\n", pszPrefix, pVmcs->u64GuestRFlags.u));
1024 LogRel((" %sPending debug xcpts = %#RX64\n", pszPrefix, pVmcs->u64GuestPendingDbgXcpts.u));
1025 LogRel((" %sSysEnter ESP = %#RX64\n", pszPrefix, pVmcs->u64GuestSysenterEsp.u));
1026 LogRel((" %sSysEnter EIP = %#RX64\n", pszPrefix, pVmcs->u64GuestSysenterEip.u));
1027 }
1028
1029 /* Host state. */
1030 {
1031 LogRel(("%sHost state:\n", pszPrefix));
1032
1033 /* 16-bit. */
1034 LogRel((" %scs = %#RX16\n", pszPrefix, pVmcs->HostCs));
1035 LogRel((" %sss = %#RX16\n", pszPrefix, pVmcs->HostSs));
1036 LogRel((" %sds = %#RX16\n", pszPrefix, pVmcs->HostDs));
1037 LogRel((" %ses = %#RX16\n", pszPrefix, pVmcs->HostEs));
1038 HMVMX_DUMP_HOST_FS_GS_TR(pVmcs, Fs, "fs", pszPrefix);
1039 HMVMX_DUMP_HOST_FS_GS_TR(pVmcs, Gs, "gs", pszPrefix);
1040 HMVMX_DUMP_HOST_FS_GS_TR(pVmcs, Tr, "tr", pszPrefix);
1041 HMVMX_DUMP_HOST_XDTR(pVmcs, Gdtr, "gdtr", pszPrefix);
1042 HMVMX_DUMP_HOST_XDTR(pVmcs, Idtr, "idtr", pszPrefix);
1043
1044 /* 32-bit. */
1045 LogRel((" %sSysEnter CS = %#RX32\n", pszPrefix, pVmcs->u32HostSysenterCs));
1046
1047 /* 64-bit. */
1048 LogRel((" %sEFER = %#RX64\n", pszPrefix, pVmcs->u64HostEferMsr.u));
1049 LogRel((" %sPAT = %#RX64\n", pszPrefix, pVmcs->u64HostPatMsr.u));
1050 LogRel((" %sPERFGLOBALCTRL = %#RX64\n", pszPrefix, pVmcs->u64HostPerfGlobalCtlMsr.u));
1051
1052 /* Natural width. */
1053 LogRel((" %scr0 = %#RX64\n", pszPrefix, pVmcs->u64HostCr0.u));
1054 LogRel((" %scr3 = %#RX64\n", pszPrefix, pVmcs->u64HostCr3.u));
1055 LogRel((" %scr4 = %#RX64\n", pszPrefix, pVmcs->u64HostCr4.u));
1056 LogRel((" %sSysEnter ESP = %#RX64\n", pszPrefix, pVmcs->u64HostSysenterEsp.u));
1057 LogRel((" %sSysEnter EIP = %#RX64\n", pszPrefix, pVmcs->u64HostSysenterEip.u));
1058 LogRel((" %srsp = %#RX64\n", pszPrefix, pVmcs->u64HostRsp.u));
1059 LogRel((" %srip = %#RX64\n", pszPrefix, pVmcs->u64HostRip.u));
1060 }
1061
1062 /* Read-only fields. */
1063 {
1064 LogRel(("%sRead-only data fields:\n", pszPrefix));
1065
1066 /* 16-bit (none currently). */
1067
1068 /* 32-bit. */
1069 uint32_t const uExitReason = pVmcs->u32RoExitReason;
1070 LogRel((" %sExit reason = %u (%s)\n", pszPrefix, uExitReason, HMGetVmxExitName(uExitReason)));
1071 LogRel((" %sExit qualification = %#RX64\n", pszPrefix, pVmcs->u64RoExitQual.u));
1072 LogRel((" %sVM-instruction error = %#RX32\n", pszPrefix, pVmcs->u32RoVmInstrError));
1073 LogRel((" %sVM-exit intr info = %#RX32\n", pszPrefix, pVmcs->u32RoExitIntInfo));
1074 {
1075 uint32_t const fInfo = pVmcs->u32RoExitIntInfo;
1076 uint8_t const uType = VMX_EXIT_INT_INFO_TYPE(fInfo);
1077 LogRel((" %sValid = %RTbool\n", pszPrefix, VMX_EXIT_INT_INFO_IS_VALID(fInfo)));
1078 LogRel((" %sType = %#x (%s)\n", pszPrefix, uType, VMXGetExitIntInfoTypeDesc(uType)));
1079 LogRel((" %sVector = %#x\n", pszPrefix, VMX_EXIT_INT_INFO_VECTOR(fInfo)));
1080 LogRel((" %sNMI-unblocking-IRET = %RTbool\n", pszPrefix, VMX_EXIT_INT_INFO_IS_NMI_UNBLOCK_IRET(fInfo)));
1081 LogRel((" %sError-code valid = %RTbool\n", pszPrefix, VMX_EXIT_INT_INFO_IS_ERROR_CODE_VALID(fInfo)));
1082 }
1083 LogRel((" %sVM-exit intr error-code = %#RX32\n", pszPrefix, pVmcs->u32RoExitIntErrCode));
1084 LogRel((" %sIDT-vectoring info = %#RX32\n", pszPrefix, pVmcs->u32RoIdtVectoringInfo));
1085 {
1086 uint32_t const fInfo = pVmcs->u32RoIdtVectoringInfo;
1087 uint8_t const uType = VMX_IDT_VECTORING_INFO_TYPE(fInfo);
1088 LogRel((" %sValid = %RTbool\n", pszPrefix, VMX_IDT_VECTORING_INFO_IS_VALID(fInfo)));
1089 LogRel((" %sType = %#x (%s)\n", pszPrefix, uType, VMXGetIdtVectoringInfoTypeDesc(uType)));
1090 LogRel((" %sVector = %#x\n", pszPrefix, VMX_IDT_VECTORING_INFO_VECTOR(fInfo)));
1091 LogRel((" %sError-code valid = %RTbool\n", pszPrefix, VMX_IDT_VECTORING_INFO_IS_ERROR_CODE_VALID(fInfo)));
1092 }
1093 LogRel((" %sIDT-vectoring error-code = %#RX32\n", pszPrefix, pVmcs->u32RoIdtVectoringErrCode));
1094 LogRel((" %sVM-exit instruction length = %u bytes\n", pszPrefix, pVmcs->u32RoExitInstrLen));
1095 LogRel((" %sVM-exit instruction info = %#RX64\n", pszPrefix, pVmcs->u32RoExitInstrInfo));
1096
1097 /* 64-bit. */
1098 LogRel((" %sGuest-physical addr = %#RX64\n", pszPrefix, pVmcs->u64RoGuestPhysAddr.u));
1099
1100 /* Natural width. */
1101 LogRel((" %sI/O RCX = %#RX64\n", pszPrefix, pVmcs->u64RoIoRcx.u));
1102 LogRel((" %sI/O RSI = %#RX64\n", pszPrefix, pVmcs->u64RoIoRsi.u));
1103 LogRel((" %sI/O RDI = %#RX64\n", pszPrefix, pVmcs->u64RoIoRdi.u));
1104 LogRel((" %sI/O RIP = %#RX64\n", pszPrefix, pVmcs->u64RoIoRip.u));
1105 LogRel((" %sGuest-linear addr = %#RX64\n", pszPrefix, pVmcs->u64RoGuestLinearAddr.u));
1106 }
1107
1108#undef HMVMX_DUMP_HOST_XDTR
1109#undef HMVMX_DUMP_HOST_FS_GS_TR
1110#undef HMVMX_DUMP_GUEST_SEGREG
1111#undef HMVMX_DUMP_GUEST_XDTR
1112}
1113
1114
1115/**
1116 * Gets the active (in use) VMCS info. object for the specified VCPU.
1117 *
1118 * This is either the guest or nested-guest VMCS info. and need not necessarily
1119 * pertain to the "current" VMCS (in the VMX definition of the term). For instance,
1120 * if the VM-entry failed due to an invalid-guest state, we may have "cleared" the
1121 * current VMCS while returning to ring-3. However, the VMCS info. object for that
1122 * VMCS would still be active and returned here so that we could dump the VMCS
1123 * fields to ring-3 for diagnostics. This function is thus only used to
1124 * distinguish between the nested-guest or guest VMCS.
1125 *
1126 * @returns The active VMCS information.
1127 * @param pVCpu The cross context virtual CPU structure.
1128 *
1129 * @thread EMT.
1130 * @remarks This function may be called with preemption or interrupts disabled!
1131 */
1132VMM_INT_DECL(PVMXVMCSINFOSHARED) hmGetVmxActiveVmcsInfoShared(PVMCPUCC pVCpu)
1133{
1134#ifdef IN_RING0
1135 if (!pVCpu->hmr0.s.vmx.fSwitchedToNstGstVmcs)
1136#else
1137 if (!pVCpu->hm.s.vmx.fSwitchedToNstGstVmcsCopyForRing3)
1138#endif
1139 return &pVCpu->hm.s.vmx.VmcsInfo;
1140 return &pVCpu->hm.s.vmx.VmcsInfoNstGst;
1141}
1142
1143
1144/**
1145 * Converts a VMX event type into an appropriate TRPM event type.
1146 *
1147 * @returns TRPM event.
1148 * @param uIntInfo The VMX event.
1149 */
1150VMM_INT_DECL(TRPMEVENT) HMVmxEventTypeToTrpmEventType(uint32_t uIntInfo)
1151{
1152 Assert(VMX_IDT_VECTORING_INFO_IS_VALID(uIntInfo));
1153
1154 TRPMEVENT enmTrapType;
1155 uint8_t const uType = VMX_IDT_VECTORING_INFO_TYPE(uIntInfo);
1156 uint8_t const uVector = VMX_IDT_VECTORING_INFO_VECTOR(uIntInfo);
1157
1158 switch (uType)
1159 {
1160 case VMX_IDT_VECTORING_INFO_TYPE_EXT_INT:
1161 enmTrapType = TRPM_HARDWARE_INT;
1162 break;
1163
1164 case VMX_IDT_VECTORING_INFO_TYPE_NMI:
1165 case VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT:
1166 enmTrapType = TRPM_TRAP;
1167 break;
1168
1169 case VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT: /* INT1 (ICEBP). */
1170 Assert(uVector == X86_XCPT_DB); NOREF(uVector);
1171 enmTrapType = TRPM_SOFTWARE_INT;
1172 break;
1173
1174 case VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT: /* INT3 (#BP) and INTO (#OF) */
1175 Assert(uVector == X86_XCPT_BP || uVector == X86_XCPT_OF); NOREF(uVector);
1176 enmTrapType = TRPM_SOFTWARE_INT;
1177 break;
1178
1179 case VMX_IDT_VECTORING_INFO_TYPE_SW_INT:
1180 enmTrapType = TRPM_SOFTWARE_INT;
1181 break;
1182
1183 default:
1184 AssertMsgFailed(("Invalid trap type %#x\n", uType));
1185 enmTrapType = TRPM_32BIT_HACK;
1186 break;
1187 }
1188
1189 return enmTrapType;
1190}
1191
1192
1193/**
1194 * Converts a TRPM event type into an appropriate VMX event type.
1195 *
1196 * @returns VMX event type mask.
1197 * @param uVector The event vector.
1198 * @param enmTrpmEvent The TRPM event.
1199 * @param fIcebp Whether the \#DB vector is caused by an INT1/ICEBP
1200 * instruction.
1201 */
1202VMM_INT_DECL(uint32_t) HMTrpmEventTypeToVmxEventType(uint8_t uVector, TRPMEVENT enmTrpmEvent, bool fIcebp)
1203{
1204 uint32_t uIntInfoType = 0;
1205 if (enmTrpmEvent == TRPM_TRAP)
1206 {
1207 Assert(!fIcebp);
1208 switch (uVector)
1209 {
1210 case X86_XCPT_NMI:
1211 uIntInfoType |= (VMX_IDT_VECTORING_INFO_TYPE_NMI << VMX_IDT_VECTORING_INFO_TYPE_SHIFT);
1212 break;
1213
1214 case X86_XCPT_BP:
1215 case X86_XCPT_OF:
1216 uIntInfoType |= (VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT << VMX_IDT_VECTORING_INFO_TYPE_SHIFT);
1217 break;
1218
1219 case X86_XCPT_PF:
1220 case X86_XCPT_DF:
1221 case X86_XCPT_TS:
1222 case X86_XCPT_NP:
1223 case X86_XCPT_SS:
1224 case X86_XCPT_GP:
1225 case X86_XCPT_AC:
1226 uIntInfoType |= VMX_IDT_VECTORING_INFO_ERROR_CODE_VALID;
1227 RT_FALL_THRU();
1228 default:
1229 uIntInfoType |= (VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT << VMX_IDT_VECTORING_INFO_TYPE_SHIFT);
1230 break;
1231 }
1232 }
1233 else if (enmTrpmEvent == TRPM_HARDWARE_INT)
1234 {
1235 Assert(!fIcebp);
1236 uIntInfoType |= (VMX_IDT_VECTORING_INFO_TYPE_EXT_INT << VMX_IDT_VECTORING_INFO_TYPE_SHIFT);
1237 }
1238 else if (enmTrpmEvent == TRPM_SOFTWARE_INT)
1239 {
1240 switch (uVector)
1241 {
1242 case X86_XCPT_BP:
1243 case X86_XCPT_OF:
1244 uIntInfoType |= (VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT << VMX_IDT_VECTORING_INFO_TYPE_SHIFT);
1245 break;
1246
1247 case X86_XCPT_DB:
1248 {
1249 if (fIcebp)
1250 {
1251 uIntInfoType |= (VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT << VMX_IDT_VECTORING_INFO_TYPE_SHIFT);
1252 break;
1253 }
1254 RT_FALL_THRU();
1255 }
1256 default:
1257 uIntInfoType |= (VMX_IDT_VECTORING_INFO_TYPE_SW_INT << VMX_IDT_VECTORING_INFO_TYPE_SHIFT);
1258 break;
1259 }
1260 }
1261 else
1262 AssertMsgFailed(("Invalid TRPM event type %d\n", enmTrpmEvent));
1263 return uIntInfoType;
1264}
1265
1266
1267#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1268/**
1269 * Notification callback for when a VM-exit happens outside VMX R0 code (e.g. in
1270 * IEM).
1271 *
1272 * @param pVCpu The cross context virtual CPU structure.
1273 *
1274 * @remarks Can be called from ring-0 as well as ring-3.
1275 */
1276VMM_INT_DECL(void) HMNotifyVmxNstGstVmexit(PVMCPU pVCpu)
1277{
1278 LogFlowFunc(("\n"));
1279
1280 /*
1281 * Transitions to ring-3 flag a full CPU-state change except if we transition to ring-3
1282 * in response to a physical CPU interrupt as no changes to the guest-CPU state are
1283 * expected (see VINF_EM_RAW_INTERRUPT handling in hmR0VmxExitToRing3).
1284 *
1285 * However, with nested-guests, the state -can- change on trips to ring-3 for we might
1286 * try to inject a nested-guest physical interrupt and cause a VMX_EXIT_EXT_INT VM-exit
1287 * for the nested-guest from ring-3.
1288 *
1289 * Signalling reload of just the guest-CPU state that changed with the VM-exit is -not-
1290 * sufficient since HM also needs to reload state related to VM-entry/VM-exit controls
1291 * etc. So signal reloading of the entire state. It does not seem worth making this any
1292 * more fine grained at the moment.
1293 */
1294 CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_ALL);
1295 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
1296
1297 /*
1298 * Make sure we need to merge the guest VMCS controls with the nested-guest
1299 * VMCS controls on the next nested-guest VM-entry.
1300 */
1301 pVCpu->hm.s.vmx.fMergedNstGstCtls = false;
1302
1303 /*
1304 * Flush the TLB before entering the outer guest execution (mainly required since the
1305 * APIC-access guest-physical address would have changed and probably more things in
1306 * the future).
1307 */
1308 pVCpu->hm.s.vmx.fSwitchedNstGstFlushTlb = true;
1309
1310 /** @todo Handle releasing of the page-mapping lock later. */
1311#if 0
1312 if (pVCpu->hm.s.vmx.fVirtApicPageLocked)
1313 {
1314 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->hm.s.vmx.PgMapLockVirtApic);
1315 pVCpu->hm.s.vmx.fVirtApicPageLocked = false;
1316 }
1317#endif
1318}
1319
1320
1321/**
1322 * Notification callback for when the nested hypervisor's current VMCS is loaded or
1323 * changed outside VMX R0 code (e.g. in IEM).
1324 *
1325 * This need -not- be called for modifications to the nested hypervisor's current
1326 * VMCS when the guest is in VMX non-root mode as VMCS shadowing is not applicable
1327 * there.
1328 *
1329 * @param pVCpu The cross context virtual CPU structure.
1330 *
1331 * @remarks Can be called from ring-0 as well as ring-3.
1332 */
1333VMM_INT_DECL(void) HMNotifyVmxNstGstCurrentVmcsChanged(PVMCPU pVCpu)
1334{
1335 CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_HWVIRT);
1336 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, CPUMCTX_EXTRN_HWVIRT);
1337
1338 /*
1339 * Make sure we need to copy the nested hypervisor's current VMCS into the shadow VMCS
1340 * on the next guest VM-entry.
1341 */
1342 pVCpu->hm.s.vmx.fCopiedNstGstToShadowVmcs = false;
1343}
1344
1345#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
1346
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette